diff --git a/pull313/.buildinfo b/pull313/.buildinfo new file mode 100644 index 00000000..e8152a73 --- /dev/null +++ b/pull313/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: f7ef13dbe321777adddfb93ffa218c5b +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/pull313/_images/11-bootstrapping7-1.png b/pull313/_images/11-bootstrapping7-1.png new file mode 100644 index 00000000..6bd170c3 Binary files /dev/null and b/pull313/_images/11-bootstrapping7-1.png differ diff --git a/pull313/_images/ML-paradigm-test.png b/pull313/_images/ML-paradigm-test.png new file mode 100644 index 00000000..328c2254 Binary files /dev/null and b/pull313/_images/ML-paradigm-test.png differ diff --git a/pull313/_images/NASA-API-Rho-Ophiuchi.png b/pull313/_images/NASA-API-Rho-Ophiuchi.png new file mode 100644 index 00000000..814aaaa4 Binary files /dev/null and b/pull313/_images/NASA-API-Rho-Ophiuchi.png differ diff --git a/pull313/_images/NASA-API-limits.png b/pull313/_images/NASA-API-limits.png new file mode 100644 index 00000000..610177aa Binary files /dev/null and b/pull313/_images/NASA-API-limits.png differ diff --git a/pull313/_images/NASA-API-parameters.png b/pull313/_images/NASA-API-parameters.png new file mode 100644 index 00000000..ea4cd337 Binary files /dev/null and b/pull313/_images/NASA-API-parameters.png differ diff --git a/pull313/_images/NASA-API-signup.png b/pull313/_images/NASA-API-signup.png new file mode 100644 index 00000000..b39e2470 Binary files /dev/null and b/pull313/_images/NASA-API-signup.png differ diff --git a/pull313/_images/activate-and-run-button-annotated.png b/pull313/_images/activate-and-run-button-annotated.png new file mode 100644 index 00000000..b278e158 Binary files /dev/null and b/pull313/_images/activate-and-run-button-annotated.png differ diff --git a/pull313/_images/add_collab_01.png 
b/pull313/_images/add_collab_01.png new file mode 100644 index 00000000..c7f2d223 Binary files /dev/null and b/pull313/_images/add_collab_01.png differ diff --git a/pull313/_images/add_collab_02.png b/pull313/_images/add_collab_02.png new file mode 100644 index 00000000..3e4150ed Binary files /dev/null and b/pull313/_images/add_collab_02.png differ diff --git a/pull313/_images/add_collab_03.png b/pull313/_images/add_collab_03.png new file mode 100644 index 00000000..3e73db8a Binary files /dev/null and b/pull313/_images/add_collab_03.png differ diff --git a/pull313/_images/add_collab_04.png b/pull313/_images/add_collab_04.png new file mode 100644 index 00000000..78d6bac6 Binary files /dev/null and b/pull313/_images/add_collab_04.png differ diff --git a/pull313/_images/add_collab_05.png b/pull313/_images/add_collab_05.png new file mode 100644 index 00000000..eff7aad3 Binary files /dev/null and b/pull313/_images/add_collab_05.png differ diff --git a/pull313/_images/altair_syntax.png b/pull313/_images/altair_syntax.png new file mode 100644 index 00000000..55676cdb Binary files /dev/null and b/pull313/_images/altair_syntax.png differ diff --git a/pull313/_images/chapter_overview.jpeg b/pull313/_images/chapter_overview.jpeg new file mode 100644 index 00000000..f5fdcf3b Binary files /dev/null and b/pull313/_images/chapter_overview.jpeg differ diff --git a/pull313/_images/clone_01.png b/pull313/_images/clone_01.png new file mode 100644 index 00000000..01b4982e Binary files /dev/null and b/pull313/_images/clone_01.png differ diff --git a/pull313/_images/clone_02.png b/pull313/_images/clone_02.png new file mode 100644 index 00000000..5f43e241 Binary files /dev/null and b/pull313/_images/clone_02.png differ diff --git a/pull313/_images/clone_03.png b/pull313/_images/clone_03.png new file mode 100644 index 00000000..162454f7 Binary files /dev/null and b/pull313/_images/clone_03.png differ diff --git a/pull313/_images/clone_04.png b/pull313/_images/clone_04.png new file mode 
100644 index 00000000..03274f6d Binary files /dev/null and b/pull313/_images/clone_04.png differ diff --git a/pull313/_images/code-cell-not-run.png b/pull313/_images/code-cell-not-run.png new file mode 100644 index 00000000..0d51635d Binary files /dev/null and b/pull313/_images/code-cell-not-run.png differ diff --git a/pull313/_images/code-cell-run.png b/pull313/_images/code-cell-run.png new file mode 100644 index 00000000..700ec6a1 Binary files /dev/null and b/pull313/_images/code-cell-run.png differ diff --git a/pull313/_images/completion_menu.png b/pull313/_images/completion_menu.png new file mode 100644 index 00000000..1de73d77 Binary files /dev/null and b/pull313/_images/completion_menu.png differ diff --git a/pull313/_images/convert-to-markdown-cell.png b/pull313/_images/convert-to-markdown-cell.png new file mode 100644 index 00000000..43a995c6 Binary files /dev/null and b/pull313/_images/convert-to-markdown-cell.png differ diff --git a/pull313/_images/craigslist_human.png b/pull313/_images/craigslist_human.png new file mode 100644 index 00000000..f944e7c5 Binary files /dev/null and b/pull313/_images/craigslist_human.png differ diff --git a/pull313/_images/create-new-code-cell.png b/pull313/_images/create-new-code-cell.png new file mode 100644 index 00000000..f6dc92cc Binary files /dev/null and b/pull313/_images/create-new-code-cell.png differ diff --git a/pull313/_images/create-new-file_01.png b/pull313/_images/create-new-file_01.png new file mode 100644 index 00000000..a47b35c4 Binary files /dev/null and b/pull313/_images/create-new-file_01.png differ diff --git a/pull313/_images/create-new-file_02.png b/pull313/_images/create-new-file_02.png new file mode 100644 index 00000000..cffa48ed Binary files /dev/null and b/pull313/_images/create-new-file_02.png differ diff --git a/pull313/_images/create-new-file_03.png b/pull313/_images/create-new-file_03.png new file mode 100644 index 00000000..ecf829ae Binary files /dev/null and 
b/pull313/_images/create-new-file_03.png differ diff --git a/pull313/_images/cv.png b/pull313/_images/cv.png new file mode 100644 index 00000000..51131ca5 Binary files /dev/null and b/pull313/_images/cv.png differ diff --git a/pull313/_images/data_frame_slides_cdn.004.jpeg b/pull313/_images/data_frame_slides_cdn.004.jpeg new file mode 100644 index 00000000..8675de1e Binary files /dev/null and b/pull313/_images/data_frame_slides_cdn.004.jpeg differ diff --git a/pull313/_images/docker-1.png b/pull313/_images/docker-1.png new file mode 100644 index 00000000..2fbf70df Binary files /dev/null and b/pull313/_images/docker-1.png differ diff --git a/pull313/_images/docker-2.png b/pull313/_images/docker-2.png new file mode 100644 index 00000000..96d628e7 Binary files /dev/null and b/pull313/_images/docker-2.png differ diff --git a/pull313/_images/docker-3.png b/pull313/_images/docker-3.png new file mode 100644 index 00000000..828c7163 Binary files /dev/null and b/pull313/_images/docker-3.png differ diff --git a/pull313/_images/docker-4.png b/pull313/_images/docker-4.png new file mode 100644 index 00000000..4f41fea0 Binary files /dev/null and b/pull313/_images/docker-4.png differ diff --git a/pull313/_images/ds-a-first-intro-graphic.jpg b/pull313/_images/ds-a-first-intro-graphic.jpg new file mode 100644 index 00000000..0b249383 Binary files /dev/null and b/pull313/_images/ds-a-first-intro-graphic.jpg differ diff --git a/pull313/_images/filesystem.jpeg b/pull313/_images/filesystem.jpeg new file mode 100644 index 00000000..4539e5bf Binary files /dev/null and b/pull313/_images/filesystem.jpeg differ diff --git a/pull313/_images/filter_rows.png b/pull313/_images/filter_rows.png new file mode 100644 index 00000000..5d15ca4f Binary files /dev/null and b/pull313/_images/filter_rows.png differ diff --git a/pull313/_images/filter_rows_and_columns.png b/pull313/_images/filter_rows_and_columns.png new file mode 100644 index 00000000..124a7dc4 Binary files /dev/null and 
b/pull313/_images/filter_rows_and_columns.png differ diff --git a/pull313/_images/generate-pat_01.png b/pull313/_images/generate-pat_01.png new file mode 100644 index 00000000..31cc94cc Binary files /dev/null and b/pull313/_images/generate-pat_01.png differ diff --git a/pull313/_images/generate-pat_02.png b/pull313/_images/generate-pat_02.png new file mode 100644 index 00000000..8148686a Binary files /dev/null and b/pull313/_images/generate-pat_02.png differ diff --git a/pull313/_images/generate-pat_03.png b/pull313/_images/generate-pat_03.png new file mode 100644 index 00000000..c887efcd Binary files /dev/null and b/pull313/_images/generate-pat_03.png differ diff --git a/pull313/_images/gentoo.jpg b/pull313/_images/gentoo.jpg new file mode 100644 index 00000000..8c505e42 Binary files /dev/null and b/pull313/_images/gentoo.jpg differ diff --git a/pull313/_images/git_add_01.png b/pull313/_images/git_add_01.png new file mode 100644 index 00000000..93486904 Binary files /dev/null and b/pull313/_images/git_add_01.png differ diff --git a/pull313/_images/git_add_02.png b/pull313/_images/git_add_02.png new file mode 100644 index 00000000..a75144ec Binary files /dev/null and b/pull313/_images/git_add_02.png differ diff --git a/pull313/_images/git_add_03.png b/pull313/_images/git_add_03.png new file mode 100644 index 00000000..8cd6b975 Binary files /dev/null and b/pull313/_images/git_add_03.png differ diff --git a/pull313/_images/git_commit_01.png b/pull313/_images/git_commit_01.png new file mode 100644 index 00000000..d02aa68f Binary files /dev/null and b/pull313/_images/git_commit_01.png differ diff --git a/pull313/_images/git_commit_03.png b/pull313/_images/git_commit_03.png new file mode 100644 index 00000000..d89f8e27 Binary files /dev/null and b/pull313/_images/git_commit_03.png differ diff --git a/pull313/_images/git_pull_00.png b/pull313/_images/git_pull_00.png new file mode 100644 index 00000000..44198790 Binary files /dev/null and b/pull313/_images/git_pull_00.png 
differ diff --git a/pull313/_images/git_pull_01.png b/pull313/_images/git_pull_01.png new file mode 100644 index 00000000..b859cb60 Binary files /dev/null and b/pull313/_images/git_pull_01.png differ diff --git a/pull313/_images/git_pull_02.png b/pull313/_images/git_pull_02.png new file mode 100644 index 00000000..850f3848 Binary files /dev/null and b/pull313/_images/git_pull_02.png differ diff --git a/pull313/_images/git_pull_03.png b/pull313/_images/git_pull_03.png new file mode 100644 index 00000000..e79d7951 Binary files /dev/null and b/pull313/_images/git_pull_03.png differ diff --git a/pull313/_images/git_pull_04.png b/pull313/_images/git_pull_04.png new file mode 100644 index 00000000..bc3f11f5 Binary files /dev/null and b/pull313/_images/git_pull_04.png differ diff --git a/pull313/_images/git_push_01.png b/pull313/_images/git_push_01.png new file mode 100644 index 00000000..d8695eb4 Binary files /dev/null and b/pull313/_images/git_push_01.png differ diff --git a/pull313/_images/git_push_02.png b/pull313/_images/git_push_02.png new file mode 100644 index 00000000..0656daa4 Binary files /dev/null and b/pull313/_images/git_push_02.png differ diff --git a/pull313/_images/git_push_03.png b/pull313/_images/git_push_03.png new file mode 100644 index 00000000..652acf1f Binary files /dev/null and b/pull313/_images/git_push_03.png differ diff --git a/pull313/_images/git_push_04.png b/pull313/_images/git_push_04.png new file mode 100644 index 00000000..0fc29598 Binary files /dev/null and b/pull313/_images/git_push_04.png differ diff --git a/pull313/_images/help_dialog.png b/pull313/_images/help_dialog.png new file mode 100644 index 00000000..c2197ab7 Binary files /dev/null and b/pull313/_images/help_dialog.png differ diff --git a/pull313/_images/help_read_csv.png b/pull313/_images/help_read_csv.png new file mode 100644 index 00000000..93c06460 Binary files /dev/null and b/pull313/_images/help_read_csv.png differ diff --git a/pull313/_images/intro-bootstrap.jpeg 
b/pull313/_images/intro-bootstrap.jpeg new file mode 100644 index 00000000..69b2aeec Binary files /dev/null and b/pull313/_images/intro-bootstrap.jpeg differ diff --git a/pull313/_images/issue_01.png b/pull313/_images/issue_01.png new file mode 100644 index 00000000..55b9a8cf Binary files /dev/null and b/pull313/_images/issue_01.png differ diff --git a/pull313/_images/issue_02.png b/pull313/_images/issue_02.png new file mode 100644 index 00000000..17c5d646 Binary files /dev/null and b/pull313/_images/issue_02.png differ diff --git a/pull313/_images/issue_03.png b/pull313/_images/issue_03.png new file mode 100644 index 00000000..12e8e26e Binary files /dev/null and b/pull313/_images/issue_03.png differ diff --git a/pull313/_images/issue_04.png b/pull313/_images/issue_04.png new file mode 100644 index 00000000..d16a7234 Binary files /dev/null and b/pull313/_images/issue_04.png differ diff --git a/pull313/_images/issue_06.png b/pull313/_images/issue_06.png new file mode 100644 index 00000000..8ed17c7f Binary files /dev/null and b/pull313/_images/issue_06.png differ diff --git a/pull313/_images/jlab-1.png b/pull313/_images/jlab-1.png new file mode 100644 index 00000000..f83bd42a Binary files /dev/null and b/pull313/_images/jlab-1.png differ diff --git a/pull313/_images/jlab-2.png b/pull313/_images/jlab-2.png new file mode 100644 index 00000000..7a074938 Binary files /dev/null and b/pull313/_images/jlab-2.png differ diff --git a/pull313/_images/jupyter.png b/pull313/_images/jupyter.png new file mode 100644 index 00000000..91a1ff85 Binary files /dev/null and b/pull313/_images/jupyter.png differ diff --git a/pull313/_images/launcher-annotated.png b/pull313/_images/launcher-annotated.png new file mode 100644 index 00000000..86e1565a Binary files /dev/null and b/pull313/_images/launcher-annotated.png differ diff --git a/pull313/_images/markdown-cell-not-run.png b/pull313/_images/markdown-cell-not-run.png new file mode 100644 index 00000000..4740a317 Binary files /dev/null 
and b/pull313/_images/markdown-cell-not-run.png differ diff --git a/pull313/_images/markdown-cell-run.png b/pull313/_images/markdown-cell-run.png new file mode 100644 index 00000000..001cba76 Binary files /dev/null and b/pull313/_images/markdown-cell-run.png differ diff --git a/pull313/_images/merge_conflict_01.png b/pull313/_images/merge_conflict_01.png new file mode 100644 index 00000000..8567d62d Binary files /dev/null and b/pull313/_images/merge_conflict_01.png differ diff --git a/pull313/_images/merge_conflict_03.png b/pull313/_images/merge_conflict_03.png new file mode 100644 index 00000000..980718af Binary files /dev/null and b/pull313/_images/merge_conflict_03.png differ diff --git a/pull313/_images/merge_conflict_04.png b/pull313/_images/merge_conflict_04.png new file mode 100644 index 00000000..456a5cca Binary files /dev/null and b/pull313/_images/merge_conflict_04.png differ diff --git a/pull313/_images/merge_conflict_05.png b/pull313/_images/merge_conflict_05.png new file mode 100644 index 00000000..dd0e4e4f Binary files /dev/null and b/pull313/_images/merge_conflict_05.png differ diff --git a/pull313/_images/merge_conflict_06.png b/pull313/_images/merge_conflict_06.png new file mode 100644 index 00000000..6cb0ec41 Binary files /dev/null and b/pull313/_images/merge_conflict_06.png differ diff --git a/pull313/_images/new_repository_01.png b/pull313/_images/new_repository_01.png new file mode 100644 index 00000000..21c241c2 Binary files /dev/null and b/pull313/_images/new_repository_01.png differ diff --git a/pull313/_images/new_repository_02.png b/pull313/_images/new_repository_02.png new file mode 100644 index 00000000..87684b7e Binary files /dev/null and b/pull313/_images/new_repository_02.png differ diff --git a/pull313/_images/new_repository_03.png b/pull313/_images/new_repository_03.png new file mode 100644 index 00000000..f07121bb Binary files /dev/null and b/pull313/_images/new_repository_03.png differ diff --git 
a/pull313/_images/open_data_w_editor_01.png b/pull313/_images/open_data_w_editor_01.png new file mode 100644 index 00000000..4197ca4d Binary files /dev/null and b/pull313/_images/open_data_w_editor_01.png differ diff --git a/pull313/_images/open_data_w_editor_02.png b/pull313/_images/open_data_w_editor_02.png new file mode 100644 index 00000000..1e038950 Binary files /dev/null and b/pull313/_images/open_data_w_editor_02.png differ diff --git a/pull313/_images/out-of-order-1.png b/pull313/_images/out-of-order-1.png new file mode 100644 index 00000000..e64fb825 Binary files /dev/null and b/pull313/_images/out-of-order-1.png differ diff --git a/pull313/_images/out-of-order-2.png b/pull313/_images/out-of-order-2.png new file mode 100644 index 00000000..9f1d0651 Binary files /dev/null and b/pull313/_images/out-of-order-2.png differ diff --git a/pull313/_images/out-of-order-3.png b/pull313/_images/out-of-order-3.png new file mode 100644 index 00000000..902d5f18 Binary files /dev/null and b/pull313/_images/out-of-order-3.png differ diff --git a/pull313/_images/pandas_dataframe_series-3.png b/pull313/_images/pandas_dataframe_series-3.png new file mode 100644 index 00000000..6a2eea54 Binary files /dev/null and b/pull313/_images/pandas_dataframe_series-3.png differ diff --git a/pull313/_images/pandas_dataframe_series.png b/pull313/_images/pandas_dataframe_series.png new file mode 100644 index 00000000..75ffc893 Binary files /dev/null and b/pull313/_images/pandas_dataframe_series.png differ diff --git a/pull313/_images/pandas_melt_args_labels.png b/pull313/_images/pandas_melt_args_labels.png new file mode 100644 index 00000000..a24eb439 Binary files /dev/null and b/pull313/_images/pandas_melt_args_labels.png differ diff --git a/pull313/_images/pandas_melt_wide-long.png b/pull313/_images/pandas_melt_wide-long.png new file mode 100644 index 00000000..03b30975 Binary files /dev/null and b/pull313/_images/pandas_melt_wide-long.png differ diff --git 
a/pull313/_images/pandas_pivot_args_labels.png b/pull313/_images/pandas_pivot_args_labels.png new file mode 100644 index 00000000..0f961aaf Binary files /dev/null and b/pull313/_images/pandas_pivot_args_labels.png differ diff --git a/pull313/_images/pandas_pivot_long-wide.png b/pull313/_images/pandas_pivot_long-wide.png new file mode 100644 index 00000000..faff307b Binary files /dev/null and b/pull313/_images/pandas_pivot_long-wide.png differ diff --git a/pull313/_images/pen-tool_01.png b/pull313/_images/pen-tool_01.png new file mode 100644 index 00000000..8c329704 Binary files /dev/null and b/pull313/_images/pen-tool_01.png differ diff --git a/pull313/_images/pen-tool_02.png b/pull313/_images/pen-tool_02.png new file mode 100644 index 00000000..6f8499de Binary files /dev/null and b/pull313/_images/pen-tool_02.png differ diff --git a/pull313/_images/pen-tool_03.png b/pull313/_images/pen-tool_03.png new file mode 100644 index 00000000..4f63cf23 Binary files /dev/null and b/pull313/_images/pen-tool_03.png differ diff --git a/pull313/_images/pivot_functions.001.jpeg b/pull313/_images/pivot_functions.001.jpeg new file mode 100644 index 00000000..fc5123f3 Binary files /dev/null and b/pull313/_images/pivot_functions.001.jpeg differ diff --git a/pull313/_images/pivot_functions.002.jpeg b/pull313/_images/pivot_functions.002.jpeg new file mode 100644 index 00000000..961c0813 Binary files /dev/null and b/pull313/_images/pivot_functions.002.jpeg differ diff --git a/pull313/_images/plot-sketches-1.png b/pull313/_images/plot-sketches-1.png new file mode 100644 index 00000000..47bb6b0c Binary files /dev/null and b/pull313/_images/plot-sketches-1.png differ diff --git a/pull313/_images/png-vs-svg.png b/pull313/_images/png-vs-svg.png new file mode 100644 index 00000000..fd8be581 Binary files /dev/null and b/pull313/_images/png-vs-svg.png differ diff --git a/pull313/_images/population_vs_sample.png b/pull313/_images/population_vs_sample.png new file mode 100644 index 
00000000..86708d5c Binary files /dev/null and b/pull313/_images/population_vs_sample.png differ diff --git a/pull313/_images/read_csv_function.png b/pull313/_images/read_csv_function.png new file mode 100644 index 00000000..4593eaa9 Binary files /dev/null and b/pull313/_images/read_csv_function.png differ diff --git a/pull313/_images/restart-kernel-run-all.png b/pull313/_images/restart-kernel-run-all.png new file mode 100644 index 00000000..03abe2da Binary files /dev/null and b/pull313/_images/restart-kernel-run-all.png differ diff --git a/pull313/_images/select_columns.png b/pull313/_images/select_columns.png new file mode 100644 index 00000000..f316180d Binary files /dev/null and b/pull313/_images/select_columns.png differ diff --git a/pull313/_images/sg1.png b/pull313/_images/sg1.png new file mode 100644 index 00000000..71479085 Binary files /dev/null and b/pull313/_images/sg1.png differ diff --git a/pull313/_images/sg2.png b/pull313/_images/sg2.png new file mode 100644 index 00000000..a3d49b5e Binary files /dev/null and b/pull313/_images/sg2.png differ diff --git a/pull313/_images/sg3.png b/pull313/_images/sg3.png new file mode 100644 index 00000000..b87cfc28 Binary files /dev/null and b/pull313/_images/sg3.png differ diff --git a/pull313/_images/sg4.png b/pull313/_images/sg4.png new file mode 100644 index 00000000..afb982b8 Binary files /dev/null and b/pull313/_images/sg4.png differ diff --git a/pull313/_images/sort_values.png b/pull313/_images/sort_values.png new file mode 100644 index 00000000..770ce22d Binary files /dev/null and b/pull313/_images/sort_values.png differ diff --git a/pull313/_images/spreadsheet_vs_df.png b/pull313/_images/spreadsheet_vs_df.png new file mode 100644 index 00000000..2887c05b Binary files /dev/null and b/pull313/_images/spreadsheet_vs_df.png differ diff --git a/pull313/_images/str-split_args_labels.png b/pull313/_images/str-split_args_labels.png new file mode 100644 index 00000000..aa7531ca Binary files /dev/null and 
b/pull313/_images/str-split_args_labels.png differ diff --git a/pull313/_images/summarize.001.jpeg b/pull313/_images/summarize.001.jpeg new file mode 100644 index 00000000..7960e61e Binary files /dev/null and b/pull313/_images/summarize.001.jpeg differ diff --git a/pull313/_images/summarize.002.jpeg b/pull313/_images/summarize.002.jpeg new file mode 100644 index 00000000..97995520 Binary files /dev/null and b/pull313/_images/summarize.002.jpeg differ diff --git a/pull313/_images/summarize.004.jpeg b/pull313/_images/summarize.004.jpeg new file mode 100644 index 00000000..476ad698 Binary files /dev/null and b/pull313/_images/summarize.004.jpeg differ diff --git a/pull313/_images/summarize.005.jpeg b/pull313/_images/summarize.005.jpeg new file mode 100644 index 00000000..d1a4f710 Binary files /dev/null and b/pull313/_images/summarize.005.jpeg differ diff --git a/pull313/_images/tidy_data.001-cropped.jpeg b/pull313/_images/tidy_data.001-cropped.jpeg new file mode 100644 index 00000000..edae0dd4 Binary files /dev/null and b/pull313/_images/tidy_data.001-cropped.jpeg differ diff --git a/pull313/_images/train-test-overview.jpeg b/pull313/_images/train-test-overview.jpeg new file mode 100644 index 00000000..8c295672 Binary files /dev/null and b/pull313/_images/train-test-overview.jpeg differ diff --git a/pull313/_images/training_test.jpeg b/pull313/_images/training_test.jpeg new file mode 100644 index 00000000..16f9bd35 Binary files /dev/null and b/pull313/_images/training_test.jpeg differ diff --git a/pull313/_images/ubuntu-docker.png b/pull313/_images/ubuntu-docker.png new file mode 100644 index 00000000..2db3e674 Binary files /dev/null and b/pull313/_images/ubuntu-docker.png differ diff --git a/pull313/_images/upload-files_01.png b/pull313/_images/upload-files_01.png new file mode 100644 index 00000000..2dccf9b3 Binary files /dev/null and b/pull313/_images/upload-files_01.png differ diff --git a/pull313/_images/upload-files_02.png b/pull313/_images/upload-files_02.png 
new file mode 100644 index 00000000..8386d22a Binary files /dev/null and b/pull313/_images/upload-files_02.png differ diff --git a/pull313/_images/vc-ba2-add.png b/pull313/_images/vc-ba2-add.png new file mode 100644 index 00000000..41639b0b Binary files /dev/null and b/pull313/_images/vc-ba2-add.png differ diff --git a/pull313/_images/vc-ba3-commit.png b/pull313/_images/vc-ba3-commit.png new file mode 100644 index 00000000..c56f66cb Binary files /dev/null and b/pull313/_images/vc-ba3-commit.png differ diff --git a/pull313/_images/vc1-no-changes.png b/pull313/_images/vc1-no-changes.png new file mode 100644 index 00000000..d835de6b Binary files /dev/null and b/pull313/_images/vc1-no-changes.png differ diff --git a/pull313/_images/vc2-changes.png b/pull313/_images/vc2-changes.png new file mode 100644 index 00000000..37a7db62 Binary files /dev/null and b/pull313/_images/vc2-changes.png differ diff --git a/pull313/_images/vc5-push.png b/pull313/_images/vc5-push.png new file mode 100644 index 00000000..911c34f0 Binary files /dev/null and b/pull313/_images/vc5-push.png differ diff --git a/pull313/_images/vc6-remote-changes.png b/pull313/_images/vc6-remote-changes.png new file mode 100644 index 00000000..3cfd7200 Binary files /dev/null and b/pull313/_images/vc6-remote-changes.png differ diff --git a/pull313/_images/vc7-pull.png b/pull313/_images/vc7-pull.png new file mode 100644 index 00000000..a7045bb2 Binary files /dev/null and b/pull313/_images/vc7-pull.png differ diff --git a/pull313/_sources/acknowledgements.md b/pull313/_sources/acknowledgements.md new file mode 100644 index 00000000..233e18fc --- /dev/null +++ b/pull313/_sources/acknowledgements.md @@ -0,0 +1,64 @@ +--- +jupytext: + cell_metadata_filter: -all + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +# Acknowledgments + 
+We'd like to thank everyone that has contributed to the development of +[*Data Science: A First Introduction*](https://datasciencebook.ca). +This is an open source textbook that began as a collection of course readings +for DSCI 100, a new introductory data science course +at the University of British Columbia (UBC). +Several faculty members in the UBC Department of Statistics +were pivotal in shaping the direction of that course, +and as such, contributed greatly to the broad structure and +list of topics in this book. We would especially like to thank Matías +Salibían-Barrera for his mentorship during the initial development and roll-out +of both DSCI 100 and this book. His door was always open when +we needed to chat about how to best introduce and teach data science to our first-year students. +We would also like to thank Gabriela Cohen Freue for her DSCI 561 (Regression I) teaching materials +from the UBC Master of Data Science program, as some of our linear regression figures were inspired from these. + +We would also like to thank all those who contributed to the process of +publishing this book. In particular, we would like to thank all of our reviewers for their feedback and suggestions: +Rohan Alexander, Isabella Ghement, Virgilio Gómez Rubio, Albert Kim, Adam Loy, Maria Prokofieva, Emily Riederer, and Greg Wilson. +The book was improved substantially by their insights. +We would like to give special thanks to Jim Zidek +for his support and encouragement throughout the process, and to +Roger Peng for graciously offering to write the Foreword. + +Finally, we owe a debt of gratitude to all of the students of DSCI 100 over the past +few years. They provided invaluable feedback on the book and worksheets; +they found bugs for us (and stood by very patiently in class while +we frantically fixed those bugs); and they brought a level of enthusiasm to the class +that sustained us during the hard work of creating a new course and writing a textbook. 
+Our interactions with them taught us how to teach data science, and that learning +is reflected in the content of this book. + + +## Acknowledgments for the Python Edition + +We'd like to thank everyone that has contributed to the development of +[*Data Science: A First Introduction (Python Edition)*](https://python.datasciencebook.ca). +This is an open source Python translation of the original [*Data Science: A First Introduction*](https://datasciencebook.ca) +book, which focused on the R programming language. Both of these books are +used to teach DSCI 100 at the University of British Columbia (UBC). +We would like to give special thanks to Navya Dahiya and Gloria Ye +for completing the first round of translation of the R material to Python, +and to Philip Austin for his leadership and guidance throughout the translation process. +We also gratefully acknowledge the UBC Open Educational Resources Fund +and the UBC Department of Statistics for supporting the translation of +the original R textbook and exercises to the python programming language. + + diff --git a/pull313/_sources/authors.md b/pull313/_sources/authors.md new file mode 100644 index 00000000..e365683c --- /dev/null +++ b/pull313/_sources/authors.md @@ -0,0 +1,73 @@ +--- +jupytext: + cell_metadata_filter: -all + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +# About the authors + +The original version of this textbook was developed by Tiffany Timbers, Trevor +Campbell, and Melissa Lee for the R programming language. The content of the R +textbook was adapted to Python by Trevor Campbell, Joel Ostblom, and Lindsey +Heagy. + +**[Trevor Campbell](https://trevorcampbell.me/)** is an Associate Professor in the Department of Statistics at +the University of British Columbia. 
His research focuses on automated, scalable +Bayesian inference algorithms, Bayesian nonparametrics, streaming data, and +Bayesian theory. He was previously a postdoctoral associate advised by Tamara +Broderick in the Computer Science and Artificial Intelligence Laboratory +(CSAIL) and Institute for Data, Systems, and Society (IDSS) at MIT, a Ph.D. +candidate under Jonathan How in the Laboratory for Information and Decision +Systems (LIDS) at MIT, and before that he was in the Engineering Science +program at the University of Toronto. + ++++ + +**[Tiffany Timbers](https://www.tiffanytimbers.com/)** is an Associate Professor of Teaching in the Department of +Statistics and Co-Director for the Master of Data Science program (Vancouver +Option) at the University of British Columbia. In these roles she teaches and +develops curriculum around the responsible application of Data Science to solve +real-world problems. One of her favorite courses she teaches is a graduate +course on collaborative software development, which focuses on teaching how to +create R and Python packages using modern tools and workflows. ++++ + +**[Melissa Lee](https://www.stat.ubc.ca/users/melissa-lee)** is an Assistant Professor of Teaching in the Department of +Statistics at the University of British Columbia. She teaches and develops +curriculum for undergraduate statistics and data science courses. Her work +focuses on student-centered approaches to teaching, developing and assessing +open educational resources, and promoting equity, diversity, and inclusion +initiatives. + ++++ + +**[Lindsey Heagy](https://lindseyjh.ca/)** is an Assistant Professor in the Department of Earth, Ocean, and Atmospheric +Sciences and director of the Geophysical Inversion Facility at the University of British Columbia. +Her research combines computational methods in numerical simulations, inversions, and machine +learning to answer questions about the subsurface of the Earth. 
Primary applications include +mineral exploration, carbon sequestration, groundwater and environmental studies. She +completed her BSc at the University of Alberta, her PhD at the University of British Columbia, +and held a Postdoctoral research position at the University of California Berkeley prior to +starting her current position at UBC. + ++++ + +**[Joel Ostblom](https://joelostblom.com/)** is an Assistant Professor of Teaching in the Department of +Statistics at the University of British Columbia. +During his PhD, Joel developed a passion for data science and reproducibility +through the development of quantitative image analysis pipelines for studying +stem cell and developmental biology. He has since co-created or lead the +development of several courses and workshops at the University of Toronto and +is now an assistant professor of teaching in the statistics department at the +University of British Columbia. Joel cares deeply about spreading data literacy +and excitement over programmatic data analysis, which is reflected in his +contributions to open source projects and data science learning resources. 
diff --git a/pull313/_sources/classification1.md b/pull313/_sources/classification1.md new file mode 100644 index 00000000..38b14e42 --- /dev/null +++ b/pull313/_sources/classification1.md @@ -0,0 +1,1969 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.7 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +```{code-cell} ipython3 +:tags: [remove-cell] +from chapter_preamble import * +from IPython.display import HTML +from sklearn.metrics.pairwise import euclidean_distances +import numpy as np +import plotly.express as px +import plotly.graph_objects as go +``` + +(classification1)= +# Classification I: training & predicting + +## Overview +In previous chapters, we focused solely on descriptive and exploratory +data analysis questions. +This chapter and the next together serve as our first +foray into answering *predictive* questions about data. In particular, we will +focus on *classification*, i.e., using one or more +variables to predict the value of a categorical variable of interest. This chapter +will cover the basics of classification, how to preprocess data to make it +suitable for use in a classifier, and how to use our observed data to make +predictions. The next chapter will focus on how to evaluate how accurate the +predictions from our classifier are, as well as how to improve our classifier +(where possible) to maximize its accuracy. + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + +- Recognize situations where a classifier would be appropriate for making predictions. +- Describe what a training data set is and how it is used in classification. +- Interpret the output of a classifier. +- Compute, by hand, the straight-line (Euclidean) distance between points on a graph when there are two predictor variables. 
+- Explain the $K$-nearest neighbor classification algorithm. +- Perform $K$-nearest neighbor classification in Python using `scikit-learn`. +- Use `StandardScaler` and `make_column_transformer` to preprocess data to be centered and scaled. +- Use `sample` to preprocess data to be balanced. +- Combine preprocessing and model training using `make_pipeline`. + ++++ + +## The classification problem + +```{index} predictive question, classification, class, categorical variable +``` + +```{index} see: feature ; predictor +``` + +In many situations, we want to make predictions based on the current situation +as well as past experiences. For instance, a doctor may want to diagnose a +patient as either diseased or healthy based on their symptoms and the doctor's +past experience with patients; an email provider might want to tag a given +email as "spam" or "not spam" based on the email's text and past email text data; +or a credit card company may want to predict whether a purchase is fraudulent based +on the current purchase item, amount, and location as well as past purchases. +These tasks are all examples of **classification**, i.e., predicting a +categorical class (sometimes called a *label*) for an observation given its +other variables (sometimes called *features*). + +```{index} training set +``` + +Generally, a classifier assigns an observation without a known class (e.g., a new patient) +to a class (e.g., diseased or healthy) on the basis of how similar it is to other observations +for which we do know the class (e.g., previous patients with known diseases and +symptoms). These observations with known classes that we use as a basis for +prediction are called a **training set**; this name comes from the fact that +we use these data to train, or teach, our classifier. Once taught, we can use +the classifier to make predictions on new data for which we do not know the class. 
+ +```{index} K-nearest neighbors, classification; binary +``` + +There are many possible methods that we could use to predict +a categorical class/label for an observation. In this book, we will +focus on the widely used **$K$-nearest neighbors** algorithm {cite:p}`knnfix,knncover`. +In your future studies, you might encounter decision trees, support vector machines (SVMs), +logistic regression, neural networks, and more; see the additional resources +section at the end of the next chapter for where to begin learning more about +these other methods. It is also worth mentioning that there are many +variations on the basic classification problem. For example, +we focus on the setting of **binary classification** where only two +classes are involved (e.g., a diagnosis of either healthy or diseased), but you may +also run into multiclass classification problems with more than two +categories (e.g., a diagnosis of healthy, bronchitis, pneumonia, or a common cold). + +## Exploring a data set + +```{index} breast cancer, question; classification +``` + +In this chapter and the next, we will study a data set of +[digitized breast cancer image features](https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29), +created by Dr. William H. Wolberg, W. Nick Street, and Olvi L. Mangasarian {cite:p}`streetbreastcancer`. +Each row in the data set represents an +image of a tumor sample, including the diagnosis (benign or malignant) and +several other measurements (nucleus texture, perimeter, area, and more). +Diagnosis for each image was conducted by physicians. + +As with all data analyses, we first need to formulate a precise question that +we want to answer. Here, the question is *predictive*: can +we use the tumor +image measurements available to us to predict whether a future tumor image +(with unknown diagnosis) shows a benign or malignant tumor? 
Answering this +question is important because traditional, non-data-driven methods for tumor +diagnosis are quite subjective and dependent upon how skilled and experienced +the diagnosing physician is. Furthermore, benign tumors are not normally +dangerous; the cells stay in the same place, and the tumor stops growing before +it gets very large. By contrast, in malignant tumors, the cells invade the +surrounding tissue and spread into nearby organs, where they can cause serious +damage {cite:p}`stanfordhealthcare`. +Thus, it is important to quickly and accurately diagnose the tumor type to +guide patient treatment. + ++++ + +### Loading the cancer data + +Our first step is to load, wrangle, and explore the data using visualizations +in order to better understand the data we are working with. We start by +loading the `pandas` and `altair` packages needed for our analysis. + +```{code-cell} ipython3 +import pandas as pd +import altair as alt +``` + +In this case, the file containing the breast cancer data set is a `.csv` +file with headers. We'll use the `read_csv` function with no additional +arguments, and then inspect its contents: + +```{index} read function; read\_csv +``` + +```{code-cell} ipython3 +:tags: ["output_scroll"] +cancer = pd.read_csv("data/wdbc.csv") +cancer +``` + +### Describing the variables in the cancer data set + +Breast tumors can be diagnosed by performing a *biopsy*, a process where +tissue is removed from the body and examined for the presence of disease. +Traditionally these procedures were quite invasive; modern methods such as fine +needle aspiration, used to collect the present data set, extract only a small +amount of tissue and are less invasive. Based on a digital image of each breast +tissue sample collected for this data set, ten different variables were measured +for each cell nucleus in the image (items 3–12 of the list of variables below), and then the mean + for each variable across the nuclei was recorded. 
As part of the +data preparation, these values have been *standardized (centered and scaled)*; we will discuss what this +means and why we do it later in this chapter. Each image additionally was given +a unique ID and a diagnosis by a physician. Therefore, the +total set of variables per image in this data set is: + +1. ID: identification number +2. Class: the diagnosis (M = malignant or B = benign) +3. Radius: the mean of distances from center to points on the perimeter +4. Texture: the standard deviation of gray-scale values +5. Perimeter: the length of the surrounding contour +6. Area: the area inside the contour +7. Smoothness: the local variation in radius lengths +8. Compactness: the ratio of squared perimeter and area +9. Concavity: severity of concave portions of the contour +10. Concave Points: the number of concave portions of the contour +11. Symmetry: how similar the nucleus is when mirrored +12. Fractal Dimension: a measurement of how "rough" the perimeter is + ++++ + +```{index} info +``` + +Below we use the `info` method to preview the data frame. This method can +make it easier to inspect the data when we have a lot of columns: +it prints only the column names down the page (instead of across), +as well as their data types and the number of non-missing entries. + +```{code-cell} ipython3 +cancer.info() +``` + +```{index} unique +``` + +From the summary of the data above, we can see that `Class` is of type `object`. +We can use the `unique` method on the `Class` column to see all unique values +present in that column. We see that there are two diagnoses: +benign, represented by `"B"`, and malignant, represented by `"M"`. + +```{code-cell} ipython3 +cancer["Class"].unique() +``` + +We will improve the readability of our analysis +by renaming `"M"` to `"Malignant"` and `"B"` to `"Benign"` using the `replace` +method. The `replace` method takes one argument: a dictionary that maps +previous values to desired new values. 
+We will verify the result using the `unique` method. + +```{index} replace +``` + +```{code-cell} ipython3 +cancer["Class"] = cancer["Class"].replace({ + "M" : "Malignant", + "B" : "Benign" +}) + +cancer["Class"].unique() +``` + +### Exploring the cancer data + +```{index} groupby, count +``` + +```{code-cell} ipython3 +:tags: [remove-cell] +glue("benign_count", "{:0.0f}".format(cancer["Class"].value_counts()["Benign"])) +glue("benign_pct", "{:0.0f}".format(100*cancer["Class"].value_counts(normalize=True)["Benign"])) +glue("malignant_count", "{:0.0f}".format(cancer["Class"].value_counts()["Malignant"])) +glue("malignant_pct", "{:0.0f}".format(100*cancer["Class"].value_counts(normalize=True)["Malignant"])) +``` + +Before we start doing any modeling, let's explore our data set. Below we use +the `groupby` and `count` methods to find the number and percentage +of benign and malignant tumor observations in our data set. When paired with +`groupby`, `count` counts the number of observations for each value of the `Class` +variable. Then we calculate the percentage in each group by dividing by the total +number of observations and multiplying by 100. +The total number of observations equals the number of rows in the data frame, +which we can access via the `shape` attribute of the data frame +(`shape[0]` is the number of rows and `shape[1]` is the number of columns). +We have +{glue:text}`benign_count` ({glue:text}`benign_pct`\%) benign and +{glue:text}`malignant_count` ({glue:text}`malignant_pct`\%) malignant +tumor observations. + +```{code-cell} ipython3 +100 * cancer.groupby("Class").size() / cancer.shape[0] +``` + +```{index} value_counts +``` + +The `pandas` package also has a more convenient specialized `value_counts` method for +counting the number of occurrences of each value in a column. If we pass no arguments +to the method, it outputs a series containing the number of occurences +of each value. 
If we instead pass the argument `normalize=True`, it instead prints the fraction +of occurrences of each value. + +```{code-cell} ipython3 +cancer["Class"].value_counts() +``` + +```{code-cell} ipython3 +cancer["Class"].value_counts(normalize=True) +``` + +```{index} visualization; scatter +``` + +Next, let's draw a colored scatter plot to visualize the relationship between the +perimeter and concavity variables. Recall that the default palette in `altair` +is colorblind-friendly, so we can stick with that here. + +```{code-cell} ipython3 +perim_concav = alt.Chart(cancer).mark_circle().encode( + x=alt.X("Perimeter").title("Perimeter (standardized)"), + y=alt.Y("Concavity").title("Concavity (standardized)"), + color=alt.Color("Class").title("Diagnosis") +) +perim_concav +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:05-scatter +:figclass: caption-hack + +Scatter plot of concavity versus perimeter colored by diagnosis label. +``` + ++++ + +In {numref}`fig:05-scatter`, we can see that malignant observations typically fall in +the upper right-hand corner of the plot area. By contrast, benign +observations typically fall in the lower left-hand corner of the plot. In other words, +benign observations tend to have lower concavity and perimeter values, and malignant +ones tend to have larger values. Suppose we +obtain a new observation not in the current data set that has all the variables +measured *except* the label (i.e., an image without the physician's diagnosis +for the tumor class). We could compute the standardized perimeter and concavity values, +resulting in values of, say, 1 and 1. Could we use this information to classify +that observation as benign or malignant? Based on the scatter plot, how might +you classify that new observation? 
If the standardized concavity and perimeter +values are 1 and 1 respectively, the point would lie in the middle of the +orange cloud of malignant points and thus we could probably classify it as +malignant. Based on our visualization, it seems like +it may be possible to make accurate predictions of the `Class` variable (i.e., a diagnosis) for +tumor images with unknown diagnoses. + ++++ + +## Classification with $K$-nearest neighbors + +```{code-cell} ipython3 +:tags: [remove-cell] + +new_point = [2, 4] +glue("new_point_1_0", "{:.1f}".format(new_point[0])) +glue("new_point_1_1", "{:.1f}".format(new_point[1])) +attrs = ["Perimeter", "Concavity"] +points_df = pd.DataFrame( + {"Perimeter": new_point[0], "Concavity": new_point[1], "Class": ["Unknown"]} +) +perim_concav_with_new_point_df = pd.concat((cancer, points_df), ignore_index=True) +# Find the euclidean distances from the new point to each of the points +# in the orginal data set +my_distances = euclidean_distances(perim_concav_with_new_point_df[attrs])[ + len(cancer) +][:-1] +``` + +```{index} K-nearest neighbors; classification +``` + +In order to actually make predictions for new observations in practice, we +will need a classification algorithm. +In this book, we will use the $K$-nearest neighbors classification algorithm. +To predict the label of a new observation (here, classify it as either benign +or malignant), the $K$-nearest neighbors classifier generally finds the $K$ +"nearest" or "most similar" observations in our training set, and then uses +their diagnoses to make a prediction for the new observation's diagnosis. $K$ +is a number that we must choose in advance; for now, we will assume that someone has chosen +$K$ for us. We will cover how to choose $K$ ourselves in the next chapter. + +To illustrate the concept of $K$-nearest neighbors classification, we +will walk through an example. 
Suppose we have a +new observation, with standardized perimeter +of {glue:text}`new_point_1_0` and standardized concavity +of {glue:text}`new_point_1_1`, whose +diagnosis "Class" is unknown. This new observation is +depicted by the red, diamond point in {numref}`fig:05-knn-2`. + +```{code-cell} ipython3 +:tags: [remove-cell] + +perim_concav_with_new_point = ( + alt.Chart(perim_concav_with_new_point_df) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X("Perimeter").title("Perimeter (standardized)"), + y=alt.Y("Concavity").title("Concavity (standardized)"), + color=alt.Color("Class").title("Diagnosis"), + shape=alt.Shape("Class").scale(range=["circle", "circle", "diamond"]), + size=alt.condition("datum.Class == 'Unknown'", alt.value(100), alt.value(30)), + stroke=alt.condition("datum.Class == 'Unknown'", alt.value("black"), alt.value(None)), + ) +) +glue('fig:05-knn-2', perim_concav_with_new_point, display=True) +``` + +:::{glue:figure} fig:05-knn-2 +:name: fig:05-knn-2 + +Scatter plot of concavity versus perimeter with new observation represented as a red diamond. +::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +near_neighbor_df = pd.concat([ + cancer.loc[[np.argmin(my_distances)], attrs], + perim_concav_with_new_point_df.loc[[cancer.shape[0]], attrs], +]) +glue("1-neighbor_per", "{:.1f}".format(near_neighbor_df.iloc[0, :]["Perimeter"])) +glue("1-neighbor_con", "{:.1f}".format(near_neighbor_df.iloc[0, :]["Concavity"])) +``` + +{numref}`fig:05-knn-3` shows that the nearest point to this new observation is +**malignant** and located at the coordinates ({glue:text}`1-neighbor_per`, +{glue:text}`1-neighbor_con`). The idea here is that if a point is close to another +in the scatter plot, then the perimeter and concavity values are similar, +and so we may expect that they would have the same diagnosis. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] + +line = ( + alt.Chart(near_neighbor_df) + .mark_line() + .encode(x="Perimeter", y="Concavity", color=alt.value("black")) +) + +glue('fig:05-knn-3', (perim_concav_with_new_point + line), display=True) +``` + +:::{glue:figure} fig:05-knn-3 +:name: fig:05-knn-3 + +Scatter plot of concavity versus perimeter. The new observation is represented +as a red diamond with a line to the one nearest neighbor, which has a malignant +label. +::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +new_point = [0.2, 3.3] +attrs = ["Perimeter", "Concavity"] +points_df2 = pd.DataFrame( + {"Perimeter": new_point[0], "Concavity": new_point[1], "Class": ["Unknown"]} +) +perim_concav_with_new_point_df2 = pd.concat((cancer, points_df2), ignore_index=True) +# Find the euclidean distances from the new point to each of the points +# in the orginal data set +my_distances2 = euclidean_distances(perim_concav_with_new_point_df2[attrs])[ + len(cancer) +][:-1] +glue("new_point_2_0", "{:.1f}".format(new_point[0])) +glue("new_point_2_1", "{:.1f}".format(new_point[1])) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +perim_concav_with_new_point2 = ( + alt.Chart( + perim_concav_with_new_point_df2, + ) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X("Perimeter", title="Perimeter (standardized)"), + y=alt.Y("Concavity", title="Concavity (standardized)"), + color=alt.Color( + "Class", + title="Diagnosis", + ), + shape=alt.Shape( + "Class", scale=alt.Scale(range=["circle", "circle", "diamond"]) + ), + size=alt.condition("datum.Class == 'Unknown'", alt.value(80), alt.value(30)), + stroke=alt.condition("datum.Class == 'Unknown'", alt.value("black"), alt.value(None)), + ) +) + +near_neighbor_df2 = pd.concat([ + cancer.loc[[np.argmin(my_distances2)], attrs], + perim_concav_with_new_point_df2.loc[[cancer.shape[0]], attrs], +]) +line2 = alt.Chart(near_neighbor_df2).mark_line().encode( + x="Perimeter", + y="Concavity", + 
color=alt.value("black") +) + +glue("2-neighbor_per", "{:.1f}".format(near_neighbor_df2.iloc[0, :]["Perimeter"])) +glue("2-neighbor_con", "{:.1f}".format(near_neighbor_df2.iloc[0, :]["Concavity"])) +glue('fig:05-knn-4', (perim_concav_with_new_point2 + line2), display=True) +``` + +Suppose we have another new observation with standardized perimeter +{glue:text}`new_point_2_0` and concavity of {glue:text}`new_point_2_1`. Looking at the +scatter plot in {numref}`fig:05-knn-4`, how would you classify this red, +diamond observation? The nearest neighbor to this new point is a +**benign** observation at ({glue:text}`2-neighbor_per`, {glue:text}`2-neighbor_con`). +Does this seem like the right prediction to make for this observation? Probably +not, if you consider the other nearby points. + ++++ + +:::{glue:figure} fig:05-knn-4 +:name: fig:05-knn-4 + +Scatter plot of concavity versus perimeter. The new observation is represented +as a red diamond with a line to the one nearest neighbor, which has a benign +label. 
+::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +# The index of 3 rows that has smallest distance to the new point +min_3_idx = np.argpartition(my_distances2, 3)[:3] +near_neighbor_df3 = pd.concat([ + cancer.loc[[min_3_idx[1]], attrs], + perim_concav_with_new_point_df2.loc[[cancer.shape[0]], attrs], +]) +near_neighbor_df4 = pd.concat([ + cancer.loc[[min_3_idx[2]], attrs], + perim_concav_with_new_point_df2.loc[[cancer.shape[0]], attrs], +]) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +line3 = alt.Chart(near_neighbor_df3).mark_line().encode( + x="Perimeter", + y="Concavity", + color=alt.value("black") +) +line4 = alt.Chart(near_neighbor_df4).mark_line().encode( + x="Perimeter", + y="Concavity", + color=alt.value("black") +) +glue("fig:05-knn-5", (perim_concav_with_new_point2 + line2 + line3 + line4), display=True) +``` + +To improve the prediction we can consider several +neighboring points, say $K = 3$, that are closest to the new observation +to predict its diagnosis class. Among those 3 closest points, we use the +*majority class* as our prediction for the new observation. As shown in {numref}`fig:05-knn-5`, we +see that the diagnoses of 2 of the 3 nearest neighbors to our new observation +are malignant. Therefore we take majority vote and classify our new red, diamond +observation as malignant. + ++++ + +:::{glue:figure} fig:05-knn-5 +:name: fig:05-knn-5 + +Scatter plot of concavity versus perimeter with three nearest neighbors. +::: + ++++ + +Here we chose the $K=3$ nearest observations, but there is nothing special +about $K=3$. We could have used $K=4, 5$ or more (though we may want to choose +an odd number to avoid ties). We will discuss more about choosing $K$ in the +next chapter. 
+ ++++ + +### Distance between points + +```{index} distance; K-nearest neighbors, straight line; distance +``` + +We decide which points are the $K$ "nearest" to our new observation using the +*straight-line distance* (we will often just refer to this as *distance*). +Suppose we have two observations $a$ and $b$, each having two predictor +variables, $x$ and $y$. Denote $a_x$ and $a_y$ to be the values of variables +$x$ and $y$ for observation $a$; $b_x$ and $b_y$ have similar definitions for +observation $b$. Then the straight-line distance between observation $a$ and +$b$ on the x-y plane can be computed using the following formula: + +$$\mathrm{Distance} = \sqrt{(a_x -b_x)^2 + (a_y - b_y)^2}$$ + ++++ + +To find the $K$ nearest neighbors to our new observation, we compute the distance +from that new observation to each observation in our training data, and select the $K$ observations corresponding to the +$K$ *smallest* distance values. For example, suppose we want to use $K=5$ neighbors to classify a new +observation with perimeter {glue:text}`3-new_point_0` and +concavity {glue:text}`3-new_point_1`, shown as a red diamond in {numref}`fig:05-multiknn-1`. Let's calculate the distances +between our new point and each of the observations in the training set to find +the $K=5$ neighbors that are nearest to our new point. +You will see in the code below, we compute the straight-line +distance using the formula above: we square the differences between the two observations' perimeter +and concavity coordinates, add the squared differences, and then take the square root. +In order to find the $K=5$ nearest neighbors, we will use the `nsmallest` function from `pandas`. 
+ +```{index} nsmallest +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +new_point = [0, 3.5] +attrs = ["Perimeter", "Concavity"] +points_df3 = pd.DataFrame( + {"Perimeter": new_point[0], "Concavity": new_point[1], "Class": ["Unknown"]} +) +perim_concav_with_new_point_df3 = pd.concat((cancer, points_df3), ignore_index=True) +perim_concav_with_new_point3 = ( + alt.Chart( + perim_concav_with_new_point_df3, + ) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X("Perimeter", title="Perimeter (standardized)"), + y=alt.Y("Concavity", title="Concavity (standardized)"), + color=alt.Color( + "Class", + title="Diagnosis", + ), + shape=alt.Shape( + "Class", scale=alt.Scale(range=["circle", "circle", "diamond"]) + ), + size=alt.condition("datum.Class == 'Unknown'", alt.value(80), alt.value(30)), + stroke=alt.condition("datum.Class == 'Unknown'", alt.value("black"), alt.value(None)), + ) +) + +glue("3-new_point_0", "{:.1f}".format(new_point[0])) +glue("3-new_point_1", "{:.1f}".format(new_point[1])) +glue("fig:05-multiknn-1", perim_concav_with_new_point3) +``` + +:::{glue:figure} fig:05-multiknn-1 +:name: fig:05-multiknn-1 + +Scatter plot of concavity versus perimeter with new observation represented as a red diamond. 
+::: + +```{index} pandas.DataFrame; assign +``` + +```{code-cell} ipython3 +new_obs_Perimeter = 0 +new_obs_Concavity = 3.5 +cancer["dist_from_new"] = ( + (cancer["Perimeter"] - new_obs_Perimeter) ** 2 + + (cancer["Concavity"] - new_obs_Concavity) ** 2 +)**(1/2) +cancer.nsmallest(5, "dist_from_new")[[ + "Perimeter", + "Concavity", + "Class", + "dist_from_new" +]] +``` + +```{code-cell} ipython3 +:tags: [remove-cell] +# code needed to render the latex table with distance calculations +from IPython.display import Latex +five_neighbors = ( + cancer + [["Perimeter", "Concavity", "Class"]] + .assign(dist_from_new = ( + (cancer["Perimeter"] - new_obs_Perimeter) ** 2 + + (cancer["Concavity"] - new_obs_Concavity) ** 2 + )**(1/2)) + .nsmallest(5, "dist_from_new") +).reset_index() + +for i in range(5): + glue(f"gn{i}_perim", "{:0.2f}".format(five_neighbors["Perimeter"][i])) + glue(f"gn{i}_concav", "{:0.2f}".format(five_neighbors["Concavity"][i])) + glue(f"gn{i}_class", five_neighbors["Class"][i]) + + # typeset perimeter,concavity with parentheses if negative for latex + nperim = f"{five_neighbors['Perimeter'][i]:.2f}" if five_neighbors['Perimeter'][i] > 0 else f"({five_neighbors['Perimeter'][i]:.2f})" + nconcav = f"{five_neighbors['Concavity'][i]:.2f}" if five_neighbors['Concavity'][i] > 0 else f"({five_neighbors['Concavity'][i]:.2f})" + + glue(f"gdisteqn{i}", Latex(f"\sqrt{{(0-{nperim})^2+(3.5-{nconcav})^2}}={five_neighbors['dist_from_new'][i]:.2f}")) +``` + +In {numref}`tab:05-multiknn-mathtable` we show in mathematical detail how +we computed the `dist_from_new` variable (the +distance to the new observation) for each of the 5 nearest neighbors in the +training data. 
+ +```{table} Evaluating the distances from the new observation to each of its 5 nearest neighbors +:name: tab:05-multiknn-mathtable +| Perimeter | Concavity | Distance | Class | +|-----------|-----------|----------------------------------------|-------| +| {glue:text}`gn0_perim` | {glue:text}`gn0_concav` | {glue:}`gdisteqn0` | {glue:text}`gn0_class` | +| {glue:text}`gn1_perim` | {glue:text}`gn1_concav` | {glue:}`gdisteqn1` | {glue:text}`gn1_class` | +| {glue:text}`gn2_perim` | {glue:text}`gn2_concav` | {glue:}`gdisteqn2` | {glue:text}`gn2_class` | +| {glue:text}`gn3_perim` | {glue:text}`gn3_concav` | {glue:}`gdisteqn3` | {glue:text}`gn3_class` | +| {glue:text}`gn4_perim` | {glue:text}`gn4_concav` | {glue:}`gdisteqn4` | {glue:text}`gn4_class` | +``` + ++++ + +The result of this computation shows that 3 of the 5 nearest neighbors to our new observation are +malignant; since this is the majority, we classify our new observation as malignant. +These 5 neighbors are circled in {numref}`fig:05-multiknn-3`. + +```{code-cell} ipython3 +:tags: [remove-cell] + +circle_path_df = pd.DataFrame( + { + "Perimeter": new_point[0] + 1.4 * np.cos(np.linspace(0, 2 * np.pi, 100)), + "Concavity": new_point[1] + 1.4 * np.sin(np.linspace(0, 2 * np.pi, 100)), + } +) +circle = alt.Chart(circle_path_df.reset_index()).mark_line(color="black").encode( + x="Perimeter", + y="Concavity", + order="index" +) + +glue("fig:05-multiknn-3", (perim_concav_with_new_point3 + circle)) +``` + +:::{glue:figure} fig:05-multiknn-3 +:name: fig:05-multiknn-3 + +Scatter plot of concavity versus perimeter with 5 nearest neighbors circled. +::: + ++++ + +### More than two explanatory variables + +Although the above description is directed toward two predictor variables, +exactly the same $K$-nearest neighbors algorithm applies when you +have a higher number of predictor variables. Each predictor variable may give us new +information to help create our classifier. 
The only difference is the formula +for the distance between points. Suppose we have $m$ predictor +variables for two observations $a$ and $b$, i.e., +$a = (a_{1}, a_{2}, \dots, a_{m})$ and +$b = (b_{1}, b_{2}, \dots, b_{m})$. + +```{index} distance; more than two variables +``` + +The distance formula becomes + +$$\mathrm{Distance} = \sqrt{(a_{1} -b_{1})^2 + (a_{2} - b_{2})^2 + \dots + (a_{m} - b_{m})^2}.$$ + +This formula still corresponds to a straight-line distance, just in a space +with more dimensions. Suppose we want to calculate the distance between a new +observation with a perimeter of 0, concavity of 3.5, and symmetry of 1, and +another observation with a perimeter, concavity, and symmetry of 0.417, 2.31, and +0.837 respectively. We have two observations with three predictor variables: +perimeter, concavity, and symmetry. Previously, when we had two variables, we +added up the squared difference between each of our (two) variables, and then +took the square root. Now we will do the same, except for our three variables. +We calculate the distance as follows + +$$\mathrm{Distance} =\sqrt{(0 - 0.417)^2 + (3.5 - 2.31)^2 + (1 - 0.837)^2} = 1.27.$$ + +Let's calculate the distances between our new observation and each of the +observations in the training set to find the $K=5$ neighbors when we have these +three predictors. + +```{code-cell} ipython3 +new_obs_Perimeter = 0 +new_obs_Concavity = 3.5 +new_obs_Symmetry = 1 +cancer["dist_from_new"] = ( + (cancer["Perimeter"] - new_obs_Perimeter) ** 2 + + (cancer["Concavity"] - new_obs_Concavity) ** 2 + + (cancer["Symmetry"] - new_obs_Symmetry) ** 2 +)**(1/2) +cancer.nsmallest(5, "dist_from_new")[[ + "Perimeter", + "Concavity", + "Symmetry", + "Class", + "dist_from_new" +]] +``` + +Based on $K=5$ nearest neighbors with these three predictors we would classify +the new observation as malignant since 4 out of 5 of the nearest neighbors are malignant class. 
+{numref}`fig:05-more` shows what the data look like when we visualize them +as a 3-dimensional scatter with lines from the new observation to its five nearest neighbors. + +```{code-cell} ipython3 +:tags: [remove-cell] + +new_point = [0, 3.5, 1] +attrs = ["Perimeter", "Concavity", "Symmetry"] +points_df4 = pd.DataFrame( + { + "Perimeter": new_point[0], + "Concavity": new_point[1], + "Symmetry": new_point[2], + "Class": ["Unknown"], + } +) +perim_concav_with_new_point_df4 = pd.concat((cancer, points_df4), ignore_index=True) +# Find the euclidean distances from the new point to each of the points +# in the orginal data set +my_distances4 = euclidean_distances(perim_concav_with_new_point_df4[attrs])[ + len(cancer) +][:-1] +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +# The index of 5 rows that has smallest distance to the new point +min_5_idx = np.argpartition(my_distances4, 5)[:5] + +neighbor_df_list = [] +for idx in min_5_idx: + neighbor_df = pd.concat( + ( + cancer.loc[idx, attrs + ["Class"]], + perim_concav_with_new_point_df4.loc[len(cancer), attrs + ["Class"]], + ), + axis=1, + ).T + neighbor_df_list.append(neighbor_df) +``` + +```{code-cell} ipython3 +:tags: [remove-input] + +fig = px.scatter_3d( + perim_concav_with_new_point_df4, + x="Perimeter", + y="Concavity", + z="Symmetry", + color="Class", + symbol="Class", + opacity=0.5, +) +# specify trace names and symbols in a dict +symbols = {"Malignant": "circle", "Benign": "circle", "Unknown": "diamond"} + +# set all symbols in fig +for i, d in enumerate(fig.data): + fig.data[i].marker.symbol = symbols[fig.data[i].name] + +# specify trace names and colors in a dict +colors = {"Malignant": "#ff7f0e", "Benign": "#1f77b4", "Unknown": "red"} + +# set all colors in fig +for i, d in enumerate(fig.data): + fig.data[i].marker.color = colors[fig.data[i].name] + +# set a fixed custom marker size +fig.update_traces(marker={"size": 5}) + +# add lines +for neighbor_df in neighbor_df_list: + fig.add_trace( + 
go.Scatter3d( + x=neighbor_df["Perimeter"], + y=neighbor_df["Concavity"], + z=neighbor_df["Symmetry"], + line_color=colors[neighbor_df.iloc[0]["Class"]], + name=neighbor_df.iloc[0]["Class"], + mode="lines", + line=dict(width=2), + showlegend=False, + ) + ) + + +# tight layout +fig.update_layout(margin=dict(l=0, r=0, b=0, t=1), template="plotly_white") + +glue("fig:05-more", fig) +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:05-more +:figclass: caption-hack + +3D scatter plot of the standardized symmetry, concavity, and perimeter +variables. Note that in general we recommend against using 3D visualizations; +here we show the data in 3D only to illustrate what higher dimensions and +nearest neighbors look like, for learning purposes. +``` + ++++ + +### Summary of $K$-nearest neighbors algorithm + +In order to classify a new observation using a $K$-nearest neighbor classifier, we have to do the following: + +1. Compute the distance between the new observation and each observation in the training set. +2. Find the $K$ rows corresponding to the $K$ smallest distances. +3. Classify the new observation based on a majority vote of the neighbor classes. + ++++ + +## $K$-nearest neighbors with `scikit-learn` + +```{index} scikit-learn +``` + +Coding the $K$-nearest neighbors algorithm in Python ourselves can get complicated, +especially if we want to handle multiple classes, more than two variables, +or predict the class for multiple new observations. Thankfully, in Python, +the $K$-nearest neighbors algorithm is +implemented in [the `scikit-learn` Python package](https://scikit-learn.org/stable/index.html) {cite:p}`sklearn_api` along with +many [other models](https://scikit-learn.org/stable/user_guide.html) that you will encounter in this and future chapters of the book. 
Using the functions +in the `scikit-learn` package (named `sklearn` in Python) will help keep our code simple, readable and accurate; the +less we have to code ourselves, the fewer mistakes we will likely make. +Before getting started with $K$-nearest neighbors, we need to tell the `sklearn` package +that we prefer using `pandas` data frames over regular arrays via the `set_config` function. +```{note} +You will notice a new way of importing functions in the code below: `from ... import ...`. This lets us +import *just* `set_config` from `sklearn`, and then call `set_config` without any package prefix. +We will import functions using `from` extensively throughout +this and subsequent chapters to avoid very long names from `scikit-learn` +that clutter the code +(like `sklearn.neighbors.KNeighborsClassifier`, which has 38 characters!). +``` + +```{code-cell} ipython3 +from sklearn import set_config + +# Output dataframes instead of arrays +set_config(transform_output="pandas") +``` + +We can now get started with $K$-nearest neighbors. The first step is to + import the `KNeighborsClassifier` from the `sklearn.neighbors` module. + +```{code-cell} ipython3 +from sklearn.neighbors import KNeighborsClassifier +``` + +Let's walk through how to use `KNeighborsClassifier` to perform $K$-nearest neighbors classification. +We will use the `cancer` data set from above, with +perimeter and concavity as predictors and $K = 5$ neighbors to build our classifier. Then +we will use the classifier to predict the diagnosis label for a new observation with +perimeter 0, concavity 3.5, and an unknown diagnosis label. 
Let's pick out our two desired +predictor variables and class label and store them with the name `cancer_train`: + +```{code-cell} ipython3 +cancer_train = cancer[["Class", "Perimeter", "Concavity"]] +cancer_train +``` + +```{index} scikit-learn; model object, scikit-learn; KNeighborsClassifier +``` + +Next, we create a *model object* for $K$-nearest neighbors classification +by creating a `KNeighborsClassifier` instance, specifying that we want to use $K = 5$ neighbors; +we will discuss how to choose $K$ in the next chapter. + +```{note} +You can specify the `weights` argument in order to control +how neighbors vote when classifying a new observation. The default is `"uniform"`, where +each of the $K$ nearest neighbors gets exactly 1 vote as described above. Other choices, +which weigh each neighbor's vote differently, can be found on +[the `scikit-learn` website](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html?highlight=kneighborsclassifier#sklearn.neighbors.KNeighborsClassifier). +``` + +```{code-cell} ipython3 +knn = KNeighborsClassifier(n_neighbors=5) +knn +``` + +```{index} scikit-learn; X & y +``` + +In order to fit the model on the breast cancer data, we need to call `fit` on +the model object. The `X` argument is used to specify the data for the predictor +variables, while the `y` argument is used to specify the data for the response variable. +So below, we set `X=cancer_train[["Perimeter", "Concavity"]]` and +`y=cancer_train["Class"]` to specify that `Class` is the response +variable (the one we want to predict), and both `Perimeter` and `Concavity` are +to be used as the predictors. Note that the `fit` function might look like it does not +do much from the outside, but it is actually doing all the heavy lifting to train +the K-nearest neighbors model, and modifies the `knn` model object. 
+ +```{code-cell} ipython3 +knn.fit(X=cancer_train[["Perimeter", "Concavity"]], y=cancer_train["Class"]); +``` + +```{index} scikit-learn; predict +``` + +After using the `fit` function, we can make a prediction on a new observation +by calling `predict` on the classifier object, passing the new observation +itself. As above, when we ran the $K$-nearest neighbors classification +algorithm manually, the `knn` model object classifies the new observation as +"Malignant". Note that the `predict` function outputs an `array` with the +model's prediction; you can actually make multiple predictions at the same +time using the `predict` function, which is why the output is stored as an `array`. + +```{code-cell} ipython3 +new_obs = pd.DataFrame({"Perimeter": [0], "Concavity": [3.5]}) +knn.predict(new_obs) +``` + +Is this predicted malignant label the actual class for this observation? +Well, we don't know because we do not have this +observation's diagnosis— that is what we were trying to predict! The +classifier's prediction is not necessarily correct, but in the next chapter, we will +learn ways to quantify how accurate we think our predictions are. + ++++ + +## Data preprocessing with `scikit-learn` + +### Centering and scaling + +```{index} scaling +``` + +When using $K$-nearest neighbor classification, the *scale* of each variable +(i.e., its size and range of values) matters. Since the classifier predicts +classes by identifying observations nearest to it, any variables with +a large scale will have a much larger effect than variables with a small +scale. But just because a variable has a large scale *doesn't mean* that it is +more important for making accurate predictions. For example, suppose you have a +data set with two features, salary (in dollars) and years of education, and +you want to predict the corresponding type of job. When we compute the +neighbor distances, a difference of \$1000 is huge compared to a difference of +10 years of education. 
But for our conceptual understanding and answering of +the problem, it's the opposite; 10 years of education is huge compared to a +difference of \$1000 in yearly salary! + ++++ + +```{index} centering +``` + +In many other predictive models, the *center* of each variable (e.g., its mean) +matters as well. For example, if we had a data set with a temperature variable +measured in degrees Kelvin, and the same data set with temperature measured in +degrees Celsius, the two variables would differ by a constant shift of 273 +(even though they contain exactly the same information). Likewise, in our +hypothetical job classification example, we would likely see that the center of +the salary variable is in the tens of thousands, while the center of the years +of education variable is in the single digits. Although this doesn't affect the +$K$-nearest neighbor classification algorithm, this large shift can change the +outcome of using many other predictive models. + +```{index} standardization; K-nearest neighbors +``` + +To scale and center our data, we need to find +our variables' *mean* (the average, which quantifies the "central" value of a +set of numbers) and *standard deviation* (a number quantifying how spread out values are). +For each observed value of the variable, we subtract the mean (i.e., center the variable) +and divide by the standard deviation (i.e., scale the variable). When we do this, the data +is said to be *standardized*, and all variables in a data set will have a mean of 0 +and a standard deviation of 1. To illustrate the effect that standardization can have on the $K$-nearest +neighbor algorithm, we will read in the original, unstandardized Wisconsin breast +cancer data set; we have been using a standardized version of the data set up +until now. 
We will apply the same initial wrangling steps as we did earlier, +and to keep things simple we will just use the `Area`, `Smoothness`, and `Class` +variables: + +```{code-cell} ipython3 +unscaled_cancer = pd.read_csv("data/wdbc_unscaled.csv")[["Class", "Area", "Smoothness"]] +unscaled_cancer["Class"] = unscaled_cancer["Class"].replace({ + "M" : "Malignant", + "B" : "Benign" +}) +unscaled_cancer +``` + +Looking at the unscaled and uncentered data above, you can see that the differences +between the values for area measurements are much larger than those for +smoothness. Will this affect predictions? In order to find out, we will create a scatter plot of these two +predictors (colored by diagnosis) for both the unstandardized data we just +loaded, and the standardized version of that same data. But first, we need to +standardize the `unscaled_cancer` data set with `scikit-learn`. + +```{index} pipeline, scikit-learn; make_column_transformer +``` + +```{index} double: scikit-learn; pipeline +``` + +The `scikit-learn` framework provides a collection of *preprocessors* used to manipulate +data in the [`preprocessing` module](https://scikit-learn.org/stable/modules/preprocessing.html). +Here we will use the `StandardScaler` transformer to standardize the predictor variables in +the `unscaled_cancer` data. In order to tell the `StandardScaler` which variables to standardize, +we wrap it in a +[`ColumnTransformer`](https://scikit-learn.org/stable/modules/generated/sklearn.compose.ColumnTransformer.html#sklearn.compose.ColumnTransformer) object +using the [`make_column_transformer`](https://scikit-learn.org/stable/modules/generated/sklearn.compose.make_column_transformer.html#sklearn.compose.make_column_transformer) function. +`ColumnTransformer` objects also enable the use of multiple preprocessors at +once, which is especially handy when you want to apply different preprocessing to each of the predictor variables. 
+The primary argument of the `make_column_transformer` function is a sequence of +pairs of (1) a preprocessor, and (2) the columns to which you want to apply that preprocessor. +In the present case, we just have the one `StandardScaler` preprocessor to apply to the `Area` and `Smoothness` columns. + +```{code-cell} ipython3 +from sklearn.preprocessing import StandardScaler +from sklearn.compose import make_column_transformer + +preprocessor = make_column_transformer( + (StandardScaler(), ["Area", "Smoothness"]), +) +preprocessor +``` + +```{index} scikit-learn; ColumnTransformer, scikit-learn; StandardScaler, scikit-learn; fit_transform +``` + +```{index} ColumnTransformer; StandardScaler +``` + +```{index} scikit-learn; fit, scikit-learn; transform +``` + +You can see that the preprocessor includes a single standardization step +that is applied to the `Area` and `Smoothness` columns. +Note that here we specified which columns to apply the preprocessing step to +by individual names; this approach can become quite difficult, e.g., when we have many +predictor variables. Rather than writing out the column names individually, +we can instead use the +[`make_column_selector`](https://scikit-learn.org/stable/modules/generated/sklearn.compose.make_column_selector.html#sklearn.compose.make_column_selector) function. For +example, if we wanted to standardize all *numerical* predictors, +we would use `make_column_selector` and specify the `dtype_include` argument to be `"number"`. +This creates a preprocessor equivalent to the one we created previously. + +```{code-cell} ipython3 +from sklearn.compose import make_column_selector + +preprocessor = make_column_transformer( + (StandardScaler(), make_column_selector(dtype_include="number")), +) +preprocessor +``` + +```{index} see: fit, transform, fit_transform; scikit-learn +``` + +We are now ready to standardize the numerical predictor columns in the `unscaled_cancer` data frame. +This happens in two steps. 
We first use the `fit` function to compute the values necessary to apply +the standardization (the mean and standard deviation of each variable), passing the `unscaled_cancer` data as an argument. +Then we use the `transform` function to actually apply the standardization. +It may seem a bit unnecessary to use two steps---`fit` *and* `transform`---to standardize the data. +However, we do this in two steps so that we can specify a different data set in the `transform` step if we want. +This enables us to compute the quantities needed to standardize using one data set, and then +apply that standardization to another data set. + +```{code-cell} ipython3 +preprocessor.fit(unscaled_cancer) +scaled_cancer = preprocessor.transform(unscaled_cancer) +scaled_cancer +``` +```{code-cell} ipython3 +:tags: [remove-cell] +glue("scaled-cancer-column-0", '"'+scaled_cancer.columns[0]+'"') +glue("scaled-cancer-column-1", '"'+scaled_cancer.columns[1]+'"') +``` +It looks like our `Smoothness` and `Area` variables have been standardized. Woohoo! +But there are two important things to notice about the new `scaled_cancer` data frame. First, it only keeps +the columns from the input to `transform` (here, `unscaled_cancer`) that had a preprocessing step applied +to them. The default behavior of the `ColumnTransformer` that we build using `make_column_transformer` +is to *drop* the remaining columns. This default behavior works well with the rest of `sklearn` (as we will see below +in {numref}`08:puttingittogetherworkflow`), but for visualizing the result of preprocessing it can be useful to keep the other columns +in our original data frame, such as the `Class` variable here. +To keep other columns, we need to set the `remainder` argument to `"passthrough"` in the `make_column_transformer` function. 
+Furthermore, you can see that the new column names---{glue:text}`scaled-cancer-column-0` +and {glue:text}`scaled-cancer-column-1`---include the name +of the preprocessing step separated by underscores. This default behavior is useful in `sklearn` because we sometimes want to apply +multiple different preprocessing steps to the same columns; but again, for visualization it can be useful to preserve +the original column names. To keep original column names, we need to set the `verbose_feature_names_out` argument to `False`. + +```{note} +Only specify the `remainder` and `verbose_feature_names_out` arguments when you want to examine the result +of your preprocessing step. In most cases, you should leave these arguments at their default values. +``` + +```{code-cell} ipython3 +preprocessor_keep_all = make_column_transformer( + (StandardScaler(), make_column_selector(dtype_include="number")), + remainder="passthrough", + verbose_feature_names_out=False +) +preprocessor_keep_all.fit(unscaled_cancer) +scaled_cancer_all = preprocessor_keep_all.transform(unscaled_cancer) +scaled_cancer_all +``` + +You may wonder why we are doing so much work just to center and +scale our variables. Can't we just manually scale and center the `Area` and +`Smoothness` variables ourselves before building our $K$-nearest neighbor model? Well, +technically *yes*; but doing so is error-prone. In particular, we might +accidentally forget to apply the same centering / scaling when making +predictions, or accidentally apply a *different* centering / scaling than what +we used while training. Proper use of a `ColumnTransformer` helps keep our code simple, +readable, and error-free. Furthermore, note that using `fit` and `transform` on +the preprocessor is required only when you want to inspect the result of the +preprocessing steps +yourself. 
You will see further on in +
{numref}`08:puttingittogetherworkflow` that `scikit-learn` provides tools to +
automatically streamline the preprocessor and the model so that you can call `fit` +
and `transform` on the `Pipeline` as necessary without additional coding effort. +
+
{numref}`fig:05-scaling-plt` shows the two scatter plots side-by-side—one for `unscaled_cancer` and one for +
`scaled_cancer`. Each has the same new observation annotated with its $K=3$ nearest neighbors. +
In the original unstandardized data plot, you can see some odd choices +
for the three nearest neighbors. In particular, the "neighbors" are visually +
well within the cloud of benign observations, and the neighbors are all nearly +
vertically aligned with the new observation (which is why it looks like there +
is only one black line on this plot). {numref}`fig:05-scaling-plt-zoomed` +
shows a close-up of that region on the unstandardized plot. Here the computation of nearest +
neighbors is dominated by the much larger-scale area variable. The plot for standardized data +
on the right in {numref}`fig:05-scaling-plt` shows a much more intuitively reasonable +
selection of nearest neighbors. Thus, standardizing the data can change things +
in an important way when we are using predictive algorithms. +
Standardizing your data should be a part of the preprocessing you do +
before predictive modeling and you should always think carefully about your problem domain and +
whether you need to standardize your data.
+ +```{code-cell} ipython3 +:tags: [remove-cell] + +def class_dscp(x): + if x == "M": + return "Malignant" + elif x == "B": + return "Benign" + else: + return x + + +attrs = ["Area", "Smoothness"] +new_obs = pd.DataFrame({"Class": ["Unknown"], "Area": 400, "Smoothness": 0.135}) +unscaled_cancer["Class"] = unscaled_cancer["Class"].apply(class_dscp) +area_smoothness_new_df = pd.concat((unscaled_cancer, new_obs), ignore_index=True) +my_distances = euclidean_distances(area_smoothness_new_df[attrs])[ + len(unscaled_cancer) +][:-1] +area_smoothness_new_point = ( + alt.Chart( + area_smoothness_new_df, + title=alt.TitleParams(text="Unstandardized data", anchor="start"), + ) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X("Area"), + y=alt.Y("Smoothness"), + color=alt.Color( + "Class", + title="Diagnosis", + ), + shape=alt.Shape( + "Class", scale=alt.Scale(range=["circle", "circle", "diamond"]) + ), + size=alt.condition("datum.Class == 'Unknown'", alt.value(80), alt.value(30)), + stroke=alt.condition("datum.Class == 'Unknown'", alt.value("black"), alt.value(None)) + ) +) + +# The index of 3 rows that has smallest distance to the new point +min_3_idx = np.argpartition(my_distances, 3)[:3] +neighbor1 = pd.concat([ + unscaled_cancer.loc[[min_3_idx[0]], attrs], + new_obs[attrs], +]) +neighbor2 = pd.concat([ + unscaled_cancer.loc[[min_3_idx[1]], attrs], + new_obs[attrs], +]) +neighbor3 = pd.concat([ + unscaled_cancer.loc[[min_3_idx[2]], attrs], + new_obs[attrs], +]) + +line1 = ( + alt.Chart(neighbor1) + .mark_line() + .encode(x="Area", y="Smoothness", color=alt.value("black")) +) +line2 = ( + alt.Chart(neighbor2) + .mark_line() + .encode(x="Area", y="Smoothness", color=alt.value("black")) +) +line3 = ( + alt.Chart(neighbor3) + .mark_line() + .encode(x="Area", y="Smoothness", color=alt.value("black")) +) + +area_smoothness_new_point = area_smoothness_new_point + line1 + line2 + line3 +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +attrs = ["Area", 
"Smoothness"] +new_obs_scaled = pd.DataFrame({"Class": ["Unknown"], "Area": -0.72, "Smoothness": 2.8}) +scaled_cancer_all["Class"] = scaled_cancer_all["Class"].apply(class_dscp) +area_smoothness_new_df_scaled = pd.concat( + (scaled_cancer_all, new_obs_scaled), ignore_index=True +) +my_distances_scaled = euclidean_distances(area_smoothness_new_df_scaled[attrs])[ + len(scaled_cancer_all) +][:-1] +area_smoothness_new_point_scaled = ( + alt.Chart( + area_smoothness_new_df_scaled, + title=alt.TitleParams(text="Standardized data", anchor="start"), + ) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X("Area", title="Area (standardized)"), + y=alt.Y("Smoothness", title="Smoothness (standardized)"), + color=alt.Color( + "Class", + title="Diagnosis", + ), + shape=alt.Shape( + "Class", scale=alt.Scale(range=["circle", "circle", "diamond"]) + ), + size=alt.condition("datum.Class == 'Unknown'", alt.value(80), alt.value(30)), + stroke=alt.condition("datum.Class == 'Unknown'", alt.value("black"), alt.value(None)) + ) +) +min_3_idx_scaled = np.argpartition(my_distances_scaled, 3)[:3] +neighbor1_scaled = pd.concat([ + scaled_cancer_all.loc[[min_3_idx_scaled[0]], attrs], + new_obs_scaled[attrs], +]) +neighbor2_scaled = pd.concat([ + scaled_cancer_all.loc[[min_3_idx_scaled[1]], attrs], + new_obs_scaled[attrs], +]) +neighbor3_scaled = pd.concat([ + scaled_cancer_all.loc[[min_3_idx_scaled[2]], attrs], + new_obs_scaled[attrs], +]) + +line1_scaled = ( + alt.Chart(neighbor1_scaled) + .mark_line() + .encode(x="Area", y="Smoothness", color=alt.value("black")) +) +line2_scaled = ( + alt.Chart(neighbor2_scaled) + .mark_line() + .encode(x="Area", y="Smoothness", color=alt.value("black")) +) +line3_scaled = ( + alt.Chart(neighbor3_scaled) + .mark_line() + .encode(x="Area", y="Smoothness", color=alt.value("black")) +) + +area_smoothness_new_point_scaled = ( + area_smoothness_new_point_scaled + line1_scaled + line2_scaled + line3_scaled +) +``` + +```{code-cell} ipython3 
+:tags: [remove-cell] + +glue( + "fig:05-scaling-plt", + area_smoothness_new_point | area_smoothness_new_point_scaled +) +``` + +:::{glue:figure} fig:05-scaling-plt +:name: fig:05-scaling-plt + +Comparison of K = 3 nearest neighbors with standardized and unstandardized data. +::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +zoom_area_smoothness_new_point = ( + alt.Chart( + area_smoothness_new_df, + title=alt.TitleParams(text="Unstandardized data", anchor="start"), + ) + .mark_point(clip=True, opacity=0.6, filled=True, size=40) + .encode( + x=alt.X("Area", scale=alt.Scale(domain=(395, 405))), + y=alt.Y("Smoothness", scale=alt.Scale(domain=(0.08, 0.14))), + color=alt.Color( + "Class", + title="Diagnosis", + ), + shape=alt.Shape( + "Class", scale=alt.Scale(range=["circle", "circle", "diamond"]) + ), + size=alt.condition("datum.Class == 'Unknown'", alt.value(80), alt.value(30)), + stroke=alt.condition("datum.Class == 'Unknown'", alt.value("black"), alt.value(None)) + ) +) +zoom_area_smoothness_new_point + line1 + line2 + line3 +glue("fig:05-scaling-plt-zoomed", (zoom_area_smoothness_new_point + line1 + line2 + line3)) +``` + +:::{glue:figure} fig:05-scaling-plt-zoomed +:name: fig:05-scaling-plt-zoomed + +Close-up of three nearest neighbors for unstandardized data. +::: + ++++ + +### Balancing + +```{index} balance, imbalance +``` + +Another potential issue in a data set for a classifier is *class imbalance*, +i.e., when one label is much more common than another. Since classifiers like +the $K$-nearest neighbor algorithm use the labels of nearby points to predict +the label of a new point, if there are many more data points with one label +overall, the algorithm is more likely to pick that label in general (even if +the "pattern" of data suggests otherwise). 
Class imbalance is actually quite a +common and important problem: from rare disease diagnosis to malicious email +detection, there are many cases in which the "important" class to identify +(presence of disease, malicious email) is much rarer than the "unimportant" +class (no disease, normal email). + +To better illustrate the problem, let's revisit the scaled breast cancer data, +`cancer`; except now we will remove many of the observations of malignant tumors, simulating +what the data would look like if the cancer was rare. We will do this by +picking only 3 observations from the malignant group, and keeping all +of the benign observations. We choose these 3 observations using the `.head()` +method, which takes the number of rows to select from the top (`n`). +We will then use the [`concat`](https://pandas.pydata.org/docs/reference/api/pandas.concat.html) +function from `pandas` to glue the two resulting filtered +data frames back together. The `concat` function *concatenates* data frames +along an axis. By default, it concatenates the data frames vertically along `axis=0` yielding a single +*taller* data frame, which is what we want to do here. If we instead wanted to concatenate horizontally +to produce a *wider* data frame, we would specify `axis=1`. +The new imbalanced data is shown in {numref}`fig:05-unbalanced`, +and we print the counts of the classes using the `value_counts` function. + +```{code-cell} ipython3 +rare_cancer = pd.concat(( + cancer[cancer["Class"] == "Benign"], + cancer[cancer["Class"] == "Malignant"].head(3) +)) + +rare_plot = alt.Chart(rare_cancer).mark_circle().encode( + x=alt.X("Perimeter").title("Perimeter (standardized)"), + y=alt.Y("Concavity").title("Concavity (standardized)"), + color=alt.Color("Class").title("Diagnosis") +) +rare_plot +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:05-unbalanced +:figclass: caption-hack + +Imbalanced data. 
+``` + +```{code-cell} ipython3 +rare_cancer["Class"].value_counts() +``` + ++++ + +Suppose we now decided to use $K = 7$ in $K$-nearest neighbor classification. +With only 3 observations of malignant tumors, the classifier +will *always predict that the tumor is benign, no matter what its concavity and perimeter +are!* This is because in a majority vote of 7 observations, at most 3 will be +malignant (we only have 3 total malignant observations), so at least 4 must be +benign, and the benign vote will always win. For example, {numref}`fig:05-upsample` +shows what happens for a new tumor observation that is quite close to three observations +in the training data that were tagged as malignant. + +```{code-cell} ipython3 +:tags: [remove-cell] + +attrs = ["Perimeter", "Concavity"] +new_point = [2, 2] +new_point_df = pd.DataFrame( + {"Class": ["Unknown"], "Perimeter": new_point[0], "Concavity": new_point[1]} +) +rare_cancer["Class"] = rare_cancer["Class"].apply(class_dscp) +rare_cancer_with_new_df = pd.concat((rare_cancer, new_point_df), ignore_index=True) +my_distances = euclidean_distances(rare_cancer_with_new_df[attrs])[ + len(rare_cancer) +][:-1] + +# First layer: scatter plot, with unknwon point labeled as red "unknown" diamond +rare_plot = ( + alt.Chart( + rare_cancer_with_new_df + ) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X("Perimeter", title="Perimeter (standardized)"), + y=alt.Y("Concavity", title="Concavity (standardized)"), + color=alt.Color( + "Class", + title="Diagnosis", + ), + shape=alt.Shape( + "Class", scale=alt.Scale(range=["circle", "circle", "diamond"]) + ), + size=alt.condition("datum.Class == 'Unknown'", alt.value(80), alt.value(30)), + stroke=alt.condition("datum.Class == 'Unknown'", alt.value("black"), alt.value(None)) + ) +) + +# Find the 7 NNs +min_7_idx = np.argpartition(my_distances, 7)[:7] + +# For loop: each iteration adds a line segment of corresponding color +for i in range(7): + clr = "#1f77b4" + if 
rare_cancer.iloc[min_7_idx[i], :]["Class"] == "Malignant": + clr = "#ff7f0e" + neighbor = pd.concat([ + rare_cancer.iloc[[min_7_idx[i]], :][attrs], + new_point_df[attrs], + ]) + rare_plot = rare_plot + ( + alt.Chart(neighbor) + .mark_line(opacity=0.3) + .encode(x="Perimeter", y="Concavity", color=alt.value(clr)) + ) + +glue("fig:05-upsample", rare_plot) +``` + +:::{glue:figure} fig:05-upsample +:name: fig:05-upsample + +Imbalanced data with 7 nearest neighbors to a new observation highlighted. +::: + ++++ + +{numref}`fig:05-upsample-2` shows what happens if we set the background color of +each area of the plot to the predictions the $K$-nearest neighbor +classifier would make. We can see that the decision is +always "benign," corresponding to the blue color. + +```{code-cell} ipython3 +:tags: [remove-cell] + +knn = KNeighborsClassifier(n_neighbors=7) +knn.fit(X=rare_cancer[["Perimeter", "Concavity"]], y=rare_cancer["Class"]) + +# create a prediction pt grid +per_grid = np.linspace( + rare_cancer["Perimeter"].min() * 1.05, rare_cancer["Perimeter"].max() * 1.05, 50 +) +con_grid = np.linspace( + rare_cancer["Concavity"].min() * 1.05, rare_cancer["Concavity"].max() * 1.05, 50 +) +pcgrid = np.array(np.meshgrid(per_grid, con_grid)).reshape(2, -1).T +pcgrid = pd.DataFrame(pcgrid, columns=["Perimeter", "Concavity"]) +pcgrid + +knnPredGrid = knn.predict(pcgrid) +prediction_table = pcgrid.copy() +prediction_table["Class"] = knnPredGrid +prediction_table + +# create the scatter plot +rare_plot = ( + alt.Chart( + rare_cancer, + ) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X("Perimeter", title="Perimeter (standardized)"), + y=alt.Y("Concavity", title="Concavity (standardized)"), + color=alt.Color("Class", title="Diagnosis"), + ) +) + +# add a prediction layer, also scatter plot +prediction_plot = ( + alt.Chart( + prediction_table, + title="Imbalanced data", + ) + .mark_point(opacity=0.05, filled=True, size=300) + .encode( + x=alt.X( + "Perimeter", + 
title="Perimeter (standardized)", + scale=alt.Scale( + domain=(rare_cancer["Perimeter"].min() * 1.05, rare_cancer["Perimeter"].max() * 1.05), + nice=False + ), + ), + y=alt.Y( + "Concavity", + title="Concavity (standardized)", + scale=alt.Scale( + domain=(rare_cancer["Concavity"].min() * 1.05, rare_cancer["Concavity"].max() * 1.05), + nice=False + ), + ), + color=alt.Color("Class", title="Diagnosis"), + ) +) +#rare_plot + prediction_plot +glue("fig:05-upsample-2", (rare_plot + prediction_plot)) +``` + +:::{glue:figure} fig:05-upsample-2 +:name: fig:05-upsample-2 + +Imbalanced data with background color indicating the decision of the classifier and the points represent the labeled data. +::: + ++++ + +```{index} oversampling, scikit-learn; sample +``` + +Despite the simplicity of the problem, solving it in a statistically sound manner is actually +fairly nuanced, and a careful treatment would require a lot more detail and mathematics than we will cover in this textbook. +For the present purposes, it will suffice to rebalance the data by *oversampling* the rare class. +In other words, we will replicate rare observations multiple times in our data set to give them more +voting power in the $K$-nearest neighbor algorithm. In order to do this, we will +first separate the classes out into their own data frames by filtering. +Then, we will +use the `sample` method on the rare class data frame to increase the number of `Malignant` observations to be the same as the number +of `Benign` observations. We set the `n` argument to be the number of `Malignant` observations we want, and set `replace=True` +to indicate that we are sampling with replacement. +Finally, we use the `value_counts` method to see that our classes are now balanced. +Note that `sample` picks which data to replicate *randomly*; we will learn more about properly handling randomness +in data analysis in {numref}`Chapter %s `. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] +# hidden seed call to make the below resample reproducible +# we haven't taught students about seeds / prngs yet, so +# for now just hide this. +np.random.seed(1) +``` + +```{code-cell} ipython3 +malignant_cancer = rare_cancer[rare_cancer["Class"] == "Malignant"] +benign_cancer = rare_cancer[rare_cancer["Class"] == "Benign"] +malignant_cancer_upsample = malignant_cancer.sample( + n=benign_cancer.shape[0], replace=True +) +upsampled_cancer = pd.concat((malignant_cancer_upsample, benign_cancer)) +upsampled_cancer["Class"].value_counts() +``` + +Now suppose we train our $K$-nearest neighbor classifier with $K=7$ on this *balanced* data. +{numref}`fig:05-upsample-plot` shows what happens now when we set the background color +of each area of our scatter plot to the decision the $K$-nearest neighbor +classifier would make. We can see that the decision is more reasonable; when the points are close +to those labeled malignant, the classifier predicts a malignant tumor, and vice versa when they are +closer to the benign tumor observations. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] + +knn = KNeighborsClassifier(n_neighbors=7) +knn.fit( + X=upsampled_cancer[["Perimeter", "Concavity"]], y=upsampled_cancer["Class"] +) + +# create a prediction pt grid +knnPredGrid = knn.predict(pcgrid) +prediction_table = pcgrid +prediction_table["Class"] = knnPredGrid + +# create the scatter plot +rare_plot = ( + alt.Chart(rare_cancer) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X( + "Perimeter", + title="Perimeter (standardized)", + scale=alt.Scale( + domain=(rare_cancer["Perimeter"].min() * 1.05, rare_cancer["Perimeter"].max() * 1.05), + nice=False + ), + ), + y=alt.Y( + "Concavity", + title="Concavity (standardized)", + scale=alt.Scale( + domain=(rare_cancer["Concavity"].min() * 1.05, rare_cancer["Concavity"].max() * 1.05), + nice=False + ), + ), + color=alt.Color("Class", title="Diagnosis"), + ) +) + +# add a prediction layer, also scatter plot +upsampled_plot = ( + alt.Chart(prediction_table) + .mark_point(opacity=0.05, filled=True, size=300) + .encode( + x=alt.X("Perimeter", title="Perimeter (standardized)"), + y=alt.Y("Concavity", title="Concavity (standardized)"), + color=alt.Color("Class", title="Diagnosis"), + ) +) +#rare_plot + upsampled_plot +glue("fig:05-upsample-plot", (rare_plot + upsampled_plot)) +``` + +:::{glue:figure} fig:05-upsample-plot +:name: fig:05-upsample-plot + +Upsampled data with background color indicating the decision of the classifier. +::: + +### Missing data + +```{index} missing data +``` + +One of the most common issues in real data sets in the wild is *missing data*, +i.e., observations where the values of some of the variables were not recorded. +Unfortunately, as common as it is, handling missing data properly is very +challenging and generally relies on expert knowledge about the data, setting, +and how the data were collected. 
One typical challenge with missing data is +that missing entries can be *informative*: the very fact that an entry was +missing is related to the values of other variables. For example, survey +participants from a marginalized group of people may be less likely to respond +to certain kinds of questions if they fear that answering honestly will come +with negative consequences. In that case, if we were to simply throw away data +with missing entries, we would bias the conclusions of the survey by +inadvertently removing many members of that group of respondents. So ignoring +this issue in real problems can easily lead to misleading analyses, with +detrimental impacts. In this book, we will cover only those techniques for +dealing with missing entries in situations where missing entries are just +"randomly missing", i.e., where the fact that certain entries are missing +*isn't related to anything else* about the observation. + +Let's load and examine a modified subset of the tumor image data +that has a few missing entries: + +```{code-cell} ipython3 +missing_cancer = pd.read_csv("data/wdbc_missing.csv")[["Class", "Radius", "Texture", "Perimeter"]] +missing_cancer["Class"] = missing_cancer["Class"].replace({ + "M" : "Malignant", + "B" : "Benign" +}) +missing_cancer +``` + +Recall that K-nearest neighbor classification makes predictions by computing +the straight-line distance to nearby training observations, and hence requires +access to the values of *all* variables for *all* observations in the training +data. So how can we perform K-nearest neighbor classification in the presence +of missing data? Well, since there are not too many observations with missing +entries, one option is to simply remove those observations prior to building +the K-nearest neighbor classifier. We can accomplish this by using the +`dropna` method prior to working with the data. 
+ +```{code-cell} ipython3 +no_missing_cancer = missing_cancer.dropna() +no_missing_cancer +``` + +However, this strategy will not work when many of the rows have missing +entries, as we may end up throwing away too much data. In this case, another +possible approach is to *impute* the missing entries, i.e., fill in synthetic +values based on the other observations in the data set. One reasonable choice +is to perform *mean imputation*, where missing entries are filled in using the +mean of the present entries in each variable. To perform mean imputation, we +use a `SimpleImputer` transformer with the default arguments, and wrap it in a +`ColumnTransformer` to indicate which columns need imputation. + +```{code-cell} ipython3 +from sklearn.impute import SimpleImputer + +preprocessor = make_column_transformer( + (SimpleImputer(), ["Radius", "Texture", "Perimeter"]), + verbose_feature_names_out=False +) +preprocessor +``` + +To visualize what mean imputation does, let's just apply the transformer directly to the `missing_cancer` +data frame using the `fit` and `transform` functions. The imputation step fills in the missing +entries with the mean values of their corresponding variables. + +```{code-cell} ipython3 +preprocessor.fit(missing_cancer) +imputed_cancer = preprocessor.transform(missing_cancer) +imputed_cancer +``` + +Many other options for missing data imputation can be found in +[the `scikit-learn` documentation](https://scikit-learn.org/stable/modules/impute.html). However +you decide to handle missing data in your data analysis, it is always crucial +to think critically about the setting, how the data were collected, and the +question you are answering. 
+ ++++ + +(08:puttingittogetherworkflow)= +## Putting it together in a `Pipeline` + +```{index} scikit-learn; pipeline +``` + +The `scikit-learn` package collection also provides the [`Pipeline`](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html?highlight=pipeline#sklearn.pipeline.Pipeline), +a way to chain together multiple data analysis steps without a lot of otherwise necessary code for intermediate steps. +To illustrate the whole workflow, let's start from scratch with the `wdbc_unscaled.csv` data. +First we will load the data, create a model, and specify a preprocessor for the data. + +```{code-cell} ipython3 +# load the unscaled cancer data, make Class readable +unscaled_cancer = pd.read_csv("data/wdbc_unscaled.csv") +unscaled_cancer["Class"] = unscaled_cancer["Class"].replace({ + "M" : "Malignant", + "B" : "Benign" +}) +unscaled_cancer + +# create the KNN model +knn = KNeighborsClassifier(n_neighbors=7) + +# create the centering / scaling preprocessor +preprocessor = make_column_transformer( + (StandardScaler(), ["Area", "Smoothness"]), +) +``` + +```{index} scikit-learn; make_pipeline, scikit-learn; fit +``` + +Next we place these steps in a `Pipeline` using +the [`make_pipeline`](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.make_pipeline.html#sklearn.pipeline.make_pipeline) function. +The `make_pipeline` function takes a list of steps to apply in your data analysis; in this +case, we just have the `preprocessor` and `knn` steps. +Finally, we call `fit` on the pipeline. +Notice that we do not need to separately call `fit` and `transform` on the `preprocessor`; the +pipeline handles doing this properly for us. +Also notice that when we call `fit` on the pipeline, we can pass +the whole `unscaled_cancer` data frame to the `X` argument, since the preprocessing +step drops all the variables except the two we listed: `Area` and `Smoothness`. 
+For the `y` response variable argument, we pass the `unscaled_cancer["Class"]` series as before. + +```{code-cell} ipython3 +from sklearn.pipeline import make_pipeline + +knn_pipeline = make_pipeline(preprocessor, knn) +knn_pipeline.fit( + X=unscaled_cancer, + y=unscaled_cancer["Class"] +) +knn_pipeline +``` + +As before, the fit object lists the function that trains the model. But now the fit object also includes information about +the overall workflow, including the standardization preprocessing step. +In other words, when we use the `predict` function with the `knn_pipeline` object to make a prediction for a new +observation, it will first apply the same preprocessing steps to the new observation. +As an example, we will predict the class label of two new observations: +one with `Area = 500` and `Smoothness = 0.075`, and one with `Area = 1500` and `Smoothness = 0.1`. + +```{code-cell} ipython3 +new_observation = pd.DataFrame({"Area": [500, 1500], "Smoothness": [0.075, 0.1]}) +prediction = knn_pipeline.predict(new_observation) +prediction +``` + +The classifier predicts that the first observation is benign, while the second is +malignant. {numref}`fig:05-workflow-plot` visualizes the predictions that this +trained $K$-nearest neighbor model will make on a large range of new observations. +Although you have seen colored prediction map visualizations like this a few times now, +we have not included the code to generate them, as it is a little bit complicated. +For the interested reader who wants a learning challenge, we now include it below. +The basic idea is to create a grid of synthetic new observations using the `meshgrid` function from `numpy`, +predict the label of each, and visualize the predictions with a colored scatter having a very high transparency +(low `opacity` value) and large point radius. See if you can figure out what each line is doing! + +```{note} +Understanding this code is not required for the remainder of the +textbook. 
It is included for those readers who would like to use similar +visualizations in their own data analyses. +``` + +```{code-cell} ipython3 +:tags: [remove-output] +import numpy as np + +# create the grid of area/smoothness vals, and arrange in a data frame +are_grid = np.linspace( + unscaled_cancer["Area"].min() * 0.95, unscaled_cancer["Area"].max() * 1.05, 50 +) +smo_grid = np.linspace( + unscaled_cancer["Smoothness"].min() * 0.95, unscaled_cancer["Smoothness"].max() * 1.05, 50 +) +asgrid = np.array(np.meshgrid(are_grid, smo_grid)).reshape(2, -1).T +asgrid = pd.DataFrame(asgrid, columns=["Area", "Smoothness"]) + +# use the fit workflow to make predictions at the grid points +knnPredGrid = knn_pipeline.predict(asgrid) + +# bind the predictions as a new column with the grid points +prediction_table = asgrid.copy() +prediction_table["Class"] = knnPredGrid + +# plot: +# 1. the colored scatter of the original data +unscaled_plot = alt.Chart(unscaled_cancer).mark_point( + opacity=0.6, + filled=True, + size=40 +).encode( + x=alt.X("Area") + .scale( + nice=False, + domain=( + unscaled_cancer["Area"].min() * 0.95, + unscaled_cancer["Area"].max() * 1.05 + ) + ), + y=alt.Y("Smoothness") + .scale( + nice=False, + domain=( + unscaled_cancer["Smoothness"].min() * 0.95, + unscaled_cancer["Smoothness"].max() * 1.05 + ) + ), + color=alt.Color("Class").title("Diagnosis") +) + +# 2. 
the faded colored scatter for the grid points +prediction_plot = alt.Chart(prediction_table).mark_point( + opacity=0.05, + filled=True, + size=300 +).encode( + x="Area", + y="Smoothness", + color=alt.Color("Class").title("Diagnosis") +) +unscaled_plot + prediction_plot +``` + +```{code-cell} ipython3 +:tags: [remove-input] +glue("fig:05-workflow-plot", (unscaled_plot + prediction_plot)) +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:05-workflow-plot +:figclass: caption-hack + +Scatter plot of smoothness versus area where background color indicates the decision of the classifier. +``` + ++++ + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Classification I: training and predicting" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. 
+ ++++ +## References + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/classification2.md b/pull313/_sources/classification2.md new file mode 100644 index 00000000..bc250318 --- /dev/null +++ b/pull313/_sources/classification2.md @@ -0,0 +1,2050 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.5 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(classification2)= +# Classification II: evaluation & tuning + +```{code-cell} ipython3 +:tags: [remove-cell] + +from chapter_preamble import * +``` + +## Overview +This chapter continues the introduction to predictive modeling through +classification. While the previous chapter covered training and data +preprocessing, this chapter focuses on how to evaluate the performance of +a classifier, as well as how to improve the classifier (where possible) +to maximize its accuracy. + +## Chapter learning objectives +By the end of the chapter, readers will be able to do the following: + +- Describe what training, validation, and test data sets are and how they are used in classification. +- Split data into training, validation, and test data sets. +- Describe what a random seed is and its importance in reproducible data analysis. +- Set the random seed in Python using the `numpy.random.seed` function. +- Describe and interpret accuracy, precision, recall, and confusion matrices. +- Evaluate classification accuracy in Python using a validation data set. +- Produce a confusion matrix in Python. +- Execute cross-validation in Python to choose the number of neighbors in a $K$-nearest neighbors classifier. +- Describe the advantages and disadvantages of the $K$-nearest neighbors classification algorithm. + ++++ + +## Evaluating performance + +```{index} breast cancer +``` + +Sometimes our classifier might make the wrong prediction. 
A classifier does not +need to be right 100\% of the time to be useful, though we don't want the +classifier to make too many wrong predictions. How do we measure how "good" our +classifier is? Let's revisit the +[breast cancer images data](https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+%28Diagnostic%29) {cite:p}`streetbreastcancer` +and think about how our classifier will be used in practice. A biopsy will be +performed on a *new* patient's tumor, the resulting image will be analyzed, +and the classifier will be asked to decide whether the tumor is benign or +malignant. The key word here is *new*: our classifier is "good" if it provides +accurate predictions on data *not seen during training*, as this implies that +it has actually learned about the relationship between the predictor variables and response variable, +as opposed to simply memorizing the labels of individual training data examples. +But then, how can we evaluate our classifier without visiting the hospital to collect more +tumor images? + + +```{index} training set, test set +``` + +The trick is to split the data into a **training set** and **test set** ({numref}`fig:06-training-test`) +and use only the **training set** when building the classifier. +Then, to evaluate the performance of the classifier, we first set aside the labels from the **test set**, +and then use the classifier to predict the labels in the **test set**. If our predictions match the actual +labels for the observations in the **test set**, then we have some +confidence that our classifier might also accurately predict the class +labels for new observations without known class labels. + +```{index} golden rule of machine learning +``` + +```{note} +If there were a golden rule of machine learning, it might be this: +*you cannot use the test data to build the model!* If you do, the model gets to +"see" the test data in advance, making it look more accurate than it really +is. 
Imagine how bad it would be to overestimate your classifier's accuracy +when predicting whether a patient's tumor is malignant or benign! +``` + ++++ + +```{figure} img/classification2/training_test.jpeg +:name: fig:06-training-test + +Splitting the data into training and testing sets. +``` + ++++ + +```{index} see: prediction accuracy; accuracy +``` + +```{index} accuracy +``` + +How exactly can we assess how well our predictions match the actual labels for +the observations in the test set? One way we can do this is to calculate the +prediction **accuracy**. This is the fraction of examples for which the +classifier made the correct prediction. To calculate this, we divide the number +of correct predictions by the number of predictions made. +The process for assessing if our predictions match the actual labels in the +test set is illustrated in {numref}`fig:06-ML-paradigm-test`. + +$$\mathrm{accuracy} = \frac{\mathrm{number \; of \; correct \; predictions}}{\mathrm{total \; number \; of \; predictions}}$$ + ++++ + +```{figure} img/classification2/ML-paradigm-test.png +:name: fig:06-ML-paradigm-test + +Process for splitting the data and finding the prediction accuracy. +``` + +Accuracy is a convenient, general-purpose way to summarize the performance of a classifier with +a single number. But prediction accuracy by itself does not tell the whole +story. In particular, accuracy alone only tells us how often the classifier +makes mistakes in general, but does not tell us anything about the *kinds* of +mistakes the classifier makes. A more comprehensive view of performance can be +obtained by additionally examining the **confusion matrix**. The confusion +matrix shows how many test set labels of each type are predicted correctly and +incorrectly, which gives us more detail about the kinds of mistakes the +classifier tends to make. 
{numref}`confusion-matrix-table` shows an example +of what a confusion matrix might look like for the tumor image data with +a test set of 65 observations. + +```{list-table} An example confusion matrix for the tumor image data. +:header-rows: 1 +:name: confusion-matrix-table + +* - + - Predicted Malignant + - Predicted Benign +* - **Actually Malignant** + - 1 + - 3 +* - **Actually Benign** + - 4 + - 57 +``` + +In the example in {numref}`confusion-matrix-table`, we see that there was +1 malignant observation that was correctly classified as malignant (top left corner), +and 57 benign observations that were correctly classified as benign (bottom right corner). +However, we can also see that the classifier made some mistakes: +it classified 3 malignant observations as benign, and 4 benign observations as +malignant. The accuracy of this classifier is roughly +89%, given by the formula + +$$\mathrm{accuracy} = \frac{\mathrm{number \; of \; correct \; predictions}}{\mathrm{total \; number \; of \; predictions}} = \frac{1+57}{1+57+4+3} = 0.892$$ + +But we can also see that the classifier only identified 1 out of 4 total malignant +tumors; in other words, it misclassified 75% of the malignant cases present in the +data set! In this example, misclassifying a malignant tumor is a potentially +disastrous error, since it may lead to a patient who requires treatment not receiving it. +Since we are particularly interested in identifying malignant cases, this +classifier would likely be unacceptable even with an accuracy of 89%. + +Focusing more on one label than the other is +common in classification problems. In such cases, we typically refer to the label we are more +interested in identifying as the *positive* label, and the other as the +*negative* label. In the tumor example, we would refer to malignant +observations as *positive*, and benign observations as *negative*. 
We can then +use the following terms to talk about the four kinds of prediction that the +classifier can make, corresponding to the four entries in the confusion matrix: + +- **True Positive:** A malignant observation that was classified as malignant (top left in {numref}`confusion-matrix-table`). +- **False Positive:** A benign observation that was classified as malignant (bottom left in {numref}`confusion-matrix-table`). +- **True Negative:** A benign observation that was classified as benign (bottom right in {numref}`confusion-matrix-table`). +- **False Negative:** A malignant observation that was classified as benign (top right in {numref}`confusion-matrix-table`). + +A perfect classifier would have zero false negatives and false positives (and +therefore, 100% accuracy). However, classifiers in practice will almost always +make some errors. So you should think about which kinds of error are most +important in your application, and use the confusion matrix to quantify and +report them. Two commonly used metrics that we can compute using the confusion +matrix are the **precision** and **recall** of the classifier. These are often +reported together with accuracy. *Precision* quantifies how many of the +positive predictions the classifier made were actually positive. Intuitively, +we would like a classifier to have a *high* precision: for a classifier with +high precision, if the classifier reports that a new observation is positive, +we can trust that the new observation is indeed positive. We can compute the +precision of a classifier using the entries in the confusion matrix, with the +formula + +$$\mathrm{precision} = \frac{\mathrm{number \; of \; correct \; positive \; predictions}}{\mathrm{total \; number \; of \; positive \; predictions}}.$$ + +*Recall* quantifies how many of the positive observations in the test set were +identified as positive. 
Intuitively, we would like a classifier to have a +*high* recall: for a classifier with high recall, if there is a positive +observation in the test data, we can trust that the classifier will find it. +We can also compute the recall of the classifier using the entries in the +confusion matrix, with the formula + +$$\mathrm{recall} = \frac{\mathrm{number \; of \; correct \; positive \; predictions}}{\mathrm{total \; number \; of \; positive \; test \; set \; observations}}.$$ + +In the example presented in {numref}`confusion-matrix-table`, we have that the precision and recall are + +$$\mathrm{precision} = \frac{1}{1+4} = 0.20, \quad \mathrm{recall} = \frac{1}{1+3} = 0.25.$$ + +So even with an accuracy of 89%, the precision and recall of the classifier +were both relatively low. For this data analysis context, recall is +particularly important: if someone has a malignant tumor, we certainly want to +identify it. A recall of just 25% would likely be unacceptable! + +```{note} +It is difficult to achieve both high precision and high recall at +the same time; models with high precision tend to have low recall and vice +versa. As an example, we can easily make a classifier that has *perfect +recall*: just *always* guess positive! This classifier will of course find +every positive observation in the test set, but it will make lots of false +positive predictions along the way and have low precision. Similarly, we can +easily make a classifier that has *perfect precision*: *never* guess +positive! This classifier will never incorrectly identify an observation as +positive, but it will make a lot of false negative predictions along the way. +In fact, this classifier will have 0% recall! Of course, most real +classifiers fall somewhere in between these two extremes. 
But these examples +serve to show that in settings where one of the classes is of interest (i.e., +there is a *positive* label), there is a trade-off between precision and recall that one has to +make when designing a classifier. +``` + ++++ + +(randomseeds)= +## Randomness and seeds + +```{index} random +``` + +Beginning in this chapter, our data analyses will often involve the use +of *randomness*. We use randomness any time we need to make a decision in our +analysis that needs to be fair, unbiased, and not influenced by human input. +For example, in this chapter, we need to split +a data set into a training set and test set to evaluate our classifier. We +certainly do not want to choose how to split +the data ourselves by hand, as we want to avoid accidentally influencing the result +of the evaluation. So instead, we let Python *randomly* split the data. +In future chapters we will use randomness +in many other ways, e.g., to help us select a small subset of data from a larger data set, +to pick groupings of data, and more. + +```{index} reproducible, seed +``` + +```{index} see: random seed; seed +``` + +```{index} seed; numpy.random.seed +``` + +However, the use of randomness runs counter to one of the main +tenets of good data analysis practice: *reproducibility*. Recall that a reproducible +analysis produces the same result each time it is run; if we include randomness +in the analysis, would we not get a different result each time? +The trick is that in Python—and other programming languages—randomness +is not actually random! Instead, Python uses a *random number generator* that +produces a sequence of numbers that +are completely determined by a + *seed value*. Once you set the seed value, everything after that point may *look* random, +but is actually totally reproducible. As long as you pick the same seed +value, you get the same result! 
+ +```{index} sample; numpy.random.choice +``` + +Let's use an example to investigate how randomness works in Python. Say we +have a series object containing the integers from 0 to 9. We want +to randomly pick 10 numbers from that list, but we want it to be reproducible. +Before randomly picking the 10 numbers, +we call the `seed` function from the `numpy` package, and pass it any integer as the argument. +Below we use the seed number `1`. At +that point, Python will keep track of the randomness that occurs throughout the code. +For example, we can call the `sample` method +on the series of numbers, passing the argument `n = 10` to indicate that we want 10 samples. + +```{code-cell} ipython3 +import numpy as np +import pandas as pd + +np.random.seed(1) + +nums_0_to_9 = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + +random_numbers1 = nums_0_to_9.sample(n = 10).to_numpy() +random_numbers1 +``` +You can see that `random_numbers1` is a list of 10 numbers +from 0 to 9 that, from all appearances, looks random. If +we run the `sample` method again, +we will get a fresh batch of 10 numbers that also look random. + +```{code-cell} ipython3 +random_numbers2 = nums_0_to_9.sample(n = 10).to_numpy() +random_numbers2 +``` + +If we want to force Python to produce the same sequences of random numbers, +we can simply call the `np.random.seed` function with the seed value `1`---the same +as before---and then call the `sample` method again. + +```{code-cell} ipython3 +np.random.seed(1) +random_numbers1_again = nums_0_to_9.sample(n = 10).to_numpy() +random_numbers1_again +``` + +```{code-cell} ipython3 +random_numbers2_again = nums_0_to_9.sample(n = 10).to_numpy() +random_numbers2_again +``` + +Notice that after calling `np.random.seed`, we get the same +two sequences of numbers in the same order. `random_numbers1` and `random_numbers1_again` +produce the same sequence of numbers, and the same can be said about `random_numbers2` and +`random_numbers2_again`. 
And if we choose a different value for the seed---say, 4235---we +obtain a different sequence of random numbers. + +```{code-cell} ipython3 +np.random.seed(4235) +random_numbers = nums_0_to_9.sample(n = 10).to_numpy() +random_numbers +``` + +```{code-cell} ipython3 +random_numbers = nums_0_to_9.sample(n = 10).to_numpy() +random_numbers +``` + +In other words, even though the sequences of numbers that Python is generating *look* +random, they are totally determined when we set a seed value! + +So what does this mean for data analysis? Well, `sample` is certainly not the +only data frame method that uses randomness in Python. Many of the functions +that we use in `scikit-learn`, `pandas`, and beyond use randomness—many +of them without even telling you about it. Also note that when Python starts +up, it creates its own seed to use. So if you do not explicitly +call the `np.random.seed` function, your results +will likely not be reproducible. Finally, be careful to set the seed *only once* at +the beginning of a data analysis. Each time you set the seed, you are inserting +your own human input, thereby influencing the analysis. For example, if you use +the `sample` method many times throughout your analysis but set the seed each time, the +randomness that Python uses will not look as random as it should. + +In summary: if you want your analysis to be reproducible, i.e., produce *the same result* +each time you run it, make sure to use `np.random.seed` exactly once +at the beginning of the analysis. Different argument values +in `np.random.seed` will lead to different patterns of randomness, but as long as you pick the same +value your analysis results will be the same. In the remainder of the textbook, +we will set the seed once at the beginning of each chapter. + +````{note} +When you use `np.random.seed`, you are really setting the seed for the `numpy` +package's *default random number generator*. 
Using the global default random +number generator is easier than other methods, but has some potential drawbacks. For example, +other code that you may not notice (e.g., code buried inside some +other package) could potentially *also* call `np.random.seed`, thus modifying +your analysis in an undesirable way. Furthermore, not *all* functions use +`numpy`'s random number generator; some may use another one entirely. +In that case, setting `np.random.seed` may not actually make your whole analysis +reproducible. + +In this book, we will generally only use packages that play nicely with `numpy`'s +default random number generator, so we will stick with `np.random.seed`. +You can achieve more careful control over randomness in your analysis +by creating a `numpy` [`RandomState` object](https://numpy.org/doc/1.16/reference/generated/numpy.random.RandomState.html) +once at the beginning of your analysis, and passing it to +the `random_state` argument that is available in many `pandas` and `scikit-learn` +functions. Those functions will then use your `RandomState` to generate random numbers instead of +`numpy`'s default generator. For example, we can reproduce our earlier example by using a `RandomState` +object with the `seed` value set to 1; we get the same lists of numbers once again. +```{code} +rnd = np.random.RandomState(seed = 1) +random_numbers1_third = nums_0_to_9.sample(n = 10, random_state = rnd).to_numpy() +random_numbers1_third +``` +```{code} +array([2, 9, 6, 4, 0, 3, 1, 7, 8, 5]) +``` +```{code} +random_numbers2_third = nums_0_to_9.sample(n = 10, random_state = rnd).to_numpy() +random_numbers2_third +``` +```{code} +array([9, 5, 3, 0, 8, 4, 2, 1, 6, 7]) +``` + +```` + +## Evaluating performance with `scikit-learn` + +```{index} scikit-learn, visualization; scatter +``` + +Back to evaluating classifiers now! 
+In Python, we can use the `scikit-learn` package not only to perform $K$-nearest neighbors +classification, but also to assess how well our classification worked. +Let's work through an example of how to use tools from `scikit-learn` to evaluate a classifier + using the breast cancer data set from the previous chapter. +We begin the analysis by loading the packages we require, +reading in the breast cancer data, +and then making a quick scatter plot visualization of +tumor cell concavity versus smoothness colored by diagnosis in {numref}`fig:06-precode`. +You will also notice that we set the random seed using the `np.random.seed` function, +as described in {numref}`randomseeds`. + +```{code-cell} ipython3 +# load packages +import altair as alt +import pandas as pd +from sklearn import set_config + +# Output dataframes instead of arrays +set_config(transform_output="pandas") + +# set the seed +np.random.seed(1) + +# load data +cancer = pd.read_csv("data/wdbc_unscaled.csv") +# re-label Class "M" as "Malignant", and Class "B" as "Benign" +cancer["Class"] = cancer["Class"].replace({ + "M" : "Malignant", + "B" : "Benign" +}) + +# create scatter plot of tumor cell concavity versus smoothness, +# labeling the points by diagnosis class + +perim_concav = alt.Chart(cancer).mark_circle().encode( + x=alt.X("Smoothness").scale(zero=False), + y="Concavity", + color=alt.Color("Class").title("Diagnosis") +) +perim_concav +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:06-precode + +Scatter plot of tumor cell concavity versus smoothness colored by diagnosis label. +``` + ++++ + +### Create the train / test split + +Once we have decided on a predictive question to answer and done some +preliminary exploration, the very next thing to do is to split the data into +the training and test sets. 
Typically, the training set is between 50% and 95% of +the data, while the test set is the remaining 5% to 50%; the intuition is that +you want to trade off between training an accurate model (by using a larger +training data set) and getting an accurate evaluation of its performance (by +using a larger test data set). Here, we will use 75% of the data for training, +and 25% for testing. + ++++ + +```{index} scikit-learn; train_test_split, shuffling, stratification +``` + +The `train_test_split` function from `scikit-learn` handles the procedure of splitting +the data for us. We can specify two very important parameters when using `train_test_split` to ensure +that the accuracy estimates from the test data are reasonable. First, +setting `shuffle=True` (which is the default) means the data will be shuffled before splitting, +which ensures that any ordering present +in the data does not influence the data that ends up in the training and testing sets. +Second, by specifying the `stratify` parameter to be the response variable in the training set, +it **stratifies** the data by the class label, to ensure that roughly +the same proportion of each class ends up in both the training and testing sets. For example, +in our data set, roughly 63% of the +observations are from the benign class (`Benign`), and 37% are from the malignant class (`Malignant`), +so specifying `stratify` as the class column ensures that roughly 63% of the training data are benign, +37% of the training data are malignant, +and the same proportions exist in the testing data. + +Let's use the `train_test_split` function to create the training and testing sets. +We first need to import the function from the `sklearn` package. Then +we will specify that `train_size=0.75` so that 75% of our original data set ends up +in the training set. 
We will also set the `stratify` argument to the categorical label variable +(here, `cancer["Class"]`) to ensure that the training and testing subsets contain the +right proportions of each category of observation. + +```{code-cell} ipython3 +:tags: [remove-cell] +# seed hacking to get a split that makes 10-fold have a lower std error than 5-fold +np.random.seed(5) +``` + +```{code-cell} ipython3 +from sklearn.model_selection import train_test_split + +cancer_train, cancer_test = train_test_split( + cancer, train_size=0.75, stratify=cancer["Class"] +) +cancer_train.info() +``` + +```{code-cell} ipython3 +cancer_test.info() +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("cancer_train_nrow", "{:d}".format(len(cancer_train))) +glue("cancer_test_nrow", "{:d}".format(len(cancer_test))) +``` + +```{index} info +``` + +We can see from the `info` method above that the training set contains {glue:text}`cancer_train_nrow` observations, +while the test set contains {glue:text}`cancer_test_nrow` observations. This corresponds to +a train / test split of 75% / 25%, as desired. Recall from {numref}`Chapter %s ` +that we use the `info` method to preview the number of rows, the variable names, their data types, and +missing entries of a data frame. + +```{index} groupby, count +``` + +We can use the `value_counts` method with the `normalize` argument set to `True` +to find the percentage of malignant and benign classes +in `cancer_train`. We see about {glue:text}`cancer_train_b_prop`% of the training +data are benign and {glue:text}`cancer_train_m_prop`% +are malignant, indicating that our class proportions were roughly preserved when we split the data. 
+ +```{code-cell} ipython3 +cancer_train["Class"].value_counts(normalize=True) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("cancer_train_b_prop", "{:0.0f}".format(cancer_train["Class"].value_counts(normalize = True)["Benign"]*100)) +glue("cancer_train_m_prop", "{:0.0f}".format(cancer_train["Class"].value_counts(normalize = True)["Malignant"]*100)) +``` + +### Preprocess the data + +As we mentioned in the last chapter, $K$-nearest neighbors is sensitive to the scale of the predictors, +so we should perform some preprocessing to standardize them. An +additional consideration we need to take when doing this is that we should +create the standardization preprocessor using **only the training data**. This ensures that +our test data does not influence any aspect of our model training. Once we have +created the standardization preprocessor, we can then apply it separately to both the +training and test data sets. + ++++ + +```{index} pipeline, pipeline; make_column_transformer, pipeline; StandardScaler +``` + +Fortunately, `scikit-learn` helps us handle this properly as long as we wrap our +analysis steps in a `Pipeline`, as in {numref}`Chapter %s `. +So below we construct and prepare +the preprocessor using `make_column_transformer` just as before. + +```{code-cell} ipython3 +from sklearn.preprocessing import StandardScaler +from sklearn.compose import make_column_transformer + +cancer_preprocessor = make_column_transformer( + (StandardScaler(), ["Smoothness", "Concavity"]), +) +``` + +### Train the classifier + +Now that we have split our original data set into training and test sets, we +can create our $K$-nearest neighbors classifier with only the training set using +the technique we learned in the previous chapter. For now, we will just choose +the number $K$ of neighbors to be 3, and use only the concavity and smoothness predictors by +selecting them from the `cancer_train` data frame. 
+We will first import the `KNeighborsClassifier` model and `make_pipeline` from `sklearn`. +Then as before we will create a model object, combine +the model object and preprocessor into a `Pipeline` using the `make_pipeline` function, and then finally +use the `fit` method to build the classifier. + +```{code-cell} ipython3 +from sklearn.neighbors import KNeighborsClassifier +from sklearn.pipeline import make_pipeline + +knn = KNeighborsClassifier(n_neighbors=3) + +X = cancer_train[["Smoothness", "Concavity"]] +y = cancer_train["Class"] + +knn_pipeline = make_pipeline(cancer_preprocessor, knn) +knn_pipeline.fit(X, y) + +knn_pipeline +``` + +### Predict the labels in the test set + +```{index} pandas.concat +``` + +Now that we have a $K$-nearest neighbors classifier object, we can use it to +predict the class labels for our test set and +augment the original test data with a column of predictions. +The `Class` variable contains the actual +diagnoses, while the `predicted` contains the predicted diagnoses from the +classifier. Note that below we print out just the `ID`, `Class`, and `predicted` +variables in the output data frame. + +```{code-cell} ipython3 +cancer_test["predicted"] = knn_pipeline.predict(cancer_test[["Smoothness", "Concavity"]]) +cancer_test[["ID", "Class", "predicted"]] +``` + +### Evaluate performance + +```{index} scikit-learn; score +``` + +Finally, we can assess our classifier's performance. First, we will examine accuracy. +We could compute the accuracy manually +by using our earlier formula: the number of correct predictions divided by the total +number of predictions. First we filter the rows to find the number of correct predictions, +and then divide the number of rows with correct predictions by the total number of rows +using the `shape` attribute. 
+```{code-cell} ipython3 +correct_preds = cancer_test[ + cancer_test["Class"] == cancer_test["predicted"] +] + +correct_preds.shape[0] / cancer_test.shape[0] +``` + +The `scitkit-learn` package also provides a more convenient way to do this using +the `score` method. To use the `score` method, we need to specify two arguments: +predictors and the actual labels. We pass the same test data +for the predictors that we originally passed into `predict` when making predictions, +and we provide the actual labels via the `cancer_test["Class"]` series. + +```{code-cell} ipython3 +cancer_acc_1 = knn_pipeline.score( + cancer_test[["Smoothness", "Concavity"]], + cancer_test["Class"] +) +cancer_acc_1 +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("cancer_acc_1", "{:0.0f}".format(100*cancer_acc_1)) +``` + ++++ + +The output shows that the estimated accuracy of the classifier on the test data +was {glue:text}`cancer_acc_1`%. +We can also look at the *confusion matrix* for the classifier +using the `crosstab` function from `pandas`. The `crosstab` function takes two +arguments: the actual labels first, then the predicted labels second. 
+ +```{code-cell} ipython3 +pd.crosstab( + cancer_test["Class"], + cancer_test["predicted"] +) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] +_ctab = pd.crosstab(cancer_test["Class"], + cancer_test["predicted"] + ) + +c11 = _ctab["Malignant"]["Malignant"] +c00 = _ctab["Benign"]["Benign"] +c10 = _ctab["Benign"]["Malignant"] # classify benign, true malignant +c01 = _ctab["Malignant"]["Benign"] # classify malignant, true benign + +glue("confu11", "{:d}".format(c11)) +glue("confu00", "{:d}".format(c00)) +glue("confu10", "{:d}".format(c10)) +glue("confu01", "{:d}".format(c01)) +glue("confu11_00", "{:d}".format(c11 + c00)) +glue("confu10_11", "{:d}".format(c10 + c11)) +glue("confu_fal_neg", "{:0.0f}".format(100 * c10 / (c10 + c11))) +glue("confu_accuracy", "{:.2f}".format(100*(c00+c11)/(c00+c11+c01+c10))) +glue("confu_precision", "{:.2f}".format(100*c11/(c11+c01))) +glue("confu_recall", "{:.2f}".format(100*c11/(c11+c10))) +glue("confu_precision_0", "{:0.0f}".format(100*c11/(c11+c01))) +glue("confu_recall_0", "{:0.0f}".format(100*c11/(c11+c10))) +``` + +The confusion matrix shows {glue:text}`confu11` observations were correctly predicted +as malignant, and {glue:text}`confu00` were correctly predicted as benign. +It also shows that the classifier made some mistakes; in particular, +it classified {glue:text}`confu10` observations as benign when they were actually malignant, +and {glue:text}`confu01` observations as malignant when they were actually benign. 
+Using our formulas from earlier, we see that the accuracy agrees with what Python reported, +and can also compute the precision and recall of the classifier: + +```{code-cell} ipython3 +:tags: [remove-cell] + +from IPython.display import display, Math +# accuracy string +acc_eq_str = r"\mathrm{accuracy} = \frac{\mathrm{number \; of \; correct \; predictions}}{\mathrm{total \; number \; of \; predictions}} = \frac{" +acc_eq_str += str(c00) + "+" + str(c11) + "}{" + str(c00) + "+" + str(c11) + "+" + str(c01) + "+" + str(c10) + "} = " + str( np.round(100*(c00+c11)/(c00+c11+c01+c10),2)) +acc_eq_math = Math(acc_eq_str) +glue("acc_eq_math_glued", acc_eq_math) + +prec_eq_str = r"\mathrm{precision} = \frac{\mathrm{number \; of \; correct \; positive \; predictions}}{\mathrm{total \; number \; of \; positive \; predictions}} = \frac{" +prec_eq_str += str(c00) + "}{" + str(c00) + "+" + str(c01) + "} = " + str( np.round(100*c11/(c11+c01), 2)) +prec_eq_math = Math(prec_eq_str) +glue("prec_eq_math_glued", prec_eq_math) + +rec_eq_str = r"\mathrm{recall} = \frac{\mathrm{number \; of \; correct \; positive \; predictions}}{\mathrm{total \; number \; of \; positive \; test \; set \; observations}} = \frac{" +rec_eq_str += str(c00) + "}{" + str(c00) + "+" + str(c10) + "} = " + str( np.round(100*c11/(c11+c10), 2)) +rec_eq_math = Math(rec_eq_str) +glue("rec_eq_math_glued", rec_eq_math) +``` + +```{glue:math} acc_eq_math_glued +``` + +```{glue:math} prec_eq_math_glued +``` + +```{glue:math} rec_eq_math_glued +``` + ++++ + +### Critically analyze performance + +We now know that the classifier was {glue:text}`cancer_acc_1`% accurate +on the test data set, and had a precision of {glue:text}`confu_precision_0`% and +a recall of {glue:text}`confu_recall_0`%. +That sounds pretty good! Wait, *is* it good? +Or do we need something higher? 
+ +```{index} accuracy; assessment +``` + +In general, a *good* value for accuracy (as well as precision and recall, if applicable) +depends on the application; you must critically analyze your accuracy in the context of the problem +you are solving. For example, if we were building a classifier for a kind of tumor that is benign 99% +of the time, a classifier with 99% accuracy is not terribly impressive (just always guess benign!). +And beyond just accuracy, we need to consider the precision and recall: as mentioned +earlier, the *kind* of mistake the classifier makes is +important in many applications as well. In the previous example with 99% benign observations, it might be very bad for the +classifier to predict "benign" when the actual class is "malignant" (a false negative), as this +might result in a patient not receiving appropriate medical attention. In other +words, in this context, we need the classifier to have a *high recall*. On the +other hand, it might be less bad for the classifier to guess "malignant" when +the actual class is "benign" (a false positive), as the patient will then likely see a doctor who +can provide an expert diagnosis. In other words, we are fine with sacrificing +some precision in the interest of achieving high recall. This is why it is +important not only to look at accuracy, but also the confusion matrix. + + +```{index} classification; majority +``` + +However, there is always an easy baseline that you can compare to for any +classification problem: the *majority classifier*. The majority classifier +*always* guesses the majority class label from the training data, regardless of +the predictor variables' values. It helps to give you a sense of +scale when considering accuracies. If the majority classifier obtains a 90% +accuracy on a problem, then you might hope for your $K$-nearest neighbors +classifier to do better than that. 
If your classifier provides a significant +improvement upon the majority classifier, this means that at least your method +is extracting some useful information from your predictor variables. Be +careful though: improving on the majority classifier does not *necessarily* +mean the classifier is working well enough for your application. + +As an example, in the breast cancer data, recall the proportions of benign and malignant +observations in the training data are as follows: + +```{code-cell} ipython3 +cancer_train["Class"].value_counts(normalize=True) +``` + +Since the benign class represents the majority of the training data, +the majority classifier would *always* predict that a new observation +is benign. The estimated accuracy of the majority classifier is usually +fairly close to the majority class proportion in the training data. +In this case, we would suspect that the majority classifier will have +an accuracy of around {glue:text}`cancer_train_b_prop`%. +The $K$-nearest neighbors classifier we built does quite a bit better than this, +with an accuracy of {glue:text}`cancer_acc_1`%. +This means that from the perspective of accuracy, +the $K$-nearest neighbors classifier improved quite a bit on the basic +majority classifier. Hooray! But we still need to be cautious; in +this application, it is likely very important not to misdiagnose any malignant tumors to avoid missing +patients who actually need medical care. The confusion matrix above shows +that the classifier does, indeed, misdiagnose a significant number of +malignant tumors as benign ({glue:text}`confu10` out of {glue:text}`confu10_11` malignant tumors, or {glue:text}`confu_fal_neg`%!). +Therefore, even though the accuracy improved upon the majority classifier, +our critical analysis suggests that this classifier may not have appropriate performance +for the application. 
+ ++++ + +## Tuning the classifier + +```{index} parameter +``` + +```{index} see: tuning parameter; parameter +``` + +The vast majority of predictive models in statistics and machine learning have +*parameters*. A *parameter* +is a number you have to pick in advance that determines +some aspect of how the model behaves. For example, in the $K$-nearest neighbors +classification algorithm, $K$ is a parameter that we have to pick +that determines how many neighbors participate in the class vote. +By picking different values of $K$, we create different classifiers +that make different predictions. + +So then, how do we pick the *best* value of $K$, i.e., *tune* the model? +And is it possible to make this selection in a principled way? In this book, +we will focus on maximizing the accuracy of the classifier. Ideally, +we want somehow to maximize the accuracy of our classifier on data *it +hasn't seen yet*. But we cannot use our test data set in the process of building +our model. So we will play the same trick we did before when evaluating +our classifier: we'll split our *training data itself* into two subsets, +use one to train the model, and then use the other to evaluate it. +In this section, we will cover the details of this procedure, as well as +how to use it to help you pick a good parameter value for your classifier. + +**And remember:** don't touch the test set during the tuning process. Tuning is a part of model training! + ++++ + +### Cross-validation + +```{index} validation set +``` + +The first step in choosing the parameter $K$ is to be able to evaluate the +classifier using only the training data. If this is possible, then we can compare +the classifier's performance for different values of $K$—and pick the best—using +only the training data. As suggested at the beginning of this section, we will +accomplish this by splitting the training data, training on one subset, and evaluating +on the other. 
The subset of training data used for evaluation is often called the **validation set**. + +There is, however, one key difference from the train/test split +that we performed earlier. In particular, we were forced to make only a *single split* +of the data. This is because at the end of the day, we have to produce a single classifier. +If we had multiple different splits of the data into training and testing data, +we would produce multiple different classifiers. +But while we are tuning the classifier, we are free to create multiple classifiers +based on multiple splits of the training data, evaluate them, and then choose a parameter +value based on __*all*__ of the different results. If we just split our overall training +data *once*, our best parameter choice will depend strongly on whatever data +was lucky enough to end up in the validation set. Perhaps using multiple +different train/validation splits, we'll get a better estimate of accuracy, +which will lead to a better choice of the number of neighbors $K$ for the +overall set of training data. + +Let's investigate this idea in Python! In particular, we will generate five different train/validation +splits of our overall training data, train five different $K$-nearest neighbors +models, and evaluate their accuracy. We will start with just a single +split. 
+ +```{code-cell} ipython3 +# create the 25/75 split of the *training data* into sub-training and validation +cancer_subtrain, cancer_validation = train_test_split( + cancer_train, test_size=0.25 +) + +# fit the model on the sub-training data +knn = KNeighborsClassifier(n_neighbors=3) +X = cancer_subtrain[["Smoothness", "Concavity"]] +y = cancer_subtrain["Class"] +knn_pipeline = make_pipeline(cancer_preprocessor, knn) +knn_pipeline.fit(X, y) + +# compute the score on validation data +acc = knn_pipeline.score( + cancer_validation[["Smoothness", "Concavity"]], + cancer_validation["Class"] +) +acc +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +accuracies = [acc] +for i in range(1, 5): + # create the 25/75 split of the training data into training and validation + cancer_subtrain, cancer_validation = train_test_split( + cancer_train, test_size=0.25 + ) + + # fit the model on the sub-training data + knn = KNeighborsClassifier(n_neighbors=3) + X = cancer_subtrain[["Smoothness", "Concavity"]] + y = cancer_subtrain["Class"] + knn_pipeline = make_pipeline(cancer_preprocessor, knn).fit(X, y) + + # compute the score on validation data + accuracies.append(knn_pipeline.score( + cancer_validation[["Smoothness", "Concavity"]], + cancer_validation["Class"] + )) +avg_accuracy = np.round(np.array(accuracies).mean()*100,1) +accuracies = list(np.round(np.array(accuracies)*100, 1)) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] +glue("acc_seed1", "{:0.1f}".format(100 * acc)) +glue("avg_5_splits", "{:0.1f}".format(avg_accuracy)) +glue("accuracies", "[" + "%, ".join(["{:0.1f}".format(acc) for acc in accuracies]) + "%]") +``` +```{code-cell} ipython3 +:tags: [remove-cell] + +``` + +The accuracy estimate using this split is {glue:text}`acc_seed1`%. +Now we repeat the above code 4 more times, which generates 4 more splits. +Therefore we get five different shuffles of the data, and therefore five different values for +accuracy: {glue:text}`accuracies`. 
None of these values are +necessarily "more correct" than any other; they're +just five estimates of the true, underlying accuracy of our classifier built +using our overall training data. We can combine the estimates by taking their +average (here {glue:text}`avg_5_splits`%) to try to get a single assessment of our +classifier's accuracy; this has the effect of reducing the influence of any one +(un)lucky validation set on the estimate. + +```{index} cross-validation +``` + +In practice, we don't use random splits, but rather use a more structured +splitting procedure so that each observation in the data set is used in a +validation set only a single time. The name for this strategy is +**cross-validation**. In **cross-validation**, we split our **overall training +data** into $C$ evenly sized chunks. Then, iteratively use $1$ chunk as the +**validation set** and combine the remaining $C-1$ chunks +as the **training set**. +This procedure is shown in {numref}`fig:06-cv-image`. +Here, $C=5$ different chunks of the data set are used, +resulting in 5 different choices for the **validation set**; we call this +*5-fold* cross-validation. + ++++ + +```{figure} img/classification2/cv.png +:name: fig:06-cv-image + +5-fold cross-validation. +``` + + ++++ + +```{index} cross-validation; cross_validate, scikit-learn; cross_validate +``` + +To perform 5-fold cross-validation in Python with `scikit-learn`, we use another +function: `cross_validate`. This function requires that we specify +a modelling `Pipeline` as the `estimator` argument, +the number of folds as the `cv` argument, +and the training data predictors and labels as the `X` and `y` arguments. +Since the `cross_validate` function outputs a dictionary, we use `pd.DataFrame` to convert it to a `pandas` +dataframe for better visualization. +Note that the `cross_validate` function handles stratifying the classes in +each train and validate fold automatically. 
+ +```{code-cell} ipython3 +from sklearn.model_selection import cross_validate + +knn = KNeighborsClassifier(n_neighbors=3) +cancer_pipe = make_pipeline(cancer_preprocessor, knn) +X = cancer_train[["Smoothness", "Concavity"]] +y = cancer_train["Class"] +cv_5_df = pd.DataFrame( + cross_validate( + estimator=cancer_pipe, + cv=5, + X=X, + y=y + ) +) + +cv_5_df +``` + +The validation scores we are interested in are contained in the `test_score` column. +We can then aggregate the *mean* and *standard error* +of the classifier's validation accuracy across the folds. +You should consider the mean (`mean`) to be the estimated accuracy, while the standard +error (`sem`) is a measure of how uncertain we are in that mean value. A detailed treatment of this +is beyond the scope of this chapter; but roughly, if your estimated mean is {glue:text}`cv_5_mean` and standard +error is {glue:text}`cv_5_std`, you can expect the *true* average accuracy of the +classifier to be somewhere roughly between {glue:text}`cv_5_lower`% and {glue:text}`cv_5_upper`% (although it may +fall outside this range). You may ignore the other columns in the metrics data frame. + +```{code-cell} ipython3 +cv_5_metrics = cv_5_df.agg(["mean", "sem"]) +cv_5_metrics +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("cv_5_mean", "{:.2f}".format(cv_5_metrics.loc["mean", "test_score"])) +glue("cv_5_std", "{:.2f}".format(cv_5_metrics.loc["sem", "test_score"])) +glue("cv_5_upper", + "{:0.0f}".format( + 100 + * ( + round(cv_5_metrics.loc["mean", "test_score"], 2) + + round(cv_5_metrics.loc["sem", "test_score"], 2) + ) + ) +) +glue("cv_5_lower", + "{:0.0f}".format( + 100 + * ( + round(cv_5_metrics.loc["mean", "test_score"], 2) + - round(cv_5_metrics.loc["sem", "test_score"], 2) + ) + ) +) +``` + +We can choose any number of folds, and typically the more we use the better our +accuracy estimate will be (lower standard error). 
However, we are limited +by computational power: the +more folds we choose, the more computation it takes, and hence the more time +it takes to run the analysis. So when you do cross-validation, you need to +consider the size of the data, the speed of the algorithm (e.g., $K$-nearest +neighbors), and the speed of your computer. In practice, this is a +trial-and-error process, but typically $C$ is chosen to be either 5 or 10. Here +we will try 10-fold cross-validation to see if we get a lower standard error. + +```{code-cell} ipython3 +cv_10 = pd.DataFrame( + cross_validate( + estimator=cancer_pipe, + cv=10, + X=X, + y=y + ) +) + +cv_10_df = pd.DataFrame(cv_10) +cv_10_metrics = cv_10_df.agg(["mean", "sem"]) +cv_10_metrics +``` + +In this case, using 10-fold instead of 5-fold cross validation did +reduce the standard error very slightly. In fact, due to the randomness in how the data are split, sometimes +you might even end up with a *higher* standard error when increasing the number of folds! +We can make the reduction in standard error more dramatic by increasing the number of folds +by a large amount. In the following code we show the result when $C = 50$; +picking such a large number of folds can take a long time to run in practice, +so we usually stick to 5 or 10. + +```{code-cell} ipython3 +cv_50_df = pd.DataFrame( + cross_validate( + estimator=cancer_pipe, + cv=50, + X=X, + y=y + ) +) +cv_50_metrics = cv_50_df.agg(["mean", "sem"]) +cv_50_metrics +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("cv_10_mean", "{:0.0f}".format(100 * cv_10_metrics.loc["mean", "test_score"])) +``` + +### Parameter value selection + +Using 5- and 10-fold cross-validation, we have estimated that the prediction +accuracy of our classifier is somewhere around {glue:text}`cv_10_mean`%. +Whether that is good or not +depends entirely on the downstream application of the data analysis. 
In the +present situation, we are trying to predict a tumor diagnosis, with expensive, +damaging chemo/radiation therapy or patient death as potential consequences of +misprediction. Hence, we might like to +do better than {glue:text}`cv_10_mean`% for this application. + +In order to improve our classifier, we have one choice of parameter: the number of +neighbors, $K$. Since cross-validation helps us evaluate the accuracy of our +classifier, we can use cross-validation to calculate an accuracy for each value +of $K$ in a reasonable range, and then pick the value of $K$ that gives us the +best accuracy. The `scikit-learn` package collection provides built-in +functionality, named `GridSearchCV`, to automatically handle the details for us. +Before we use `GridSearchCV`, we need to create a new pipeline +with a `KNeighborsClassifier` that has the number of neighbors left unspecified. + +```{code-cell} ipython3 +knn = KNeighborsClassifier() +cancer_tune_pipe = make_pipeline(cancer_preprocessor, knn) +``` + ++++ + +Next we specify the grid of parameter values that we want to try for +each tunable parameter. We do this in a Python dictionary: the key is +the identifier of the parameter to tune, and the value is a list of parameter values +to try when tuning. We can find the "identifier" of a parameter by using +the `get_params` method on the pipeline. +```{code-cell} ipython3 +cancer_tune_pipe.get_params() +``` +Wow, there's quite a bit of *stuff* there! If you sift through the muck +a little bit, you will see one parameter identifier that stands out: +`"kneighborsclassifier__n_neighbors"`. This identifier combines the name +of the K nearest neighbors classification step in our pipeline, `kneighborsclassifier`, +with the name of the parameter, `n_neighbors`. +We now construct the `parameter_grid` dictionary that will tell `GridSearchCV` +what parameter values to try. 
+Note that you can specify multiple tunable parameters +by creating a dictionary with multiple key-value pairs, but +here we just have to tune the number of neighbors. +```{code-cell} ipython3 +parameter_grid = { + "kneighborsclassifier__n_neighbors": range(1, 100, 5), +} +``` +The `range` function in Python that we used above allows us to specify a sequence of values. +The first argument is the starting number (here, `1`), +the second argument is *one greater than* the final number (here, `100`), +and the third argument is the number to values to skip between steps in the sequence (here, `5`). +So in this case we generate the sequence 1, 6, 11, 16, ..., 96. +If we instead specified `range(0, 100, 5)`, we would get the sequence 0, 5, 10, 15, ..., 90, 95. +The number 100 is not included because the third argument is *one greater than* the final possible +number in the sequence. There are two additional useful ways to employ `range`. +If we call `range` with just one argument, Python counts +up to that number starting at 0. So `range(4)` is the same as `range(0, 4, 1)` and generates the sequence 0, 1, 2, 3. +If we call `range` with two arguments, Python counts starting at the first number up to the second number. +So `range(1, 4)` is the same as `range(1, 4, 1)` and generates the sequence `1, 2, 3`. + +```{index} cross-validation; GridSearchCV, scikit-learn; GridSearchCV, scikit-learn; RandomizedSearchCV +``` + +Okay! We are finally ready to create the `GridSearchCV` object. +First we import it from the `sklearn` package. +Then we pass it the `cancer_tune_pipe` pipeline in the `estimator` argument, +the `parameter_grid` in the `param_grid` argument, +and specify `cv=10` folds. Note that this does not actually run +the tuning yet; just as before, we will have to use the `fit` method. 
+ +```{code-cell} ipython3 +from sklearn.model_selection import GridSearchCV + +cancer_tune_grid = GridSearchCV( + estimator=cancer_tune_pipe, + param_grid=parameter_grid, + cv=10 +) +``` + +Now we use the `fit` method on the `GridSearchCV` object to begin the tuning process. +We pass the training data predictors and labels as the two arguments to `fit` as usual. +The `cv_results_` attribute of the output contains the resulting cross-validation +accuracy estimate for each choice of `n_neighbors`, but it isn't in an easily used +format. We will wrap it in a `pd.DataFrame` to make it easier to understand, +and print the `info` of the result. + +```{code-cell} ipython3 +cancer_tune_grid.fit( + cancer_train[["Smoothness", "Concavity"]], + cancer_train["Class"] +) +accuracies_grid = pd.DataFrame(cancer_tune_grid.cv_results_) +accuracies_grid.info() +``` + +There is a lot of information to look at here, but we are most interested +in three quantities: the number of neighbors (`param_kneighbors_classifier__n_neighbors`), +the cross-validation accuracy estimate (`mean_test_score`), +and the standard error of the accuracy estimate. Unfortunately `GridSearchCV` does +not directly output the standard error for each cross-validation accuracy; but +it *does* output the standard *deviation* (`std_test_score`). We can compute +the standard error from the standard deviation by dividing it by the square +root of the number of folds, i.e., + +$$\text{Standard Error} = \frac{\text{Standard Deviation}}{\sqrt{\text{Number of Folds}}}.$$ + +We will also rename the parameter name column to be a bit more readable, +and drop the now unused `std_test_score` column. 
+ +```{code-cell} ipython3 +accuracies_grid["sem_test_score"] = accuracies_grid["std_test_score"] / 10**(1/2) +accuracies_grid = ( + accuracies_grid[[ + "param_kneighborsclassifier__n_neighbors", + "mean_test_score", + "sem_test_score" + ]] + .rename(columns={"param_kneighborsclassifier__n_neighbors": "n_neighbors"}) +) +accuracies_grid +``` + +We can decide which number of neighbors is best by plotting the accuracy versus $K$, +as shown in {numref}`fig:06-find-k`. +Here we are using the shortcut `point=True` to layer a point and line chart. + +```{code-cell} ipython3 +:tags: [remove-output] + +accuracy_vs_k = alt.Chart(accuracies_grid).mark_line(point=True).encode( + x=alt.X("n_neighbors").title("Neighbors"), + y=alt.Y("mean_test_score") + .scale(domain=(0.85, 0.90)) + .title("Accuracy estimate") +) + +accuracy_vs_k +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:06-find-k", accuracy_vs_k) +glue("best_k_unique", "{:d}".format(accuracies_grid["n_neighbors"][accuracies_grid["mean_test_score"].idxmax()])) +glue("best_acc", "{:.1f}".format(accuracies_grid["mean_test_score"].max()*100)) +``` + +:::{glue:figure} fig:06-find-k +:name: fig:06-find-k + +Plot of estimated accuracy versus the number of neighbors. +::: + +We can also obtain the number of neighbours with the highest accuracy programmatically by accessing +the `best_params_` attribute of the fit `GridSearchCV` object. Note that it is still useful to visualize +the results as we did above since this provides additional information on how the model performance varies. +```{code-cell} ipython3 +cancer_tune_grid.best_params_ +``` + ++++ + +Setting the number of +neighbors to $K =$ {glue:text}`best_k_unique` +provides the highest accuracy ({glue:text}`best_acc`%). But there is no exact or perfect answer here; +any selection from $K = 30$ to $80$ or so would be reasonably justified, as all +of these differ in classifier accuracy by a small amount. 
Remember: the +values you see on this plot are *estimates* of the true accuracy of our +classifier. Although the +$K =$ {glue:text}`best_k_unique` value is +higher than the others on this plot, +that doesn't mean the classifier is actually more accurate with this parameter +value! Generally, when selecting $K$ (and other parameters for other predictive +models), we are looking for a value where: + +- we get roughly optimal accuracy, so that our model will likely be accurate; +- changing the value to a nearby one (e.g., adding or subtracting a small number) doesn't decrease accuracy too much, so that our choice is reliable in the presence of uncertainty; +- the cost of training the model is not prohibitive (e.g., in our situation, if $K$ is too large, predicting becomes expensive!). + +We know that $K =$ {glue:text}`best_k_unique` +provides the highest estimated accuracy. Further, {numref}`fig:06-find-k` shows that the estimated accuracy +changes by only a small amount if we increase or decrease $K$ near $K =$ {glue:text}`best_k_unique`. +And finally, $K =$ {glue:text}`best_k_unique` does not create a prohibitively expensive +computational cost of training. Considering these three points, we would indeed select +$K =$ {glue:text}`best_k_unique` for the classifier. + ++++ + +### Under/Overfitting + +To build a bit more intuition, what happens if we keep increasing the number of +neighbors $K$? In fact, the cross-validation accuracy estimate actually starts to decrease! +Let's specify a much larger range of values of $K$ to try in the `param_grid` +argument of `GridSearchCV`. {numref}`fig:06-lots-of-ks` shows a plot of estimated accuracy as +we vary $K$ from 1 to almost the number of observations in the data set. 
+ +```{code-cell} ipython3 +:tags: [remove-output] + +large_param_grid = { + "kneighborsclassifier__n_neighbors": range(1, 385, 10), +} + +large_cancer_tune_grid = GridSearchCV( + estimator=cancer_tune_pipe, + param_grid=large_param_grid, + cv=10 +) + +large_cancer_tune_grid.fit( + cancer_train[["Smoothness", "Concavity"]], + cancer_train["Class"] +) + +large_accuracies_grid = pd.DataFrame(large_cancer_tune_grid.cv_results_) + +large_accuracy_vs_k = alt.Chart(large_accuracies_grid).mark_line(point=True).encode( + x=alt.X("param_kneighborsclassifier__n_neighbors").title("Neighbors"), + y=alt.Y("mean_test_score") + .scale(domain=(0.60, 0.90)) + .title("Accuracy estimate") +) + +large_accuracy_vs_k +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:06-lots-of-ks", large_accuracy_vs_k) +``` + +:::{glue:figure} fig:06-lots-of-ks +:name: fig:06-lots-of-ks + +Plot of accuracy estimate versus number of neighbors for many K values. +::: + ++++ + +```{index} underfitting; classification +``` + +**Underfitting:** What is actually happening to our classifier that causes +this? As we increase the number of neighbors, more and more of the training +observations (and those that are farther and farther away from the point) get a +"say" in what the class of a new observation is. This causes a sort of +"averaging effect" to take place, making the boundary between where our +classifier would predict a tumor to be malignant versus benign to smooth out +and become *simpler.* If you take this to the extreme, setting $K$ to the total +training data set size, then the classifier will always predict the same label +regardless of what the new observation looks like. In general, if the model +*isn't influenced enough* by the training data, it is said to **underfit** the +data. 
+ +```{index} overfitting; classification +``` + +**Overfitting:** In contrast, when we decrease the number of neighbors, each +individual data point has a stronger and stronger vote regarding nearby points. +Since the data themselves are noisy, this causes a more "jagged" boundary +corresponding to a *less simple* model. If you take this case to the extreme, +setting $K = 1$, then the classifier is essentially just matching each new +observation to its closest neighbor in the training data set. This is just as +problematic as the large $K$ case, because the classifier becomes unreliable on +new data: if we had a different training set, the predictions would be +completely different. In general, if the model *is influenced too much* by the +training data, it is said to **overfit** the data. + +```{code-cell} ipython3 +:tags: [remove-cell] +alt.data_transformers.disable_max_rows() + +cancer_plot = ( + alt.Chart( + cancer_train, + ) + .mark_point(opacity=0.6, filled=True, size=40) + .encode( + x=alt.X( + "Smoothness", + scale=alt.Scale( + domain=( + cancer_train["Smoothness"].min() * 0.95, + cancer_train["Smoothness"].max() * 1.05, + ) + ), + ), + y=alt.Y( + "Concavity", + scale=alt.Scale( + domain=( + cancer_train["Concavity"].min() -0.025, + cancer_train["Concavity"].max() * 1.05, + ) + ), + ), + color=alt.Color("Class", title="Diagnosis"), + ) +) + +X = cancer_train[["Smoothness", "Concavity"]] +y = cancer_train["Class"] + +# create a prediction pt grid +smo_grid = np.linspace( + cancer_train["Smoothness"].min() * 0.95, cancer_train["Smoothness"].max() * 1.05, 100 +) +con_grid = np.linspace( + cancer_train["Concavity"].min() - 0.025, cancer_train["Concavity"].max() * 1.05, 100 +) +scgrid = np.array(np.meshgrid(smo_grid, con_grid)).reshape(2, -1).T +scgrid = pd.DataFrame(scgrid, columns=["Smoothness", "Concavity"]) + +plot_list = [] +for k in [1, 7, 20, 300]: + cancer_pipe = make_pipeline(cancer_preprocessor, KNeighborsClassifier(n_neighbors=k)) + 
cancer_pipe.fit(X, y) + + knnPredGrid = cancer_pipe.predict(scgrid) + prediction_table = scgrid.copy() + prediction_table["Class"] = knnPredGrid + + # add a prediction layer + prediction_plot = ( + alt.Chart( + prediction_table, + title=f"K = {k}" + ) + .mark_point(opacity=0.2, filled=True, size=20) + .encode( + x=alt.X( + "Smoothness", + scale=alt.Scale( + domain=( + cancer_train["Smoothness"].min() * 0.95, + cancer_train["Smoothness"].max() * 1.05 + ), + nice=False + ) + ), + y=alt.Y( + "Concavity", + scale=alt.Scale( + domain=( + cancer_train["Concavity"].min() -0.025, + cancer_train["Concavity"].max() * 1.05 + ), + nice=False + ) + ), + color=alt.Color("Class", title="Diagnosis"), + ) + ) + plot_list.append(cancer_plot + prediction_plot) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue( + "fig:06-decision-grid-K", + ((plot_list[0] | plot_list[1]) + & (plot_list[2] | plot_list[3])).configure_legend( + orient="bottom", titleAnchor="middle" + ), +) +``` + +:::{glue:figure} fig:06-decision-grid-K +:name: fig:06-decision-grid-K + +Effect of K in overfitting and underfitting. +::: + ++++ + +Both overfitting and underfitting are problematic and will lead to a model that +does not generalize well to new data. When fitting a model, we need to strike a +balance between the two. You can see these two effects in +{numref}`fig:06-decision-grid-K`, which shows how the classifier changes as we +set the number of neighbors $K$ to 1, 7, 20, and 300. + ++++ + +## Summary + +Classification algorithms use one or more quantitative variables to predict the +value of another categorical variable. In particular, the $K$-nearest neighbors +algorithm does this by first finding the $K$ points in the training data +nearest to the new observation, and then returning the majority class vote from +those training observations. We can tune and evaluate a classifier by splitting +the data randomly into a training and test data set. 
The training set is used +to build the classifier, and we can tune the classifier (e.g., select the number +of neighbors in $K$-nearest neighbors) by maximizing estimated accuracy via +cross-validation. After we have tuned the model, we can use the test set to +estimate its accuracy. The overall process is summarized in +{numref}`fig:06-overview`. + ++++ + +```{figure} img/classification2/train-test-overview.jpeg +:name: fig:06-overview + +Overview of KNN classification. +``` + ++++ + +```{index} scikit-learn, pipeline, cross-validation, K-nearest neighbors; classification, classification +``` + +The overall workflow for performing $K$-nearest neighbors classification using `scikit-learn` is as follows: + +1. Use the `train_test_split` function to split the data into a training and test set. Set the `stratify` argument to the class label column of the dataframe. Put the test set aside for now. +2. Create a `Pipeline` that specifies the preprocessing steps and the classifier. +3. Define the parameter grid by passing the set of $K$ values that you would like to tune. +4. Use `GridSearchCV` to estimate the classifier accuracy for a range of $K$ values. Pass the pipeline and parameter grid defined in steps 2. and 3. as the `param_grid` argument and the `estimator` argument, respectively. +5. Execute the grid search by passing the training data to the `fit` method on the `GridSearchCV` instance created in step 4. +6. Pick a value of $K$ that yields a high cross-validation accuracy estimate that doesn't change much if you change $K$ to a nearby value. +7. Create a new model object for the best parameter value (i.e., $K$), and retrain the classifier by calling the `fit` method. +8. Evaluate the estimated accuracy of the classifier on the test set using the `score` method. + +In these last two chapters, we focused on the $K$-nearest neighbor algorithm, +but there are many other methods we could have used to predict a categorical label. 
+All algorithms have their strengths and weaknesses, and we summarize these for +the $K$-NN here. + +**Strengths:** $K$-nearest neighbors classification + +1. is a simple, intuitive algorithm, +2. requires few assumptions about what the data must look like, and +3. works for binary (two-class) and multi-class (more than 2 classes) classification problems. + +**Weaknesses:** $K$-nearest neighbors classification + +1. becomes very slow as the training data gets larger, +2. may not perform well with a large number of predictors, and +3. may not perform well when classes are imbalanced. + ++++ + +## Predictor variable selection + +```{note} +This section is not required reading for the remainder of the textbook. It is included for those readers +interested in learning how irrelevant variables can influence the performance of a classifier, and how to +pick a subset of useful variables to include as predictors. +``` + +```{index} irrelevant predictors +``` + +Another potentially important part of tuning your classifier is to choose which +variables from your data will be treated as predictor variables. Technically, you can choose +anything from using a single predictor variable to using every variable in your +data; the $K$-nearest neighbors algorithm accepts any number of +predictors. However, it is **not** the case that using more predictors always +yields better predictions! In fact, sometimes including irrelevant predictors can +actually negatively affect classifier performance. + ++++ {"toc-hr-collapsed": true} + +### The effect of irrelevant predictors + +Let's take a look at an example where $K$-nearest neighbors performs +worse when given more predictors to work with. In this example, we modified +the breast cancer data to have only the `Smoothness`, `Concavity`, and +`Perimeter` variables from the original data. Then, we added irrelevant +variables that we created ourselves using a random number generator. 
+The irrelevant variables each take a value of 0 or 1 with equal probability for each observation, regardless
+of what value the `Class` variable takes.
+ +```{code-cell} ipython3 +:tags: [remove-cell] + +# get accuracies after including k irrelevant features +ks = [0, 5, 10, 15, 20, 40] +fixedaccs = list() +accs = list() +nghbrs = list() + +for i in range(len(ks)): + cancer_irrelevant_subset = cancer_irrelevant.iloc[:, : (4 + ks[i])] + cancer_preprocessor = make_column_transformer( + ( + StandardScaler(), + list(cancer_irrelevant_subset.drop(columns=["Class"]).columns), + ), + ) + cancer_tune_pipe = make_pipeline(cancer_preprocessor, KNeighborsClassifier()) + param_grid = { + "kneighborsclassifier__n_neighbors": range(1, 21), + } ## double check: in R textbook, it is tune_grid(..., grid = 20), so I guess it matches RandomizedSearchCV + ## instead of GridSeachCV? + # param_grid_rand = { + # "kneighborsclassifier__n_neighbors": range(1, 100), + # } + # cancer_tune_grid = RandomizedSearchCV( + # estimator=cancer_tune_pipe, + # param_distributions=param_grid_rand, + # n_iter=20, + # cv=5, + # n_jobs=-1, + # return_train_score=True, + # ) + cancer_tune_grid = GridSearchCV( + estimator=cancer_tune_pipe, + param_grid=param_grid, + cv=5, + n_jobs=-1, + return_train_score=True, + ) + + X = cancer_irrelevant_subset.drop(columns=["Class"]) + y = cancer_irrelevant_subset["Class"] + + cancer_model_grid = cancer_tune_grid.fit(X, y) + accuracies_grid = pd.DataFrame(cancer_model_grid.cv_results_) + sorted_accuracies = accuracies_grid.sort_values( + by="mean_test_score", ascending=False + ) + + res = sorted_accuracies.iloc[0, :] + accs.append(res["mean_test_score"]) + nghbrs.append(res["param_kneighborsclassifier__n_neighbors"]) + + ## Use fixed n_neighbors=3 + cancer_fixed_pipe = make_pipeline( + cancer_preprocessor, KNeighborsClassifier(n_neighbors=3) + ) + + cv_5 = cross_validate(estimator=cancer_fixed_pipe, X=X, y=y, cv=5) + cv_5_metrics = pd.DataFrame(cv_5).agg(["mean", "sem"]) + fixedaccs.append(cv_5_metrics.loc["mean", "test_score"]) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +summary_df = pd.DataFrame( + 
{"ks": ks, "nghbrs": nghbrs, "accs": accs, "fixedaccs": fixedaccs} +) +plt_irrelevant_accuracies = ( + alt.Chart(summary_df) + .mark_line() #point=True + .encode( + x=alt.X("ks", title="Number of Irrelevant Predictors"), + y=alt.Y( + "accs", + title="Model Accuracy Estimate", + scale=alt.Scale(domain=(0.80, 0.95)), + ), + ) +) +glue("fig:06-performance-irrelevant-features", plt_irrelevant_accuracies) +``` + +:::{glue:figure} fig:06-performance-irrelevant-features +:name: fig:06-performance-irrelevant-features + +Effect of inclusion of irrelevant predictors. +::: + +Although the accuracy decreases as expected, one surprising thing about +{numref}`fig:06-performance-irrelevant-features` is that it shows that the method +still outperforms the baseline majority classifier (with about {glue:text}`cancer_train_b_prop`% accuracy) +even with 40 irrelevant variables. +How could that be? {numref}`fig:06-neighbors-irrelevant-features` provides the answer: +the tuning procedure for the $K$-nearest neighbors classifier combats the extra randomness from the irrelevant variables +by increasing the number of neighbors. Of course, because of all the extra noise in the data from the irrelevant +variables, the number of neighbors does not increase smoothly; but the general trend is increasing. {numref}`fig:06-fixed-irrelevant-features` corroborates +this evidence; if we fix the number of neighbors to $K=3$, the accuracy falls off more quickly. + +```{code-cell} ipython3 +:tags: [remove-cell] + +plt_irrelevant_nghbrs = ( + alt.Chart(summary_df) + .mark_line() # point=True + .encode( + x=alt.X("ks", title="Number of Irrelevant Predictors"), + y=alt.Y( + "nghbrs", + title="Number of neighbors", + ), + ) +) +glue("fig:06-neighbors-irrelevant-features", plt_irrelevant_nghbrs) +``` + +:::{glue:figure} fig:06-neighbors-irrelevant-features +:name: fig:06-neighbors-irrelevant-features + +Tuned number of neighbors for varying number of irrelevant predictors. 
+::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +melted_summary_df = summary_df.melt( + id_vars=["ks", "nghbrs"], var_name="Type", value_name="Accuracy" + ) +melted_summary_df["Type"] = melted_summary_df["Type"].apply(lambda x: "Tuned K" if x=="accs" else "K = 3") + +plt_irrelevant_nghbrs_fixed = ( + alt.Chart( + melted_summary_df + ) + .mark_line() # point=True + .encode( + x=alt.X("ks", title="Number of Irrelevant Predictors"), + y=alt.Y( + "Accuracy", + scale=alt.Scale(domain=(0.75, 0.95)), + ), + color=alt.Color("Type"), + ) +) +glue("fig:06-fixed-irrelevant-features", plt_irrelevant_nghbrs_fixed) +``` + +:::{glue:figure} fig:06-fixed-irrelevant-features +:name: fig:06-fixed-irrelevant-features + +Accuracy versus number of irrelevant predictors for tuned and untuned number of neighbors. +::: + ++++ + +### Finding a good subset of predictors + +So then, if it is not ideal to use all of our variables as predictors without consideration, how +do we choose which variables we *should* use? A simple method is to rely on your scientific understanding +of the data to tell you which variables are not likely to be useful predictors. For example, in the cancer +data that we have been studying, the `ID` variable is just a unique identifier for the observation. +As it is not related to any measured property of the cells, the `ID` variable should therefore not be used +as a predictor. That is, of course, a very clear-cut case. But the decision for the remaining variables +is less obvious, as all seem like reasonable candidates. It +is not clear which subset of them will create the best classifier. One could use visualizations and +other exploratory analyses to try to help understand which variables are potentially relevant, but +this process is both time-consuming and error-prone when there are many variables to consider. +Therefore we need a more systematic and programmatic way of choosing variables. 
+This is a very difficult problem to solve in +general, and there are a number of methods that have been developed that apply +in particular cases of interest. Here we will discuss two basic +selection methods as an introduction to the topic. See the additional resources at the end of +this chapter to find out where you can learn more about variable selection, including more advanced methods. + +```{index} variable selection; best subset +``` + +```{index} see: predictor selection; variable selection +``` + +The first idea you might think of for a systematic way to select predictors +is to try all possible subsets of predictors and then pick the set that results in the "best" classifier. +This procedure is indeed a well-known variable selection method referred to +as *best subset selection* {cite:p}`bealesubset,hockingsubset`. +In particular, you + +1. create a separate model for every possible subset of predictors, +2. tune each one using cross-validation, and +3. pick the subset of predictors that gives you the highest cross-validation accuracy. + +Best subset selection is applicable to any classification method ($K$-NN or otherwise). +However, it becomes very slow when you have even a moderate +number of predictors to choose from (say, around 10). This is because the number of possible predictor subsets +grows very quickly with the number of predictors, and you have to train the model (itself +a slow process!) for each one. For example, if we have 2 predictors—let's call +them A and B—then we have 3 variable sets to try: A alone, B alone, and finally A +and B together. If we have 3 predictors—A, B, and C—then we have 7 +to try: A, B, C, AB, BC, AC, and ABC. In general, the number of models +we have to train for $m$ predictors is $2^m-1$; in other words, when we +get to 10 predictors we have over *one thousand* models to train, and +at 20 predictors we have over *one million* models to train! 
+So although it is a simple method, best subset selection is usually too computationally +expensive to use in practice. + +```{index} variable selection; forward +``` + +Another idea is to iteratively build up a model by adding one predictor variable +at a time. This method—known as *forward selection* {cite:p}`forwardefroymson,forwarddraper`—is also widely +applicable and fairly straightforward. It involves the following steps: + +1. Start with a model having no predictors. +2. Run the following 3 steps until you run out of predictors: + 1. For each unused predictor, add it to the model to form a *candidate model*. + 2. Tune all of the candidate models. + 3. Update the model to be the candidate model with the highest cross-validation accuracy. +3. Select the model that provides the best trade-off between accuracy and simplicity. + +Say you have $m$ total predictors to work with. In the first iteration, you have to make +$m$ candidate models, each with 1 predictor. Then in the second iteration, you have +to make $m-1$ candidate models, each with 2 predictors (the one you chose before and a new one). +This pattern continues for as many iterations as you want. If you run the method +all the way until you run out of predictors to choose, you will end up training +$\frac{1}{2}m(m+1)$ separate models. This is a *big* improvement from the $2^m-1$ +models that best subset selection requires you to train! For example, while best subset selection requires +training over 1000 candidate models with 10 predictors, forward selection requires training only 55 candidate models. +Therefore we will continue the rest of this section using forward selection. + +```{note} +One word of caution before we move on. Every additional model that you train +increases the likelihood that you will get unlucky and stumble +on a model that has a high cross-validation accuracy estimate, but a low true +accuracy on the test data and other future observations. 
+Since forward selection involves training a lot of models, you run a fairly +high risk of this happening. To keep this risk low, only use forward selection +when you have a large amount of data and a relatively small total number of +predictors. More advanced methods do not suffer from this +problem as much; see the additional resources at the end of this chapter for +where to learn more about advanced predictor selection methods. +``` + ++++ + +### Forward selection in `scikit-learn` + +We now turn to implementing forward selection in Python. +First we will extract a smaller set of predictors to work with in this illustrative example—`Smoothness`, +`Concavity`, `Perimeter`, `Irrelevant1`, `Irrelevant2`, and `Irrelevant3`—as well as the `Class` variable as the label. +We will also extract the column names for the full set of predictors. + +```{code-cell} ipython3 +cancer_subset = cancer_irrelevant[ + [ + "Class", + "Smoothness", + "Concavity", + "Perimeter", + "Irrelevant1", + "Irrelevant2", + "Irrelevant3", + ] +] + +names = list(cancer_subset.drop( + columns=["Class"] +).columns.values) + +cancer_subset +``` + +To perform forward selection, we could use the +[`SequentialFeatureSelector`](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.SequentialFeatureSelector.html) +from `scikit-learn`; but it is difficult to combine this approach with parameter tuning to find a good number of neighbors +for each set of features. Instead we will code the forward selection algorithm manually. +In particular, we need code that tries adding each available predictor to a model, finding the best, and iterating. 
+If you recall the end of the wrangling chapter, we mentioned +that sometimes one needs more flexible forms of iteration than what +we have used earlier, and in these cases one typically resorts to +a *for loop*; see +the [control flow section](https://wesmckinney.com/book/python-basics.html#control_for) in +*Python for Data Analysis* {cite:p}`mckinney2012python`. +Here we will use two for loops: one over increasing predictor set sizes +(where you see `for i in range(1, n_total + 1):` below), +and another to check which predictor to add in each round (where you see `for j in range(len(names))` below). +For each set of predictors to try, we extract the subset of predictors, +pass it into a preprocessor, build a `Pipeline` that tunes +a K-NN classifier using 10-fold cross-validation, +and finally records the estimated accuracy. + +```{code-cell} ipython3 +from sklearn.compose import make_column_selector + +accuracy_dict = {"size": [], "selected_predictors": [], "accuracy": []} + +# store the total number of predictors +n_total = len(names) + +# start with an empty list of selected predictors +selected = [] + +# create the pipeline and CV grid search objects +param_grid = { + "kneighborsclassifier__n_neighbors": range(1, 61, 5), +} +cancer_preprocessor = make_column_transformer( + (StandardScaler(), make_column_selector(dtype_include="number")) +) +cancer_tune_pipe = make_pipeline(cancer_preprocessor, KNeighborsClassifier()) +cancer_tune_grid = GridSearchCV( + estimator=cancer_tune_pipe, + param_grid=param_grid, + cv=10, + n_jobs=-1 +) + +# for every possible number of predictors +for i in range(1, n_total + 1): + accs = np.zeros(len(names)) + # for every possible predictor to add + for j in range(len(names)): + # Add remaining predictor j to the model + X = cancer_subset[selected + [names[j]]] + y = cancer_subset["Class"] + + # Find the best K for this set of predictors + cancer_tune_grid.fit(X, y) + accuracies_grid = pd.DataFrame(cancer_tune_grid.cv_results_) + + # 
Store the tuned accuracy for this set of predictors + accs[j] = accuracies_grid["mean_test_score"].max() + + # get the best new set of predictors that maximize cv accuracy + best_set = selected + [names[accs.argmax()]] + + # store the results for this round of forward selection + accuracy_dict["size"].append(i) + accuracy_dict["selected_predictors"].append(", ".join(best_set)) + accuracy_dict["accuracy"].append(accs.max()) + + # update the selected & available sets of predictors + selected = best_set + del names[accs.argmax()] + +accuracies = pd.DataFrame(accuracy_dict) +accuracies +``` + +```{index} variable selection; elbow method +``` + +Interesting! The forward selection procedure first added the three meaningful variables `Perimeter`, +`Concavity`, and `Smoothness`, followed by the irrelevant variables. {numref}`fig:06-fwdsel-3` +visualizes the accuracy versus the number of predictors in the model. You can see that +as meaningful predictors are added, the estimated accuracy increases substantially; and as you add irrelevant +variables, the accuracy either exhibits small fluctuations or decreases as the model attempts to tune the number +of neighbors to account for the extra noise. In order to pick the right model from the sequence, you have +to balance high accuracy and model simplicity (i.e., having fewer predictors and a lower chance of overfitting). +The way to find that balance is to look for the *elbow* +in {numref}`fig:06-fwdsel-3`, i.e., the place on the plot where the accuracy stops increasing dramatically and +levels off or begins to decrease. The elbow in {numref}`fig:06-fwdsel-3` appears to occur at the model with +3 predictors; after that point the accuracy levels off. So here the right trade-off of accuracy and number of predictors +occurs with 3 variables: `Perimeter, Concavity, Smoothness`. In other words, we have successfully removed irrelevant +predictors from the model! 
It is always worth remembering, however, that what cross-validation gives you +is an *estimate* of the true accuracy; you have to use your judgement when looking at this plot to decide +where the elbow occurs, and whether adding a variable provides a meaningful increase in accuracy. + +```{code-cell} ipython3 +:tags: [remove-cell] + +fwd_sel_accuracies_plot = ( + alt.Chart(accuracies) + .mark_line() # point=True + .encode( + x=alt.X("size", title="Number of Predictors"), + y=alt.Y( + "accuracy", + title="Estimated Accuracy", + scale=alt.Scale(domain=(0.89, 0.935)), + ), + ) +) +glue("fig:06-fwdsel-3", fwd_sel_accuracies_plot) +``` + +:::{glue:figure} fig:06-fwdsel-3 +:name: fig:06-fwdsel-3 + +Estimated accuracy versus the number of predictors for the sequence of models built using forward selection. +::: + ++++ + +```{note} +Since the choice of which variables to include as predictors is +part of tuning your classifier, you *cannot use your test data* for this +process! +``` + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Classification II: evaluation and tuning" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. + ++++ + +## Additional resources + ++++ + +- The [`scikit-learn` website](https://scikit-learn.org/stable/) is an excellent + reference for more details on, and advanced usage of, the functions and + packages in the past two chapters. 
Aside from that, it also offers many + useful [tutorials](https://scikit-learn.org/stable/tutorial/index.html) + to get you started. It's worth noting that the `scikit-learn` package + does a lot more than just classification, and so the + examples on the website similarly go beyond classification as well. In the next + two chapters, you'll learn about another kind of predictive modeling setting, + so it might be worth visiting the website only after reading through those + chapters. +- [*An Introduction to Statistical Learning*](https://www.statlearning.com/) {cite:p}`james2013introduction` provides + a great next stop in the process of + learning about classification. Chapter 4 discusses additional basic techniques + for classification that we do not cover, such as logistic regression, linear + discriminant analysis, and naive Bayes. Chapter 5 goes into much more detail + about cross-validation. Chapters 8 and 9 cover decision trees and support + vector machines, two very popular but more advanced classification methods. + Finally, Chapter 6 covers a number of methods for selecting predictor + variables. Note that while this book is still a very accessible introductory + text, it requires a bit more mathematical background than we require. 
+ + +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/clustering.md b/pull313/_sources/clustering.md new file mode 100644 index 00000000..25de3c0c --- /dev/null +++ b/pull313/_sources/clustering.md @@ -0,0 +1,1052 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.14.7 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(clustering)= +# Clustering + +```{code-cell} ipython3 +:tags: [remove-cell] + +# get rid of futurewarnings from sklearn kmeans +import warnings +warnings.simplefilter(action='ignore', category=FutureWarning) + +from chapter_preamble import * +``` + +## Overview + +As part of exploratory data analysis, it is often helpful to see if there are +meaningful subgroups (or *clusters*) in the data. +This grouping can be used for many purposes, +such as generating new questions or improving predictive analyses. +This chapter provides an introduction to clustering +using the K-means algorithm, +including techniques to choose the number of clusters. + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + +* Describe a case where clustering is appropriate, +and what insight it might extract from the data. +* Explain the K-means clustering algorithm. +* Interpret the output of a K-means analysis. +* Differentiate between clustering and classification. +* Identify when it is necessary to scale variables before clustering and do this using Python +* Perform k-means clustering in Python using `scikit-learn` +* Use the elbow method to choose the number of clusters for K-means. +* Visualize the output of k-means clustering in Python using a coloured scatter plot +* Describe advantages, limitations and assumptions of the kmeans clustering algorithm. 
+ +## Clustering + +```{index} clustering +``` + +Clustering is a data analysis task +involving separating a data set into subgroups of related data. +For example, we might use clustering to separate a +data set of documents into groups that correspond to topics, a data set of +human genetic information into groups that correspond to ancestral +subpopulations, or a data set of online customers into groups that correspond +to purchasing behaviors. Once the data are separated, we can, for example, +use the subgroups to generate new questions about the data and follow up with a +predictive modeling exercise. In this course, clustering will be used only for +exploratory analysis, i.e., uncovering patterns in the data. + +```{index} classification, regression, supervised, unsupervised +``` + +Note that clustering is a fundamentally different kind of task than +classification or regression. In particular, both classification and +regression are *supervised tasks* where there is a *response variable* (a +category label or value), and we have examples of past data with labels/values +that help us predict those of future data. By contrast, clustering is an +*unsupervised task*, as we are trying to understand and examine the structure +of data without any response variable labels or values to help us. This +approach has both advantages and disadvantages. Clustering requires no +additional annotation or input on the data. For example, while it would be +nearly impossible to annotate all the articles on Wikipedia with human-made +topic labels, we can cluster the articles without this information to find +groupings corresponding to topics automatically. However, given that there is +no response variable, it is not as easy to evaluate the "quality" of a +clustering. With classification, we can use a test data set to assess +prediction performance. In clustering, there is not a single good choice for +evaluation. 
In this book, we will use visualization to ascertain the quality of +a clustering, and leave rigorous evaluation for more advanced courses. + +```{index} K-means +``` + +As in the case of classification, +there are many possible methods that we could use to cluster our observations +to look for subgroups. +In this book, we will focus on the widely used K-means algorithm {cite:p}`kmeans`. +In your future studies, you might encounter hierarchical clustering, +principal component analysis, multidimensional scaling, and more; +see the additional resources section at the end of this chapter +for where to begin learning more about these other methods. + +```{index} semisupervised +``` + +```{note} +There are also so-called *semisupervised* tasks, +where only some of the data come with response variable labels/values, +but the vast majority don't. +The goal is to try to uncover underlying structure in the data +that allows one to guess the missing labels. +This sort of task is beneficial, for example, +when one has an unlabeled data set that is too large to manually label, +but one is willing to provide a few informative example labels as a "seed" +to guess the labels for all the data. +``` + +## An illustrative example + +```{index} Palmer penguins +``` + +In this chapter we will focus on a data set from +[the `palmerpenguins` R package](https://allisonhorst.github.io/palmerpenguins/) {cite:p}`palmerpenguins`. This +data set was collected by Dr.
Kristen Gorman and +the Palmer Station, Antarctica Long Term Ecological Research Site, and includes +measurements for adult penguins ({numref}`09-penguins`) found near there {cite:p}`penguinpaper`. +Our goal will be to use two +variables—penguin bill and flipper length, both in millimeters—to determine whether +there are distinct types of penguins in our data. +Understanding this might help us with species discovery and classification in a data-driven +way. Note that we have reduced the size of the data set to 18 observations and 2 variables; +this will help us make clear visualizations that illustrate how clustering works for learning purposes. + +```{figure} img/clustering/gentoo.jpg +--- +height: 400px +name: 09-penguins +--- +A Gentoo penguin. +``` + +Before we get started, we will set a random seed. +This will ensure that our analysis will be reproducible. +As we will learn in more detail later in the chapter, +setting the seed here is important +because the K-means clustering algorithm uses randomness +when choosing a starting position for each cluster. + +```{index} seed; numpy.random.seed +``` + +```{code-cell} ipython3 +import numpy as np + +np.random.seed(6) +``` + +```{index} read function; read_csv +``` + +Now we can load and preview the `penguins` data. + +```{code-cell} ipython3 +import pandas as pd + +penguins = pd.read_csv("data/penguins.csv") +penguins +``` + +We will begin by using a version of the data that we have standardized, `penguins_standardized`, +to illustrate how K-means clustering works (recall standardization from {numref}`Chapter %s `). +Later in this chapter, we will return to the original `penguins` data to see how to include standardization automatically +in the clustering pipeline. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] +penguins_standardized = penguins.assign( + bill_length_standardized = (penguins["bill_length_mm"] - penguins["bill_length_mm"].mean())/penguins["bill_length_mm"].std(), + flipper_length_standardized = (penguins["flipper_length_mm"] - penguins["flipper_length_mm"].mean())/penguins["flipper_length_mm"].std() +).drop( + columns = ["bill_length_mm", "flipper_length_mm"] +) +``` + +```{code-cell} ipython3 +penguins_standardized +``` + +Next, we can create a scatter plot using this data set +to see if we can detect subtypes or groups in our data set. + +```{code-cell} ipython3 +import altair as alt + +scatter_plot = alt.Chart(penguins_standardized).mark_circle().encode( + x=alt.X("flipper_length_standardized").title("Flipper Length (standardized)"), + y=alt.Y("bill_length_standardized").title("Bill Length (standardized)") +) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("scatter_plot", scatter_plot, display=True) +``` + +:::{glue:figure} scatter_plot +:figwidth: 700px +:name: scatter_plot + +Scatter plot of standardized bill length versus standardized flipper length. +::: + +```{index} altair, altair; mark_circle +``` + +Based on the visualization in {numref}`scatter_plot`, +we might suspect there are a few subtypes of penguins within our data set. +We can see roughly 3 groups of observations in {numref}`scatter_plot`, +including: + +1. a small flipper and bill length group, +2. a small flipper length, but large bill length group, and +3. a large flipper and bill length group. + +```{index} K-means, elbow method +``` + +Data visualization is a great tool to give us a rough sense of such patterns +when we have a small number of variables. +But if we are to group data—and select the number of groups—as part of +a reproducible analysis, we need something a bit more automated. 
+Additionally, finding groups via visualization becomes more difficult +as we increase the number of variables we consider when clustering. +The way to rigorously separate the data into groups +is to use a clustering algorithm. +In this chapter, we will focus on the *K-means* algorithm, +a widely used and often very effective clustering method, +combined with the *elbow method* +for selecting the number of clusters. +This procedure will separate the data into groups; +{numref}`colored_scatter_plot` shows these groups +denoted by colored scatter points. + +```{code-cell} ipython3 +:tags: [remove-cell] +from sklearn import set_config +from sklearn.cluster import KMeans + +# Output dataframes instead of arrays +set_config(transform_output="pandas") + +kmeans = KMeans(n_clusters=3) + +penguin_clust = kmeans.fit(penguins_standardized) + +penguins_clustered = penguins_standardized.assign(cluster = penguin_clust.labels_) + +colored_scatter_plot = alt.Chart(penguins_clustered).mark_circle().encode( + x=alt.X("flipper_length_standardized", title="Flipper Length (standardized)"), + y=alt.Y("bill_length_standardized", title="Bill Length (standardized)"), + color=alt.Color("cluster:N") +) + +glue("colored_scatter_plot", colored_scatter_plot, display=True) +``` + +:::{glue:figure} colored_scatter_plot +:figwidth: 700px +:name: colored_scatter_plot + +Scatter plot of standardized bill length versus standardized flipper length with colored groups. +::: + + +What are the labels for these groups? Unfortunately, we don't have any. K-means, +like almost all clustering algorithms, just outputs meaningless "cluster labels" +that are typically whole numbers: 0, 1, 2, 3, etc. But in a simple case like this, +where we can easily visualize the clusters on a scatter plot, we can give +human-made labels to the groups using their positions on +the plot: + +- small flipper length and small bill length (orange cluster), +- small flipper length and large bill length (blue cluster). 
+- and large flipper length and large bill length (red cluster). + +Once we have made these determinations, we can use them to inform our species +classifications or ask further questions about our data. For example, we might +be interested in understanding the relationship between flipper length and bill +length, and that relationship may differ depending on the type of penguin we +have. + +## K-means + +### Measuring cluster quality + +```{code-cell} ipython3 +:tags: [remove-cell] + +clus = penguins_clustered[penguins_clustered["cluster"] == 0][["bill_length_standardized", "flipper_length_standardized"]] +``` + +```{index} see: within-cluster sum-of-squared-distances; WSSD +``` + +```{index} WSSD +``` + +The K-means algorithm is a procedure that groups data into K clusters. +It starts with an initial clustering of the data, and then iteratively +improves it by making adjustments to the assignment of data +to clusters until it cannot improve any further. But how do we measure +the "quality" of a clustering, and what does it mean to improve it? +In K-means clustering, we measure the quality of a cluster by its +*within-cluster sum-of-squared-distances* (WSSD), also called *inertia*. Computing this involves two steps. +First, we find the cluster centers by computing the mean of each variable +over data points in the cluster. For example, suppose we have a +cluster containing four observations, and we are using two variables, $x$ and $y$, to cluster the data.
+Then we would compute the coordinates, $\mu_x$ and $\mu_y$, of the cluster center via + + +$$ +\mu_x = \frac{1}{4}(x_1+x_2+x_3+x_4) \quad \mu_y = \frac{1}{4}(y_1+y_2+y_3+y_4) +$$ + +```{code-cell} ipython3 +:tags: [remove-cell] + +clus_rows = clus.shape[0] + +mean_flipper_len_std = round(np.mean(clus["flipper_length_standardized"]),2) +mean_bill_len_std = round(np.mean(clus["bill_length_standardized"]),2) + +glue("clus_rows_glue", "{:d}".format(clus_rows)) +glue("mean_flipper_len_std_glue","{:.2f}".format(mean_flipper_len_std)) +glue("mean_bill_len_std_glue", "{:.2f}".format(mean_bill_len_std)) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +toy_example_clus1_center = alt.layer( + alt.Chart(clus).mark_circle(size=75, opacity=1, color='steelblue').encode( + x=alt.X("flipper_length_standardized"), + y=alt.Y("bill_length_standardized") + ), + alt.Chart(clus).mark_circle(color='coral', size=500, opacity=1).encode( + x=alt.X("mean(flipper_length_standardized)") + .scale(zero=False, padding=20) + .title("Flipper Length (standardized)"), + y=alt.Y("mean(bill_length_standardized)") + .scale(zero=False, padding=30) + .title("Bill Length (standardized)"), + ) +) + +glue('toy-example-clus1-center', toy_example_clus1_center, display=True) +``` + +In the first cluster from the example, there are {glue:text}`clus_rows_glue` data points. These are shown with their cluster center +(standardized flipper length {glue:text}`mean_flipper_len_std_glue`, standardized bill length {glue:text}`mean_bill_len_std_glue`) highlighted +in {numref}`toy-example-clus1-center` + +:::{glue:figure} toy-example-clus1-center +:figwidth: 700px +:name: toy-example-clus1-center + +Cluster 0 from the `penguins_standardized` data set example. Observations are in blue, with the cluster center highlighted in orange. 
+::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +centroid_lines = alt.Chart( + clus.assign( + mean_bill_length=clus['bill_length_standardized'].mean(), + mean_flipper_length=clus['flipper_length_standardized'].mean() + ) +).mark_rule(size=1.5).encode( + alt.Y('bill_length_standardized'), + alt.Y2('mean_bill_length'), + alt.X('flipper_length_standardized'), + alt.X2('mean_flipper_length') +) +toy_example_clus1_dists = centroid_lines + toy_example_clus1_center + +glue('toy-example-clus1-dists', toy_example_clus1_dists, display=True) +``` + +```{index} distance; K-means +``` + +The second step in computing the WSSD is to add up the squared distance +between each point in the cluster +and the cluster center. +We use the straight-line / Euclidean distance formula +that we learned about in {numref}`Chapter %s `. +In the {glue:text}`clus_rows_glue`-observation cluster example above, +we would compute the WSSD $S^2$ via + +$$ +S^2 = \left((x_1 - \mu_x)^2 + (y_1 - \mu_y)^2\right) + \left((x_2 - \mu_x)^2 + (y_2 - \mu_y)^2\right)\\ + + \left((x_3 - \mu_x)^2 + (y_3 - \mu_y)^2\right) + \left((x_4 - \mu_x)^2 + (y_4 - \mu_y)^2\right) +$$ + +These distances are denoted by lines in {numref}`toy-example-clus1-dists` for the first cluster of the penguin data example. + +:::{glue:figure} toy-example-clus1-dists +:figwidth: 700px +:name: toy-example-clus1-dists + +Cluster 0 from the `penguins_standardized` data set example. Observations are in blue, with the cluster center highlighted in orange. The distances from the observations to the cluster center are represented as black lines. 
+::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +toy_example_all_clus_dists = alt.layer( + alt.Chart( + penguins_clustered.assign( + mean_bill_length=penguins_clustered.groupby('cluster')['bill_length_standardized'].transform('mean'), + mean_flipper_length=penguins_clustered.groupby('cluster')['flipper_length_standardized'].transform('mean') + ) + ).mark_rule(size=1.25).encode( + alt.Y('bill_length_standardized'), + alt.Y2('mean_bill_length'), + alt.X('flipper_length_standardized'), + alt.X2('mean_flipper_length') + ), + alt.Chart(penguins_clustered).mark_circle(size=40, opacity=1).encode( + alt.X("flipper_length_standardized"), + alt.Y("bill_length_standardized"), + alt.Color('cluster:N') + ), + alt.Chart(penguins_clustered).mark_circle(color='coral', size=200, opacity=1).encode( + alt.X("mean(flipper_length_standardized)") + .scale(zero=False) + .title("Flipper Length (standardized)"), + alt.Y("mean(bill_length_standardized)") + .scale(zero=False) + .title("Bill Length (standardized)"), + alt.Detail('cluster:N') + ) +) +glue('toy-example-all-clus-dists', toy_example_all_clus_dists, display=True) +``` + +The larger the value of $S^2$, the more spread out the cluster is, since large $S^2$ means +that points are far from the cluster center. Note, however, that "large" is relative to *both* the +scale of the variables for clustering *and* the number of points in the cluster. A cluster where points +are very close to the center might still have a large $S^2$ if there are many data points in the cluster. + +After we have calculated the WSSD for all the clusters, +we sum them together to get the *total WSSD*. For our example, +this means adding up all the squared distances for the 18 observations. +These distances are denoted by black lines in +{numref}`toy-example-all-clus-dists`. + +:::{glue:figure} toy-example-all-clus-dists +:figwidth: 700px +:name: toy-example-all-clus-dists + +All clusters from the `penguins_standardized` data set example. 
Observations are in blue, orange, and red with the cluster center highlighted in orange. The distances from the observations to each of the respective cluster centers are represented as black lines. +::: + +Since K-means uses the straight-line distance to measure the quality of a clustering, +it is limited to clustering based on quantitative variables. +However, note that there are variants of the K-means algorithm, +as well as other clustering algorithms entirely, +that use other distance metrics +to allow for non-quantitative data to be clustered. +These are beyond the scope of this book. + ++++ + +### The clustering algorithm + +```{index} K-means; algorithm +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +# Set up the initial "random" label assignment the same as in the R book +penguins_standardized['label'] = [ + 2, 2, 1, 1, 0, 0, 0, 1, + 2, 2, 1, 2, 1, 2, + 0, 1, 2, 2 +] +points_kmeans_init = alt.Chart(penguins_standardized).mark_point(size=75, filled=True, opacity=1).encode( + alt.X("flipper_length_standardized").title("Flipper Length (standardized)"), + alt.Y("bill_length_standardized").title("Bill Length (standardized)"), + alt.Color('label:N').legend(None), + alt.Shape('label:N').legend(None).scale(range=['square', 'circle', 'triangle']), + alt.Size('label:O').legend(None).scale(type='ordinal', range=[50, 50, 100]), +) + +glue('toy-kmeans-init-1', points_kmeans_init, display=True) +``` + +We begin the K-means algorithm by picking K, +and randomly assigning a roughly equal number of observations +to each of the K clusters. +An example random initialization is shown in {numref}`toy-kmeans-init-1` + + +:::{glue:figure} toy-kmeans-init-1 +:figwidth: 700px +:name: toy-kmeans-init-1 + +Random initialization of labels. +Each cluster is depicted as a different color and shape. 
+::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +from sklearn.metrics import euclidean_distances + +def plot_kmean_iterations(iterations, data, centroid_init): + """Plot kmeans cluster and label updates for multiple iterations""" + dfs = [] + centroid_inits = [] + for i in range(1, iterations+1): + data['iteration'] = f'Iteration {i}' + data['update_type'] = 'Center Update' + data['flipper_centroid'] = data['label'].map(centroid_init['flipper_length_standardized']) + data['bill_centroid'] = data['label'].map(centroid_init['bill_length_standardized']) + dfs.append(data.copy()) + + data['iteration'] = f'Iteration {i}' + data['update_type'] = 'Label Update' + cluster_columns = ['bill_length_standardized', 'flipper_length_standardized'] + data['label'] = np.argmin(euclidean_distances(data[cluster_columns], centroid_init), axis=1) + data['flipper_centroid'] = data['label'].map(centroid_init['flipper_length_standardized']) + data['bill_centroid'] = data['label'].map(centroid_init['bill_length_standardized']) + dfs.append(data.copy()) + + centroid_init = data.groupby('label')[cluster_columns].mean() + + points = alt.Chart( + pd.concat(dfs), + width=200, + height=200 + ).mark_point(filled=True, size=50, opacity=1).encode( + alt.X("flipper_length_standardized").scale(domain=(-2, 2)), + alt.Y("bill_length_standardized").scale(domain=(-2, 2)), + alt.Color('label:N').legend(None), + alt.Shape('label:N').legend(None).scale(range=['square', 'circle', 'triangle']), + alt.Size('label:O').legend(None).scale(type='ordinal', range=[50, 50, 100]), + ) + + centroids = points.mark_point(filled=True, stroke='black', strokeWidth=1.25).encode( + alt.X("mean(flipper_centroid)") + .scale(domain=(-2, 2)) + .title("Flipper Length (standardized)"), + alt.Y("mean(bill_centroid)") + .scale(domain=(-2, 2)) + .title("Bill Length (standardized)"), + size=alt.value(200) + ) + + return (points + centroids).facet( + row=alt.Row('iteration', header=alt.Header(title='', labelFontSize=18)), + 
column=alt.Column('update_type', header=alt.Header(title='', labelFontSize=18)) + ) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +centroid_init = penguins_standardized.groupby('label').mean() + +glue('toy-kmeans-iter-1', plot_kmean_iterations(3, penguins_standardized.copy(), centroid_init.copy()), display=True) +``` + +```{index} WSSD; total +``` + +Then K-means consists of two major steps that attempt to minimize the +sum of WSSDs over all the clusters, i.e., the *total WSSD*: + +1. **Center update:** Compute the center of each cluster. +2. **Label update:** Reassign each data point to the cluster with the nearest center. + +These two steps are repeated until the cluster assignments no longer change. +We show what the first three iterations of K-means would look like in +{numref}`toy-kmeans-iter-1`. Each row corresponds to an iteration, +where the left column depicts the center update, +and the right column depicts the label update (i.e., the reassignment of data to clusters). + +:::{glue:figure} toy-kmeans-iter-1 +:figwidth: 700px +:name: toy-kmeans-iter-1 + +First three iterations of K-means clustering on the `penguins_standardized` example data set. Each pair of plots corresponds to an iteration. Within the pair, the first plot depicts the center update, and the second plot depicts the reassignment of data to clusters. Cluster centers are indicated by larger points that are outlined in black. +::: + ++++ + +Note that at this point, we can terminate the algorithm since none of the assignments changed +in the fourth iteration; both the centers and labels will remain the same from this point onward. + +```{index} K-means; termination +``` + +```{note} +Is K-means *guaranteed* to stop at some point, or could it iterate forever? As it turns out, +thankfully, the answer is that K-means is guaranteed to stop after *some* number of iterations. 
For the interested reader, the +logic for this has three steps: (1) both the label update and the center update decrease total WSSD in each iteration, +(2) the total WSSD is always greater than or equal to 0, and (3) there are only a finite number of possible +ways to assign the data to clusters. So at some point, the total WSSD must stop decreasing, which means none of the assignments +are changing, and the algorithm terminates. +``` + +### Random restarts + +```{index} K-means; init argument +``` + +Unlike the classification and regression models we studied in previous chapters, K-means can get "stuck" in a bad solution. +For example, {numref}`toy-kmeans-bad-init-1` illustrates an unlucky random initialization by K-means. + +```{code-cell} ipython3 +:tags: [remove-cell] + +# Set up the initial "random" label assignment the same as in the R book +penguins_standardized['label'] = [1, 1, 2, 2, 0, 2, 0, 2, 2, 2, 1, 2, 0, 0, 0, 1, 1, 1] +centroid_init = penguins_standardized.groupby('label').mean() + +points_kmeans_init = alt.Chart(penguins_standardized).mark_point(size=75, filled=True, opacity=1).encode( + alt.X("flipper_length_standardized").title("Flipper Length (standardized)"), + alt.Y("bill_length_standardized").title("Bill Length (standardized)"), + alt.Color('label:N').legend(None), + alt.Shape('label:N').legend(None).scale(range=['square', 'circle', 'triangle']), + alt.Size('label:O').legend(None).scale(type='ordinal', range=[50, 50, 100]), +) + +glue('toy-kmeans-bad-init-1', points_kmeans_init, display=True) +``` + +:::{glue:figure} toy-kmeans-bad-init-1 +:figwidth: 700px +:name: toy-kmeans-bad-init-1 + +Random initialization of labels. 
+::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue('toy-kmeans-bad-iter-1', plot_kmean_iterations(4, penguins_standardized.copy(), centroid_init.copy()), display=True) +``` + +{numref}`toy-kmeans-bad-iter-1` shows what the iterations of K-means would look like with the unlucky random initialization shown in {numref}`toy-kmeans-bad-init-1` + + +:::{glue:figure} toy-kmeans-bad-iter-1 +:figwidth: 700px +:name: toy-kmeans-bad-iter-1 + +First four iterations of K-means clustering on the `penguins_standardized` example data set with a poor random initialization. Each pair of plots corresponds to an iteration. Within the pair, the first plot depicts the center update, and the second plot depicts the reassignment of data to clusters. Cluster centers are indicated by larger points that are outlined in black. +::: + +This looks like a relatively bad clustering of the data, but K-means cannot improve it. +To solve this problem when clustering data using K-means, we should randomly re-initialize the labels a few times, run K-means for each initialization, +and pick the clustering that has the lowest final total WSSD. + +### Choosing K + +In order to cluster data using K-means, +we also have to pick the number of clusters, K. +But unlike in classification, we have no response variable +and cannot perform cross-validation with some measure of model prediction error. +Further, if K is chosen too small, then multiple clusters get grouped together; +if K is too large, then clusters get subdivided. +In both cases, we will potentially miss interesting structure in the data. +{numref}`toy-kmeans-vary-k-1` illustrates the impact of K +on K-means clustering of our penguin flipper and bill length data +by showing the different clusterings for K's ranging from 1 to 9. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] + +from sklearn.cluster import KMeans + +penguins_standardized = penguins_standardized.drop(columns=["label"]) + +dfs = [] +inertias = [] +for i in range(1, 10): + data = penguins_standardized.copy() + knn = KMeans(n_clusters=i, n_init='auto') + knn.fit(data) + data['n_clusters'] = f'{i} Cluster' + ('' if i == 1 else 's') + data['label'] = knn.labels_ + dfs.append(data) + inertias.append(knn.inertia_) + +points = alt.Chart(pd.concat(dfs), width=200, height=200).mark_point(filled=True, opacity=1).encode( + alt.X('flipper_length_standardized') + .scale(zero=False) + .title("Flipper Length (standardized)"), + alt.Y('bill_length_standardized') + .scale(zero=False) + .title("Bill Length (standardized)"), + alt.Color('label:N').legend(None), + alt.Shape('label:N').legend(None).scale(range=['square', 'circle', 'triangle', 'cross', 'diamond', 'triangle-right', 'triangle-down', 'triangle-left']), + alt.Size('label:O').legend(None).scale(type='ordinal', range=[50, 50, 100, 100, 100, 100, 100, 100]), + # alt.Shape('label:N').legend(None), +) + +vary_k = alt.layer( + points, + points.mark_point(filled=True, stroke='black', strokeWidth=1.25).encode( + alt.X('mean(flipper_length_standardized)'), + alt.Y('mean(bill_length_standardized)'), + size=alt.value(200) + ) +).facet( + alt.Facet( + 'n_clusters:N', + header=alt.Header(title='', labelFontSize=16) + ), + columns=3 +) +glue('toy-kmeans-vary-k-1', vary_k, display=True) +``` + + + +:::{glue:figure} toy-kmeans-vary-k-1 +:figwidth: 700px +:name: toy-kmeans-vary-k-1 + +Clustering of the penguin data for K clusters ranging from 1 to 9. Cluster centers are indicated by larger points that are outlined in black. +::: + + +```{index} elbow method +``` + +If we set K less than 3, then the clustering merges separate groups of data; this causes a large +total WSSD, since the cluster center (denoted by large shapes with black outlines) is not close to any of the data in the cluster.
On +the other hand, if we set K greater than 3, the clustering subdivides subgroups of data; this does indeed still +decrease the total WSSD, but by only a *diminishing amount*. If we plot the total WSSD versus the number of +clusters, we see that the decrease in total WSSD levels off (or forms an "elbow shape") when we reach roughly +the right number of clusters ({numref}`toy-kmeans-elbow`). + +```{code-cell} ipython3 +:tags: [remove-cell] + +elbow_plot = alt.layer( + alt.Chart( + pd.DataFrame({ + 'wssd': inertias, + 'k': range(1, len(inertias) + 1) + }) + ).mark_line(point=True).encode( + x=alt.X("k").title("Number of clusters"), + y=alt.Y("wssd").title("Total within-cluster sum of squares"), + ), + alt.Chart().mark_text(size=22, align='left', baseline='bottom').encode( + x=alt.datum(3.3), + y=alt.datum(9.8), + text=alt.datum('Elbow') + ), + alt.Chart().mark_text(size=50, align='left', baseline='bottom', fontWeight=100, angle=25).encode( + x=alt.datum(2.8), + y=alt.datum(5), + text=alt.datum('🠃') + ) +) + +glue('toy-kmeans-elbow', elbow_plot, display=True) +``` + +:::{glue:figure} toy-kmeans-elbow +:figwidth: 700px +:name: toy-kmeans-elbow + +Total WSSD for K clusters ranging from 1 to 9. +::: + +## K-means in Python + +```{index} K-means; kmeans function, scikit-learn; KMeans +``` + +We can perform K-means in Python using a workflow similar to those +in the earlier classification and regression chapters. +Returning to the original (unstandardized) `penguins` data, +recall that K-means clustering uses straight-line distance to decide which points are similar to +each other. Therefore, the *scale* of each of the variables in the data +will influence which cluster data points end up being assigned. +Variables with a large scale will have a much larger +effect on deciding cluster assignment than variables with a small scale. 
+To address this problem, we typically standardize our data before clustering, +which ensures that each variable has a mean of 0 and standard deviation of 1. +The `StandardScaler` function in `scikit-learn` can be used to do this. + +```{code-cell} ipython3 +from sklearn.preprocessing import StandardScaler +from sklearn.compose import make_column_transformer +from sklearn import set_config + +# Output dataframes instead of arrays +set_config(transform_output="pandas") + +preprocessor = make_column_transformer( + (StandardScaler(), ["bill_length_mm", "flipper_length_mm"]), + verbose_feature_names_out=False, +) +preprocessor +``` + +To indicate that we are performing K-means clustering, we will create a `KMeans` +model object. It takes at +least one argument: the number of clusters `n_clusters`, which we set to 3. + +```{code-cell} ipython3 +from sklearn.cluster import KMeans + +kmeans = KMeans(n_clusters=3) +kmeans +``` + +To actually run the K-means clustering, we combine the preprocessor and model object +in a `Pipeline`, and use the `fit` function. Note that the K-means +algorithm uses a random initialization of assignments, but since we set +the random seed in the beginning of this chapter, the clustering will be reproducible. + +```{code-cell} ipython3 +from sklearn.pipeline import make_pipeline + +penguin_clust = make_pipeline(preprocessor, kmeans) +penguin_clust.fit(penguins) +penguin_clust +``` + +```{index} K-means; inertia_, K-means; cluster_centers_, K-means; labels_, K-means; predict +``` + +The fit `KMeans` object—which is the second item in the +pipeline, and can be accessed as `penguin_clust[1]`—has a lot of information +that can be used to visualize the clusters, pick K, and evaluate the total WSSD. +Let's start by visualizing the clusters as a colored scatter plot! In +order to do that, we first need to augment our +original `penguins` data frame with the cluster assignments. 
+We can access these using the `labels_` attribute of the clustering object +("labels" is a common alternative term to "assignments" in clustering), and +add them to the data frame. + +```{code-cell} ipython3 +penguins["cluster"] = penguin_clust[1].labels_ +penguins +``` + +Now that we have the cluster assignments included in the `penguins` data frame, we can +visualize them as shown in {numref}`cluster_plot`. +Note that we are plotting the *un-standardized* data here; if we for some reason wanted to +visualize the *standardized* data, we would need to use the `fit` and `transform` functions +on the `StandardScaler` preprocessor directly to obtain that first. +As in {numref}`Chapter %s `, +adding the `:N` suffix ensures that `altair` +will treat the `cluster` variable as a nominal/categorical variable, and +hence use a discrete color map for the visualization. + +```{code-cell} ipython3 +cluster_plot=alt.Chart(penguins).mark_circle().encode( + x=alt.X("flipper_length_mm").title("Flipper Length").scale(zero=False), + y=alt.Y("bill_length_mm").title("Bill Length").scale(zero=False), + color=alt.Color("cluster:N").title("Cluster"), +) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("cluster_plot", cluster_plot, display=True) +``` + +:::{glue:figure} cluster_plot +:figwidth: 700px +:name: cluster_plot + +The data colored by the cluster assignments returned by K-means. +::: + +```{index} WSSD; total, K-means; inertia_ +``` + +```{index} see: WSSD; K-means inertia +``` + +As mentioned above, +we also need to select K +by finding where the "elbow" occurs in the plot of total WSSD versus the number of clusters. +The total WSSD is stored in the `.inertia_` attribute +of the clustering object ("inertia" is the term `scikit-learn` uses to denote WSSD). 
+
+```{code-cell} ipython3
+penguin_clust[1].inertia_
+```
+
+To calculate the total WSSD for a variety of Ks, we will
+create a data frame that contains different values of `k`
+and the WSSD of running KMeans with each value of k.
+To create this dataframe,
+we will use what is called a "list comprehension" in Python,
+where we repeat an operation multiple times
+and return a list with the result.
+Here is an example of a list comprehension that stores the numbers 0-2 in a list:
+
+```{code-cell} ipython3
+[n for n in range(3)]
+```
+
+We can change the variable `n` to be called whatever we prefer
+and we can also perform any operation we want as part of the list comprehension.
+For example,
+we could square all the numbers from 1-4 and store them in a list:
+
+```{code-cell} ipython3
+[number ** 2 for number in range(1, 5)]
+```
+
+Next, we will use this approach to compute the WSSD for the K-values 1 through 9.
+For each value of K,
+we create a new KMeans model
+and wrap it in a `scikit-learn` pipeline
+with the preprocessor we created earlier.
+We store the WSSD values in a list that we will use to create a dataframe
+of both the K-values and their corresponding WSSDs.
+
+```{note}
+We are creating the variable `ks` to store the range of possible k-values,
+so that we only need to change this range in one place
+if we decide to change which values of k we want to explore.
+Otherwise it would be easy to forget to update it
+in either the list comprehension or in the data frame assignment.
+If you are using a value multiple times,
+it is always safest to assign it to a variable name for reuse.
+``` + +```{code-cell} ipython3 +ks = range(1, 10) +wssds = [ + make_pipeline( + preprocessor, + KMeans(n_clusters=k) # Create a new KMeans model with `k` clusters + ).fit(penguins)[1].inertia_ + for k in ks +] + +penguin_clust_ks = pd.DataFrame({ + "k": ks, + "wssd": wssds, +}) + +penguin_clust_ks +``` + +Now that we have `wssd` and `k` as columns in a data frame, we can make a line plot +({numref}`elbow_plot`) and search for the "elbow" to find which value of K to use. + +```{code-cell} ipython3 +elbow_plot = alt.Chart(penguin_clust_ks).mark_line(point=True).encode( + x=alt.X("k").title("Number of clusters"), + y=alt.Y("wssd").title("Total within-cluster sum of squares"), +) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("elbow_plot", elbow_plot, display=True) +``` + +:::{glue:figure} elbow_plot +:figwidth: 700px +:name: elbow_plot + +A plot showing the total WSSD versus the number of clusters. +::: + +```{index} K-means; init argument +``` + +It looks like three clusters is the right choice for this data, +since that is where the "elbow" of the line is the most distinct. +In the plot, +you can also see that the WSSD is always decreasing, +as we would expect when we add more clusters. +However, +it is possible to have an elbow plot +where the WSSD increases at one of the steps, +causing a small bump in the line. +This is because K-means can get "stuck" in a bad solution +due to an unlucky initialization of the initial center positions +as we mentioned earlier in the chapter. + +```{note} +It is rare that the KMeans function from `scikit-learn` +gets stuck in a bad solution, because `scikit-learn` tries to choose +the initial centers carefully to prevent this from happening. +If you still find yourself in a situation where you have a bump in the elbow plot, +you can increase the `n_init` parameter +when creating the `KMeans` object, e.g., `KMeans(n_clusters=k, n_init=10)`, to try more different random center initializations. 
+The larger the value the better from an analysis perspective, +but there is a trade-off that doing many clusterings could take a long time. +``` + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Clustering" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. + +## Additional resources + +- Chapter 10 of *An Introduction to Statistical + Learning* {cite:p}`james2013introduction` provides a + great next stop in the process of learning about clustering and unsupervised + learning in general. In the realm of clustering specifically, it provides a + great companion introduction to K-means, but also covers *hierarchical* + clustering for when you expect there to be subgroups, and then subgroups within + subgroups, etc., in your data. In the realm of more general unsupervised + learning, it covers *principal components analysis (PCA)*, which is a very + popular technique for reducing the number of predictors in a data set. 
+ +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/index.md b/pull313/_sources/index.md new file mode 100644 index 00000000..e75806a1 --- /dev/null +++ b/pull313/_sources/index.md @@ -0,0 +1,42 @@ +--- +jupytext: + cell_metadata_filter: -all + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +![](img/frontmatter/ds-a-first-intro-graphic.jpg) + +# Data Science + +## *A First Introduction (Python Edition)* + +*Tiffany Timbers, Trevor Campbell, Melissa Lee, Joel Ostblom, Lindsey Heagy* + +### Welcome! + +This is the [website](https://python.datasciencebook.ca) for *Data Science: A First Introduction (Python Edition)*. +You can read the web version of the book on this site. Click a section in the table of contents +on the left side of the page to navigate to it. If you are on a mobile device, +you may need to open the table of contents first by clicking the menu button on +the top left of the page. + +For the R version of the textbook, please visit https://datasciencebook.ca. + + + +This work by [Tiffany Timbers](https://www.tiffanytimbers.com/), +[Trevor Campbell](https://trevorcampbell.me/), +[Melissa Lee](https://www.stat.ubc.ca/users/melissa-lee), +[Joel Ostblom](https://joelostblom.com/), +and [Lindsey Heagy](https://lindseyjh.ca/) +is licensed under +a [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](http://creativecommons.org/licenses/by-nc-sa/4.0/). 
diff --git a/pull313/_sources/inference.md b/pull313/_sources/inference.md new file mode 100644 index 00000000..bdb9d4b7 --- /dev/null +++ b/pull313/_sources/inference.md @@ -0,0 +1,1354 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.5 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(inference)= +# Statistical inference + +```{code-cell} ipython3 +:tags: [remove-cell] + +from chapter_preamble import * +``` + +## Overview + +A typical data analysis task in practice is to draw conclusions about some +unknown aspect of a population of interest based on observed data sampled from +that population; we typically do not get data on the *entire* population. Data +analysis questions regarding how summaries, patterns, trends, or relationships +in a data set extend to the wider population are called *inferential +questions*. This chapter will start with the fundamental ideas of sampling from +populations and then introduce two common techniques in statistical inference: +*point estimation* and *interval estimation*. + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + +* Describe real-world examples of questions that can be answered with statistical inference. +* Define common population parameters (e.g., mean, proportion, standard deviation) that are often estimated using sampled data, and estimate these from a sample. +* Define the following statistical sampling terms (population, sample, population parameter, point estimate, sampling distribution). +* Explain the difference between a population parameter and a sample point estimate. +* Use Python to draw random samples from a finite population. +* Use Python to create a sampling distribution from a finite population. +* Describe how sample size influences the sampling distribution. +* Define bootstrapping. 
+* Use Python to create a bootstrap distribution to approximate a sampling distribution. +* Contrast the bootstrap and sampling distributions. + ++++ + +## Why do we need sampling? + +We often need to understand how quantities we observe in a subset +of data relate to the same quantities in the broader population. For example, suppose a +retailer is considering selling iPhone accessories, and they want to estimate +how big the market might be. Additionally, they want to strategize how they can +market their products on North American college and university campuses. This +retailer might formulate the following question: + +*What proportion of all undergraduate students in North America own an iPhone?* + +```{index} population, population; parameter +``` + +In the above question, we are interested in making a conclusion about *all* +undergraduate students in North America; this is referred to as the **population**. In +general, the population is the complete collection of individuals or cases we +are interested in studying. Further, in the above question, we are interested +in computing a quantity—the proportion of iPhone owners—based on +the entire population. This proportion is referred to as a **population parameter**. In +general, a population parameter is a numerical characteristic of the entire +population. To compute this number in the example above, we would need to ask +every single undergraduate in North America whether they own an iPhone. In +practice, directly computing population parameters is often time-consuming and +costly, and sometimes impossible. + +```{index} sample, sample; estimate, inference +``` + +```{index} see: statistical inference; inference +``` + +A more practical approach would be to make measurements for a **sample**, i.e., a +subset of individuals collected from the population. We can then compute a +**sample estimate**—a numerical characteristic of the sample—that +estimates the population parameter. 
For example, suppose we randomly selected +ten undergraduate students across North America (the sample) and computed the +proportion of those students who own an iPhone (the sample estimate). In that +case, we might suspect that proportion is a reasonable estimate of the +proportion of students who own an iPhone in the entire population. +{numref}`fig:11-population-vs-sample` illustrates this process. +In general, the process of using a sample to make a conclusion about the +broader population from which it is taken is referred to as **statistical inference**. + ++++ + +```{figure} img/inference/population_vs_sample.png +:name: fig:11-population-vs-sample + +Population versus sample. +``` + ++++ + +Note that proportions are not the *only* kind of population parameter we might +be interested in. For example, suppose an undergraduate student studying at the University +of British Columbia in Canada is looking for an apartment +to rent. They need to create a budget, so they want to know something about +studio apartment rental prices in Vancouver, BC. This student might +formulate the following question: + +*What is the average price-per-month of studio apartment rentals in Vancouver, Canada?* + +In this case, the population consists of all studio apartment rentals in Vancouver, and the +population parameter is the *average price-per-month*. Here we used the average +as a measure of the center to describe the "typical value" of studio apartment +rental prices. But even within this one example, we could also be interested in +many other population parameters. For instance, we know that not every studio +apartment rental in Vancouver will have the same price per month. The student +might be interested in how much monthly prices vary and want to find a measure +of the rentals' spread (or variability), such as the standard deviation. Or perhaps the +student might be interested in the fraction of studio apartment rentals that +cost more than \$1000 per month. 
The question we want to answer will help us +determine the parameter we want to estimate. If we were somehow able to observe +the whole population of studio apartment rental offerings in Vancouver, we +could compute each of these numbers exactly; therefore, these are all +population parameters. There are many kinds of observations and population +parameters that you will run into in practice, but in this chapter, we will +focus on two settings: + +1. Using categorical observations to estimate the proportion of a category +2. Using quantitative observations to estimate the average (or mean) + ++++ + +## Sampling distributions + +### Sampling distributions for proportions + +```{index} Airbnb +``` + +We will look at an example using data from +[Inside Airbnb](http://insideairbnb.com/) {cite:p}`insideairbnb`. Airbnb is an online +marketplace for arranging vacation rentals and places to stay. The data set +contains listings for Vancouver, Canada, in September 2020. Our data +includes an ID number, neighborhood, type of room, the number of people the +rental accommodates, number of bathrooms, bedrooms, beds, and the price per +night. + +```{code-cell} ipython3 +import pandas as pd + +airbnb = pd.read_csv("data/listings.csv") +airbnb +``` + +Suppose the city of Vancouver wants information about Airbnb rentals to help +plan city bylaws, and they want to know how many Airbnb places are listed as +entire homes and apartments (rather than as private or shared rooms). Therefore +they may want to estimate the true proportion of all Airbnb listings where the +room type is listed as "entire home or apartment." Of course, we usually +do not have access to the true population, but here let's imagine (for learning +purposes) that our data set represents the population of all Airbnb rental +listings in Vancouver, Canada. +We can find the proportion of listings for each room type +by using the `value_counts` function with the `normalize` parameter +as we did in previous chapters. 
+ +```{index} pandas.DataFrame; df[], count, len +``` + +```{code-cell} ipython3 +airbnb["room_type"].value_counts(normalize=True) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("population_proportion", "{:.3f}".format(airbnb["room_type"].value_counts(normalize=True)["Entire home/apt"])) +``` + +We can see that the proportion of `Entire home/apt` listings in +the data set is {glue:text}`population_proportion`. This +value, {glue:text}`population_proportion`, is the population parameter. Remember, this +parameter value is usually unknown in real data analysis problems, as it is +typically not possible to make measurements for an entire population. + +```{index} pandas.DataFrame; sample +``` + +Instead, perhaps we can approximate it with a small subset of data! +To investigate this idea, let's try randomly selecting 40 listings (*i.e.,* taking a random sample of +size 40 from our population), and computing the proportion for that sample. +We will use the `sample` method of the `pandas.DataFrame` +object to take the sample. The argument `n` of `sample` is the size of the sample to take +and since we are starting to use randomness here, +we are also setting the random seed via numpy to make the results reproducible. + +```{code-cell} ipython3 +import numpy as np + + +np.random.seed(155) + +airbnb.sample(n=40)["room_type"].value_counts(normalize=True) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("sample_1_proportion", "{:.3f}".format(airbnb.sample(n=40, random_state=155)["room_type"].value_counts(normalize=True)["Entire home/apt"])) +``` + +Here we see that the proportion of entire home/apartment listings in this +random sample is {glue:text}`sample_1_proportion`. Wow—that's close to our +true population value! But remember, we computed the proportion using a random sample of size 40. +This has two consequences. First, this value is only an *estimate*, i.e., our best guess +of our population parameter using this sample. 
+
+Given that we are estimating a single value here, we often
+refer to it as a **point estimate**. Second, since the sample was random,
+if we were to take *another* random sample of size 40 and compute the proportion for that sample,
+we would not get the same answer:
+
+```{code-cell} ipython3
+airbnb.sample(n=40)["room_type"].value_counts(normalize=True)
+```
+
+Confirmed! We get a different value for our estimate this time.
+That means that our point estimate might be unreliable. Indeed, estimates vary from sample to
+sample due to **sampling variability**. But just how much
+should we expect the estimates of our random samples to vary?
+Or in other words, how much can we really trust our point estimate based on a single sample?
+
+```{index} sampling distribution
+```
+
+To understand this, we will simulate many samples (much more than just two)
+of size 40 from our population of listings and calculate the proportion of
+entire home/apartment listings in each sample. This simulation will create
+many sample proportions, which we can visualize using a histogram. The
+distribution of the estimate for all possible samples of a given size (which we
+commonly refer to as $n$) from a population is called
+a **sampling distribution**. The sampling distribution will help us see how much we would
+expect our sample proportions from this population to vary for samples of size 40.
+
+```{index} pandas.DataFrame; sample
+```
+
+We again use the `sample` method to take samples of size 40 from our
+population of Airbnb listings. But this time we use a list comprehension
+to repeat the operation multiple times (as we did previously in {numref}`Chapter %s `).
+In this case we repeat the operation 20,000 times to obtain 20,000 samples of size 40.
+To make it clear which rows in the data frame come from
+which of the 20,000 samples, we also add a column called `replicate` with this information using the `assign` function,
+introduced previously in {numref}`Chapter %s `.
+
+The call to `concat` concatenates all the 20,000 data frames
+returned from the list comprehension into a single big data frame.
+
+```{code-cell} ipython3
+samples = pd.concat([
+    airbnb.sample(40).assign(replicate=n)
+    for n in range(20_000)
+])
+samples
+```
+
+Since the column `replicate` indicates the replicate/sample number,
+we can verify that we indeed seem to have 20,000 samples
+starting at sample 0 and ending at sample 19,999.
+
++++
+
+Now that we have obtained the samples, we need to compute the
+proportion of entire home/apartment listings in each sample.
+We group the data by the `replicate` variable—to group the
+set of listings in each sample together—and then use `value_counts`
+with the `normalize=True` argument on the `room_type` column
+to compute the proportion of each room type in each sample.
+Both the first and last few entries of the resulting data frame are printed
+below to show that we end up with 20,000 point estimates, one for each of the 20,000 samples.
+
+```{code-cell} ipython3
+(
+    samples
+    .groupby("replicate")
+    ["room_type"]
+    .value_counts(normalize=True)
+)
+```
+
+The returned object is a series,
+and as we have previously learned
+we can use `reset_index` to change it to a data frame.
+However,
+there is one caveat here:
+when we use the `value_counts` function
+on a grouped series and try to `reset_index`
+we will end up with two columns with the same name
+and therefore get an error
+(in this case, `room_type` will occur twice).
+Fortunately,
+there is a simple solution:
+when we call `reset_index`,
+we can specify the name of the new column
+with the `name` parameter:
+
+```{code-cell} ipython3
+(
+    samples
+    .groupby("replicate")
+    ["room_type"]
+    .value_counts(normalize=True)
+    .reset_index(name="sample_proportion")
+)
+```
+
+Below we put everything together
+and also filter the data frame to keep only the room types
+that we are interested in.
+ +```{code-cell} ipython3 +sample_estimates = ( + samples + .groupby("replicate") + ["room_type"] + .value_counts(normalize=True) + .reset_index(name="sample_proportion") +) + +sample_estimates = sample_estimates[sample_estimates["room_type"] == "Entire home/apt"] +sample_estimates +``` + +We can now visualize the sampling distribution of sample proportions +for samples of size 40 using a histogram in {numref}`fig:11-example-proportions7`. Keep in mind: in the real world, +we don't have access to the full population. So we +can't take many samples and can't actually construct or visualize the sampling distribution. +We have created this particular example +such that we *do* have access to the full population, which lets us visualize the +sampling distribution directly for learning purposes. + +```{code-cell} ipython3 +:tags: [remove-output] + +sampling_distribution = alt.Chart(sample_estimates).mark_bar().encode( + x=alt.X("sample_proportion") + .bin(maxbins=20) + .title("Sample proportions"), + y=alt.Y("count()").title("Count"), +) + +sampling_distribution +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:11-example-proportions7", sampling_distribution) +``` + +:::{glue:figure} fig:11-example-proportions7 +:name: fig:11-example-proportions7 + +Sampling distribution of the sample proportion for sample size 40. +::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("sample_proportion_center", "{:.2f}".format(sample_estimates["sample_proportion"].mean())) +glue("sample_proportion_min", "{:.2f}".format(sample_estimates["sample_proportion"].quantile(0.004))) +glue("sample_proportion_max", "{:.2f}".format(sample_estimates["sample_proportion"].quantile(0.9997))) +``` + +```{index} sampling distribution; shape +``` + +The sampling distribution in {numref}`fig:11-example-proportions7` appears +to be bell-shaped, is roughly symmetric, and has one peak. 
It is centered +around {glue:text}`sample_proportion_center` and the sample proportions +range from about {glue:text}`sample_proportion_min` to about +{glue:text}`sample_proportion_max`. In fact, we can +calculate the mean of the sample proportions. + +```{code-cell} ipython3 +sample_estimates["sample_proportion"].mean() +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("sample_proportion_mean", "{:.3f}".format(sample_estimates["sample_proportion"].mean())) +``` + +We notice that the sample proportions are centered around the population +proportion value, {glue:text}`sample_proportion_mean`! In general, the mean of +the sampling distribution should be equal to the population proportion. +This is great news because it means that the sample proportion is neither an overestimate nor an +underestimate of the population proportion. +In other words, if you were to take many samples as we did above, there is no tendency +towards over or underestimating the population proportion. +In a real data analysis setting where you just have access to your single +sample, this implies that you would suspect that your sample point estimate is +roughly equally likely to be above or below the true population proportion. + ++++ + +### Sampling distributions for means + +In the previous section, our variable of interest—`room_type`—was +*categorical*, and the population parameter was a proportion. As mentioned in +the chapter introduction, there are many choices of the population parameter +for each type of variable. What if we wanted to infer something about a +population of *quantitative* variables instead? For instance, a traveler +visiting Vancouver, Canada may wish to estimate the +population *mean* (or average) price per night of Airbnb listings. Knowing +the average could help them tell whether a particular listing is overpriced. +We can visualize the population distribution of the price per night with a histogram. 
+ +```{code-cell} ipython3 +:tags: [remove-output] + +population_distribution = alt.Chart(airbnb).mark_bar().encode( + x=alt.X("price") + .bin(maxbins=30) + .title("Price per night (dollars)"), + y=alt.Y("count()", title="Count"), +) + +population_distribution +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:11-example-means2", population_distribution) +``` + +:::{glue:figure} fig:11-example-means2 +:name: fig:11-example-means2 + +Population distribution of price per night (dollars) for all Airbnb listings in Vancouver, Canada. +::: + ++++ + +```{index} population; distribution +``` + +In {numref}`fig:11-example-means2`, we see that the population distribution +has one peak. It is also skewed (i.e., is not symmetric): most of the listings are +less than \$250 per night, but a small number of listings cost much more, +creating a long tail on the histogram's right side. +Along with visualizing the population, we can calculate the population mean, +the average price per night for all the Airbnb listings. + +```{code-cell} ipython3 +airbnb["price"].mean() +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("population_mean", "{:.2f}".format(airbnb["price"].mean())) +``` + +```{index} population; parameter +``` + +The price per night of all Airbnb rentals in Vancouver, BC +is \${glue:text}`population_mean`, on average. This value is our +population parameter since we are calculating it using the population data. + +```{index} pandas.DataFrame; sample +``` + +Now suppose we did not have access to the population data (which is usually the +case!), yet we wanted to estimate the mean price per night. We could answer +this question by taking a random sample of as many Airbnb listings as our time +and resources allow. Let's say we could do this for 40 listings. What would +such a sample look like? 
Let's take advantage of the fact that we do have +access to the population data and simulate taking one random sample of 40 +listings in Python, again using `sample`. + +```{code-cell} ipython3 +one_sample = airbnb.sample(40) +``` + +We can create a histogram to visualize the distribution of observations in the +sample ({numref}`fig:11-example-means-sample-hist`), and calculate the mean +of our sample. + +```{code-cell} ipython3 +:tags: [remove-output] + +sample_distribution = alt.Chart(one_sample).mark_bar().encode( + x=alt.X("price") + .bin(maxbins=30) + .title("Price per night (dollars)"), + y=alt.Y("count()").title("Count"), +) + +sample_distribution +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:11-example-means-sample-hist", sample_distribution) +``` + +:::{glue:figure} fig:11-example-means-sample-hist +:name: fig:11-example-means-sample-hist + +Distribution of price per night (dollars) for sample of 40 Airbnb listings. +::: + +```{code-cell} ipython3 +one_sample["price"].mean() +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("estimate_mean", "{:.2f}".format(one_sample["price"].mean())) +glue("diff_perc", "{:.1f}".format(100 * abs(1 - (one_sample["price"].mean() / airbnb["price"].mean())))) +``` + +The average value of the sample of size 40 +is \${glue:text}`estimate_mean`. This +number is a point estimate for the mean of the full population. +Recall that the population mean was +\${glue:text}`population_mean`. So our estimate was fairly close to +the population parameter: the mean was about +{glue:text}`diff_perc`% +off. Note that we usually cannot compute the estimate's accuracy in practice +since we do not have access to the population parameter; if we did, we wouldn't +need to estimate it! + +```{index} sampling distribution +``` + +Also, recall from the previous section that the point estimate can vary; if we +took another random sample from the population, our estimate's value might +change. 
So then, did we just get lucky with our point estimate above? How much +does our estimate vary across different samples of size 40 in this example? +Again, since we have access to the population, we can take many samples and +plot the sampling distribution of sample means to get a sense for this variation. +In this case, we'll use the 20,000 samples of size +40 that we already stored in the `samples` variable. +First we will calculate the sample mean for each replicate +and then plot the sampling +distribution of sample means for samples of size 40. + +```{code-cell} ipython3 +sample_estimates = ( + samples + .groupby("replicate") + ["price"] + .mean() + .reset_index() + .rename(columns={"price": "mean_price"}) +) +sample_estimates +``` + +```{code-cell} ipython3 +:tags: [remove-output] + +sampling_distribution = alt.Chart(sample_estimates).mark_bar().encode( + x=alt.X("mean_price") + .bin(maxbins=30) + .title("Sample mean price per night (dollars)"), + y=alt.Y("count()").title("Count") +) + +sampling_distribution +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:11-example-means4", sampling_distribution) +``` + +:::{glue:figure} fig:11-example-means4 +:name: fig:11-example-means4 + +Sampling distribution of the sample means for sample size of 40. +::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("quantile_1", "{:0.0f}".format(round(sample_estimates["mean_price"].quantile(0.25), -1))) +glue("quantile_3", "{:0.0f}".format(round(sample_estimates["mean_price"].quantile(0.75), -1))) +``` + +```{index} sampling distribution; shape +``` + +In {numref}`fig:11-example-means4`, the sampling distribution of the mean +has one peak and is bell-shaped. Most of the estimates are between +about \${glue:text}`quantile_1` and +\${glue:text}`quantile_3`; but there are +a good fraction of cases outside this range (i.e., where the point estimate was +not close to the population parameter). 
So it does indeed look like we were +quite lucky when we estimated the population mean with only +{glue:text}`diff_perc`% error. + +```{index} sampling distribution; compared to population distribution +``` + +Let's visualize the population distribution, distribution of the sample, and +the sampling distribution on one plot to compare them in {numref}`fig:11-example-means5`. Comparing these three distributions, the centers +of the distributions are all around the same price (around \$150). The original +population distribution has a long right tail, and the sample distribution has +a similar shape to that of the population distribution. However, the sampling +distribution is not shaped like the population or sample distribution. Instead, +it has a bell shape, and it has a lower spread than the population or sample +distributions. The sample means vary less than the individual observations +because there will be some high values and some small values in any random +sample, which will keep the average from being too extreme. 
+ + + +```{code-cell} ipython3 +:tags: [remove-input] + +glue( + "fig:11-example-means5", + alt.vconcat( + population_distribution.mark_bar(clip=True).encode( + x=alt.X( + "price", + bin=alt.Bin(extent=[0, 660], maxbins=40), + title="Price per night (dollars)", + #scale=alt.Scale(domainMax=700) + ) + ).properties( + title="Population", height=150 + ), + sample_distribution.encode( + x=alt.X("price") + .bin(extent=[0, 660], maxbins=40) + .title("Price per night (dollars)") + ).properties(title="Sample (n = 40)").properties(height=150), + sampling_distribution.encode( + x=alt.X("mean_price") + .bin(extent=[0, 660], maxbins=40) + .title("Price per night (dollars)") + ).properties( + title=alt.TitleParams( + "Sampling distribution of the mean", + subtitle="For 20,000 samples of size 40" + ) + ).properties(height=150) + ).resolve_scale( + x="shared" + ) +) +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:11-example-means5 +:figclass: caption-hack + +Comparison of population distribution, sample distribution, and sampling distribution. +``` + ++++ + +Given that there is quite a bit of variation in the sampling distribution of +the sample mean—i.e., the point estimate that we obtain is not very +reliable—is there any way to improve the estimate? One way to improve a +point estimate is to take a *larger* sample. To illustrate what effect this +has, we will take many samples of size 20, 50, 100, and 500, and plot the +sampling distribution of the sample mean. We indicate the mean of the sampling +distribution with a orange vertical line. 
+ +```{code-cell} ipython3 +:tags: [remove-input] + +# Plot sampling distributions for multiple sample sizes +base = alt.Chart( + pd.concat([ + pd.concat([ + airbnb.sample(sample_size).assign(sample_size=sample_size, replicate=replicate) + for sample_size in [20, 50, 100, 500] + ]) + for replicate in range(20_000) + ]).groupby( + ["sample_size", "replicate"], + as_index=False + )["price"].mean(), + height=150 +) + +glue( + "fig:11-example-means7", + alt.layer( + base.mark_bar().encode( + alt.X("price", bin=alt.Bin(maxbins=30)), + alt.Y("count()") + ), + base.mark_rule(color="#f58518", size=3).encode( + x="mean(price)" + ), + base.mark_text(align="left", color="#f58518", size=12, fontWeight="bold", dx=10).transform_aggregate( + mean_price = "mean(price)", + ).transform_calculate( + label = "'Mean = ' + round(datum.mean_price * 10) / 10" + ).encode( + x=alt.X("mean_price:Q", title="Sample mean price per night (dollars)"), + y=alt.value(10), + text="label:N" + ) + ).facet( + alt.Facet( + "sample_size:N", + header=alt.Header( + title="", + labelFontWeight="bold", + labelFontSize=12, + labelPadding=3, + labelExpr='"Sample size = " + datum.value' + ) + ), + columns=1, + ).resolve_scale( + y="independent" + ) +) +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:11-example-means7 +:figclass: caption-hack + +Comparison of sampling distributions, with mean highlighted as a vertical orange line. +``` + ++++ + +```{index} sampling distribution; effect of sample size +``` + +Based on the visualization in {numref}`fig:11-example-means7`, three points +about the sample mean become clear: + +1. The mean of the sample mean (across + samples) is equal to the population mean. In other words, the sampling + distribution is centered at the population mean. +2. Increasing the size of + the sample decreases the spread (i.e., the variability) of the sampling + distribution. 
Therefore, a larger sample size results in a more reliable point + estimate of the population parameter. +3. The distribution of the sample mean is roughly bell-shaped. + +```{note} +You might notice that in the `n = 20` case in {numref}`fig:11-example-means7`, +the distribution is not *quite* bell-shaped. There is a bit of skew towards the right! +You might also notice that in the `n = 50` case and larger, that skew seems to disappear. +In general, the sampling distribution—for both means and proportions—only +becomes bell-shaped *once the sample size is large enough*. +How large is "large enough?" Unfortunately, it depends entirely on the problem at hand. But +as a rule of thumb, often a sample size of at least 20 will suffice. +``` + + + ++++ + +### Summary + +1. A point estimate is a single value computed using a sample from a population (e.g., a mean or proportion). +2. The sampling distribution of an estimate is the distribution of the estimate for all possible samples of a fixed size from the same population. +3. The shape of the sampling distribution is usually bell-shaped with one peak and centered at the population mean or proportion. +4. The spread of the sampling distribution is related to the sample size. As the sample size increases, the spread of the sampling distribution decreases. + ++++ + +## Bootstrapping + ++++ + +### Overview + +*Why all this emphasis on sampling distributions?* + +We saw in the previous section that we could compute a **point estimate** of a +population parameter using a sample of observations from the population. And +since we constructed examples where we had access to the population, we could +evaluate how accurate the estimate was, and even get a sense of how much the +estimate would vary for different samples from the population. But in real +data analysis settings, we usually have *just one sample* from our population +and do not have access to the population itself. 
Therefore we cannot construct +the sampling distribution as we did in the previous section. And as we saw, our +sample estimate's value can vary significantly from the population parameter. +So reporting the point estimate from a single sample alone may not be enough. +We also need to report some notion of *uncertainty* in the value of the point +estimate. + +```{index} bootstrap, confidence interval +``` + +```{index} see: interval; confidence interval +``` + +Unfortunately, we cannot construct the exact sampling distribution without +full access to the population. However, if we could somehow *approximate* what +the sampling distribution would look like for a sample, we could +use that approximation to then report how uncertain our sample +point estimate is (as we did above with the *exact* sampling +distribution). There are several methods to accomplish this; in this book, we +will use the *bootstrap*. We will discuss **interval estimation** and +construct +**confidence intervals** using just a single sample from a population. A +confidence interval is a range of plausible values for our population parameter. + +Here is the key idea. First, if you take a big enough sample, it *looks like* +the population. Notice the histograms' shapes for samples of different sizes +taken from the population in {numref}`fig:11-example-bootstrapping0`. We +see that the sample’s distribution looks like that of the population for a +large enough sample. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] + +# plot sample distributions for n = 10, 20, 50, 100, 200 and population distribution +sample_distribution_dict = {} +for sample_n in [10, 20, 50, 100, 200]: + sample = airbnb.sample(sample_n) + sample_distribution_dict[f"sample_distribution_{sample_n}"] = ( + alt.Chart(sample, title=f"n = {sample_n}").mark_bar().encode( + x=alt.X( + "price", + bin=alt.Bin(extent=[0, 600], step=20), + title="Price per night (dollars)", + ), + y=alt.Y("count()", title="Count"), + ) + ).properties(height=150) +# add title and standardize the x axis ticks for population histogram +population_distribution.title = "Population distribution" +population_distribution.encoding["x"]["bin"] = alt.Bin(extent=[0, 600], step=20) + +glue( + "fig:11-example-bootstrapping0", + ( + ( + sample_distribution_dict["sample_distribution_10"] + | sample_distribution_dict["sample_distribution_20"] + ) + & ( + sample_distribution_dict["sample_distribution_50"] + | sample_distribution_dict["sample_distribution_100"] + ) + & ( + sample_distribution_dict["sample_distribution_200"] + | population_distribution.properties(width=350, height=150) + ) + ), +) +``` + +:::{glue:figure} fig:11-example-bootstrapping0 +:name: fig:11-example-bootstrapping0 + +Comparison of samples of different sizes from the population. +::: + ++++ + +```{index} bootstrap; distribution +``` + +In the previous section, we took many samples of the same size *from our +population* to get a sense of the variability of a sample estimate. But if our +sample is big enough that it looks like our population, we can pretend that our +sample *is* the population, and take more samples (with replacement) of the +same size from it instead! This very clever technique is +called **the bootstrap**. Note that by taking many samples from our single, observed +sample, we do not obtain the true sampling distribution, but rather an +approximation that we call **the bootstrap distribution**. 
+ +```{note} +We must sample *with* replacement when using the bootstrap. +Otherwise, if we had a sample of size $n$, and obtained a sample from it of +size $n$ *without* replacement, it would just return our original sample! +``` + +This section will explore how to create a bootstrap distribution from a single +sample using Python. The process is visualized in {numref}`fig:11-intro-bootstrap-image`. +For a sample of size $n$, you would do the following: + ++++ + +1. Randomly select an observation from the original sample, which was drawn from the population. +2. Record the observation's value. +3. Replace that observation. +4. Repeat steps 1–3 (sampling *with* replacement) until you have $n$ observations, which form a bootstrap sample. +5. Calculate the bootstrap point estimate (e.g., mean, median, proportion, slope, etc.) of the $n$ observations in your bootstrap sample. +6. Repeat steps 1–5 many times to create a distribution of point estimates (the bootstrap distribution). +7. Calculate the plausible range of values around our observed point estimate. + ++++ + +```{figure} img/inference/intro-bootstrap.jpeg +:name: fig:11-intro-bootstrap-image + +Overview of the bootstrap process. +``` + ++++ + +### Bootstrapping in Python + +Let’s continue working with our Airbnb example to illustrate how we might create +and use a bootstrap distribution using just a single sample from the population. +Once again, suppose we are +interested in estimating the population mean price per night of all Airbnb +listings in Vancouver, Canada, using a single sample size of 40. +Recall our point estimate was \${glue:text}`estimate_mean`. The +histogram of prices in the sample is displayed in {numref}`fig:11-bootstrapping1`. 
+ +```{code-cell} ipython3 +one_sample +``` + +```{code-cell} ipython3 +:tags: [] + +one_sample_dist = alt.Chart(one_sample).mark_bar().encode( + x=alt.X("price") + .bin(maxbins=30) + .title("Price per night (dollars)"), + y=alt.Y("count()").title("Count"), +) + +one_sample_dist +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:11-bootstrapping1 +:figclass: caption-hack + +Histogram of price per night (dollars) for one sample of size 40. +``` + ++++ + +The histogram for the sample is skewed, with a few observations out to the right. The +mean of the sample is \${glue:text}`estimate_mean`. +Remember, in practice, we usually only have this one sample from the population. So +this sample and estimate are the only data we can work with. + +```{index} bootstrap; in Python, scikit-learn; resample (bootstrap) +``` + +We now perform steps 1–5 listed above to generate a single bootstrap +sample in Python and calculate a point estimate from that bootstrap sample. We will +continue using the `sample` function of our dataframe. +Critically, note that we now +set `frac=1` ("fraction") to indicate that we want to draw as many samples as there are rows in the dataframe +(we could also have set `n=40` but then we would need to manually keep track of how many rows there are). +Since we need to sample with replacement when bootstrapping, +we change the `replace` parameter to `True`. + +```{code-cell} ipython3 +:tags: [] + +boot1 = one_sample.sample(frac=1, replace=True) +boot1_dist = alt.Chart(boot1).mark_bar().encode( + x=alt.X("price") + .bin(maxbins=30) + .title("Price per night (dollars)"), + y=alt.Y("count()", title="Count"), +) + +boot1_dist +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:11-bootstrapping3 +:figclass: caption-hack + +Bootstrap distribution. 
+``` + +```{code-cell} ipython3 +boot1["price"].mean() +``` + +Notice in {numref}`fig:11-bootstrapping3` that the histogram of our bootstrap sample +has a similar shape to the original sample histogram. Though the shapes of +the distributions are similar, they are not identical. You'll also notice that +the original sample mean and the bootstrap sample mean differ. How might that +happen? Remember that we are sampling with replacement from the original +sample, so we don't end up with the same sample values again. We are *pretending* +that our single sample is close to the population, and we are trying to +mimic drawing another sample from the population by drawing one from our original +sample. + +Let's now take 20,000 bootstrap samples from the original sample (`one_sample`) +and calculate the means for +each of those replicates. Recall that this assumes that `one_sample` *looks like* +our original population; but since we do not have access to the population itself, +this is often the best we can do. +Note that here we break the list comprehension over multiple lines +so that it is easier to read. + +```{code-cell} ipython3 +boot20000 = pd.concat([ + one_sample.sample(frac=1, replace=True).assign(replicate=n) + for n in range(20_000) +]) +boot20000 +``` + +Let's take a look at histograms of the first six replicates of our bootstrap samples. + +```{code-cell} ipython3 +:tags: [] + +six_bootstrap_samples = boot20000.query("replicate < 6") +alt.Chart(six_bootstrap_samples, height=150).mark_bar().encode( + x=alt.X("price") + .bin(maxbins=20) + .title("Price per night (dollars)"), + y=alt.Y("count()").title("Count") +).facet( + "replicate:N", # Recall that `:N` converts the variable to a categorical type + columns=2 +) +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:11-bootstrapping-six-bootstrap-samples +:figclass: caption-hack + +Histograms of first six replicates of bootstrap samples. 
+``` + ++++ + +We see in {numref}`fig:11-bootstrapping-six-bootstrap-samples` how the distributions of the +bootstrap samples differ. If we calculate the sample mean for each of +these six samples, we can see that these are also different between samples. +To compute the mean for each sample, +we first group by the "replicate" which is the column containing the sample/replicate number. +Then we compute the mean of the `price` column and rename it to `mean_price` +for it to be more descriptive. +Finally we use `reset_index` to get the `replicate` values back as a column in the dataframe. + +```{code-cell} ipython3 +( + six_bootstrap_samples + .groupby("replicate") + ["price"] + .mean() + .reset_index() + .rename(columns={"price": "mean_price"}) +) +``` + +The distributions and the means differ between the bootstrapped samples +because we are sampling *with replacement*. +If we instead would have sampled *without replacement*, +we would end up with the exact same values in the sample each time. + +We will now calculate point estimates of the mean for our 20,000 bootstrap samples and +generate a bootstrap distribution of these point estimates. The bootstrap +distribution ({numref}`fig:11-bootstrapping5`) suggests how we might expect +our point estimate to behave if we take multiple samples. + +```{code-cell} ipython3 +boot20000_means = ( + boot20000 + .groupby("replicate") + ["price"] + .mean() + .reset_index() + .rename(columns={"price": "mean_price"}) +) + +boot20000_means +``` + +```{code-cell} ipython3 +:tags: [] + +boot_est_dist = alt.Chart(boot20000_means).mark_bar().encode( + x=alt.X("mean_price") + .bin(maxbins=20) + .title("Sample mean price per night (dollars)"), + y=alt.Y("count()").title("Count"), +) + +boot_est_dist +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:11-bootstrapping5 +:figclass: caption-hack + +Distribution of the bootstrap sample means. 
+``` + ++++ + +Let's compare the bootstrap distribution—which we construct by taking many samples from our original sample of size 40—with +the true sampling distribution—which corresponds to taking many samples from the population. + +```{code-cell} ipython3 +:tags: [remove-input] + +sampling_distribution.encoding.x["bin"]["extent"] = (90, 250) +alt.vconcat( + alt.layer( + sampling_distribution, + alt.Chart(sample_estimates).mark_rule(color="#f58518", size=2).encode(x="mean(mean_price)"), + alt.Chart(sample_estimates).mark_text(color="#f58518", size=12, align="left", dx=16, fontWeight="bold").encode( + x="mean(mean_price)", + y=alt.value(7), + text=alt.value(f"Mean = {sampling_distribution['data']['mean_price'].mean().round(1)}") + ) + ).properties(title="Sampling distribution", height=150), + alt.layer( + boot_est_dist, + alt.Chart(boot20000_means).mark_rule(color="#f58518", size=2).encode(x="mean(mean_price)"), + alt.Chart(boot20000_means).mark_text(color="#f58518", size=12, align="left", dx=18, fontWeight="bold").encode( + x="mean(mean_price)", + y=alt.value(7), + text=alt.value(f"Mean = {boot_est_dist['data']['mean_price'].mean().round(1)}") + ) + ).properties(title="Bootstrap distribution", height=150) +) +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:11-bootstrapping6 +:figclass: caption-hack + +Comparison of the distribution of the bootstrap sample means and sampling distribution. +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("one_sample_mean", "{:.2f}".format(one_sample["price"].mean())) +``` + +```{index} sampling distribution; compared to bootstrap distribution +``` + +There are two essential points that we can take away from +{numref}`fig:11-bootstrapping6`. First, the shape and spread of the true sampling +distribution and the bootstrap distribution are similar; the bootstrap +distribution lets us get a sense of the point estimate's variability. 
The +second important point is that the means of these two distributions are +slightly different. The sampling distribution is centered at +\${glue:text}`population_mean`, the population mean value. However, the bootstrap +distribution is centered at the original sample's mean price per night, +\${glue:text}`one_sample_mean`. Because we are resampling from the +original sample repeatedly, we see that the bootstrap distribution is centered +at the original sample's mean value (unlike the sampling distribution of the +sample mean, which is centered at the population parameter value). + +{numref}`fig:11-bootstrapping7` summarizes the bootstrapping process. +The idea here is that we can use this distribution of bootstrap sample means to +approximate the sampling distribution of the sample means when we only have one +sample. Since the bootstrap distribution pretty well approximates the sampling +distribution spread, we can use the bootstrap spread to help us develop a +plausible range for our population parameter along with our estimate! + +```{figure} img/inference/11-bootstrapping7-1.png +:name: fig:11-bootstrapping7 + +Summary of bootstrapping process. +``` + ++++ + +### Using the bootstrap to calculate a plausible range + +```{index} confidence interval +``` + +Now that we have constructed our bootstrap distribution, let's use it to create +an approximate 95\% percentile bootstrap confidence interval. +A **confidence interval** is a range of plausible values for the population parameter. We will +find the range of values covering the middle 95\% of the bootstrap +distribution, giving us a 95\% confidence interval. You may be wondering, what +does "95\% confidence" mean? If we took 100 random samples and calculated 100 +95\% confidence intervals, then about 95\% of the ranges would capture the +population parameter's value. Note there's nothing special about 95\%. We +could have used other levels, such as 90\% or 99\%. 
There is a balance between +our level of confidence and precision. A higher confidence level corresponds to +a wider range of the interval, and a lower confidence level corresponds to a +narrower range. Therefore the level we choose is based on what chance we are +willing to take of being wrong based on the implications of being wrong for our +application. In general, we choose confidence levels to be comfortable with our +level of uncertainty but not so strict that the interval is unhelpful. For +instance, if our decision impacts human life and the implications of being +wrong are deadly, we may want to be very confident and choose a higher +confidence level. + +To calculate a 95\% percentile bootstrap confidence interval, we will do the following: + +1. Arrange the observations in the bootstrap distribution in ascending order. +2. Find the value such that 2.5\% of observations fall below it (the 2.5\% percentile). Use that value as the lower bound of the interval. +3. Find the value such that 97.5\% of observations fall below it (the 97.5\% percentile). Use that value as the upper bound of the interval. + +To do this in Python, we can use the `quantile` function of our DataFrame. +Quantiles are expressed in proportions rather than percentages, +so the 2.5th and 97.5th percentiles +would be the 0.025 and 0.975 quantiles, respectively. + +```{index} numpy; percentile, pandas.DataFrame; df[] +``` + +```{code-cell} ipython3 +ci_bounds = boot20000_means["mean_price"].quantile([0.025, 0.975]) +ci_bounds +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("ci_lower", "{:.2f}".format(ci_bounds[0.025])) +glue("ci_upper", "{:.2f}".format(ci_bounds[0.975])) +``` + +Our interval, \${glue:text}`ci_lower` to \${glue:text}`ci_upper`, captures +the middle 95\% of the sample mean prices in the bootstrap distribution. We can +visualize the interval on our distribution in {numref}`fig:11-bootstrapping9`. 
+ +```{code-cell} ipython3 +# Create the annotation for the 2.5th percentile +rule_025 = alt.Chart().mark_rule(color="#f58518", size=3, strokeDash=[5]).encode( + x=alt.datum(ci_bounds[0.025]) +).properties( + width=500 +) +text_025 = rule_025.mark_text( + color="#f58518", + size=12, + fontWeight="bold", + dy=-160 +).encode( + text=alt.datum(f"2.5th percentile ({ci_bounds[0.025].round(1)})") +) + +# Create the annotation for the 97.5th percentile +text_975 = text_025.encode( + x=alt.datum(ci_bounds[0.975]), + text=alt.datum(f"97.5th percentile ({ci_bounds[0.975].round(1)})") +) +rule_975 = rule_025.encode(x=alt.datum(ci_bounds[0.975])) + +# Layer the annotations on top of the distribution plot +boot_est_dist + rule_025 + text_025 + rule_975 + text_975 +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:11-bootstrapping9 +:figclass: caption-hack + +Distribution of the bootstrap sample means with percentile lower and upper bounds. +``` + ++++ + +To finish our estimation of the population parameter, we would report the point +estimate and our confidence interval's lower and upper bounds. Here the sample +mean price-per-night of 40 Airbnb listings was +\${glue:text}`one_sample_mean`, and we are 95\% "confident" that the true +population mean price-per-night for all Airbnb listings in Vancouver is between +\${glue:text}`ci_lower` and \${glue:text}`ci_upper`. +Notice that our interval does indeed contain the true +population mean value, \${glue:text}`population_mean`\! However, in +practice, we would not know whether our interval captured the population +parameter or not because we usually only have a single sample, not the entire +population. This is the best we can do when we only have one sample! + +This chapter is only the beginning of the journey into statistical inference. 
+We can extend the concepts learned here to do much more than report point +estimates and confidence intervals, such as testing for real differences +between populations, tests for associations between variables, and so much +more. We have just scratched the surface of statistical inference; however, the +material presented here will serve as the foundation for more advanced +statistical techniques you may learn about in the future! + ++++ + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the two "Statistical inference" rows. +You can launch an interactive version of each worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of each worksheet by clicking "view worksheet." +If you instead decide to download the worksheets and run them on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. + ++++ + +## Additional resources + +- Chapters 4 to 7 of *OpenIntro Statistics* {cite:p}`openintro` + provide a good next step in learning about inference. Although it is still certainly + an introductory text, things get a bit more mathematical here. Depending on + your background, you may actually want to start going through Chapters 1 to 3 + first, where you will learn some fundamental concepts in probability theory. + Although it may seem like a diversion, probability theory is *the language of + statistics*; if you have a solid grasp of probability, more advanced statistics + will come naturally to you! 
+ ++++ + +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/intro.md b/pull313/_sources/intro.md new file mode 100644 index 00000000..d0921e89 --- /dev/null +++ b/pull313/_sources/intro.md @@ -0,0 +1,1232 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.5 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(intro)= +# Python and Pandas + +```{code-cell} ipython3 +:tags: [remove-cell] + +from chapter_preamble import * +``` + +## Overview + +This chapter provides an introduction to data science and the Python programming language. +The goal here is to get your hands dirty right from the start! We will walk through an entire data analysis, +and along the way introduce different types of data analysis question, some fundamental programming +concepts in Python, and the basics of loading, cleaning, and visualizing data. In the following chapters, we will +dig into each of these steps in much more detail; but for now, let's jump in to see how much we can do +with data science! + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + +- Identify the different types of data analysis question and categorize a question into the correct type. +- Load the `pandas` package into Python. +- Read tabular data with `read_csv`. +- Use `help()` to access help and documentation tools in Python. +- Create new variables and objects in Python. +- Create and organize subsets of tabular data using `[]`, `loc[]`, `sort_values`, and `head`. +- Chain multiple operations in sequence. +- Visualize data with an `altair` bar plot. + +## Canadian languages data set + +```{index} Canadian languages +``` + +In this chapter, we will walk through a full analysis of a data set relating to +languages spoken at home by Canadian residents. 
Many Indigenous peoples exist in Canada +with their own cultures and languages; these languages are often unique to Canada and not spoken +anywhere else in the world {cite:p}`statcan2018mothertongue`. Sadly, colonization has +led to the loss of many of these languages. For instance, generations of +children were not allowed to speak their mother tongue (the first language an +individual learns in childhood) in Canadian residential schools. Colonizers +also renamed places they had "discovered" {cite:p}`wilson2018`. Acts such as these +have significantly harmed the continuity of Indigenous languages in Canada, and +some languages are considered "endangered" as few people report speaking them. +To learn more, please see *Canadian Geographic*'s article, "Mapping Indigenous Languages in +Canada" {cite:p}`walker2017`, +*They Came for the Children: Canada, Aboriginal +peoples, and Residential Schools* {cite:p}`children2012` +and the *Truth and Reconciliation Commission of Canada's* +*Calls to Action* {cite:p}`calls2015`. + +The data set we will study in this chapter is taken from +[the `canlang` R data package](https://ttimbers.github.io/canlang/) +{cite:p}`timbers2020canlang`, which has +population language data collected during the 2016 Canadian census {cite:p}`cancensus2016`. +In this data, there are 214 languages recorded, each having six different properties: + +1. `category`: Higher-level language category, describing whether the language is an Official Canadian language, an Aboriginal (i.e., Indigenous) language, or a Non-Official and Non-Aboriginal language. +2. `language`: The name of the language. +3. `mother_tongue`: Number of Canadian residents who reported the language as their mother tongue. Mother tongue is generally defined as the language someone was exposed to since birth. +4. `most_at_home`: Number of Canadian residents who reported the language as being spoken most often at home. +5. 
`most_at_work`: Number of Canadian residents who reported the language as being used most often at work. +6. `lang_known`: Number of Canadian residents who reported knowledge of the language. + +According to the census, more than 60 Aboriginal languages were reported +as being spoken in Canada. Suppose we want to know which are the most common; +then we might ask the following question, which we wish to answer using our data: + +*Which ten Aboriginal languages were most often reported in 2016 as mother +tongues in Canada, and how many people speak each of them?* + +```{index} data science; good practices +``` + +```{note} +Data science cannot be done without +a deep understanding of the data and +problem domain. In this book, we have simplified the data sets used in our +examples to concentrate on methods and fundamental concepts. But in real +life, you cannot and should not do data science without a domain expert. +Alternatively, it is common to practice data science in your own domain of +expertise! Remember that when you work with data, it is essential to think +about *how* the data were collected, which affects the conclusions you can +draw. If your data are biased, then your results will be biased! +``` + +## Asking a question + +Every good data analysis begins with a *question*—like the +above—that you aim to answer using data. As it turns out, there +are actually a number of different *types* of question regarding data: +descriptive, exploratory, inferential, predictive, causal, and mechanistic, +all of which are defined in {numref}`questions-table`. {cite:p}`leek2015question,peng2015art` +Carefully formulating a question as early as possible in your analysis—and +correctly identifying which type of question it is—will guide your overall approach to +the analysis as well as the selection of appropriate tools. 
+ +```{index} question; data analysis, descriptive question; definition, exploratory question; definition +``` + +```{index} predictive question; definition, inferential question; definition, causal question; definition, mechanistic question; definition +``` + +```{list-table} Types of data analysis question. +:header-rows: 1 +:name: questions-table + +* - Question type + - Description + - Example +* - Descriptive + - A question that asks about summarized characteristics of a data set without interpretation (i.e., report a fact). + - How many people live in each province and territory in Canada? +* - Exploratory + - A question that asks if there are patterns, trends, or relationships within a single data set. Often used to propose hypotheses for future study. + - Does political party voting change with indicators of wealth in a set of data collected on 2,000 people living in Canada? +* - Predictive + - A question that asks about predicting measurements or labels for individuals (people or things). The focus is on what things predict some outcome, but not what causes the outcome. + - What political party will someone vote for in the next Canadian election? +* - Inferential + - A question that looks for patterns, trends, or relationships in a single data set **and** also asks for quantification of how applicable these findings are to the wider population. + - Does political party voting change with indicators of wealth for all people living in Canada? +* - Causal + - A question that asks about whether changing one factor will lead to a change in another factor, on average, in the wider population. + - Does wealth lead to voting for a certain political party in Canadian elections? +* - Mechanistic + - A question that asks about the underlying mechanism of the observed patterns, trends, or relationships (i.e., how does it happen?) + - How does wealth lead to voting for a certain political party in Canadian elections? 
+
+```
+
+
+In this book, you will learn techniques to answer the
+first four types of question: descriptive, exploratory, predictive, and inferential;
+causal and mechanistic questions are beyond the scope of this book.
+In particular, you will learn how to apply the following analysis tools:
+
+```{index} summarization; overview, visualization; overview, classification; overview, regression; overview
+```
+
+```{index} clustering; overview, estimation; overview
+```
+
+1. **Summarization:** computing and reporting aggregated values pertaining to a data set.
+Summarization is most often used to answer descriptive questions,
+and can occasionally help with answering exploratory questions.
+For example, you might use summarization to answer the following question:
+*What is the average race time for runners in this data set?*
+Tools for summarization are covered in detail in {numref}`Chapters %s `
+and {numref}`%s `, but appear regularly throughout the text.
+2. **Visualization:** plotting data graphically.
+Visualization is typically used to answer descriptive and exploratory questions,
+but plays a critical supporting role in answering all of the types of question in {numref}`questions-table`.
+For example, you might use visualization to answer the following question:
+*Is there any relationship between race time and age for runners in this data set?*
+This is covered in detail in {numref}`Chapter %s `, but again appears regularly throughout the book.
+3. **Classification:** predicting a class or category for a new observation.
+Classification is used to answer predictive questions.
+For example, you might use classification to answer the following question:
+*Given measurements of a tumor's average cell area and perimeter, is the tumor benign or malignant?*
+Classification is covered in {numref}`Chapters %s ` and {numref}`%s `.
+4. **Regression:** predicting a quantitative value for a new observation.
+Regression is also used to answer predictive questions.
+For example, you might use regression to answer the following question: +*What will be the race time for a 20-year-old runner who weighs 50kg?* +Regression is covered in {numref}`Chapters %s ` and {numref}`%s `. +5. **Clustering:** finding previously unknown/unlabeled subgroups in a +data set. Clustering is often used to answer exploratory questions. +For example, you might use clustering to answer the following question: +*What products are commonly bought together on Amazon?* +Clustering is covered in {numref}`Chapter %s `. +6. **Estimation:** taking measurements for a small number of items from a large group + and making a good guess for the average or proportion for the large group. Estimation +is used to answer inferential questions. +For example, you might use estimation to answer the following question: +*Given a survey of cellphone ownership of 100 Canadians, what proportion +of the entire Canadian population own Android phones?* +Estimation is covered in {numref}`Chapter %s `. + +Referring to {numref}`questions-table`, our question about +Aboriginal languages is an example of a *descriptive question*: we are +summarizing the characteristics of a data set without further interpretation. +And referring to the list above, it looks like we should use visualization +and perhaps some summarization to answer the question. So in the remainder +of this chapter, we will work towards making a visualization that shows +us the ten most common Aboriginal languages in Canada and their associated counts, +according to the 2016 census. + +## Loading a tabular data set + +```{index} tabular data +``` + +A data set is, at its core essence, a structured collection of numbers and characters. +Aside from that, there are really no strict rules; data sets can come in +many different forms! Perhaps the most common form of data set that you will +find in the wild, however, is *tabular data*. 
Think spreadsheets in Microsoft Excel: tabular data are +rectangular-shaped and spreadsheet-like, as shown in {numref}`img-spreadsheet-vs-data frame`. In this book, we will focus primarily on tabular data. + +```{index} data frame; overview, observation, variable +``` + +Since we are using Python for data analysis in this book, the first step for us is to +load the data into Python. When we load tabular data into +Python, it is represented as a *data frame* object. {numref}`img-spreadsheet-vs-data frame` shows that a Python data frame is very similar +to a spreadsheet. We refer to the rows as **observations**; these are the things that we +collect the data on, e.g., voters, cities, etc. We refer to the columns as +**variables**; these are the characteristics of those observations, e.g., voters' political +affiliations, cities' populations, etc. + + +```{figure} img/intro/spreadsheet_vs_df.png +--- +height: 500px +name: img-spreadsheet-vs-data frame +--- +A spreadsheet versus a data frame in Python +``` + +```{index} see: comma-separated values; csv +``` + +```{index} csv +``` + +The first kind of data file that we will learn how to load into Python as a data +frame is the *comma-separated values* format (`.csv` for short). These files +have names ending in `.csv`, and can be opened and saved using common +spreadsheet programs like Microsoft Excel and Google Sheets. For example, the +`.csv` file named `can_lang.csv` +is included with [the code for this book](https://github.com/UBC-DSCI/introduction-to-datascience-python/tree/main/source/data). 
+If we were to open this data in a plain text editor (a program like Notepad that just shows +text with no formatting), we would see each row on its own line, and each entry in the table separated by a comma: + +```text +category,language,mother_tongue,most_at_home,most_at_work,lang_known +Aboriginal languages,"Aboriginal languages, n.o.s.",590,235,30,665 +Non-Official & Non-Aboriginal languages,Afrikaans,10260,4785,85,23415 +Non-Official & Non-Aboriginal languages,"Afro-Asiatic languages, n.i.e.",1150,44 +Non-Official & Non-Aboriginal languages,Akan (Twi),13460,5985,25,22150 +Non-Official & Non-Aboriginal languages,Albanian,26895,13135,345,31930 +Aboriginal languages,"Algonquian languages, n.i.e.",45,10,0,120 +Aboriginal languages,Algonquin,1260,370,40,2480 +Non-Official & Non-Aboriginal languages,American Sign Language,2685,3020,1145,21 +Non-Official & Non-Aboriginal languages,Amharic,22465,12785,200,33670 +``` + +```{index} function, argument, read function; read\_csv +``` + +To load this data into Python so that we can do things with it (e.g., perform +analyses or create data visualizations), we will need to use a *function.* A +function is a special word in Python that takes instructions (we call these +*arguments*) and does something. The function we will use to load a `.csv` file +into Python is called `read_csv`. In its most basic +use-case, `read_csv` expects that the data file: + +- has column names (or *headers*), +- uses a comma (`,`) to separate the columns, and +- does not have row names. + ++++ + +```{index} package, import, pandas +``` + +Below you'll see the code used to load the data into Python using the `read_csv` +function. Note that the `read_csv` function is not included in the base +installation of Python, meaning that it is not one of the primary functions ready to +use when you install Python. Therefore, you need to load it from somewhere else +before you can use it. The place from which we will load it is called a Python *package*. 
+
+A Python package is a collection of functions that can be used in addition to the
+built-in Python package functions once loaded. The `read_csv` function, in
+particular, can be made accessible by loading
+[the `pandas` Python package](https://pypi.org/project/pandas/) {cite:p}`reback2020pandas,mckinney-proc-scipy-2010`
+using the `import` command. The `pandas` package contains many
+functions that we will use throughout this book to load, clean, wrangle,
+and visualize data.
+
++++
+
+```{code-cell} ipython3
+import pandas as pd
+```
+
+This command has two parts. The first is `import pandas`, which loads the `pandas` package.
+The second is `as pd`, which gives the `pandas` package the much shorter *alias* (another name) `pd`.
+We can now use the `read_csv` function by writing `pd.read_csv`, i.e., the package name, then a dot, then the function name.
+You can see why we gave `pandas` a shorter alias; if we had to type `pandas.` before every function we wanted to use,
+our code would become much longer and harder to read!
+
+Now that the `pandas` package is loaded, we can use the `read_csv` function by passing
+it a single argument: the name of the file, `"can_lang.csv"`. We have to
+put quotes around file names and other letters and words that we use in our
+code to distinguish it from the special words (like functions!) that make up the Python programming
+language. The file's name is the only argument we need to provide because our
+file satisfies everything else that the `read_csv` function expects in the default
+use-case. {numref}`img-read-csv` describes how we use the `read_csv` function
+to read data into Python.
+ +```{figure} img/intro/read_csv_function.png +--- +height: 220px +name: img-read-csv +--- +Syntax for the `read_csv` function +``` + + ++++ +```{code-cell} ipython3 +:tags: ["output_scroll"] +pd.read_csv("data/can_lang.csv") + +``` + + + +## Naming things in Python + +When we loaded the 2016 Canadian census language data +using `read_csv`, we did not give this data frame a name. +Therefore the data was just printed on the screen, +and we cannot do anything else with it. That isn't very useful. +What would be more useful would be to give a name +to the data frame that `read_csv` outputs, +so that we can refer to it later for analysis and visualization. + +```{index} see: =; assignment symbol +``` + +```{index} assignment symbol, string +``` + +The way to assign a name to a value in Python is via the *assignment symbol* `=`. +On the left side of the assignment symbol you put the name that you want +to use, and on the right side of the assignment symbol +you put the value that you want the name to refer to. +Names can be used to refer to almost anything in Python, such as numbers, +words (also known as *strings* of characters), and data frames! +Below, we set `my_number` to `3` (the result of `1+2`) +and we set `name` to the string `"Alice"`. + +```{code-cell} ipython3 +my_number = 1 + 2 + +name = "Alice" +``` + +Note that when +we name something in Python using the assignment symbol, `=`, +we do not need to surround the name we are creating with quotes. This is +because we are formally telling Python that this special word denotes +the value of whatever is on the right-hand side. +Only characters and words that act as *values* on the right-hand side of the assignment +symbol—e.g., the file name `"data/can_lang.csv"` that we specified before, or `"Alice"` above—need +to be surrounded by quotes. + +After making the assignment, we can use the special name words we have created in +place of their values. 
For example, if we want to do something with the value `3` later on, +we can just use `my_number` instead. Let's try adding 2 to `my_number`; you will see that +Python just interprets this as adding 2 and 3: + +```{code-cell} ipython3 +my_number + 2 +``` + +```{index} object +``` + +Object names can consist of letters, numbers, and underscores (`_`). +Other symbols won't work since they have their own meanings in Python. For example, +`-` is the subtraction symbol; if we try to assign a name with +the `-` symbol, Python will complain and we will get an error! + +``` +my-number = 1 +``` + +``` +SyntaxError: cannot assign to expression here. Maybe you meant '==' instead of '='? +``` + +```{index} object; naming convention +``` + +There are certain conventions for naming objects in Python. +When naming an object we +suggest using only lowercase letters, numbers and underscores `_` to separate +the words in a name. Python is case sensitive, which means that `Letter` and +`letter` would be two different objects in Python. You should also try to give your +objects meaningful names. For instance, you *can* name a data frame `x`. +However, using more meaningful terms, such as `language_data`, will help you +remember what each name in your code represents. We recommend following the +**PEP 8** naming conventions outlined in the *[PEP 8](https://peps.python.org/pep-0008/)* {cite:p}`pep8-style-guide`. Let's +now use the assignment symbol to give the name +`can_lang` to the 2016 Canadian census language data frame that we get from +`read_csv`. + +```{code-cell} ipython3 +can_lang = pd.read_csv("data/can_lang.csv") +``` + +Wait a minute, nothing happened this time! Where's our data? +Actually, something did happen: the data was loaded in +and now has the name `can_lang` associated with it. +And we can use that name to access the data frame and do things with it. +For example, we can type the name of the data frame to print both the first few rows +and the last few rows. 
The three dots (`...`) indicate that there are additional rows that are not printed.
+You will also see that the number of observations (i.e., rows) and
+variables (i.e., columns) are printed just underneath the data frame (214 rows and 6 columns in this case).
+Printing a few rows from a data frame like this is a handy way to get a quick sense for what is contained in it.
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+can_lang
+```
+
+## Creating subsets of data frames with `[]` & `loc[]`
+
+```{index} pandas.DataFrame; [], pandas.DataFrame; loc[]
+```
+
+Now that we've loaded our data into Python, we can start wrangling the data to
+find the ten Aboriginal languages that were most often reported
+in 2016 as mother tongues in Canada. In particular, we want to construct
+a table with the ten Aboriginal languages that have the largest
+counts in the `mother_tongue` column. The first step is to extract
+from our `can_lang` data only those rows that correspond to Aboriginal languages,
+and then the second step is to keep only the `language` and `mother_tongue` columns.
+The `[]` and `loc[]` operations on the `pandas` data frame will help us
+here. The `[]` allows you to obtain a subset of (i.e., *filter*) the rows of a data frame,
+or to obtain a subset of (i.e., *select*) the columns of a data frame.
+The `loc[]` operation allows you to both filter rows *and* select columns
+at the same time. We will first investigate filtering rows and selecting
+columns with the `[]` operation,
+and then use `loc[]` to do both in our analysis of the Aboriginal languages data.
+
+```{note}
+The `[]` and `loc[]` operations, and related operations, in `pandas`
+are much more powerful than we describe in this chapter.
+You will learn more sophisticated ways to index data frames later on
+in {numref}`Chapter %s `.
+``` + +### Using `[]` to filter rows +Looking at the `can_lang` data above, we see the column `category` contains different +high-level categories of languages, which include "Aboriginal languages", +"Non-Official & Non-Aboriginal languages" and "Official languages". To answer +our question we want to filter our data set so we restrict our attention +to only those languages in the "Aboriginal languages" category. + +```{index} pandas.DataFrame; [], filter, logical statement, logical statement; equivalency operator, string +``` + +We can use the `[]` operation to obtain the subset of rows with desired values +from a data frame. {numref}`img-filter` shows the syntax we need to use to filter +rows with the `[]` operation. First, we type the name of the data frame---here, `can_lang`---followed +by square brackets. Inside the square brackets, we write a *logical statement* to +use when filtering the rows. A logical statement evaluates to either `True` or `False` +for each row in the data frame; the `[]` operation keeps only those rows +for which the logical statement evaluates to `True`. For example, in our analysis, +we are interested in keeping only languages in the `"Aboriginal languages"` higher-level +category. We can use the *equivalency operator* `==` to compare the values of the `category` +column---denoted by `can_lang["category"]`---with the value `"Aboriginal languages"`. +You will learn about many other kinds of logical +statement in {numref}`Chapter %s `. Similar to when we loaded the data file and put quotes +around the file name, here we need to put quotes around both `"Aboriginal languages"` and `"category"`. Using +quotes tells Python that this is a *string value* (e.g., a column name, or word data) +and not one of the special words that make up the Python programming language, +or one of the names we have given to objects in the code we have already written. 
+ +```{note} +In Python, single quotes (`'`) and double quotes (`"`) are generally +treated the same. So we could have written `'Aboriginal languages'` instead +of `"Aboriginal languages"` above, or `'category'` instead of `"category"`. +Try both out for yourself! +``` + +```{figure} img/intro/filter_rows.png +--- +height: 220px +name: img-filter +--- +Syntax for using the `[]` operation to filter rows. +``` + +This operation returns a data frame that has all the columns of the input data frame, +but only those rows corresponding to Aboriginal languages that we asked for in the logical statement. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +can_lang[can_lang["category"] == "Aboriginal languages"] +``` + +### Using `[]` to select columns + + +```{index} pandas.DataFrame; [], select; +``` + +We can also use the `[]` operation to select columns from a data frame. +{numref}`img-select` displays the syntax needed to select columns. +We again first type the name of the data frame---here, `can_lang`---followed +by square brackets. Inside the square brackets, we provide a *list* of +column names. In Python, we denote a *list* using square brackets, where +each item is separated by a comma (`,`). So if we are interested in +selecting only the `language` and `mother_tongue` columns from our original +`can_lang` data frame, we put the list `["language", "mother_tongue"]` +containing those two column names inside the square brackets of the `[]` operation. + +```{figure} img/intro/select_columns.png +--- +height: 220px +name: img-select +--- +Syntax for using the `[]` operation to select columns. +``` + +This operation returns a data frame that has all the rows of the input data frame, +but only those columns that we named in the selection list. 
+
+```{code-cell} ipython3
+can_lang[["language", "mother_tongue"]]
+```
+
+### Using `loc[]` to filter rows and select columns
+
+```{index} pandas.DataFrame; loc[]
+```
+
+The `[]` operation is only used when you want to filter rows *or* select columns;
+it cannot be used to do both operations at the same time. But in order to answer
+our original data analysis question in this chapter, we need to *both* filter the rows
+for Aboriginal languages, *and* select the `language` and `mother_tongue` columns.
+Fortunately, `pandas` provides the `loc[]` operation, which lets us do just that.
+The syntax is very similar to the `[]` operation we have already covered: we will
+essentially combine both our row filtering and column selection steps from before.
+In particular, we first write the name of the data frame---`can_lang` again---then follow
+that with the `.loc[]` operation. Inside the square brackets,
+we write our row filtering logical statement,
+then a comma, then our list of columns to select.
+
+```{figure} img/intro/filter_rows_and_columns.png
+---
+height: 220px
+name: img-loc
+---
+Syntax for using the `loc[]` operation to filter rows and select columns.
+```
+
+```{code-cell} ipython3
+aboriginal_lang = can_lang.loc[can_lang["category"] == "Aboriginal languages", ["language", "mother_tongue"]]
+```
+There is one very important thing to notice in this code example.
+We used the `loc[]` operation on the `can_lang` data frame by
+writing `can_lang.loc[]`---first the data frame name, then a dot, then `loc[]`.
+There's that dot again! If you recall, earlier in this chapter we used the `read_csv` function from `pandas` (aliased as `pd`),
+and wrote `pd.read_csv`. The dot means that the thing on the left (`pd`, i.e., the `pandas` package) *provides* the
+thing on the right (the `read_csv` function). In the case of `can_lang.loc[]`, the thing on the left (the `can_lang` data frame)
+*provides* the thing on the right (the `loc[]` operation).
In Python, +both packages (like `pandas`) *and* objects (like our `can_lang` data frame) can provide functions +and other objects that we access using the dot syntax. + +```{note} +A note on terminology: when an object `obj` provides a function `f` with the +dot syntax (as in `obj.f()`), sometimes we call that function `f` a *method* of `obj` or an *operation* on `obj`. +Similarly, when an object `obj` provides another object `x` with the dot syntax (as in `obj.x`), sometimes we call the object `x` an *attribute* of `obj`. +We will use all of these terms throughout the book, as you will see them used commonly in the community. +And just because we programmers like to be confusing for no apparent reason: we *don't* use the "method", "operation", or "attribute" terminology +when referring to functions and objects from packages, like `pandas`. So for example, `pd.read_csv` +would typically just be referred to as a function, but not as a method or operation, even though it uses the dot syntax. +``` + +At this point, if we have done everything correctly, `aboriginal_lang` should be a data frame +containing *only* rows where the `category` is `"Aboriginal languages"`, +and containing *only* the `language` and `mother_tongue` columns. +Any time you take a step in a data analysis, it's good practice to check the output +by printing the result. +```{code-cell} ipython3 +aboriginal_lang +``` +We can see the original `can_lang` data set contained 214 rows +with multiple kinds of `category`. The data frame +`aboriginal_lang` contains only 67 rows, and looks like it only contains Aboriginal languages. +So it looks like the `loc[]` operation gave us the result we wanted! + +## Using `sort_values` and `head` to select rows by ordered values + +```{index} pandas.DataFrame; sort_values, pandas.DataFrame; head +``` + +We have used the `[]` and `loc[]` operations on a data frame to obtain a table +with only the Aboriginal languages in the data set and their associated counts. 
+
+However, we want to know the **ten** languages that are spoken most often. As a
+next step, we will order the `mother_tongue` column from largest to smallest
+value and then extract only the top ten rows. This is where the `sort_values`
+and `head` functions come to the rescue!
+
+The `sort_values` function allows us to order the rows of a data frame by the
+values of a particular column. We need to specify the column name
+by which we want to sort the data frame by passing it to the argument `by`.
+Since we want to choose the ten Aboriginal languages most often reported as a mother tongue
+language, we will use the `sort_values` function to order the rows in our
+`aboriginal_lang` data frame by the `mother_tongue` column. We want to
+arrange the rows in descending order (from largest to smallest),
+so we specify the argument `ascending` as `False`.
+
+```{figure} img/intro/sort_values.png
+---
+height: 220px
+name: img-sort-values
+---
+Syntax for using `sort_values` to arrange rows in descending order.
+```
+
+```{code-cell} ipython3
+arranged_lang = aboriginal_lang.sort_values(by="mother_tongue", ascending=False)
+arranged_lang
+```
+
+Next, we will obtain the ten most common Aboriginal languages by selecting only
+the first ten rows of the `arranged_lang` data frame.
+We do this using the `head` function, and specifying the argument
+`10`.
+
+
+```{code-cell} ipython3
+ten_lang = arranged_lang.head(10)
+ten_lang
+```
+
+(ch1-adding-modifying)=
+## Adding and modifying columns
+
+```{index} assign
+```
+
+Recall that our data analysis question referred to the *count* of Canadians
+that speak each of the top ten most commonly reported Aboriginal languages as
+their mother tongue, and the `ten_lang` data frame indeed contains those
+counts... But perhaps, seeing these numbers, we became curious about the
+*percentage* of the population of Canada associated with each count.
It is
+common to come up with new data analysis questions in the process of answering
+a first one—so fear not and explore! To answer this small
+question-along-the-way, we need to divide each count in the `mother_tongue`
+column by the total Canadian population according to the 2016
+census—i.e., 35,151,728—and multiply it by 100. We can perform
+this computation using the code `100 * ten_lang["mother_tongue"] / canadian_population`.
+Then to store the result in a new column (or
+overwrite an existing column), we specify the name of the new
+column to create (or old column to modify), then the assignment symbol `=`,
+and then the computation to store in that column. In this case, we will opt to
+create a new column called `mother_tongue_percent`.
+
+```{note}
+You will see below that we write the Canadian population in
+Python as `35_151_728`. The underscores (`_`) are just there for readability,
+and do not affect how Python interprets the number. In other words,
+`35151728` and `35_151_728` are treated identically in Python,
+although the latter is much clearer!
+```
+
+```{code-cell} ipython3
+:tags: [remove-cell]
+# disable setting with copy warning
+# it's not important for this chapter and just distracting
+# only occurs here because we did a much earlier .loc operation that is being picked up below by the coln assignment
+pd.options.mode.chained_assignment = None
+```
+
+```{code-cell} ipython3
+canadian_population = 35_151_728
+ten_lang["mother_tongue_percent"] = 100 * ten_lang["mother_tongue"] / canadian_population
+ten_lang
+```
+
+The updated `ten_lang` data frame shows that
+the ten Aboriginal languages in the `ten_lang` data frame were spoken
+as a mother tongue by between 0.008% and 0.18% of the Canadian population.
+
+## Combining analysis steps with chaining and multiline expressions
+
+```{index} chaining methods
+```
+
+It took us 3 steps to find the ten Aboriginal languages most often reported in
+2016 as mother tongues in Canada.
Starting from the `can_lang` data frame, we:
+
+1) used `loc` to filter the rows so that only the
+   `Aboriginal languages` category remained, and selected the
+   `language` and `mother_tongue` columns,
+2) used `sort_values` to sort the rows by `mother_tongue` in descending order, and
+3) obtained only the top 10 values using `head`.
+
+One way of performing these steps is to just write
+multiple lines of code, storing temporary, intermediate objects as you go.
+```{code-cell} ipython3
+aboriginal_lang = can_lang.loc[can_lang["category"] == "Aboriginal languages", ["language", "mother_tongue"]]
+arranged_lang_sorted = aboriginal_lang.sort_values(by="mother_tongue", ascending=False)
+ten_lang = arranged_lang_sorted.head(10)
+```
+
+```{index} multi-line expression
+```
+
+You might find that code hard to read. You're not wrong; it is!
+There are two main issues with readability here. First, each line of code is quite long.
+It is hard to keep track of what methods are being called, and what arguments were used.
+Second, each line introduces a new temporary object. In this case, both `aboriginal_lang` and `arranged_lang_sorted`
+are just temporary results on the way to producing the `ten_lang` data frame.
+This makes the code hard to read, as one has to trace where each temporary object
+goes, and hard to understand, since introducing many named objects also suggests that they
+are of some importance, when really they are just intermediates.
+The need to call multiple methods in a sequence to process a data frame is
+quite common, so this is an important issue to address!
+
+To solve the first problem, we can actually split the long expressions above across
+multiple lines. Although in most cases, a single expression in Python must be contained
+in a single line of code, there are a small number of situations where Python lets us do this.
+Let's rewrite this code in a more readable format using multiline expressions.
+
+```{code-cell} ipython3
+aboriginal_lang = can_lang.loc[
+    can_lang["category"] == "Aboriginal languages",
+    ["language", "mother_tongue"]
+]
+arranged_lang_sorted = aboriginal_lang.sort_values(
+    by="mother_tongue",
+    ascending=False
+)
+ten_lang = arranged_lang_sorted.head(10)
+```
+
+This code is the same as the code we showed earlier; you can see the same
+sequence of methods and arguments is used. But long expressions are split
+across multiple lines when they would otherwise get long and unwieldy,
+improving the readability of the code.
+How does Python know when to keep
+reading on the next line for a single expression?
+For the line starting with `aboriginal_lang = ...`, Python sees that the line ends with a left
+bracket symbol `[`, and knows that our
+expression cannot end until we close it with an appropriate corresponding right bracket symbol `]`.
+We put the same two arguments as we did before, and then
+the corresponding right bracket appears after `["language", "mother_tongue"]`.
+For the line starting with `arranged_lang_sorted = ...`, Python sees that the line ends with a left parenthesis symbol `(`,
+and knows the expression cannot end until we close it with the corresponding right parenthesis symbol `)`.
+Again we use the same two arguments as before, and then the
+corresponding right parenthesis appears right after `ascending=False`.
+In both cases, Python keeps reading the next line to figure out
+what the rest of the expression is. We could, of course,
+put all of the code on one line of code, but splitting it across
+multiple lines helps a lot with code readability.
+
+We still have to handle the issue that each line of code---i.e., each step in the analysis---introduces
+a new temporary object. To address this issue, we can *chain* multiple operations together without
+assigning intermediate objects.
The key idea of chaining is that the *output* of +each step in the analysis is a data frame, which means that you can just directly keep calling methods +that operate on the output of each step in a sequence! This simplifies the code and makes it +easier to read. The code below demonstrates the use of both multiline expressions and chaining together. +The code is now much cleaner, and the `ten_lang` data frame that we get is equivalent to the one +from the messy code above! + +```{code-cell} ipython3 +# obtain the 10 most common Aboriginal languages +ten_lang = ( + can_lang.loc[ + can_lang["category"] == "Aboriginal languages", + ["language", "mother_tongue"] + ] + .sort_values(by="mother_tongue", ascending=False) + .head(10) +) +ten_lang +``` + +Let's parse this new block of code piece by piece. +The code above starts with a left parenthesis, `(`, and so Python +knows to keep reading to subsequent lines until it finds the corresponding +right parenthesis symbol `)`. The `loc` method performs the filtering and selecting steps as before. The line after this +starts with a period (`.`) that "chains" the output of the `loc` step with the next operation, +`sort_values`. Since the output of `loc` is a data frame, we can use the `sort_values` method on it +without first giving it a name! That is what the `.sort_values` does on the next line. +Finally, we once again "chain" together the output of `sort_values` with `head` to ask for the 10 +most common languages. Finally, the right parenthesis `)` corresponding to the very first left parenthesis +appears on the second last line, completing the multiline expression. +Instead of creating intermediate objects, with chaining, we take the output of +one operation and use that to perform the next operation. In doing so, we remove the need to create and +store intermediates. This can help with readability by simplifying the code. 
+ +Now that we've shown you chaining as an alternative to storing +temporary objects and composing code, does this mean you should *never* store +temporary objects or compose code? Not necessarily! +There are times when temporary objects are handy to keep around. +For example, you might store a temporary object before feeding it into a plot function +so you can iteratively change the plot without having to +redo all of your data transformations. +Chaining many functions can be overwhelming and difficult to debug; +you may want to store a temporary object midway through to inspect your result +before moving on with further steps. + +## Exploring data with visualizations + +```{index} visualization +``` +We have now answered our initial question by generating the `ten_lang` table! +Are we done? Well, not quite; tables are almost never the best way to present +the result of your analysis to your audience. Even the `ten_lang` table with +only two columns presents some difficulty: for example, you have to scrutinize +the table quite closely to get a sense for the relative numbers of speakers of +each language. When you move on to more complicated analyses, this issue only +gets worse. In contrast, a *visualization* would convey this information in a much +more easily understood format. +Visualizations are a great tool for summarizing information to help you +effectively communicate with your audience, and creating effective data visualizations +is an essential component of any data +analysis. In this section we will develop a visualization of the + ten Aboriginal languages that were most often reported in 2016 as mother tongues in +Canada, as well as the number of people that speak each of them. + +### Using `altair` to create a bar plot + +```{index} altair, visualization; bar +``` + +In our data set, we can see that `language` and `mother_tongue` are in separate +columns (or variables). In addition, there is a single row (or observation) for each language. 
+The data are, therefore, in what we call a *tidy data* format. Tidy data is a +fundamental concept and will be a significant focus in the remainder of this +book: many of the functions from `pandas` require tidy data, as does the +`altair` package that we will use shortly for our visualization. We will +formally introduce tidy data in {numref}`Chapter %s `. + +```{index} see: plot; visualization +``` + +```{index} see: visualization; altair +``` + +We will make a bar plot to visualize our data. A bar plot is a chart where the +lengths of the bars represent certain values, like counts or proportions. We +will make a bar plot using the `mother_tongue` and `language` columns from our +`ten_lang` data frame. To create a bar plot of these two variables using the +`altair` package, we must specify the data frame, which variables +to put on the x and y axes, and what kind of plot to create. +First, we need to import the `altair` package. + +```{code-cell} ipython3 +import altair as alt + +``` + ++++ + +The fundamental object in `altair` is the `Chart`, which takes a data frame as an argument: `alt.Chart(ten_lang)`. +With a chart object in hand, we can now specify how we would like the data to be visualized. +We first indicate what kind of graphical *mark* we want to use to represent the data. Here we set the mark attribute +of the chart object using the `Chart.mark_bar` function, because we want to create a bar chart. +Next, we need to *encode* the variables of the data frame using +the `x` and `y` *channels* (which represent the x-axis and y-axis position of the points). We use the `encode()` +function to handle this: we specify that the `language` column should correspond to the x-axis, +and that the `mother_tongue` column should correspond to the y-axis. + +```{figure} img/intro/altair_syntax.png +--- +height: 220px +name: img-altair +--- +Syntax for using `altair` to make a bar chart. 
+``` + ++++ + +```{code-cell} ipython3 +:tags: [] + +barplot_mother_tongue = ( + alt.Chart(ten_lang).mark_bar().encode(x="language", y="mother_tongue") +) + + +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] + +glue("barplot-mother-tongue", barplot_mother_tongue, display=True) + +``` + +:::{glue:figure} barplot-mother-tongue +:figwidth: 700px +:name: barplot-mother-tongue + +Bar plot of the ten Aboriginal languages most often reported by Canadian residents as their mother tongue +::: + ++++ + +```{index} see: .; chaining methods +``` + +### Formatting `altair` charts + +It is exciting that we can already visualize our data to help answer our +question, but we are not done yet! We can (and should) do more to improve the +interpretability of the data visualization that we created. For example, by +default, Python uses the column names as the axis labels. Usually these +column names do not have enough information about the variable in the column. +We really should replace this default with a more informative label. For the +example above, Python uses the column name `mother_tongue` as the label for the +y axis, but most people will not know what that is. And even if they did, they +will not know how we measured this variable, or the group of people on which the +measurements were taken. An axis label that reads "Mother Tongue (Number of +Canadian Residents)" would be much more informative. To make the code easier to +read, we're spreading it out over multiple lines just as we did in the previous +section with pandas. + +```{index} plot; labels, plot; axis labels +``` + +Adding additional labels to our visualizations that we create in `altair` is +one common and easy way to improve and refine our data visualizations. We can add titles for the axes +in the `altair` objects using `alt.X` and `alt.Y` with the `title` method to make +the axes titles more informative (you will learn more about `alt.X` and `alt.Y` in {numref}`Chapter %s `). 
+Again, since we are specifying +words (e.g. `"Mother Tongue (Number of Canadian Residents)"`) as arguments to +the `title` method, we surround them with quotation marks. We can do many other modifications +to format the plot further, and we will explore these in {numref}`Chapter %s `. + +```{code-cell} ipython3 +barplot_mother_tongue = alt.Chart(ten_lang).mark_bar().encode( + x=alt.X("language").title("Language"), + y=alt.Y("mother_tongue").title("Mother Tongue (Number of Canadian Residents)") +) +``` + + +```{code-cell} ipython3 +:tags: ["remove-cell"] + +glue("barplot-mother-tongue-labs", barplot_mother_tongue, display=True) + +``` + + +:::{glue:figure} barplot-mother-tongue-labs +:figwidth: 700px +:name: barplot-mother-tongue-labs + +Bar plot of the ten Aboriginal languages most often reported by Canadian residents as their mother tongue with x and y labels. Note that this visualization is not done yet; there are still improvements to be made. +::: + + +The result is shown in {numref}`barplot-mother-tongue-labs`. +This is already quite an improvement! Let's tackle the next major issue with the visualization +in {numref}`barplot-mother-tongue-labs`: the vertical x axis labels, which are +currently making it difficult to read the different language names. +One solution is to rotate the plot such that the bars are horizontal rather than vertical. 
+To accomplish this, we will swap the x and y coordinate axes: + + +```{code-cell} ipython3 +barplot_mother_tongue_axis = alt.Chart(ten_lang).mark_bar().encode( + x=alt.X("mother_tongue").title("Mother Tongue (Number of Canadian Residents)"), + y=alt.Y("language").title("Language") +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] + +glue("barplot-mother-tongue-labs-axis", barplot_mother_tongue_axis, display=True) + +``` + +:::{glue:figure} barplot-mother-tongue-labs-axis +:figwidth: 700px +:name: barplot-mother-tongue-labs-axis + +Horizontal bar plot of the ten Aboriginal languages most often reported by Canadian residents as their mother tongue. There are no more serious issues with this visualization, but it could be refined further. +::: + +```{index} altair; sort +``` + +Another big step forward, as shown in {numref}`barplot-mother-tongue-labs-axis`! There +are no more serious issues with the visualization. Now comes time to refine +the visualization to make it even more well-suited to answering the question +we asked earlier in this chapter. For example, the visualization could be made more transparent by +organizing the bars according to the number of Canadian residents reporting +each language, rather than in alphabetical order. We can reorder the bars using +the `sort` method, which orders a variable (here `language`) based on the +values of the variable(`mother_tongue`) on the `x-axis`. 
+ +```{code-cell} ipython3 +ordered_barplot_mother_tongue = alt.Chart(ten_lang).mark_bar().encode( + x=alt.X("mother_tongue").title("Mother Tongue (Number of Canadian Residents)"), + y=alt.Y("language").sort("x").title("Language") +) +``` + ++++ + +```{code-cell} ipython3 +:tags: ["remove-cell"] + +glue("barplot-mother-tongue-reorder", ordered_barplot_mother_tongue, display=True) + +``` + + +:::{glue:figure} barplot-mother-tongue-reorder +:figwidth: 700px +:name: barplot-mother-tongue-reorder + +Bar plot of the ten Aboriginal languages most often reported by Canadian residents as their mother tongue with bars reordered. +::: + + +{numref}`barplot-mother-tongue-reorder` provides a very clear and well-organized +answer to our original question; we can see what the ten most often reported Aboriginal languages +were, according to the 2016 Canadian census, and how many people speak each of them. For +instance, we can see that the Aboriginal language most often reported was Cree +n.o.s. with over 60,000 Canadian residents reporting it as their mother tongue. + +```{note} +"n.o.s." means "not otherwise specified", so Cree n.o.s. refers to +individuals who reported Cree as their mother tongue. In this data set, the +Cree languages include the following categories: Cree n.o.s., Swampy Cree, +Plains Cree, Woods Cree, and a 'Cree not included elsewhere' category (which +includes Moose Cree, Northern East Cree and Southern East Cree) +{cite:p}`language2016`. +``` + +### Putting it all together + +```{index} comment +``` + +```{index} see: #; comment +``` + +In the block of code below, we put everything from this chapter together, with a few +modifications. In particular, we have combined all of our steps into one expression +split across multiple lines using the left and right parenthesis symbols `(` and `)`. +We have also provided *comments* next to +many of the lines of code below using the +hash symbol `#`. 
When Python sees a `#` sign, it +will ignore all of the text that +comes after the symbol on that line. So you can use comments to explain lines +of code for others, and perhaps more importantly, your future self! +It's good practice to get in the habit of +commenting your code to improve its readability. + +This exercise demonstrates the power of Python. In relatively few lines of code, we +performed an entire data science workflow with a highly effective data +visualization! We asked a question, loaded the data into Python, wrangled the data +(using `[]`, `loc[]`, `sort_values`, and `head`) and created a data visualization to +help answer our question. In this chapter, you got a quick taste of the data +science workflow; continue on with the next few chapters to learn each of +these steps in much more detail! + +```{code-cell} ipython3 +# load the data set +can_lang = pd.read_csv("data/can_lang.csv") + +# obtain the 10 most common Aboriginal languages +ten_lang = ( + can_lang.loc[can_lang["category"] == "Aboriginal languages", ["language", "mother_tongue"]] + .sort_values(by="mother_tongue", ascending=False) + .head(10) +) + +# create the visualization +ten_lang_plot = alt.Chart(ten_lang).mark_bar().encode( + x=alt.X("mother_tongue").title("Mother Tongue (Number of Canadian Residents)"), + y=alt.Y("language").sort("x").title("Language") +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] + +glue("final_plot", ten_lang_plot, display=True) + +``` + + +:::{glue:figure} final_plot +:figwidth: 700px +:name: final_plot + +Bar plot of the ten Aboriginal languages most often reported by Canadian residents as their mother tongue +::: + +## Accessing documentation + +```{index} documentation +``` + +```{index} see: help; documentation +``` + +```{index} see: __doc__; documentation +``` + +There are many Python functions in the `pandas` package (and beyond!), and +nobody can be expected to remember what every one of them does +or all of the arguments we have to give 
them. Fortunately, Python provides +the `help` function, which +provides an easy way to pull up the documentation for +most functions quickly. To use the `help` function to access the documentation, you +just put the name of the function you are curious about as an argument inside the `help` function. +For example, if you had forgotten what the `pd.read_csv` function +did or exactly what arguments to pass in, you could run the following +code: + +```{code-cell} ipython3 +:tags: ["remove-output"] +help(pd.read_csv) +``` + +{numref}`help_read_csv` shows the documentation that will pop up, +including a high-level description of the function, its arguments, +a description of each, and more. Note that you may find some of the +text in the documentation a bit too technical right now. +Fear not: as you work through this book, many of these terms will be introduced +to you, and slowly but surely you will become more adept at understanding and navigating +documentation like that shown in {numref}`help_read_csv`. But do keep in mind that the documentation +is not written to *teach* you about a function; it is just there as a reference to *remind* +you about the different arguments and usage of functions that you have already learned about elsewhere. + ++++ + +```{figure} img/intro/help_read_csv.png +--- +height: 700px +name: help_read_csv +--- +The documentation for the read_csv function including a high-level description, a list of arguments and their meanings, and more. +``` + ++++ + +If you are working in a Jupyter Lab environment, there are some conveniences that will help you lookup function names +and access the documentation. First, rather than `help`, you can use the more concise `?` character. 
So for example, +to read the documentation for the `pd.read_csv` function, you can run the following code: +```{code-cell} ipython3 +:tags: ["remove-output"] +?pd.read_csv +``` +You can also type the first characters of the function you want to use, +and then press Tab to bring up small menu +that shows you all the available functions +that starts with those characters. +This is helpful both for remembering function names +and to prevent typos. + ++++ + +```{figure} img/intro/completion_menu.png +--- +height: 400px +name: completion_menu +--- +The suggestions that are shown after typing `pd.read` and pressing Tab. +``` + ++++ + +To get more info on the function you want to use, +you can type out the full name +and then hold Shift while pressing Tab +to bring up a help dialogue including the same information as when using `help()`. + ++++ + +```{figure} img/intro/help_dialog.png +--- +height: 400px +name: help_dialog +--- +The help dialog that is shown after typing `pd.read_csv` and then pressing Shift + Tab. +``` + ++++ + +Finally, +it can be helpful to have this help dialog open at all times, +especially when you start out learning about programming and data science. +You can achieve this by clicking on the `Help` text +in the menu bar at the top +and then selecting `Show Contextual Help`. + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Python and Pandas" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. 
This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. + +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` + diff --git a/pull313/_sources/jupyter.md b/pull313/_sources/jupyter.md new file mode 100644 index 00000000..6f14c442 --- /dev/null +++ b/pull313/_sources/jupyter.md @@ -0,0 +1,514 @@ +--- +jupytext: + cell_metadata_filter: -all + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(getting-started-with-jupyter)= +# Combining code and text with Jupyter + +## Overview + +A typical data analysis involves not only writing and executing code, but also writing text and displaying images +that help tell the story of the analysis. In fact, ideally, we would like to *interleave* these three media, +with the text and images serving as narration for the code and its output. +In this chapter we will show you how to accomplish this using Jupyter notebooks, a common coding platform in +data science. Jupyter notebooks do precisely what we need: they let you combine text, images, and (executable!) code in a single +document. In this chapter, we will focus on the *use* of Jupyter notebooks to program in Python and write +text via a web interface. +These skills are essential to getting your analysis running; think of it like getting dressed in the morning! +Note that we assume that you already have Jupyter set up and ready to use. If that is not the case, please first read +{numref}`Chapter %s ` to learn how to install and configure Jupyter on your own +computer. + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + +- Create new Jupyter notebooks. +- Write, edit, and execute Python code in a Jupyter notebook. 
+- Write, edit, and view text in a Jupyter notebook. +- Open and view plain text data files in Jupyter. +- Export Jupyter notebooks to other standard file types (e.g., `.html`, `.pdf`). + +## Jupyter + +```{index} Jupyter notebook, reproducible +``` + +Jupyter is a web-based interactive development environment for creating, editing, +and executing documents called Jupyter notebooks. Jupyter notebooks are +documents that contain a mix of computer code (and its output) and formattable +text. Given that they combine these two analysis artifacts in a single +document—code is not separate from the output or written report—notebooks are +one of the leading tools to create reproducible data analyses. Reproducible data +analysis is one where you can reliably and easily re-create the same results when +analyzing the same data. Although this sounds like something that should always +be true of any data analysis, in reality, this is not often the case; one needs +to make a conscious effort to perform data analysis in a reproducible manner. +An example of what a Jupyter notebook looks like is shown in +{numref}`img-jupyter`. + + +```{figure} img/jupyter/jupyter.png +--- +name: img-jupyter +--- +A screenshot of a Jupyter Notebook. +``` + +### Accessing Jupyter + +```{index} JupyterHub +``` + + +One of the easiest ways to start working with Jupyter is to use a +web-based platform called JupyterHub. JupyterHubs often have Jupyter, Python, a number of Python +packages, and collaboration tools installed, configured and ready to use. +JupyterHubs are usually created and provisioned by organizations, +and require authentication to gain access. For example, if you are reading +this book as part of a course, your instructor may have a JupyterHub +already set up for you to use! Jupyter can also be installed on your +own computer; see {numref}`Chapter %s ` for instructions. 
+ +## Code cells + +```{index} Jupyter notebook; code cell +``` + +The sections of a Jupyter notebook that contain code are referred to as code cells. +A code cell that has not yet been +executed has no number inside the square brackets to the left of the cell +({numref}`code-cell-not-run`). Running a code cell will execute all of +the code it contains, and the output (if any exists) will be displayed directly +underneath the code that generated it. Outputs may include printed text or +numbers, data frames and data visualizations. Cells that have been executed +also have a number inside the square brackets to the left of the cell. +This number indicates the order in which the cells were run +({numref}`code-cell-run`). + +```{figure} img/jupyter/code-cell-not-run.png +--- +name: code-cell-not-run +--- +A code cell in Jupyter that has not yet been executed. +``` + +```{figure} img/jupyter/code-cell-run.png +--- +name: code-cell-run +--- +A code cell in Jupyter that has been executed. +``` + + + ++++ + +### Executing code cells + +```{index} Jupyter notebook; cell execution +``` + +Code cells can be run independently or as part of executing the entire notebook +using one of the "**Run all**" commands found in the **Run** or **Kernel** menus +in Jupyter. Running a single code cell independently is a workflow typically +used when editing or writing your own Python code. Executing an entire notebook is a +workflow typically used to ensure that your analysis runs in its entirety before +sharing it with others, and when using a notebook as part of an automated +process. + +To run a code cell independently, the cell needs to first be activated. This +is done by clicking on it with the cursor. Jupyter will indicate a cell has been +activated by highlighting it with a blue rectangle to its left. 
After the cell +has been activated ({numref}`activate-and-run-button`), the cell can be run by either pressing +the **Run** (▸) button in the toolbar, or by using a keyboard shortcut of +`Shift + Enter`. + +```{figure} img/jupyter/activate-and-run-button-annotated.png +--- +name: activate-and-run-button +--- +An activated cell that is ready to be run. The red arrow points to the blue +rectangle to the cell's left. The blue rectangle indicates that it is ready to +be run. This can be done by clicking the run button (circled in red). +``` + +To execute all of the code cells in an entire notebook, you have three options: + +1. Select **Run** >> **Run All Cells** from the menu. + +2. Select **Kernel** >> **Restart Kernel and Run All Cells...** from the menu ({numref}`restart-kernel-run-all`). + +3. Click the (⏭) button in the tool bar. + +All of these commands result in all of the code cells in a notebook being run. +However, there is a slight difference between them. In particular, only +options 2 and 3 above will restart the Python session before running all of the +cells; option 1 will not restart the session. Restarting the Python session means +that all previous objects that were created from running cells before this +command was run will be deleted. In other words, restarting the session and +then running all cells (options 2 or 3) emulates how your notebook code would +run if you completely restarted Jupyter before executing your entire notebook. + +```{figure} img/jupyter/restart-kernel-run-all.png +--- +name: restart-kernel-run-all +--- +Restarting the Python session can be accomplished by clicking Restart Kernel and Run All Cells... +``` + + +### The Kernel + +```{index} kernel, Jupyter notebook; kernel +``` + +The kernel is a program that executes the code inside your notebook and +outputs the results. 
Kernels for many different programming languages have +been created for Jupyter, which means that Jupyter can interpret and execute +the code of many different programming languages. To run Python code, your notebook +will need a Python kernel. In the top right of your window, you can see a circle +that indicates the status of your kernel. If the circle is empty +(◯), the kernel is idle and ready to execute code. If the circle is filled in +(⬤), the kernel is busy running some code. + +```{index} kernel; interrupt, kernel; restart +``` + +You may run into problems where your kernel is stuck for an excessive amount +of time, your notebook is very slow and unresponsive, or your kernel loses its +connection. If this happens, try the following steps: + +1. At the top of your screen, click **Kernel**, then **Interrupt Kernel**. +2. If that doesn't help, click **Kernel**, then **Restart Kernel...** If you do this, you will have to run your code cells from the start of your notebook up until where you paused your work. +3. If that still doesn't help, restart Jupyter. First, save your work by clicking **File** at the top left of your screen, then **Save Notebook**. Next, if you are accessing Jupyter using a JupyterHub server, from the **File** menu click **Hub Control Panel**. Choose **Stop My Server** to shut it down, then the **My Server** button to start it back up. If you are running Jupyter on your own computer, from the **File** menu click **Shut Down**, then start Jupyter again. Finally, navigate back to the notebook you were working on. + +### Creating new code cells + +To create a new code cell in Jupyter ({numref}`create-new-code-cell`), click the `+` button in the +toolbar. By default, all new cells in Jupyter start out as code cells, +so after this, all you have to do is write Python code within the new cell you just +created! 
+ +```{figure} img/jupyter/create-new-code-cell.png +--- +name: create-new-code-cell +--- +New cells can be created by clicking the + button, and are by default code cells. +``` + +## Markdown cells + +```{index} markdown, Jupyter notebook; markdown cell +``` + +Text cells inside a Jupyter notebook are called Markdown cells. Markdown cells +are rich formatted text cells, which means you can **bold** and *italicize* +text, create subject headers, create bullet and numbered lists, and more. These cells are +given the name "Markdown" because they use *Markdown language* to specify the rich text formatting. +You do not need to learn Markdown to write text in the Markdown cells in +Jupyter; plain text will work just fine. However, you might want to learn a bit +about Markdown eventually to enable you to create nicely formatted analyses. +See the additional resources at the end of this chapter to find out +where you can start learning Markdown. + +### Editing Markdown cells + +To edit a Markdown cell in Jupyter, you need to double click on the cell. Once +you do this, the unformatted (or *unrendered*) version of the text will be +shown ({numref}`markdown-cell-not-run`). You +can then use your keyboard to edit the text. To view the formatted +(or *rendered*) text ({numref}`markdown-cell-run`), click the **Run** (▸) button in the toolbar, +or use the `Shift + Enter` keyboard shortcut. + +```{figure} img/jupyter/markdown-cell-not-run.png +--- +name: markdown-cell-not-run +--- +A Markdown cell in Jupyter that has not yet been rendered and can be edited. +``` + +```{figure} img/jupyter/markdown-cell-run.png +--- +name: markdown-cell-run +--- +A Markdown cell in Jupyter that has been rendered and exhibits rich text formatting. +``` + +### Creating new Markdown cells + +To create a new Markdown cell in Jupyter, click the `+` button in the toolbar. 
+By default, all new cells in Jupyter start as code cells, so +the cell format needs to be changed to be recognized and rendered as a Markdown +cell. To do this, click on the cell with your cursor to +ensure it is activated. Then click on the drop-down box on the toolbar that says "Code" (it +is next to the ⏭ button), and change it from "**Code**" to "**Markdown**" ({numref}`convert-to-markdown-cell`). + +```{figure} img/jupyter/convert-to-markdown-cell.png +--- +name: convert-to-markdown-cell +--- +New cells are by default code cells. To create Markdown cells, the cell format must be changed. +``` + +## Saving your work + +As with any file you work on, it is critical to save your work often so you +don't lose your progress! Jupyter has an autosave feature, where open files are +saved periodically. The default for this is every two minutes. You can also +manually save a Jupyter notebook by selecting **Save Notebook** from the +**File** menu, by clicking the disk icon on the toolbar, +or by using a keyboard shortcut (`Control + S` for Windows, or `Command + S` for +Mac OS). + +## Best practices for running a notebook + +### Best practices for executing code cells + +```{index} Jupyter notebook; best practices +``` + +As you might know (or at least imagine) by now, Jupyter notebooks are great for +interactively editing, writing and running Python code; this is what they were +designed for! Consequently, Jupyter notebooks are flexible in regards to code +cell execution order. This flexibility means that code cells can be run in any +arbitrary order using the **Run** (▸) button. But this flexibility has a downside: +it can lead to Jupyter notebooks whose code cannot be executed in a linear +order (from top to bottom of the notebook). A nonlinear notebook is problematic +because a linear order is the conventional way code documents are run, and +others will have this expectation when running your notebook. 
Finally, if the +code is used in some automated process, it will need to run in a linear order, +from top to bottom of the notebook. + +The most common way to inadvertently create a nonlinear notebook is to rely solely +on using the (▸) button to execute cells. For example, +suppose you write some Python code that creates a Python object, say a variable named +`y`. When you execute that cell and create `y`, it will continue +to exist until it is deliberately deleted with Python code, or when the Jupyter +notebook Python session (*i.e.*, kernel) is stopped or restarted. It can also be +referenced in another distinct code cell ({numref}`out-of-order-1`). +Together, this means that you could then write a code cell further above in the +notebook that references `y` and execute it without error in the current session +({numref}`out-of-order-2`). This could also be done successfully in +future sessions if, and only if, you run the cells in the same unconventional +order. However, it is difficult to remember this unconventional order, and it +is not the order that others would expect your code to be executed in. Thus, in +the future, this would lead +to errors when the notebook is run in the conventional +linear order ({numref}`out-of-order-3`). + +```{figure} img/jupyter/out-of-order-1.png +--- +name: out-of-order-1 +--- +Code that was written out of order, but not yet executed. +``` + +```{figure} img/jupyter/out-of-order-2.png +--- +name: out-of-order-2 +--- +Code that was written out of order, and was executed using the run button in a +nonlinear order without error. The order of execution can be traced by +following the numbers to the left of the code cells; their order indicates the +order in which the cells were executed. +``` + ++++ + + +```{figure} img/jupyter/out-of-order-3.png +--- +name: out-of-order-3 +--- +Code that was written out of order, and was executed in a linear order using +"Restart Kernel and Run All Cells..." 
This resulted in an error at the +execution of the second code cell and it failed to run all code cells in the +notebook. +``` + + + +You can also accidentally create a nonfunctioning notebook by +creating an object in a cell that later gets deleted. In such a +scenario, that object only exists for that one particular Python session and will +not exist once the notebook is restarted and run again. If that +object was referenced in another cell in that notebook, an error +would occur when the notebook was run again in a new session. + +These events may not negatively affect the current Python session when +the code is being written; but as you might now see, they will likely lead to +errors when that notebook is run in a future session. Regularly executing +the entire notebook in a fresh Python session will help guard +against this. If you restart your session and new errors seem to pop up when +you run all of your cells in linear order, you can at least be aware that there +is an issue. Knowing this sooner rather than later will allow you to +fix the issue and ensure your notebook can be run linearly from start to finish. + +We recommend as a best practice to run the entire notebook in a fresh Python session +at least 2–3 times within any period of work. Note that, +critically, you *must do this in a fresh Python session* by restarting your kernel. +We recommend using either the **Kernel** >> +**Restart Kernel and Run All Cells...** command from the menu or the ⏭ +button in the toolbar. Note that the **Run** >> **Run All Cells** +menu item will not restart the kernel, and so it is not sufficient +to guard against these errors. + +### Best practices for including Python packages in notebooks + +Most data analyses these days depend on functions from external Python packages that +are not built into Python. One example is the `pandas` package that we +heavily rely on in this book. 
This package provides us access to functions like +`read_csv` for reading data, and `loc[]` for subsetting rows and columns. +We also use the `altair` package for creating high-quality graphics. + +As mentioned earlier in the book, external Python packages need to be loaded before +the functions they contain can be used. Our recommended way to do this is via +`import package_name`, and perhaps also to give it a shorter alias like +`import package_name as pn`. But where should this line of code be written in a +Jupyter notebook? One idea could be to load the library right before the +function is used in the notebook. However, although this technically works, this +causes hidden, or at least non-obvious, Python package dependencies when others view +or try to run the notebook. These hidden dependencies can lead to errors when +the notebook is executed on another computer if the needed Python packages are not +installed. Additionally, if the data analysis code takes a long time to run, +uncovering the hidden dependencies that need to be installed so that the +analysis can run without error can take a great deal of time. + +Therefore, we recommend you load all Python packages in a code cell near the top of +the Jupyter notebook. Loading all your packages at the start ensures that all +packages are loaded before their functions are called, assuming the notebook is +run in a linear order from top to bottom as recommended above. It also makes it +easy for others viewing or running the notebook to see what external Python packages +are used in the analysis, and hence, what packages they should install on +their computer to run the analysis successfully. + +### Summary of best practices for running a notebook + +1. Write code so that it can be executed in a linear order. + +2. 
As you write code in a Jupyter notebook, run the notebook in a linear order +and in its entirety often (2–3 times every work session) via the **Kernel** >> +**Restart Kernel and Run All Cells...** command from the Jupyter menu or the ⏭ +button in the toolbar. + +3. Write the code that loads external Python packages near the top of the Jupyter +notebook. + +## Exploring data files + +It is essential to preview data files before you try to read them into Python to see +whether or not there are column names, what the delimiters are, and if there are +lines you need to skip. In Jupyter, you preview data files stored as plain text +files (e.g., comma- and tab-separated files) in their plain text format ({numref}`open-data-w-editor-2`) by +right-clicking on the file's name in the Jupyter file explorer, selecting +**Open with**, and then selecting **Editor** ({numref}`open-data-w-editor-1`). +Suppose you do not specify to open +the data file with an editor. In that case, Jupyter will render a nice table +for you, and you will not be able to see the column delimiters, and therefore +you will not know which function to use, nor which arguments to use and values +to specify for them. + +```{figure} img/jupyter/open_data_w_editor_01.png +--- +name: open-data-w-editor-1 +--- +Opening data files with an editor in Jupyter. +``` + +```{figure} img/jupyter/open_data_w_editor_02.png +--- +name: open-data-w-editor-2 +--- +A data file as viewed in an editor in Jupyter. +``` + + + +## Exporting to a different file format + +```{index} Jupyter notebook; export +``` + +In Jupyter, viewing, editing and running Python code is done in the Jupyter notebook +file format with file extension `.ipynb`. This file format is not easy to open and +view outside of Jupyter. Thus, to share your analysis with people who do not +commonly use Jupyter, it is recommended that you export your executed analysis +as a more common file type, such as an `.html` file, or a `.pdf`. 
We recommend +exporting the Jupyter notebook after executing the analysis so that you can +also share the outputs of your code. Note, however, that your audience will not be +able to *run* your analysis using a `.html` or `.pdf` file. If you want your audience +to be able to reproduce the analysis, you must provide them with the `.ipynb` Jupyter notebook file. + +### Exporting to HTML +Exporting to `.html` will result in a shareable file that anyone can open +using a web browser (e.g., Firefox, Safari, Chrome, or Edge). The `.html` +output will produce a document that is visually similar to what the Jupyter notebook +looked like inside Jupyter. One point of caution here is that if there are +images in your Jupyter notebook, you will need to share the image files and the +`.html` file to see them. + +### Exporting to PDF +Exporting to `.pdf` will result in a shareable file that anyone can open +using many programs, including Adobe Acrobat, Preview, web browsers and many +more. The benefit of exporting to PDF is that it is a standalone document, +even if the Jupyter notebook included references to image files. +Unfortunately, the default settings will result in a document +that visually looks quite different from what the Jupyter notebook looked +like. The font, page margins, and other details will appear different in the `.pdf` output. + +## Creating a new Jupyter notebook + +At some point, you will want to create a new, fresh Jupyter notebook for your +own project instead of viewing, running or editing a notebook that was started +by someone else. To do this, navigate to the **Launcher** tab, and click on +the Python icon under the **Notebook** heading. If no **Launcher** tab is visible, +you can get a new one via clicking the **+** button at the top of the Jupyter +file explorer ({numref}`launcher`). 
+ +```{figure} img/jupyter/launcher-annotated.png +--- +name: launcher +--- +Clicking on the Python icon under the Notebook heading will create a new Jupyter notebook with a Python kernel. +``` + ++++ + +Once you have created a new Jupyter notebook, be sure to give it a descriptive +name, as the default file name is `Untitled.ipynb`. You can rename files by +first right-clicking on the file name of the notebook you just created, and +then clicking **Rename**. This will make +the file name editable. Use your keyboard to +change the name. Pressing `Enter` or clicking anywhere else in the Jupyter +interface will save the changed file name. + +We recommend not using white space or non-standard characters in file names. +Doing so will not prevent you from using that file in Jupyter. However, these +sorts of things become troublesome as you start to do more advanced data +science projects that involve repetition and automation. We recommend naming +files using lower case characters and separating words by a dash (`-`) or an +underscore (`_`). + +## Additional resources +- The [JupyterLab Documentation](https://jupyterlab.readthedocs.io/en/latest/) + is a good next place to look for more information about working in Jupyter + notebooks. This documentation goes into significantly more detail about all of + the topics we covered in this chapter, and covers more advanced topics as well. +- If you are keen to learn about the Markdown language for rich text + formatting, two good places to start are CommonMark's [Markdown + cheatsheet](https://commonmark.org/help/) and [Markdown + tutorial](https://commonmark.org/help/tutorial/). 
diff --git a/pull313/_sources/preface-text.md b/pull313/_sources/preface-text.md new file mode 100644 index 00000000..78148f79 --- /dev/null +++ b/pull313/_sources/preface-text.md @@ -0,0 +1,79 @@ +--- +jupytext: + cell_metadata_filter: -all + formats: md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +# Preface + +```{index} data science, auditable, reproducible +``` + + + +This textbook aims to be an approachable introduction to the world of data science. +In this book, we define **data science** as the process of generating +insight from data through **reproducible** and **auditable** processes. +If you analyze some data and give your analysis to a friend or colleague, they should +be able to re-run the analysis from start to finish and get the same result you did (*reproducibility*). +They should also be able to see and understand all the steps in the analysis, as well as the history of how +the analysis developed (*auditability*). Creating reproducible and auditable +analyses allows both you and others to easily double-check and validate your work. + +At a high level, in this book, you will learn how to + +1. identify common problems in data science, and +2. solve those problems with reproducible and auditable workflows. + +{numref}`preface-overview-fig` summarizes what you will learn in each chapter +of this book. Throughout, you will learn how to use the [Python programming language](https://www.python.org/) to perform +all the tasks associated with data analysis. You will +spend the first four chapters learning how to use Python to load, clean, wrangle +(i.e., restructure the data into a usable format) and visualize data +while answering descriptive and exploratory data analysis questions. 
In the next +six chapters, you will learn how to answer predictive, exploratory, and inferential +data analysis questions with common methods in data science, including +classification, regression, clustering, and estimation. +In the final chapters +you will learn how to combine Python code, formatted text, and images +in a single coherent document with Jupyter, use version control for +collaboration, and install and configure the software needed for data science +on your own computer. If you are reading this book as part of a course that you are +taking, the instructor may have set up all of these tools already for you; in this +case, you can continue on through the book reading the chapters in order. +But if you are reading this independently, you may want to jump to these last three chapters +early before going on to make sure your computer is set up in such a way that you can +try out the example code that we include throughout the book. + +```{figure} img/frontmatter/chapter_overview.jpeg +--- +height: 400px +name: preface-overview-fig +--- +Where are we going? +``` + + + +Each chapter in the book has an accompanying worksheet that provides exercises +to help you practice the concepts you will learn. We strongly recommend that you +work through the worksheet when you finish reading each chapter +before moving on to the next chapter. All of the worksheets +are available at +[https://worksheets.python.datasciencebook.ca](https://worksheets.python.datasciencebook.ca); +the "Exercises" section at the end of each chapter points you to the right worksheet for that chapter. +For each worksheet, you can either launch an interactive version of the worksheet in your browser by clicking the "launch binder" button, +or preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. 
This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. diff --git a/pull313/_sources/reading.md b/pull313/_sources/reading.md new file mode 100644 index 00000000..e461c241 --- /dev/null +++ b/pull313/_sources/reading.md @@ -0,0 +1,1596 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.5 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(reading)= +# Reading in data locally and from the web + + +## Overview + +```{index} see: loading; reading +``` + +```{index} reading; definition +``` + +In this chapter, you’ll learn to read tabular data of various formats into Python +from your local device (e.g., your laptop) and the web. “Reading” (or “loading”) +is the process of +converting data (stored as plain text, a database, HTML, etc.) into an object +(e.g., a data frame) that Python can easily access and manipulate. Thus reading data +is the gateway to any data analysis; you won’t be able to analyze data unless +you’ve loaded it first. And because there are many ways to store data, there +are similarly many ways to read data into Python. The more time you spend upfront +matching the data reading method to the type of data you have, the less time +you will have to devote to re-formatting, cleaning and wrangling your data (the +second step to all data analyses). It’s like making sure your shoelaces are +tied well before going for a run so that you don’t trip later on! + +## Chapter learning objectives +By the end of the chapter, readers will be able to do the following: + +- Define the following: + - absolute file path + - relative file path + - **U**niform **R**esource **L**ocator (URL) +- Read data into Python using an absolute path, relative path and a URL. 
+- Compare and contrast the following functions: + - `read_csv` + - `read_excel` +- Match the following `pandas` `read_csv` function arguments to their descriptions: + - `filepath_or_buffer` + - `sep` + - `names` + - `skiprows` +- Choose the appropriate `read_csv` function arguments to load a given plain text tabular data set into Python. +- Use the `rename` function to rename columns in a data frame. +- Use `pandas` package's `read_excel` function and arguments to load a sheet from an excel file into Python. +- Connect to a database using the `ibis` library's `connect` function. +- List the tables in a database using the `ibis` library's `list_tables` function. +- Create a reference to a database table using the `ibis` library's `table` function. +- Execute queries to bring data from a database into Python using the `ibis` library's `execute` function. +- Use `to_csv` to save a data frame to a `.csv` file. +- (*Optional*) Obtain data using **a**pplication **p**rogramming **i**nterfaces (APIs) and web scraping. + - Read/scrape data from an internet URL using the `BeautifulSoup` package. + - Read data from the NASA "Astronomy Picture of the Day" using the `requests` package. + - Compare downloading tabular data from a plain text file (e.g., `.csv`), accessing data from an API, and scraping the HTML source code from a website. + +## Absolute and relative file paths + +```{index} see: location; path +``` + +```{index} path; local, path; remote, path; relative, path; absolute +``` + +This chapter will discuss the different functions we can use to import data +into Python, but before we can talk about *how* we read the data into Python with these +functions, we first need to talk about *where* the data lives. When you load a +data set into Python, you first need to tell Python where those files live. The file +could live on your computer (*local*) or somewhere on the internet (*remote*). + +The place where the file lives on your computer is referred to as its "path". 
You can +think of the path as directions to the file. There are two kinds of paths: +*relative* paths and *absolute* paths. A relative path indicates where the file is +with respect to your *working directory* (i.e., "where you are currently") on the computer. +On the other hand, an absolute path indicates where the file is +with respect to the computer's filesystem base (or *root*) folder, regardless of where you are working. + +```{index} Happiness Report +``` + +Suppose our computer's filesystem looks like the picture in +{numref}`Filesystem`. We are working in a +file titled `worksheet_02.ipynb`, and our current working directory is `worksheet_02`; +typically, as is the case here, the working directory is the directory containing the file you are currently +working on. + +```{figure} img/reading/filesystem.jpeg +--- +height: 500px +name: Filesystem +--- +Example file system +``` + +Let's say we wanted to open the `happiness_report.csv` file. We have two options to indicate +where the file is: using a relative path, or using an absolute path. +The absolute path of the file always starts with a slash `/`—representing the root folder on the computer—and +proceeds by listing out the sequence of folders you would have to enter to reach the file, each separated by another slash `/`. +So in this case, `happiness_report.csv` would be reached by starting at the root, and entering the `home` folder, +then the `dsci-100` folder, then the `worksheet_02` folder, and then finally the `data` folder. So its absolute +path would be `/home/dsci-100/worksheet_02/data/happiness_report.csv`. We can load the file using its absolute path +as a string passed to the `read_csv` function from `pandas`. 
+```python +happy_data = pd.read_csv("/home/dsci-100/worksheet_02/data/happiness_report.csv") +``` +If we instead wanted to use a relative path, we would need to list out the sequence of steps needed to get from our current +working directory to the file, with slashes `/` separating each step. Since we are currently in the `worksheet_02` folder, +we just need to enter the `data` folder to reach our desired file. Hence the relative path is `data/happiness_report.csv`, +and we can load the file using its relative path as a string passed to `read_csv`. +```python +happy_data = pd.read_csv("data/happiness_report.csv") +``` +Note that there is no forward slash at the beginning of a relative path; if we accidentally typed `"/data/happiness_report.csv"`, +Python would look for a folder named `data` in the root folder of the computer—but that doesn't exist! + +Aside from specifying places to go in a path using folder names (like `data` and `worksheet_02`), we can also specify two additional +special places: the *current directory* and the *previous directory*. We indicate the current working directory with a single dot `.`, and +the previous directory with two dots `..`. So for instance, if we wanted to reach the `bike_share.csv` file from the `worksheet_02` folder, we could +use the relative path `../tutorial_01/bike_share.csv`. We can even combine these two; for example, we could reach the `bike_share.csv` file using +the (very silly) path `../tutorial_01/../tutorial_01/./bike_share.csv` with quite a few redundant directions: it says to go back a folder, then open `tutorial_01`, +then go back a folder again, then open `tutorial_01` again, then stay in the current directory, then finally get to `bike_share.csv`. Whew, what a long trip! + +So which kind of path should you use: relative, or absolute? Generally speaking, you should use relative paths. 
+Using a relative path helps ensure that your code can be run +on a different computer (and as an added bonus, relative paths are often shorter—easier to type!). +This is because a file's relative path is often the same across different computers, while a +file's absolute path (the names of +all of the folders between the computer's root, represented by `/`, and the file) isn't usually the same +across different computers. For example, suppose Fatima and Jayden are working on a +project together on the `happiness_report.csv` data. Fatima's file is stored at + +``` +/home/Fatima/project/data/happiness_report.csv +``` + +while Jayden's is stored at + +``` +/home/Jayden/project/data/happiness_report.csv +``` + +Even though Fatima and Jayden stored their files in the same place on their +computers (in their home folders), the absolute paths are different due to +their different usernames. If Jayden has code that loads the +`happiness_report.csv` data using an absolute path, the code won't work on +Fatima's computer. But the relative path from inside the `project` folder +(`data/happiness_report.csv`) is the same on both computers; any code that uses +relative paths will work on both! In the additional resources section, +we include a link to a short video on the +difference between absolute and relative paths. + +```{index} URL +``` + +Beyond files stored on your computer (i.e., locally), we also need a way to locate resources +stored elsewhere on the internet (i.e., remotely). For this purpose we use a +*Uniform Resource Locator (URL)*, i.e., a web address that looks something +like https://datasciencebook.ca/. URLs indicate the location of a resource on the internet, and +start with a web domain, followed by a forward slash `/`, and then a path +to where the resource is located on the remote machine. 
+ +## Reading tabular data from a plain text file into Python + +(readcsv)= +### `read_csv` to read in comma-separated values files + +```{index} csv, reading; separator, read function; read\_csv +``` + +Now that we have learned about *where* data could be, we will learn about *how* +to import data into Python using various functions. Specifically, we will learn how +to *read* tabular data from a plain text file (a document containing only text) +*into* Python and *write* tabular data to a file *out of* Python. The function we use to do this +depends on the file's format. For example, in the last chapter, we learned about using +the `read_csv` function from `pandas` when reading `.csv` (**c**omma-**s**eparated **v**alues) +files. In that case, the *separator* that divided our columns was a +comma (`,`). We only learned the case where the data matched the expected defaults +of the `read_csv` function +(column names are present, and commas are used as the separator between columns). +In this section, we will learn how to read +files that do not satisfy the default expectations of `read_csv`. + +```{index} Canadian languages; canlang data +``` + +Before we jump into the cases where the data aren't in the expected default format +for `pandas` and `read_csv`, let's revisit the more straightforward +case where the defaults hold, and the only argument we need to give to the function +is the path to the file, `data/can_lang.csv`. The `can_lang` data set contains +language data from the 2016 Canadian census. +We put `data/` before the file's +name when we are loading the data set because this data set is located in a +sub-folder, named `data`, relative to where we are running our Python code. +Here is what the text in the file `data/can_lang.csv` looks like. 
+ +```text +category,language,mother_tongue,most_at_home,most_at_work,lang_known +Aboriginal languages,"Aboriginal languages, n.o.s.",590,235,30,665 +Non-Official & Non-Aboriginal languages,Afrikaans,10260,4785,85,23415 +Non-Official & Non-Aboriginal languages,"Afro-Asiatic languages, n.i.e.",1150,44 +Non-Official & Non-Aboriginal languages,Akan (Twi),13460,5985,25,22150 +Non-Official & Non-Aboriginal languages,Albanian,26895,13135,345,31930 +Aboriginal languages,"Algonquian languages, n.i.e.",45,10,0,120 +Aboriginal languages,Algonquin,1260,370,40,2480 +Non-Official & Non-Aboriginal languages,American Sign Language,2685,3020,1145,21 +Non-Official & Non-Aboriginal languages,Amharic,22465,12785,200,33670 +``` + +```{index} pandas +``` + +And here is a review of how we can use `read_csv` to load it into Python. First we +load the `pandas` package to gain access to useful +functions for reading the data. + +```{code-cell} ipython3 +import pandas as pd +``` + +Next we use `read_csv` to load the data into Python, and in that call we specify the +relative path to the file. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +canlang_data = pd.read_csv("data/can_lang.csv") +canlang_data +``` + +### Skipping rows when reading in data + +Oftentimes, information about how data was collected, or other relevant +information, is included at the top of the data file. This information is +usually written in sentence and paragraph form, with no separator because it is +not organized into columns. An example of this is shown below. This information +gives the data scientist useful context and information about the data, +however, it is not well formatted or intended to be read into a data frame cell +along with the tabular data that follows later in the file. + +```text +Data source: https://ttimbers.github.io/canlang/ +Data originally published in: Statistics Canada Census of Population 2016. +Reproduced and distributed on an as-is basis with their permission. 
+category,language,mother_tongue,most_at_home,most_at_work,lang_known +Aboriginal languages,"Aboriginal languages, n.o.s.",590,235,30,665 +Non-Official & Non-Aboriginal languages,Afrikaans,10260,4785,85,23415 +Non-Official & Non-Aboriginal languages,"Afro-Asiatic languages, n.i.e.",1150,445,10,2775 +Non-Official & Non-Aboriginal languages,Akan (Twi),13460,5985,25,22150 +Non-Official & Non-Aboriginal languages,Albanian,26895,13135,345,31930 +Aboriginal languages,"Algonquian languages, n.i.e.",45,10,0,120 +Aboriginal languages,Algonquin,1260,370,40,2480 +Non-Official & Non-Aboriginal languages,American Sign Language,2685,3020,1145,21930 +Non-Official & Non-Aboriginal languages,Amharic,22465,12785,200,33670 +``` + +With this extra information being present at the top of the file, using +`read_csv` as we did previously does not allow us to correctly load the data +into Python. In the case of this file, Python just prints a `ParserError` +message, indicating that it wasn't able to read the file. + +```python +canlang_data = pd.read_csv("data/can_lang_meta-data.csv") +``` +```text +ParserError: Error tokenizing data. C error: Expected 1 fields in line 4, saw 6 +``` + +```{index} Error +``` + +```{index} read function; skiprows argument +``` + +To successfully read data like this into Python, the `skiprows` +argument can be useful to tell Python +how many rows to skip before +it should start reading in the data. In the example above, we would set this +value to 3 to read and load the data correctly. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +canlang_data = pd.read_csv("data/can_lang_meta-data.csv", skiprows=3) +canlang_data +``` + +How did we know to skip three rows? We looked at the data! The first three rows +of the data had information we didn't need to import: + +```text +Data source: https://ttimbers.github.io/canlang/ +Data originally published in: Statistics Canada Census of Population 2016. 
+Reproduced and distributed on an as-is basis with their permission. +``` + +The column names began at row 4, so we skipped the first three rows. + +### Using the `sep` argument for different separators + +Another common way data is stored is with tabs as the separator. Notice the +data file, `can_lang.tsv`, has tabs in between the columns instead of +commas. + +```text +category language mother_tongue most_at_home most_at_work lang_known +Aboriginal languages Aboriginal languages, n.o.s. 590 235 30 665 +Non-Official & Non-Aboriginal languages Afrikaans 10260 4785 85 23415 +Non-Official & Non-Aboriginal languages Afro-Asiatic languages, n.i.e. 1150 445 10 2775 +Non-Official & Non-Aboriginal languages Akan (Twi) 13460 5985 25 22150 +Non-Official & Non-Aboriginal languages Albanian 26895 13135 345 31930 +Aboriginal languages Algonquian languages, n.i.e. 45 10 0 120 +Aboriginal languages Algonquin 1260 370 40 2480 +Non-Official & Non-Aboriginal languages American Sign Language 2685 3020 1145 21930 +Non-Official & Non-Aboriginal languages Amharic 22465 12785 200 33670 +``` +```{index} read function; sep argument +``` + +```{index} see: tab-separated values; tsv +``` + +```{index} tsv, read function; read_tsv +``` + +To read in `.tsv` (**t**ab **s**eparated **v**alues) files, we can set the `sep` argument +in the `read_csv` function to the *tab character* `\t`. + +```{index} escape character +``` + +```{note} +`\t` is an example of an *escaped character*, +which always starts with a backslash (`\`). +Escaped characters are used to represent non-printing characters +(like the tab) or characters with special meanings (such as quotation marks). 
+``` + + +```{code-cell} ipython3 +:tags: ["output_scroll"] +canlang_data = pd.read_csv("data/can_lang.tsv", sep="\t") +canlang_data +``` + +If you compare the data frame here to the data frame we obtained in +{numref}`readcsv` using `read_csv`, you'll notice that they look identical: they have +the same number of columns and rows, the same column names, and the same entries! +So even though we needed to use different +arguments depending on the file format, our resulting data frame +(`canlang_data`) in both cases was the same. + +### Using the `header` argument to handle missing column names + +```{index} read function; header, reading; separator +``` + +The `can_lang_no_names.tsv` file contains a slightly different version +of this data set, except with no column names, and tabs for separators. +Here is how the file looks in a text editor: + +```text +Aboriginal languages Aboriginal languages, n.o.s. 590 235 30 665 +Non-Official & Non-Aboriginal languages Afrikaans 10260 4785 85 23415 +Non-Official & Non-Aboriginal languages Afro-Asiatic languages, n.i.e. 1150 445 10 2775 +Non-Official & Non-Aboriginal languages Akan (Twi) 13460 5985 25 22150 +Non-Official & Non-Aboriginal languages Albanian 26895 13135 345 31930 +Aboriginal languages Algonquian languages, n.i.e. 45 10 0 120 +Aboriginal languages Algonquin 1260 370 40 2480 +Non-Official & Non-Aboriginal languages American Sign Language 2685 3020 1145 21930 +Non-Official & Non-Aboriginal languages Amharic 22465 12785 200 33670 + +``` + +Data frames in Python need to have column names. Thus if you read in data +without column names, Python will assign names automatically. In this example, +Python assigns the column names `0, 1, 2, 3, 4, 5`. 
+To read this data into Python, we specify the first +argument as the path to the file (as done with `read_csv`), and then provide +values to the `sep` argument (here a tab, which we represent by `"\t"`), +and finally set `header = None` to tell `pandas` that the data file does not +contain its own column names. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +canlang_data = pd.read_csv( + "data/can_lang_no_names.tsv", + sep="\t", + header=None +) +canlang_data +``` + +```{index} pandas.DataFrame; rename, pandas +``` + +It is best to rename your columns manually in this scenario. The current column names +(`0, 1`, etc.) are problematic for two reasons: first, because they are not very descriptive names, which will make your analysis +confusing; and second, because your column names should generally be *strings*, but are currently *integers*. +To rename your columns, you can use the `rename` function +from the [pandas package](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.rename.html#). +The argument of the `rename` function is `columns`, which takes a mapping between the old column names and the new column names. +In this case, we want to rename the old columns (`0, 1, ..., 5`) in the `canlang_data` data frame to more descriptive names. + +To specify the mapping, we create a *dictionary*: a Python object that represents +a mapping from *keys* to *values*. We can create a dictionary by using a pair of curly +braces `{ }`, and inside the braces placing pairs of `key : value` separated by commas. +Below, we create a dictionary called `col_map` that maps the old column names in `canlang_data` to new column +names, and then pass it to the `rename` function. 
+ +```{code-cell} ipython3 +:tags: ["output_scroll"] +col_map = { + 0 : "category", + 1 : "language", + 2 : "mother_tongue", + 3 : "most_at_home", + 4 : "most_at_work", + 5 : "lang_known" +} +canlang_data_renamed = canlang_data.rename(columns = col_map) +canlang_data_renamed +``` + +```{index} read function; names argument +``` + +The column names can also be assigned to the data frame immediately upon reading it from the file by passing a +list of column names to the `names` argument in `read_csv`. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +canlang_data = pd.read_csv( + "data/can_lang_no_names.tsv", + sep="\t", + header=None, + names=[ + "category", + "language", + "mother_tongue", + "most_at_home", + "most_at_work", + "lang_known", + ], +) +canlang_data +``` + +### Reading tabular data directly from a URL + +```{index} URL; reading from +``` + +We can also use `read_csv` to read in data directly from a **U**niform **R**esource **L**ocator (URL) that +contains tabular data. Here, we provide the URL of a remote file +to `read_csv`, instead of a path to a local file on our +computer. We need to surround the URL with quotes similar to when we specify a +path on our local computer. All other arguments that we use are the same as +when using these functions with a local file on our computer. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +url = "https://raw.githubusercontent.com/UBC-DSCI/introduction-to-datascience-python/reading/source/data/can_lang.csv" +canlang_data = pd.read_csv(url) + +canlang_data +``` + +### Previewing a data file before reading it into Python + +In many of the examples above, we gave you previews of the data file before we read +it into Python. Previewing data is essential to see whether or not there are column +names, what the separators are, and if there are rows you need to skip. 
You +should do this yourself when trying to read in data files: open the file in whichever +text editor you prefer to inspect its contents prior to reading it into Python. + +## Reading tabular data from a Microsoft Excel file + +```{index} Excel spreadsheet +``` + +```{index} see: Microsoft Excel; Excel spreadsheet +``` + +```{index} see: xlsx; Excel spreadsheet +``` + +There are many other ways to store tabular data sets beyond plain text files, +and similarly, many ways to load those data sets into Python. For example, it is +very common to encounter, and need to load into Python, data stored as a Microsoft +Excel spreadsheet (with the file name +extension `.xlsx`). To be able to do this, a key thing to know is that even +though `.csv` and `.xlsx` files look almost identical when loaded into Excel, +the data themselves are stored completely differently. While `.csv` files are +plain text files, where the characters you see when you open the file in a text +editor are exactly the data they represent, this is not the case for `.xlsx` +files. Take a look at a snippet of what a `.xlsx` file would look like in a text editor: + ++++ + +```text +,?'O + _rels/.rels???J1??>E?{7? +?4'?|??hlIo??F +t 8f??3wn +????t??u"/ + %~Ed2??`: we will obtain only those rows corresponding to Aboriginal languages, and keep only +the `language` and `mother_tongue` columns. +We can use the `[]` operation with a logical statement +to obtain only certain rows. Below we filter the data to include only Aboriginal languages. + +```{index} database; filter, ibis; +``` + +```{code-cell} ipython3 +canlang_table_filtered = canlang_table[canlang_table["category"] == "Aboriginal languages"] +canlang_table_filtered +``` +Above you can see that we have not yet executed this command; `canlang_table_filtered` is just showing +the first part of our query (the part that starts with `Selection[r0]` above). +We didn't call `execute` because we are not ready to bring the data into Python yet. 
+We can still use the database to do some work to obtain *only* the small amount of data we want to work with locally +in Python. Let's add the second part of our SQL query: selecting only the `language` and `mother_tongue` columns. + +```{index} database; select, ibis; +``` + +```{code-cell} ipython3 +canlang_table_selected = canlang_table_filtered[["language", "mother_tongue"]] +canlang_table_selected +``` +Now you can see that the `ibis` query will have two steps: it will first find rows corresponding to +Aboriginal languages, then it will extract only the `language` and `mother_tongue` columns that we are interested in. +Let's actually execute the query now to bring the data into Python as a `pandas` data frame, and print the result. +```{code-cell} ipython3 +aboriginal_lang_data = canlang_table_selected.execute() +aboriginal_lang_data +``` + +`ibis` provides many more functions (not just the `[]` operation) +that you can use to manipulate the data within the database before calling +`execute` to obtain the data in Python. But `ibis` does not provide *every* function +that we need for analysis; we do eventually need to call `execute`. +For example, `ibis` does not provide the `tail` function to look at the last +rows in a database, even though `pandas` does. + +```{index} pandas.DataFrame; tail +``` + +```{code-cell} ipython3 +:tags: ["output_scroll"] +canlang_table_selected.tail(6) +``` + +```{code-cell} ipython3 +aboriginal_lang_data.tail(6) +``` + +So once you have finished your data wrangling of the database reference object, it is advisable to +bring it into Python as a `pandas` data frame using the `execute` function. +But be very careful using `execute`: databases are often *very* big, +and reading an entire table into Python might take a long time to run or even possibly +crash your machine. So make sure you select and filter the database table +to reduce the data to a reasonable size before using `execute` to read it into Python! 
+ +### Reading data from a PostgreSQL database + +```{index} database; PostgreSQL +``` + +PostgreSQL (also called Postgres) is a very popular +and open-source option for relational database software. +Unlike SQLite, +PostgreSQL uses a client–server database engine, as it was designed to be used +and accessed on a network. This means that you have to provide more information +to Python when connecting to Postgres databases. The additional information that you +need to include when you call the `connect` function is listed below: + +- `database`: the name of the database (a single PostgreSQL instance can host more than one database) +- `host`: the URL pointing to where the database is located (`localhost` if it is on your local machine) +- `port`: the communication endpoint between Python and the PostgreSQL database (usually `5432`) +- `user`: the username for accessing the database +- `password`: the password for accessing the database + +Below we demonstrate how to connect to a version of +the `can_mov_db` database, which contains information about Canadian movies. +Note that the `host` (`fakeserver.stat.ubc.ca`), `user` (`user0001`), and +`password` (`abc123`) below are *not real*; you will not actually +be able to connect to a database using this information. + +```python +conn = ibis.postgres.connect( + database = "can_mov_db", + host = "fakeserver.stat.ubc.ca", + port = 5432, + user = "user0001", + password = "abc123" +) +``` + +Aside from needing to provide that additional information, `ibis` makes it so +that connecting to and working with a Postgres database is identical to +connecting to and working with an SQLite database. For example, we can again use +`list_tables` to find out what tables are in the `can_mov_db` database: + +```python +conn.list_tables() +``` + +```text +["themes", "medium", "titles", "title_aliases", "forms", "episodes", "names", "names_occupations", "occupation", "ratings"] +``` + +We see that there are 10 tables in this database. 
Let's first look at the +`"ratings"` table to find the lowest rating that exists in the `can_mov_db` +database. + +```python +ratings_table = conn.table("ratings") +ratings_table +``` + +```text +AlchemyTable: ratings + title string + average_rating float64 + num_votes int64 +``` + +```{index} ibis; select +``` + +To find the lowest rating that exists in the data base, we first need to +select the `average_rating` column: + +```python +avg_rating = ratings_table[["average_rating"]] +avg_rating +``` + +```text +r0 := AlchemyTable: ratings + title string + average_rating float64 + num_votes int64 + +Selection[r0] + selections: + average_rating: r0.average_rating +``` + +```{index} database; order_by, ibis; head, ibis; ibis +``` + +Next we use the `order_by` function from `ibis` order the table by `average_rating`, +and then the `head` function to select the first row (i.e., the lowest score). + +```python +lowest = avg_rating.order_by("average_rating").head(1) +lowest.execute() +``` + +```{code-cell} ipython3 +:tags: ["remove-input"] +lowest = pd.DataFrame({"average_rating" : [1.0]}) +lowest +``` + + +We see the lowest rating given to a movie is 1, indicating that it must have +been a really bad movie... + +### Why should we bother with databases at all? + +```{index} database; reasons to use +``` + +Opening a database involved a lot more effort than just opening a `.csv`, or any of the +other plain text or Excel formats. We had to open a connection to the database, +then use `ibis` to translate `pandas`-like +commands (the `[]` operation, `head`, etc.) into SQL queries that the database +understands, and then finally `execute` them. And not all `pandas` commands can currently be translated +via `ibis` into database queries. So you might be wondering: why should we use +databases at all? + +Databases are beneficial in a large-scale setting: + +- They enable storing large data sets across multiple computers with backups. 
+- They provide mechanisms for ensuring data integrity and validating input. +- They provide security and data access control. +- They allow multiple users to access data simultaneously + and remotely without conflicts and errors. + For example, there are billions of Google searches conducted daily in 2021 {cite:p}`googlesearches`. + Can you imagine if Google stored all of the data + from those searches in a single `.csv` file!? Chaos would ensue! + +## Writing data from Python to a `.csv` file + +```{index} write function; to_csv, pandas.DataFrame; to_csv +``` + +At the middle and end of a data analysis, we often want to write a data frame +that has changed (through selecting columns, filtering rows, etc.) +to a file to share it with others or use it for another step in the analysis. +The most straightforward way to do this is to use the `to_csv` function +from the `pandas` package. The default +arguments are to use a comma (`,`) as the separator, and to include column names +in the first row. We also specify `index = False` to tell `pandas` not to print +row numbers in the `.csv` file. Below we demonstrate creating a new version of the Canadian +languages data set without the "Official languages" category according to the +Canadian 2016 Census, and then writing this to a `.csv` file: + +```{code-cell} ipython3 +no_official_lang_data = canlang_data[canlang_data["category"] != "Official languages"] +no_official_lang_data.to_csv("data/no_official_languages.csv", index=False) +``` + +## Obtaining data from the web + +```{note} +This section is not required reading for the remainder of the textbook. It +is included for those readers interested in learning a little bit more about +how to obtain different types of data from the web. +``` + +```{index} see: application programming interface; API +``` + +```{index} API +``` + +Data doesn't just magically appear on your computer; you need to get it from +somewhere. 
Earlier in the chapter we showed you how to access data stored in a +plain text, spreadsheet-like format (e.g., comma- or tab-separated) from a web +URL using the `read_csv` function from `pandas`. But as time goes on, it is +increasingly uncommon to find data (especially large amounts of data) in this +format available for download from a URL. Instead, websites now often offer +something known as an **a**pplication **p**rogramming **i**nterface (API), +which provides a programmatic way to ask for subsets of a data set. This allows +the website owner to control *who* has access to the data, *what portion* of +the data they have access to, and *how much* data they can access. Typically, +the website owner will give you a *token* or *key* (a secret string of characters +somewhat like a password) that you have to provide when accessing the API. + +```{index} web scraping, CSS, HTML +``` + +```{index} see: hypertext markup language; HTML +``` + +```{index} see: cascading style sheet; CSS +``` + +Another interesting thought: websites themselves *are* data! When you type a +URL into your browser window, your browser asks the *web server* (another +computer on the internet whose job it is to respond to requests for the +website) to give it the website's data, and then your browser translates that +data into something you can see. If the website shows you some information that +you're interested in, you could *create* a data set for yourself by copying and +pasting that information into a file. This process of taking information +directly from what a website displays is called +*web scraping* (or sometimes *screen scraping*). Now, of course, copying and pasting +information manually is a painstaking and error-prone process, especially when +there is a lot of information to gather. 
So instead of asking your browser to +translate the information that the web server provides into something you can +see, you can collect that data programmatically—in the form of +**h**yper**t**ext **m**arkup **l**anguage (HTML) and **c**ascading **s**tyle **s**heet (CSS) +code—and process it to extract useful information. HTML provides the +basic structure of a site and tells the webpage how to display the content +(e.g., titles, paragraphs, bullet lists etc.), whereas CSS helps style the +content and tells the webpage how the HTML elements should +be presented (e.g., colors, layouts, fonts etc.). + +This subsection will show you the basics of both web scraping +with the [`BeautifulSoup` Python package](https://beautiful-soup-4.readthedocs.io/en/latest/) {cite:p}`beautifulsoup` +and accessing the NASA "Astronomy Picture of the Day" API +using the [`requests` Python package](https://requests.readthedocs.io/en/latest/) {cite:p}`requests`. + ++++ + +### Web scraping + +#### HTML and CSS selectors + +```{index} web scraping, HTML; selector, CSS; selector, Craiglist +``` + +When you enter a URL into your browser, your browser connects to the +web server at that URL and asks for the *source code* for the website. +This is the data that the browser translates +into something you can see; so if we +are going to create our own data by scraping a website, we have to first understand +what that data looks like! For example, let's say we are interested +in knowing the average rental price (per square foot) of the most recently +available one-bedroom apartments in Vancouver +on [Craiglist](https://vancouver.craigslist.org). When we visit the Vancouver Craigslist +website and search for one-bedroom apartments, +we should see something similar to {numref}`fig:craigslist-human`. + ++++ + +```{figure} img/reading/craigslist_human.png +:name: fig:craigslist-human + +Craigslist webpage of advertisements for one-bedroom apartments. 
+``` + ++++ + +Based on what our browser shows us, it's pretty easy to find the size and price +for each apartment listed. But we would like to be able to obtain that information +using Python, without any manual human effort or copying and pasting. We do this by +examining the *source code* that the web server actually sent our browser to +display for us. We show a snippet of it below; the +entire source +is [included with the code for this book](https://github.com/UBC-DSCI/introduction-to-datascience-python/blob/main/source/data/website_source.txt): + +```html + + $800 + + 1br - + + (13768 108th Avenue) + + map + + + hide this posting + + + + restore + restore this posting + + $2285 + +``` + +Oof...you can tell that the source code for a web page is not really designed +for humans to understand easily. However, if you look through it closely, you +will find that the information we're interested in is hidden among the muck. +For example, near the top of the snippet +above you can see a line that looks like + +```html +$800 +``` + +That snippet is definitely storing the price of a particular apartment. With some more +investigation, you should be able to find things like the date and time of the +listing, the address of the listing, and more. So this source code most likely +contains all the information we are interested in! + +```{index} HTML; tag +``` + +Let's dig into that line above a bit more. You can see that +that bit of code has an *opening tag* (words between `<` and `>`, like +``) and a *closing tag* (the same with a slash, like ``). HTML +source code generally stores its data between opening and closing tags like +these. Tags are keywords that tell the web browser how to display or format +the content. Above you can see that the information we want (`$800`) is stored +between an opening and closing tag (`` and ``). 
In the opening +tag, you can also see a very useful "class" (a special word that is sometimes +included with opening tags): `class="result-price"`. Since we want R to +programmatically sort through all of the source code for the website to find +apartment prices, maybe we can look for all the tags with the `"result-price"` +class, and grab the information between the opening and closing tag. Indeed, +take a look at another line of the source snippet above: + +```html +$2285 +``` + +It's yet another price for an apartment listing, and the tags surrounding it +have the `"result-price"` class. Wonderful! Now that we know what pattern we +are looking for—a dollar amount between opening and closing tags that have the +`"result-price"` class—we should be able to use code to pull out all of the +matching patterns from the source code to obtain our data. This sort of "pattern" +is known as a *CSS selector* (where CSS stands for **c**ascading **s**tyle **s**heet). + +The above was a simple example of "finding the pattern to look for"; many +websites are quite a bit larger and more complex, and so is their website +source code. Fortunately, there are tools available to make this process +easier. For example, +[SelectorGadget](https://selectorgadget.com/) is +an open-source tool that simplifies identifying the generating +and finding of CSS selectors. +At the end of the chapter in the additional resources section, we include a link to +a short video on how to install and use the SelectorGadget tool to +obtain CSS selectors for use in web scraping. +After installing and enabling the tool, you can click the +website element for which you want an appropriate selector. For +example, if we click the price of an apartment listing, we +find that SelectorGadget shows us the selector `.result-price` +in its toolbar, and highlights all the other apartment +prices that would be obtained using that selector ({numref}`fig:sg1`). 
+ +```{figure} img/reading/sg1.png +:name: fig:sg1 + +Using the SelectorGadget on a Craigslist webpage to obtain the CCS selector useful for obtaining apartment prices. +``` + +If we then click the size of an apartment listing, SelectorGadget shows us +the `span` selector, and highlights many of the lines on the page; this indicates that the +`span` selector is not specific enough to capture only apartment sizes ({numref}`fig:sg3`). + +```{figure} img/reading/sg3.png +:name: fig:sg3 + +Using the SelectorGadget on a Craigslist webpage to obtain a CCS selector useful for obtaining apartment sizes. +``` + +To narrow the selector, we can click one of the highlighted elements that +we *do not* want. For example, we can deselect the "pic/map" links, +resulting in only the data we want highlighted using the `.housing` selector ({numref}`fig:sg2`). + +```{figure} img/reading/sg2.png +:name: fig:sg2 + +Using the SelectorGadget on a Craigslist webpage to refine the CCS selector to one that is most useful for obtaining apartment sizes. +``` + +So to scrape information about the square footage and rental price +of apartment listings, we need to use +the two CSS selectors `.housing` and `.result-price`, respectively. +The selector gadget returns them to us as a comma-separated list (here +`.housing , .result-price`), which is exactly the format we need to provide to +Python if we are using more than one CSS selector. + +**Caution: are you allowed to scrape that website?** + +```{index} web scraping; permission +``` + ++++ + +*Before* scraping data from the web, you should always check whether or not +you are *allowed* to scrape it! There are two documents that are important +for this: the `robots.txt` file and the Terms of Service +document. 
If we take a look at [Craigslist's Terms of Service document](https://www.craigslist.org/about/terms.of.use), +we find the following text: *"You agree not to copy/collect CL content +via robots, spiders, scripts, scrapers, crawlers, or any automated or manual equivalent (e.g., by hand)."* +So unfortunately, without explicit permission, we are not allowed to scrape the website. + +```{index} Wikipedia +``` + +What to do now? Well, we *could* ask the owner of Craigslist for permission to scrape. +However, we are not likely to get a response, and even if we did they would not likely give us permission. +The more realistic answer is that we simply cannot scrape Craigslist. If we still want +to find data about rental prices in Vancouver, we must go elsewhere. +To continue learning how to scrape data from the web, let's instead +scrape data on the population of Canadian cities from Wikipedia. +We have checked the [Terms of Service document](https://foundation.wikimedia.org/wiki/Terms_of_Use/en), +and it does not mention that web scraping is disallowed. +We will use the SelectorGadget tool to pick elements that we are interested in +(city names and population counts) and deselect others to indicate that we are not +interested in them (province names), as shown in {numref}`fig:sg4`. + +```{figure} img/reading/sg4.png +:name: fig:sg4 + +Using the SelectorGadget on a Wikipedia webpage. +``` + +We include a link to a short video tutorial on this process at the end of the chapter +in the additional resources section. SelectorGadget provides in its toolbar +the following list of CSS selectors to use: + +```text +td:nth-child(8) , +td:nth-child(4) , +.largestCities-cell-background+ td a +``` + +Now that we have the CSS selectors that describe the properties of the elements +that we want to target, we can use them to find certain elements in web pages and extract data. 
+ + +#### Scraping with `BeautifulSoup` + +```{index} BeautifulSoup, requests +``` + +We will use the `requests` and `BeautifulSoup` Python packages to scrape data +from the Wikipedia page. After loading those packages, we tell Python which +page we want to scrape by providing its URL in quotations to the `requests.get` +function. This function obtains the raw HTML of the page, which we then +pass to the `BeautifulSoup` function for parsing: + +```python +import requests +import bs4 + +wiki = requests.get("https://en.wikipedia.org/wiki/Canada") +page = bs4.BeautifulSoup(wiki.content, "html.parser") +``` + +```{code-cell} ipython3 +:tags: [remove-cell] +import bs4 + +# the above cell doesn't actually run; this one does run +# and loads the html data from a local, static file + +with open("data/canada_wiki.html", "r") as f: + wiki_hidden = f.read() +page = bs4.BeautifulSoup(wiki_hidden, "html.parser") +``` + +The `requests.get` function downloads the HTML source code for the page at the +URL you specify, just like your browser would if you navigated to this site. +But instead of displaying the website to you, the `requests.get` function just +returns the HTML source code itself—stored in the `wiki.content` +variable—which we then parse using `BeautifulSoup` and store in the +`page` variable. Next, we pass the CSS selectors we obtained from +SelectorGadget to the `select` method of the `page` object. Make sure to +surround the selectors with quotation marks; `select` expects that argument is +a string. We store the result of the `select` function in the `population_nodes` +variable. Note that `select` returns a list; below we slice the list to +print only the first 5 elements for clarity. 
+ +```{code-cell} ipython3 +population_nodes = page.select( + "td:nth-child(8) , td:nth-child(4) , .largestCities-cell-background+ td a" +) +population_nodes[:5] +``` + +Each of the items in the `population_nodes` list is a *node* from the HTML document that matches the CSS +selectors you specified. A *node* is an HTML tag pair (e.g., `` and `` +which defines the cell of a table) combined with the content stored between the +tags. For our CSS selector `td:nth-child(4)`, an example node that would be +selected would be: + +```html + +London + +``` + +Next, we extract the meaningful data—in other words, we get rid of the +HTML code syntax and tags—from the nodes using the `get_text` function. +In the case of the example node above, `get_text` function returns `"London"`. +Once again we show only the first 5 elements for clarity. + +```{code-cell} ipython3 +[row.get_text() for row in population_nodes[:5]] +``` + +Fantastic! We seem to have extracted the data of interest from the raw HTML +source code. But we are not quite done; the data is not yet in an optimal +format for data analysis. Both the city names and population are encoded as +characters in a single vector, instead of being in a data frame with one +character column for city and one numeric column for population (like a +spreadsheet). Additionally, the populations contain commas (not useful for +programmatically dealing with numbers), and some even contain a line break +character at the end (`\n`). In {numref}`Chapter %s `, we will learn +more about how to *wrangle* data such as this into a more useful format for +data analysis using Python. + ++++ + +#### Scraping with `read_html` + +Using `requests` and `BeautifulSoup` to extract data based on CSS selectors is +a very general way to scrape data from the web, albeit perhaps a little bit +complicated. 
Fortunately, `pandas` provides the +[`read_html`](https://pandas.pydata.org/docs/reference/api/pandas.read_html.html) +function, which is easier method to try when the data +appear on the webpage already in a tabular format. The `read_html` function takes one +argument—the URL of the page to scrape—and will return a list of +data frames corresponding to all the tables it finds at that URL. We can see +below that `read_html` found 17 tables on the Wikipedia page for Canada. + +```python +canada_wiki_tables = pd.read_html("https://en.wikipedia.org/wiki/Canada") +len(canada_wiki_tables) +``` + +```{code-cell} ipython3 +:tags: [remove-input] +canada_wiki_tables = pd.read_html("data/canada_wiki.html") +len(canada_wiki_tables) +``` + +After manually searching through these, we find that the table containing the +population counts of the largest metropolitan areas in Canada is contained in +index 1. We use the `droplevel` method to simplify the column names in the resulting +data frame: + +```{code-cell} ipython3 +canada_wiki_df = canada_wiki_tables[1] +canada_wiki_df.columns = canada_wiki_df.columns.droplevel() +canada_wiki_df +``` + +Once again, we have managed to extract the data of interest from the raw HTML +source code—but this time using the convenient `read_html` function, +without needing to explicitly use CSS selectors! However, once again, we still +need to do some cleaning of this result. Referring back to {numref}`fig:sg4`, +we can see that the table is formatted with two sets of columns (e.g., `Name` +and `Name.1`) that we will need to somehow merge. In {numref}`Chapter %s +`, we will learn more about how to *wrangle* data into a useful +format for data analysis. + +### Using an API + +```{index} API +``` + +Rather than posting a data file at a URL for you to download, many websites +these days provide an API that can be accessed through a programming language +like Python. 
The benefit of using an API is that data owners have much more control +over the data they provide to users. However, unlike web scraping, there is no +consistent way to access an API across websites. Every website typically has +its own API designed especially for its own use case. Therefore we will just +provide one example of accessing data through an API in this book, with the +hope that it gives you enough of a basic idea that you can learn how to use +another API if needed. In particular, in this book we will show you the basics +of how to use the `requests` package in Python to access data from the NASA "Astronomy Picture +of the Day" API (a great source of desktop backgrounds, by the way—take a look at the stunning +picture of the Rho-Ophiuchi cloud complex in {numref}`fig:NASA-API-Rho-Ophiuchi` from July 13, 2023!). + +```{index} API; requests, NASA, API; token; key +``` + +```{figure} img/reading/NASA-API-Rho-Ophiuchi.png +:name: fig:NASA-API-Rho-Ophiuchi +:width: 400px + +The James Webb Space Telescope's NIRCam image of the Rho Ophiuchi molecular cloud complex {cite:p}`rhoophiuchi`. +``` + ++++ + +First, you will need to visit the [NASA APIs page](https://api.nasa.gov/) and generate an API key (i.e., a password used to identify you when accessing the API). +Note that a valid email address is required to +associate with the key. The signup form looks something like {numref}`fig:NASA-API-signup`. +After filling out the basic information, you will receive the token via email. +Make sure to store the key in a safe place, and keep it private. + + +```{figure} img/reading/NASA-API-signup.png +:name: fig:NASA-API-signup + +Generating the API access token for the NASA API. +``` + +**Caution: think about your API usage carefully!** + +When you access an API, you are initiating a transfer of data from a web server +to your computer. Web servers are expensive to run and do not have infinite resources. 
+If you try to ask for *too much data* at once, you can use up a huge amount of the server's bandwidth. +If you try to ask for data *too frequently*—e.g., if you +make many requests to the server in quick succession—you can also bog the server down and make +it unable to talk to anyone else. Most servers have mechanisms to revoke your access if you are not +careful, but you should try to prevent issues from happening in the first place by being extra careful +with how you write and run your code. You should also keep in mind that when a website owner +grants you API access, they also usually specify a limit (or *quota*) of how much data you can ask for. +Be careful not to overrun your quota! So *before* we try to use the API, we will first visit +[the NASA website](https://api.nasa.gov/) to see what limits we should abide by when using the API. +These limits are outlined in {numref}`fig:NASA-API-limits`. + +```{figure} img/reading/NASA-API-limits.png +:name: fig:NASA-API-limits + +The NASA website specifies an hourly limit of 1,000 requests. +``` + +After checking the NASA website, it seems like we can send at most 1,000 requests per hour. +That should be more than enough for our purposes in this section. + ++++ + +#### Accessing the NASA API + +The NASA API is what is known as an *HTTP API*: this is a particularly common +kind of API, where you can obtain data simply by accessing a +particular URL as if it were a regular website. To make a query to the NASA +API, we need to specify three things. First, we specify the URL *endpoint* of +the API, which is simply a URL that helps the remote server understand which +API you are trying to access. NASA offers a variety of APIs, each with its own +endpoint; in the case of the NASA "Astronomy Picture of the Day" API, the URL +endpoint is `https://api.nasa.gov/planetary/apod`. Second, we write `?`, which denotes that a +list of *query parameters* will follow. 
And finally, we specify a list of +query parameters of the form `parameter=value`, separated by `&` characters. The NASA +"Astronomy Picture of the Day" API accepts the parameters shown in +{numref}`fig:NASA-API-parameters`. + +```{figure} img/reading/NASA-API-parameters.png +:name: fig:NASA-API-parameters + +The set of parameters that you can specify when querying the NASA "Astronomy Picture of the Day" API, +along with syntax, default settings, and a description of each. +``` + +So for example, to obtain the image of the day +from July 13, 2023, the API query would have two parameters: `api_key=YOUR_API_KEY` +and `date=2023-07-13`. Remember to replace `YOUR_API_KEY` with the API key you +received from NASA in your email! Putting it all together, the query will look like the following: +``` +https://api.nasa.gov/planetary/apod?api_key=YOUR_API_KEY&date=2023-07-13 +``` +If you try putting this URL into your web browser, you'll actually find that the server +responds to your request with some text: + +```json +{"date":"2023-07-13","explanation":"A mere 390 light-years away, Sun-like stars +and future planetary systems are forming in the Rho Ophiuchi molecular cloud +complex, the closest star-forming region to our fair planet. The James Webb +Space Telescope's NIRCam peered into the nearby natal chaos to capture this +infrared image at an inspiring scale. The spectacular cosmic snapshot was +released to celebrate the successful first year of Webb's exploration of the +Universe. The frame spans less than a light-year across the Rho Ophiuchi region +and contains about 50 young stars. Brighter stars clearly sport Webb's +characteristic pattern of diffraction spikes. Huge jets of shocked molecular +hydrogen blasting from newborn stars are red in the image, with the large, +yellowish dusty cavity carved out by the energetic young star near its center. 
+Near some stars in the stunning image are shadows cast by their protoplanetary +disks.","hdurl":"https://apod.nasa.gov/apod/image/2307/STScI-01_RhoOph.png", +"media_type":"image","service_version":"v1","title":"Webb's +Rho Ophiuchi","url":"https://apod.nasa.gov/apod/image/2307/STScI-01_RhoOph1024.png"} +``` + +Neat! There is definitely some data there, but it's a bit hard to +see what it all is. As it turns out, this is a common format for data called +*JSON* (JavaScript Object Notation). We won't encounter this kind of data much in this book, +but for now you can interpret this data just like +you'd interpret a Python dictionary: these are `key : value` pairs separated by +commas. For example, if you look closely, you'll see that the first entry is +`"date":"2023-07-13"`, which indicates that we indeed successfully received +data corresponding to July 13, 2023. + +So now our job is to do all of this programmatically in Python. We will load +the `requests` package, and make the query using the `get` function, which takes a single URL argument; +you will recognize the same query URL that we pasted into the browser earlier. +We will then obtain a JSON representation of the +response using the `json` method. + + +```python +import requests + +nasa_data_single = requests.get( + "https://api.nasa.gov/planetary/apod?api_key=YOUR_API_KEY&date=2023-07-13" + ).json() +nasa_data_single +``` + +```{code-cell} ipython3 +:tags: [remove-input] +import json +with open("data/nasa.json", "r") as f: + nasa_data = json.load(f) +# the last entry in the stored data is July 13, 2023, so print that +nasa_data[-1] +``` + +We can obtain more records at once by using the `start_date` and `end_date` parameters, as +shown in the table of parameters in {numref}`fig:NASA-API-parameters`. +Let's obtain all the records between May 1, 2023, and July 13, 2023, and store the result +in an object called `nasa_data`; now the response +will take the form of a Python list. 
Each item in the list will correspond to a single day's record (just like the `nasa_data_single` object), +and there will be 74 items total, one for each day between the start and end dates: + +```python +nasa_data = requests.get( + "https://api.nasa.gov/planetary/apod?api_key=YOUR_API_KEY&start_date=2023-05-01&end_date=2023-07-13" + ).json() +len(nasa_data) +``` + +```{code-cell} ipython3 +:tags: [remove-input] +len(nasa_data) +``` + +For further data processing using the techniques in this book, you'll need to turn this list of dictionaries +into a `pandas` data frame. Here we will extract the `date`, `title`, `copyright`, and `url` variables +from the JSON data, and construct a `pandas` DataFrame using the extracted information. + +```{note} +Understanding this code is not required for the remainder of the textbook. It is included for those +readers who would like to parse JSON data into a `pandas` data frame in their own data analyses. +``` + +```{code-cell} ipython3 +data_dict = { + "date":[], + "title": [], + "copyright" : [], + "url": [] +} + +for item in nasa_data: + if "copyright" not in item: + item["copyright"] = None + for entry in ["url", "title", "date", "copyright"]: + data_dict[entry].append(item[entry]) + +nasa_df = pd.DataFrame(data_dict) +nasa_df +``` + +Success—we have created a small data set using the NASA +API! This data is also quite different from what we obtained from web scraping; +the extracted information is readily available in a JSON format, as opposed to raw +HTML code (although not *every* API will provide data in such a nice format). +From this point onward, the `nasa_df` data frame is stored on your +machine, and you can play with it to your heart's content. For example, you can use +`pandas.to_csv` to save it to a file and `pandas.read_csv` to read it into Python again later; +and after reading the next few chapters you will have the skills to +do even more interesting things! 
+If you decide that you want +to ask any of the various NASA APIs for more data +(see [the list of awesome NASA APIs here](https://api.nasa.gov/) +for more examples of what is possible), just be mindful as usual about how much +data you are requesting and how frequently you are making requests. + ++++ + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Reading in data locally and from the web" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. + +## Additional resources + +- The [`pandas` documentation](https://pandas.pydata.org/docs/getting_started/index.html) + provides the documentation for the functions we cover in this chapter. + It is where you should look if you want to learn more about these functions, the + full set of arguments you can use, and other related functions. +- Sometimes you might run into data in such poor shape that the reading + functions we cover in this chapter do not work. In that case, you can consult the + [data loading chapter](https://wesmckinney.com/book/accessing-data.html#io_flat_files) + from [*Python for Data Analysis*](https://wesmckinney.com/book/) {cite:p}`mckinney2012python`, which goes into a lot + more detail about how Python parses text from files into data frames. +- A [video](https://www.youtube.com/embed/ephId3mYu9o) from the Udacity + course *Linux Command Line Basics* provides a good explanation of absolute versus relative paths. 
+- If you read the subsection on obtaining data from the web via scraping and + APIs, we provide two companion tutorial video links for how to use the + SelectorGadget tool to obtain desired CSS selectors for: + - [extracting the data for apartment listings on Craigslist](https://www.youtube.com/embed/YdIWI6K64zo), and + - [extracting Canadian city names and populations from Wikipedia](https://www.youtube.com/embed/O9HKbdhqYzk). + +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/regression1.md b/pull313/_sources/regression1.md new file mode 100644 index 00000000..e20fe67c --- /dev/null +++ b/pull313/_sources/regression1.md @@ -0,0 +1,1196 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.5 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(regression1)= +# Regression I: K-nearest neighbors + +```{code-cell} ipython3 +:tags: [remove-cell] + +from chapter_preamble import * +from IPython.display import HTML +import plotly.express as px +import plotly.graph_objects as go +``` + +## Overview + +This chapter continues our foray into answering predictive questions. +Here we will focus on predicting *numerical* variables +and will use *regression* to perform this task. +This is unlike the past two chapters, which focused on predicting categorical +variables via classification. However, regression does have many similarities +to classification: for example, just as in the case of classification, +we will split our data into training, validation, and test sets, we will +use `scikit-learn` workflows, we will use a K-nearest neighbors (KNN) +approach to make predictions, and we will use cross-validation to choose K. 
+Because of how similar these procedures are, make sure to read +{numref}`Chapters %s ` and {numref}`%s ` before reading +this one—we will move a little bit faster here with the +concepts that have already been covered. +This chapter will primarily focus on the case where there is a single predictor, +but the end of the chapter shows how to perform +regression with more than one predictor variable, i.e., *multivariable regression*. +It is important to note that regression +can also be used to answer inferential and causal questions, +however that is beyond the scope of this book. + ++++ + +## Chapter learning objectives +By the end of the chapter, readers will be able to do the following: + +* Recognize situations where a simple regression analysis would be appropriate for making predictions. +* Explain the K-nearest neighbor (KNN) regression algorithm and describe how it differs from KNN classification. +* Interpret the output of a KNN regression. +* In a data set with two or more variables, perform K-nearest neighbor regression in Python using a `scikit-learn` workflow. +* Execute cross-validation in Python to choose the number of neighbors. +* Evaluate KNN regression prediction accuracy in Python using a test data set and the root mean squared prediction error (RMSPE). +* In the context of KNN regression, compare and contrast goodness of fit and prediction properties (namely RMSE vs RMSPE). +* Describe the advantages and disadvantages of K-nearest neighbors regression. + ++++ + +## The regression problem + +```{index} predictive question, response variable +``` + +Regression, like classification, is a predictive problem setting where we want +to use past information to predict future observations. But in the case of +regression, the goal is to predict *numerical* values instead of *categorical* values. +The variable that you want to predict is often called the *response variable*. 
+For example, we could try to use the number of hours a person spends on +exercise each week to predict their race time in the annual Boston marathon. As +another example, we could try to use the size of a house to +predict its sale price. Both of these response variables—race time and sale price—are +numerical, and so predicting them given past data is considered a regression problem. + +```{index} classification; comparison to regression +``` + +Just like in the classification setting, there are many possible methods that we can use +to predict numerical response variables. In this chapter we will +focus on the **K-nearest neighbors** algorithm {cite:p}`knnfix,knncover`, and in the next chapter +we will study **linear regression**. +In your future studies, you might encounter regression trees, splines, +and general local regression methods; see the additional resources +section at the end of the next chapter for where to begin learning more about +these other methods. + +Many of the concepts from classification map over to the setting of regression. For example, +a regression model predicts a new observation's response variable based on the response variables +for similar observations in the data set of past observations. When building a regression model, +we first split the data into training and test sets, in order to ensure that we assess the performance +of our method on observations not seen during training. And finally, we can use cross-validation to evaluate different +choices of model parameters (e.g., K in a K-nearest neighbors model). The major difference +is that we are now predicting numerical variables instead of categorical variables. 
+ +```{index} categorical variable, numerical variable +``` + +```{note} +You can usually tell whether a variable is numerical or +categorical—and therefore whether you need to perform regression or +classification—by taking two response variables X and Y from your data, +and asking the question, "is response variable X *more* than response +variable Y?" If the variable is categorical, the question will make no sense. +(Is blue more than red? Is benign more than malignant?) If the variable is +numerical, it will make sense. (Is 1.5 hours more than 2.25 hours? Is +\$500,000 more than \$400,000?) Be careful when applying this heuristic, +though: sometimes categorical variables will be encoded as numbers in your +data (e.g., "1" represents "benign", and "0" represents "malignant"). In +these cases you have to ask the question about the *meaning* of the labels +("benign" and "malignant"), not their values ("1" and "0"). +``` + ++++ + +## Exploring a data set + +```{index} Sacramento real estate, question; regression +``` + +In this chapter and the next, we will study +a data set of +[932 real estate transactions in Sacramento, California](https://support.spatialkey.com/spatialkey-sample-csv-data/) +originally reported in the *Sacramento Bee* newspaper. +We first need to formulate a precise question that +we want to answer. In this example, our question is again predictive: +Can we use the size of a house in the Sacramento, CA area to predict +its sale price? A rigorous, quantitative answer to this question might help +a realtor advise a client as to whether the price of a particular listing +is fair, or perhaps how to set the price of a new listing. +We begin the analysis by loading and examining the data, +as well as setting the seed value. 
+ +```{code-cell} ipython3 +import altair as alt +import numpy as np +import pandas as pd +from sklearn.model_selection import GridSearchCV, train_test_split +from sklearn.compose import make_column_transformer +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler +from sklearn import set_config + +# Output dataframes instead of arrays +set_config(transform_output="pandas") + +np.random.seed(10) + +sacramento = pd.read_csv("data/sacramento.csv") +sacramento +``` + +```{index} altair; mark_circle, visualization; scatter +``` + +The scientific question guides our initial exploration: the columns in the +data that we are interested in are `sqft` (house size, in livable square feet) +and `price` (house sale price, in US dollars (USD)). The first step is to visualize +the data as a scatter plot where we place the predictor variable +(house size) on the x-axis, and we place the response variable that we +want to predict (sale price) on the y-axis. + +```{note} +Given that the y-axis unit is dollars in {numref}`fig:07-edaRegr`, +we format the axis labels to put dollar signs in front of the house prices, +as well as commas to increase the readability of the larger numbers. +We can do this in `altair` by passing the `axis=alt.Axis(format="$,.0f")` argument +to the `y` encoding channel in an `altair` specification. +``` + +```{code-cell} ipython3 +:tags: [remove-output] + +scatter = alt.Chart(sacramento).mark_circle().encode( + x=alt.X("sqft") + .scale(zero=False) + .title("House size (square feet)"), + y=alt.Y("price") + .axis(format="$,.0f") + .title("Price (USD)") +) + +scatter +``` + +```{code-cell} ipython3 +:tags: [remove-cell] +glue("fig:07-edaRegr", scatter) +``` + +:::{glue:figure} fig:07-edaRegr +:name: fig:07-edaRegr + +Scatter plot of price (USD) versus house size (square feet). +::: + ++++ + +The plot is shown in {numref}`fig:07-edaRegr`. 
+We can see that in Sacramento, CA, as the +size of a house increases, so does its sale price. Thus, we can reason that we +may be able to use the size of a not-yet-sold house (for which we don't know +the sale price) to predict its final sale price. Note that we do not suggest here +that a larger house size *causes* a higher sale price; just that house price +tends to increase with house size, and that we may be able to use the latter to +predict the former. + ++++ + +## K-nearest neighbors regression + +```{index} K-nearest neighbors; regression +``` + +Much like in the case of classification, +we can use a K-nearest neighbors-based +approach in regression to make predictions. +Let's take a small sample of the data in {numref}`fig:07-edaRegr` +and walk through how K-nearest neighbors (KNN) works +in a regression context before we dive in to creating our model and assessing +how well it predicts house sale price. This subsample is taken to allow us to +illustrate the mechanics of KNN regression with a few data points; later in +this chapter we will use all the data. + +```{index} pandas.DataFrame; sample +``` + +To take a small random sample of size 30, we'll use the +`sample` method on the `sacramento` data frame, specifying +that we want to select `n=30` rows. + +```{code-cell} ipython3 +small_sacramento = sacramento.sample(n=30) +``` + +Next let's say we come across a 2,000 square-foot house in Sacramento we are +interested in purchasing, with an advertised list price of \$350,000. Should we +offer to pay the asking price for this house, or is it overpriced and we should +offer less? Absent any other information, we can get a sense for a good answer +to this question by using the data we have to predict the sale price given the +sale prices we have already observed. But in {numref}`fig:07-small-eda-regr`, +you can see that we have no +observations of a house of size *exactly* 2,000 square feet. How can we predict +the sale price? 
+ +```{code-cell} ipython3 +:tags: [remove-output] + +small_plot = alt.Chart(small_sacramento).mark_circle().encode( + x=alt.X("sqft") + .scale(zero=False) + .title("House size (square feet)"), + y=alt.Y("price") + .axis(format="$,.0f") + .title("Price (USD)") +) + +# add an overlay to the base plot +line_df = pd.DataFrame({"x": [2000]}) +rule = alt.Chart(line_df).mark_rule(strokeDash=[2, 4]).encode(x="x") + +small_plot + rule +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:07-small-eda-regr", (small_plot + rule)) +``` + +:::{glue:figure} fig:07-small-eda-regr +:name: fig:07-small-eda-regr + +Scatter plot of price (USD) versus house size (square feet) with vertical line indicating 2,000 square feet on x-axis. +::: + ++++ + +```{index} pandas.DataFrame; assign, pandas.DataFrame; head, pandas.DataFrame; sort_values, abs +``` + +We will employ the same intuition from {numref}`Chapters %s ` and {numref}`%s `, and use the +neighboring points to the new point of interest to suggest/predict what its +sale price might be. +For the example shown in {numref}`fig:07-small-eda-regr`, +we find and label the 5 nearest neighbors to our observation +of a house that is 2,000 square feet. 
+ +```{index} nsmallest +``` + +```{code-cell} ipython3 +small_sacramento["dist"] = (2000 - small_sacramento["sqft"]).abs() +nearest_neighbors = small_sacramento.nsmallest(5, "dist") +nearest_neighbors +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + + +nn_plot = small_plot + rule + +# plot horizontal lines which is perpendicular to x=2000 +h_lines = [] +for i in range(5): + h_line_df = pd.DataFrame({ + "sqft": [nearest_neighbors.iloc[i, 4], 2000], + "price": [nearest_neighbors.iloc[i, 6]] * 2 + }) + h_lines.append(alt.Chart(h_line_df).mark_line(color="orange").encode(x="sqft", y="price")) + +nn_plot = alt.layer(*h_lines, small_plot, rule) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:07-knn5-example", nn_plot) +``` + +:::{glue:figure} fig:07-knn5-example +:name: fig:07-knn5-example + +Scatter plot of price (USD) versus house size (square feet) with lines to 5 nearest neighbors. +::: + ++++ + +{numref}`fig:07-knn5-example` illustrates the difference between the house sizes +of the 5 nearest neighbors (in terms of house size) to our new +2,000 square-foot house of interest. Now that we have obtained these nearest neighbors, +we can use their values to predict the +sale price for the new home. Specifically, we can take the mean (or +average) of these 5 values as our predicted value, as illustrated by +the red point in {numref}`fig:07-predictedViz-knn`. 
+ +```{code-cell} ipython3 +prediction = nearest_neighbors["price"].mean() +prediction +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +nn_plot_pred = nn_plot + alt.Chart( + pd.DataFrame({"sqft": [2000], "price": [prediction]}) +).mark_circle(size=40).encode(x="sqft", y="price", color=alt.value("red")) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("knn-5-pred", "{0:,.0f}".format(prediction)) +glue("fig:07-predictedViz-knn", nn_plot_pred) +``` + +:::{glue:figure} fig:07-predictedViz-knn +:name: fig:07-predictedViz-knn + +Scatter plot of price (USD) versus house size (square feet) with predicted price for a 2,000 square-foot house based on 5 nearest neighbors represented as a red dot. +::: + ++++ + +Our predicted price is \${glue:text}`knn-5-pred` +(shown as a red point in {numref}`fig:07-predictedViz-knn`), which is much less than \$350,000; perhaps we +might want to offer less than the list price at which the house is advertised. +But this is only the very beginning of the story. We still have all the same +unanswered questions here with KNN regression that we had with KNN +classification: which $K$ do we choose, and is our model any good at making +predictions? In the next few sections, we will address these questions in the +context of KNN regression. + +One strength of the KNN regression algorithm +that we would like to draw attention to at this point +is its ability to work well with non-linear relationships +(i.e., if the relationship is not a straight line). +This stems from the use of nearest neighbors to predict values. +The algorithm really has very few assumptions +about what the data must look like for it to work. + ++++ + +## Training, evaluating, and tuning the model + +```{index} training data, test data +``` + +As usual, we must start by putting some test data away in a lock box +that we will come back to only after we choose our final model. +Let's take care of that now. 
+Note that for the remainder of the chapter +we'll be working with the entire Sacramento data set, +as opposed to the smaller sample of 30 points +that we used earlier in the chapter ({numref}`fig:07-small-eda-regr`). + ++++ + +```{note} +We are not specifying the `stratify` argument here like we did in +{numref}`Chapter %s `, since +the `train_test_split` function cannot stratify based on a +quantitative variable. +``` + +```{code-cell} ipython3 +sacramento_train, sacramento_test = train_test_split( + sacramento, train_size=0.75 +) +``` + +```{index} cross-validation, RMSPE +``` + +```{index} see: root mean square prediction error; RMSPE +``` + +Next, we'll use cross-validation to choose $K$. In KNN classification, we used +accuracy to see how well our predictions matched the true labels. We cannot use +the same metric in the regression setting, since our predictions will almost never +*exactly* match the true response variable values. Therefore in the +context of KNN regression we will use root mean square prediction error (RMSPE) instead. +The mathematical formula for calculating RMSPE is: + +$$\text{RMSPE} = \sqrt{\frac{1}{n}\sum\limits_{i=1}^{n}(y_i - \hat{y}_i)^2}$$ + +where: + +- $n$ is the number of observations, +- $y_i$ is the observed value for the $i^\text{th}$ observation, and +- $\hat{y}_i$ is the forecasted/predicted value for the $i^\text{th}$ observation. + +In other words, we compute the *squared* difference between the predicted and true response +value for each observation in our test (or validation) set, compute the average, and then finally +take the square root. The reason we use the *squared* difference (and not just the difference) +is that the differences can be positive or negative, i.e., we can overshoot or undershoot the true +response value. {numref}`fig:07-verticalerrors` illustrates both positive and negative differences +between predicted and true response values. 
+So if we want to measure error—a notion of distance between our predicted and true response values—we +want to make sure that we are only adding up positive values, with larger positive values representing larger +mistakes. +If the predictions are very close to the true values, then +RMSPE will be small. If, on the other-hand, the predictions are very +different from the true values, then RMSPE will be quite large. When we +use cross-validation, we will choose the $K$ that gives +us the smallest RMSPE. + +```{code-cell} ipython3 +:tags: [remove-cell] + +from sklearn.neighbors import KNeighborsRegressor + +# (synthetic) new prediction points +pts = pd.DataFrame({"sqft": [1250, 1850, 2250], "price": [250000, 200000, 500000]}) +finegrid = pd.DataFrame({"sqft": np.arange(900, 3901, 10)}) + +# preprocess the data, make the pipeline +sacr_preprocessor = make_column_transformer((StandardScaler(), ["sqft"])) +sacr_pipeline = make_pipeline(sacr_preprocessor, KNeighborsRegressor(n_neighbors=4)) + +# fit the model +X = small_sacramento[["sqft"]] +y = small_sacramento[["price"]] +sacr_pipeline.fit(X, y) + +# predict on the full grid and new data pts +sacr_full_preds_hid = pd.concat( + (finegrid, pd.DataFrame(sacr_pipeline.predict(finegrid), columns=["predicted"])), + axis=1, +) + +sacr_new_preds_hid = pd.concat( + (pts, pd.DataFrame(sacr_pipeline.predict(pts), columns=["predicted"])), + axis=1, +) + +# to make altair mark_line works, need to create separate dataframes for each vertical error line +sacr_new_preds_melted_df = sacr_new_preds_hid.melt(id_vars=["sqft"]) +errors_plot = ( + small_plot + + alt.Chart(sacr_full_preds_hid).mark_line().encode(x="sqft", y="predicted") + + alt.Chart(sacr_new_preds_hid) + .mark_circle(opacity=1) + .encode(x="sqft", y="price") +) +v_lines = [] +for i in pts["sqft"]: + line_df = sacr_new_preds_melted_df.query("sqft == @i") + v_lines.append(alt.Chart(line_df).mark_line(color="red").encode(x="sqft", y="value")) + +errors_plot = 
alt.layer(*v_lines, errors_plot) +errors_plot +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:07-verticalerrors", errors_plot, display=False) +``` + +:::{glue:figure} fig:07-verticalerrors +:name: fig:07-verticalerrors + +Scatter plot of price (USD) versus house size (square feet) with example predictions (blue line) and the error in those predictions compared with true response values for three selected observations (vertical red lines). +::: + ++++ + +```{index} RMSPE; comparison with RMSE +``` + +```{note} +When using many code packages, the evaluation output +we will get to assess the prediction quality of +our KNN regression models is labeled "RMSE", or "root mean squared +error". Why is this so, and why not RMSPE? +In statistics, we try to be very precise with our +language to indicate whether we are calculating the prediction error on the +training data (*in-sample* prediction) versus on the testing data +(*out-of-sample* prediction). When predicting and evaluating prediction quality on the training data, we +say RMSE. By contrast, when predicting and evaluating prediction quality +on the testing or validation data, we say RMSPE. +The equation for calculating RMSE and RMSPE is exactly the same; all that changes is whether the $y$s are +training or testing data. But many people just use RMSE for both, +and rely on context to denote which data the root mean squared error is being calculated on. +``` + +```{index} scikit-learn, scikit-learn; pipeline, scikit-learn; make_pipeline, scikit-learn; make_column_transformer +``` + +Now that we know how we can assess how well our model predicts a numerical +value, let's use Python to perform cross-validation and to choose the optimal +$K$. First, we will create a transformer for preprocessing our data. 
Note +that we include standardization in our preprocessing to build good habits, but +since we only have one predictor, it is technically not necessary; there is no +risk of comparing two predictors of different scales. Next we create a model +pipeline for K-nearest neighbors regression. Note that we use the +`KNeighborsRegressor` model object now to denote a regression problem, as +opposed to the classification problems from the previous chapters. The use of +`KNeighborsRegressor` essentially tells `scikit-learn` that we need to use +different metrics (instead of accuracy) for tuning and evaluation. Next we +specify a parameter grid containing numbers of neighbors +ranging from 1 to 200. Then we create a 5-fold `GridSearchCV` object, and +pass in the pipeline and parameter grid. +There is one additional slight complication: unlike classification models in `scikit-learn`---which +by default use accuracy for tuning, as desired---regression models in `scikit-learn` +do not use the RMSPE for tuning by default. +So we need to specify that we want to use the RMSPE for tuning by setting the +`scoring` argument to `"neg_root_mean_squared_error"`. + +```{note} +We obtained the identifier of the parameter representing the number +of neighbours, `"kneighborsregressor__n_neighbors"` by examining the output +of `sacr_pipeline.get_params()`, as we did in {numref}`Chapter %s `. 
+``` + +```{index} scikit-learn; GridSearchCV +``` + +```{code-cell} ipython3 +# import the KNN regression model +from sklearn.neighbors import KNeighborsRegressor + +# preprocess the data, make the pipeline +sacr_preprocessor = make_column_transformer((StandardScaler(), ["sqft"])) +sacr_pipeline = make_pipeline(sacr_preprocessor, KNeighborsRegressor()) + +# create the 5-fold GridSearchCV object +param_grid = { + "kneighborsregressor__n_neighbors": range(1, 201, 3), +} +sacr_gridsearch = GridSearchCV( + estimator=sacr_pipeline, + param_grid=param_grid, + cv=5, + scoring="neg_root_mean_squared_error", +) +``` + +Next, we run cross-validation by calling the `fit` method +on `sacr_gridsearch`. Note the use of two brackets for the input features +(`sacramento_train[["sqft"]]`), which creates a data frame with a single column. +As we learned in {numref}`Chapter %s `, we can obtain a data frame with a +subset of columns by passing a list of column names; `["sqft"]` is a list with one +item, so we obtain a data frame with one column. If instead we used +just one bracket (`sacramento_train["sqft"]`), we would obtain a series. +In `scikit-learn`, it is easier to work with the input features as a data frame +rather than a series, so we opt for two brackets here. On the other hand, the response variable +can be a series, so we use just one bracket there (`sacramento_train["price"]`). + +As in {numref}`Chapter %s `, once the model has been fit +we will wrap the `cv_results_` output in a data frame, extract +only the relevant columns, compute the standard error based on 5 folds, +and rename the parameter column to be more readable. 
+ + +```{code-cell} ipython3 +# fit the GridSearchCV object +sacr_gridsearch.fit( + sacramento_train[["sqft"]], # A single-column data frame + sacramento_train["price"] # A series +) + +# Retrieve the CV scores +sacr_results = pd.DataFrame(sacr_gridsearch.cv_results_) +sacr_results["sem_test_score"] = sacr_results["std_test_score"] / 5**(1/2) +sacr_results = ( + sacr_results[[ + "param_kneighborsregressor__n_neighbors", + "mean_test_score", + "sem_test_score" + ]] + .rename(columns={"param_kneighborsregressor__n_neighbors": "n_neighbors"}) +) +sacr_results +``` + +In the `sacr_results` results data frame, we see that the +`n_neighbors` variable contains the values of $K$, +and `mean_test_score` variable contains the value of the RMSPE estimated via +cross-validation...Wait a moment! Isn't the RMSPE supposed to be nonnegative? +Recall that when we specified the `scoring` argument in the `GridSearchCV` object, +we used the value `"neg_root_mean_squared_error"`. See the `neg_` at the start? +That stands for *negative*! As it turns out, `scikit-learn` always tries to *maximize* a score +when it tunes a model. But we want to *minimize* the RMSPE when we tune a regression +model. So `scikit-learn` gets around this by working with the *negative* RMSPE instead. +It is a little convoluted, but we need to add one more step to convert the negative +RMSPE back to the regular RMSPE. + +```{code-cell} ipython3 +sacr_results["mean_test_score"] = -sacr_results["mean_test_score"] +sacr_results +``` + +Alright, now the `mean_test_score` variable actually has values of the RMSPE +for different numbers of neighbors. Finally, the `sem_test_score` variable +contains the standard error of our cross-validation RMSPE estimate, which +is a measure of how uncertain we are in the mean value. 
Roughly, if +your estimated mean RMSPE is \$100,000 and standard error is \$1,000, you can expect the +*true* RMSPE to be somewhere roughly between \$99,000 and \$101,000 (although it +may fall outside this range). + +{numref}`fig:07-choose-k-knn-plot` visualizes how the RMSPE varies with the number of neighbors $K$. +We take the *minimum* RMSPE to find the best setting for the number of neighbors. +The smallest RMSPE occurs when $K$ is {glue:text}`best_k_sacr`. + +```{code-cell} ipython3 +:tags: [remove-cell] +best_k_sacr = sacr_results["n_neighbors"][sacr_results["mean_test_score"].idxmin()] +best_cv_RMSPE = min(sacr_results["mean_test_score"]) +glue("best_k_sacr", "{:d}".format(best_k_sacr)) +glue("cv_RMSPE", "{0:,.0f}".format(best_cv_RMSPE)) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +sacr_tunek_plot = alt.Chart(sacr_results).mark_line(point=True).encode( + x=alt.X("n_neighbors:Q", title="Neighbors"), + y=alt.Y("mean_test_score", scale=alt.Scale(zero=False), title="Cross-Validation RMSPE Estimate") +) + +sacr_tunek_plot +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:07-choose-k-knn-plot", sacr_tunek_plot, display=False) +``` + +:::{glue:figure} fig:07-choose-k-knn-plot +:name: fig:07-choose-k-knn-plot + +Effect of the number of neighbors on the RMSPE. +::: + +To see which parameter value corresponds to the minimum RMSPE, +we can also access the `best_params_` attribute of the original fit `GridSearchCV` object. +Note that it is still useful to visualize the results as we did above +since this provides additional information on how the model performance varies. + +```{code-cell} ipython3 +sacr_gridsearch.best_params_ +``` + ++++ + +## Underfitting and overfitting +Similar to the setting of classification, by setting the number of neighbors +to be too small or too large, we cause the RMSPE to increase, as shown in +{numref}`fig:07-choose-k-knn-plot`. What is happening here? 
+ +{numref}`fig:07-howK` visualizes the effect of different settings of $K$ on the +regression model. Each plot shows the predicted values for house sale price from +our KNN regression model for 6 different values for $K$: 1, 3, {glue:text}`best_k_sacr`, 41, 250, and 699 (i.e., all of the training data). +For each model, we predict prices for the range of possible home sizes we +observed in the data set (here 500 to 5,000 square feet) and we plot the +predicted prices as a orange line. + +```{code-cell} ipython3 +:tags: [remove-cell] + +gridvals = [ + 1, + 3, + best_k_sacr, + 41, + 250, + len(sacramento_train), +] + +plots = list() + +sacr_preprocessor = make_column_transformer((StandardScaler(), ["sqft"])) +X = sacramento_train[["sqft"]] +y = sacramento_train[["price"]] + +base_plot = ( + alt.Chart(sacramento_train) + .mark_circle() + .encode( + x=alt.X("sqft", title="House size (square feet)", scale=alt.Scale(zero=False)), + y=alt.Y("price", title="Price (USD)", axis=alt.Axis(format="$,.0f")), + ) +) +for i in range(len(gridvals)): + # make the pipeline based on n_neighbors + sacr_pipeline = make_pipeline( + sacr_preprocessor, KNeighborsRegressor(n_neighbors=gridvals[i]) + ) + sacr_pipeline.fit(X, y) + # predictions + sacr_preds = sacramento_train + sacr_preds = sacr_preds.assign(predicted=sacr_pipeline.predict(sacramento_train)) + # overlay the plots + plots.append( + base_plot + + alt.Chart(sacr_preds, title=f"K = {gridvals[i]}") + .mark_line(color="#ff7f0e") + .encode(x="sqft", y="predicted") + ) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue( + "fig:07-howK", (plots[0] | plots[1]) & (plots[2] | plots[3]) & (plots[4] | plots[5]) +) +``` + +:::{glue:figure} fig:07-howK +:name: fig:07-howK + +Predicted values for house price (represented as a orange line) from KNN regression models for six different values for $K$. 
+::: + ++++ + +```{index} overfitting; regression +``` + +{numref}`fig:07-howK` shows that when $K$ = 1, the orange line runs perfectly +through (almost) all of our training observations. +This happens because our +predicted values for a given region (typically) depend on just a single observation. +In general, when $K$ is too small, the line follows the training data quite +closely, even if it does not match it perfectly. +If we used a different training data set of house prices and sizes +from the Sacramento real estate market, we would end up with completely different +predictions. In other words, the model is *influenced too much* by the data. +Because the model follows the training data so closely, it will not make accurate +predictions on new observations which, generally, will not have the same fluctuations +as the original training data. +Recall from the classification +chapters that this behavior—where the model is influenced too much +by the noisy data—is called *overfitting*; we use this same term +in the context of regression. + +```{index} underfitting; regression +``` + +What about the plots in {numref}`fig:07-howK` where $K$ is quite large, +say, $K$ = 250 or 699? +In this case the orange line becomes extremely smooth, and actually becomes flat +once $K$ is equal to the number of datapoints in the entire data set. +This happens because our predicted values for a given x value (here, home +size), depend on many neighboring observations; in the case where $K$ is equal +to the size of the data set, the prediction is just the mean of the house prices +in the data set (completely ignoring the house size). +In contrast to the $K=1$ example, +the smooth, inflexible orange line does not follow the training observations very closely. +In other words, the model is *not influenced enough* by the training data. +Recall from the classification +chapters that this behavior is called *underfitting*; we again use this same +term in the context of regression. 
+ +Ideally, what we want is neither of the two situations discussed above. Instead, +we would like a model that (1) follows the overall "trend" in the training data, so the model +actually uses the training data to learn something useful, and (2) does not follow +the noisy fluctuations, so that we can be confident that our model will transfer/generalize +well to other new data. If we explore +the other values for $K$, in particular $K$ = {glue:text}`best_k_sacr` (as suggested by cross-validation), +we can see it achieves this goal: it follows the increasing trend of house price +versus house size, but is not influenced too much by the idiosyncratic variations +in price. All of this is similar to how +the choice of $K$ affects K-nearest neighbors classification, as discussed in the previous +chapter. + +## Evaluating on the test set + +To assess how well our model might do at predicting on unseen data, we will +assess its RMSPE on the test data. To do this, we first need to retrain the +KNN regression model on the entire training data set using $K =$ {glue:text}`best_k_sacr` +neighbors. Fortunately we do not have to do this ourselves manually; `scikit-learn` +does it for us automatically. To make predictions with the best model on the test data, +we can use the `predict` method of the fit `GridSearchCV` object. +We then use the `mean_squared_error` +function (with the `y_true` and `y_pred` arguments) +to compute the mean squared prediction error, and finally take the +square root to get the RMSPE. The reason that we do not just use the `score` +method---as in {numref}`Chapter %s `---is that the `KNeighborsRegressor` +model uses a different default scoring metric than the RMSPE. 
+ +```{code-cell} ipython3 +from sklearn.metrics import mean_squared_error + +sacramento_test["predicted"] = sacr_gridsearch.predict(sacramento_test) +RMSPE = mean_squared_error( + y_true = sacramento_test["price"], + y_pred = sacramento_test["predicted"] +)**(1/2) +RMSPE +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("test_RMSPE", "{0:,.0f}".format(RMSPE)) +``` + +Our final model's test error as assessed by RMSPE +is \${glue:text}`test_RMSPE`. +Note that RMSPE is measured in the same units as the response variable. +In other words, on new observations, we expect the error in our prediction to be +*roughly* \${glue:text}`test_RMSPE`. +From one perspective, this is good news: this is about the same as the cross-validation +RMSPE estimate of our tuned model +(which was \${glue:text}`cv_RMSPE`, +so we can say that the model appears to generalize well +to new data that it has never seen before. +However, much like in the case of KNN classification, whether this value for RMSPE is *good*—i.e., +whether an error of around \${glue:text}`test_RMSPE` +is acceptable—depends entirely on the application. +In this application, this error +is not prohibitively large, but it is not negligible either; +\${glue:text}`test_RMSPE` +might represent a substantial fraction of a home buyer's budget, and +could make or break whether or not they could afford put an offer on a house. + +Finally, {numref}`fig:07-predict-all` shows the predictions that our final +model makes across the range of house sizes we might encounter in the +Sacramento area. +Note that instead of predicting the house price only for those house sizes that happen to appear in our data, +we predict it for evenly spaced values between the minimum and maximum in the data set +(roughly 500 to 5000 square feet). +We superimpose this prediction line on a scatter +plot of the original housing price data, +so that we can qualitatively assess if the model seems to fit the data well. 
+You have already seen a +few plots like this in this chapter, but here we also provide the code that +generated it as a learning opportunity. + +```{code-cell} ipython3 +:tags: [remove-output] + +# Create a grid of evenly spaced values along the range of the sqft data +sqft_prediction_grid = pd.DataFrame({ + "sqft": np.arange(sacramento["sqft"].min(), sacramento["sqft"].max(), 10) +}) +# Predict the price for each of the sqft values in the grid +sqft_prediction_grid["predicted"] = sacr_gridsearch.predict(sqft_prediction_grid) + +# Plot all the houses +base_plot = alt.Chart(sacramento).mark_circle(opacity=0.4).encode( + x=alt.X("sqft") + .scale(zero=False) + .title("House size (square feet)"), + y=alt.Y("price") + .axis(format="$,.0f") + .title("Price (USD)") +) + +# Add the predictions as a line +sacr_preds_plot = base_plot + alt.Chart( + sqft_prediction_grid, + title=f"K = {best_k_sacr}" +).mark_line( + color="#ff7f0e" +).encode( + x="sqft", + y="predicted" +) + +sacr_preds_plot +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:07-predict-all", sacr_preds_plot) +``` + +:::{glue:figure} fig:07-predict-all +:name: fig:07-predict-all + +Predicted values of house price (orange line) for the final KNN regression model. +::: + ++++ + +## Multivariable KNN regression + +As in KNN classification, we can use multiple predictors in KNN regression. +In this setting, we have the same concerns regarding the scale of the predictors. Once again, + predictions are made by identifying the $K$ +observations that are nearest to the new point we want to predict; any +variables that are on a large scale will have a much larger effect than +variables on a small scale. Hence, we should re-define the preprocessor in the +pipeline to incorporate all predictor variables. 
+ +Note that we also have the same concern regarding the selection of predictors +in KNN regression as in KNN classification: having more predictors is **not** always +better, and the choice of which predictors to use has a potentially large influence +on the quality of predictions. Fortunately, we can use the predictor selection +algorithm from {numref}`Chapter %s ` in KNN regression as well. +As the algorithm is the same, we will not cover it again in this chapter. + +```{index} K-nearest neighbors; multivariable regression, Sacramento real estate +``` + +We will now demonstrate a multivariable KNN regression analysis of the +Sacramento real estate data using `scikit-learn`. This time we will use +house size (measured in square feet) as well as number of bedrooms as our +predictors, and continue to use house sale price as our response variable +that we are trying to predict. +It is always a good practice to do exploratory data analysis, such as +visualizing the data, before we start modeling the data. {numref}`fig:07-bedscatter` +shows that the number of bedrooms might provide useful information +to help predict the sale price of a house. + +```{code-cell} ipython3 +:tags: [remove-output] + +plot_beds = alt.Chart(sacramento).mark_circle().encode( + x=alt.X("beds").title("Number of Bedrooms"), + y=alt.Y("price").title("Price (USD)").axis(format="$,.0f"), +) + +plot_beds +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:07-bedscatter", plot_beds) +``` + +:::{glue:figure} fig:07-bedscatter +:name: fig:07-bedscatter + +Scatter plot of the sale price of houses versus the number of bedrooms. +::: + ++++ + +{numref}`fig:07-bedscatter` shows that as the number of bedrooms increases, +the house sale price tends to increase as well, but that the relationship +is quite weak. Does adding the number of bedrooms +to our model improve our ability to predict price? 
To answer that +question, we will have to create a new KNN regression +model using house size and number of bedrooms, and then we can compare it to +the model we previously came up with that only used house +size. Let's do that now! + +First we'll build a new model object and preprocessor for the analysis. +Note that we pass the list `["sqft", "beds"]` into the `make_column_transformer` +function to denote that we have two predictors. Moreover, we do not specify `n_neighbors` in +`KNeighborsRegressor`, indicating that we want this parameter to be tuned by `GridSearchCV`. + +```{code-cell} ipython3 +sacr_preprocessor = make_column_transformer((StandardScaler(), ["sqft", "beds"])) +sacr_pipeline = make_pipeline(sacr_preprocessor, KNeighborsRegressor()) +``` + +Next, we'll use 5-fold cross-validation with a `GridSearchCV` object +to choose the number of neighbors via the minimum RMSPE: + +```{code-cell} ipython3 +# create the 5-fold GridSearchCV object +param_grid = { + "kneighborsregressor__n_neighbors": range(1, 50), +} + +sacr_gridsearch = GridSearchCV( + estimator=sacr_pipeline, + param_grid=param_grid, + cv=5, + scoring="neg_root_mean_squared_error" +) + +sacr_gridsearch.fit( + sacramento_train[["sqft", "beds"]], + sacramento_train["price"] +) + +# retrieve the CV scores +sacr_results = pd.DataFrame(sacr_gridsearch.cv_results_) +sacr_results["sem_test_score"] = sacr_results["std_test_score"] / 5**(1/2) +sacr_results["mean_test_score"] = -sacr_results["mean_test_score"] +sacr_results = ( + sacr_results[[ + "param_kneighborsregressor__n_neighbors", + "mean_test_score", + "sem_test_score" + ]] + .rename(columns={"param_kneighborsregressor__n_neighbors" : "n_neighbors"}) +) + +# show only the row of minimum RMSPE +sacr_results.nsmallest(1, "mean_test_score") +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +best_k_sacr_multi = sacr_results["n_neighbors"][sacr_results["mean_test_score"].idxmin()] +min_rmspe_sacr_multi = min(sacr_results["mean_test_score"]) 
+glue("best_k_sacr_multi", "{:d}".format(best_k_sacr_multi)) +glue("cv_RMSPE_2pred", "{0:,.0f}".format(min_rmspe_sacr_multi)) +``` + +Here we see that the smallest estimated RMSPE from cross-validation occurs when $K =$ {glue:text}`best_k_sacr_multi`. +If we want to compare this multivariable KNN regression model to the model with only a single +predictor *as part of the model tuning process* (e.g., if we are running forward selection as described +in the chapter on evaluating and tuning classification models), +then we must compare the RMSPE estimated using only the training data via cross-validation. +Looking back, the estimated cross-validation RMSPE for the single-predictor +model was \${glue:text}`cv_RMSPE`. +The estimated cross-validation RMSPE for the multivariable model is +\${glue:text}`cv_RMSPE_2pred`. +Thus in this case, we did not improve the model +by a large amount by adding this additional predictor. + +Regardless, let's continue the analysis to see how we can make predictions with a multivariable KNN regression model +and evaluate its performance on test data. As previously, we will use the best model to make predictions on the test data +via the `predict` method of the fit `GridSearchCV` object. Finally, we will use the `mean_squared_error` function +to compute the RMSPE. + +```{code-cell} ipython3 +sacramento_test["predicted"] = sacr_gridsearch.predict(sacramento_test) +RMSPE_mult = mean_squared_error( + y_true = sacramento_test["price"], + y_pred = sacramento_test["predicted"] +)**(1/2) +RMSPE_mult + +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("RMSPE_mult", "{0:,.0f}".format(RMSPE_mult)) +``` + +This time, when we performed KNN regression on the same data set, but also +included number of bedrooms as a predictor, we obtained a RMSPE test error +of \${glue:text}`RMSPE_mult`. +{numref}`fig:07-knn-mult-viz` visualizes the model's predictions overlaid on top of the data. 
This +time the predictions are a surface in 3D space, instead of a line in 2D space, as we have 2 +predictors instead of 1. + +```{code-cell} ipython3 +:tags: [remove-input] + +# create a prediction pt grid +xvals = np.linspace( + sacramento_train["sqft"].min(), sacramento_train["sqft"].max(), 50 +) +yvals = np.linspace( + sacramento_train["beds"].min(), sacramento_train["beds"].max(), 50 +) +xygrid = np.array(np.meshgrid(xvals, yvals)).reshape(2, -1).T +xygrid = pd.DataFrame(xygrid, columns=["sqft", "beds"]) + +# add prediction +knnPredGrid = sacr_gridsearch.predict(xygrid) + +fig = px.scatter_3d( + sacramento_train, + x="sqft", + y="beds", + z="price", + opacity=0.4, + labels={"sqft": "Size (sq ft)", "beds": "Bedrooms", "price": "Price (USD)"}, +) + +fig.update_traces(marker={"size": 2, "color": "red"}) + +fig.add_trace( + go.Surface( + x=xvals, + y=yvals, + z=knnPredGrid.reshape(50, -1), + name="Predictions", + colorscale="viridis", + colorbar={"title": "Price (USD)"} + ) +) + +fig.update_layout( + margin=dict(l=0, r=0, b=0, t=1), + template="plotly_white", +) + +glue("fig:07-knn-mult-viz", fig) +``` + +```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7 +:name: fig:07-knn-mult-viz +:figclass: caption-hack + +KNN regression model’s predictions represented as a surface in 3D space overlaid on top of the data using three predictors (price, house size, and the number of bedrooms). Note that in general we recommend against using 3D visualizations; here we use a 3D visualization only to illustrate what the surface of predictions looks like for learning purposes. +``` + ++++ + +We can see that the predictions in this case, where we have 2 predictors, form +a surface instead of a line. 
Because the newly added predictor (number of bedrooms) is +related to price (as price changes, so does number of bedrooms) +and is not totally determined by house size (our other predictor), +we get additional and useful information for making our +predictions. For example, in this model we would predict that the cost of a +house with a size of 2,500 square feet generally increases slightly as the number +of bedrooms increases. Without having the additional predictor of number of +bedrooms, we would predict the same price for these two houses. + ++++ + +## Strengths and limitations of KNN regression + +As with KNN classification (or any prediction algorithm for that matter), KNN +regression has both strengths and weaknesses. Some are listed here: + +**Strengths:** K-nearest neighbors regression + +1. is a simple, intuitive algorithm, +2. requires few assumptions about what the data must look like, and +3. works well with non-linear relationships (i.e., if the relationship is not a straight line). + +**Weaknesses:** K-nearest neighbors regression + +1. becomes very slow as the training data gets larger, +2. may not perform well with a large number of predictors, and +3. may not predict well beyond the range of values input in your training data. + ++++ + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Regression I: K-nearest neighbors" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. 
+ ++++ + +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/regression2.md b/pull313/_sources/regression2.md new file mode 100644 index 00000000..edca8052 --- /dev/null +++ b/pull313/_sources/regression2.md @@ -0,0 +1,1374 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.5 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(regression2)= +# Regression II: linear regression + +```{code-cell} ipython3 +:tags: [remove-cell] + +from chapter_preamble import * +from IPython.display import HTML +import numpy as np +import plotly.express as px +import plotly.graph_objects as go +``` + +## Overview +Up to this point, we have solved all of our predictive problems—both classification +and regression—using K-nearest neighbors (KNN)-based approaches. In the context of regression, +there is another commonly used method known as *linear regression*. This chapter provides an introduction +to the basic concept of linear regression, shows how to use `scikit-learn` to perform linear regression in Python, +and characterizes its strengths and weaknesses compared to KNN regression. The focus is, as usual, +on the case where there is a single predictor and single response variable of interest; but the chapter +concludes with an example using *multivariable linear regression* when there is more than one +predictor. + +## Chapter learning objectives +By the end of the chapter, readers will be able to do the following: + +* Use Python and `scikit-learn` to fit a linear regression model on training data. +* Evaluate the linear regression model on test data. +* Compare and contrast predictions obtained from K-nearest neighbor regression to those obtained using linear regression from the same data set. 
+ ++++ + +## Simple linear regression + +```{index} regression; linear +``` + +At the end of the previous chapter, we noted some limitations of KNN regression. +While the method is simple and easy to understand, KNN regression does not +predict well beyond the range of the predictors in the training data, and +the method gets significantly slower as the training data set grows. +Fortunately, there is an alternative to KNN regression—*linear regression*—that addresses +both of these limitations. Linear regression is also very commonly +used in practice because it provides an interpretable mathematical equation that describes +the relationship between the predictor and response variables. In this first part of the chapter, we will focus on *simple* linear regression, +which involves only one predictor variable and one response variable; later on, we will consider + *multivariable* linear regression, which involves multiple predictor variables. + Like KNN regression, simple linear regression involves +predicting a numerical response variable (like race time, house price, or height); +but *how* it makes those predictions for a new observation is quite different from KNN regression. + Instead of looking at the K nearest neighbors and averaging +over their values for a prediction, in simple linear regression, we create a +straight line of best fit through the training data and then +"look up" the prediction using the line. + ++++ + +```{index} regression; logistic +``` + +```{note} +Although we did not cover it in earlier chapters, there +is another popular method for classification called *logistic +regression* (it is used for classification even though the name, somewhat confusingly, +has the word "regression" in it). In logistic regression—similar to linear regression—you +"fit" the model to the training data and then "look up" the prediction for each new observation. 
+Logistic regression and KNN classification have an advantage/disadvantage comparison +similar to that of linear regression and KNN +regression. It is useful to have a good understanding of linear regression before learning about +logistic regression. After reading this chapter, see the "Additional Resources" section at the end of the +classification chapters to learn more about logistic regression. +``` + ++++ + +```{index} Sacramento real estate, question; regression +``` + +Let's return to the Sacramento housing data from {numref}`Chapter %s ` to learn +how to apply linear regression and compare it to KNN regression. For now, we +will consider +a smaller version of the housing data to help make our visualizations clear. +Recall our predictive question: can we use the size of a house in the Sacramento, CA area to predict +its sale price? In particular, recall that we have come across a new 2,000 square-foot house we are interested +in purchasing with an advertised list price of +\$350,000. Should we offer the list price, or is that over/undervalued? +To answer this question using simple linear regression, we use the data we have +to draw the straight line of best fit through our existing data points. +The small subset of data as well as the line of best fit are shown +in {numref}`fig:08-lin-reg1`. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] + +import pandas as pd + +np.random.seed(2) + +sacramento = pd.read_csv("data/sacramento.csv") + +small_sacramento = sacramento.sample(n=30) + +small_plot = ( + alt.Chart(small_sacramento) + .mark_circle() + .encode( + x=alt.X("sqft") + .scale(zero=False) + .title("House size (square feet)"), + y=alt.Y("price") + .axis(format="$,.0f") + .scale(zero=False) + .title("Price (USD)"), + ) +) + +small_plot += small_plot.transform_regression("sqft", "price").mark_line() + +small_plot +``` + +```{code-cell} ipython3 +:tags: [remove-cell] +glue("fig:08-lin-reg1", small_plot) +``` + +:::{glue:figure} fig:08-lin-reg1 +:name: fig:08-lin-reg1 + +Scatter plot of sale price versus size with line of best fit for subset of the Sacramento housing data. +::: + ++++ + +```{index} straight line; equation +``` + +The equation for the straight line is: + +$$\text{house sale price} = \beta_0 + \beta_1 \cdot (\text{house size}),$$ +where + +- $\beta_0$ is the *vertical intercept* of the line (the price when house size is 0) +- $\beta_1$ is the *slope* of the line (how quickly the price increases as you increase house size) + +Therefore using the data to find the line of best fit is equivalent to finding coefficients +$\beta_0$ and $\beta_1$ that *parametrize* (correspond to) the line of best fit. +Now of course, in this particular problem, the idea of a 0 square-foot house is a bit silly; +but you can think of $\beta_0$ here as the "base price," and +$\beta_1$ as the increase in price for each square foot of space. +Let's push this thought even further: what would happen in the equation for the line if you +tried to evaluate the price of a house with size 6 *million* square feet? +Or what about *negative* 2,000 square feet? As it turns out, nothing in the formula breaks; linear +regression will happily make predictions for crazy predictor values if you ask it to. But even though +you *can* make these wild predictions, you shouldn't. 
You should only make predictions roughly within +the range of your original data, and perhaps a bit beyond it only if it makes sense. For example, +the data in {numref}`fig:08-lin-reg1` only reaches around 600 square feet on the low end, but +it would probably be reasonable to use the linear regression model to make a prediction at 500 square feet, say. + +Back to the example! Once we have the coefficients $\beta_0$ and $\beta_1$, we can use the equation +above to evaluate the predicted sale price given the value we have for the +predictor variable—here 2,000 square feet. {numref}`fig:08-lin-reg2` demonstrates this process. + +```{code-cell} ipython3 +:tags: [remove-cell] +from sklearn.linear_model import LinearRegression + +lm = LinearRegression() +lm.fit(small_sacramento[["sqft"]], small_sacramento[["price"]]) +prediction = float(lm.predict(pd.DataFrame({"sqft": [2000]}))) + +# the vertical dotted line +line_df = pd.DataFrame({"x": [2000]}) +rule = alt.Chart(line_df).mark_rule(strokeDash=[2, 4]).encode(x="x") + +# the red point +point_df = pd.DataFrame({"x": [2000], "y": [prediction]}) +point = alt.Chart(point_df).mark_circle(color="red", size=100).encode(x="x", y="y") + +# overlay all plots +small_plot_2000_pred = ( + small_plot + + rule + + point + # add the text + + alt.Chart( + pd.DataFrame( + { + "x": [2350], + "y": [prediction - 41000], + "prediction": ["$" + "{0:,.0f}".format(prediction)], + } + ) + ) + .mark_text(dy=-5, size=15) + .encode(x="x", y="y", text="prediction") +) + +small_plot_2000_pred +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:08-lin-reg2", small_plot_2000_pred) +glue("pred_2000", "{0:,.0f}".format(prediction)) +``` + +:::{glue:figure} fig:08-lin-reg2 +:name: fig:08-lin-reg2 + +Scatter plot of sale price versus size with line of best fit and a red dot at the predicted sale price for a 2,000 square-foot home. 
+::: + ++++ + +By using simple linear regression on this small data set to predict the sale price +for a 2,000 square-foot house, we get a predicted value of +\${glue:text}`pred_2000`. But wait a minute...how +exactly does simple linear regression choose the line of best fit? Many +different lines could be drawn through the data points. +Some plausible examples are shown in {numref}`fig:08-several-lines`. + +```{code-cell} ipython3 +:tags: [remove-cell] + +intercept_l = [-64542.23, -6900, -64542.23] +slope_l = [190, 175, 160] +line_color_l = ["green", "purple", "red"] + +# set the domains (range of x values) of lines +min_x = small_sacramento["sqft"].min() +max_x = small_sacramento["sqft"].max() + +several_lines_plot = small_plot.copy() + +for i in range(len(slope_l)): + several_lines_plot += ( + alt.Chart( + pd.DataFrame( + { + "x": [min_x, max_x], + "y": [ + intercept_l[i] + slope_l[i] * min_x, + intercept_l[i] + slope_l[i] * max_x, + ], + } + ) + ) + .mark_line(color=line_color_l[i]) + .encode(x="x", y="y") + ) + +several_lines_plot +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:08-several-lines", several_lines_plot) +``` + +:::{glue:figure} fig:08-several-lines +:name: fig:08-several-lines + +Scatter plot of sale price versus size with many possible lines that could be drawn through the data points. +::: + ++++ + +```{index} RMSPE +``` + +Simple linear regression chooses the straight line of best fit by choosing +the line that minimizes the **average squared vertical distance** between itself and +each of the observed data points in the training data. {numref}`fig:08-verticalDistToMin` illustrates +these vertical distances as red lines. Finally, to assess the predictive +accuracy of a simple linear regression model, +we use RMSPE—the same measure of predictive performance we used with KNN regression. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] + +small_sacramento_pred = small_sacramento +# get prediction +small_sacramento_pred = small_sacramento_pred.assign( + predicted=lm.predict(small_sacramento[["sqft"]]) +) +# melt the dataframe to create separate df to create lines +small_sacramento_pred = small_sacramento_pred[["sqft", "price", "predicted"]].melt( + id_vars=["sqft"] +) + +v_lines = [] +for i in range(len(small_sacramento)): + sqft_val = small_sacramento.iloc[i]["sqft"] + line_df = small_sacramento_pred.query("sqft == @sqft_val") + v_lines.append(alt.Chart(line_df).mark_line(color="red").encode(x="sqft", y="value")) + +error_plot = alt.layer(*v_lines, small_plot).configure_circle(opacity=1) +error_plot +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:08-verticalDistToMin", error_plot) +``` + +:::{glue:figure} fig:08-verticalDistToMin +:name: fig:08-verticalDistToMin + +Scatter plot of sale price versus size with red lines denoting the vertical distances between the predicted values and the observed data points. +::: + ++++ + +## Linear regression in Python + ++++ + +```{index} scikit-learn +``` + +We can perform simple linear regression in Python using `scikit-learn` in a +very similar manner to how we performed KNN regression. +To do this, instead of creating a `KNeighborsRegressor` model object, +we use a `LinearRegression` model object; +and as usual, we first have to import it from `sklearn`. +Another difference is that we do not need to choose $K$ in the +context of linear regression, and so we do not need to perform cross-validation. +Below we illustrate how we can use the usual `scikit-learn` workflow to predict house sale +price given house size. We use a simple linear regression approach on the full +Sacramento real estate data set. 
+ +```{index} scikit-learn; random_state +``` + +As usual, we start by loading packages, setting the seed, loading data, and +putting some test data away in a lock box that we +can come back to after we choose our final model. Let's take care of that now. + +```{code-cell} ipython3 +import numpy as np +import altair as alt +import pandas as pd +from sklearn.model_selection import train_test_split +from sklearn.linear_model import LinearRegression +from sklearn.metrics import mean_squared_error +from sklearn import set_config + +# Output dataframes instead of arrays +set_config(transform_output="pandas") + +np.random.seed(1) + +sacramento = pd.read_csv("data/sacramento.csv") + +sacramento_train, sacramento_test = train_test_split( + sacramento, train_size=0.6 +) +``` + +Now that we have our training data, we will create +and fit the linear regression model object. +We will also extract the slope of the line +via the `coef_[0]` property, as well as the +intercept of the line via the `intercept_` property. + +```{index} scikit-learn; fit +``` + +```{code-cell} ipython3 +# fit the linear regression model +lm = LinearRegression() +lm.fit( + sacramento_train[["sqft"]], # A single-column data frame + sacramento_train["price"] # A series +) + +# make a dataframe containing slope and intercept coefficients +pd.DataFrame({"slope": [lm.coef_[0]], "intercept": [lm.intercept_]}) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("train_lm_slope", "{:0.0f}".format(lm.coef_[0])) +glue("train_lm_intercept", "{:0.0f}".format(lm.intercept_)) +glue("train_lm_slope_f", "{0:,.0f}".format(lm.coef_[0])) +glue("train_lm_intercept_f", "{0:,.0f}".format(lm.intercept_)) +``` + +```{index} standardization +``` + +```{note} +An additional difference that you will notice here is that we do +not standardize (i.e., scale and center) our +predictors. In K-nearest neighbors models, recall that the model fit changes +depending on whether we standardize first or not. 
In linear regression, +standardization does not affect the fit (it *does* affect the coefficients in +the equation, though!). So you can standardize if you want—it won't +hurt anything—but if you leave the predictors in their original form, +the best fit coefficients are usually easier to interpret afterward. +``` + ++++ + +Our coefficients are +(intercept) $\beta_0=$ {glue:text}`train_lm_intercept` +and (slope) $\beta_1=$ {glue:text}`train_lm_slope`. +This means that the equation of the line of best fit is + +$\text{house sale price} =$ {glue:text}`train_lm_intercept` $+$ {glue:text}`train_lm_slope` $\cdot (\text{house size}).$ + +In other words, the model predicts that houses +start at \${glue:text}`train_lm_intercept_f` for 0 square feet, and that +every extra square foot increases the cost of +the house by \${glue:text}`train_lm_slope_f`. Finally, +we predict on the test data set to assess how well our model does. + +```{code-cell} ipython3 +# make predictions +sacramento_test["predicted"] = lm.predict(sacramento_test[["sqft"]]) + +# calculate RMSPE +RMSPE = mean_squared_error( + y_true = sacramento_test["price"], + y_pred = sacramento_test["predicted"] +)**(1/2) + +RMSPE +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("sacr_RMSPE", "{0:,.0f}".format(RMSPE)) +``` + +```{index} RMSPE +``` + +Our final model's test error as assessed by RMSPE +is \${glue:text}`sacr_RMSPE`. +Remember that this is in units of the response variable, and here that +is US Dollars (USD). Does this mean our model is "good" at predicting house +sale price based off of the predictor of home size? Again, answering this is +tricky and requires knowledge of how you intend to use the prediction. + +To visualize the simple linear regression model, we can plot the predicted house +sale price across all possible house sizes we might encounter. 
+Since our model is linear, +we only need to compute the predicted price of the minimum and maximum house size, +and then connect them with a straight line. +We superimpose this prediction line on a scatter +plot of the original housing price data, +so that we can qualitatively assess if the model seems to fit the data well. +{numref}`fig:08-lm-predict-all` displays the result. + +```{code-cell} ipython3 +:tags: [remove-output] +sqft_prediction_grid = sacramento[["sqft"]].agg(["min", "max"]) +sqft_prediction_grid["predicted"] = lm.predict(sqft_prediction_grid) + +all_points = alt.Chart(sacramento).mark_circle(opacity=0.4).encode( + x=alt.X("sqft") + .scale(zero=False) + .title("House size (square feet)"), + y=alt.Y("price") + .axis(format="$,.0f") + .scale(zero=False) + .title("Price (USD)") +) + +sacr_preds_plot = all_points + alt.Chart(sqft_prediction_grid).mark_line( + color="#ff7f0e" +).encode( + x="sqft", + y="predicted" +) + +sacr_preds_plot +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:08-lm-predict-all", sacr_preds_plot) +``` + +:::{glue:figure} fig:08-lm-predict-all +:name: fig:08-lm-predict-all + +Scatter plot of sale price versus size with line of best fit for the full Sacramento housing data. +::: + +## Comparing simple linear and KNN regression + +```{index} regression; comparison of methods +``` + +Now that we have a general understanding of both simple linear and KNN +regression, we can start to compare and contrast these methods as well as the +predictions made by them. To start, let's look at the visualization of the +simple linear regression model predictions for the Sacramento real estate data +(predicting price from house size) and the "best" KNN regression model +obtained from the same problem, shown in {numref}`fig:08-compareRegression`. 
+ +```{code-cell} ipython3 +:tags: [remove-cell] +from sklearn.model_selection import GridSearchCV +from sklearn.compose import make_column_transformer +from sklearn.neighbors import KNeighborsRegressor +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import StandardScaler + +# preprocess the data, make the pipeline +sacr_preprocessor = make_column_transformer((StandardScaler(), ["sqft"])) +sacr_pipeline_knn = make_pipeline( + sacr_preprocessor, KNeighborsRegressor(n_neighbors=25) +) # 25 is the best parameter obtained through cross validation in regression1 chapter + +sacr_pipeline_knn.fit(sacramento_train[["sqft"]], sacramento_train[["price"]]) + +# knn in-sample predictions (on training split) +sacr_preds_knn = sacramento_train +sacr_preds_knn = sacr_preds_knn.assign( + knn_predicted=sacr_pipeline_knn.predict(sacramento_train) +) + +# knn out-of-sample predictions (on test split) +sacr_preds_knn_test = sacramento_test +sacr_preds_knn_test = sacr_preds_knn_test.assign( + knn_predicted=sacr_pipeline_knn.predict(sacramento_test) +) + +sacr_rmspe_knn = np.sqrt( + mean_squared_error( + y_true=sacr_preds_knn_test["price"], y_pred=sacr_preds_knn_test["knn_predicted"] + ) +) + +# plot knn in-sample predictions overlaid on scatter plot +knn_plot_final = ( + alt.Chart(sacr_preds_knn, title="KNN regression") + .mark_circle() + .encode( + x=alt.X("sqft", title="House size (square feet)", scale=alt.Scale(zero=False)), + y=alt.Y( + "price", + title="Price (USD)", + axis=alt.Axis(format="$,.0f"), + scale=alt.Scale(zero=False), + ), + ) +) + +knn_plot_final = ( + knn_plot_final + + knn_plot_final.mark_line(color="#ff7f0e").encode(x="sqft", y="knn_predicted") + + alt.Chart( # add the text + pd.DataFrame( + { + "x": [3500], + "y": [100000], + "rmspe": [f"RMSPE = {round(sacr_rmspe_knn)}"], + } + ) + ) + .mark_text(dy=-5, size=15) + .encode(x="x", y="y", text="rmspe") +) + + +# add more components to lm_plot_final +lm_plot_final = ( + 
alt.Chart(sacramento_train, title="linear regression") + .mark_circle() + .encode( + x=alt.X("sqft", title="House size (square feet)", scale=alt.Scale(zero=False)), + y=alt.Y( + "price", + title="Price (USD)", + axis=alt.Axis(format="$,.0f"), + scale=alt.Scale(zero=False), + ), + ) +) + +lm_plot_final = ( + lm_plot_final + + lm_plot_final.transform_regression("sqft", "price").mark_line(color="#ff7f0e") + + alt.Chart( # add the text + pd.DataFrame( + { + "x": [3500], + "y": [100000], + "rmspe": [f"RMSPE = {round(RMSPE)}"], + } + ) + ) + .mark_text(dy=-5, size=15) + .encode(x="x", y="y", text="rmspe") +) +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("fig:08-compareRegression", (lm_plot_final | knn_plot_final)) +``` + +:::{glue:figure} fig:08-compareRegression +:name: fig:08-compareRegression + +Comparison of simple linear regression and KNN regression. +::: + ++++ + +What differences do we observe in {numref}`fig:08-compareRegression`? One obvious +difference is the shape of the orange lines. In simple linear regression we are +restricted to a straight line, whereas in KNN regression our line is much more +flexible and can be quite wiggly. But there is a major interpretability advantage in limiting the +model to a straight line. A +straight line can be defined by two numbers, the +vertical intercept and the slope. The intercept tells us what the prediction is when +all of the predictors are equal to 0; and the slope tells us what unit increase in the response +variable we predict given a unit increase in the predictor +variable. KNN regression, as simple as it is to implement and understand, has no such +interpretability from its wiggly line. + +```{index} underfitting; regression +``` + +There can, however, also be a disadvantage to using a simple linear regression +model in some cases, particularly when the relationship between the response variable and +the predictor is not linear, but instead some other shape (e.g., curved or oscillating). 
In +these cases the prediction model from a simple linear regression +will underfit (have high bias), meaning that model/predicted values do not +match the actual observed values very well. Such a model would probably have a +quite high RMSE when assessing model goodness of fit on the training data and +a quite high RMSPE when assessing model prediction quality on a test data +set. On such a data set, KNN regression may fare better. Additionally, there +are other types of regression you can learn about in future books that may do +even better at predicting with such data. + +How do these two models compare on the Sacramento house prices data set? In +{numref}`fig:08-compareRegression`, we also printed the RMSPE as calculated from +predicting on the test data set that was not used to train/fit the models. The RMSPE for the simple linear +regression model is slightly lower than the RMSPE for the KNN regression model. +Considering that the simple linear regression model is also more interpretable, +if we were comparing these in practice we would likely choose to use the simple +linear regression model. + +```{index} extrapolation +``` + +Finally, note that the KNN regression model becomes "flat" +at the left and right boundaries of the data, while the linear model +predicts a constant slope. Predicting outside the range of the observed +data is known as *extrapolation*; KNN and linear models behave quite differently +when extrapolating. Depending on the application, the flat +or constant slope trend may make more sense. For example, if our housing +data were slightly different, the linear model may have actually predicted +a *negative* price for a small house (if the intercept $\beta_0$ was negative), +which obviously does not match reality. On the other hand, the trend of increasing +house size corresponding to increasing house price probably continues for large houses, +so the "flat" extrapolation of KNN likely does not match reality. 
+ ++++ + +## Multivariable linear regression + ++++ + +```{index} regression; multivariable linear, regression; multivariable linear equation +``` + +```{index} see: multivariable linear equation; plane equation +``` + +As in KNN classification and KNN regression, we can move beyond the simple +case of only one predictor to the case with multiple predictors, +known as *multivariable linear regression*. +To do this, we follow a very similar approach to what we did for +KNN regression: we just specify the training data by adding more predictors. +But recall that we do not need to use cross-validation to choose any parameters, +nor do we need to standardize (i.e., center and scale) the data for linear regression. +Note once again that we have the same concerns regarding multiple predictors + as in the settings of multivariable KNN regression and classification: having more predictors is **not** always +better. But because the same predictor selection +algorithm from {numref}`Chapter %s ` extends to the setting of linear regression, +it will not be covered again in this chapter. + +```{index} Sacramento real estate +``` + +We will demonstrate multivariable linear regression using the Sacramento real estate +data with both house size +(measured in square feet) as well as number of bedrooms as our predictors, and +continue to use house sale price as our response variable. +The `scikit-learn` framework makes this easy to do: we just need to set +both the `sqft` and `beds` variables as predictors, and then use the `fit` +method as usual. + +```{code-cell} ipython3 + +mlm = LinearRegression() +mlm.fit( + sacramento_train[["sqft", "beds"]], + sacramento_train["price"] +) +``` +Finally, we make predictions on the test data set to assess the quality of our model. 
+
+```{code-cell} ipython3
+sacramento_test["predicted"] = mlm.predict(sacramento_test[["sqft","beds"]])
+
+lm_mult_test_RMSPE = mean_squared_error(
+    y_true = sacramento_test["price"],
+    y_pred = sacramento_test["predicted"]
+)**(1/2)
+lm_mult_test_RMSPE
+```
+
+```{code-cell} ipython3
+:tags: [remove-cell]
+
+glue("sacr_mult_RMSPE", "{0:,.0f}".format(lm_mult_test_RMSPE))
+```
+
+Our model's test error as assessed by RMSPE
+is \${glue:text}`sacr_mult_RMSPE`.
+In the case of two predictors, we can visualize the predictions made by our
+linear regression, which form a *plane* of best fit, as
+shown in {numref}`fig:08-3DlinReg`.
+
+```{code-cell} ipython3
+:tags: [remove-input]
+
+# create a prediction pt grid
+xvals = np.linspace(
+    sacramento_train["sqft"].min(), sacramento_train["sqft"].max(), 50
+)
+yvals = np.linspace(
+    sacramento_train["beds"].min(), sacramento_train["beds"].max(), 50
+)
+xygrid = np.array(np.meshgrid(xvals, yvals)).reshape(2, -1).T
+xygrid = pd.DataFrame(xygrid, columns=["sqft", "beds"])
+
+# add prediction
+mlmPredGrid = mlm.predict(xygrid)
+
+fig = px.scatter_3d(
+    sacramento_train,
+    x="sqft",
+    y="beds",
+    z="price",
+    opacity=0.4,
+    labels={"sqft": "Size (sq ft)", "beds": "Bedrooms", "price": "Price (USD)"},
+)
+
+fig.update_traces(marker={"size": 2, "color": "red"})
+
+fig.add_trace(
+    go.Surface(
+        x=xvals,
+        y=yvals,
+        z=mlmPredGrid.reshape(50, -1),
+        name="Predictions",
+        colorscale="viridis",
+        colorbar={"title": "Price (USD)"}
+    )
+)
+
+fig.update_layout(
+    margin=dict(l=0, r=0, b=0, t=1),
+    template="plotly_white",
+)
+
+glue("fig:08-3DlinReg", fig)
+```
+
+```{figure} data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7
+:name: fig:08-3DlinReg
+:figclass: caption-hack
+
+Linear regression plane of best fit overlaid on top of the data (using price,
+house size, and number of bedrooms as predictors). 
Note that in general we +recommend against using 3D visualizations; here we use a 3D visualization only +to illustrate what the regression plane looks like for learning purposes. +``` + ++++ + +We see that the predictions from linear regression with two predictors form a +flat plane. This is the hallmark of linear regression, and differs from the +wiggly, flexible surface we get from other methods such as KNN regression. + As discussed, this can be advantageous in one aspect, which is that for each +predictor, we can get slopes/intercept from linear regression, and thus describe the +plane mathematically. We can extract those slope values from the `coef_` property +of our model object, and the intercept from the `intercept_` property, +as shown below. + +```{code-cell} ipython3 +mlm.coef_ +``` + +```{code-cell} ipython3 +mlm.intercept_ +``` + +When we have multiple predictor variables, it is not easy to +know which variable goes with which coefficient in `mlm.coef_`. In particular, +you will see that `mlm.coef_` above is just an array of values without any variable names. +Unfortunately you have to do this mapping yourself: the coefficients in `mlm.coef_` appear +in the *same order* as the columns of the predictor data frame you used when training. +So since we used `sacramento_train[["sqft", "beds"]]` when training, +we have that `mlm.coef_[0]` corresponds to `sqft`, and `mlm.coef_[1]` corresponds to `beds`. 
+Once you sort out the correspondence, you can then use those slopes to write a mathematical equation to describe the prediction plane: + +```{index} plane equation +``` + + + +$$\text{house sale price} = \beta_0 + \beta_1\cdot(\text{house size}) + \beta_2\cdot(\text{number of bedrooms}),$$ +where: + +- $\beta_0$ is the *vertical intercept* of the hyperplane (the price when both house size and number of bedrooms are 0) +- $\beta_1$ is the *slope* for the first predictor (how quickly the price increases as you increase house size) +- $\beta_2$ is the *slope* for the second predictor (how quickly the price increases as you increase the number of bedrooms) + +Finally, we can fill in the values for $\beta_0$, $\beta_1$ and $\beta_2$ from the model output above +to create the equation of the plane of best fit to the data: + +```{code-cell} ipython3 +:tags: [remove-cell] + +icept = "{0:,.0f}".format(mlm.intercept_) +sqftc = "{0:,.0f}".format(mlm.coef_[0]) +bedsc = "{0:,.0f}".format(mlm.coef_[1]) +glue("icept", icept) +glue("sqftc", sqftc) +glue("bedsc", bedsc) +``` + +$\text{house sale price} =$ {glue:text}`icept` $+$ {glue:text}`sqftc` $\cdot (\text{house size})$ {glue:text}`bedsc` $\cdot (\text{number of bedrooms})$ + +This model is more interpretable than the multivariable KNN +regression model; we can write a mathematical equation that explains how +each predictor is affecting the predictions. But as always, we should +question how well multivariable linear regression is doing compared to +the other tools we have, such as simple linear regression +and multivariable KNN regression. If this comparison is part of +the model tuning process—for example, if we are trying + out many different sets of predictors for multivariable linear +and KNN regression—we must perform this comparison using +cross-validation on only our training data. 
But if we have already +decided on a small number (e.g., 2 or 3) of tuned candidate models and +we want to make a final comparison, we can do so by comparing the prediction +error of the methods on the test data. + +```{code-cell} ipython3 +lm_mult_test_RMSPE +``` + +```{index} RMSPE +``` + +We obtain an RMSPE for the multivariable linear regression model +of \${glue:text}`sacr_mult_RMSPE`. This prediction error + is less than the prediction error for the multivariable KNN regression model, +indicating that we should likely choose linear regression for predictions of +house sale price on this data set. Revisiting the simple linear regression model +with only a single predictor from earlier in this chapter, we see that the RMSPE for that model was +\${glue:text}`sacr_RMSPE`, +which is slightly higher than that of our more complex model. Our model with two predictors +provided a slightly better fit on test data than our model with just one. +As mentioned earlier, this is not always the case: sometimes including more +predictors can negatively impact the prediction performance on unseen +test data. + ++++ + +## Multicollinearity and outliers + +What can go wrong when performing (possibly multivariable) linear regression? +This section will introduce two common issues—*outliers* and *collinear predictors*—and +illustrate their impact on predictions. + ++++ + +### Outliers + +```{index} outliers +``` + +Outliers are data points that do not follow the usual pattern of the rest of the data. +In the setting of linear regression, these are points that + have a vertical distance to the line of best fit that is either much higher or much lower +than you might expect based on the rest of the data. The problem with outliers is that +they can have *too much influence* on the line of best fit. In general, it is very difficult +to judge accurately which data are outliers without advanced techniques that are beyond +the scope of this book. 
+ +But to illustrate what can happen when you have outliers, {numref}`fig:08-lm-outlier` +shows a small subset of the Sacramento housing data again, except we have added a *single* data point (highlighted +in red). This house is 5,000 square feet in size, and sold for only \$50,000. Unbeknownst to the +data analyst, this house was sold by a parent to their child for an absurdly low price. Of course, +this is not representative of the real housing market values that the other data points follow; +the data point is an *outlier*. In orange we plot the original line of best fit, and in red +we plot the new line of best fit including the outlier. You can see how different the red line +is from the orange line, which is entirely caused by that one extra outlier data point. + +```{code-cell} ipython3 +:tags: [remove-cell] + +sacramento_train_small = sacramento_train.sample(100, random_state=2) +sacramento_outlier = pd.DataFrame({"sqft": [5000], "price": [50000]}) +sacramento_concat_df = pd.concat((sacramento_train_small, sacramento_outlier)) + +lm_plot_outlier = ( + alt.Chart(sacramento_train_small) + .mark_circle() + .encode( + x=alt.X("sqft", title="House size (square feet)", scale=alt.Scale(zero=False)), + y=alt.Y( + "price", + title="Price (USD)", + axis=alt.Axis(format="$,.0f"), + scale=alt.Scale(zero=False), + ), + ) +) +lm_plot_outlier += lm_plot_outlier.transform_regression("sqft", "price").mark_line( + color="#ff7f0e" +) + +outlier_pt = ( + alt.Chart(sacramento_outlier) + .mark_circle(color="red", size=100) + .encode(x="sqft", y="price") +) + +outlier_line = ( + ( + alt.Chart(sacramento_concat_df) + .mark_circle() + .encode( + x=alt.X( + "sqft", title="House size (square feet)", scale=alt.Scale(zero=False) + ), + y=alt.Y( + "price", + title="Price (USD)", + axis=alt.Axis(format="$,.0f"), + scale=alt.Scale(zero=False), + ), + ) + ) + .transform_regression("sqft", "price") + .mark_line(color="red") +) + +lm_plot_outlier += outlier_pt + outlier_line + 
+glue("fig:08-lm-outlier", lm_plot_outlier) +``` + +:::{glue:figure} fig:08-lm-outlier +:name: fig:08-lm-outlier + +Scatter plot of a subset of the data, with outlier highlighted in red. +::: + ++++ + +Fortunately, if you have enough data, the inclusion of one or two +outliers—as long as their values are not *too* wild—will +typically not have a large effect on the line of best fit. {numref}`fig:08-lm-outlier-2` shows how that same outlier data point from earlier +influences the line of best fit when we are working with the entire original +Sacramento training data. You can see that with this larger data set, the line +changes much less when adding the outlier. +Nevertheless, it is still important when working with linear regression to critically +think about how much any individual data point is influencing the model. + +```{code-cell} ipython3 +:tags: [remove-cell] + +sacramento_concat_df = pd.concat((sacramento_train, sacramento_outlier)) + +lm_plot_outlier_large = ( + alt.Chart(sacramento_train) + .mark_circle() + .encode( + x=alt.X("sqft", title="House size (square feet)", scale=alt.Scale(zero=False)), + y=alt.Y( + "price", + title="Price (USD)", + axis=alt.Axis(format="$,.0f"), + scale=alt.Scale(zero=False), + ), + ) +) +lm_plot_outlier_large += lm_plot_outlier_large.transform_regression( + "sqft", "price" +).mark_line(color="#ff7f0e") + +outlier_line = ( + ( + alt.Chart(sacramento_concat_df) + .mark_circle() + .encode( + x=alt.X( + "sqft", title="House size (square feet)", scale=alt.Scale(zero=False) + ), + y=alt.Y( + "price", + title="Price (USD)", + axis=alt.Axis(format="$,.0f"), + scale=alt.Scale(zero=False), + ), + ) + ) + .transform_regression("sqft", "price") + .mark_line(color="red") +) + +lm_plot_outlier_large += outlier_pt + outlier_line + +glue("fig:08-lm-outlier-2", lm_plot_outlier_large) +``` + +:::{glue:figure} fig:08-lm-outlier-2 +:name: fig:08-lm-outlier-2 + +Scatter plot of the full data, with outlier highlighted in red. 
+::: + ++++ + +### Multicollinearity + +```{index} colinear +``` + +```{index} see: multicolinear; colinear +``` + +The second, and much more subtle, issue can occur when performing multivariable +linear regression. In particular, if you include multiple predictors that are +strongly linearly related to one another, the coefficients that describe the +plane of best fit can be very unreliable—small changes to the data can +result in large changes in the coefficients. Consider an extreme example using +the Sacramento housing data where the house was measured twice by two people. +Since the two people are each slightly inaccurate, the two measurements might +not agree exactly, but they are very strongly linearly related to each other, +as shown in {numref}`fig:08-lm-multicol`. + +```{code-cell} ipython3 +:tags: [remove-cell] + +np.random.seed(1) +sacramento_train = sacramento_train.assign( + sqft1=sacramento_train["sqft"] + + 100 + * np.random.choice(range(1000000), size=len(sacramento_train), replace=True) + / 1000000 +) +sacramento_train = sacramento_train.assign( + sqft2=sacramento_train["sqft"] + + 100 + * np.random.choice(range(1000000), size=len(sacramento_train), replace=True) + / 1000000 +) +sacramento_train = sacramento_train.assign( + sqft3=sacramento_train["sqft"] + + 100 + * np.random.choice(range(1000000), size=len(sacramento_train), replace=True) + / 1000000 +) +sacramento_train + +lm_plot_multicol_1 = ( + alt.Chart(sacramento_train) + .mark_circle() + .encode( + x=alt.X("sqft", title="House size measurement 1 (square feet)"), + y=alt.Y("sqft1", title="House size measurement 2 (square feet)"), + ) +) + +glue("fig:08-lm-multicol", lm_plot_multicol_1) +``` + +:::{glue:figure} fig:08-lm-multicol +:name: fig:08-lm-multicol + +Scatter plot of house size (in square feet) measured by person 1 versus house size (in square feet) measured by person 2. 
+::: + +```{code-cell} ipython3 +:tags: [remove-cell] + +# first LM +lm_fit1 = LinearRegression() +X_train = sacramento_train[["sqft", "sqft1"]] +y_train = sacramento_train[["price"]] + +lm_fit1.fit(X_train, y_train) + +icept1 = "{0:,.0f}".format(lm_fit1.intercept_[0]) +sqft1 = "{0:,.0f}".format(lm_fit1.coef_[0][0]) +sqft11 = "{0:,.0f}".format(lm_fit1.coef_[0][1]) +glue("icept1", icept1) +glue("sqft1", sqft1) +glue("sqft11", sqft11) + +# second LM +lm_fit2 = LinearRegression() +X_train = sacramento_train[["sqft", "sqft2"]] +y_train = sacramento_train[["price"]] + +lm_fit2.fit(X_train, y_train) + +icept2 = "{0:,.0f}".format(lm_fit2.intercept_[0]) +sqft2 = "{0:,.0f}".format(lm_fit2.coef_[0][0]) +sqft22 = "{0:,.0f}".format(lm_fit2.coef_[0][1]) +glue("icept2", icept2) +glue("sqft2", sqft2) +glue("sqft22", sqft22) + +# third LM +lm_fit3 = LinearRegression() +X_train = sacramento_train[["sqft", "sqft3"]] +y_train = sacramento_train[["price"]] + +lm_fit3.fit(X_train, y_train) + +icept3 = "{0:,.0f}".format(lm_fit3.intercept_[0]) +sqft3 = "{0:,.0f}".format(lm_fit3.coef_[0][0]) +sqft33 = "{0:,.0f}".format(lm_fit3.coef_[0][1]) +glue("icept3", icept3) +glue("sqft3", sqft3) +glue("sqft33", sqft33) +``` + + If we again fit the multivariable linear regression model on this data, then the plane of best fit +has regression coefficients that are very sensitive to the exact values in the data. 
For example, +if we change the data ever so slightly—e.g., by running cross-validation, which splits +up the data randomly into different chunks—the coefficients vary by large amounts: + +Best Fit 1: $\text{house sale price} =$ {glue:text}`icept1` $+$ {glue:text}`sqft1` $\cdot (\text{house size 1}$ $(\text{ft}^2)) +$ {glue:text}`sqft11` $\cdot (\text{house size 2}$ $(\text{ft}^2)).$ + +Best Fit 2: $\text{house sale price} =$ {glue:text}`icept2` $+$ {glue:text}`sqft2` $\cdot (\text{house size 1}$ $(\text{ft}^2)) +$ {glue:text}`sqft22` $\cdot (\text{house size 2}$ $(\text{ft}^2)).$ + +Best Fit 3: $\text{house sale price} =$ {glue:text}`icept3` $+$ {glue:text}`sqft3` $\cdot (\text{house size 1}$ $(\text{ft}^2)) +$ {glue:text}`sqft33` $\cdot (\text{house size 2}$ $(\text{ft}^2)).$ + + Therefore, when performing multivariable linear regression, it is important to avoid including very +linearly related predictors. However, techniques for doing so are beyond the scope of this +book; see the list of additional resources at the end of this chapter to find out where you can learn more. + ++++ + +## Designing new predictors + +We were quite fortunate in our initial exploration to find a predictor variable (house size) +that seems to have a meaningful and nearly linear relationship with our response variable (sale price). +But what should we do if we cannot immediately find such a nice variable? +Well, sometimes it is just a fact that the variables in the data do not have enough of +a relationship with the response variable to provide useful predictions. For example, +if the only available predictor was "the current house owner's favorite ice cream flavor", +we likely would have little hope of using that variable to predict the house's sale price +(barring any future remarkable scientific discoveries about the relationship between +the housing market and homeowner ice cream preferences). In cases like these, +the only option is to obtain measurements of more useful variables. 
+ +There are, however, a wide variety of cases where the predictor variables do have a +meaningful relationship with the response variable, but that relationship does not fit +the assumptions of the regression method you have chosen. For example, a data frame `df` +with two variables—`x` and `y`—with a nonlinear relationship between the two variables +will not be fully captured by simple linear regression, as shown in {numref}`fig:08-predictor-design`. + +```{code-cell} ipython3 +:tags: [remove-cell] + +np.random.seed(3) +df = pd.DataFrame({"x": np.random.choice(range(10000), size=100, replace=True) / 10000}) +df = df.assign( + y=df["x"] ** 3 + + 0.2 * np.random.choice(range(10000), size=100, replace=True) / 10000 + - 0.1 +) +``` + +```{code-cell} ipython3 +df +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +curve_plt = ( + alt.Chart(df) + .mark_circle() + .encode( + x=alt.X("x", scale=alt.Scale(zero=False)), + y=alt.Y( + "y", + scale=alt.Scale(zero=False), + ), + ) +) + + +curve_plt += curve_plt.transform_regression("x", "y").mark_line(color="#ff7f0e") + +glue("fig:08-predictor-design", curve_plt) +``` + +:::{glue:figure} fig:08-predictor-design +:name: fig:08-predictor-design + +Example of a data set with a nonlinear relationship between the predictor and the response. +::: + ++++ + +```{index} predictor design +``` + +Instead of trying to predict the response `y` using a linear regression on `x`, +we might have some scientific background about our problem to suggest that `y` +should be a cubic function of `x`. So before performing regression, +we might *create a new predictor variable* `z`: + +```{code-cell} ipython3 +df["z"] = df["x"] ** 3 +``` + +Then we can perform linear regression for `y` using the predictor variable `z`, +as shown in {numref}`fig:08-predictor-design-2`. +Here you can see that the transformed predictor `z` helps the +linear regression model make more accurate predictions. 
+Note that none of the `y` response values have changed between {numref}`fig:08-predictor-design` +and {numref}`fig:08-predictor-design-2`; the only change is that the `x` values +have been replaced by `z` values. + +```{code-cell} ipython3 +:tags: [remove-cell] + +curve_plt2 = ( + alt.Chart(df) + .mark_circle() + .encode( + x=alt.X("z", title="z = x³" ,scale=alt.Scale(zero=False)), + y=alt.Y( + "y", + scale=alt.Scale(zero=False), + ), + ) +) + + +curve_plt2 += curve_plt2.transform_regression("z", "y").mark_line(color="#ff7f0e") + +glue("fig:08-predictor-design-2", curve_plt2) +``` + +:::{glue:figure} fig:08-predictor-design-2 +:name: fig:08-predictor-design-2 + +Relationship between the transformed predictor and the response. +::: + ++++ + +```{index} see: feature engineering; predictor design +``` + +The process of +transforming predictors (and potentially combining multiple predictors in the process) +is known as *feature engineering*. In real data analysis +problems, you will need to rely on +a deep understanding of the problem—as well as the wrangling tools +from previous chapters—to engineer useful new features that improve +predictive performance. + +```{note} +Feature engineering +is *part of tuning your model*, and as such you must not use your test data +to evaluate the quality of the features you produce. You are free to use +cross-validation, though! +``` + ++++ + +## The other sides of regression + +So far in this textbook we have used regression only in the context of +prediction. However, regression can also be seen as a method to understand and +quantify the effects of individual variables on a response variable of interest. 
+In the housing example from this chapter, beyond just using past data +to predict future sale prices, +we might also be interested in describing the +individual relationships of house size and the number of bedrooms with house price, +quantifying how strong each of these relationships are, and assessing how accurately we +can estimate their magnitudes. And even beyond that, we may be interested in +understanding whether the predictors *cause* changes in the price. +These sides of regression are well beyond the scope of this book; but +the material you have learned here should give you a foundation of knowledge +that will serve you well when moving to more advanced books on the topic. + ++++ + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Regression II: linear regression" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. + ++++ + +## Additional resources + +- The [`scikit-learn` website](https://scikit-learn.org/stable/) is an excellent + reference for more details on, and advanced usage of, the functions and + packages in the past two chapters. Aside from that, it also offers many + useful [tutorials](https://scikit-learn.org/stable/tutorial/index.html) and [an extensive list + of more advanced examples](https://scikit-learn.org/stable/auto_examples/index.html#general-examples) + that you can use to continue learning beyond the scope of this book. 
+- *An Introduction to Statistical Learning* {cite:p}`james2013introduction` provides + a great next stop in the process of + learning about regression. Chapter 3 covers linear regression at a slightly + more mathematical level than we do here, but it is not too large a leap and so + should provide a good stepping stone. Chapter 6 discusses how to pick a subset + of "informative" predictors when you have a data set with many predictors, and + you expect only a few of them to be relevant. Chapter 7 covers regression + models that are more flexible than linear regression models but still enjoy the + computational efficiency of linear regression. In contrast, the KNN methods we + covered earlier are indeed more flexible but become very slow when given lots + of data. + +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/setup.md b/pull313/_sources/setup.md new file mode 100644 index 00000000..45f81c3e --- /dev/null +++ b/pull313/_sources/setup.md @@ -0,0 +1,343 @@ +--- +jupytext: + cell_metadata_filter: -all + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(move-to-your-own-machine)= +# Setting up your computer + +## Overview + +In this chapter, you'll learn how to set up the software needed to follow along +with this book on your own computer. Given that installation instructions can +vary based on computer setup, we provide instructions for +multiple operating systems (Ubuntu Linux, MacOS, and Windows). +Although the instructions in this chapter will likely work on many systems, +we have specifically verified that they work on a computer that: + +- runs Windows 10 Home, MacOS 13 Ventura, or Ubuntu 22.04, +- uses a 64-bit CPU, +- has a connection to the internet, +- uses English as the default language. 
+ + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + +- Download the worksheets that accompany this book. +- Install the Docker virtualization engine. +- Edit and run the worksheets using JupyterLab running inside a Docker container. +- Install Git, JupyterLab Desktop, and python packages. +- Edit and run the worksheets using JupyterLab Desktop. + +## Obtaining the worksheets for this book + +The worksheets containing exercises for this book +are online at [https://worksheets.python.datasciencebook.ca](https://worksheets.python.datasciencebook.ca). +The worksheets can be launched directly from that page using the Binder links in the rightmost +column of the table. This is the easiest way to access the worksheets, but note that you will not +be able to save your work and return to it again later. +In order to save your progress, you will need to download the worksheets to your own computer and +work on them locally. You can download the worksheets as a compressed zip file +using [the link at the top of the page](https://github.com/UBC-DSCI/data-science-a-first-intro-python-worksheets/archive/refs/heads/main.zip). +Once you unzip the downloaded file, you will have a folder containing all of the Jupyter notebook worksheets +accompanying this book. See {numref}`Chapter %s ` for +instructions on working with Jupyter notebooks. + +## Working with Docker + +Once you have downloaded the worksheets, you will next need to install and run +the software required to work on Jupyter notebooks on your own computer. Doing +this setup manually can be quite tricky, as it involves quite a few different +software packages, not to mention getting the right versions of +everything—the worksheets and autograder tests may not work unless all the versions are +exactly right! To keep things simple, we instead recommend that you install +[Docker](https://docker.com). 
Docker lets you run your Jupyter notebooks inside +a pre-built *container* that comes with precisely the right versions of +all software packages needed run the worksheets that come with this book. +```{index} Docker +``` + +```{note} +A *container* is a virtualized user space within your computer. +Within the container, you can run software in isolation without interfering with the +other software that already exists on your machine. In this book, we use +a container to run a specific version of the python programming +language, as well as other necessary packages. The container ensures that +the worksheets function correctly, even if you have a different version of python +installed on your computer—or even if you haven't installed python at all! +``` + +### Windows + +**Installation** To install Docker on Windows, +visit [the online Docker documentation](https://docs.docker.com/desktop/install/windows-install/), +and download the `Docker Desktop Installer.exe` file. Double-click the file to open the installer +and follow the instructions on the installation wizard, choosing **WSL-2** instead of **Hyper-V** when prompted. + +```{note} +Occasionally, when you first run Docker on Windows, you will encounter an error message. Some common errors you may see: + +- If you need to update WSL, you can enter `cmd.exe` in the Start menu to run the command line. Type `wsl --update` to update WSL. +- If the admin account on your computer is different to your user account, you must add the user to the "docker-users" group. + Run Computer Management as an administrator and navigate to `Local Users` and `Groups -> Groups -> docker-users`. Right-click to + add the user to the group. Log out and log back in for the changes to take effect. +- If you need to enable virtualization, you will need to edit your BIOS. Restart your computer, and enter the BIOS using the hotkey + (usually Delete, Esc, and/or one of the F# keys). 
Look for an "Advanced" menu, and under your CPU settings, set the "Virtualization" option + to "enabled". Then save the changes and reboot your machine. If you are not familiar with BIOS editing, you may want to find an expert + to help you with this, as editing the BIOS can be dangerous. Detailed instructions for doing this are beyond the scope of this book. +``` + +**Running JupyterLab** Run Docker Desktop. Once it is running, you need to download and run the +Docker *image* that we have made available for the worksheets (an *image* is like a "snapshot" of a +computer with all the right packages pre-installed). You only need to do this step one time; the image will remain +the next time you run Docker Desktop. +In the Docker Desktop search bar, enter `ubcdsci/py-dsci-100`, as this is +the name of the image. You will see the `ubcdsci/py-dsci-100` image in the list ({numref}`docker-desktop-search`), +and "latest" in the Tag drop down menu. We need to change "latest" to the right image version before proceeding. +To find the right tag, open +the [`Dockerfile` in the worksheets repository](https://raw.githubusercontent.com/UBC-DSCI/data-science-a-first-intro-python-worksheets/main/Dockerfile), +and look for the line `FROM ubcdsci/py-dsci-100:` followed by the tag consisting of a sequence of numbers and letters. +Back in Docker Desktop, in the "Tag" drop down menu, click that tag to select the correct image version. Then click +the "Pull" button to download the image. + +```{figure} img/setup/docker-1.png +--- +height: 400px +name: docker-desktop-search +--- +The Docker Desktop search window. Make sure to click the Tag drop down menu and find the right version of the image before clicking the Pull button to download it. +``` + +Once the image is done downloading, click the "Images" button on the left side +of the Docker Desktop window ({numref}`docker-desktop-images`). You +will see the recently downloaded image listed there under the "Local" tab. 
+ +```{figure} img/setup/docker-2.png +--- +height: 400px +name: docker-desktop-images +--- +The Docker Desktop images tab. +``` + +To start up a *container* using that image, click the play button beside the +image. This will open the run configuration menu ({numref}`docker-desktop-runconfig`). +Expand the "Optional settings" drop down menu. In the "Host port" textbox, enter +`8888`. In the "Volumes" section, click the "Host path" box and navigate to the +folder where your Jupyter worksheets are stored. In the "Container path" text +box, enter `/home/jovyan/work`. Then click the "Run" button to start the +container. + +```{figure} img/setup/docker-3.png +--- +height: 400px +name: docker-desktop-runconfig +--- +The Docker Desktop container run configuration menu. +``` + +After clicking the "Run" button, you will see a terminal. The terminal will then print +some text as the Docker container starts. Once the text stops scrolling, find the +URL in the terminal that starts +with `http://127.0.0.1:8888` (highlighted by the red box in {numref}`docker-desktop-url`), and paste it +into your browser to start JupyterLab. + +```{figure} img/setup/docker-4.png +--- +height: 400px +name: docker-desktop-url +--- +The terminal text after running the Docker container. The red box indicates the URL that you should paste into your browser to open JupyterLab. +``` + +When you are done working, make sure to shut down and remove the container by +clicking the red trash can symbol (in the top right corner of {numref}`docker-desktop-url`). +You will not be able to start the container again until you do so. +More information on installing and running +Docker on Windows, as well as troubleshooting tips, can +be found in [the online Docker documentation](https://docs.docker.com/desktop/install/windows-install/). 
+ +### MacOS + +**Installation** To install Docker on MacOS, +visit [the online Docker documentation](https://docs.docker.com/desktop/install/mac-install/), and +download the `Docker.dmg` installation file that is appropriate for your +computer. To know which installer is right for your machine, you need to know +whether your computer has an Intel processor (older machines) or an +Apple processor (newer machines); the [Apple support page](https://support.apple.com/en-ca/HT211814) has +information to help you determine which processor you have. Once downloaded, double-click +the file to open the installer, then drag the Docker icon to the Applications folder. +Double-click the icon in the Applications folder to start Docker. In the installation +window, use the recommended settings. + +**Running JupyterLab** Run Docker Desktop. Once it is running, follow the +instructions above in the Windows section on *Running JupyterLab* (the user +interface is the same). More information on installing and running Docker on +MacOS, as well as troubleshooting tips, can be +found in [the online Docker documentation](https://docs.docker.com/desktop/install/mac-install/). + +### Ubuntu + +**Installation** To install Docker on Ubuntu, open the terminal and enter the following five commands. +``` +sudo apt update +sudo apt install ca-certificates curl gnupg +curl -fsSL https://get.docker.com -o get-docker.sh +sudo chmod u+x get-docker.sh +sudo sh get-docker.sh +``` + +**Running JupyterLab** First, open the [`Dockerfile` in the worksheets repository](https://raw.githubusercontent.com/UBC-DSCI/data-science-a-first-intro-python-worksheets/main/Dockerfile), +and look for the line `FROM ubcdsci/py-dsci-100:` followed by a tag consisting of a sequence of numbers and letters. +Then in the terminal, navigate to the directory where you want to run JupyterLab, and run +the following command, replacing `TAG` with the *tag* you found earlier. 
+``` +docker run --rm -v $(pwd):/home/jovyan/work -p 8888:8888 ubcdsci/py-dsci-100:TAG jupyter lab +``` +The terminal will then print some text as the Docker container starts. Once the text stops scrolling, find the +URL in your terminal that starts with `http://127.0.0.1:8888` (highlighted by the +red box in {numref}`ubuntu-docker-terminal`), and paste it into your browser to start JupyterLab. +More information on installing and running Docker on Ubuntu, as well as troubleshooting tips, can be found in +[the online Docker documentation](https://docs.docker.com/engine/install/ubuntu/). + +```{figure} img/setup/ubuntu-docker.png +--- +height: 400px +name: ubuntu-docker-terminal +--- +The terminal text after running the Docker container in Ubuntu. The red box indicates the URL that you should paste into your browser to open JupyterLab. +``` + + +## Working with JupyterLab Desktop + +You can also run the worksheets accompanying this book on your computer +using [JupyterLab Desktop](https://github.com/jupyterlab/jupyterlab-desktop). +The advantage of JupyterLab Desktop over Docker is that it can be easier to install; +Docker can sometimes run into some fairly technical issues (especially on Windows computers) +that require expert troubleshooting. The downside of JupyterLab Desktop is that there is a (very) small chance that +you may not end up with the right versions of all the python packages needed for the worksheets. Docker, on the other hand, +*guarantees* that the worksheets will work exactly as intended. + +In this section, we will cover how to install JupyterLab Desktop, +Git and the JupyterLab Git extension (for version control, as discussed in {numref}`Chapter %s `), and +all of the python packages needed to run +the code in this book. +```{index} JupyterLab Desktop, git;installation +``` + +### Windows + +**Installation** First, we will install Git for version control. 
+Go to [the Git download page](https://git-scm.com/download/win) and +download the Windows version of Git. Once the download has finished, run the installer and accept +the default configuration for all pages. +Next, visit the ["Installation" section of the JupyterLab Desktop homepage](https://github.com/jupyterlab/jupyterlab-desktop#installation). +Download the `JupyterLab-Setup-Windows.exe` installer file for Windows. +Double-click the installer to run it, use the default settings. +Run JupyterLab Desktop by clicking the icon on your desktop. + + +**Configuring JupyterLab Desktop** +Next, in the JupyterLab Desktop graphical interface that appears ({numref}`setup-jlab-gui`), +you will see text at the bottom saying "Python environment not found". Click "Install using the bundled installer" +to set up the environment. + +```{figure} img/setup/jlab-1.png +--- +height: 400px +name: setup-jlab-gui +--- +The JupyterLab Desktop graphical user interface. +``` + +Next, we need to add the JupyterLab Git extension (so that +we can use version control directly from within JupyterLab Desktop), +the IPython kernel (to enable the python programming language), +and various python software packages. Click "New session..." in the JupyterLab Desktop +user interface, then scroll to the bottom, and click "Terminal" under the "Other" heading ({numref}`setup-jlab-gui-2`). + +```{figure} img/setup/jlab-2.png +--- +height: 400px +name: setup-jlab-gui-2 +--- +A JupyterLab Desktop session, showing the Terminal option at the bottom. +``` + + +In this terminal, run the following commands: +``` +pip install --upgrade jupyterlab-git +conda env update --file https://raw.githubusercontent.com/UBC-DSCI/data-science-a-first-intro-python-worksheets/main/environment.yml +``` +The second command installs the specific python and package versions specified in +the `environment.yml` file found in +[the worksheets repository](https://worksheets.python.datasciencebook.ca). 
+We will always keep the versions in the `environment.yml` file updated +so that they are compatible with the exercise worksheets that accompany the book. +Once all of the software installation is complete, it is a good idea to restart +JupyterLab Desktop entirely before you proceed to doing your data analysis. +This will ensure all the software and settings you put in place are +correctly set up and ready for use. + + +### MacOS + +**Installation** First, we will install Git for version control. +Open the terminal ([how-to video](https://youtu.be/5AJbWEWwnbY)) +and type the following command: + +``` +xcode-select --install +``` +Next, visit the ["Installation" section of the JupyterLab Desktop homepage](https://github.com/jupyterlab/jupyterlab-desktop#installation). +Download the `JupyterLab-Setup-MacOS-x64.dmg` or `JupyterLab-Setup-MacOS-arm64.dmg` installer file. +To know which installer is right for your machine, you need to know +whether your computer has an Intel processor (older machines) or an +Apple processor (newer machines); the [Apple support page](https://support.apple.com/en-ca/HT211814) has +information to help you determine which processor you have. +Once downloaded, double-click the file to open the installer, then drag +the JupyterLab Desktop icon to the Applications folder. Double-click +the icon in the Applications folder to start JupyterLab Desktop. + +**Configuring JupyterLab Desktop** From this point onward, with JupyterLab Desktop running, +follow the instructions in the Windows section on *Configuring JupyterLab Desktop* to set up the +environment, install the JupyterLab Git extension, and install +the various python software packages needed for the worksheets. + +### Ubuntu + +**Installation** First, we will install Git for version control. 
+Open the terminal and type the following commands: +``` +sudo apt update +sudo apt install git +``` +Next, visit the ["Installation" section of the JupyterLab Desktop homepage](https://github.com/jupyterlab/jupyterlab-desktop#installation). +Download the `JupyterLab-Setup-Debian.deb` installer file for Ubuntu/Debian. +Open a terminal, navigate to where the installer file was downloaded, and run the command +``` +sudo dpkg -i JupyterLab-Setup-Debian.deb +``` +Run JupyterLab Desktop using the command +``` +jlab +``` + +**Configuring JupyterLab Desktop** From this point onward, with JupyterLab Desktop running, +follow the instructions in the Windows section on *Configuring JupyterLab Desktop* to set up the +environment, install the JupyterLab Git extension, and install +the various python software packages needed for the worksheets. diff --git a/pull313/_sources/version-control.md b/pull313/_sources/version-control.md new file mode 100644 index 00000000..a553efdf --- /dev/null +++ b/pull313/_sources/version-control.md @@ -0,0 +1,1181 @@ +--- +jupytext: + cell_metadata_filter: -all + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.8 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(getting-started-with-version-control)= +# Collaboration with version control + +> *You mostly collaborate with yourself, +> and me-from-two-months-ago never responds to email.* +> +> --Mark T. Holder + ++++ + +## Overview + +```{index} git, GitHub +``` + +This chapter will introduce the concept of using version control systems +to track changes to a project over its lifespan, to share +and edit code in a collaborative team, +and to distribute the finished project to its intended audience. +This chapter will also introduce how to use +the two most common version control tools: Git for local version control, +and GitHub for remote version control. 
+We will focus on the most common version control operations +used day-to-day in a standard data science project. +There are many user interfaces for Git; in this chapter +we will cover the Jupyter Git interface. + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + +- Describe what version control is and why data analysis projects can benefit from it. +- Create a remote version control repository on GitHub. +- Use Jupyter's Git version control tools for project versioning and collaboration: + - Clone a remote version control repository to create a local repository. + - Commit changes to a local version control repository. + - Push local changes to a remote version control repository. + - Pull changes from a remote version control repository to a local version control repository. + - Resolve merge conflicts. +- Give collaborators access to a remote GitHub repository. +- Communicate with collaborators using GitHub issues. +- Use best practices when collaborating on a project with others. + +## What is version control, and why should I use it? + +Data analysis projects often require iteration +and revision to move from an initial idea to a finished product +ready for the intended audience. +Without deliberate and conscious effort towards tracking changes +made to the analysis, projects tend to become messy. +This mess can have serious, negative repercussions on an analysis project, +including interesting results files that your code cannot reproduce, +temporary files with snippets of ideas that are forgotten or +not easy to find, mind-boggling file names that make it unclear which is +the current working version of the file (e.g., `document_final_draft_final.txt`, +`to_hand_in_final_v2.txt`, etc.), and more. 
+ +Additionally, the iterative nature of data analysis projects +means that most of the time, the final version of the analysis that is +shared with the audience is only a fraction of what was explored during +the development of that analysis. +Changes in data visualizations and modeling approaches, +as well as some negative results, are often not observable from +reviewing only the final, polished analysis. +The lack of observability of these parts of the analysis development +can lead to others repeating things that did not work well, +instead of seeing what did not work well, +and using that as a springboard to new, more fruitful approaches. + +Finally, data analyses are typically completed by a team of people +rather than a single person. +This means that files need to be shared across multiple computers, +and multiple people often end up editing the project simultaneously. +In such a situation, determining who has the latest version of the +project—and how to resolve conflicting edits—can be a real challenge. + +```{index} version control +``` + +*Version control* helps solve these challenges. Version control is the process +of keeping a record of changes to documents, including when the changes were +made and who made them, throughout the history of their development. It also +provides the means both to view earlier versions of the project and to revert +changes. Version control is most commonly used in software development, but +can be used for any electronic files for any type of project, including data +analyses. Being able to record and view the history of a data analysis project +is important for understanding how and why decisions to use one method or +another were made, among other things. Version control also facilitates +collaboration via tools to share edits with others and resolve conflicting +edits. But even if you're working on a project alone, you should still use +version control. 
It helps you keep track of what you've done, when you did it, +and what you're planning to do next! + ++++ + +```{index} version control;system, version control;repository hosting +``` + +To version control a project, you generally need two things: +a *version control system* and a *repository hosting service*. +The version control system is the software responsible +for tracking changes, sharing changes you make with others, +obtaining changes from others, and resolving conflicting edits. +The repository hosting service is responsible for storing a copy +of the version-controlled project online (a *repository*), +where you and your collaborators can access it remotely, +discuss issues and bugs, and distribute your final product. +For both of these items, there is a wide variety of choices. +In this textbook we'll use Git for version control, +and GitHub for repository hosting, +because both are currently the most widely used platforms. +In the +additional resources section at the end of the chapter, +we list many of the common version control systems +and repository hosting services in use today. + +```{note} +Technically you don't *have to* use a repository hosting service. +You can, for example, version control a project +that is stored only in a folder on your computer—never +sharing it on a repository hosting service. +But using a repository hosting service provides a few big benefits, +including managing collaborator access permissions, +tools to discuss and track bugs, +and the ability to have external collaborators contribute work, +not to mention the safety of having your work backed up in the cloud. +Since most repository hosting services now offer free accounts, +there are not many situations in which you wouldn't +want to use one for your project. 
+``` + +## Version control repositories + +```{index} repository, repository;local, repository;remote +``` + +Typically, when we put a data analysis project under version control, +we create two copies of the repository ({numref}`vc1-no-changes`). +One copy we use as our primary workspace where we create, edit, and delete files. +This copy is commonly referred to as the **local repository**. The local +repository most commonly exists on our computer or laptop, but can also exist within +a workspace on a server (e.g., JupyterHub). +The other copy is typically stored in a repository hosting service (e.g., GitHub), where +we can easily share it with our collaborators. +This copy is commonly referred to as the **remote repository**. + +```{figure} img/version-control/vc1-no-changes.png +--- +name: vc1-no-changes +--- +Schematic of local and remote version control repositories. +``` + +```{index} working directory, git;commit +``` + +Both copies of the repository have a **working directory** +where you can create, store, edit, and delete +files (e.g., `analysis.ipynb` in {numref}`vc1-no-changes`). +Both copies of the repository also maintain a full project history +({numref}`vc1-no-changes`). This history is a record of all versions of the +project files that have been created. The repository history is not +automatically generated; Git must be explicitly told when to record +a version of the project. These records are called **commits**. They +are a snapshot of the file contents as well +metadata about the repository at that time the record was created (who made the +commit, when it was made, etc.). In the local and remote repositories shown in +{numref}`vc1-no-changes`, there are two commits represented as gray +circles. Each commit can be identified by a +human-readable **message**, which you write when you make a commit, and a +**commit hash** that Git automatically adds for you. 
+ +The purpose of the message is to contain a brief, rich description +of what work was done since the last commit. +Messages act as a very useful narrative +of the changes to a project over its lifespan. +If you ever want to view or revert to an earlier version of the project, +the message can help you identify which commit to view or revert to. +In {numref}`vc1-no-changes`, you can see two such messages, +one for each commit: `Created README.md` and `Added analysis draft`. + +```{index} hash +``` + + + +The hash is a string of characters consisting of about 40 letters and numbers. +The purpose of the hash is to serve as a unique identifier for the commit, +and is used by Git to index project history. Although hashes are quite long—imagine +having to type out 40 precise characters to view an old project version!—Git is able +to work with shorter versions of hashes. In {numref}`vc1-no-changes`, you can see +two of these shortened hashes, one for each commit: `Daa29d6` and `884c7ce`. + +## Version control workflows + +When you work in a local version-controlled repository, there are generally three additional +steps you must take as part of your regular workflow. In addition to +just working on files—creating, +editing, and deleting files as you normally would—you must: + +1. Tell Git when to make a commit of your own changes in the local repository. +2. Tell Git when to send your new commits to the remote GitHub repository. +3. Tell Git when to retrieve any new changes (that others made) from the remote GitHub repository. + +In this section we will discuss all three of these steps in detail. + +(commit-changes)= +### Committing changes to a local repository + +When working on files in your local version control +repository (e.g., using Jupyter) and saving your work, these changes will only initially exist in the +working directory of the local repository ({numref}`vc2-changes`). 
+ +```{figure} img/version-control/vc2-changes.png +--- +name: vc2-changes +--- +Local repository with changes to files. +``` + +```{index} git;add, staging area +``` + +Once you reach a point that you want Git to keep a record +of the current version of your work, you need to commit +(i.e., snapshot) your changes. A prerequisite to this is telling Git which +files should be included in that snapshot. We call this step **adding** the +files to the **staging area**. +Note that the staging area is not a real physical location on your computer; +it is instead a conceptual placeholder for these files until they are committed. +The benefit of the Git version control system using a staging area is that you +can choose to commit changes in only certain files. For example, +in {numref}`vc-ba2-add`, we add only the two files +that are important to the analysis project (`analysis.ipynb` and `README.md`) +and not our personal scratch notes for the project (`notes.txt`). + +```{figure} img/version-control/vc-ba2-add.png +--- +name: vc-ba2-add +--- +Adding modified files to the staging area in the local repository. +``` + + + +Once the files we wish to commit have been added +to the staging area, we can then commit those files to the repository history ({numref}`vc-ba3-commit`). +When we do this, we are required to include a helpful *commit message* to tell +collaborators (which often includes future you!) about the changes that were +made. In {numref}`vc-ba3-commit`, the message is `Message about changes...`; in +your work you should make sure to replace this with an +informative message about what changed. It is also important to note here that +these changes are only being committed to the local repository's history. The +remote repository on GitHub has not changed, and collaborators would not yet be +able to see your new changes. 
+ +```{figure} img/version-control/vc-ba3-commit.png +--- +name: vc-ba3-commit +--- +Committing the modified files in the staging area to the local repository history, with an informative message about what changed. +``` + + +### Pushing changes to a remote repository + +```{index} git;push +``` + + + +Once you have made one or more commits that you want to share with your collaborators, +you need to **push** (i.e., send) those commits back to GitHub ({numref}`vc5-push`). This updates +the history in the remote repository (i.e., GitHub) to match what you have in your +local repository. Now when collaborators interact with the remote repository, they will be able +to see the changes you made. And you can also take comfort in the fact that your work is now backed +up in the cloud! + +```{figure} img/version-control/vc5-push.png +--- +name: vc5-push +--- +Pushing the commit to send the changes to the remote repository on GitHub. +``` + +### Pulling changes from a remote repository + +If you are working on a project with collaborators, they will also be making changes to files +(e.g., to the analysis code in a Jupyter notebook and the project's README file), +committing them to their own local repository, and pushing their commits to the remote GitHub repository +to share them with you. When they push their changes, those changes will only initially exist in +the remote GitHub repository and not in your local repository ({numref}`vc6-remote-changes`). + +```{figure} img/version-control/vc6-remote-changes.png +--- +name: vc6-remote-changes +--- +Changes pushed by collaborators, or created directly on GitHub will not be automatically sent to your local repository. +``` + +```{index} git;pull +``` + +To obtain the new changes from the remote repository on GitHub, you will need +to **pull** those changes to your own local repository. By pulling changes, +you synchronize your local repository to what is present on GitHub ({numref}`vc7-pull`). 
+Additionally, until you pull changes from the remote repository, you will not +be able to push any more changes yourself (though you will still be able to +work and make commits in your own local repository). + +```{figure} img/version-control/vc7-pull.png +--- +name: vc7-pull +--- +Pulling changes from the remote GitHub repository to synchronize your local repository. +``` + + + +## Working with remote repositories using GitHub + +```{index} repository;remote, GitHub, git;clone +``` + + + +Now that you have been introduced to some of the key general concepts +and workflows of Git version control, we will walk through the practical steps. +There are several different ways to start using version control +with a new project. For simplicity and ease of setup, +we recommend creating a remote repository first. +This section covers how to both create and edit a remote repository on GitHub. +Once you have a remote repository set up, we recommend **cloning** (or copying) that +repository to create a local repository in which you primarily work. +You can clone the repository either +on your own computer or in a workspace on a server (e.g., a JupyterHub server). +{numref}`local-repo-jupyter` below will cover this second step in detail. + +### Creating a remote repository on GitHub + +Before you can create remote repositories on GitHub, +you will need a GitHub account; you can sign up for a free account +at [github.com](https://github.com/). +Once you have logged into your account, you can create a new repository to host +your project by clicking on the "+" icon in the upper right-hand +corner, and then on "New Repository," as shown in +{numref}`new-repository-01`. + +```{figure} img/version-control/new_repository_01.png +--- +name: new-repository-01 +--- +New repositories on GitHub can be created by clicking on "New Repository" from the + menu. 
+```
+
+```{index} repository;public
+```
+
+
+Repositories can be set up with a variety of configurations, including a name,
+optional description, and the inclusion (or not) of several template files.
+One of the most important configuration items to choose is the visibility to the outside world,
+either public or private. *Public* repositories can be viewed by anyone.
+*Private* repositories can be viewed by only you. Both public and private repositories
+are only editable by you, but you can change that by giving access to other collaborators.
+
+To get started with a *public* repository having a template `README.md` file, take the
+following steps shown in {numref}`new-repository-02`:
+
+1. Enter the name of your project repository. In the example below, we use `canadian_languages`. Most repositories follow a similar naming convention involving only lowercase letter words separated by either underscores or hyphens.
+2. Choose an option for the privacy of your repository.
+3. Select "Add a README file." This creates a template `README.md` file in your repository's root folder.
+4. When you are happy with your repository name and configuration, click on the green "Create Repository" button.
+
+```{figure} img/version-control/new_repository_02.png
+---
+name: new-repository-02
+---
+Repository configuration for a project that is public and initialized with a README.md template file.
+```
+
+
+
+A newly created public repository with a `README.md` template file should look something
+like what is shown in {numref}`new-repository-03`.
+
+```{figure} img/version-control/new_repository_03.png
+---
+name: new-repository-03
+---
+A newly created public repository initialized with a README.md template file.
+```
+
+
+
++++
+
+### Editing files on GitHub with the pen tool
+
+```{index} GitHub; pen tool
+```
+
+The pen tool can be used to edit existing plain text files.
When you click on +the pen tool, the file will be opened in a text box where you can use your +keyboard to make changes ({numref}`pen-tool-01` and {numref}`pen-tool-02`). + +```{figure} img/version-control/pen-tool_01.png +--- +name: pen-tool-01 +--- +Clicking on the pen tool opens a text box for editing plain text files. +``` + + +```{figure} img/version-control/pen-tool_02.png +--- +name: pen-tool-02 +--- +The text box where edits can be made after clicking on the pen tool. +``` + +```{index} GitHub; commit +``` + + + +After you are done with your edits, they can be "saved" by *committing* your +changes. When you *commit a file* in a repository, the version control system +takes a snapshot of what the file looks like. As you continue working on the +project, over time you will possibly make many commits to a single file; this +generates a useful version history for that file. On GitHub, if you click the +green "Commit changes" button, it will save the file and then make a commit +({numref}`pen-tool-03`). + +Recall from {numref}`commit-changes` that you normally have to add files +to the staging area before committing them. Why don't we have to do that when +we work directly on GitHub? Behind the scenes, when you click the green "Commit changes" +button, GitHub *is* adding that one file to the staging area prior to committing it. +But note that on GitHub you are limited to committing changes to only one file at a time. +When you work in your own local repository, you can commit +changes to multiple files simultaneously. This is especially useful when one +"improvement" to the project involves modifying multiple files. +You can also do things like run code when working in a local repository, which you cannot +do on GitHub. In general, editing on GitHub is reserved for small edits to plain text files. 
+ +```{figure} img/version-control/pen-tool_03.png +--- +name: pen-tool-03 +--- +Saving changes using the pen tool requires committing those changes, and an associated commit message. +``` + +### Creating files on GitHub with the "Add file" menu + +```{index} GitHub; add file +``` + + + +The "Add file" menu can be used to create new plain text files and upload files +from your computer. To create a new plain text file, click the "Add file" +drop-down menu and select the "Create new file" option +({numref}`create-new-file-01`). + +```{figure} img/version-control/create-new-file_01.png +--- +name: create-new-file-01 +--- +New plain text files can be created directly on GitHub. +``` + +```{index} markdown +``` + + + +A page will open with a small text box for the file name to be entered, and a +larger text box where the desired file content text can be entered. Note the two +tabs, "Edit new file" and "Preview". Toggling between them lets you enter and +edit text and view what the text will look like when rendered, respectively +({numref}`create-new-file-02`). +Note that GitHub understands and renders `.md` files using a +[markdown syntax](https://guides.github.com/pdfs/markdown-cheatsheet-online.pdf) +very similar to Jupyter notebooks, so the "Preview" tab is especially helpful +for checking markdown code correctness. + +```{figure} img/version-control/create-new-file_02.png +--- +name: create-new-file-02 +--- +New plain text files require a file name in the text box circled in red, and file content entered in the larger text box (red arrow). +``` + +Save and commit your changes by clicking the green "Commit changes" button at the +bottom of the page ({numref}`create-new-file-03`). + +```{figure} img/version-control/create-new-file_03.png +--- +name: create-new-file-03 +--- +To be saved, newly created files are required to be committed along with an associated commit message. 
+``` + +You can also upload files that you have created on your local machine by using +the "Add file" drop-down menu and selecting "Upload files" +({numref}`upload-files-01`). +To select the files from your local computer to upload, you can either drag and +drop them into the gray box area shown below, or click the "choose your files" +link to access a file browser dialog. Once the files you want to upload have +been selected, click the green "Commit changes" button at the bottom of the +page ({numref}`upload-files-02`). + +```{figure} img/version-control/upload-files_01.png +--- +name: upload-files-01 +--- +New files of any type can be uploaded to GitHub. +``` + +```{figure} img/version-control/upload-files_02.png +--- +name: upload-files-02 +--- +Specify files to upload by dragging them into the GitHub website (red circle) +or by clicking on "choose your files." Uploaded files are also required to be +committed along with an associated commit message. +``` + + +Note that Git and GitHub are designed to track changes in individual files. +**Do not** upload your whole project in an archive file (e.g., `.zip`). If you do, +then Git can only keep track of changes to the entire `.zip` file, which will not +be human-readable. Committing one big archive defeats the whole purpose of using +version control: you won't be able to see, interpret, or find changes in the history +of any of the actual content of your project! + +(local-repo-jupyter)= +## Working with local repositories using Jupyter + +```{index} git;Jupyter extension +``` + +Although there are several ways to create and edit files on GitHub, they are +not quite powerful enough for efficiently creating and editing complex files, +or files that need to be executed to assess whether they work (e.g., files +containing code). For example, you wouldn't be able to run an analysis written +with Python code directly on GitHub. 
Thus, it is useful to be able to connect the +remote repository that was created on GitHub to a local coding environment. This +can be done by creating and working in a local copy of the repository. +In this chapter, we focus on interacting with Git via Jupyter using +the Jupyter Git extension. The Jupyter Git extension +can be run by Jupyter on your local computer, or on a JupyterHub server. +We recommend reading {numref}`Chapter %s ` +to learn how to use Jupyter before reading this chapter. + +### Generating a GitHub personal access token + +```{index} GitHub; personal access token +``` + + + +To send and retrieve work between your local repository +and the remote repository on GitHub, +you will frequently need to authenticate with GitHub +to prove you have the required permission. +There are several methods to do this, +but for beginners we recommend using the HTTPS method +because it is easier and requires less setup. +In order to use the HTTPS method, +GitHub requires you to provide a *personal access token*. +A personal access token is like a password—so keep it a secret!—but it gives +you more fine-grained control over what parts of your account +the token can be used to access, and lets you set an expiry date for the authentication. +To generate a personal access token, +you must first visit [https://github.com/settings/tokens](https://github.com/settings/tokens), +which will take you to the "Personal access tokens" page in your account settings. +Once there, click "Generate new token" ({numref}`generate-pat-01`). +Note that you may be asked to re-authenticate with your username +and password to proceed. + + +```{figure} img/version-control/generate-pat_01.png +--- +name: generate-pat-01 +--- +The "Generate new token" button used to initiate the creation of a new personal +access token. It is found in the "Personal access tokens" section of the +"Developer settings" page in your account settings. 
+``` + + +You will be asked to add a note to describe the purpose for your personal access token. +Next, you need to select permissions for the token; this is where +you can control what parts of your account the token can be used to access. +Make sure to choose only those permissions that you absolutely require. In +{numref}`generate-pat-02`, we tick only the "repo" box, which gives the +token access to our repositories (so that we can push and pull) but none of our other GitHub +account features. Finally, to generate the token, scroll to the bottom of that page +and click the green "Generate token" button ({numref}`generate-pat-02`). + +```{figure} img/version-control/generate-pat_02.png +--- +name: generate-pat-02 +--- +Webpage for creating a new personal access token. +``` + + +Finally, you will be taken to a page where you will be able to see +and copy the personal access token you just generated ({numref}`generate-pat-03`). +Since it provides access to certain parts of your account, you should +treat this token like a password; for example, you should consider +securely storing it (and your other passwords and tokens, too!) using a password manager. +Note that this page will only display the token to you once, +so make sure you store it in a safe place right away. If you accidentally forget to +store it, though, do not fret—you can delete that token by clicking the +"Delete" button next to your token, and generate a new one from scratch. +To learn more about GitHub authentication, +see the additional resources section at the end of this chapter. + +```{figure} img/version-control/generate-pat_03.png +--- +name: generate-pat-03 +--- +Display of the newly generated personal access token. 
+```
+
+
+
+### Cloning a repository using Jupyter
+
+
+
+```{index} git;clone
+```
+
+
+
+*Cloning* a remote repository from GitHub
+to create a local repository results in a
+copy that knows where it was obtained from so that it knows where to send/receive
+new committed edits. In order to do this, first copy the URL from the HTTPS tab
+of the Code drop-down menu on GitHub ({numref}`clone-02`).
+
+```{figure} img/version-control/clone_02.png
+---
+name: clone-02
+---
+The green "Code" drop-down menu contains the remote address (URL) corresponding to the location of the remote GitHub repository.
+```
+
+Open Jupyter, and click the Git+ icon on the file browser tab
+({numref}`clone-01`).
+
+```{figure} img/version-control/clone_01.png
+---
+name: clone-01
+---
+The Jupyter Git Clone icon (red circle).
+```
+
+
+
+Paste the URL of the GitHub project repository you
+created and click the blue "CLONE" button ({numref}`clone-03`).
+
+```{figure} img/version-control/clone_03.png
+---
+name: clone-03
+---
+Prompt where the remote address (URL) corresponding to the location of the GitHub repository needs to be input in Jupyter.
+```
+
+On the file browser tab, you will now see a folder for the repository.
+Inside this folder will be all the files that existed on GitHub ({numref}`clone-04`).
+
+```{figure} img/version-control/clone_04.png
+---
+name: clone-04
+---
+Cloned GitHub repositories can be seen and accessed via the Jupyter file browser.
+```
+
+
+### Specifying files to commit
+Now that you have cloned the remote repository from GitHub to create a local repository,
+you can get to work editing, creating, and deleting files.
+For example, suppose you created and saved a new file (named `eda.ipynb`) that you would
+like to send back to the project repository on GitHub ({numref}`git-add-01`).
+To "add" this modified file to the staging area (i.e., flag that this is a +file whose changes we would like to commit), click the Jupyter Git extension +icon on the far left-hand side of Jupyter ({numref}`git-add-01`). + +```{figure} img/version-control/git_add_01.png +--- +name: git-add-01 +--- +Jupyter Git extension icon (circled in red). +``` + +```{index} git;add +``` + + +This opens the Jupyter Git graphical user interface pane. Next, +click the plus sign (+) beside the file(s) that you want to "add" +({numref}`git-add-02`). Note that because this is the +first change for this file, it falls under the "Untracked" heading. +However, next time you edit this file and want to add the changes, +you will find it under the "Changed" heading. + +You will also see an `eda-checkpoint.ipynb` file under the "Untracked" heading. +This is a temporary "checkpoint file" created by Jupyter when you work on `eda.ipynb`. +You generally do not want to add auto-generated files to Git repositories; +only add the files you directly create and edit. + +```{figure} img/version-control/git_add_02.png +--- +name: git-add-02 +--- +`eda.ipynb` is added to the staging area via the plus sign (+). +``` + +Clicking the plus sign (+) moves the file from the "Untracked" heading to the "Staged" heading, +so that Git knows you want a snapshot of its current state +as a commit ({numref}`git-add-03`). Now you are ready to "commit" the changes. +Make sure to include a (clear and helpful!) message about what was changed +so that your collaborators (and future you) know what happened in this commit. + + +```{figure} img/version-control/git_add_03.png +--- +name: git-add-03 +--- +Adding `eda.ipynb` makes it visible in the staging area. +``` + + +### Making the commit + +```{index} git;commit +``` + + + +To snapshot the changes with an associated commit message, +you must put a message in the text box at the bottom of the Git pane +and click on the blue "Commit" button ({numref}`git-commit-01`). 
+It is highly recommended to write useful and meaningful messages about what +was changed. These commit messages, and the datetime stamp for a given +commit, are the primary means to navigate through the project's history in the +event that you need to view or retrieve a past version of a file, or +revert your project to an earlier state. +When you click the "Commit" button for the first time, you will be prompted to +enter your name and email. This only needs to be done once for each machine +you use Git on. + +```{figure} img/version-control/git_commit_01.png +--- +name: git-commit-01 +--- +A commit message must be added into the Jupyter Git extension commit text box before the blue Commit button can be used to record the commit. +``` + + +After "committing" the file(s), you will see there are 0 "Staged" files. +You are now ready to push your changes +to the remote repository on GitHub ({numref}`git-commit-03`). + + +```{figure} img/version-control/git_commit_03.png +--- +name: git-commit-03 +--- +After recording a commit, the staging area should be empty. +``` + + + +### Pushing the commits to GitHub + +```{index} git;push +``` + + + +To send the committed changes back to the remote repository on +GitHub, you need to *push* them. To do this, +click on the cloud icon with the up arrow on the Jupyter Git tab +({numref}`git-push-01`). + +```{figure} img/version-control/git_push_01.png +--- +name: git-push-01 +--- +The Jupyter Git extension "push" button (circled in red). +``` + + +You will then be prompted to enter your GitHub username +and the personal access token that you generated +earlier (not your account password!). Click +the blue "OK" button to initiate the push ({numref}`git-push-02`). + +```{figure} img/version-control/git_push_02.png +--- +name: git-push-02 +--- +Enter your Git credentials to authorize the push to the remote repository. 
+```
+
+
+If the files were successfully pushed to the project repository on
+GitHub, you will be shown a success message ({numref}`git-push-03`).
+Click "Dismiss" to continue working in Jupyter.
+
+```{figure} img/version-control/git_push_03.png
+---
+name: git-push-03
+---
+The prompt that the push was successful.
+```
+
+
+If you visit the remote repository on GitHub,
+you will see that the changes now exist there too
+({numref}`git-push-04`)!
+
+```{figure} img/version-control/git_push_04.png
+---
+name: git-push-04
+---
+The GitHub web interface shows a preview of the commit message, and the time of the most recently pushed commit for each file.
+```
+
+
+## Collaboration
+
+### Giving collaborators access to your project
+
+```{index} GitHub; collaborator access
+```
+
+
+
+As mentioned earlier, GitHub allows you to control who has access to your
+project. The default for both public and private projects is that only the
+person who created the GitHub repository has permission to create, edit and
+delete files (*write access*). To give your collaborators write access to the
+project, navigate to the "Settings" tab ({numref}`add-collab-01`).
+
+```{figure} img/version-control/add_collab_01.png
+---
+name: add-collab-01
+---
+The "Settings" tab on the GitHub web interface.
+```
+
+Then click "Manage access" ({numref}`add-collab-02`).
+
+```{figure} img/version-control/add_collab_02.png
+---
+name: add-collab-02
+---
+The "Manage access" tab on the GitHub web interface.
+```
+
+Then click the green "Invite a collaborator" button ({numref}`add-collab-03`).
+
+```{figure} img/version-control/add_collab_03.png
+---
+name: add-collab-03
+---
+The "Invite a collaborator" button on the GitHub web interface.
+```
+
+Type in the collaborator's GitHub username or email,
+and select their name when it appears ({numref}`add-collab-04`).
+
+```{figure} img/version-control/add_collab_04.png
+---
+name: add-collab-04
+---
+The text box where a collaborator's GitHub username or email can be entered.
+```
+
+Finally, click the green "Add to this repository" button ({numref}`add-collab-05`).
+
+```{figure} img/version-control/add_collab_05.png
+---
+name: add-collab-05
+---
+The confirmation button for adding a collaborator to a repository on the GitHub web interface.
+```
+
+After this, you should see your newly added collaborator listed under the
+"Manage access" tab. They should receive an email invitation to join the
+GitHub repository as a collaborator. They need to accept this invitation
+to enable write access.
+
+### Pulling changes from GitHub using Jupyter
+
+We will now walk through how to use the Jupyter Git extension tool to pull changes
+to our `eda.ipynb` analysis file that were made by a collaborator
+({numref}`git-pull-00`).
+
+```{figure} img/version-control/git_pull_00.png
+---
+name: git-pull-00
+---
+The GitHub interface indicates the name of the last person to push a commit to the remote repository, a preview of the associated commit message, the unique commit identifier, and how long ago the commit was snapshotted.
+```
+
+```{index} git;pull
+```
+
+You can tell Git to "pull" by clicking on the cloud icon with
+the down arrow in Jupyter ({numref}`git-pull-01`).
+
+```{figure} img/version-control/git_pull_01.png
+---
+name: git-pull-01
+---
+The Jupyter Git extension "pull" button (the cloud icon with the down arrow).
+```
+
+Once the files are successfully pulled from GitHub, you need to click "Dismiss"
+to keep working ({numref}`git-pull-02`).
+
+```{figure} img/version-control/git_pull_02.png
+---
+name: git-pull-02
+---
+The prompt after changes have been successfully pulled from a remote repository.
+```
+
+And then when you open (or refresh) the files whose changes you just pulled,
+you should be able to see them ({numref}`git-pull-03`).
+
+```{figure} img/version-control/git_pull_03.png
+---
+name: git-pull-03
+---
+Changes made by the collaborator to `eda.ipynb` (code highlighted by red arrows).
+```
+
+It can be very useful to review the history of the changes to your project. You
+can do this directly in Jupyter by clicking "History" in the Git tab
+({numref}`git-pull-04`).
+
+```{figure} img/version-control/git_pull_04.png
+---
+name: git-pull-04
+---
+Version control repository history viewed using the Jupyter Git extension.
+```
+
+
+It is good practice to pull any changes at the start of *every* work session
+before you start working on your local copy.
+If you do not do this,
+and your collaborators have pushed some changes to the project to GitHub,
+then you will be unable to push your changes to GitHub until you pull.
+This situation can be recognized by the error message
+shown in {numref}`merge-conflict-01`.
+
+```{figure} img/version-control/merge_conflict_01.png
+---
+name: merge-conflict-01
+---
+Error message that indicates that there are changes on the remote repository that you do not have locally.
+```
+
+
+Usually, getting out of this situation is not too troublesome. First you need
+to pull the changes that exist on GitHub that you do not yet have in the local
+repository. Usually when this happens, Git can automatically merge the changes
+for you, even if you and your collaborators were working on different parts of
+the same file!
+
+If, however, you and your collaborators made changes to the same line of the
+same file, Git will not be able to automatically merge the changes—it will
+not know whether to keep your version of the line(s), your collaborators'
+version of the line(s), or some blend of the two. When this happens, Git will
+tell you that you have a merge conflict in certain file(s) ({numref}`merge-conflict-03`).
+ +```{figure} img/version-control/merge_conflict_03.png +--- +name: merge-conflict-03 +--- +Error message that indicates you and your collaborators made changes to the +same line of the same file and that Git will not be able to automatically merge +the changes. +``` + + + +### Handling merge conflicts + +```{index} git;merge conflict +``` + + + +To fix the merge conflict, you need to open the offending file +in a plain text editor and look for special marks that Git puts in the file to +tell you where the merge conflict occurred ({numref}`merge-conflict-04`). + + +```{figure} img/version-control/merge_conflict_04.png +--- +name: merge-conflict-04 +--- +How to open a Jupyter notebook as a plain text file view in Jupyter. +``` + +The beginning of the merge +conflict is preceded by `<<<<<<< HEAD` and the end of the merge conflict is +marked by `>>>>>>>`. Between these markings, Git also inserts a separator +(`=======`). The version of the change before the separator is your change, and +the version that follows the separator was the change that existed on GitHub. +In {numref}`merge-conflict-05`, you can see that in your local repository +there is a line of code that sets the axis scaling to `"sqrt"`. +It looks like your collaborator made an edit to that line too, except with axis scaling `"log"`! + +```{figure} img/version-control/merge_conflict_05.png +--- +name: merge-conflict-05 +--- +Merge conflict identifiers (highlighted in red). +``` + +Once you have decided which version of the change (or what combination!) to +keep, you need to use the plain text editor to remove the special marks that +Git added ({numref}`merge-conflict-06`). + +```{figure} img/version-control/merge_conflict_06.png +--- +name: merge-conflict-06 +--- +File where a merge conflict has been resolved. +``` + +The file must be saved, added to the staging area, and then committed before you will be able to +push your changes to GitHub. 
+
+### Communicating using GitHub issues
+
+When working on a project in a team, you don't just want a historical record of who changed
+what file and when in the project—you also want a record of decisions that were made,
+ideas that were floated, problems that were identified and addressed, and all other
+communication surrounding the project. Email and messaging apps are both very popular for general communication, but are not
+designed for project-specific communication: they both generally do not have facilities for organizing conversations by project subtopics,
+searching for conversations related to particular bugs or software versions, etc.
+
+```{index} GitHub;issues
+```
+
+GitHub *issues* are an alternative written communication medium to email and
+messaging apps, and were designed specifically to facilitate project-specific
+communication. Issues are *opened* from the "Issues" tab on the project's
+GitHub page, and they persist there even after the conversation is over and the issue is *closed* (in
+contrast to email, issues are not usually deleted). One issue thread is usually created
+per topic, and they are easily searchable using GitHub's search tools. All
+issues are accessible to all project collaborators, so no one is left out of
+the conversation. Finally, issues can be set up so that team members get email
+notifications when a new issue is created or a new post is made in an issue
+thread. Replying to issues from email is also possible. Given all of these advantages,
+ we highly recommend the use of issues for project-related communication.
+
+To open a GitHub issue,
+first click on the "Issues" tab ({numref}`issue-01`).
+
+```{figure} img/version-control/issue_01.png
+---
+name: issue-01
+---
+The "Issues" tab on the GitHub web interface.
+```
+
+Next click the "New issue" button ({numref}`issue-02`).
+
+```{figure} img/version-control/issue_02.png
+---
+name: issue-02
+---
+The "New issue" button on the GitHub web interface.
+```
+
+Add an issue title (which acts like an email subject line), and then put the
+body of the message in the larger text box. Finally, click "Submit new issue"
+to post the issue to share with others ({numref}`issue-03`).
+
+```{figure} img/version-control/issue_03.png
+---
+name: issue-03
+---
+Dialog boxes and submission button for creating new GitHub issues.
+```
+
+You can reply to an issue that someone opened by adding your written response to
+the large text box and clicking comment ({numref}`issue-04`).
+
+```{figure} img/version-control/issue_04.png
+---
+name: issue-04
+---
+Dialog box for replying to GitHub issues.
+```
+
+
+When a conversation is resolved, you can click "Close issue".
+The closed issue can be later viewed by clicking the "Closed" header link
+in the "Issues" tab ({numref}`issue-06`).
+
+```{figure} img/version-control/issue_06.png
+---
+name: issue-06
+---
+The "Closed" issues tab on the GitHub web interface.
+```
+
+## Exercises
+
+Practice exercises for the material covered in this chapter
+can be found in the accompanying
+[worksheets repository](https://worksheets.python.datasciencebook.ca)
+in the "Collaboration with version control" row.
+You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button.
+You can also preview a non-interactive version of the worksheet by clicking "view worksheet."
+If you instead decide to download the worksheet and run it on your own machine,
+make sure to follow the instructions for computer setup
+found in {numref}`Chapter %s `. This will ensure that the automated feedback
+and guidance that the worksheets provide will function as intended.
+ +## Additional resources + +Now that you've picked up the basics of version control with Git and GitHub, +you can expand your knowledge through the resources listed below: + +- GitHub's [guides website](https://guides.github.com/) and [YouTube + channel](https://www.youtube.com/githubguides) are great resources to take the next steps in + learning about Git and GitHub. +- [Good enough practices in scientific + computing](https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005510#sec014) + {cite:p}`wilson2014best` provides more advice on useful workflows and "good enough" + practices in data analysis projects. +- In addition to [GitHub](https://github.com), there are other popular Git + repository hosting services such as [GitLab](https://gitlab.com) and + [BitBucket](https://bitbucket.org). Comparing all of these options is beyond + the scope of this book, and until you become a more advanced user, you are + perfectly fine to just stick with GitHub. Just be aware that you have options! +- GitHub's [documentation on creating a personal access + token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) + is an excellent additional resource to consult if you need help + generating and using personal access tokens. 
+ +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/viz.md b/pull313/_sources/viz.md new file mode 100644 index 00000000..40de0dea --- /dev/null +++ b/pull313/_sources/viz.md @@ -0,0 +1,2056 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.5 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +```{code-cell} ipython3 +:tags: [remove-cell] + +from chapter_preamble import * +from IPython.display import Image +``` + +(viz)= +# Effective data visualization + +## Overview +This chapter will introduce concepts and tools relating to data visualization +beyond what we have seen and practiced so far. We will focus on guiding +principles for effective data visualization and explaining visualizations +independent of any particular tool or programming language. In the process, we +will cover some specifics of creating visualizations (scatter plots, bar +plots, line plots, and histograms) for data using Python. + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + +- Describe when to use the following kinds of visualizations to answer specific questions using a data set: + - scatter plots + - line plots + - bar plots + - histogram plots +- Given a data set and a question, select from the above plot types and use Python to create a visualization that best answers the question. +- Given a visualization and a question, evaluate the effectiveness of the visualization and suggest improvements to better answer the question. +- Referring to the visualization, communicate the conclusions in non-technical terms. +- Identify rules of thumb for creating effective visualizations. 
+- Define the two key aspects of altair charts: + - graphical marks + - encoding channels +- Use the altair library in Python to create and refine the above visualizations using: + - graphical marks: `mark_point`, `mark_line`, `mark_bar` + - encoding channels: `x`, `y`, `color`, `shape` + - subplots: `facet` +- Describe the difference in raster and vector output formats. +- Use `chart.save()` to save visualizations in `.png` and `.svg` format. + +## Choosing the visualization + +*Ask a question, and answer it* + +```{index} question; visualization +``` + +The purpose of a visualization is to answer a question +about a data set of interest. So naturally, the +first thing to do **before** creating a visualization is to formulate the +question about the data you are trying to answer. A good visualization will +clearly answer your question without distraction; a *great* visualization will +suggest even what the question was itself without additional explanation. +Imagine your visualization as part of a poster presentation for a project; even +if you aren't standing at the poster explaining things, an effective +visualization will convey your message to the audience. + +Recall the different data analysis questions +from {numref}`Chapter %s `. +With the visualizations we will cover in this chapter, +we will be able to answer *only descriptive and exploratory* questions. +Be careful to not answer any *predictive, inferential, causal* +*or mechanistic* questions with the visualizations presented here, +as we have not learned the tools necessary to do that properly just yet. + +As with most coding tasks, it is totally fine (and quite common) to make +mistakes and iterate a few times before you find the right visualization for +your data and question. There are many different kinds of plotting +graphics available to use (see Chapter 5 of *Fundamentals of Data Visualization* {cite:p}`wilkeviz` for a directory). 
+The types of plots that we introduce in this book are shown in {numref}`plot_sketches`; +which one you should select depends on your data +and the question you want to answer. +In general, the guiding principles of when to use each type of plot +are as follows: + +```{index} visualization; line, visualization; histogram, visualization; scatter, visualization; bar, distribution +``` + +- **scatter plots** visualize the relationship between two quantitative variables +- **line plots** visualize trends with respect to an independent, ordered quantity (e.g., time) +- **bar plots** visualize comparisons of amounts +- **histograms** visualize the distribution of one quantitative variable (i.e., all its possible values and how often they occur) + +```{figure} img/viz/plot-sketches-1.png +--- +height: 400px +name: plot_sketches +--- +Examples of scatter, line and bar plots, as well as histograms. +``` + + +All types of visualization have their (mis)uses, but three kinds are usually +hard to understand or are easily replaced with an oft-better alternative. In +particular, you should avoid **pie charts**; it is generally better to use +bars, as it is easier to compare bar heights than pie slice sizes. You should +also not use **3-D visualizations**, as they are typically hard to understand +when converted to a static 2-D image format. Finally, do not use tables to make +numerical comparisons; humans are much better at quickly processing visual +information than text and math. Bar plots are again typically a better +alternative. + ++++ + +## Refining the visualization + +*Convey the message, minimize noise* + +Just being able to make a visualization in Python with `altair` (or any other tool +for that matter) doesn't mean that it effectively communicates your message to +others. Once you have selected a broad type of visualization to use, you will +have to refine it to suit your particular need. Some rules of thumb for doing +this are listed below. 
They generally fall into two classes: you want to
+*make your visualization convey your message*, and you want to *reduce visual noise*
+as much as possible. Humans have limited cognitive ability to process
+information; both of these types of refinement aim to reduce the mental load on
+your audience when viewing your visualization, making it easier for them to
+understand and remember your message quickly.
+
+**Convey the message**
+
+- Make sure the visualization answers the question you have asked as simply and plainly as possible.
+- Use legends and labels so that your visualization is understandable without reading the surrounding text.
+- Ensure the text, symbols, lines, etc., on your visualization are big enough to be easily read.
+- Ensure the data are clearly visible; don't hide the shape/distribution of the data behind other objects (e.g., a bar).
+- Make sure to use color schemes that are understandable by those with
+  colorblindness (a surprisingly large fraction of the overall
+  population—from about 1% to 10%, depending on sex and ancestry {cite:p}`deebblind`).
+  For example, [Color Schemes](https://altair-viz.github.io/user_guide/customization.html#customizing-colors)
+  provides the ability to pick such color schemes, and you can check
+  your visualizations after you have created them by uploading to online tools
+  such as a [color blindness simulator](https://www.color-blindness.com/coblis-color-blindness-simulator/).
+- Redundancy can be helpful; sometimes conveying the same message in multiple ways reinforces it for the audience.
+
+**Minimize noise**
+
+- Use colors sparingly. Too many different colors can be distracting, create false patterns, and detract from the message.
+- Be wary of overplotting. Overplotting is when marks that represent the data
+  overlap, and is problematic as it prevents you from seeing how many data
+  points are represented in areas of the visualization where this occurs.
If your + plot has too many dots or lines and starts to look like a mess, you need to do + something different. +- Only make the plot area (where the dots, lines, bars are) as big as needed. Simple plots can be made small. +- Don't adjust the axes to zoom in on small differences. If the difference is small, show that it's small! + ++++ + +## Creating visualizations with `altair` + +*Build the visualization iteratively* + +```{index} altair +``` + +This section will cover examples of how to choose and refine a visualization given a data set and a question that you want to answer, +and then how to create the visualization in Python using `altair`. To use the `altair` package, we need to first import it. We will also import `pandas` to use for reading in the data. + +```{code-cell} ipython3 +import pandas as pd +import altair as alt +``` + +```{note} +In this chapter, we will provide example visualizations using relatively small +data sets, so we are fine using the default settings in `altair`. However, +`altair` will raise an error if you try to plot with a data frame that has more +than 5,000 rows. The simplest way to plot larger data sets is to enable the +`vegafusion` data transformer right after you import the `altair` package: +`alt.data_transformers.enable("vegafusion")`. This will allow you to plot up to +100,000 graphical objects (e.g., a scatter plot with 100,000 points). To +visualize *even larger* data sets, see [the `altair` documentation](https://altair-viz.github.io/user_guide/large_datasets). +``` + +### Scatter plots and line plots: the Mauna Loa CO$_{\text{2}}$ data set + +```{index} Mauna Loa +``` + +The [Mauna Loa CO$_{\text{2}}$ data set](https://www.esrl.noaa.gov/gmd/ccgg/trends/data.html), +curated by Dr. Pieter Tans, NOAA/GML +and Dr. 
Ralph Keeling, Scripps Institution of Oceanography,
+records the atmospheric concentration of carbon dioxide
+(CO$_{\text{2}}$, in parts per million)
+at the Mauna Loa research station in Hawaii
+from 1959 onward {cite:p}`maunadata`.
+For this book, we are going to focus on the last 40 years of the data set,
+1980-2020.
+
+```{index} question; visualization
+```
+
+**Question:** Does the concentration of atmospheric CO$_{\text{2}}$ change over time,
+and are there any interesting patterns to note?
+
+```{code-cell} ipython3
+:tags: ["remove-cell"]
+mauna_loa = pd.read_csv("data/mauna_loa.csv")
+mauna_loa["day"]=1
+mauna_loa["date_measured"]=pd.to_datetime(mauna_loa[["year", "month", "day"]])
+mauna_loa = mauna_loa[["date_measured", "ppm"]].query('ppm>0 and date_measured>"1980-1-1"')
+mauna_loa.to_csv("data/mauna_loa_data.csv", index=False)
+```
+
+To get started, we will read and inspect the data:
+
+```{code-cell} ipython3
+# mauna loa carbon dioxide data
+co2_df = pd.read_csv(
+    "data/mauna_loa_data.csv",
+    parse_dates=["date_measured"]
+)
+co2_df
+```
+
+
+```{code-cell} ipython3
+co2_df.info()
+```
+
+We see that there are two columns in the `co2_df` data frame; `date_measured` and `ppm`.
+The `date_measured` column holds the date the measurement was taken,
+and is of type `datetime64`.
+The `ppm` column holds the value of CO$_{\text{2}}$ in parts per million
+that was measured on each date, and is type `float64`; this is the usual
+type for decimal numbers.
+
+```{note}
+`read_csv` was able to parse the `date_measured` column into the
+`datetime` vector type because it was entered
+in the international standard date format,
+called ISO 8601, which lists dates as `year-month-day`, and we passed `parse_dates=["date_measured"]`.
+`datetime` vectors are stored as numbers under the hood, with special properties that allow
+them to handle dates correctly.
+For example, `datetime` type vectors allow functions like `altair` +to treat them as numeric dates and not as character vectors, +even though they contain non-numeric characters +(e.g., in the `date_measured` column in the `co2_df` data frame). +This means Python will not accidentally plot the dates in the wrong order +(i.e., not alphanumerically as would happen if it was a character vector). +More about dates and times can be viewed [here](https://wesmckinney.com/book/time-series.html). +``` + +Since we are investigating a relationship between two variables +(CO$_{\text{2}}$ concentration and date), +a scatter plot is a good place to start. +Scatter plots show the data as individual points with `x` (horizontal axis) +and `y` (vertical axis) coordinates. +Here, we will use the measurement date as the `x` coordinate +and the CO$_{\text{2}}$ concentration as the `y` coordinate. +We create a chart with the `alt.Chart()` function. +There are a few basic aspects of a plot that we need to specify: + +```{index} altair; graphical mark, altair; encoding channel +``` + +- The name of the **data frame** to visualize. + - Here, we specify the `co2_df` data frame as an argument to `alt.Chart` +- The **graphical mark**, which specifies how the mapped data should be displayed. + - To create a graphical mark, we use `Chart.mark_*` methods (see the + [altair reference](https://altair-viz.github.io/user_guide/marks.html) + for a list of graphical mark). + - Here, we use the `mark_point` function to visualize our data as a scatter plot. +- The **encoding channels**, which tells `altair` how the columns in the data frame map to visual properties in the chart. + - To create an encoding, we use the `encode` function. 
+  - The `encode` method builds a key-value mapping between encoding channels (such as x, y) to fields in the data set, accessed by field name (column names)
+  - Here, we set the `x` axis of the plot to the `date_measured` variable,
+    and on the `y` axis, we plot the `ppm` variable.
+  - For the y-axis, we also provided the method
+    `scale(zero=False)`. By default, `altair` chooses the y-limits
+    based on the data and will keep `y=0` in view.
+    This is often a helpful default, but here it makes it
+    difficult to see any trends in our data since the smallest value is >300
+    ppm. So by providing `scale(zero=False)`, we tell altair to
+    choose a reasonable lower bound based on our data, and that lower bound
+    doesn't have to be zero.
+  - To change the properties of the encoding channels,
+    we need to leverage the helper functions `alt.Y` and `alt.X`.
+    These helpers have the role of customizing things like order, titles, and scales.
+    Here, we use `alt.Y` to change the domain of the y-axis,
+    so that it starts from the lowest value in the `ppm` column
+    rather than from zero.
+
+```{code-cell} ipython3
+co2_scatter = alt.Chart(co2_df).mark_point().encode(
+    x="date_measured",
+    y=alt.Y("ppm").scale(zero=False)
+)
+```
+
+```{code-cell} ipython3
+:tags: ["remove-cell"]
+glue("co2_scatter", co2_scatter, display=False)
+```
+
+:::{glue:figure} co2_scatter
+:figwidth: 700px
+:name: co2_scatter
+
+Scatter plot of atmospheric concentration of CO$_{2}$ over time.
+:::
+
+The visualization in {numref}`co2_scatter`
+shows a clear upward trend
+in the atmospheric concentration of CO$_{\text{2}}$ over time.
+This plot answers the first part of our question in the affirmative,
+but that appears to be the only conclusion one can make
+from the scatter visualization.
+
+One important thing to note about this data is that one of the variables
+we are exploring is time.
+Time is a special kind of quantitative variable +because it forces additional structure on the data—the +data points have a natural order. +Specifically, each observation in the data set has a predecessor +and a successor, and the order of the observations matters; changing their order +alters their meaning. +In situations like this, we typically use a line plot to visualize +the data. Line plots connect the sequence of `x` and `y` coordinates +of the observations with line segments, thereby emphasizing their order. + +```{index} altair; mark_line +``` + +We can create a line plot in `altair` using the `mark_line` function. +Let's now try to visualize the `co2_df` as a line plot +with just the default arguments: + +```{code-cell} ipython3 +co2_line = alt.Chart(co2_df).mark_line().encode( + x="date_measured", + y=alt.Y("ppm").scale(zero=False) +) +``` + + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("co2_line", co2_line, display=False) +``` + +:::{glue:figure} co2_line +:figwidth: 700px +:name: co2_line + +Line plot of atmospheric concentration of CO$_{2}$ over time. +::: + +```{index} overplotting +``` + +Aha! {numref}`co2_line` shows us there *is* another interesting +phenomenon in the data: in addition to increasing over time, the concentration +seems to oscillate as well. Given the visualization as it is now, it is still +hard to tell how fast the oscillation is, but nevertheless, the line seems to +be a better choice for answering the question than the scatter plot was. The +comparison between these two visualizations also illustrates a common issue with +scatter plots: often, the points are shown too close together or even on top of +one another, muddling information that would otherwise be clear +(*overplotting*). + +```{index} altair; alt.X, altair; alt.Y, altair; configure_axis +``` + +Now that we have settled on the rough details of the visualization, it is time +to refine things. 
This plot is fairly straightforward, and there is not much +visual noise to remove. But there are a few things we must do to improve +clarity, such as adding informative axis labels and making the font a more +readable size. To add axis labels, we use the `title` method along with `alt.X` and `alt.Y` functions. To +change the font size, we use the `configure_axis` function with the +`titleFontSize` argument. + +```{code-cell} ipython3 +co2_line_labels = alt.Chart(co2_df).mark_line().encode( + x=alt.X("date_measured").title("Year"), + y=alt.Y("ppm").scale(zero=False).title("Atmospheric CO2 (ppm)") +).configure_axis(titleFontSize=12) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("co2_line_labels", co2_line_labels, display=False) +``` + +:::{glue:figure} co2_line_labels +:figwidth: 700px +:name: co2_line_labels + +Line plot of atmospheric concentration of CO$_{2}$ over time with clearer axes and labels. +::: + +```{note} +The `configure_*` functions in `altair` support additional customization, +such as updating the size of the plot, changing +the font color, and many other options that can be viewed +[here](https://altair-viz.github.io/user_guide/configuration.html). +``` + +```{index} altair; alt.Scale +``` + +Finally, let's see if we can better understand the oscillation by changing the +visualization slightly. Note that it is totally fine to use a small number of +visualizations to answer different aspects of the question you are trying to +answer. We will accomplish this by using *scale*, +another important feature of `altair` that easily transforms the different +variables and set limits. +In particular, here, we will use the `alt.Scale` function to zoom in +on just a few years of data (say, 1990-1995). The +`domain` argument takes a list of length two +to specify the upper and lower bounds to limit the axis. +We also added the argument `clip=True` to `mark_line`. 
This tells `altair` +to "clip" (remove) the data outside of the specified domain that we set so that it doesn't +extend past the plot area. +Since we are using both the `scale` and `title` method on the encodings +we stack them on separate lines to make the code easier to read. + +```{code-cell} ipython3 +co2_line_scale = alt.Chart(co2_df).mark_line(clip=True).encode( + x=alt.X("date_measured") + .scale(domain=["1990", "1995"]) + .title("Measurement Date"), + y=alt.Y("ppm") + .scale(zero=False) + .title("Atmospheric CO2 (ppm)") +).configure_axis(titleFontSize=12) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("co2_line_scale", co2_line_scale, display=False) +``` + +:::{glue:figure} co2_line_scale +:figwidth: 700px +:name: co2_line_scale + +Line plot of atmospheric concentration of CO$_{2}$ from 1990 to 1995. +::: + +Interesting! It seems that each year, the atmospheric CO$_{\text{2}}$ increases +until it reaches its peak somewhere around April, decreases until around late +September, and finally increases again until the end of the year. In Hawaii, +there are two seasons: summer from May through October, and winter from +November through April. Therefore, the oscillating pattern in CO$_{\text{2}}$ +matches up fairly closely with the two seasons. + +A useful analogy to constructing a data visualization is painting a picture. +We start with a blank canvas, +and the first thing we do is prepare the surface +for our painting by adding primer. +In our data visualization this is akin to calling `alt.Chart` +and specifying the data set we will be using. +Next, we sketch out the background of the painting. +In our data visualization, +this would be when we map data to the axes in the `encode` function. +Then we add our key visual subjects to the painting. +In our data visualization, +this would be the graphical marks (e.g., `mark_point`, `mark_line`, etc.). +And finally, we work on adding details and refinements to the painting. 
+In our data visualization this would be when we fine tune axis labels, +change the font, adjust the point size, and do other related things. + + + +### Scatter plots: the Old Faithful eruption time data set + +```{index} Old Faithful +``` + +The `faithful` data set contains measurements +of the waiting time between eruptions +and the subsequent eruption duration (in minutes) of the Old Faithful +geyser in Yellowstone National Park, Wyoming, United States. +First, we will read the data and then answer the following question: + +```{index} question; visualization +``` + +**Question:** Is there a relationship between the waiting time before an eruption +and the duration of the eruption? + +```{code-cell} ipython3 +faithful = pd.read_csv("data/faithful.csv") +faithful + +``` + +Here again, we investigate the relationship between two quantitative variables +(waiting time and eruption time). +But if you look at the output of the data frame, +you'll notice that unlike time in the Mauna Loa CO$_{\text{2}}$ data set, +neither of the variables here have a natural order to them. +So a scatter plot is likely to be the most appropriate +visualization. Let's create a scatter plot using the `altair` +package with the `waiting` variable on the horizontal axis, the `eruptions` +variable on the vertical axis, and `mark_point` as the graphical mark. +The result is shown in {numref}`faithful_scatter`. + +```{code-cell} ipython3 +faithful_scatter = alt.Chart(faithful).mark_point().encode( + x="waiting", + y="eruptions" +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("faithful_scatter", faithful_scatter, display=False) +``` + +:::{glue:figure} faithful_scatter +:figwidth: 700px +:name: faithful_scatter + +Scatter plot of waiting time and eruption time. +::: + +We can see in {numref}`faithful_scatter` that the data tend to fall +into two groups: one with short waiting and eruption times, and one with long +waiting and eruption times. 
Note that in this case, there is no overplotting: +the points are generally nicely visually separated, and the pattern they form +is clear. +In order to refine the visualization, we need only to add axis +labels and make the font more readable. + +```{code-cell} ipython3 +faithful_scatter_labels = alt.Chart(faithful).mark_point().encode( + x=alt.X("waiting").title("Waiting Time (mins)"), + y=alt.Y("eruptions").title("Eruption Duration (mins)") +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("faithful_scatter_labels", faithful_scatter_labels, display=False) +``` + +:::{glue:figure} faithful_scatter_labels +:figwidth: 700px +:name: faithful_scatter_labels + +Scatter plot of waiting time and eruption time with clearer axes and labels. +::: + + +We can change the size of the point and color of the plot by specifying `mark_point(size=10, color="black")`. + +```{code-cell} ipython3 +faithful_scatter_labels_black = alt.Chart(faithful).mark_point(size=10, color="black").encode( + x=alt.X("waiting").title("Waiting Time (mins)"), + y=alt.Y("eruptions").title("Eruption Duration (mins)") +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("faithful_scatter_labels_black", faithful_scatter_labels_black, display=False) +``` + +:::{glue:figure} faithful_scatter_labels_black +:figwidth: 700px +:name: faithful_scatter_labels_black + +Scatter plot of waiting time and eruption time with black points. +::: + ++++ + +### Axis transformation and colored scatter plots: the Canadian languages data set + +```{index} Canadian languages +``` + +Recall the `can_lang` data set {cite:p}`timbers2020canlang` from {numref}`Chapters %s `, {numref}`%s `, and {numref}`%s `. +It contains counts of languages from the 2016 +Canadian census. + +```{index} question; visualization +``` + +**Question:** Is there a relationship between +the percentage of people who speak a language as their mother tongue and +the percentage for whom that is the primary language spoken at home? 
+And is there a pattern in the strength of this relationship in the +higher-level language categories (Official languages, Aboriginal languages, or +non-official and non-Aboriginal languages)? + +To get started, we will read and inspect the data: + +```{code-cell} ipython3 +:tags: ["output_scroll"] +can_lang = pd.read_csv("data/can_lang.csv") +can_lang +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +# use only nonzero entries (to avoid issues with log scale), and wrap in a pd.DataFrame to prevent copy/view warnings later +can_lang = pd.DataFrame(can_lang[(can_lang["most_at_home"] > 0) & (can_lang["mother_tongue"] > 0)]) +``` + +```{index} altair; mark_circle +``` + +We will begin with a scatter plot of the `mother_tongue` and `most_at_home` columns from our data frame. +As we have seen in the scatter plots in the previous section, +the default behavior of `mark_point` is to draw the outline of each point. +If we would like to fill them in, +we can pass the argument `filled=True` to `mark_point` +or use the shortcut `mark_circle`. +Whether to fill points or not is mostly a matter of personal preferences, +although hollow points can make it easier to see individual points +when there are many overlapping points in a chart. +The resulting plot is shown in {numref}`can_lang_plot`. + +```{code-cell} ipython3 +can_lang_plot = alt.Chart(can_lang).mark_circle().encode( + x="most_at_home", + y="mother_tongue" +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("can_lang_plot", can_lang_plot, display=False) +``` + +:::{glue:figure} can_lang_plot +:figwidth: 700px +:name: can_lang_plot + +Scatter plot of number of Canadians reporting a language as their mother tongue vs the primary language at home +::: + +```{index} escape character +``` + +To make an initial improvement in the interpretability +of {numref}`can_lang_plot`, we should +replace the default axis +names with more informative labels. 
+To make the axes labels on the plots more readable, +we can print long labels over multiple lines. +To achieve this, we specify the title as a list of strings +where each string in the list will correspond to a new line of text. +We can also increase the font size to further +improve readability. + +```{code-cell} ipython3 +can_lang_plot_labels = alt.Chart(can_lang).mark_circle().encode( + x=alt.X("most_at_home").title( + ["Language spoken most at home", "(number of Canadian residents)"] + ), + y=alt.Y("mother_tongue") + .scale(zero=False) + .title(["Mother tongue", "(number of Canadian residents)"]) +).configure_axis(titleFontSize=12) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("can_lang_plot_labels", can_lang_plot_labels, display=False) +``` + +:::{glue:figure} can_lang_plot_labels +:figwidth: 700px +:name: can_lang_plot_labels + +Scatter plot of number of Canadians reporting a language as their mother tongue vs the primary language at home with x and y labels. +::: + + + + +```{code-cell} ipython3 +:tags: ["remove-cell"] +import numpy as np +numlang_speakers_max=int(max(can_lang["mother_tongue"])) +print(numlang_speakers_max) +numlang_speakers_min = int(min(can_lang["mother_tongue"])) +print(numlang_speakers_min) +log_result = int(np.floor(np.log10(numlang_speakers_max/numlang_speakers_min))) +print(log_result) +glue("numlang_speakers_max", "{0:,.0f}".format(numlang_speakers_max)) +glue("numlang_speakers_min", "{0:,.0f}".format(numlang_speakers_min)) +glue("log_result", log_result) +``` + +Okay! The axes and labels in {numref}`can_lang_plot_labels` are +much more readable and interpretable now. However, the scatter points themselves could use +some work; most of the 214 data points are bunched +up in the lower left-hand side of the visualization. The data is clumped because +many more people in Canada speak English or French (the two points in +the upper right corner) than other languages. 
+In particular, the most common mother tongue language +has {glue:text}`numlang_speakers_max` speakers, +while the least common has only {glue:text}`numlang_speakers_min`. +That's a six-decimal-place difference +in the magnitude of these two numbers! +We can confirm that the two points in the upper right-hand corner correspond +to Canada's two official languages by filtering the data: + +```{index} pandas.DataFrame; loc[] +``` + +```{code-cell} ipython3 +:tags: ["output_scroll"] +can_lang.loc[ + (can_lang["language"]=="English") + | (can_lang["language"]=="French") +] +``` + +```{index} logarithmic scale, altair; logarithmic scaling +``` + +Recall that our question about this data pertains to *all* languages; +so to properly answer our question, +we will need to adjust the scale of the axes so that we can clearly +see all of the scatter points. +In particular, we will improve the plot by adjusting the horizontal +and vertical axes so that they are on a **logarithmic** (or **log**) scale. +Log scaling is useful when your data take both *very large* and *very small* values, +because it helps space out small values and squishes larger values together. +For example, $\log_{10}(1) = 0$, $\log_{10}(10) = 1$, $\log_{10}(100) = 2$, and $\log_{10}(1000) = 3$; +on the logarithmic scale, +the values 1, 10, 100, and 1000 are all the same distance apart! +So we see that applying this function is moving big values closer together +and moving small values farther apart. +Note that if your data can take the value 0, logarithmic scaling may not +be appropriate (since `log10(0) = -inf` in Python). There are other ways to transform +the data in such a case, but these are beyond the scope of the book. + +We can accomplish logarithmic scaling in the `altair` visualization +using the argument `type="log"` in the scale method. 
+
+```{code-cell} ipython3
+can_lang_plot_log = alt.Chart(can_lang).mark_circle().encode(
+    x=alt.X("most_at_home")
+    .scale(type="log")
+    .title(["Language spoken most at home", "(number of Canadian residents)"]),
+    y=alt.Y("mother_tongue")
+    .scale(type="log")
+    .title(["Mother tongue", "(number of Canadian residents)"])
+).configure_axis(titleFontSize=12)
+```
+
+```{code-cell} ipython3
+:tags: ["remove-cell"]
+glue("can_lang_plot_log", can_lang_plot_log, display=False)
+```
+
+:::{glue:figure} can_lang_plot_log
+:figwidth: 700px
+:name: can_lang_plot_log
+
+Scatter plot of number of Canadians reporting a language as their mother tongue vs the primary language at home with log-adjusted x and y axes.
+:::
+
+You will notice two things in the chart above:
+changing the axis to log creates many axis ticks and gridlines,
+which makes the appearance of the chart rather noisy
+and makes it hard to focus on the data.
+You can also see that the second last tick label is missing on the x-axis;
+Altair dropped it because there wasn't space to fit in all the large numbers next to each other.
+It is also hard to see if the label for 100,000,000 is for the last or second last tick.
+To fix these issues,
+we can limit the number of ticks and gridlines to only include the seven major ones,
+and change the number formatting to include a suffix which makes the labels shorter.
+ +```{code-cell} ipython3 +can_lang_plot_log_revised = alt.Chart(can_lang).mark_circle().encode( + x=alt.X("most_at_home") + .scale(type="log") + .title(["Language spoken most at home", "(number of Canadian residents)"]) + .axis(tickCount=7, format="s"), + y=alt.Y("mother_tongue") + .scale(type="log") + .title(["Mother tongue", "(number of Canadian residents)"]) + .axis(tickCount=7, format="s") +).configure_axis(titleFontSize=12) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("can_lang_plot_log_revised", can_lang_plot_log_revised, display=False) +``` + +:::{glue:figure} can_lang_plot_log_revised +:figwidth: 700px +:name: can_lang_plot_log_revised + +Scatter plot of number of Canadians reporting a language as their mother tongue vs the primary language at home with log-adjusted x and y axes. Only the major gridlines are shown. The suffix "k" indicates 1,000 ("kilo"), while the suffix "M" indicates 1,000,000 ("million"). +::: + + +```{code-cell} ipython3 +:tags: ["remove-cell"] +english_mother_tongue = can_lang.loc[can_lang["language"]=="English"].mother_tongue.values[0] +census_popn = int(35151728) +result = round((english_mother_tongue/census_popn)*100,2) +glue("english_mother_tongue", "{0:,.0f}".format(english_mother_tongue)) +glue("census_popn", "{0:,.0f}".format(census_popn)) +glue("result", "{:.2f}".format(result)) + +``` + +Similar to some of the examples in {numref}`Chapter %s `, +we can convert the counts to percentages to give them context +and make them easier to understand. +We can do this by dividing the number of people reporting a given language +as their mother tongue or primary language at home +by the number of people who live in Canada and multiplying by 100\%. 
+For example, +the percentage of people who reported that their mother tongue was English +in the 2016 Canadian census +was {glue:text}`english_mother_tongue` / {glue:text}`census_popn` $\times$ +100\% = {glue:text}`result`\% + +Below we assign the percentages of people reporting a given +language as their mother tongue and primary language at home +to two new columns in the `can_lang` data frame. Since the new columns are appended to the +end of the data table, we selected the new columns after the transformation so +you can clearly see the mutated output from the table. +Note that we formatted the number for the Canadian population +using `_` so that it is easier to read; +this does not affect how Python interprets the number +and is just added for readability. + +```{index} pandas.DataFrame; assign, pandas.DataFrame; [[]] +``` + +```{code-cell} ipython3 +canadian_population = 35_151_728 +can_lang["mother_tongue_percent"] = can_lang["mother_tongue"] / canadian_population * 100 +can_lang["most_at_home_percent"] = can_lang["most_at_home"] / canadian_population * 100 +can_lang[["mother_tongue_percent", "most_at_home_percent"]] +``` + +Next, we will edit the visualization to use the percentages we just computed +(and change our axis labels to reflect this change in +units). {numref}`can_lang_plot_percent` displays +the final result. +Here all the tick labels fit by default so we are not changing the labels to include suffixes. +Note that suffixes can also be harder to understand, +so it is often advisable to avoid them (particularly for small quantities) +unless you are communicating to a technical audience. 
+ +```{code-cell} ipython3 +can_lang_plot_percent = alt.Chart(can_lang).mark_circle().encode( + x=alt.X("most_at_home_percent") + .scale(type="log") + .axis(tickCount=7) + .title(["Language spoken most at home", "(percentage of Canadian residents)"]), + y=alt.Y("mother_tongue_percent") + .scale(type="log") + .axis(tickCount=7) + .title(["Mother tongue", "(percentage of Canadian residents)"]), +).configure_axis(titleFontSize=12) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +# Increasing the dimensions makes all the ticks fit in jupyter book (the fit with the default dimensions in jupyterlab) +glue("can_lang_plot_percent", can_lang_plot_percent.properties(height=320, width=420), display=False) +``` + +:::{glue:figure} can_lang_plot_percent +:figwidth: 700px +:name: can_lang_plot_percent + +Scatter plot of percentage of Canadians reporting a language as their mother tongue vs the primary language at home. +::: + +{numref}`can_lang_plot_percent` is the appropriate +visualization to use to answer the first question in this section, i.e., +whether there is a relationship between the percentage of people who speak +a language as their mother tongue and the percentage for whom that +is the primary language spoken at home. +To fully answer the question, we need to use + {numref}`can_lang_plot_percent` +to assess a few key characteristics of the data: + +```{index} relationship; positive negative none +``` + +- **Direction:** if the y variable tends to increase when the x variable increases, then y has a **positive** relationship with x. If + y tends to decrease when x increases, then y has a **negative** relationship with x. If y does not meaningfully increase or decrease + as x increases, then y has **little or no** relationship with x. + +```{index} relationship; strong weak +``` + +- **Strength:** if the y variable *reliably* increases, decreases, or stays flat as x increases, + then the relationship is **strong**. Otherwise, the relationship is **weak**. 
Intuitively, + the relationship is strong when the scatter points are close together and look more like a "line" or "curve" than a "cloud." + +```{index} relationship; linear nonlinear +``` + +- **Shape:** if you can draw a straight line roughly through the data points, the relationship is **linear**. Otherwise, it is **nonlinear**. + +In {numref}`can_lang_plot_percent`, we see that +as the percentage of people who have a language as their mother tongue increases, +so does the percentage of people who speak that language at home. +Therefore, there is a **positive** relationship between these two variables. +Furthermore, because the points in {numref}`can_lang_plot_percent` +are fairly close together, and the points look more like a "line" than a "cloud", +we can say that this is a **strong** relationship. +And finally, because drawing a straight line through these points in +{numref}`can_lang_plot_percent` +would fit the pattern we observe quite well, we say that the relationship is **linear**. + +Onto the second part of our exploratory data analysis question! +Recall that we are interested in knowing whether the strength +of the relationship we uncovered +in {numref}`can_lang_plot_percent` depends +on the higher-level language category (Official languages, Aboriginal languages, +and non-official, non-Aboriginal languages). +One common way to explore this +is to color the data points on the scatter plot we have already created by +group. For example, given that we have the higher-level language category for +each language recorded in the 2016 Canadian census, we can color the points in +our previous +scatter plot to represent each language's higher-level language category. + +Here we want to distinguish the values according to the `category` group with +which they belong. We can add the argument `color` to the `encode` method, specifying +that the `category` column should color the points. 
Adding this argument will
+color the points according to their group and add a legend at the side of the
+plot.
+Since the labels of the language categories are descriptive on their own,
+we can remove the title of the legend to reduce visual clutter without reducing the effectiveness of the chart.
+
+```{code-cell} ipython3
+can_lang_plot_category=alt.Chart(can_lang).mark_circle().encode(
+    x=alt.X("most_at_home_percent")
+    .scale(type="log")
+    .axis(tickCount=7)
+    .title(["Language spoken most at home", "(percentage of Canadian residents)"]),
+    y=alt.Y("mother_tongue_percent")
+    .scale(type="log")
+    .axis(tickCount=7)
+    .title(["Mother tongue", "(percentage of Canadian residents)"]),
+    color="category"
+).configure_axis(titleFontSize=12)
+
+```
+
+```{code-cell} ipython3
+:tags: ["remove-cell"]
+# Increasing the dimensions makes all the ticks fit in jupyter book (the fit with the default dimensions in jupyterlab)
+glue("can_lang_plot_category", can_lang_plot_category.properties(height=320, width=420), display=False)
+```
+
+:::{glue:figure} can_lang_plot_category
+:figwidth: 700px
+:name: can_lang_plot_category
+
+Scatter plot of percentage of Canadians reporting a language as their mother tongue vs the primary language at home colored by language category.
+:::
+
+
+Another thing we can adjust is the location of the legend.
+This is a matter of preference and not critical for the visualization.
+We move the legend title using the `alt.Legend` method
+and specify that we want it on the top of the chart.
+This automatically changes the legend items to be laid out horizontally instead of vertically,
+but we could also keep the vertical layout by specifying `direction="vertical"` inside `alt.Legend`.
+ +```{code-cell} ipython3 +can_lang_plot_legend = alt.Chart(can_lang).mark_circle().encode( + x=alt.X("most_at_home_percent") + .scale(type="log") + .axis(tickCount=7) + .title(["Language spoken most at home", "(percentage of Canadian residents)"]), + y=alt.Y("mother_tongue_percent") + .scale(type="log") + .axis(tickCount=7) + .title(["Mother tongue", "(percentage of Canadian residents)"]), + color=alt.Color("category") + .legend(orient="top") + .title("") +).configure_axis(titleFontSize=12) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +# Increasing the dimensions makes all the ticks fit in jupyter book (the fit with the default dimensions in jupyterlab) +glue("can_lang_plot_legend", can_lang_plot_legend.properties(height=320, width=420), display=False) +``` + +:::{glue:figure} can_lang_plot_legend +:figwidth: 700px +:name: can_lang_plot_legend + +Scatter plot of percentage of Canadians reporting a language as their mother tongue vs the primary language at home colored by language category with the legend edited. +::: + +In {numref}`can_lang_plot_legend`, the points are colored with +the default `altair` color scheme, which is called `"tableau10"`. This is an appropriate choice for most situations and is also easy to read for people with reduced color vision. +In general, the color schemes that are used by default in Altair are adapted to the type of data that is displayed and selected to be easy to interpret both for people with good and reduced color vision. +If you are unsure about a certain color combination, you can use +this [color blindness simulator](https://www.color-blindness.com/coblis-color-blindness-simulator/) to check +if your visualizations are color-blind friendly. + +```{index} color palette; color blindness simulator +``` + +All the available color schemes and information on how to create your own can be viewed [in the Altair documentation](https://altair-viz.github.io/user_guide/customization.html#customizing-colors). 
+To change the color scheme of our chart, +we can add the `scheme` argument in the `scale` of the `color` encoding. +Below we pick the `"dark2"` theme, with the result shown +in {numref}`can_lang_plot_theme`. +We also set the `shape` aesthetic mapping to the `category` variable as well; +this makes the scatter point shapes different for each language category. This kind of +visual redundancy—i.e., conveying the same information with both scatter point color and shape—can +further improve the clarity and accessibility of your visualization, +but can add visual noise if there are many different shapes and colors, +so it should be used with care. +Note that we are switching back to the use of `mark_point` here +since `mark_circle` does not support the `shape` encoding +and will always show up as a filled circle. + +```{code-cell} ipython3 +can_lang_plot_theme = alt.Chart(can_lang).mark_point(filled=True).encode( + x=alt.X("most_at_home_percent") + .scale(type="log") + .axis(tickCount=7) + .title(["Language spoken most at home", "(percentage of Canadian residents)"]), + y=alt.Y("mother_tongue_percent") + .scale(type="log") + .axis(tickCount=7) + .title("Mother tongue (percentage of Canadian residents)"), + color=alt.Color("category") + .legend(orient="top") + .title("") + .scale(scheme="dark2"), + shape="category" +).configure_axis(titleFontSize=12) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +# Increasing the dimensions makes all the ticks fit in jupyter book (the fit with the default dimensions in jupyterlab) +glue("can_lang_plot_theme", can_lang_plot_theme.properties(height=320, width=420), display=False) +``` + +:::{glue:figure} can_lang_plot_theme +:figwidth: 700px +:name: can_lang_plot_theme + +Scatter plot of percentage of Canadians reporting a language as their mother tongue vs the primary language at home colored by language category with custom colors and shapes. 
+::: + +The chart above gives a good indication of how the different language categories differ, +and this information is sufficient to answer our research question. +But what if we want to know exactly which language correspond to which point in the chart? +With a regular visualization library this would not be possible, +as adding text labels for each individual language +would add a lot of visual noise and make the chart difficult to interpret. +However, since Altair is an interactive visualization library we can add information on demand +via the `Tooltip` encoding channel, +so that text labels for each point show up once we hover over it with the mouse pointer. +Here we also add the exact values of the variables on the x and y-axis to the tooltip. + +```{code-cell} ipython3 +can_lang_plot_tooltip = alt.Chart(can_lang).mark_point(filled=True).encode( + x=alt.X("most_at_home_percent") + .scale(type="log") + .axis(tickCount=7) + .title(["Language spoken most at home", "(percentage of Canadian residents)"]), + y=alt.Y("mother_tongue_percent") + .scale(type="log") + .axis(tickCount=7) + .title("Mother tongue (percentage of Canadian residents)"), + color=alt.Color("category") + .legend(orient="top") + .title("") + .scale(scheme="dark2"), + shape="category", + tooltip=alt.Tooltip(["language", "mother_tongue", "most_at_home"]) +).configure_axis(titleFontSize=12) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +if "BOOK_BUILD_TYPE" in os.environ and os.environ["BOOK_BUILD_TYPE"] == "PDF": + glue("can_lang_plot_tooltip", Image("img/viz/languages_with_mouse.png"), display=False) +else: + # Increasing the dimensions makes all the ticks fit in jupyter book (the fit with the default dimensions in jupyterlab) + glue("can_lang_plot_tooltip", can_lang_plot_tooltip.properties(height=320, width=420), display=False) +``` + +:::{glue:figure} can_lang_plot_tooltip +:figwidth: 700px +:name: can_lang_plot_tooltip + +Scatter plot of percentage of Canadians reporting a language 
as their mother tongue vs the primary language at home colored by language category with custom colors and mouse hover tooltip. +::: + +From the visualization in {numref}`can_lang_plot_tooltip`, +we can now clearly see that the vast majority of Canadians reported one of the official languages +as their mother tongue and as the language they speak most often at home. +What do we see when considering the second part of our exploratory question? +Do we see a difference in the relationship +between languages spoken as a mother tongue and as a primary language +at home across the higher-level language categories? +Based on {numref}`can_lang_plot_tooltip`, there does not +appear to be much of a difference. +For each higher-level language category, +there appears to be a strong, positive, and linear relationship between +the percentage of people who speak a language as their mother tongue +and the percentage who speak it as their primary language at home. +The relationship looks similar regardless of the category. + +Does this mean that this relationship is positive for all languages in the +world? And further, can we use this data visualization on its own to predict how many people +have a given language as their mother tongue if we know how many people speak +it as their primary language at home? The answer to both these questions is +"no!" However, with exploratory data analysis, we can create new hypotheses, +ideas, and questions (like the ones at the beginning of this paragraph). +Answering those questions often involves doing more complex analyses, and sometimes +even gathering additional data. We will see more of such complex analyses later on in +this book. + +### Bar plots: the island landmass data set + +```{index} Island landmasses +``` + +The `islands.csv` data set contains a list of Earth's landmasses as well as their area (in thousands of square miles) {cite:p}`islandsdata`. 
+ +```{index} question; visualization +``` + +**Question:** Are the continents (North / South America, Africa, Europe, Asia, Australia, Antarctica) Earth's seven largest landmasses? If so, what are the next few largest landmasses after those? + +To get started, we will read and inspect the data: + +```{code-cell} ipython3 +:tags: ["output_scroll"] +islands_df = pd.read_csv("data/islands.csv") +islands_df +``` + +Here, we have a data frame of Earth's landmasses, +and are trying to compare their sizes. +The right type of visualization to answer this question is a bar plot. +In a bar plot, the height of each bar represents the value of an *amount* +(a size, count, proportion, percentage, etc). +They are particularly useful for comparing counts or proportions across different +groups of a categorical variable. Note, however, that bar plots should generally not be +used to display mean or median values, as they hide important information about +the variation of the data. Instead it's better to show the distribution of +all the individual data points, e.g., using a histogram, which we will discuss further in {numref}`histogramsviz`. + +```{index} altair; mark_bar +``` + +We specify that we would like to use a bar plot +via the `mark_bar` function in `altair`. +The result is shown in {numref}`islands_bar`. + +```{code-cell} ipython3 +islands_bar = alt.Chart(islands_df).mark_bar().encode( + x="landmass", + y="size" +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("islands_bar", islands_bar, display=False) +``` + +:::{glue:figure} islands_bar +:figwidth: 400px +:name: islands_bar + +Bar plot of Earth's landmass sizes. The plot is too wide with the default settings. +::: + +Alright, not bad! The plot in {numref}`islands_bar` is +definitely the right kind of visualization, as we can clearly see and compare +sizes of landmasses. 
The major issues are that the smaller landmasses' sizes +are hard to distinguish, and the plot is so wide that we can't compare them all! But remember that the +question we asked was only about the largest landmasses; let's make the plot a +little bit clearer by keeping only the largest 12 landmasses. We do this using +the `nlargest` function: the first argument is the number of rows we want and +the second is the name of the column we want to use for comparing which is +largest. Then to help make the landmass labels easier to read +we'll swap the `x` and `y` variables, +so that the labels are on the y-axis and we don't have to tilt our head to read them. + +```{note} +Recall that in {numref}`Chapter %s `, we used `sort_values` followed by `head` to obtain +the ten rows with the largest values of a variable. We could have instead used the `nlargest` function +from `pandas` for this purpose. The `nsmallest` and `nlargest` functions achieve the same goal +as `sort_values` followed by `head`, but are slightly more efficient because they are specialized for this purpose. +In general, it is good to use more specialized functions when they are available! +``` + +```{index} pandas.DataFrame; nlargest; nsmallest +``` + +```{code-cell} ipython3 +islands_top12 = islands_df.nlargest(12, "size") + +islands_bar_top = alt.Chart(islands_top12).mark_bar().encode( + x="size", + y="landmass" +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("islands_bar_top", islands_bar_top, display=True) +``` + +:::{glue:figure} islands_bar_top +:figwidth: 700px +:name: islands_bar_top + +Bar plot of size for Earth's largest 12 landmasses. +::: + + +The plot in {numref}`islands_bar_top` is definitely clearer now, +and allows us to answer our initial questions: +"Are the seven continents Earth's largest landmasses?" +and "Which are the next few largest landmasses?". 
+However, we could still improve this visualization +by coloring the bars based on whether they correspond to a continent, and +by organizing the bars by landmass size rather than by alphabetical order. +The data for coloring the bars is stored in the `landmass_type` column, so +we set the `color` encoding to `landmass_type`. +To organize the landmasses by their `size` variable, +we will use the altair `sort` function +in the y-encoding of the chart. +Since the `size` variable is encoded in the x channel of the chart, +we specify `sort("x")` on `alt.Y`. +This plots the values on `y` axis +in the ascending order of `x` axis values. +This creates a chart where the largest bar is the closest to the axis line, +which is generally the most visually appealing when sorting bars. +If instead we wanted to sort the values on `y-axis` in descending order of `x-axis`, +we could add a minus sign to reverse the order and specify `sort="-x"`. + +```{index} altair; sort +``` + +To finalize this plot we will customize the axis and legend labels using the `title` method, +and add a title to the chart by specifying the `title` argument of `alt.Chart`. +Plot titles are not always required, especially when it would be redundant with an already-existing +caption or surrounding context (e.g., in a slide presentation with annotations). +But if you decide to include one, a good plot title should provide the take home message +that you want readers to focus on, e.g., "Earth's seven largest landmasses are continents," +or a more general summary of the information displayed, e.g., "Earth's twelve largest landmasses." 
+ +```{code-cell} ipython3 +islands_plot_sorted = alt.Chart( + islands_top12, + title="Earth's seven largest landmasses are continents" +).mark_bar().encode( + x=alt.X("size").title("Size (1000 square mi)"), + y=alt.Y("landmass").sort("x").title("Landmass"), + color=alt.Color("landmass_type").title("Type") +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("islands_plot_sorted", islands_plot_sorted, display=True) +``` + +:::{glue:figure} islands_plot_sorted +:figwidth: 700px +:name: islands_plot_sorted + +Bar plot of size for Earth's largest 12 landmasses, colored by landmass type, with clearer axes and labels. +::: + + +The plot in {numref}`islands_plot_sorted` is now an effective +visualization for answering our original questions. Landmasses are organized by +their size, and continents are colored differently than other landmasses, +making it quite clear that all the seven largest landmasses are continents. + +(histogramsviz)= +### Histograms: the Michelson speed of light data set + +```{index} Michelson speed of light +``` + +The `morley` data set +contains measurements of the speed of light +collected in experiments performed in 1879. +Five experiments were performed, +and in each experiment, 20 runs were performed—meaning that +20 measurements of the speed of light were collected +in each experiment {cite:p}`lightdata`. +Because the speed of light is a very large number +(the true value is 299,792.458 km/sec), the data is coded +to be the measured speed of light minus 299,000. +This coding allows us to focus on the variations in the measurements, which are generally +much smaller than 299,000. +If we used the full large speed measurements, the variations in the measurements +would not be noticeable, making it difficult to study the differences between the experiments. 
+
+```{index} question; visualization
+```
+
+**Question:** Given what we know now about the speed of
+light (299,792.458 kilometres per second), how accurate were each of the experiments?
+
+First, we read in the data.
+
+```{code-cell} ipython3
+morley_df = pd.read_csv("data/morley.csv")
+morley_df
+```
+
+```{index} distribution, altair; histogram
+```
+
+In this experimental data,
+Michelson was trying to measure just a single quantitative number
+(the speed of light).
+The data set contains many measurements of this single quantity.
+To tell how accurate the experiments were,
+we need to visualize the distribution of the measurements
+(i.e., all their possible values and how often each occurs).
+We can do this using a *histogram*.
+A histogram
+helps us visualize how a particular variable is distributed in a data set
+by grouping the values into bins,
+and then using vertical bars to show how many data points fell in each bin.
+
+To understand how to create a histogram in `altair`,
+let's start by creating a bar chart
+just like we did in the previous section.
+Note that this time,
+we are setting the `y` encoding to `"count()"`.
+There is no `"count()"` column-name in `morley_df`;
+we use `"count()"` to tell `altair`
+that we want to count the number of occurrences of each value along the x-axis
+(which we encoded as the `Speed` column).
+
+```{code-cell} ipython3
+morley_bars = alt.Chart(morley_df).mark_bar().encode(
+    x="Speed",
+    y="count()"
+)
+```
+
+```{code-cell} ipython3
+:tags: ["remove-cell"]
+glue("morley_bars", morley_bars, display=False)
+```
+
+:::{glue:figure} morley_bars
+:figwidth: 700px
+:name: morley_bars
+
+A bar chart of Michelson's speed of light data.
+:::
+
+The bar chart above gives us an indication of
+which values are more common than others,
+but because the bars are so thin it's hard to get a sense for the
+overall distribution of the data.
+
+We don't really care about how many occurrences there are of each exact `Speed` value,
+but rather where most of the `Speed` values fall in general.
+To more effectively communicate this information
+we can group the x-axis into bins (or "buckets") using the `bin` method
+and then count how many `Speed` values fall within each bin.
+A bar chart that represents the count of values
+for a binned quantitative variable is called a histogram.
+
+```{code-cell} ipython3
+morley_hist = alt.Chart(morley_df).mark_bar().encode(
+    x=alt.X("Speed").bin(),
+    y="count()"
+)
+```
+
+```{code-cell} ipython3
+:tags: ["remove-cell"]
+glue("morley_hist", morley_hist, display=False)
+```
+
+:::{glue:figure} morley_hist
+:figwidth: 700px
+:name: morley_hist
+
+Histogram of Michelson's speed of light data.
+:::
+
+#### Adding layers to an `altair` chart
+
+```{index} altair; +; mark_rule
+```
+
+{numref}`morley_hist` is a great start.
+However,
+we cannot tell how accurate the measurements are using this visualization
+unless we can see the true value.
+In order to visualize the true speed of light,
+we will add a vertical line with the `mark_rule` function.
+To draw a vertical line with `mark_rule`,
+we need to specify where on the x-axis the line should be drawn.
+We can do this by providing `x=alt.datum(792.458)`,
+where the value `792.458` is the true speed of light minus 299,000
+and `alt.datum` tells altair that we have a single datum
+(number) that we would like plotted (rather than a column in the data frame).
+Similarly, a horizontal line can be plotted using the `y` axis encoding and
+the dataframe with one value, which would act as the y-intercept.
+Note that
+*vertical lines* are used to denote quantities on the *horizontal axis*,
+while *horizontal lines* are used to denote quantities on the *vertical axis*.
+ +To fine tune the appearance of this vertical line, +we can change it from a solid to a dashed line with `strokeDash=[5]`, +where `5` indicates the length of each dash. We also +change the thickness of the line by specifying `size=2`. +To add the dashed line on top of the histogram, we +**add** the `mark_rule` chart to the `morley_hist` +using the `+` operator. +Adding features to a plot using the `+` operator is known as *layering* in `altair`. +This is a powerful feature of `altair`; you +can continue to iterate on a single chart, adding and refining +one layer at a time. If you stored your chart as a variable +using the assignment symbol (`=`), you can add to it using the `+` operator. +Below we add a vertical line created using `mark_rule` +to the `morley_hist` we created previously. + +```{note} +Technically we could have left out the data argument +when creating the rule chart +since we're not using any values from the `morley_df` data frame, +but we will need it later when we facet this layered chart, +so we are including it here already. +``` + +```{code-cell} ipython3 +v_line = alt.Chart(morley_df).mark_rule(strokeDash=[5], size=2).encode( + x=alt.datum(792.458) +) + +morley_hist_line = morley_hist + v_line +``` + + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("morley_hist_line", morley_hist_line, display=False) +``` + +:::{glue:figure} morley_hist_line +:figwidth: 700px +:name: morley_hist_line + +Histogram of Michelson's speed of light data with vertical line indicating the true speed of light. +::: + +In {numref}`morley_hist_line`, +we still cannot tell which experiments (denoted by the `Expt` column) +led to which measurements; +perhaps some experiments were more accurate than others. +To fully answer our question, +we need to separate the measurements from each other visually. +We can try to do this using a *colored* histogram, +where counts from different experiments are stacked on top of each other +in different colors. 
+We can create a histogram colored by the `Expt` variable +by adding it to the `color` argument. + +```{code-cell} ipython3 +morley_hist_colored = alt.Chart(morley_df).mark_bar().encode( + x=alt.X("Speed").bin(), + y="count()", + color="Expt" +) + +morley_hist_colored = morley_hist_colored + v_line + +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("morley_hist_colored", morley_hist_colored, display=True) +``` + +:::{glue:figure} morley_hist_colored +:figwidth: 700px +:name: morley_hist_colored + +Histogram of Michelson's speed of light data colored by experiment. +::: + +```{index} integer +``` + +Alright great, {numref}`morley_hist_colored` looks... wait a second! We are not able to easily distinguish +between the colors of the different Experiments in the histogram! What is going on here? Well, if you +recall from {numref}`Chapter %s `, the *data type* you use for each variable +can influence how Python and `altair` treats it. Here, we indeed have an issue +with the data types in the `morley` data frame. In particular, the `Expt` column +is currently an *integer*---specifically, an `int64` type. But we want to treat it as a +*category*, i.e., there should be one category per type of experiment. +```{code-cell} ipython3 +morley_df.info() +``` + +```{index} nominal, altair; :N +``` + +To fix this issue we can convert the `Expt` variable into a `nominal` +(i.e., categorical) type variable by adding a suffix `:N` +to the `Expt` variable. Adding the `:N` suffix ensures that `altair` +will treat a variable as a categorical variable, and +hence use a discrete color map in visualizations +([read more about data types in the altair documentation](https://altair-viz.github.io/user_guide/encoding.html#encoding-data-types)). +We also add the `stack(False)` method on the `y` encoding so +that the bars are not stacked on top of each other, +but instead share the same baseline. 
+We try to ensure that the different colors can be seen +despite them sitting in front of each other +by setting the `opacity` argument in `mark_bar` to `0.5` +to make the bars slightly translucent. + +```{code-cell} ipython3 +morley_hist_categorical = alt.Chart(morley_df).mark_bar(opacity=0.5).encode( + x=alt.X("Speed").bin(), + y=alt.Y("count()").stack(False), + color="Expt:N" +) + +morley_hist_categorical = morley_hist_categorical + v_line +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("morley_hist_categorical", morley_hist_categorical, display=True) +``` + +:::{glue:figure} morley_hist_categorical +:figwidth: 700px +:name: morley_hist_categorical + +Histogram of Michelson's speed of light data colored by experiment as a categorical variable. +::: + +Unfortunately, the attempt to separate out the experiment number visually has +created a bit of a mess. All of the colors in {numref}`morley_hist_categorical` are blending together, and although it is +possible to derive *some* insight from this (e.g., experiments 1 and 3 had some +of the most incorrect measurements), it isn't the clearest way to convey our +message and answer the question. Let's try a different strategy of creating +grid of separate histogram plots. + ++++ + +```{index} altair; facet +``` + +We can use the `facet` function to create a chart +that has multiple subplots arranged in a grid. +The argument to `facet` specifies the variable(s) used to split the plot +into subplots (`Expt` in the code below), +and how many columns there should be in the grid. +In this example, we chose to +arrange our plots in a single column (`columns=1`) since this makes it easier for +us to compare the location of the histograms along the `x`-axis +in the different subplots. +We also reduce the height of each chart +so that they all fit in the same view. +Note that we are re-using the chart we created just above, +instead of re-creating the same chart from scratch. 
+We also explicitly specify that `facet` is a categorical variable +since faceting should only be done with categorical variables. + +```{code-cell} ipython3 +morley_hist_facet = morley_hist_categorical.properties( + height=100 +).facet( + "Expt:N", + columns=1 +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("morley_hist_facet", morley_hist_facet, display=True) +``` + +:::{glue:figure} morley_hist_facet +:figwidth: 700px +:name: morley_hist_facet + +Histogram of Michelson's speed of light data split vertically by experiment. +::: + +The visualization in {numref}`morley_hist_facet` +makes it clear how accurate the different experiments were +with respect to one another. +The most variable measurements came from Experiment 1, +where the measurements ranged from about 650–1050 km/sec. +The least variable measurements came from Experiment 2, +where the measurements ranged from about 750–950 km/sec. +The most different experiments still obtained quite similar overall results! + +```{index} altair; alt.X, altair; alt.Y, altair; configure_axis +``` + +There are three finishing touches to make this visualization even clearer. +First and foremost, we need to add informative axis labels using the `alt.X` +and `alt.Y` function, and increase the font size to make it readable using the +`configure_axis` function. We can also add a title; for a `facet` plot, this is +done by providing the `title` to the facet function. Finally, and perhaps most +subtly, even though it is easy to compare the experiments on this plot to one +another, it is hard to get a sense of just how accurate all the experiments +were overall. For example, how accurate is the value 800 on the plot, relative +to the true speed of light? To answer this question, we'll +transform our data to a relative measure of error rather than an absolute measurement. 
+ +```{code-cell} ipython3 +speed_of_light = 299792.458 +morley_df["RelativeError"] = ( + 100 * (299000 + morley_df["Speed"] - speed_of_light) / speed_of_light +) +morley_df +``` + +```{code-cell} ipython3 +morley_hist_rel = alt.Chart(morley_df).mark_bar().encode( + x=alt.X("RelativeError") + .bin() + .title("Relative Error (%)"), + y=alt.Y("count()").title("# Measurements"), + color=alt.Color("Expt:N").title("Experiment ID") +) + +# Recreating v_line to indicate that the speed of light is at 0% relative error +v_line = alt.Chart(morley_df).mark_rule(strokeDash=[5], size=2).encode( + x=alt.datum(0) +) + +morley_hist_relative = (morley_hist_rel + v_line).properties( + height=100 +).facet( + "Expt:N", + columns=1, + title="Histogram of relative error of Michelson’s speed of light data" +) + +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("morley_hist_relative", morley_hist_relative, display=True) +``` + +:::{glue:figure} morley_hist_relative +:figwidth: 700px +:name: morley_hist_relative + +Histogram of relative error split vertically by experiment with clearer axes and labels +::: + +Wow, impressive! These measurements of the speed of light from 1879 had errors +around *0.05%* of the true speed. {numref}`morley_hist_relative` shows you that +even though experiments 2 and 5 were perhaps the most accurate, all of the +experiments did quite an admirable job given the technology available at the time. + +#### Choosing a binwidth for histograms + +When you create a histogram in `altair`, it tries to choose a reasonable number of bins. +We can change the number of bins by using the `maxbins` parameter +inside the `bin` method. 
+ +```{code-cell} ipython3 +morley_hist_maxbins = alt.Chart(morley_df).mark_bar().encode( + x=alt.X("RelativeError").bin(maxbins=30), + y="count()" +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("morley_hist_maxbins", morley_hist_maxbins, display=False) +``` + +:::{glue:figure} morley_hist_maxbins +:figwidth: 700px +:name: morley_hist_maxbins + +Histogram of Michelson's speed of light data. +::: + + +But what number of bins is the right one to use? +Unfortunately there is no hard rule for what the right bin number +or width is. It depends entirely on your problem; the *right* number of bins +or bin width is +the one that *helps you answer the question* you asked. +Choosing the correct setting for your problem +is something that commonly takes iteration. +It's usually a good idea to try out several `maxbins` to see which one +most clearly captures your data in the context of the question +you want to answer. + +To get a sense for how different bin widths affect visualizations, +let's experiment with the histogram that we have been working on in this section. +In {numref}`morley_hist_max_bins`, +we compare the default setting with three other histograms where we set the +`maxbins` to 200, 70 and 5. +In this case, we can see that both the default number of bins +and the `maxbins=70` are effective for helping to answer our question. +On the other hand, the `maxbins=200` and `maxbins=5` are too small and too big, respectively.
+ +```{code-cell} ipython3 +:tags: ["remove-cell"] +morley_hist_default = alt.Chart(morley_df).mark_bar().encode( + x=alt.X( + "RelativeError", + title="Relative error (%)", + bin=True + ), + y=alt.Y( + "count()", + stack=False, + title="# Measurements" + ), + color=alt.Color( + "Expt:N", + title="Experiment ID", + legend=None + ) +).properties(height=100, width=250) + +morley_hist_max_bins = alt.vconcat( + alt.hconcat( + (morley_hist_default + v_line).facet( + "Expt:N", + columns=1, + title=alt.TitleParams("Default (bin=True)", fontSize=16, anchor="middle", dx=15) + ), + (morley_hist_default.encode( + x=alt.X( + "RelativeError", + bin=alt.Bin(maxbins=5), + title="Relative error (%)" + ) + ) + v_line).facet( + "Expt:N", + columns=1, + title=alt.TitleParams("maxbins=5", fontSize=16, anchor="middle", dx=15) + ), + ), + alt.hconcat( + (morley_hist_default.encode( + x=alt.X( + "RelativeError", + bin=alt.Bin(maxbins=70), + title="Relative error (%)" + ) + ) + v_line).facet( + "Expt:N", + columns=1, + title=alt.TitleParams("maxbins=70", fontSize=16, anchor="middle", dx=15) + ), + (morley_hist_default.encode( + x=alt.X( + "RelativeError", + bin=alt.Bin(maxbins=200), + title="Relative error (%)" + ) + ) + v_line).facet( + "Expt:N", + columns=1, + title=alt.TitleParams("maxbins=200", fontSize=16, anchor="middle", dx=15) + ) + ), + spacing=50 +) +``` + +```{code-cell} ipython3 +:tags: ["remove-cell"] +glue("morley_hist_max_bins", morley_hist_max_bins, display=True) +``` + +:::{glue:figure} morley_hist_max_bins +:figwidth: 700px +:name: morley_hist_max_bins + +Effect of varying number of max bins on histograms. +::: + +## Explaining the visualization +*Tell a story* + +Typically, your visualization will not be shown entirely on its own, but rather +it will be part of a larger presentation. Further, visualizations can provide +supporting information for any aspect of a presentation, from opening to +conclusion. 
For example, you could use an exploratory visualization in the +opening of the presentation to motivate your choice of a more detailed data +analysis / model, a visualization of the results of your analysis to show what +your analysis has uncovered, or even one at the end of a presentation to help +suggest directions for future work. + +```{index} visualization; explanation +``` + +Regardless of where it appears, a good way to discuss your visualization is as +a story: + +1) Establish the setting and scope, and describe why you did what you did. +2) Pose the question that your visualization answers. Justify why the question is important to answer. +3) Answer the question using your visualization. Make sure you describe *all* aspects of the visualization (including describing the axes). But you + can emphasize different aspects based on what is important to answer your question: + - **trends (lines):** Does a line describe the trend well? If so, the trend is *linear*, and if not, the trend is *nonlinear*. Is the trend increasing, decreasing, or neither? + Is there a periodic oscillation (wiggle) in the trend? Is the trend noisy (does the line "jump around" a lot) or smooth? + - **distributions (scatters, histograms):** How spread out are the data? Where are they centered, roughly? Are there any obvious "clusters" or "subgroups", which would be visible as multiple bumps in the histogram? + - **distributions of two variables (scatters):** Is there a clear / strong relationship between the variables (points fall in a distinct pattern), a weak one (points fall in a pattern but there is some noise), or no discernible + relationship (the data are too noisy to make any conclusion)? + - **amounts (bars):** How large are the bars relative to one another? Are there patterns in different groups of bars? +4) Summarize your findings, and use them to motivate whatever you will discuss next. 
+ +Below are two examples of how one might take these four steps in describing the example visualizations that appeared earlier in this chapter. +Each of the steps is denoted by its numeral in parentheses, e.g. (3). + +```{index} Mauna Loa +``` + +**Mauna Loa Atmospheric CO$_{\text{2}}$ Measurements:** (1) Many +current forms of energy generation and conversion—from automotive +engines to natural gas power plants—rely on burning fossil fuels and produce +greenhouse gases, typically primarily carbon dioxide (CO$_{\text{2}}$), as a +byproduct. Too much of these gases in the Earth's atmosphere will cause it to +trap more heat from the sun, leading to global warming. (2) In order to assess +how quickly the atmospheric concentration of CO$_{\text{2}}$ is increasing over +time, we (3) used a data set from the Mauna Loa observatory in Hawaii, +consisting of CO$_{\text{2}}$ measurements from 1980 to 2020. We plotted the +measured concentration of CO$_{\text{2}}$ (on the vertical axis) over time (on +the horizontal axis). From this plot, you can see a clear, increasing, and +generally linear trend over time. There is also a periodic oscillation that +occurs once per year and aligns with Hawaii's seasons, with an amplitude that +is small relative to the growth in the overall trend. This shows that +atmospheric CO$_{\text{2}}$ is clearly increasing over time, and (4) it is +perhaps worth investigating more into the causes. + +```{index} Michelson speed of light +``` + +**Michelson Light Speed Experiments:** (1) Our +modern understanding of the physics of light has advanced significantly from +the late 1800s when Michelson and Morley's experiments first demonstrated that +it had a finite speed. We now know, based on modern experiments, that it moves at +roughly 299,792.458 kilometers per second. (2) But how accurately were we first +able to measure this fundamental physical constant, and did certain experiments +produce more accurate results than others? 
(3) To better understand this, we +plotted data from 5 experiments by Michelson in 1879, each with 20 trials, as +histograms stacked on top of one another. The horizontal axis shows the +error of the measurements relative to the true speed of light as we know it +today, expressed as a percentage. From this visualization, you can see that +most results had relative errors of at most 0.05%. You can also see that +experiments 1 and 3 had measurements that were the farthest from the true +value, and experiment 5 tended to provide the most consistently accurate +result. (4) It would be worth further investigating the differences between +these experiments to see why they produced different results. + +## Saving the visualization + +*Choose the right output format for your needs* + +```{index} see: bitmap; raster graphics +``` + +```{index} raster graphics, vector graphics +``` + +Just as there are many ways to store data sets, there are many ways to store +visualizations and images. Which one you choose can depend on several factors, +such as file size/type limitations (e.g., if you are submitting your +visualization as part of a conference paper or to a poster printing shop) and +where it will be displayed (e.g., online, in a paper, on a poster, on a +billboard, in talk slides). Generally speaking, images come in two flavors: +*raster* formats +and *vector* formats. + +```{index} raster graphics; file types +``` + +**Raster** images are represented as a 2-D grid of square pixels, each +with its own color. Raster images are often *compressed* before storing so they +take up less space. A compressed format is *lossy* if the image cannot be +perfectly re-created when loading and displaying, with the hope that the change +is not noticeable. *Lossless* formats, on the other hand, allow a perfect +display of the original image. 
+ +- *Common file types:* + - [JPEG](https://en.wikipedia.org/wiki/JPEG) (`.jpg`, `.jpeg`): lossy, usually used for photographs + - [PNG](https://en.wikipedia.org/wiki/Portable_Network_Graphics) (`.png`): lossless, usually used for plots / line drawings + - [BMP](https://en.wikipedia.org/wiki/BMP_file_format) (`.bmp`): lossless, raw image data, no compression (rarely used) + - [TIFF](https://en.wikipedia.org/wiki/TIFF) (`.tif`, `.tiff`): typically lossless, no compression, used mostly in graphic arts, publishing +- *Open-source software:* [GIMP](https://www.gimp.org/) + +```{index} vector graphics; file types +``` + +**Vector** images are represented as a collection of mathematical +objects (lines, surfaces, shapes, curves). When the computer displays the image, it +redraws all of the elements using their mathematical formulas. + +- *Common file types:* + - [SVG](https://en.wikipedia.org/wiki/Scalable_Vector_Graphics) (`.svg`): general-purpose use + - [EPS](https://en.wikipedia.org/wiki/Encapsulated_PostScript) (`.eps`), general-purpose use (rarely used) +- *Open-source software:* [Inkscape](https://inkscape.org/) + +Raster and vector images have opposing advantages and disadvantages. A raster +image of a fixed width / height takes the same amount of space and time to load +regardless of what the image shows (the one caveat is that the compression algorithms may +shrink the image more or run faster for certain images). A vector image takes +space and time to load corresponding to how complex the image is, since the +computer has to draw all the elements each time it is displayed. For example, +if you have a scatter plot with 1 million points stored as an SVG file, it may +take your computer some time to open the image. On the other hand, you can zoom +into / scale up vector graphics as much as you like without the image looking +bad, while raster images eventually start to look "pixelated." 
+ +```{index} PDF +``` + +```{index} see: portable document format; PDF +``` + +```{note} +The portable document format [PDF](https://en.wikipedia.org/wiki/PDF) (`.pdf`) is commonly used to +store *both* raster and vector formats. If you try to open a PDF and it's taking a long time +to load, it may be because there is a complicated vector graphics image that your computer is rendering. +``` + +Let's learn how to save plot images to `.png` and `.svg` file formats using the +`faithful_scatter_labels` scatter plot of the [Old Faithful data set](https://www.stat.cmu.edu/~larry/all-of-statistics/=data/faithful.dat) +{cite:p}`faithfuldata` that we created earlier, shown in {numref}`faithful_scatter_labels`. +To save the plot to a file, we can use the `save` +method. The `save` method takes the path to the filename where you would like to +save the file (e.g., `img/viz/filename.png` to save a file named `filename.png` to the `img/viz/` directory). +The kind of image to save is specified by the file extension. For example, to +create a PNG image file, we specify that the file extension is `.png`. Below +we demonstrate how to save PNG and SVG file types for the +`faithful_scatter_labels` plot. + +```{code-cell} ipython3 +faithful_scatter_labels.save("img/viz/faithful_plot.png") +faithful_scatter_labels.save("img/viz/faithful_plot.svg") +``` + +```{code-cell} ipython3 +:tags: [remove-cell] + +import os +import numpy as np +png_size = np.round(os.path.getsize("img/viz/faithful_plot.png")/(1024*1024), 2) +svg_size = np.round(os.path.getsize("img/viz/faithful_plot.svg")/(1024*1024), 2) + +glue("png_size", "{:.2f}".format(png_size)) +glue("svg_size", "{:.2f}".format(svg_size)) +``` + +```{list-table} File sizes of the scatter plot of the Old Faithful data set when saved as different file formats.
+:header-rows: 1 +:name: png-vs-svg-table + +* - Image type + - File type + - Image size +* - Raster + - PNG + - {glue:text}`png_size` MB +* - Vector + - SVG + - {glue:text}`svg_size` MB +``` + +Take a look at the file sizes in {numref}`png-vs-svg-table`. +Wow, that's quite a difference! In this case, the `.png` image is almost 4 times +smaller than the `.svg` image. Since there are a decent number of points in the plot, +the vector graphics format image (`.svg`) is bigger than the raster image (`.png`), which +just stores the image data itself. +In {numref}`png-vs-svg`, we show what +the images look like when we zoom in to a rectangle with only 3 data points. +You can see why vector graphics formats are so useful: because they're just +based on mathematical formulas, vector graphics can be scaled up to arbitrary +sizes. This makes them great for presentation media of all sizes, from papers +to posters to billboards. + +```{figure} img/viz/png-vs-svg.png +--- +height: 400px +name: png-vs-svg +--- +Zoomed in `faithful`, raster (PNG, left) and vector (SVG, right) formats. +``` + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Effective data visualization" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. 
+ +## Additional resources + +- The [altair documentation](https://altair-viz.github.io/) {cite:p}`altair` is + where you should look if you want to learn more about the functions in this + chapter, the full set of arguments you can use, and other related functions. +- The [*Fundamentals of Data Visualization*](https://clauswilke.com/dataviz/) {cite:p}`wilkeviz` has + a wealth of information on designing effective visualizations. It is not + specific to any particular programming language or library. If you want to + improve your visualization skills, this is the next place to look. +- The [dates and times](https://wesmckinney.com/book/time-series.html) chapter + of [*Python for Data Analysis*](https://wesmckinney.com/book/) {cite:p}`mckinney2012python` + is where you should look if you want to learn about `date` and `time`, including + how to create them, and how to use them to effectively handle durations, etc + +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sources/wrangling.md b/pull313/_sources/wrangling.md new file mode 100644 index 00000000..f7f94564 --- /dev/null +++ b/pull313/_sources/wrangling.md @@ -0,0 +1,1832 @@ +--- +jupytext: + formats: py:percent,md:myst,ipynb + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.13.5 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(wrangling)= +# Cleaning and wrangling data + +```{code-cell} ipython3 +:tags: [remove-cell] + +from chapter_preamble import * +import pandas as pd +pd.set_option("display.max_rows", 20) +``` + +## Overview + +This chapter is centered around defining tidy data—a data format that is +suitable for analysis—and the tools needed to transform raw data into this +format. This will be presented in the context of a real-world data science +application, providing more practice working through a whole case study. 
+ ++++ + +## Chapter learning objectives + +By the end of the chapter, readers will be able to do the following: + + - Define the term "tidy data". + - Discuss the advantages of storing data in a tidy data format. + - Define what series and data frames are in Python, and describe how they relate to + each other. + - Describe the common types of data in Python and their uses. + - Recall and use the following functions for their + intended data wrangling tasks: + - `agg` + - `assign` (as well as regular column assignment) + - `groupby` + - `melt` + - `pivot` + - `str.split` + - Recall and use the following operators for their + intended data wrangling tasks: + - `==`, `!=`, `<`, `>`, `<=`, `>=` + - `in` + - `and` + - `or` + - `[]` + - `loc[]` + - `iloc[]` + +## Data frames and series + +In {numref}`Chapters %s ` and {numref}`%s `, *data frames* were the focus: +we learned how to import data into Python as a data frame, and perform basic operations on data frames in Python. +In the remainder of this book, this pattern continues. The vast majority of tools we use will require +that data are represented as a `pandas` **data frame** in Python. Therefore, in this section, +we will dig more deeply into what data frames are and how they are represented in Python. +This knowledge will be helpful in effectively utilizing these objects in our data analyses. + ++++ + +### What is a data frame? + +```{index} data frame; definition +``` + +```{index} pandas.DataFrame +``` + +A data frame is a table-like structure for storing data in Python. Data frames are +important to learn about because most data that you will encounter in practice +can be naturally stored as a table. In order to define data frames precisely, +we need to introduce a few technical terms: + +```{index} variable, observation, value +``` + +- **variable:** a characteristic, number, or quantity that can be measured. +- **observation:** all of the measurements for a given entity. 
+- **value:** a single measurement of a single variable for a given entity. + +Given these definitions, a **data frame** is a tabular data structure in Python +that is designed to store observations, variables, and their values. +Most commonly, each column in a data frame corresponds to a variable, +and each row corresponds to an observation. For example, +{numref}`fig:02-obs` displays a data set of city populations. Here, the variables +are "region, year, population"; each of these are properties that can be +collected or measured. The first observation is "Toronto, 2016, 2235145"; +these are the values that the three variables take for the first entity in the +data set. There are 13 entities in the data set in total, corresponding to the +13 rows in {numref}`fig:02-obs`. + ++++ + +```{figure} img/wrangling/data_frame_slides_cdn.004.jpeg +:name: fig:02-obs +:figclass: figure + +A data frame storing data regarding the population of various regions in Canada. In this example data frame, the row that corresponds to the observation for the city of Vancouver is colored yellow, and the column that corresponds to the population variable is colored blue. +``` + +### What is a series? + +```{index} pandas.Series +``` + +In Python, `pandas` **series** are objects that can contain one or more elements (like a list). +They are a single column, are ordered, can be indexed, and can contain any data type. +The `pandas` package uses `Series` objects to represent the columns in a data frame. +`Series` can contain a mix of data types, but it is good practice to only include a single type in a series +because all observations of one variable should be the same type. +Python +has several different basic data types, as shown in +{numref}`tab:datatype-table`. +You can create a `pandas` series using the +`pd.Series()` function. For example, to create the series `region` as shown +in {numref}`fig:02-series`, you can write the following.
+ +```{code-cell} ipython3 +import pandas as pd + +region = pd.Series(["Toronto", "Montreal", "Vancouver", "Calgary", "Ottawa"]) +region +``` + ++++ {"tags": []} + +```{figure} img/wrangling/pandas_dataframe_series.png +:name: fig:02-series +:figclass: figure + +Example of a `pandas` series whose type is string. +``` + + +```{code-cell} ipython3 +:tags: [remove-cell] + +# The following table was taken from DSCI511 Lecture 1, credit to Arman Seyed-Ahmadi, MDS 2021 +``` + +```{index} data types, string, integer, floating point number, boolean, list, set, dictionary, tuple, none +``` + +```{index} see: str; string +``` + +```{index} see: int; integer +``` + +```{index} see: float; floating point number +``` + +```{index} see: bool; boolean +``` + +```{index} see: NoneType; none +``` + +```{index} see: dict; dictionary +``` + +```{table} Basic data types in Python +:name: tab:datatype-table +| Data type | Abbreviation | Description | Example | +| :-------------------- | :----------- | :-------------------------------------------- | :----------------------------------------- | +| integer | `int` | positive/negative/zero whole numbers | `42` | +| floating point number | `float` | real number in decimal form | `3.14159` | +| boolean | `bool` | true or false | `True` | +| string | `str` | text | `"Hello World"` | +| none | `NoneType` | represents no value | `None` | +``` + ++++ + +It is important in Python to make sure you represent your data with the correct type. +Many of the `pandas` functions we use in this book treat +the various data types differently. You should use `int` and `float` types +to represent numbers and perform arithmetic. The `int` type is for integers that have no decimal point, +while the `float` type is for numbers that have a decimal point. +The `bool` type are boolean variables that can only take on one of two values: `True` or `False`. 
+The `string` type is used to represent data that should +be thought of as "text", such as words, names, paths, URLs, and more. +A `NoneType` is a special type in Python that is used to indicate no value; this can occur, +for example, when you have missing data. +There are other basic data types in Python, but we will generally +not use these in this textbook. + + +### What does this have to do with data frames? + ++++ + +```{index} data frame; definition +``` + +A data frame is really just a collection of series that are stuck together, +where each series corresponds to one column and all must have the same length. +But not all columns in a data frame need to be of the same type. +{numref}`fig:02-dataframe` shows a data frame where +the columns are series of different types. But each element *within* +one column should usually be the same type, since the values for a single variable +are usually all of the same type. For example, if the variable is the name of a city, +that name should be a string, whereas if the variable is a year, that should be an +integer. So even though series let you put different types in them, it is most common +(and good practice!) to have just one type per column. + ++++ {"tags": []} + +```{figure} img/wrangling/pandas_dataframe_series-3.png +:name: fig:02-dataframe +:figclass: figure + +Data frame and series types. +``` + + +```{index} type +``` + +```{note} +You can use the function `type` on a data object. +For example we can check the class of the Canadian languages data set, +`can_lang`, we worked with in the previous chapters and we see it is a `pandas.core.frame.DataFrame`. +``` + + +```{code-cell} ipython3 +can_lang = pd.read_csv("data/can_lang.csv") +type(can_lang) +``` + +### Data structures in Python + +The `Series` and `DataFrame` types are *data structures* in Python, which +are core to most data analyses. +The functions from `pandas` that we use often give us back a `DataFrame` +or a `Series` depending on the operation. 
Because +`Series` are essentially simple `DataFrames`, we will refer +to both `DataFrames` and `Series` as "data frames" in the text. +There are other types that represent data structures in Python. +We summarize the most common ones in {numref}`tab:datastruc-table`. + +```{table} Basic data structures in Python +:name: tab:datastruc-table +| Data Structure | Description | +| --- | ----------- | +| list | An ordered collection of values that can store multiple data types at once. | +| dict | A labeled data structure where `keys` are paired with `values` | +| Series | An ordered collection of values *with labels* that can store multiple data types at once. | +| DataFrame | A labeled data structure with `Series` columns of potentially different types. | +``` + +A `list` is an ordered collection of values. To create a list, we put the contents of the list in between +square brackets `[]`, where each item of the list is separated by a comma. A `list` can contain values +of different types. The example below contains six `str` entries. + +```{code-cell} ipython3 +cities = ["Toronto", "Vancouver", "Montreal", "Calgary", "Ottawa", "Winnipeg"] +cities +``` +A list can directly be converted to a pandas `Series`. +```{code-cell} ipython3 +cities_series = pd.Series(cities) +cities_series +``` + +A `dict`, or dictionary, contains pairs of "keys" and "values." +You use a key to look up its corresponding value. Dictionaries are created +using curly brackets `{}`. Each entry starts with the +key on the left, followed by a colon symbol `:`, and then the value. +A dictionary can have multiple key-value pairs, each separated by a comma. +Keys can take a wide variety of types (`int` and `str` are commonly used), and values can take any type; +the key-value pairs in a dictionary can all be of different types, too. + In the example below, +we create a dictionary that has two keys: `"cities"` and `"population"`. +The values associated with each are lists.
+ +```{code-cell} ipython3 +population_in_2016 = { + "cities": ["Toronto", "Vancouver", "Montreal", "Calgary", "Ottawa", "Winnipeg"], + "population": [2235145, 1027613, 1823281, 544870, 571146, 321484] +} +population_in_2016 +``` + +A dictionary can be converted to a data frame. Keys +become the column names, and the values become the entries in +those columns. Dictionaries on their own are quite simple objects; it is preferable to work with a data frame +because then we have access to the built-in functionality in +`pandas` (e.g. `loc[]`, `[]`, and many functions that we will discuss in the upcoming sections)! + +```{code-cell} ipython3 +population_in_2016_df = pd.DataFrame(population_in_2016) +population_in_2016_df +``` + +Of course, there is no need to name the dictionary separately before passing it to +`pd.DataFrame`; we can instead construct the dictionary right inside the call. +This is often the most convenient way to create a new data frame. + +```{code-cell} ipython3 +population_in_2016_df = pd.DataFrame({ + "cities": ["Toronto", "Vancouver", "Montreal", "Calgary", "Ottawa", "Winnipeg"], + "population": [2235145, 1027613, 1823281, 544870, 571146, 321484] +}) +population_in_2016_df +``` + ++++ + +## Tidy data + +```{index} tidy data; definition +``` + +There are many ways a tabular data set can be organized. The data frames we +have looked at so far have all been using the **tidy data** format of +organization. This chapter will focus on introducing the tidy data format and +how to make your raw (and likely messy) data tidy. A tidy data frame satisfies +the following three criteria {cite:p}`wickham2014tidy`: + + - each row is a single observation, + - each column is a single variable, and + - each value is a single cell (i.e., its entry in the data + frame is not shared with another value). + +{numref}`fig:02-tidy-image` demonstrates a tidy data set that satisfies these +three criteria. 
+ ++++ {"tags": []} + +```{figure} img/wrangling/tidy_data.001-cropped.jpeg +:name: fig:02-tidy-image +:figclass: figure + +Tidy data satisfies three criteria. +``` + ++++ + +```{index} tidy data; arguments for +``` + +There are many good reasons for making sure your data are tidy as a first step in your analysis. +The most important is that it is a single, consistent format that nearly every function +in the `pandas` recognizes. No matter what the variables and observations +in your data represent, as long as the data frame +is tidy, you can manipulate it, plot it, and analyze it using the same tools. +If your data is *not* tidy, you will have to write special bespoke code +in your analysis that will not only be error-prone, but hard for others to understand. +Beyond making your analysis more accessible to others and less error-prone, tidy data +is also typically easy for humans to interpret. Given these benefits, +it is well worth spending the time to get your data into a tidy format +upfront. Fortunately, there are many well-designed `pandas` data +cleaning/wrangling tools to help you easily tidy your data. Let's explore them +below! + +```{note} +Is there only one shape for tidy data for a given data set? Not +necessarily! It depends on the statistical question you are asking and what +the variables are for that question. For tidy data, each variable should be +its own column. So, just as it's essential to match your statistical question +with the appropriate data analysis tool, it's important to match your +statistical question with the appropriate variables and ensure they are +represented as individual columns to make the data tidy. +``` + ++++ + +### Tidying up: going from wide to long using `melt` + +```{index} pandas.DataFrame; melt +``` + +One task that is commonly performed to get data into a tidy format +is to combine values that are stored in separate columns, +but are really part of the same variable, into one. 
+Data is often stored this way +because this format is sometimes more intuitive for human readability +and understanding, and humans create data sets. +In {numref}`fig:02-wide-to-long`, +the table on the left is in an untidy, "wide" format because the year values +(2006, 2011, 2016) are stored as column names. +And as a consequence, +the values for population for the various cities +over these years are also split across several columns. + +For humans, this table is easy to read, which is why you will often find data +stored in this wide format. However, this format is difficult to work with +when performing data visualization or statistical analysis using Python. For +example, if we wanted to find the latest year it would be challenging because +the year values are stored as column names instead of as values in a single +column. So before we could apply a function to find the latest year (for +example, by using `max`), we would have to first extract the column names +to get them as a list and then apply a function to extract the latest year. +The problem only gets worse if you would like to find the value for the +population for a given region for the latest year. Both of these tasks are +greatly simplified once the data is tidied. + +Another problem with data in this format is that we don't know what the +numbers under each year actually represent. Do those numbers represent +population size? Land area? It's not clear. +To solve both of these problems, +we can reshape this data set to a tidy data format +by creating a column called "year" and a column called +"population." This transformation—which makes the data +"longer"—is shown as the right table in +{numref}`fig:02-wide-to-long`. Note that the number of entries in our data frame +can change in this transformation. The "untidy" data has 5 rows and 3 columns for +a total of 15 entries, whereas the "tidy" data on the right has 15 rows and 2 columns +for a total of 30 entries. 
+ ++++ {"tags": []} + +```{figure} img/wrangling/pivot_functions.001.jpeg +:name: fig:02-wide-to-long +:figclass: figure + + + +Melting data from a wide to long data format. +``` + ++++ + +```{index} Canadian languages +``` + +We can achieve this effect in Python using the `melt` function from the `pandas` package. +The `melt` function combines columns, +and is usually used during tidying data +when we need to make the data frame longer and narrower. +To learn how to use `melt`, we will work through an example with the +`region_lang_top5_cities_wide.csv` data set. This data set contains the +counts of how many Canadians cited each language as their mother tongue for five +major Canadian cities (Toronto, Montréal, Vancouver, Calgary and Edmonton) from +the 2016 Canadian census. +To get started, +we will use `pd.read_csv` to load the (untidy) data. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +lang_wide = pd.read_csv("data/region_lang_top5_cities_wide.csv") +lang_wide +``` + +What is wrong with the untidy format above? +The table on the left in {numref}`fig:img-pivot-longer-with-table` +represents the data in the "wide" (messy) format. +From a data analysis perspective, this format is not ideal because the values of +the variable *region* (Toronto, Montréal, Vancouver, Calgary and Edmonton) +are stored as column names. Thus they +are not easily accessible to the data analysis functions we will apply +to our data set. Additionally, the *mother tongue* variable values are +spread across multiple columns, which will prevent us from doing any desired +visualization or statistical tasks until we combine them into one column. For +instance, suppose we want to know the languages with the highest number of +Canadians reporting it as their mother tongue among all five regions. This +question would be tough to answer with the data in its current format. 
+We *could* find the answer with the data in this format,
+though it would be much easier to answer if we tidy our
+data first. If mother tongue were instead stored as one column,
+as shown in the tidy data on the right in
+{numref}`fig:img-pivot-longer-with-table`,
+we could simply use one line of code (`df["mother_tongue"].max()`)
+to get the maximum value.
+
++++ {"tags": []}
+
+```{figure} img/wrangling/pandas_melt_wide-long.png
+:name: fig:img-pivot-longer-with-table
+:figclass: figure
+
+Going from wide to long with the `melt` function.
+```
+
++++
+
+{numref}`fig:img-pivot-longer` details the arguments that we need to specify
+in the `melt` function to accomplish this data transformation.
+
++++ {"tags": []}
+
+```{figure} img/wrangling/pandas_melt_args_labels.png
+:name: fig:img-pivot-longer
+:figclass: figure
+
+Syntax for the `melt` function.
+```
+
++++
+
+```{index} column range
+```
+
+```{index} see: :; column range
+```
+
+We use `melt` to combine the Toronto, Montréal,
+Vancouver, Calgary, and Edmonton columns into a single column called `region`,
+and create a column called `mother_tongue` that contains the count of how many
+Canadians report each language as their mother tongue for each metropolitan
+area.
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+lang_mother_tidy = lang_wide.melt(
+    id_vars=["category", "language"],
+    var_name="region",
+    value_name="mother_tongue",
+)
+lang_mother_tidy
+```
+
+```{note}
+In the code above, the call to the
+`melt` function is split across several lines. Recall from
+{numref}`Chapter %s ` that this is allowed in
+certain cases. For example, when calling a function as above, the input
+arguments are between parentheses `()` and Python knows to keep reading on
+the next line. Each line ends with a comma `,` making it easier to read.
+Splitting long lines like this across multiple lines is encouraged
+as it helps significantly with code readability.
Generally speaking, you should +limit each line of code to about 80 characters. +``` + +The data above is now tidy because all three criteria for tidy data have now +been met: + +1. All the variables (`category`, `language`, `region` and `mother_tongue`) are + now their own columns in the data frame. +2. Each observation, i.e., each `category`, `language`, `region`, and count of + Canadians where that language is the mother tongue, are in a single row. +3. Each value is a single cell, i.e., its row, column position in the data + frame is not shared with another value. + ++++ + +(pivot-wider)= +### Tidying up: going from long to wide using `pivot` + +```{index} pandas.DataFrame; pivot +``` + +Suppose we have observations spread across multiple rows rather than in a single +row. For example, in {numref}`fig:long-to-wide`, the table on the left is in an +untidy, long format because the `count` column contains three variables +(population, commuter, and incorporated count) and information about each observation +(here, population, commuter, and incorporated counts for a region) is split across three rows. +Remember: one of the criteria for tidy data +is that each observation must be in a single row. + +Using data in this format—where two or more variables are mixed together +in a single column—makes it harder to apply many usual `pandas` functions. +For example, finding the maximum number of commuters +would require an additional step of filtering for the commuter values +before the maximum can be computed. +In comparison, if the data were tidy, +all we would have to do is compute the maximum value for the commuter column. +To reshape this untidy data set to a tidy (and in this case, wider) format, +we need to create columns called "population", "commuters", and "incorporated." +This is illustrated in the right table of {numref}`fig:long-to-wide`. 
+ ++++ {"tags": []} + +```{figure} img/wrangling/pivot_functions.002.jpeg +:name: fig:long-to-wide +:figclass: figure + +Going from long to wide data. +``` + ++++ + +To tidy this type of data in Python, we can use the `pivot` function. +The `pivot` function generally increases the number of columns (widens) +and decreases the number of rows in a data set. +To learn how to use `pivot`, +we will work through an example +with the `region_lang_top5_cities_long.csv` data set. +This data set contains the number of Canadians reporting +the primary language at home and work for five +major cities (Toronto, Montréal, Vancouver, Calgary and Edmonton). + +```{code-cell} ipython3 +:tags: ["output_scroll"] +lang_long = pd.read_csv("data/region_lang_top5_cities_long.csv") +lang_long +``` + +What makes the data set shown above untidy? +In this example, each observation is a language in a region. +However, each observation is split across multiple rows: +one where the count for `most_at_home` is recorded, +and the other where the count for `most_at_work` is recorded. +Suppose the goal with this data was to +visualize the relationship between the number of +Canadians reporting their primary language at home and work. +Doing that would be difficult with this data in its current form, +since these two variables are stored in the same column. +{numref}`fig:img-pivot-wider-table` shows how this data +will be tidied using the `pivot` function. + ++++ {"tags": []} + +```{figure} img/wrangling/pandas_pivot_long-wide.png +:name: fig:img-pivot-wider-table +:figclass: figure + +Going from long to wide with the `pivot` function. +``` + ++++ + +{numref}`fig:img-pivot-wider` details the arguments that we need to specify in the `pivot` function. + ++++ {"tags": []} + +```{figure} img/wrangling/pandas_pivot_args_labels.png +:name: fig:img-pivot-wider +:figclass: figure + +Syntax for the `pivot` function. +``` + ++++ + +We will apply the function as detailed in {numref}`fig:img-pivot-wider`. 
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+lang_home_tidy = lang_long.pivot(
+    index=["region", "category", "language"],
+    columns=["type"],
+    values=["count"]
+).reset_index()
+
+lang_home_tidy.columns = [
+    "region",
+    "category",
+    "language",
+    "most_at_home",
+    "most_at_work",
+]
+lang_home_tidy
+```
+
+In the first step, note that we added a call to `reset_index`. When `pivot` is called with
+multiple column names passed to the `index`, those entries become the "name" of each row that
+would be used when you filter rows with `[]` or `loc` rather than just simple numbers. This
+can be confusing... What `reset_index` does is set us back to the usual expected behaviour
+where each row is "named" with an integer. This is a subtle point, but the main take-away is that
+when you call `pivot`, it is a good idea to call `reset_index` afterwards.
+
+The second operation we applied is to rename the columns. When we perform the `pivot`
+operation, it keeps the original column name `"count"` and adds the `"type"` as a second column name.
+Having two names for a column can be confusing! So we rename giving each column only one name.
+
+We can print out some useful information about our data frame using the `info` function.
+In the first row it tells us the `type` of `lang_home_tidy` (it is a `pandas` `DataFrame`). The second
+row tells us how many rows there are: 1070, and to index those rows, you can use numbers between
+0 and 1069 (remember that Python starts counting at 0!). Next, there is a print out about the data
+columns. Here there are 5 columns total. The little table it prints out tells you the name of each
+column, the number of non-null values (e.g. the number of entries that are not missing values), and
+the type of the entries. Finally the last two rows summarize the types of each column and how much
+memory the data frame is using on your computer.
+```{code-cell} ipython3
+lang_home_tidy.info()
+```
+
+The data is now tidy!
We can go through the three criteria again to check +that this data is a tidy data set. + +1. All the statistical variables are their own columns in the data frame (i.e., + `most_at_home`, and `most_at_work` have been separated into their own + columns in the data frame). +2. Each observation, (i.e., each language in a region) is in a single row. +3. Each value is a single cell (i.e., its row, column position in the data + frame is not shared with another value). + +You might notice that we have the same number of columns in the tidy data set as +we did in the messy one. Therefore `pivot` didn't really "widen" the data. +This is just because the original `type` column only had +two categories in it. If it had more than two, `pivot` would have created +more columns, and we would see the data set "widen." + + ++++ + +(str-split)= +### Tidying up: using `str.split` to deal with multiple delimiters + +```{index} pandas.Series; str.split, delimiter +``` + +Data are also not considered tidy when multiple values are stored in the same +cell. The data set we show below is even messier than the ones we dealt with +above: the `Toronto`, `Montréal`, `Vancouver`, `Calgary` and `Edmonton` columns +contain the number of Canadians reporting their primary language at home and +work in one column separated by the delimiter (`/`). The column names are the +values of a variable, *and* each value does not have its own cell! To turn this +messy data into tidy data, we'll have to fix these issues. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +lang_messy = pd.read_csv("data/region_lang_top5_cities_messy.csv") +lang_messy +``` + +First we’ll use `melt` to create two columns, `region` and `value`, +similar to what we did previously. +The new `region` columns will contain the region names, +and the new column `value` will be a temporary holding place for the +data that we need to further separate, i.e., the +number of Canadians reporting their primary language at home and work. 
+ +```{code-cell} ipython3 +:tags: ["output_scroll"] +lang_messy_longer = lang_messy.melt( + id_vars=["category", "language"], + var_name="region", + value_name="value", +) + +lang_messy_longer +``` + +Next we'll split the `value` column into two columns. +In basic Python, if we wanted to split the string `"50/0"` into two numbers `["50", "0"]` +we would use the `split` method on the string, and specify that the split should be made +on the slash character `"/"`. +```{code-cell} ipython3 +"50/0".split("/") +``` + +The `pandas` package provides similar functions that we can access +by using the `str` method. So to split all of the entries for an entire +column in a data frame, we will use the `str.split` method. +The output of this method is a data frame with two columns: +one containing only the counts of Canadians +that speak each language most at home, +and the other containing only the counts of Canadians +that speak each language most at work for each region. +We then drop the no-longer-needed `value` column from the `lang_messy_longer` +data frame, and assign the two columns from `str.split` to two new columns. +{numref}`fig:img-separate` +outlines what we need to specify to use `str.split`. + ++++ {"tags": []} + +```{figure} img/wrangling/str-split_args_labels.png +:name: fig:img-separate +:figclass: figure + +Syntax for the `str.split` function. +``` + +```{code-cell} ipython3 +tidy_lang = lang_messy_longer.drop(columns=["value"]) +tidy_lang[["most_at_home", "most_at_work"]] = lang_messy_longer["value"].str.split("/", expand=True) +tidy_lang +``` + +Is this data set now tidy? If we recall the three criteria for tidy data: + + - each row is a single observation, + - each column is a single variable, and + - each value is a single cell. + +We can see that this data now satisfies all three criteria, making it easier to +analyze. But we aren't done yet! Although we can't see it in the data frame above, all of the variables are actually +`object` data types. 
We can check this using the `info` method. +```{code-cell} ipython3 +tidy_lang.info() +``` + +Object columns in `pandas` data frames are columns of strings or columns with +mixed types. In the previous example in {numref}`pivot-wider`, the +`most_at_home` and `most_at_work` variables were `int64` (integer), which is a type of numeric data. +This change is due to the delimiter (`/`) when we read in this messy data set. +Python read these columns in as string types, and by default, `str.split` will +return columns with the `object` data type. + +It makes sense for `region`, `category`, and `language` to be stored as an +`object` type since they hold categorical values. However, suppose we want to apply any functions that treat the +`most_at_home` and `most_at_work` columns as a number (e.g., finding rows +above a numeric threshold of a column). +That won't be possible if the variable is stored as an `object`. +Fortunately, the `astype` method from `pandas` provides a natural way to fix problems +like this: it will convert the column to a selected data type. In this case, we choose the `int` +data type to indicate that these variables contain integer counts. Note that below +we *assign* the new numerical series to the `most_at_home` and `most_at_work` columns +in `tidy_lang`; we have seen this syntax before in {numref}`ch1-adding-modifying`, +and we will discuss it in more depth later in this chapter in {numref}`pandas-assign`. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang["most_at_home"] = tidy_lang["most_at_home"].astype("int") +tidy_lang["most_at_work"] = tidy_lang["most_at_work"].astype("int") +tidy_lang +``` + +```{code-cell} ipython3 +tidy_lang.info() +``` + +Now we see `most_at_home` and `most_at_work` columns are of `int64` data types, +indicating they are integer data types (i.e., numbers)! 
+ ++++ + +## Using `[]` to extract rows or columns + +Now that the `tidy_lang` data is indeed *tidy*, we can start manipulating it +using the powerful suite of functions from the `pandas`. +We will first revisit the `[]` from {numref}`Chapter %s `, +which lets us obtain a subset of either the rows **or** the columns of a data frame. +This section will highlight more advanced usage of the `[]` function, +including an in-depth treatment of the variety of logical statements +one can use in the `[]` to select subsets of rows. + ++++ + +### Extracting columns by name + +Recall that if we provide a list of column names, `[]` returns the subset of columns with those names as a data frame. +Suppose we wanted to select the columns `language`, `region`, +`most_at_home` and `most_at_work` from the `tidy_lang` data set. Using what we +learned in {numref}`Chapter %s `, we can pass all of these column +names into the square brackets. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang[["language", "region", "most_at_home", "most_at_work"]] +``` + +Likewise, +if we pass a list containing a single column name, +a data frame with this column will be returned. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang[["language"]] +``` + +When we need to extract only a single column, +we can also pass the column name as a string rather than a list. +The returned data type will now be a series. +Throughout this textbook, +we will mostly extract single columns this way, +but we will point out a few occasions +where it is advantageous to extract single columns as data frames. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang["language"] +``` + + +### Extracting rows that have a certain value with `==` +Suppose we are only interested in the subset of rows in `tidy_lang` corresponding to the +official languages of Canada (English and French). 
+We can extract these rows by using the *equivalency operator* (`==`) +to compare the values of the `category` column +with the value `"Official languages"`. +With these arguments, `[]` returns a data frame with all the columns +of the input data frame +but only the rows we asked for in the logical statement, i.e., +those where the `category` column holds the value `"Official languages"`. +We name this data frame `official_langs`. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +official_langs = tidy_lang[tidy_lang["category"] == "Official languages"] +official_langs +``` + +### Extracting rows that do not have a certain value with `!=` + +What if we want all the other language categories in the data set *except* for +those in the `"Official languages"` category? We can accomplish this with the `!=` +operator, which means "not equal to". So if we want to find all the rows +where the `category` does *not* equal `"Official languages"` we write the code +below. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang[tidy_lang["category"] != "Official languages"] +``` + +(filter-and)= +### Extracting rows satisfying multiple conditions using `&` + +Suppose now we want to look at only the rows +for the French language in Montréal. +To do this, we need to filter the data set +to find rows that satisfy multiple conditions simultaneously. +We can do this with the ampersand symbol (`&`), which +is interpreted by Python as "and". +We write the code as shown below to filter the `official_langs` data frame +to subset the rows where `region == "Montréal"` +*and* `language == "French"`. + +```{code-cell} ipython3 +tidy_lang[ + (tidy_lang["region"] == "Montréal") & + (tidy_lang["language"] == "French") +] +``` + ++++ {"tags": []} + +### Extracting rows satisfying at least one condition using `|` + +Suppose we were interested in only those rows corresponding to cities in Alberta +in the `official_langs` data set (Edmonton and Calgary). 
+
+We can't use `&` as we did above because `region`
+cannot be both Edmonton *and* Calgary simultaneously.
+Instead, we can use the vertical pipe (`|`) logical operator,
+which gives us the cases where one condition *or*
+another condition *or* both are satisfied.
+In the code below, we ask Python to return the rows
+where the `region` columns are equal to "Calgary" *or* "Edmonton".
+
+```{code-cell} ipython3
+official_langs[
+    (official_langs["region"] == "Calgary") |
+    (official_langs["region"] == "Edmonton")
+]
+```
+
+### Extracting rows with values in a list using `isin`
+
+Next, suppose we want to see the populations of our five cities.
+Let's read in the `region_data.csv` file
+that comes from the 2016 Canadian census,
+as it contains statistics for number of households, land area, population
+and number of dwellings for different regions.
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+region_data = pd.read_csv("data/region_data.csv")
+region_data
+```
+
+To get the population of the five cities
+we can filter the data set using the `isin` method.
+The `isin` method is used to see if an element belongs to a list.
+Here we are filtering for rows where the value in the `region` column
+matches any of the five cities we are interested in: Toronto, Montréal,
+Vancouver, Calgary, and Edmonton.
+
+```{code-cell} ipython3
+city_names = ["Toronto", "Montréal", "Vancouver", "Calgary", "Edmonton"]
+five_cities = region_data[region_data["region"].isin(city_names)]
+five_cities
+```
+
+```{note}
+What's the difference between `==` and `isin`? Suppose we have two
+Series, `seriesA` and `seriesB`. If you type `seriesA == seriesB` into Python it
+will compare the series element by element. Python checks if the first element of
+`seriesA` equals the first element of `seriesB`, the second element of
+`seriesA` equals the second element of `seriesB`, and so on.
On the other hand, +`seriesA.isin(seriesB)` compares the first element of `seriesA` to all the +elements in `seriesB`. Then the second element of `seriesA` is compared +to all the elements in `seriesB`, and so on. Notice the difference between `==` and +`isin` in the example below. +``` + +```{code-cell} ipython3 +pd.Series(["Vancouver", "Toronto"]) == pd.Series(["Toronto", "Vancouver"]) +``` + +```{code-cell} ipython3 +pd.Series(["Vancouver", "Toronto"]).isin(pd.Series(["Toronto", "Vancouver"])) +``` + +### Extracting rows above or below a threshold using `>` and `<` + +```{code-cell} ipython3 +:tags: [remove-cell] + +glue("census_popn", "{0:,.0f}".format(35151728)) +glue("most_french", "{0:,.0f}".format(2669195)) +``` + +We saw in {numref}`filter-and` that +{glue:text}`most_french` people reported +speaking French in Montréal as their primary language at home. +If we are interested in finding the official languages in regions +with higher numbers of people who speak it as their primary language at home +compared to French in Montréal, then we can use `[]` to obtain rows +where the value of `most_at_home` is greater than +{glue:text}`most_french`. We use the `>` symbol to look for values *above* a threshold, +and the `<` symbol to look for values *below* a threshold. The `>=` and `<=` +symbols similarly look for *equal to or above* a threshold and *equal to or below* a threshold. + +```{code-cell} ipython3 +official_langs[official_langs["most_at_home"] > 2669195] +``` + +This operation returns a data frame with only one row, indicating that when +considering the official languages, +only English in Toronto is reported by more people +as their primary language at home +than French in Montréal according to the 2016 Canadian census. + +### Extracting rows using `query` + +You can also extract rows above, below, equal or not-equal to a threshold using the +`query` method. 
For example the following gives us the same result as when we used +`official_langs[official_langs["most_at_home"] > 2669195]`. + +```{code-cell} ipython3 +official_langs.query("most_at_home > 2669195") +``` + +The query (criteria we are using to select values) is input as a string. The `query` method +is less often used than the earlier approaches we introduced, but it can come in handy +to make long chains of filtering operations a bit easier to read. + +(loc-iloc)= +## Using `loc[]` to filter rows and select columns + +```{index} pandas.DataFrame; loc[] +``` + +The `[]` operation is only used when you want to either filter rows **or** select columns; +it cannot be used to do both operations at the same time. This is where `loc[]` +comes in. For the first example, recall `loc[]` from {numref}`Chapter %s `, +which lets us create a subset of the rows and columns in the `tidy_lang` data frame. +In the first argument to `loc[]`, we specify a logical statement that +filters the rows to only those pertaining to the Toronto region, +and the second argument specifies a list of columns to keep by name. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang.loc[ + tidy_lang["region"] == "Toronto", + ["language", "region", "most_at_home", "most_at_work"] +] +``` + +In addition to simultaneous subsetting of rows and columns, `loc[]` has two +more special capabilities beyond those of `[]`. First, `loc[]` has the ability to specify *ranges* of rows and columns. +For example, note that the list of columns `language`, `region`, `most_at_home`, `most_at_work` +corresponds to the *range* of columns from `language` to `most_at_work`. +Rather than explicitly listing all of the column names as we did above, +we can ask for the range of columns `"language":"most_at_work"`; the `:`-syntax +denotes a range, and is supported by the `loc[]` function, but not by `[]`. 
+ +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang.loc[ + tidy_lang["region"] == "Toronto", + "language":"most_at_work" +] +``` + +We can pass `:` by itself—without anything before or after—to denote that we want to retrieve +everything. For example, to obtain a subset of all rows and only those columns ranging from `language` to `most_at_work`, +we could use the following expression. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang.loc[:, "language":"most_at_work"] +``` + +We can also omit the beginning or end of the `:` range expression to denote +that we want "everything up to" or "everything after" an element. For example, +if we want all of the columns including and after `language`, we can write the expression: + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang.loc[:, "language":] +``` +By not putting anything after the `:`, Python reads this as "from `language` until the last column". +Similarly, we can specify that we want everything up to and including `language` by writing +the expression: + +```{code-cell} ipython3 +:tags: ["output_scroll"] +tidy_lang.loc[:, :"language"] +``` + +By not putting anything before the `:`, Python reads this as "from the first column until `language`." +Although the notation for selecting a range using `:` is convenient because less code is required, +it must be used carefully. If you were to re-order columns or add a column to the data frame, the +output would change. Using a list is more explicit and less prone to potential confusion, but sometimes +involves a lot more typing. + +The second special capability of `.loc[]` over `[]` is that it enables *selecting columns* using +logical statements. The `[]` operator can only use logical statements to filter rows; `.loc[]` can do both! +For example, let's say we wanted only to select the +columns `most_at_home` and `most_at_work`. We could then use the `.str.startswith` method +to choose only the columns that start with the word "most". 
+The `str.startswith` expression returns a list of `True` or `False` values +corresponding to the column names that start with the desired characters. + +```{code-cell} ipython3 +tidy_lang.loc[:, tidy_lang.columns.str.startswith("most")] +``` + +```{index} pandas.Series; str.contains +``` + +We could also have chosen the columns containing an underscore `_` by using the +`.str.contains("_")`, since we notice +the columns we want contain underscores and the others don't. + +```{code-cell} ipython3 +tidy_lang.loc[:, tidy_lang.columns.str.contains("_")] +``` + +## Using `iloc[]` to extract rows and columns by position +```{index} pandas.DataFrame; iloc[], column range +``` +Another approach for selecting rows and columns is to use `iloc[]`, +which provides the ability to index with the position rather than the label of the columns. +For example, the column labels of the `tidy_lang` data frame are +`["category", "language", "region", "most_at_home", "most_at_work"]`. +Using `iloc[]`, you can ask for the `language` column by requesting the +column at index `1` (remember that Python starts counting at `0`, so the second item `"language"` +has index `1`!). + +```{code-cell} ipython3 +tidy_lang.iloc[:, 1] +``` + +You can also ask for multiple columns. +We pass `1:` after the comma +indicating we want columns after and including index 1 (*i.e.* `language`). + +```{code-cell} ipython3 +tidy_lang.iloc[:, 1:] +``` + +We can also use `iloc[]` to select ranges of rows, or simultaneously select ranges of rows and columns, using a similar syntax. +For example, to select the first five rows and columns after and including index 1, we could use the following: + +```{code-cell} ipython3 +tidy_lang.iloc[:5, 1:] +``` + +Note that the `iloc[]` method is not commonly used, and must be used with care. +For example, it is easy to +accidentally put in the wrong integer index! 
If you did not correctly remember +that the `language` column was index `1`, and used `2` instead, your code +might end up having a bug that is quite hard to track down. + +```{index} pandas.Series; str.startswith +``` + ++++ {"tags": []} + +## Aggregating data + ++++ + +### Calculating summary statistics on individual columns + +```{index} summarize +``` + +As a part of many data analyses, we need to calculate a summary value for the +data (a *summary statistic*). +Examples of summary statistics we might want to calculate +are the number of observations, the average/mean value for a column, +the minimum value, etc. +Oftentimes, +this summary statistic is calculated from the values in a data frame column, +or columns, as shown in {numref}`fig:summarize`. + ++++ {"tags": []} + +```{figure} img/wrangling/summarize.001.jpeg +:name: fig:summarize +:figclass: figure + +Calculating summary statistics on one or more column(s) in `pandas` generally +creates a series or data frame containing the summary statistic(s) for each column +being summarized. The darker, top row of each table represents column headers. +``` + ++++ + +We will start by showing how to compute the minimum and maximum number of Canadians reporting a particular +language as their primary language at home. First, a reminder of what `region_lang` looks like: + +```{code-cell} ipython3 +:tags: ["output_scroll"] +region_lang = pd.read_csv("data/region_lang.csv") +region_lang +``` + +We use `.min` to calculate the minimum +and `.max` to calculate maximum number of Canadians +reporting a particular language as their primary language at home, +for any region. 
+ +```{code-cell} ipython3 +region_lang["most_at_home"].min() +``` + +```{code-cell} ipython3 +region_lang["most_at_home"].max() +``` + +```{code-cell} ipython3 +:tags: [remove-cell] +glue("lang_most_people", "{0:,.0f}".format(int(region_lang["most_at_home"].max()))) +``` + +From this we see that there are some languages in the data set that no one speaks +as their primary language at home. We also see that the most commonly spoken +primary language at home is spoken by +{glue:text}`lang_most_people` people. If instead we wanted to know the +total number of people in the survey, we could use the `sum` summary statistic method. +```{code-cell} ipython3 +region_lang["most_at_home"].sum() +``` + +Other handy summary statistics include the `mean`, `median` and `std` for +computing the mean, median, and standard deviation of observations, respectively. +We can also compute multiple statistics at once using `agg` to "aggregate" results. +For example, if we wanted to +compute both the `min` and `max` at once, we could use `agg` with the argument `["min", "max"]`. +Note that `agg` outputs a `Series` object. + +```{code-cell} ipython3 +region_lang["most_at_home"].agg(["min", "max"]) +``` + +The `pandas` package also provides the `describe` method, +which is a handy function that computes many common summary statistics at once; it +gives us a *summary* of a variable. + +```{code-cell} ipython3 +region_lang["most_at_home"].describe() +``` + +In addition to the summary methods we introduced earlier, the `describe` method +outputs a `count` (the total number of observations, or rows, in our data frame), +as well as the 25th, 50th, and 75th percentiles. +{numref}`tab:basic-summary-statistics` provides an overview of some of the useful +summary statistics that you can compute with `pandas`. 
+
+```{table} Basic summary statistics
+:name: tab:basic-summary-statistics
+| Function | Description |
+| -------- | ----------- |
+| `count` | The number of observations (rows) |
+| `mean` | The mean of the observations |
+| `median` | The median value of the observations |
+| `std` | The standard deviation of the observations |
+| `max` | The largest value in a column |
+| `min` | The smallest value in a column |
+| `sum` | The sum of all observations |
+| `agg` | Aggregate multiple statistics together |
+| `describe` | A summary of many common statistics |
+```
+
++++
++++
+
+
+```{note}
+In `pandas`, the value `NaN` is often used to denote missing data.
+By default, when `pandas` calculates summary statistics (e.g., `max`, `min`, `sum`, etc.),
+it ignores these values. If you look at the documentation for these functions, you will
+see an input variable `skipna`, which by default is set to `skipna=True`. This means that
+`pandas` will skip `NaN` values when computing statistics.
+```
+
+### Calculating summary statistics on data frames
+
+What if you want to calculate summary statistics on an entire data frame? Well,
+it turns out that the functions in {numref}`tab:basic-summary-statistics`
+can be applied to a whole data frame!
+For example, we can ask for the maximum value of each column using `max`.
+
+```{code-cell} ipython3
+region_lang.max()
+```
+
+We can see that for columns that contain string data
+with words like `"Vancouver"` and `"Halifax"`,
+the maximum value is determined by sorting the string alphabetically
+and returning the last value.
+If we only want the maximum value for
+numeric columns,
+we can provide `numeric_only=True`:
+
+```{code-cell} ipython3
+region_lang.max(numeric_only=True)
+```
+
+We could also ask for the `mean` for each column in the dataframe.
+It does not make sense to compute the mean of the string columns,
+so in this case we *must* provide the keyword `numeric_only=True`
+so that the mean is only computed on columns with numeric values.
+
+```{code-cell} ipython3
+region_lang.mean(numeric_only=True)
+```
+
+If there are only some columns for which you would like to get summary statistics,
+you can first use `[]` or `.loc[]` to select those columns,
+and then ask for the summary statistic
+as we did for a single column previously.
+For example, if we want to know
+the mean and standard deviation of all of the columns between `"mother_tongue"` and `"lang_known"`,
+we use `.loc[]` to select those columns and then `agg` to ask for both the `mean` and `std`.
+```{code-cell} ipython3
+region_lang.loc[:, "mother_tongue":"lang_known"].agg(["mean", "std"])
+```
+
+## Performing operations on groups of rows using `groupby`
+
++++
+
+```{index} pandas.DataFrame; groupby
+```
+What happens if we want to know how languages vary by region? In this case,
+we need a new tool that lets us group rows by region. This can be achieved
+using the `groupby` function in `pandas`. Pairing summary functions
+with `groupby` lets you summarize values for subgroups within a data set,
+as illustrated in {numref}`fig:summarize-groupby`.
+For example, we can use `groupby` to group the regions of the `tidy_lang` data
+frame and then calculate the minimum and maximum number of Canadians
+reporting the language as the primary language at home
+for each of the regions in the data set.
+
++++ {"tags": []}
+
+```{figure} img/wrangling/summarize.002.jpeg
+:name: fig:summarize-groupby
+:figclass: figure
+
+A summary statistic function paired with `groupby` is useful for calculating that statistic
+on one or more column(s) for each group. It
+creates a new data frame with one row for each group
+and one column for each summary statistic. The darker, top row of each table
+represents the column headers.
The gray, blue, and green colored rows +correspond to the rows that belong to each of the three groups being +represented in this cartoon example. +``` + ++++ + +The `groupby` function takes at least one argument—the columns to use in the +grouping. Here we use only one column for grouping (`region`). + +```{code-cell} ipython3 +region_lang.groupby("region") +``` + +Notice that `groupby` converts a `DataFrame` object to a `DataFrameGroupBy` +object, which contains information about the groups of the data frame. We can +then apply aggregating functions to the `DataFrameGroupBy` object. Here we first +select the `most_at_home` column, and then summarize the grouped data by their +minimum and maximum values using `agg`. + +```{code-cell} ipython3 +region_lang.groupby("region")["most_at_home"].agg(["min", "max"]) +``` + +The resulting dataframe has `region` as an index name. +This is similar to what happened when we used the `pivot` function +in {numref}`pivot-wider`; +and just as we did then, +you can use `reset_index` to get back to a regular dataframe +with `region` as a column name. + +```{code-cell} ipython3 +region_lang.groupby("region")["most_at_home"].agg(["min", "max"]).reset_index() +``` +You can also pass multiple column names to `groupby`. For example, if we wanted to +know about how the different categories of languages (Aboriginal, Non-Official & +Non-Aboriginal, and Official) are spoken at home in different regions, we would pass a +list including `region` and `category` to `groupby`. + +```{code-cell} ipython3 +region_lang.groupby(["region", "category"])["most_at_home"].agg(["min", "max"]).reset_index() +``` + +You can also ask for grouped summary statistics on the whole data frame. 
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+region_lang.groupby("region").agg(["min", "max"]).reset_index()
+```
+
+If you want to ask for only some columns, for example
+the columns between `"most_at_home"` and `"lang_known"`,
+you might think about first applying `groupby` and then `["most_at_home":"lang_known"]`;
+but `groupby` returns a `DataFrameGroupBy` object, which does not
+work with ranges inside `[]`.
+The other option is to do things the other way around:
+first use `["most_at_home":"lang_known"]`, then use `groupby`.
+This can work, but you have to be careful! For example,
+in our case, we get an error.
+
+```{code-cell} ipython3
+:tags: [remove-output]
+region_lang["most_at_home":"lang_known"].groupby("region").max()
+```
+
+```
+KeyError: "region"
+```
+
+This is because when we use `[]` we selected only the columns between
+`"most_at_home"` and `"lang_known"`, which doesn't include `"region"`!
+Instead, we need to use `groupby` first
+and then call `[]` with a list of column names that includes `region`;
+this approach always works.
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+region_lang.groupby("region")[["most_at_home", "most_at_work", "lang_known"]].max().reset_index()
+```
+
+To see how many observations there are in each group,
+we can use `value_counts`.
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+region_lang.value_counts("region")
+```
+
+The `value_counts` method also takes the `normalize` parameter, which shows
+the output as a proportion instead of a count.
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+region_lang.value_counts("region", normalize=True)
+```
+
++++
+
+## Apply functions across multiple columns
+
+Computing summary statistics is not the only situation in which we need
+to apply a function across columns in a data frame. There are two other
+common wrangling tasks that require the application of a function across columns.
+The first is when we want to apply a transformation, such as a conversion of measurement units, to multiple columns. +We illustrate such a data transformation in {numref}`fig:mutate-across`; note that it does not +change the shape of the data frame. + +```{figure} img/wrangling/summarize.005.jpeg +:name: fig:mutate-across +:figclass: figure + +A transformation applied across many columns. The darker, top row of each table represents the column headers. +``` + +For example, imagine that we wanted to convert all the numeric columns +in the `region_lang` data frame from `int64` type to `int32` type +using the `.astype` function. +When we revisit the `region_lang` data frame, +we can see that this would be the columns from `mother_tongue` to `lang_known`. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +region_lang +``` + +```{index} pandas.DataFrame; apply, pandas.DataFrame; loc[] +``` + +We can simply call the `.astype` function to apply it across the desired range of columns. + +```{code-cell} ipython3 +region_lang_nums = region_lang.loc[:, "mother_tongue":"lang_known"].astype("int32") +region_lang_nums.info() +``` +You can now see that the columns from `mother_tongue` to `lang_known` are type `int32`, +and that we have obtained a data frame with the same number of columns and rows +as the input data frame. + +The second situation occurs when you want to apply a function across columns within each individual +row, i.e., *row-wise*. This operation, illustrated in {numref}`fig:rowwise`, +will produce a single column whose entries summarize each row in the original data frame; +this new column can be added back into the original data. + +```{figure} img/wrangling/summarize.004.jpeg +:name: fig:rowwise +:figclass: figure + +A function applied row-wise across a data frame, producing a new column. The +darker, top row of each table represents the column headers. 
+``` + +For example, suppose we want to know the maximum value between `mother_tongue`, +and `lang_known` for each language and region in the `region_lang_nums` data set. +In other words, we want to apply the `max` function *row-wise.* +In order to tell `max` that we want to work row-wise (as opposed to acting on each column +individually, which is the default behavior), we just specify the argument `axis=1`. + +```{code-cell} ipython3 +region_lang_nums.max(axis=1) +``` + +We see that we obtain a series containing the maximum value between `mother_tongue`, +`most_at_home`, `most_at_work` and `lang_known` for each row in the data frame. It +is often the case that we want to include a column result +from a row-wise operation as a new column in the data frame, so that we can make +plots or continue our analysis. To make this happen, +we will use column assignment or the `assign` function to create a new column. +This is discussed in the next section. + +```{note} +While `pandas` provides many methods (like `max`, `astype`, etc.) that can be applied to a data frame, +sometimes you may want to apply your own function to multiple columns in a data frame. In this case +you can use the more general [`apply`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.apply.html) method. +``` + +(pandas-assign)= +## Modifying and adding columns + + +```{index} pandas.DataFrame; [] +``` + +When we compute summary statistics or apply functions, +a new data frame or series is created. But what if we want to append that information +to an existing data frame? For example, say we wanted to compute the +maximum value in each row of the `region_lang_nums` data frame, +and to append that as an additional column of the `region_lang` data frame. +In this case, we have two options: we can either create a new column within the `region_lang` data +frame itself, or create an entirely new data frame +with the `assign` method. 
The first option we have seen already in earlier chapters, and is +the more commonly used pattern in practice: +```{code-cell} ipython3 +:tags: ["output_scroll"] +region_lang["maximum"] = region_lang_nums.max(axis=1) +region_lang +``` +You can see above that the `region_lang` data frame now has an additional column named `maximum`. +The `maximum` column contains +the maximum value between `mother_tongue`, +`most_at_home`, `most_at_work` and `lang_known` for each language +and region, just as we specified! + +To instead create an entirely new data frame, we can use the `assign` method and specify one argument for each column we want to create. +In this case we want to create one new column named `maximum`, so the argument +to `assign` begins with `maximum= `. +Then after the `=`, we specify what the contents of that new column +should be. In this case we use `max` just as we did previously to give us the maximum values. +Remember to specify `axis=1` in the `max` method so that we compute the row-wise maximum value. +```{code-cell} ipython3 +:tags: ["output_scroll"] +region_lang.assign( + maximum=region_lang_nums.max(axis=1) +) +``` +This data frame looks just like the previous one, except that it is a copy of `region_lang`, not `region_lang` itself; making further +changes to this data frame will not impact the original `region_lang` data frame. 
+
+
+```{code-cell} ipython3
+:tags: [remove-cell]
+
+# remove maximum column from region_lang
+region_lang = region_lang.drop(columns=["maximum"])
+
+# get english counts for toronto and glue
+number_most_home = int(
+    official_langs[
+        (official_langs["language"] == "English") &
+        (official_langs["region"] == "Toronto")
+    ]["most_at_home"]
+)
+
+toronto_popn = int(region_data[region_data["region"] == "Toronto"]["population"])
+
+glue("number_most_home", "{0:,.0f}".format(number_most_home))
+glue("toronto_popn", "{0:,.0f}".format(toronto_popn))
+glue("prop_eng_tor", "{0:.2f}".format(number_most_home / toronto_popn))
+```
+
+As another example, we might ask the question: "What proportion of
+the population reported English as their primary language at home in the 2016 census?"
+For example, in Toronto, {glue:text}`number_most_home` people reported
+speaking English as their primary language at home, and the
+population of Toronto was reported to be
+{glue:text}`toronto_popn` people. So the proportion of people reporting English
+as their primary language in Toronto in the 2016 census was {glue:text}`prop_eng_tor`.
+How could we figure this out starting from the `region_lang` data frame?
+
+First, we need to filter the `region_lang` data frame
+so that we only keep the rows where the language is English.
+We will also restrict our attention to the five major cities
+in the `five_cities` data frame: Toronto, Montréal, Vancouver, Calgary, and Edmonton.
+We will filter to keep only those rows pertaining to the English language
+and pertaining to the five aforementioned cities. To combine these two logical statements
+we will use the `&` symbol, and with the `[]` operation
+we will filter the rows where the `language` is `"English"`,
+and name the new data frame `english_lang`.
+```{code-cell} ipython3 +:tags: ["output_scroll"] +english_lang = region_lang[ + (region_lang["language"] == "English") & + (region_lang["region"].isin(five_cities["region"])) +] +english_lang +``` + +Okay, now we have a data frame that pertains only to the English language +and the five cities mentioned earlier. +In order to compute the proportion of the population speaking English in each of these cities, +we need to add the population data from the `five_cities` data frame. +```{code-cell} ipython3 +five_cities +``` +The data frame above shows that the populations of the five cities in 2016 were +5928040 (Toronto), 4098927 (Montréal), 2463431 (Vancouver), 1392609 (Calgary), and 1321426 (Edmonton). +Next, we will add this information to a new data frame column called `city_pops`. +Once again, we will illustrate how to do this using both the `assign` method and regular column assignment. +We specify the new column name (`city_pops`) as the argument, followed by the equals symbol `=`, +and finally the data in the column. +Note that the order of the rows in the `english_lang` data frame is Montréal, Toronto, Calgary, Edmonton, Vancouver. +So we will create a column called `city_pops` where we list the populations of those cities in that +order, and add it to our data frame. +And remember that by default, like other `pandas` functions, `assign` does not +modify the original data frame directly, so the `english_lang` data frame is unchanged! +```{code-cell} ipython3 +:tags: ["output_scroll"] +english_lang.assign( + city_pops=[4098927, 5928040, 1392609, 1321426, 2463431] +) +``` + +Instead of using the `assign` method we can directly modify the `english_lang` data frame using regular column assignment. +This would be a more natural choice in this particular case, +since the syntax is more convenient for simple column modifications and additions. 
+```{code-cell} ipython3 +:tags: [remove-cell] +english_lang["city_pops"] = [4098927, 5928040, 1392609, 1321426, 2463431] +``` +```python +english_lang["city_pops"] = [4098927, 5928040, 1392609, 1321426, 2463431] +english_lang +``` +```text +/tmp/ipykernel_12/2654974267.py:1: SettingWithCopyWarning: +A value is trying to be set on a copy of a slice from a DataFrame. +Try using .loc[row_indexer,col_indexer] = value instead + +See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy + english_lang["city_pops"] = [4098927, 5928040, 1392609, 1321426, 2463431] +``` +```{code-cell} ipython3 +:tags: [remove-input] +english_lang +``` +Wait a moment...what is that warning message? It seems to suggest that something went wrong, but +if we inspect the `english_lang` data frame above, it looks like the city populations were added +just fine! As it turns out, this is caused by the earlier filtering we did from `region_lang` to +produce the original `english_lang`. The details are a little bit technical, but +`pandas` sometimes does not like it when you subset a data frame using `[]` or `loc[]` followed by +column assignment. For the purposes of your own data analysis, if you ever see a `SettingWithCopyWarning`, just make sure +to double check that the result of your column assignment looks the way you expect it to before proceeding. +For the rest of the book, we will silence that warning to help with readability. +```{code-cell} ipython3 +:tags: [remove-cell] +# suppress for the rest of this chapter +pd.options.mode.chained_assignment = None +``` + +```{note} +Inserting the data column `[4098927, 5928040, ...]` manually as we did above is generally very error-prone and is not recommended. +We do it here to demonstrate another usage of `assign` and regular column assignment. 
+But in more advanced data wrangling, +one would solve this problem in a less error-prone way using +the `merge` function, which lets you combine two data frames. We will show you an +example using `merge` at the end of the chapter! +``` + +Now we have a new column with the population for each city. Finally, we can convert all the numerical +columns to proportions of people who speak English by taking the ratio of all the numerical columns +with `city_pops`. Let's modify the `english_lang` column directly; in this case +we can just assign directly to the data frame. +This is similar to what we did in {numref}`str-split`, +when we first read in the `"region_lang_top5_cities_messy.csv"` data and we needed to convert a few +of the variables to numeric types. Here we assign to a range of columns simultaneously using `loc[]`. +Note that it is again possible to instead use the `assign` function to produce a new data +frame when modifying existing columns, although this is not commonly done. +Note also that we use the `div` method with the argument `axis=0` to divide a range of columns in a data frame +by the values in a single column—the basic division symbol `/` won't work in this case. + +```{code-cell} ipython3 +:tags: ["output_scroll"] +english_lang.loc[:, "mother_tongue":"lang_known"] = english_lang.loc[ + :, + "mother_tongue":"lang_known" + ].div(english_lang["city_pops"], axis=0) +english_lang +``` + ++++ + +## Using `merge` to combine data frames + +Let's return to the situation right before we added the city populations +of Toronto, Montréal, Vancouver, Calgary, and Edmonton to the `english_lang` data frame. Before adding the new column, we had filtered +`region_lang` to create the `english_lang` data frame containing only English speakers in the five cities +of interest. 
+
+```{code-cell} ipython3
+:tags: ["remove-cell"]
+english_lang = region_lang[
+    (region_lang["language"] == "English") &
+    (region_lang["region"].isin(five_cities["region"]))
+]
+```
+
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+english_lang
+```
+We then added the populations of these cities as a column
+(Toronto: 5928040, Montréal: 4098927, Vancouver: 2463431,
+Calgary: 1392609, and Edmonton: 1321426). We had to be careful to add those populations in the
+right order; this is an error-prone process. An alternative approach, which we demonstrate here,
+is to (1) create a new data frame with the city names and populations, and
+(2) use `merge` to combine the two data frames, recognizing that the "regions" are the same.
+
+We create a new data frame by calling `pd.DataFrame` with a dictionary
+as its argument. The dictionary associates each column name in the data frame to be created
+with a list of entries. Here we list city names in a column called `"region"`
+and their populations in a column called `"population"`.
+```{code-cell} ipython3
+city_populations = pd.DataFrame({
+    "region" : ["Toronto", "Montréal", "Vancouver", "Calgary", "Edmonton"],
+    "population" : [5928040, 4098927, 2463431, 1392609, 1321426]
+})
+city_populations
+```
+This new data frame has the same `region` column as the `english_lang` data frame. The order of
+the cities is different, but that is okay! We can use the `merge` function in `pandas` to say
+we would like to combine the two data frames by matching the `region` between them. The argument
+`on="region"` tells pandas we would like to use the `region` column to match up the entries.
+```{code-cell} ipython3
+:tags: ["output_scroll"]
+english_lang = english_lang.merge(city_populations, on="region")
+english_lang
+```
+You can see that the populations for each city are correct (e.g. Montréal: 4098927, Toronto: 5928040),
+and we can proceed with our analysis from here.
+ +## Summary + +Cleaning and wrangling data can be a very time-consuming process. However, +it is a critical step in any data analysis. We have explored many different +functions for cleaning and wrangling data into a tidy format. +{numref}`tab:summary-functions-table` summarizes some of the key wrangling +functions we learned in this chapter. In the following chapters, you will +learn how you can take this tidy data and do so much more with it to answer your +burning data science questions! + ++++ + +```{table} Summary of wrangling functions +:name: tab:summary-functions-table + +| Function | Description | +| --- | ----------- | +| `agg` | calculates aggregated summaries of inputs | +| `assign` | adds or modifies columns in a data frame | +| `groupby` | allows you to apply function(s) to groups of rows | +| `iloc` | subsets columns/rows of a data frame using integer indices | +| `loc` | subsets columns/rows of a data frame using labels | +| `melt` | generally makes the data frame longer and narrower | +| `merge` | combine two data frames | +| `pivot` | generally makes a data frame wider and decreases the number of rows | +| `str.split` | splits up a string column into multiple columns | +``` + +## Exercises + +Practice exercises for the material covered in this chapter +can be found in the accompanying +[worksheets repository](https://worksheets.python.datasciencebook.ca) +in the "Cleaning and wrangling data" row. +You can launch an interactive version of the worksheet in your browser by clicking the "launch binder" button. +You can also preview a non-interactive version of the worksheet by clicking "view worksheet." +If you instead decide to download the worksheet and run it on your own machine, +make sure to follow the instructions for computer setup +found in {numref}`Chapter %s `. This will ensure that the automated feedback +and guidance that the worksheets provide will function as intended. 
+ ++++ {"tags": []} + +## Additional resources + +- The [`pandas` package documentation](https://pandas.pydata.org/docs/reference/index.html) is + another resource to learn more about the functions in this + chapter, the full set of arguments you can use, and other related functions. +- [*Python for Data Analysis*](https://wesmckinney.com/book/) {cite:p}`mckinney2012python` has a few chapters related to + data wrangling that go into more depth than this book. For example, the + [data wrangling chapter](https://wesmckinney.com/book/data-wrangling.html) covers tidy data, + `melt` and `pivot`, but also covers missing values + and additional wrangling functions (like `stack`). The [data + aggregation chapter](https://wesmckinney.com/book/data-aggregation.html) covers + `groupby`, aggregating functions, `apply`, etc. +- You will occasionally encounter a case where you need to iterate over items + in a data frame, but none of the above functions are flexible enough to do + what you want. In that case, you may consider using [a for loop](https://wesmckinney.com/book/python-basics.html#control_for) {cite:p}`mckinney2012python`. 
+ + +## References + ++++ + +```{bibliography} +:filter: docname in docnames +``` diff --git a/pull313/_sphinx_design_static/design-style.4045f2051d55cab465a707391d5b2007.min.css b/pull313/_sphinx_design_static/design-style.4045f2051d55cab465a707391d5b2007.min.css new file mode 100644 index 00000000..3225661c --- /dev/null +++ b/pull313/_sphinx_design_static/design-style.4045f2051d55cab465a707391d5b2007.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) 
!important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) 
!important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) 
!important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px 
!important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem 
!important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% 
!important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline 
!important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset 
!important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid !important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem 
!important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s 
ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) 
!important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid 
!important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) 
!important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 
0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 
768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 
auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 
auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 
3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 
auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 
1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative}details.sd-dropdown .sd-summary-title{font-weight:700;padding-right:3em !important;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;user-select:none}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary{list-style:none;padding:1em}details.sd-dropdown summary .sd-octicon.no-title{vertical-align:middle}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown summary::-webkit-details-marker{display:none}details.sd-dropdown summary:focus{outline:none}details.sd-dropdown .sd-summary-icon{margin-right:.5em}details.sd-dropdown .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary:hover .sd-summary-up svg,details.sd-dropdown summary:hover .sd-summary-down svg{opacity:1;transform:scale(1.1)}details.sd-dropdown .sd-summary-up svg,details.sd-dropdown .sd-summary-down svg{display:block;opacity:.6}details.sd-dropdown .sd-summary-up,details.sd-dropdown .sd-summary-down{pointer-events:none;position:absolute;right:1em;top:1em}details.sd-dropdown[open]>.sd-summary-title .sd-summary-down{visibility:hidden}details.sd-dropdown:not([open])>.sd-summary-title .sd-summary-up{visibility:hidden}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s 
ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #007bff;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: 
#f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0069d9;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem} diff --git a/pull313/_sphinx_design_static/design-tabs.js b/pull313/_sphinx_design_static/design-tabs.js new file mode 100644 index 00000000..36b38cf0 --- /dev/null +++ b/pull313/_sphinx_design_static/design-tabs.js @@ -0,0 +1,27 @@ +var sd_labels_by_text = {}; + +function ready() { + const li = document.getElementsByClassName("sd-tab-label"); + for (const label of li) { + syncId = 
label.getAttribute("data-sync-id"); + if (syncId) { + label.onclick = onLabelClick; + if (!sd_labels_by_text[syncId]) { + sd_labels_by_text[syncId] = []; + } + sd_labels_by_text[syncId].push(label); + } + } +} + +function onLabelClick() { + // Activate other inputs with the same sync id. + syncId = this.getAttribute("data-sync-id"); + for (label of sd_labels_by_text[syncId]) { + if (label === this) continue; + label.previousElementSibling.checked = true; + } + window.localStorage.setItem("sphinx-design-last-tab", syncId); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/pull313/_static/_sphinx_javascript_frameworks_compat.js b/pull313/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 00000000..8549469d --- /dev/null +++ b/pull313/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,134 @@ +/* + * _sphinx_javascript_frameworks_compat.js + * ~~~~~~~~~~ + * + * Compatability shim for jQuery and underscores.js. + * + * WILL BE REMOVED IN Sphinx 6.0 + * xref RemovedInSphinx60Warning + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. 
+ */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var 
result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/pull313/_static/basic.css b/pull313/_static/basic.css new file mode 100644 index 00000000..5685b52e --- /dev/null +++ b/pull313/_static/basic.css @@ -0,0 +1,928 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 270px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li 
p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} +a.brackets:before, +span.brackets > a:before{ + content: "["; +} + +a.brackets:after, +span.brackets > a:after { + content: "]"; +} + + +h1:hover > a.headerlink, +h2:hover > a.headerlink, 
+h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt 
{ + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 
!important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; 
+} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +/* Docutils 0.17 and older (footnotes & citations) */ +dl.footnote > dt, +dl.citation > dt { + float: left; + margin-right: 0.5em; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +/* Docutils 0.18+ (footnotes & citations) */ +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +/* Footnotes & citations ends */ + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dt:after { + content: ":"; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block 
.line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption 
span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/pull313/_static/check-solid.svg b/pull313/_static/check-solid.svg new file mode 100644 index 00000000..92fad4b5 --- /dev/null +++ b/pull313/_static/check-solid.svg @@ -0,0 +1,4 @@ + + + + diff --git a/pull313/_static/clipboard.min.js b/pull313/_static/clipboard.min.js new file mode 100644 index 00000000..54b3c463 --- /dev/null +++ b/pull313/_static/clipboard.min.js @@ -0,0 +1,7 @@ +/*! 
+ * clipboard.js v2.0.8 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return o}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),c=n.n(e);function a(t){try{return document.execCommand(t)}catch(t){return}}var f=function(t){t=c()(t);return a("cut"),t};var l=function(t){var e,n,o,r=1 + + + + diff --git a/pull313/_static/copybutton.css b/pull313/_static/copybutton.css new file mode 100644 index 00000000..f1916ec7 --- /dev/null +++ b/pull313/_static/copybutton.css @@ -0,0 +1,94 @@ +/* Copy buttons */ +button.copybtn { + position: absolute; + display: flex; + top: .3em; + right: .3em; + width: 1.7em; + height: 1.7em; + opacity: 0; + transition: opacity 0.3s, border .3s, background-color .3s; + user-select: none; + padding: 0; + border: none; + outline: none; + border-radius: 0.4em; + /* The colors that GitHub uses */ + border: #1b1f2426 1px solid; + background-color: #f6f8fa; + color: #57606a; +} + +button.copybtn.success { + border-color: #22863a; + color: #22863a; +} + +button.copybtn svg { + stroke: currentColor; + width: 1.5em; + height: 1.5em; + padding: 0.1em; +} + +div.highlight { + position: relative; +} + +/* Show the copybutton */ +.highlight:hover button.copybtn, button.copybtn.success { + opacity: 1; +} + +.highlight button.copybtn:hover { + background-color: rgb(235, 235, 235); +} + +.highlight button.copybtn:active { + background-color: rgb(187, 187, 187); +} + +/** + * A minimal CSS-only tooltip copied from: + * https://codepen.io/mildrenben/pen/rVBrpK + * + * To use, write HTML like the following: + * + *

Short

+ */ + .o-tooltip--left { + position: relative; + } + + .o-tooltip--left:after { + opacity: 0; + visibility: hidden; + position: absolute; + content: attr(data-tooltip); + padding: .2em; + font-size: .8em; + left: -.2em; + background: grey; + color: white; + white-space: nowrap; + z-index: 2; + border-radius: 2px; + transform: translateX(-102%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); +} + +.o-tooltip--left:hover:after { + display: block; + opacity: 1; + visibility: visible; + transform: translateX(-100%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); + transition-delay: .5s; +} + +/* By default the copy button shouldn't show up when printing a page */ +@media print { + button.copybtn { + display: none; + } +} diff --git a/pull313/_static/copybutton.js b/pull313/_static/copybutton.js new file mode 100644 index 00000000..2ea7ff3e --- /dev/null +++ b/pull313/_static/copybutton.js @@ -0,0 +1,248 @@ +// Localization support +const messages = { + 'en': { + 'copy': 'Copy', + 'copy_to_clipboard': 'Copy to clipboard', + 'copy_success': 'Copied!', + 'copy_failure': 'Failed to copy', + }, + 'es' : { + 'copy': 'Copiar', + 'copy_to_clipboard': 'Copiar al portapapeles', + 'copy_success': '¡Copiado!', + 'copy_failure': 'Error al copiar', + }, + 'de' : { + 'copy': 'Kopieren', + 'copy_to_clipboard': 'In die Zwischenablage kopieren', + 'copy_success': 'Kopiert!', + 'copy_failure': 'Fehler beim Kopieren', + }, + 'fr' : { + 'copy': 'Copier', + 'copy_to_clipboard': 'Copier dans le presse-papier', + 'copy_success': 'Copié !', + 'copy_failure': 'Échec de la copie', + }, + 'ru': { + 'copy': 'Скопировать', + 'copy_to_clipboard': 'Скопировать в буфер', + 'copy_success': 'Скопировано!', + 'copy_failure': 'Не удалось скопировать', + }, + 'zh-CN': { + 'copy': '复制', + 'copy_to_clipboard': '复制到剪贴板', + 'copy_success': '复制成功!', + 
'copy_failure': '复制失败', + }, + 'it' : { + 'copy': 'Copiare', + 'copy_to_clipboard': 'Copiato negli appunti', + 'copy_success': 'Copiato!', + 'copy_failure': 'Errore durante la copia', + } +} + +let locale = 'en' +if( document.documentElement.lang !== undefined + && messages[document.documentElement.lang] !== undefined ) { + locale = document.documentElement.lang +} + +let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT; +if (doc_url_root == '#') { + doc_url_root = ''; +} + +/** + * SVG files for our copy buttons + */ +let iconCheck = ` + ${messages[locale]['copy_success']} + + +` + +// If the user specified their own SVG use that, otherwise use the default +let iconCopy = ``; +if (!iconCopy) { + iconCopy = ` + ${messages[locale]['copy_to_clipboard']} + + + +` +} + +/** + * Set up copy/paste for code blocks + */ + +const runWhenDOMLoaded = cb => { + if (document.readyState != 'loading') { + cb() + } else if (document.addEventListener) { + document.addEventListener('DOMContentLoaded', cb) + } else { + document.attachEvent('onreadystatechange', function() { + if (document.readyState == 'complete') cb() + }) + } +} + +const codeCellId = index => `codecell${index}` + +// Clears selected text since ClipboardJS will select the text when copying +const clearSelection = () => { + if (window.getSelection) { + window.getSelection().removeAllRanges() + } else if (document.selection) { + document.selection.empty() + } +} + +// Changes tooltip text for a moment, then changes it back +// We want the timeout of our `success` class to be a bit shorter than the +// tooltip and icon change, so that we can hide the icon before changing back. 
+var timeoutIcon = 2000; +var timeoutSuccessClass = 1500; + +const temporarilyChangeTooltip = (el, oldText, newText) => { + el.setAttribute('data-tooltip', newText) + el.classList.add('success') + // Remove success a little bit sooner than we change the tooltip + // So that we can use CSS to hide the copybutton first + setTimeout(() => el.classList.remove('success'), timeoutSuccessClass) + setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon) +} + +// Changes the copy button icon for two seconds, then changes it back +const temporarilyChangeIcon = (el) => { + el.innerHTML = iconCheck; + setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon) +} + +const addCopyButtonToCodeCells = () => { + // If ClipboardJS hasn't loaded, wait a bit and try again. This + // happens because we load ClipboardJS asynchronously. + if (window.ClipboardJS === undefined) { + setTimeout(addCopyButtonToCodeCells, 250) + return + } + + // Add copybuttons to all of our code cells + const COPYBUTTON_SELECTOR = 'div.highlight pre'; + const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR) + codeCells.forEach((codeCell, index) => { + const id = codeCellId(index) + codeCell.setAttribute('id', id) + + const clipboardButton = id => + `` + codeCell.insertAdjacentHTML('afterend', clipboardButton(id)) + }) + +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. 
Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? + var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} + + +var copyTargetText = (trigger) => { + var target = document.querySelector(trigger.attributes['data-clipboard-target'].value); + + // get filtered text + let exclude = '.linenos'; + + let text = 
filterText(target, exclude); + return formatCopyText(text, '', false, true, true, true, '', '') +} + + // Initialize with a callback so we can modify the text before copy + const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText}) + + // Update UI with error/success messages + clipboard.on('success', event => { + clearSelection() + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success']) + temporarilyChangeIcon(event.trigger) + }) + + clipboard.on('error', event => { + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure']) + }) +} + +runWhenDOMLoaded(addCopyButtonToCodeCells) \ No newline at end of file diff --git a/pull313/_static/copybutton_funcs.js b/pull313/_static/copybutton_funcs.js new file mode 100644 index 00000000..dbe1aaad --- /dev/null +++ b/pull313/_static/copybutton_funcs.js @@ -0,0 +1,73 @@ +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +export function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. 
Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? + var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} diff --git a/pull313/_static/design-style.4045f2051d55cab465a707391d5b2007.min.css b/pull313/_static/design-style.4045f2051d55cab465a707391d5b2007.min.css new file mode 100644 index 
00000000..3225661c --- /dev/null +++ b/pull313/_static/design-style.4045f2051d55cab465a707391d5b2007.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) !important}.sd-bg-text-warning{color:var(--sd-color-warning-text) !important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) 
!important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) 
!important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) !important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px 
!important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) 
!important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 !important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem 
!important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block 
!important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex 
!important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid !important}.sd-border-top-0{border-top:0 solid 
!important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes 
sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) 
!important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) 
!important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) 
!important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) 
!important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) !important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid 
!important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid 
var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 
992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 
auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 
auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 
auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 
auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 
3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative}details.sd-dropdown .sd-summary-title{font-weight:700;padding-right:3em !important;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;user-select:none}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary{list-style:none;padding:1em}details.sd-dropdown summary .sd-octicon.no-title{vertical-align:middle}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown summary::-webkit-details-marker{display:none}details.sd-dropdown summary:focus{outline:none}details.sd-dropdown .sd-summary-icon{margin-right:.5em}details.sd-dropdown .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary:hover .sd-summary-up svg,details.sd-dropdown summary:hover .sd-summary-down svg{opacity:1;transform:scale(1.1)}details.sd-dropdown .sd-summary-up svg,details.sd-dropdown .sd-summary-down svg{display:block;opacity:.6}details.sd-dropdown .sd-summary-up,details.sd-dropdown .sd-summary-down{pointer-events:none;position:absolute;right:1em;top:1em}details.sd-dropdown[open]>.sd-summary-title .sd-summary-down{visibility:hidden}details.sd-dropdown:not([open])>.sd-summary-title .sd-summary-up{visibility:hidden}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s 
ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #007bff;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: 
#212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0069d9;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: #d9d9d9;--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem} diff --git a/pull313/_static/design-tabs.js b/pull313/_static/design-tabs.js new file mode 100644 index 00000000..36b38cf0 --- /dev/null +++ b/pull313/_static/design-tabs.js @@ -0,0 +1,27 @@ +var sd_labels_by_text = {}; + +function ready() { + const li = document.getElementsByClassName("sd-tab-label"); + for (const label of li) { + syncId = label.getAttribute("data-sync-id"); + if (syncId) { + label.onclick = onLabelClick; + if (!sd_labels_by_text[syncId]) { + sd_labels_by_text[syncId] = 
[]; + } + sd_labels_by_text[syncId].push(label); + } + } +} + +function onLabelClick() { + // Activate other inputs with the same sync id. + syncId = this.getAttribute("data-sync-id"); + for (label of sd_labels_by_text[syncId]) { + if (label === this) continue; + label.previousElementSibling.checked = true; + } + window.localStorage.setItem("sphinx-design-last-tab", syncId); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/pull313/_static/doctools.js b/pull313/_static/doctools.js new file mode 100644 index 00000000..c3db08d1 --- /dev/null +++ b/pull313/_static/doctools.js @@ -0,0 +1,264 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + parent.insertBefore( + span, + parent.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.highlightSearchWords(); + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords: () => { + const highlight = + new URLSearchParams(window.location.search).get("highlight") || ""; + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + const url = new URL(window.location); + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + const blacklistedElements 
= new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", + ]); + document.addEventListener("keydown", (event) => { + if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements + if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + case "Escape": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.hideSearchWords(); + event.preventDefault(); + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/pull313/_static/documentation_options.js b/pull313/_static/documentation_options.js new file mode 100644 index 00000000..162a6ba8 --- /dev/null +++ b/pull313/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: false, +}; \ No newline at end of file diff --git 
a/pull313/_static/file.png b/pull313/_static/file.png new file mode 100644 index 00000000..a858a410 Binary files /dev/null and b/pull313/_static/file.png differ diff --git a/pull313/_static/images/logo_binder.svg b/pull313/_static/images/logo_binder.svg new file mode 100644 index 00000000..45fecf75 --- /dev/null +++ b/pull313/_static/images/logo_binder.svg @@ -0,0 +1,19 @@ + + + + +logo + + + + + + + + diff --git a/pull313/_static/images/logo_colab.png b/pull313/_static/images/logo_colab.png new file mode 100644 index 00000000..b7560ec2 Binary files /dev/null and b/pull313/_static/images/logo_colab.png differ diff --git a/pull313/_static/images/logo_deepnote.svg b/pull313/_static/images/logo_deepnote.svg new file mode 100644 index 00000000..fa77ebfc --- /dev/null +++ b/pull313/_static/images/logo_deepnote.svg @@ -0,0 +1 @@ + diff --git a/pull313/_static/images/logo_jupyterhub.svg b/pull313/_static/images/logo_jupyterhub.svg new file mode 100644 index 00000000..60cfe9f2 --- /dev/null +++ b/pull313/_static/images/logo_jupyterhub.svg @@ -0,0 +1 @@ +logo_jupyterhubHub diff --git a/pull313/_static/jquery-3.6.0.js b/pull313/_static/jquery-3.6.0.js new file mode 100644 index 00000000..fc6c299b --- /dev/null +++ b/pull313/_static/jquery-3.6.0.js @@ -0,0 +1,10881 @@ +/*! + * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. 
+ // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ? function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). 
(gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? 
+ class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. 
+ each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || 
target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. 
+ globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + 
} + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! 
+ * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] 
+ // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + 
rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? + String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( 
preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify 
versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 ] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. 
+ if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? "#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove 
from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. 
+ if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param 
{Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. + support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el 
).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { + + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert( function( el ) { + + var input; + + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "" + + ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should 
be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll( "[selected]" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push( "~=" ); + } + + // Support: IE 11+, Edge 15 - 18+ + // IE 11/Edge don't find elements on a `[name='']` query in some cases. + // Adding a temporary attribute to the document before the selection works + // around the issue. + // Interestingly, IE 10 & older don't seem to have the issue. + input = document.createElement( "input" ); + input.setAttribute( "name", "" ); + el.appendChild( input ); + if ( !el.querySelectorAll( "[name='']" ).length ) { + rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + + whitespace + "*(?:''|\"\")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll( ":checked" ).length ) { + rbuggyQSA.push( ":checked" ); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push( ".#.+[+~]" ); + } + + // Support: Firefox <=3.6 - 5 only + // Old Firefox doesn't throw on a badly-escaped identifier. 
+ el.querySelectorAll( "\\\f" ); + rbuggyQSA.push( "[\\r\\n\\f]" ); + } ); + + assert( function( el ) { + el.innerHTML = "" + + ""; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement( "input" ); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( el.querySelectorAll( "[name=d]" ).length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: Opera 10 - 11 only + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll( "*,:x" ); + rbuggyQSA.push( ",.*:" ); + } ); + } + + if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector ) ) ) ) { + + assert( function( el ) { + + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + } ); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); + + /* Contains 
+ ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? 
+ + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? + val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent 
for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? 
+ function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? 
"nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 
] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." 
+ // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but 
not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 
0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built 
with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length; + while ( i-- ) { + token = tokens[ i ]; + + // Abort if we hit a combinator + if ( Expr.relative[ ( type = token.type ) ] ) { + break; + } + if ( ( find = Expr.find[ type ] ) ) { + + // Search, expanding context for leading sibling combinators + if ( ( seed = find( + token.matches[ 0 ].replace( runescape, funescape ), + rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) || + context + ) ) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + !context || rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando; + +// Support: Chrome 14-35+ +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert( function( el ) { + + // Should return 1, but returns 4 (following) + return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1; +} ); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert( function( el ) { + el.innerHTML = ""; + return el.firstChild.getAttribute( "href" ) === "#"; +} ) ) { + addHandle( "type|href|height|width", function( 
elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + } ); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert( function( el ) { + el.innerHTML = ""; + el.firstChild.setAttribute( "value", "" ); + return el.firstChild.getAttribute( "value" ) === ""; +} ) ) { + addHandle( "value", function( elem, _name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + } ); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert( function( el ) { + return el.getAttribute( "disabled" ) == null; +} ) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + ( val = elem.getAttributeNode( name ) ) && val.specified ? + val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + 
+} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? + jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? 
context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( isFunction( selector ) ) { + return root.ready !== undefined ? 
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? 
this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, _i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, _i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, _i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( elem.contentDocument != null && + + // Support: IE 11+ + // elements with no `data` attribute has an object + // `contentDocument` with a `null` prototype. 
+ getProto( elem.contentDocument ) ) { + + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. 
+ * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? + createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future 
add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? 
+ jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + 
resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } 
else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? [ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = 
undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... ) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? 
+ onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! 
+ return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? 
--jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function 
values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? + value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. 
+ if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. 
A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. 
expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( 
arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? + this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. 
+ if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). 
+ jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && 
isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? this.show() : this.hide(); + } + + return this.each( function() { + if ( isHiddenWithinTree( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + } ); + } +} ); +var rcheckableType = ( /^(?:checkbox|radio)$/i ); + +var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); + +var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); + + + +( function() { + var fragment = document.createDocumentFragment(), + div = fragment.appendChild( document.createElement( "div" ) ), + input = document.createElement( "input" ); + + // Support: Android 4.0 - 4.3 only + // Check state lost if the name is set (#11217) + // Support: Windows Web Apps (WWA) + // `name` and `type` must use .setAttribute for WWA (#14901) + input.setAttribute( "type", "radio" ); + input.setAttribute( "checked", "checked" ); + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + + // Support: Android <=4.1 only + // Older WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE <=11 only + // Make sure textarea (and checkbox) defaultValue is properly cloned + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; + + // Support: IE 
<=9 only + // IE <=9 replaces "; + support.option = !!div.lastChild; +} )(); + + +// We have to close these tags to support XHTML (#13200) +var wrapMap = { + + // XHTML parsers do not magically insert elements in the + // same way that tag soup parsers do. So we cannot shorten + // this by omitting or other required elements. + thead: [ 1, "", "
" ], + col: [ 2, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + _default: [ 0, "", "" ] +}; + +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// Support: IE <=9 only +if ( !support.option ) { + wrapMap.optgroup = wrapMap.option = [ 1, "" ]; +} + + +function getAll( context, tag ) { + + // Support: IE <=9 - 11 only + // Use typeof to avoid zero-argument method invocation on host objects (#15151) + var ret; + + if ( typeof context.getElementsByTagName !== "undefined" ) { + ret = context.getElementsByTagName( tag || "*" ); + + } else if ( typeof context.querySelectorAll !== "undefined" ) { + ret = context.querySelectorAll( tag || "*" ); + + } else { + ret = []; + } + + if ( tag === undefined || tag && nodeName( context, tag ) ) { + return jQuery.merge( [ context ], ret ); + } + + return ret; +} + + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + dataPriv.set( + elems[ i ], + "globalEval", + !refElements || dataPriv.get( refElements[ i ], "globalEval" ) + ); + } +} + + +var rhtml = /<|&#?\w+;/; + +function buildFragment( elems, context, scripts, selection, ignored ) { + var elem, tmp, tag, wrap, attached, j, + fragment = context.createDocumentFragment(), + nodes = [], + i = 0, + l = elems.length; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( toType( elem ) === "object" ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, elem.nodeType ? 
[ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); + + // Deserialize a standard representation + tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; + + // Descend through wrappers to the right content + j = wrap[ 0 ]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( nodes, tmp.childNodes ); + + // Remember the top-level container + tmp = fragment.firstChild; + + // Ensure the created nodes are orphaned (#12392) + tmp.textContent = ""; + } + } + } + + // Remove wrapper from fragment + fragment.textContent = ""; + + i = 0; + while ( ( elem = nodes[ i++ ] ) ) { + + // Skip elements already in the context collection (trac-4087) + if ( selection && jQuery.inArray( elem, selection ) > -1 ) { + if ( ignored ) { + ignored.push( elem ); + } + continue; + } + + attached = isAttached( elem ); + + // Append to fragment + tmp = getAll( fragment.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( attached ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( ( elem = tmp[ j++ ] ) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + return fragment; +} + + +var rtypenamespace = /^([^.]*)(?:\.(.+)|)/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +// Support: IE <=9 - 11+ +// focus() and blur() are asynchronous, except when they are no-op. 
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. 
+ * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." 
).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( 
elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + 
var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
+ if ( !event.rnamespace || handleObj.namespace === false || + event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? 
+ jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? + originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + click: { + + // Utilize native event to ensure correct state for checkable inputs + setup: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... 
) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. 
+function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. 
+ return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? + src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + 
key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+ // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +} ); + +jQuery.fn.extend( { + + on: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn ); + }, + one: function( types, selector, data, fn ) { + return on( this, types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? + handleObj.origType + "." + handleObj.namespace : + handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each( function() { + jQuery.event.remove( this, types, fn, selector ); + } ); + } +} ); + + +var + + // Support: IE <=10 - 11, Edge 12 - 13 only + // In IE/Edge using regex groups here causes severe slowdowns. + // See https://connect.microsoft.com/IE/feedback/details/1736512/ + rnoInnerhtml = /\s*$/g; + +// Prefer a tbody over its parent table for containing new rows +function manipulationTarget( elem, content ) { + if ( nodeName( elem, "table" ) && + nodeName( content.nodeType !== 11 ? 
content : content.firstChild, "tr" ) ) { + + return jQuery( elem ).children( "tbody" )[ 0 ] || elem; + } + + return elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { + elem.type = elem.type.slice( 5 ); + } else { + elem.removeAttribute( "type" ); + } + + return elem; +} + +function cloneCopyEvent( src, dest ) { + var i, l, type, pdataOld, udataOld, udataCur, events; + + if ( dest.nodeType !== 1 ) { + return; + } + + // 1. Copy private data: events, handlers, etc. + if ( dataPriv.hasData( src ) ) { + pdataOld = dataPriv.get( src ); + events = pdataOld.events; + + if ( events ) { + dataPriv.remove( dest, "handle events" ); + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + } + + // 2. Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. 
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). 
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( 
elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // 
Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = 
function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. 
+ function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + 
// Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. 
+ trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. 
+ ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? 
+ + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? "border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + 
extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? 
"1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. 
We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. 
+ scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? 
value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. 
+ if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? 
"hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? 
"" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? 
dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. + // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return 
remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + 
+ prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || 
optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + 
jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || 
nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( 
jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use 
proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + 
+jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. 
+ finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? 
+ "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = 
jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? 
+ new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? 
+ bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. + // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + 
jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). 
+ var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? + jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? 
i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function 
addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? 
target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = 
s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? 
e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? 
+ jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 
0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of 
requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion 
exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? 
success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? 
html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack 
transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( " +{% endmacro %} diff --git a/pull313/_static/scripts/bootstrap.js b/pull313/_static/scripts/bootstrap.js new file mode 100644 index 00000000..bda8a602 --- /dev/null +++ b/pull313/_static/scripts/bootstrap.js @@ -0,0 +1,3 @@ +/*! For license information please see bootstrap.js.LICENSE.txt */ +(()=>{"use strict";var t={d:(e,i)=>{for(var n in i)t.o(i,n)&&!t.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:i[n]})},o:(t,e)=>Object.prototype.hasOwnProperty.call(t,e),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},e={};t.r(e),t.d(e,{afterMain:()=>w,afterRead:()=>b,afterWrite:()=>T,applyStyles:()=>D,arrow:()=>G,auto:()=>r,basePlacements:()=>a,beforeMain:()=>v,beforeRead:()=>g,beforeWrite:()=>E,bottom:()=>n,clippingParents:()=>h,computeStyles:()=>et,createPopper:()=>St,createPopperBase:()=>Lt,createPopperLite:()=>Dt,detectOverflow:()=>gt,end:()=>c,eventListeners:()=>nt,flip:()=>_t,hide:()=>yt,left:()=>o,main:()=>y,modifierPhases:()=>C,offset:()=>wt,placements:()=>m,popper:()=>u,popperGenerator:()=>kt,popperOffsets:()=>Et,preventOverflow:()=>At,read:()=>_,reference:()=>f,right:()=>s,start:()=>l,top:()=>i,variationPlacements:()=>p,viewport:()=>d,write:()=>A});var i="top",n="bottom",s="right",o="left",r="auto",a=[i,n,s,o],l="start",c="end",h="clippingParents",d="viewport",u="popper",f="reference",p=a.reduce((function(t,e){return t.concat([e+"-"+l,e+"-"+c])}),[]),m=[].concat(a,[r]).reduce((function(t,e){return t.concat([e,e+"-"+l,e+"-"+c])}),[]),g="beforeRead",_="read",b="afterRead",v="beforeMain",y="main",w="afterMain",E="beforeWrite",A="write",T="afterWrite",C=[g,_,b,v,y,w,E,A,T];function 
O(t){return t?(t.nodeName||"").toLowerCase():null}function x(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function k(t){return t instanceof x(t).Element||t instanceof Element}function L(t){return t instanceof x(t).HTMLElement||t instanceof HTMLElement}function S(t){return"undefined"!=typeof ShadowRoot&&(t instanceof x(t).ShadowRoot||t instanceof ShadowRoot)}const D={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];L(s)&&O(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});L(n)&&O(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function $(t){return t.split("-")[0]}var I=Math.max,N=Math.min,P=Math.round;function M(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function j(){return!/^((?!chrome|android).)*safari/i.test(M())}function F(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&L(t)&&(s=t.offsetWidth>0&&P(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&P(n.height)/t.offsetHeight||1);var 
r=(k(t)?x(t):window).visualViewport,a=!j()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function H(t){var e=F(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function B(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&S(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function W(t){return x(t).getComputedStyle(t)}function z(t){return["table","td","th"].indexOf(O(t))>=0}function R(t){return((k(t)?t.ownerDocument:t.document)||window.document).documentElement}function q(t){return"html"===O(t)?t:t.assignedSlot||t.parentNode||(S(t)?t.host:null)||R(t)}function V(t){return L(t)&&"fixed"!==W(t).position?t.offsetParent:null}function Y(t){for(var e=x(t),i=V(t);i&&z(i)&&"static"===W(i).position;)i=V(i);return i&&("html"===O(i)||"body"===O(i)&&"static"===W(i).position)?e:i||function(t){var e=/firefox/i.test(M());if(/Trident/i.test(M())&&L(t)&&"fixed"===W(t).position)return null;var i=q(t);for(S(i)&&(i=i.host);L(i)&&["html","body"].indexOf(O(i))<0;){var n=W(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function K(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function Q(t,e,i){return I(t,N(e,i))}function X(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function U(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const G={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,r=t.state,l=t.name,c=t.options,h=r.elements.arrow,d=r.modifiersData.popperOffsets,u=$(r.placement),f=K(u),p=[o,s].indexOf(u)>=0?"height":"width";if(h&&d){var m=function(t,e){return 
X("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:U(t,a))}(c.padding,r),g=H(h),_="y"===f?i:o,b="y"===f?n:s,v=r.rects.reference[p]+r.rects.reference[f]-d[f]-r.rects.popper[p],y=d[f]-r.rects.reference[f],w=Y(h),E=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,A=v/2-y/2,T=m[_],C=E-g[p]-m[b],O=E/2-g[p]/2+A,x=Q(T,O,C),k=f;r.modifiersData[l]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&B(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function J(t){return t.split("-")[1]}var Z={top:"auto",right:"auto",bottom:"auto",left:"auto"};function tt(t){var e,r=t.popper,a=t.popperRect,l=t.placement,h=t.variation,d=t.offsets,u=t.position,f=t.gpuAcceleration,p=t.adaptive,m=t.roundOffsets,g=t.isFixed,_=d.x,b=void 0===_?0:_,v=d.y,y=void 0===v?0:v,w="function"==typeof m?m({x:b,y}):{x:b,y};b=w.x,y=w.y;var E=d.hasOwnProperty("x"),A=d.hasOwnProperty("y"),T=o,C=i,O=window;if(p){var k=Y(r),L="clientHeight",S="clientWidth";k===x(r)&&"static"!==W(k=R(r)).position&&"absolute"===u&&(L="scrollHeight",S="scrollWidth"),(l===i||(l===o||l===s)&&h===c)&&(C=n,y-=(g&&k===O&&O.visualViewport?O.visualViewport.height:k[L])-a.height,y*=f?1:-1),l!==o&&(l!==i&&l!==n||h!==c)||(T=s,b-=(g&&k===O&&O.visualViewport?O.visualViewport.width:k[S])-a.width,b*=f?1:-1)}var D,$=Object.assign({position:u},p&&Z),I=!0===m?function(t,e){var i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:P(i*s)/s||0,y:P(n*s)/s||0}}({x:b,y},x(r)):{x:b,y};return b=I.x,y=I.y,f?Object.assign({},$,((D={})[C]=A?"0":"",D[T]=E?"0":"",D.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",D)):Object.assign({},$,((e={})[C]=A?y+"px":"",e[T]=E?b+"px":"",e.transform="",e))}const 
et={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:$(e.placement),variation:J(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,tt(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,tt(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var it={passive:!0};const nt={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=x(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,it)})),a&&l.addEventListener("resize",i.update,it),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,it)})),a&&l.removeEventListener("resize",i.update,it)}},data:{}};var st={left:"right",right:"left",bottom:"top",top:"bottom"};function ot(t){return t.replace(/left|right|bottom|top/g,(function(t){return st[t]}))}var rt={start:"end",end:"start"};function at(t){return t.replace(/start|end/g,(function(t){return rt[t]}))}function lt(t){var e=x(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ct(t){return F(R(t)).left+lt(t).scrollLeft}function ht(t){var e=W(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function 
dt(t){return["html","body","#document"].indexOf(O(t))>=0?t.ownerDocument.body:L(t)&&ht(t)?t:dt(q(t))}function ut(t,e){var i;void 0===e&&(e=[]);var n=dt(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=x(n),r=s?[o].concat(o.visualViewport||[],ht(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(ut(q(r)))}function ft(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function pt(t,e,i){return e===d?ft(function(t,e){var i=x(t),n=R(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=j();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ct(t),y:l}}(t,i)):k(e)?function(t,e){var i=F(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):ft(function(t){var e,i=R(t),n=lt(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=I(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=I(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ct(t),l=-n.scrollTop;return"rtl"===W(s||i).direction&&(a+=I(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(R(t)))}function mt(t){var e,r=t.reference,a=t.element,h=t.placement,d=h?$(h):null,u=h?J(h):null,f=r.x+r.width/2-a.width/2,p=r.y+r.height/2-a.height/2;switch(d){case i:e={x:f,y:r.y-a.height};break;case n:e={x:f,y:r.y+r.height};break;case s:e={x:r.x+r.width,y:p};break;case o:e={x:r.x-a.width,y:p};break;default:e={x:r.x,y:r.y}}var m=d?K(d):null;if(null!=m){var g="y"===m?"height":"width";switch(u){case l:e[m]=e[m]-(r[g]/2-a[g]/2);break;case c:e[m]=e[m]+(r[g]/2-a[g]/2)}}return e}function gt(t,e){void 0===e&&(e={});var o=e,r=o.placement,l=void 0===r?t.placement:r,c=o.strategy,p=void 0===c?t.strategy:c,m=o.boundary,g=void 0===m?h:m,_=o.rootBoundary,b=void 0===_?d:_,v=o.elementContext,y=void 0===v?u:v,w=o.altBoundary,E=void 
0!==w&&w,A=o.padding,T=void 0===A?0:A,C=X("number"!=typeof T?T:U(T,a)),x=y===u?f:u,S=t.rects.popper,D=t.elements[E?x:y],$=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=ut(q(t)),i=["absolute","fixed"].indexOf(W(t).position)>=0&&L(t)?Y(t):t;return k(i)?e.filter((function(t){return k(t)&&B(t,i)&&"body"!==O(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=pt(t,i,n);return e.top=I(s.top,e.top),e.right=N(s.right,e.right),e.bottom=N(s.bottom,e.bottom),e.left=I(s.left,e.left),e}),pt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(k(D)?D:D.contextElement||R(t.elements.popper),g,b,p),P=F(t.elements.reference),M=mt({reference:P,element:S,strategy:"absolute",placement:l}),j=ft(Object.assign({},S,M)),H=y===u?j:P,z={top:$.top-H.top+C.top,bottom:H.bottom-$.bottom+C.bottom,left:$.left-H.left+C.left,right:H.right-$.right+C.right},V=t.modifiersData.offset;if(y===u&&V){var K=V[l];Object.keys(z).forEach((function(t){var e=[s,n].indexOf(t)>=0?1:-1,o=[i,n].indexOf(t)>=0?"y":"x";z[t]+=K[o]*e}))}return z}const _t={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,c=t.options,h=t.name;if(!e.modifiersData[h]._skip){for(var d=c.mainAxis,u=void 0===d||d,f=c.altAxis,g=void 0===f||f,_=c.fallbackPlacements,b=c.padding,v=c.boundary,y=c.rootBoundary,w=c.altBoundary,E=c.flipVariations,A=void 0===E||E,T=c.allowedAutoPlacements,C=e.options.placement,O=$(C),x=_||(O!==C&&A?function(t){if($(t)===r)return[];var e=ot(t);return[at(t),e,at(e)]}(C):[ot(C)]),k=[C].concat(x).reduce((function(t,i){return t.concat($(i)===r?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,l=i.flipVariations,c=i.allowedAutoPlacements,h=void 0===c?m:c,d=J(n),u=d?l?p:p.filter((function(t){return J(t)===d})):a,f=u.filter((function(t){return h.indexOf(t)>=0}));0===f.length&&(f=u);var g=f.reduce((function(e,i){return 
e[i]=gt(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[$(i)],e}),{});return Object.keys(g).sort((function(t,e){return g[t]-g[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:A,allowedAutoPlacements:T}):i)}),[]),L=e.rects.reference,S=e.rects.popper,D=new Map,I=!0,N=k[0],P=0;P=0,B=H?"width":"height",W=gt(e,{placement:M,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),z=H?F?s:o:F?n:i;L[B]>S[B]&&(z=ot(z));var R=ot(z),q=[];if(u&&q.push(W[j]<=0),g&&q.push(W[z]<=0,W[R]<=0),q.every((function(t){return t}))){N=M,I=!1;break}D.set(M,q)}if(I)for(var V=function(t){var e=k.find((function(e){var i=D.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},Y=A?3:1;Y>0&&"break"!==V(Y);Y--);e.placement!==N&&(e.modifiersData[h]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function bt(t,e,i){return void 0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function vt(t){return[i,s,n,o].some((function(e){return t[e]>=0}))}const yt={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=gt(e,{elementContext:"reference"}),a=gt(e,{altBoundary:!0}),l=bt(r,n),c=bt(a,s,o),h=vt(l),d=vt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},wt={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,n=t.options,r=t.name,a=n.offset,l=void 0===a?[0,0]:a,c=m.reduce((function(t,n){return t[n]=function(t,e,n){var r=$(t),a=[o,i].indexOf(r)>=0?-1:1,l="function"==typeof n?n(Object.assign({},e,{placement:t})):n,c=l[0],h=l[1];return 
c=c||0,h=(h||0)*a,[o,s].indexOf(r)>=0?{x:h,y:c}:{x:c,y:h}}(n,e.rects,l),t}),{}),h=c[e.placement],d=h.x,u=h.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=d,e.modifiersData.popperOffsets.y+=u),e.modifiersData[r]=c}},Et={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=mt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},At={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,r=t.options,a=t.name,c=r.mainAxis,h=void 0===c||c,d=r.altAxis,u=void 0!==d&&d,f=r.boundary,p=r.rootBoundary,m=r.altBoundary,g=r.padding,_=r.tether,b=void 0===_||_,v=r.tetherOffset,y=void 0===v?0:v,w=gt(e,{boundary:f,rootBoundary:p,padding:g,altBoundary:m}),E=$(e.placement),A=J(e.placement),T=!A,C=K(E),O="x"===C?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,S="function"==typeof y?y(Object.assign({},e.rects,{placement:e.placement})):y,D="number"==typeof S?{mainAxis:S,altAxis:S}:Object.assign({mainAxis:0,altAxis:0},S),P=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,M={x:0,y:0};if(x){if(h){var j,F="y"===C?i:o,B="y"===C?n:s,W="y"===C?"height":"width",z=x[C],R=z+w[F],q=z-w[B],V=b?-L[W]/2:0,X=A===l?k[W]:L[W],U=A===l?-L[W]:-k[W],G=e.elements.arrow,Z=b&&G?H(G):{width:0,height:0},tt=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=tt[F],it=tt[B],nt=Q(0,k[W],Z[W]),st=T?k[W]/2-V-nt-et-D.mainAxis:X-nt-et-D.mainAxis,ot=T?-k[W]/2+V+nt+it+D.mainAxis:U+nt+it+D.mainAxis,rt=e.elements.arrow&&Y(e.elements.arrow),at=rt?"y"===C?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(j=null==P?void 0:P[C])?j:0,ct=z+ot-lt,ht=Q(b?N(R,z+st-lt-at):R,z,b?I(q,ct):q);x[C]=ht,M[C]=ht-z}if(u){var dt,ut="x"===C?i:o,ft="x"===C?n:s,pt=x[O],mt="y"===O?"height":"width",_t=pt+w[ut],bt=pt-w[ft],vt=-1!==[i,o].indexOf(E),yt=null!=(dt=null==P?void 
0:P[O])?dt:0,wt=vt?_t:pt-k[mt]-L[mt]-yt+D.altAxis,Et=vt?pt+k[mt]+L[mt]-yt-D.altAxis:bt,At=b&&vt?function(t,e,i){var n=Q(t,e,i);return n>i?i:n}(wt,pt,Et):Q(b?wt:_t,pt,b?Et:bt);x[O]=At,M[O]=At-pt}e.modifiersData[a]=M}},requiresIfExists:["offset"]};function Tt(t,e,i){void 0===i&&(i=!1);var n,s,o=L(e),r=L(e)&&function(t){var e=t.getBoundingClientRect(),i=P(e.width)/t.offsetWidth||1,n=P(e.height)/t.offsetHeight||1;return 1!==i||1!==n}(e),a=R(e),l=F(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==O(e)||ht(a))&&(c=(n=e)!==x(n)&&L(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:lt(n)),L(e)?((h=F(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=ct(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function Ct(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var Ot={placement:"bottom",modifiers:[],strategy:"absolute"};function xt(){for(var t=arguments.length,e=new Array(t),i=0;i$t.has(t)&&$t.get(t).get(e)||null,remove(t,e){if(!$t.has(t))return;const i=$t.get(t);i.delete(e),0===i.size&&$t.delete(t)}},Nt="transitionend",Pt=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\s"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),Mt=t=>{t.dispatchEvent(new Event(Nt))},jt=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ft=t=>jt(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(Pt(t)):null,Ht=t=>{if(!jt(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return 
e},Bt=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),Wt=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?Wt(t.parentNode):null},zt=()=>{},Rt=t=>{t.offsetHeight},qt=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Vt=[],Yt=()=>"rtl"===document.documentElement.dir,Kt=t=>{var e;e=()=>{const e=qt();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Vt.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of Vt)t()})),Vt.push(e)):e()},Qt=(t,e=[],i=t)=>"function"==typeof t?t(...e):i,Xt=(t,e,i=!0)=>{if(!i)return void Qt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener(Nt,o),Qt(t))};e.addEventListener(Nt,o),setTimeout((()=>{s||Mt(e)}),n)},Ut=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Gt=/[^.]*(?=\..*)\.|.*/,Jt=/\..*/,Zt=/::\d+$/,te={};let ee=1;const ie={mouseenter:"mouseover",mouseleave:"mouseout"},ne=new 
Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function se(t,e){return e&&`${e}::${ee++}`||t.uidEvent||ee++}function oe(t){const e=se(t);return t.uidEvent=e,te[e]=te[e]||{},te[e]}function re(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function ae(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=de(t);return ne.has(o)||(o=t),[n,s,o]}function le(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=ae(e,i,n);if(e in ie){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=oe(t),c=l[a]||(l[a]={}),h=re(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=se(r,e.replace(Gt,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return fe(s,{delegateTarget:r}),n.oneOff&&ue.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return fe(n,{delegateTarget:t}),i.oneOff&&ue.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function ce(t,e,i,n,s){const o=re(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function he(t,e,i,n){const s=e[i]||{};for(const[o,r]of Object.entries(s))o.includes(n)&&ce(t,e,i,r.callable,r.delegationSelector)}function de(t){return t=t.replace(Jt,""),ie[t]||t}const 
ue={on(t,e,i,n){le(t,e,i,n,!1)},one(t,e,i,n){le(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=ae(e,i,n),a=r!==e,l=oe(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))he(t,l,i,e.slice(1));for(const[i,n]of Object.entries(c)){const s=i.replace(Zt,"");a&&!e.includes(s)||ce(t,l,r,n.callable,n.delegationSelector)}}else{if(!Object.keys(c).length)return;ce(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=qt();let s=null,o=!0,r=!0,a=!1;e!==de(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());const l=fe(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function fe(t,e={}){for(const[i,n]of Object.entries(e))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}function pe(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function me(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const ge={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${me(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${me(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=pe(t.dataset[n])}return e},getDataAttribute:(t,e)=>pe(t.getAttribute(`data-bs-${me(e)}`))};class _e{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return 
t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=jt(e)?ge.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...jt(e)?ge.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[n,s]of Object.entries(e)){const e=t[n],o=jt(e)?"element":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(o))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${o}" but expected type "${s}".`)}var i}}class be extends _e{constructor(t,e){super(),(t=Ft(t))&&(this._element=t,this._config=this._getConfig(e),It.set(this._element,this.constructor.DATA_KEY,this))}dispose(){It.remove(this._element,this.constructor.DATA_KEY),ue.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Xt(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return It.get(Ft(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.3.2"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const ve=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?Pt(i.trim()):null}return 
e},ye={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!Bt(t)&&Ht(t)))},getSelectorFromElement(t){const e=ve(t);return e&&ye.findOne(e)?e:null},getElementFromSelector(t){const e=ve(t);return e?ye.findOne(e):null},getMultipleElementsFromSelector(t){const e=ve(t);return e?ye.find(e):[]}},we=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;ue.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),Bt(this))return;const s=ye.getElementFromSelector(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},Ee=".bs.alert",Ae=`close${Ee}`,Te=`closed${Ee}`;class Ce extends be{static get NAME(){return"alert"}close(){if(ue.trigger(this._element,Ae).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),ue.trigger(this._element,Te),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Ce.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}we(Ce,"close"),Kt(Ce);const 
Oe='[data-bs-toggle="button"]';class xe extends be{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=xe.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}ue.on(document,"click.bs.button.data-api",Oe,(t=>{t.preventDefault();const e=t.target.closest(Oe);xe.getOrCreateInstance(e).toggle()})),Kt(xe);const ke=".bs.swipe",Le=`touchstart${ke}`,Se=`touchmove${ke}`,De=`touchend${ke}`,$e=`pointerdown${ke}`,Ie=`pointerup${ke}`,Ne={endCallback:null,leftCallback:null,rightCallback:null},Pe={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class Me extends _e{constructor(t,e){super(),this._element=t,t&&Me.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Ne}static get DefaultType(){return Pe}static get NAME(){return"swipe"}dispose(){ue.off(this._element,ke)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Qt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Qt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(ue.on(this._element,$e,(t=>this._start(t))),ue.on(this._element,Ie,(t=>this._end(t))),this._element.classList.add("pointer-event")):(ue.on(this._element,Le,(t=>this._start(t))),ue.on(this._element,Se,(t=>this._move(t))),ue.on(this._element,De,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static 
isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const je=".bs.carousel",Fe=".data-api",He="next",Be="prev",We="left",ze="right",Re=`slide${je}`,qe=`slid${je}`,Ve=`keydown${je}`,Ye=`mouseenter${je}`,Ke=`mouseleave${je}`,Qe=`dragstart${je}`,Xe=`load${je}${Fe}`,Ue=`click${je}${Fe}`,Ge="carousel",Je="active",Ze=".active",ti=".carousel-item",ei=Ze+ti,ii={ArrowLeft:ze,ArrowRight:We},ni={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},si={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class oi extends be{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=ye.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===Ge&&this.cycle()}static get Default(){return ni}static get DefaultType(){return si}static get NAME(){return"carousel"}next(){this._slide(He)}nextWhenVisible(){!document.hidden&&Ht(this._element)&&this.next()}prev(){this._slide(Be)}pause(){this._isSliding&&Mt(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?ue.one(this._element,qe,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void ue.one(this._element,qe,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?He:Be;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return 
t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&ue.on(this._element,Ve,(t=>this._keydown(t))),"hover"===this._config.pause&&(ue.on(this._element,Ye,(()=>this.pause())),ue.on(this._element,Ke,(()=>this._maybeEnableCycle()))),this._config.touch&&Me.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of ye.find(".carousel-item img",this._element))ue.on(t,Qe,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(We)),rightCallback:()=>this._slide(this._directionToOrder(ze)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new Me(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=ii[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=ye.findOne(Ze,this._indicatorsElement);e.classList.remove(Je),e.removeAttribute("aria-current");const i=ye.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(Je),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===He,s=e||Ut(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>ue.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(Re).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const 
l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),Rt(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(Je),i.classList.remove(Je,c,l),this._isSliding=!1,r(qe)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return ye.findOne(ei,this._element)}_getItems(){return ye.find(ti,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Yt()?t===We?Be:He:t===We?He:Be}_orderToDirection(t){return Yt()?t===Be?We:ze:t===Be?ze:We}static jQueryInterface(t){return this.each((function(){const e=oi.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}ue.on(document,Ue,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=ye.getElementFromSelector(this);if(!e||!e.classList.contains(Ge))return;t.preventDefault();const i=oi.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===ge.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),ue.on(window,Xe,(()=>{const t=ye.find('[data-bs-ride="carousel"]');for(const e of t)oi.getOrCreateInstance(e)})),Kt(oi);const ri=".bs.collapse",ai=`show${ri}`,li=`shown${ri}`,ci=`hide${ri}`,hi=`hidden${ri}`,di=`click${ri}.data-api`,ui="show",fi="collapse",pi="collapsing",mi=`:scope .${fi} .${fi}`,gi='[data-bs-toggle="collapse"]',_i={parent:null,toggle:!0},bi={parent:"(null|element)",toggle:"boolean"};class vi extends be{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=ye.find(gi);for(const t of i){const 
e=ye.getSelectorFromElement(t),i=ye.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return _i}static get DefaultType(){return bi}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>vi.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(ue.trigger(this._element,ai).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(fi),this._element.classList.add(pi),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(pi),this._element.classList.add(fi,ui),this._element.style[e]="",ue.trigger(this._element,li)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(ue.trigger(this._element,ci).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,Rt(this._element),this._element.classList.add(pi),this._element.classList.remove(fi,ui);for(const t of this._triggerArray){const e=ye.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(pi),this._element.classList.add(fi),ue.trigger(this._element,hi)}),this._element,!0)}_isShown(t=this._element){return 
t.classList.contains(ui)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=Ft(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(gi);for(const e of t){const t=ye.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const e=ye.find(mi,this._config.parent);return ye.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=vi.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}ue.on(document,di,gi,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of ye.getMultipleElementsFromSelector(this))vi.getOrCreateInstance(t,{toggle:!1}).toggle()})),Kt(vi);const yi="dropdown",wi=".bs.dropdown",Ei=".data-api",Ai="ArrowUp",Ti="ArrowDown",Ci=`hide${wi}`,Oi=`hidden${wi}`,xi=`show${wi}`,ki=`shown${wi}`,Li=`click${wi}${Ei}`,Si=`keydown${wi}${Ei}`,Di=`keyup${wi}${Ei}`,$i="show",Ii='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',Ni=`${Ii}.${$i}`,Pi=".dropdown-menu",Mi=Yt()?"top-end":"top-start",ji=Yt()?"top-start":"top-end",Fi=Yt()?"bottom-end":"bottom-start",Hi=Yt()?"bottom-start":"bottom-end",Bi=Yt()?"left-start":"right-start",Wi=Yt()?"right-start":"left-start",zi={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},Ri={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class qi 
extends be{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=ye.next(this._element,Pi)[0]||ye.prev(this._element,Pi)[0]||ye.findOne(Pi,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return zi}static get DefaultType(){return Ri}static get NAME(){return yi}toggle(){return this._isShown()?this.hide():this.show()}show(){if(Bt(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!ue.trigger(this._element,xi,t).defaultPrevented){if(this._createPopper(),"ontouchstart"in document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))ue.on(t,"mouseover",zt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add($i),this._element.classList.add($i),ue.trigger(this._element,ki,t)}}hide(){if(Bt(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!ue.trigger(this._element,Ci,t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))ue.off(t,"mouseover",zt);this._popper&&this._popper.destroy(),this._menu.classList.remove($i),this._element.classList.remove($i),this._element.setAttribute("aria-expanded","false"),ge.removeDataAttribute(this._menu,"popper"),ue.trigger(this._element,Oi,t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!jt(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError(`${yi.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(){if(void 0===e)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let 
t=this._element;"parent"===this._config.reference?t=this._parent:jt(this._config.reference)?t=Ft(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const i=this._getPopperConfig();this._popper=St(t,this._menu,i)}_isShown(){return this._menu.classList.contains($i)}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Bi;if(t.classList.contains("dropstart"))return Wi;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?ji:Mi:e?Hi:Fi}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(ge.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,...Qt(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=ye.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>Ht(t)));i.length&&Ut(i,e,t===Ti,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=qi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=ye.find(Ni);for(const i of e){const e=qi.getInstance(i);if(!e||!1===e._config.autoClose)continue;const 
n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Ai,Ti].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Ii)?this:ye.prev(this,Ii)[0]||ye.next(this,Ii)[0]||ye.findOne(Ii,t.delegateTarget.parentNode),o=qi.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}ue.on(document,Si,Ii,qi.dataApiKeydownHandler),ue.on(document,Si,Pi,qi.dataApiKeydownHandler),ue.on(document,Li,qi.clearMenus),ue.on(document,Di,qi.clearMenus),ue.on(document,Li,Ii,(function(t){t.preventDefault(),qi.getOrCreateInstance(this).toggle()})),Kt(qi);const Vi="backdrop",Yi="show",Ki=`mousedown.bs.${Vi}`,Qi={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Xi={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Ui extends _e{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Qi}static get DefaultType(){return Xi}static get NAME(){return Vi}show(t){if(!this._config.isVisible)return void Qt(t);this._append();const 
e=this._getElement();this._config.isAnimated&&Rt(e),e.classList.add(Yi),this._emulateAnimation((()=>{Qt(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Yi),this._emulateAnimation((()=>{this.dispose(),Qt(t)}))):Qt(t)}dispose(){this._isAppended&&(ue.off(this._element,Ki),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ft(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),ue.on(t,Ki,(()=>{Qt(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){Xt(t,this._getElement(),this._config.isAnimated)}}const Gi=".bs.focustrap",Ji=`focusin${Gi}`,Zi=`keydown.tab${Gi}`,tn="backward",en={autofocus:!0,trapElement:null},nn={autofocus:"boolean",trapElement:"element"};class sn extends _e{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return en}static get DefaultType(){return nn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),ue.off(document,Gi),ue.on(document,Ji,(t=>this._handleFocusin(t))),ue.on(document,Zi,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,ue.off(document,Gi))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=ye.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===tn?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?tn:"forward")}}const on=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",rn=".sticky-top",an="padding-right",ln="margin-right";class 
cn{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,an,(e=>e+t)),this._setElementAttributes(on,an,(e=>e+t)),this._setElementAttributes(rn,ln,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,an),this._resetElementAttributes(on,an),this._resetElementAttributes(rn,ln)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&ge.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=ge.getDataAttribute(t,e);null!==i?(ge.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(jt(t))e(t);else for(const i of ye.find(t,this._element))e(i)}}const hn=".bs.modal",dn=`hide${hn}`,un=`hidePrevented${hn}`,fn=`hidden${hn}`,pn=`show${hn}`,mn=`shown${hn}`,gn=`resize${hn}`,_n=`click.dismiss${hn}`,bn=`mousedown.dismiss${hn}`,vn=`keydown.dismiss${hn}`,yn=`click${hn}.data-api`,wn="modal-open",En="show",An="modal-static",Tn={backdrop:!0,focus:!0,keyboard:!0},Cn={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class On extends be{constructor(t,e){super(t,e),this._dialog=ye.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new 
cn,this._addEventListeners()}static get Default(){return Tn}static get DefaultType(){return Cn}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||ue.trigger(this._element,pn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(wn),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(ue.trigger(this._element,dn).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(En),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){ue.off(window,hn),ue.off(this._dialog,hn),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Ui({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new sn({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const 
e=ye.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),Rt(this._element),this._element.classList.add(En),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,ue.trigger(this._element,mn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){ue.on(this._element,vn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),ue.on(window,gn,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),ue.on(this._element,bn,(t=>{ue.one(this._element,_n,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(wn),this._resetAdjustments(),this._scrollBar.reset(),ue.trigger(this._element,fn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(ue.trigger(this._element,un).defaultPrevented)return;const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(An)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(An),this._queueCallback((()=>{this._element.classList.remove(An),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Yt()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const 
t=Yt()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=On.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}ue.on(document,yn,'[data-bs-toggle="modal"]',(function(t){const e=ye.getElementFromSelector(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),ue.one(e,pn,(t=>{t.defaultPrevented||ue.one(e,fn,(()=>{Ht(this)&&this.focus()}))}));const i=ye.findOne(".modal.show");i&&On.getInstance(i).hide(),On.getOrCreateInstance(e).toggle(this)})),we(On),Kt(On);const xn=".bs.offcanvas",kn=".data-api",Ln=`load${xn}${kn}`,Sn="show",Dn="showing",$n="hiding",In=".offcanvas.show",Nn=`show${xn}`,Pn=`shown${xn}`,Mn=`hide${xn}`,jn=`hidePrevented${xn}`,Fn=`hidden${xn}`,Hn=`resize${xn}`,Bn=`click${xn}${kn}`,Wn=`keydown.dismiss${xn}`,zn={backdrop:!0,keyboard:!0,scroll:!1},Rn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class qn extends be{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return zn}static get DefaultType(){return Rn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||ue.trigger(this._element,Nn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new 
cn).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Dn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(Sn),this._element.classList.remove(Dn),ue.trigger(this._element,Pn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(ue.trigger(this._element,Mn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add($n),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(Sn,$n),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new cn).reset(),ue.trigger(this._element,Fn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Ui({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():ue.trigger(this._element,jn)}:null})}_initializeFocusTrap(){return new sn({trapElement:this._element})}_addEventListeners(){ue.on(this._element,Wn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():ue.trigger(this._element,jn))}))}static jQueryInterface(t){return this.each((function(){const e=qn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}ue.on(document,Bn,'[data-bs-toggle="offcanvas"]',(function(t){const e=ye.getElementFromSelector(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),Bt(this))return;ue.one(e,Fn,(()=>{Ht(this)&&this.focus()}));const i=ye.findOne(In);i&&i!==e&&qn.getInstance(i).hide(),qn.getOrCreateInstance(e).toggle(this)})),ue.on(window,Ln,(()=>{for(const t of 
ye.find(In))qn.getOrCreateInstance(t).show()})),ue.on(window,Hn,(()=>{for(const t of ye.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&qn.getOrCreateInstance(t).hide()})),we(qn),Kt(qn);const Vn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Yn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Kn=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,Qn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Yn.has(i)||Boolean(Kn.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Xn={allowList:Vn,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"
"},Un={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},Gn={entry:"(string|element|function|null)",selector:"(string|element)"};class Jn extends _e{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Xn}static get DefaultType(){return Un}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},Gn)}_setContent(t,e,i){const n=ye.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?jt(e)?this._putElementInTemplate(Ft(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Qn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return 
Qt(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const Zn=new Set(["sanitize","allowList","sanitizeFn"]),ts="fade",es="show",is=".modal",ns="hide.bs.modal",ss="hover",os="focus",rs={AUTO:"auto",TOP:"top",RIGHT:Yt()?"left":"right",BOTTOM:"bottom",LEFT:Yt()?"right":"left"},as={allowList:Vn,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,6],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'',title:"",trigger:"hover focus"},ls={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class cs extends be{constructor(t,i){if(void 0===e)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,i),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return as}static get DefaultType(){return ls}static get 
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),ue.off(this._element.closest(is),ns,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=ue.trigger(this._element,this.constructor.eventName("show")),e=(Wt(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),ue.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(es),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))ue.on(t,"mouseover",zt);this._queueCallback((()=>{ue.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!ue.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(es),"ontouchstart"in document.documentElement)for(const t 
of[].concat(...document.body.children))ue.off(t,"mouseover",zt);this._activeTrigger.click=!1,this._activeTrigger[os]=!1,this._activeTrigger[ss]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),ue.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(ts,es),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(ts),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new Jn({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{".tooltip-inner":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(ts)}_isShown(){return this.tip&&this.tip.classList.contains(es)}_createPopper(t){const e=Qt(this._config.placement,[this,t,this._element]),i=rs[e.toUpperCase()];return St(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof 
t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return Qt(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,...Qt(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)ue.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===ss?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===ss?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");ue.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?os:ss]=!0,e._enter()})),ue.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?os:ss]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},ue.on(this._element.closest(is),ns,this._hideModalHandler)}_fixTitle(){const 
t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=ge.getDataAttributes(this._element);for(const t of Object.keys(e))Zn.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ft(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=cs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Kt(cs);const hs={...cs.Default,content:"",offset:[0,8],placement:"right",template:'',trigger:"click"},ds={...cs.DefaultType,content:"(null|string|element|function)"};class us extends cs{static get Default(){return hs}static get DefaultType(){return ds}static get NAME(){return"popover"}_isWithContent(){return 
this._getTitle()||this._getContent()}_getContentForTemplate(){return{".popover-header":this._getTitle(),".popover-body":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=us.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Kt(us);const fs=".bs.scrollspy",ps=`activate${fs}`,ms=`click${fs}`,gs=`load${fs}.data-api`,_s="active",bs="[href]",vs=".nav-link",ys=`${vs}, .nav-item > ${vs}, .list-group-item`,ws={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},Es={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class As extends be{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return ws}static get DefaultType(){return Es}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ft(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(ue.off(this._config.target,ms),ue.on(this._config.target,ms,bs,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const 
i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=ye.find(bs,this._config.target);for(const e of t){if(!e.hash||Bt(e))continue;const t=ye.findOne(decodeURI(e.hash),this._element);Ht(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(_s),this._activateParents(t),ue.trigger(this._element,ps,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))ye.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(_s);else for(const e of ye.parents(t,".nav, .list-group"))for(const t of ye.prev(e,ys))t.classList.add(_s)}_clearActiveClass(t){t.classList.remove(_s);const e=ye.find(`${bs}.${_s}`,t);for(const t of e)t.classList.remove(_s)}static jQueryInterface(t){return this.each((function(){const e=As.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}ue.on(window,gs,(()=>{for(const t of 
ye.find('[data-bs-spy="scroll"]'))As.getOrCreateInstance(t)})),Kt(As);const Ts=".bs.tab",Cs=`hide${Ts}`,Os=`hidden${Ts}`,xs=`show${Ts}`,ks=`shown${Ts}`,Ls=`click${Ts}`,Ss=`keydown${Ts}`,Ds=`load${Ts}`,$s="ArrowLeft",Is="ArrowRight",Ns="ArrowUp",Ps="ArrowDown",Ms="Home",js="End",Fs="active",Hs="fade",Bs="show",Ws=".dropdown-toggle",zs=`:not(${Ws})`,Rs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',qs=`.nav-link${zs}, .list-group-item${zs}, [role="tab"]${zs}, ${Rs}`,Vs=`.${Fs}[data-bs-toggle="tab"], .${Fs}[data-bs-toggle="pill"], .${Fs}[data-bs-toggle="list"]`;class Ys extends be{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),ue.on(this._element,Ss,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?ue.trigger(e,Cs,{relatedTarget:t}):null;ue.trigger(t,xs,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(Fs),this._activate(ye.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),ue.trigger(t,ks,{relatedTarget:e})):t.classList.add(Bs)}),t,t.classList.contains(Hs)))}_deactivate(t,e){t&&(t.classList.remove(Fs),t.blur(),this._deactivate(ye.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),ue.trigger(t,Os,{relatedTarget:e})):t.classList.remove(Bs)}),t,t.classList.contains(Hs)))}_keydown(t){if(![$s,Is,Ns,Ps,Ms,js].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!Bt(t)));let 
i;if([Ms,js].includes(t.key))i=e[t.key===Ms?0:e.length-1];else{const n=[Is,Ps].includes(t.key);i=Ut(e,t.target,n,!0)}i&&(i.focus({preventScroll:!0}),Ys.getOrCreateInstance(i).show())}_getChildren(){return ye.find(qs,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=ye.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=ye.findOne(t,i);s&&s.classList.toggle(n,e)};n(Ws,Fs),n(".dropdown-menu",Bs),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(Fs)}_getInnerElement(t){return t.matches(qs)?t:ye.findOne(qs,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Ys.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}ue.on(document,Ls,Rs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),Bt(this)||Ys.getOrCreateInstance(this).show()})),ue.on(window,Ds,(()=>{for(const t of ye.find(Vs))Ys.getOrCreateInstance(t)})),Kt(Ys);const 
Ks=".bs.toast",Qs=`mouseover${Ks}`,Xs=`mouseout${Ks}`,Us=`focusin${Ks}`,Gs=`focusout${Ks}`,Js=`hide${Ks}`,Zs=`hidden${Ks}`,to=`show${Ks}`,eo=`shown${Ks}`,io="hide",no="show",so="showing",oo={animation:"boolean",autohide:"boolean",delay:"number"},ro={animation:!0,autohide:!0,delay:5e3};class ao extends be{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return ro}static get DefaultType(){return oo}static get NAME(){return"toast"}show(){ue.trigger(this._element,to).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(io),Rt(this._element),this._element.classList.add(no,so),this._queueCallback((()=>{this._element.classList.remove(so),ue.trigger(this._element,eo),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(ue.trigger(this._element,Js).defaultPrevented||(this._element.classList.add(so),this._queueCallback((()=>{this._element.classList.add(io),this._element.classList.remove(so,no),ue.trigger(this._element,Zs)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(no),super.dispose()}isShown(){return this._element.classList.contains(no)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const 
i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){ue.on(this._element,Qs,(t=>this._onInteraction(t,!0))),ue.on(this._element,Xs,(t=>this._onInteraction(t,!1))),ue.on(this._element,Us,(t=>this._onInteraction(t,!0))),ue.on(this._element,Gs,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return this.each((function(){const e=ao.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}function lo(t){"loading"!=document.readyState?t():document.addEventListener("DOMContentLoaded",t)}we(ao),Kt(ao),lo((function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new cs(t,{delay:{show:500,hide:100}})}))})),lo((function(){document.getElementById("pst-back-to-top").addEventListener("click",(function(){document.body.scrollTop=0,document.documentElement.scrollTop=0}))})),lo((function(){var t=document.getElementById("pst-back-to-top"),e=document.getElementsByClassName("bd-header")[0].getBoundingClientRect();window.addEventListener("scroll",(function(){this.oldScroll>this.scrollY&&this.scrollY>e.bottom?t.style.display="block":t.style.display="none",this.oldScroll=this.scrollY}))}))})(); +//# sourceMappingURL=bootstrap.js.map \ No newline at end of file diff --git a/pull313/_static/scripts/bootstrap.js.LICENSE.txt b/pull313/_static/scripts/bootstrap.js.LICENSE.txt new file mode 100644 index 00000000..10f979d0 --- /dev/null +++ b/pull313/_static/scripts/bootstrap.js.LICENSE.txt @@ -0,0 +1,5 @@ +/*! 
+ * Bootstrap v5.3.2 (https://getbootstrap.com/) + * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */ diff --git a/pull313/_static/scripts/bootstrap.js.map b/pull313/_static/scripts/bootstrap.js.map new file mode 100644 index 00000000..e5bc1575 --- /dev/null +++ b/pull313/_static/scripts/bootstrap.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/bootstrap.js","mappings":";mBACA,IAAIA,EAAsB,CCA1BA,EAAwB,CAACC,EAASC,KACjC,IAAI,IAAIC,KAAOD,EACXF,EAAoBI,EAAEF,EAAYC,KAASH,EAAoBI,EAAEH,EAASE,IAC5EE,OAAOC,eAAeL,EAASE,EAAK,CAAEI,YAAY,EAAMC,IAAKN,EAAWC,IAE1E,ECNDH,EAAwB,CAACS,EAAKC,IAAUL,OAAOM,UAAUC,eAAeC,KAAKJ,EAAKC,GCClFV,EAAyBC,IACH,oBAAXa,QAA0BA,OAAOC,aAC1CV,OAAOC,eAAeL,EAASa,OAAOC,YAAa,CAAEC,MAAO,WAE7DX,OAAOC,eAAeL,EAAS,aAAc,CAAEe,OAAO,GAAO,ipBCLvD,IAAI,EAAM,MACNC,EAAS,SACTC,EAAQ,QACRC,EAAO,OACPC,EAAO,OACPC,EAAiB,CAAC,EAAKJ,EAAQC,EAAOC,GACtCG,EAAQ,QACRC,EAAM,MACNC,EAAkB,kBAClBC,EAAW,WACXC,EAAS,SACTC,EAAY,YACZC,EAAmCP,EAAeQ,QAAO,SAAUC,EAAKC,GACjF,OAAOD,EAAIE,OAAO,CAACD,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAChE,GAAG,IACQ,EAA0B,GAAGS,OAAOX,EAAgB,CAACD,IAAOS,QAAO,SAAUC,EAAKC,GAC3F,OAAOD,EAAIE,OAAO,CAACD,EAAWA,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAC3E,GAAG,IAEQU,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAc,cACdC,EAAQ,QACRC,EAAa,aACbC,EAAiB,CAACT,EAAYC,EAAMC,EAAWC,EAAYC,EAAMC,EAAWC,EAAaC,EAAOC,GC9B5F,SAASE,EAAYC,GAClC,OAAOA,GAAWA,EAAQC,UAAY,IAAIC,cAAgB,IAC5D,CCFe,SAASC,EAAUC,GAChC,GAAY,MAARA,EACF,OAAOC,OAGT,GAAwB,oBAApBD,EAAKE,WAAkC,CACzC,IAAIC,EAAgBH,EAAKG,cACzB,OAAOA,GAAgBA,EAAcC,aAAwBH,MAC/D,CAEA,OAAOD,CACT,CCTA,SAASK,EAAUL,GAEjB,OAAOA,aADUD,EAAUC,GAAMM,SACIN,aAAgBM,OACvD,CAEA,SAASC,EAAcP,GAErB,OAAOA,aADUD,EAAUC,GAAMQ,aACIR,aAAgBQ,WACvD,CAEA,SAASC,EAAaT,GAEpB,MAA0B,oBAAfU,aAKJV,aADUD,EAAUC,GAAMU,YACIV,aAAgBU,WACvD,CCwDA,SACEC,KAAM,cACNC,SAAS,EACTC,MAAO,QACPC,GA5EF,SAAqBC,GACnB,IAAIC,EAAQD,EAAKC,MACjB3D,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,
SAAUR,GAC5C,IAAIS,EAAQJ,EAAMK,OAAOV,IAAS,CAAC,EAC/BW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EACxCf,EAAUoB,EAAME,SAASP,GAExBJ,EAAcX,IAAaD,EAAYC,KAO5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUR,GACxC,IAAI3C,EAAQsD,EAAWX,IAET,IAAV3C,EACF4B,EAAQ4B,gBAAgBb,GAExBf,EAAQ6B,aAAad,GAAgB,IAAV3C,EAAiB,GAAKA,EAErD,IACF,GACF,EAoDE0D,OAlDF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MACdY,EAAgB,CAClBlD,OAAQ,CACNmD,SAAUb,EAAMc,QAAQC,SACxB5D,KAAM,IACN6D,IAAK,IACLC,OAAQ,KAEVC,MAAO,CACLL,SAAU,YAEZlD,UAAW,CAAC,GASd,OAPAtB,OAAOkE,OAAOP,EAAME,SAASxC,OAAO0C,MAAOQ,EAAclD,QACzDsC,EAAMK,OAASO,EAEXZ,EAAME,SAASgB,OACjB7E,OAAOkE,OAAOP,EAAME,SAASgB,MAAMd,MAAOQ,EAAcM,OAGnD,WACL7E,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIf,EAAUoB,EAAME,SAASP,GACzBW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EAGxCS,EAFkB/D,OAAO4D,KAAKD,EAAMK,OAAOzD,eAAe+C,GAAQK,EAAMK,OAAOV,GAAQiB,EAAcjB,IAE7E9B,QAAO,SAAUuC,EAAOe,GAElD,OADAf,EAAMe,GAAY,GACXf,CACT,GAAG,CAAC,GAECb,EAAcX,IAAaD,EAAYC,KAI5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUiB,GACxCxC,EAAQ4B,gBAAgBY,EAC1B,IACF,GACF,CACF,EASEC,SAAU,CAAC,kBCjFE,SAASC,EAAiBvD,GACvC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCHO,IAAI,EAAMC,KAAKC,IACX,EAAMD,KAAKE,IACXC,EAAQH,KAAKG,MCFT,SAASC,IACtB,IAAIC,EAASC,UAAUC,cAEvB,OAAc,MAAVF,GAAkBA,EAAOG,QAAUC,MAAMC,QAAQL,EAAOG,QACnDH,EAAOG,OAAOG,KAAI,SAAUC,GACjC,OAAOA,EAAKC,MAAQ,IAAMD,EAAKE,OACjC,IAAGC,KAAK,KAGHT,UAAUU,SACnB,CCTe,SAASC,IACtB,OAAQ,iCAAiCC,KAAKd,IAChD,CCCe,SAASe,EAAsB/D,EAASgE,EAAcC,QAC9C,IAAjBD,IACFA,GAAe,QAGO,IAApBC,IACFA,GAAkB,GAGpB,IAAIC,EAAalE,EAAQ+D,wBACrBI,EAAS,EACTC,EAAS,EAETJ,GAAgBrD,EAAcX,KAChCmE,EAASnE,EAAQqE,YAAc,GAAItB,EAAMmB,EAAWI,OAAStE,EAAQqE,aAAmB,EACxFD,EAASpE,EAAQuE,aAAe,GAAIxB,EAAMmB,EAAWM,QAAUxE,EAAQuE,cAAoB,GAG7F,IACIE,GADOhE,EAAUT,GAAWG,EAAUH,GAAWK,QAC3BoE,eAEtBC,GAAoBb,KAAsBI,EAC1CU,GAAKT,EAAW3F,MAAQmG,GAAoBD,EAAiBA,EAAeG,WAAa,IAAMT,EAC/FU,GAAKX,EAAW9B,KAAOsC,GAAoBD,EAAiBA,EAAeK,UAAY,IAAMV,EAC7FE,EAAQJ,EAAWI,MAAQH,EAC3BK,EAASN,EAAWM,OAASJ,EACjC,MAAO,CACLE,MAAOA,EACPE,OAAQA,EACRpC,IAAKyC,EACLvG,MAAOqG,EAAIL
,EACXjG,OAAQwG,EAAIL,EACZjG,KAAMoG,EACNA,EAAGA,EACHE,EAAGA,EAEP,CCrCe,SAASE,EAAc/E,GACpC,IAAIkE,EAAaH,EAAsB/D,GAGnCsE,EAAQtE,EAAQqE,YAChBG,EAASxE,EAAQuE,aAUrB,OARI3B,KAAKoC,IAAId,EAAWI,MAAQA,IAAU,IACxCA,EAAQJ,EAAWI,OAGjB1B,KAAKoC,IAAId,EAAWM,OAASA,IAAW,IAC1CA,EAASN,EAAWM,QAGf,CACLG,EAAG3E,EAAQ4E,WACXC,EAAG7E,EAAQ8E,UACXR,MAAOA,EACPE,OAAQA,EAEZ,CCvBe,SAASS,EAASC,EAAQC,GACvC,IAAIC,EAAWD,EAAME,aAAeF,EAAME,cAE1C,GAAIH,EAAOD,SAASE,GAClB,OAAO,EAEJ,GAAIC,GAAYvE,EAAauE,GAAW,CACzC,IAAIE,EAAOH,EAEX,EAAG,CACD,GAAIG,GAAQJ,EAAOK,WAAWD,GAC5B,OAAO,EAITA,EAAOA,EAAKE,YAAcF,EAAKG,IACjC,OAASH,EACX,CAGF,OAAO,CACT,CCrBe,SAAS,EAAiBtF,GACvC,OAAOG,EAAUH,GAAS0F,iBAAiB1F,EAC7C,CCFe,SAAS2F,EAAe3F,GACrC,MAAO,CAAC,QAAS,KAAM,MAAM4F,QAAQ7F,EAAYC,KAAa,CAChE,CCFe,SAAS6F,EAAmB7F,GAEzC,QAASS,EAAUT,GAAWA,EAAQO,cACtCP,EAAQ8F,WAAazF,OAAOyF,UAAUC,eACxC,CCFe,SAASC,EAAchG,GACpC,MAA6B,SAAzBD,EAAYC,GACPA,EAMPA,EAAQiG,cACRjG,EAAQwF,aACR3E,EAAab,GAAWA,EAAQyF,KAAO,OAEvCI,EAAmB7F,EAGvB,CCVA,SAASkG,EAAoBlG,GAC3B,OAAKW,EAAcX,IACoB,UAAvC,EAAiBA,GAASiC,SAInBjC,EAAQmG,aAHN,IAIX,CAwCe,SAASC,EAAgBpG,GAItC,IAHA,IAAIK,EAASF,EAAUH,GACnBmG,EAAeD,EAAoBlG,GAEhCmG,GAAgBR,EAAeQ,IAA6D,WAA5C,EAAiBA,GAAclE,UACpFkE,EAAeD,EAAoBC,GAGrC,OAAIA,IAA+C,SAA9BpG,EAAYoG,IAA0D,SAA9BpG,EAAYoG,IAAwE,WAA5C,EAAiBA,GAAclE,UAC3H5B,EAGF8F,GAhDT,SAA4BnG,GAC1B,IAAIqG,EAAY,WAAWvC,KAAKd,KAGhC,GAFW,WAAWc,KAAKd,MAEfrC,EAAcX,IAII,UAFX,EAAiBA,GAEnBiC,SACb,OAAO,KAIX,IAAIqE,EAAcN,EAAchG,GAMhC,IAJIa,EAAayF,KACfA,EAAcA,EAAYb,MAGrB9E,EAAc2F,IAAgB,CAAC,OAAQ,QAAQV,QAAQ7F,EAAYuG,IAAgB,GAAG,CAC3F,IAAIC,EAAM,EAAiBD,GAI3B,GAAsB,SAAlBC,EAAIC,WAA4C,SAApBD,EAAIE,aAA0C,UAAhBF,EAAIG,UAAiF,IAA1D,CAAC,YAAa,eAAed,QAAQW,EAAII,aAAsBN,GAAgC,WAAnBE,EAAII,YAA2BN,GAAaE,EAAIK,QAAyB,SAAfL,EAAIK,OACjO,OAAON,EAEPA,EAAcA,EAAYd,UAE9B,CAEA,OAAO,IACT,CAgByBqB,CAAmB7G,IAAYK,CACxD,CCpEe,SAASyG,EAAyB3H,GAC/C,MAAO,CAAC,MAAO,UAAUyG,QAAQzG,IAAc,EAAI,IAAM,GAC3D,CCDO,SAAS4H,EAAOjE,EAAK1E,EAAOyE,GACjC,OAAO,EAAQC,EAAK,EAAQ1E,EAAOyE,GACrC,CCFe,SAASmE,EAAmBC,GACzC,OAAOxJ,OAAOkE,OAAO,CAAC,ECDf,CACLS,I
AAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GDHuC0I,EACjD,CEHe,SAASC,EAAgB9I,EAAOiD,GAC7C,OAAOA,EAAKpC,QAAO,SAAUkI,EAAS5J,GAEpC,OADA4J,EAAQ5J,GAAOa,EACR+I,CACT,GAAG,CAAC,EACN,CC4EA,SACEpG,KAAM,QACNC,SAAS,EACTC,MAAO,OACPC,GApEF,SAAeC,GACb,IAAIiG,EAEAhG,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZmB,EAAUf,EAAKe,QACfmF,EAAejG,EAAME,SAASgB,MAC9BgF,EAAgBlG,EAAMmG,cAAcD,cACpCE,EAAgB9E,EAAiBtB,EAAMjC,WACvCsI,EAAOX,EAAyBU,GAEhCE,EADa,CAACnJ,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAClC,SAAW,QAElC,GAAKH,GAAiBC,EAAtB,CAIA,IAAIL,EAxBgB,SAAyBU,EAASvG,GAItD,OAAO4F,EAAsC,iBAH7CW,EAA6B,mBAAZA,EAAyBA,EAAQlK,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CAC/EzI,UAAWiC,EAAMjC,aACbwI,GACkDA,EAAUT,EAAgBS,EAASlJ,GAC7F,CAmBsBoJ,CAAgB3F,EAAQyF,QAASvG,GACjD0G,EAAY/C,EAAcsC,GAC1BU,EAAmB,MAATN,EAAe,EAAMlJ,EAC/ByJ,EAAmB,MAATP,EAAepJ,EAASC,EAClC2J,EAAU7G,EAAMwG,MAAM7I,UAAU2I,GAAOtG,EAAMwG,MAAM7I,UAAU0I,GAAQH,EAAcG,GAAQrG,EAAMwG,MAAM9I,OAAO4I,GAC9GQ,EAAYZ,EAAcG,GAAQrG,EAAMwG,MAAM7I,UAAU0I,GACxDU,EAAoB/B,EAAgBiB,GACpCe,EAAaD,EAA6B,MAATV,EAAeU,EAAkBE,cAAgB,EAAIF,EAAkBG,aAAe,EAAI,EAC3HC,EAAoBN,EAAU,EAAIC,EAAY,EAG9CpF,EAAMmE,EAAcc,GACpBlF,EAAMuF,EAAaN,EAAUJ,GAAOT,EAAce,GAClDQ,EAASJ,EAAa,EAAIN,EAAUJ,GAAO,EAAIa,EAC/CE,EAAS1B,EAAOjE,EAAK0F,EAAQ3F,GAE7B6F,EAAWjB,EACfrG,EAAMmG,cAAcxG,KAASqG,EAAwB,CAAC,GAAyBsB,GAAYD,EAAQrB,EAAsBuB,aAAeF,EAASD,EAAQpB,EAnBzJ,CAoBF,EAkCEtF,OAhCF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MAEdwH,EADU7G,EAAMG,QACWlC,QAC3BqH,OAAoC,IAArBuB,EAA8B,sBAAwBA,EAErD,MAAhBvB,IAKwB,iBAAjBA,IACTA,EAAejG,EAAME,SAASxC,OAAO+J,cAAcxB,MAOhDpC,EAAS7D,EAAME,SAASxC,OAAQuI,KAIrCjG,EAAME,SAASgB,MAAQ+E,EACzB,EASE5E,SAAU,CAAC,iBACXqG,iBAAkB,CAAC,oBCxFN,SAASC,EAAa5J,GACnC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCOA,IAAIqG,EAAa,CACf5G,IAAK,OACL9D,MAAO,OACPD,OAAQ,OACRE,KAAM,QAeD,SAAS0K,GAAYlH,GAC1B,IAAImH,EAEApK,EAASiD,EAAMjD,OACfqK,EAAapH,EAAMoH,WACnBhK,EAAY4C,EAAM5C,UAClBiK,EAAYrH,EAAMqH,UAClBC,EAAUtH,EAAMsH,QAChBpH,EAAWF,EAAME,SACjBqH,EAAkBvH,EAAMuH,gBACxBC,EAAWxH,EAAMwH,SACjBC,EAAezH,EAAMyH,aACrBC,EAAU1H,EAAM0H,QAChBC,EAAaL,EAAQ1E,EACrBA,OAAmB,IAAf+E,EAAwB,EAAI
A,EAChCC,EAAaN,EAAQxE,EACrBA,OAAmB,IAAf8E,EAAwB,EAAIA,EAEhCC,EAAgC,mBAAjBJ,EAA8BA,EAAa,CAC5D7E,EAAGA,EACHE,IACG,CACHF,EAAGA,EACHE,GAGFF,EAAIiF,EAAMjF,EACVE,EAAI+E,EAAM/E,EACV,IAAIgF,EAAOR,EAAQrL,eAAe,KAC9B8L,EAAOT,EAAQrL,eAAe,KAC9B+L,EAAQxL,EACRyL,EAAQ,EACRC,EAAM5J,OAEV,GAAIkJ,EAAU,CACZ,IAAIpD,EAAeC,EAAgBtH,GAC/BoL,EAAa,eACbC,EAAY,cAEZhE,IAAiBhG,EAAUrB,IAGmB,WAA5C,EAFJqH,EAAeN,EAAmB/G,IAECmD,UAAsC,aAAbA,IAC1DiI,EAAa,eACbC,EAAY,gBAOZhL,IAAc,IAAQA,IAAcZ,GAAQY,IAAcb,IAAU8K,IAAczK,KACpFqL,EAAQ3L,EAGRwG,IAFc4E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeD,OACzF2B,EAAa+D,IACEf,EAAW3E,OAC1BK,GAAKyE,EAAkB,GAAK,GAG1BnK,IAAcZ,IAASY,IAAc,GAAOA,IAAcd,GAAW+K,IAAczK,KACrFoL,EAAQzL,EAGRqG,IAFc8E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeH,MACzF6B,EAAagE,IACEhB,EAAW7E,MAC1BK,GAAK2E,EAAkB,GAAK,EAEhC,CAEA,IAgBMc,EAhBFC,EAAe5M,OAAOkE,OAAO,CAC/BM,SAAUA,GACTsH,GAAYP,GAEXsB,GAAyB,IAAjBd,EAlFd,SAA2BrI,EAAM8I,GAC/B,IAAItF,EAAIxD,EAAKwD,EACTE,EAAI1D,EAAK0D,EACT0F,EAAMN,EAAIO,kBAAoB,EAClC,MAAO,CACL7F,EAAG5B,EAAM4B,EAAI4F,GAAOA,GAAO,EAC3B1F,EAAG9B,EAAM8B,EAAI0F,GAAOA,GAAO,EAE/B,CA0EsCE,CAAkB,CACpD9F,EAAGA,EACHE,GACC1E,EAAUrB,IAAW,CACtB6F,EAAGA,EACHE,GAMF,OAHAF,EAAI2F,EAAM3F,EACVE,EAAIyF,EAAMzF,EAENyE,EAGK7L,OAAOkE,OAAO,CAAC,EAAG0I,IAAeD,EAAiB,CAAC,GAAkBJ,GAASF,EAAO,IAAM,GAAIM,EAAeL,GAASF,EAAO,IAAM,GAAIO,EAAe5D,WAAayD,EAAIO,kBAAoB,IAAM,EAAI,aAAe7F,EAAI,OAASE,EAAI,MAAQ,eAAiBF,EAAI,OAASE,EAAI,SAAUuF,IAG5R3M,OAAOkE,OAAO,CAAC,EAAG0I,IAAenB,EAAkB,CAAC,GAAmBc,GAASF,EAAOjF,EAAI,KAAO,GAAIqE,EAAgBa,GAASF,EAAOlF,EAAI,KAAO,GAAIuE,EAAgB1C,UAAY,GAAI0C,GAC9L,CA4CA,UACEnI,KAAM,gBACNC,SAAS,EACTC,MAAO,cACPC,GA9CF,SAAuBwJ,GACrB,IAAItJ,EAAQsJ,EAAMtJ,MACdc,EAAUwI,EAAMxI,QAChByI,EAAwBzI,EAAQoH,gBAChCA,OAA4C,IAA1BqB,GAA0CA,EAC5DC,EAAoB1I,EAAQqH,SAC5BA,OAAiC,IAAtBqB,GAAsCA,EACjDC,EAAwB3I,EAAQsH,aAChCA,OAAyC,IAA1BqB,GAA0CA,EACzDR,EAAe,CACjBlL,UAAWuD,EAAiBtB,EAAMjC,WAClCiK,UAAWL,EAAa3H,EAAMjC,WAC9BL,OAAQsC,EAAME,SAASxC,OACvBqK,WAAY/H,EAAMwG,MAAM9I,OACxBwK,gBAAiBA,EACjBG,QAAoC,UAA3BrI,EAAMc,QAAQC,UAGgB,MAArCf,E
AAMmG,cAAcD,gBACtBlG,EAAMK,OAAO3C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAO3C,OAAQmK,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACvGhB,QAASjI,EAAMmG,cAAcD,cAC7BrF,SAAUb,EAAMc,QAAQC,SACxBoH,SAAUA,EACVC,aAAcA,OAIe,MAA7BpI,EAAMmG,cAAcjF,QACtBlB,EAAMK,OAAOa,MAAQ7E,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAOa,MAAO2G,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACrGhB,QAASjI,EAAMmG,cAAcjF,MAC7BL,SAAU,WACVsH,UAAU,EACVC,aAAcA,OAIlBpI,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,wBAAyBsC,EAAMjC,WAEnC,EAQE2L,KAAM,CAAC,GCrKT,IAAIC,GAAU,CACZA,SAAS,GAsCX,UACEhK,KAAM,iBACNC,SAAS,EACTC,MAAO,QACPC,GAAI,WAAe,EACnBY,OAxCF,SAAgBX,GACd,IAAIC,EAAQD,EAAKC,MACb4J,EAAW7J,EAAK6J,SAChB9I,EAAUf,EAAKe,QACf+I,EAAkB/I,EAAQgJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAkBjJ,EAAQkJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7C9K,EAASF,EAAUiB,EAAME,SAASxC,QAClCuM,EAAgB,GAAGjM,OAAOgC,EAAMiK,cAActM,UAAWqC,EAAMiK,cAAcvM,QAYjF,OAVIoM,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaC,iBAAiB,SAAUP,EAASQ,OAAQT,GAC3D,IAGEK,GACF/K,EAAOkL,iBAAiB,SAAUP,EAASQ,OAAQT,IAG9C,WACDG,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaG,oBAAoB,SAAUT,EAASQ,OAAQT,GAC9D,IAGEK,GACF/K,EAAOoL,oBAAoB,SAAUT,EAASQ,OAAQT,GAE1D,CACF,EASED,KAAM,CAAC,GC/CT,IAAIY,GAAO,CACTnN,KAAM,QACND,MAAO,OACPD,OAAQ,MACR+D,IAAK,UAEQ,SAASuJ,GAAqBxM,GAC3C,OAAOA,EAAUyM,QAAQ,0BAA0B,SAAUC,GAC3D,OAAOH,GAAKG,EACd,GACF,CCVA,IAAI,GAAO,CACTnN,MAAO,MACPC,IAAK,SAEQ,SAASmN,GAA8B3M,GACpD,OAAOA,EAAUyM,QAAQ,cAAc,SAAUC,GAC/C,OAAO,GAAKA,EACd,GACF,CCPe,SAASE,GAAgB3L,GACtC,IAAI6J,EAAM9J,EAAUC,GAGpB,MAAO,CACL4L,WAHe/B,EAAIgC,YAInBC,UAHcjC,EAAIkC,YAKtB,CCNe,SAASC,GAAoBpM,GAQ1C,OAAO+D,EAAsB8B,EAAmB7F,IAAUzB,KAAOwN,GAAgB/L,GAASgM,UAC5F,CCXe,SAASK,GAAerM,GAErC,IAAIsM,EAAoB,EAAiBtM,GACrCuM,EAAWD,EAAkBC,SAC7BC,EAAYF,EAAkBE,UAC9BC,EAAYH,EAAkBG,UAElC,MAAO,6BAA6B3I,KAAKyI,EAAWE,EAAYD,EAClE,CCLe,SAASE,GAAgBtM,GACtC,MAAI,CAAC,OAAQ,OAAQ,aAAawF,QAAQ7F,EAAYK,KAAU,EAEvDA,EAAKG,cAAcoM,KAGxBhM,EAAcP,IAASiM,GAAejM,GACjCA,EAGFsM,GAAgB1G,EAAc5F,GACvC,CCJe,SAASwM,GAAkB5M,EAAS6M,GACjD,IAAIC,OAES,IAATD,IACFA,EAAO,IAGT,IAAIvB,EAAeoB,GAAgB1
M,GAC/B+M,EAASzB,KAAqE,OAAlDwB,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,MACpH1C,EAAM9J,EAAUmL,GAChB0B,EAASD,EAAS,CAAC9C,GAAK7K,OAAO6K,EAAIxF,gBAAkB,GAAI4H,GAAef,GAAgBA,EAAe,IAAMA,EAC7G2B,EAAcJ,EAAKzN,OAAO4N,GAC9B,OAAOD,EAASE,EAChBA,EAAY7N,OAAOwN,GAAkB5G,EAAcgH,IACrD,CCzBe,SAASE,GAAiBC,GACvC,OAAO1P,OAAOkE,OAAO,CAAC,EAAGwL,EAAM,CAC7B5O,KAAM4O,EAAKxI,EACXvC,IAAK+K,EAAKtI,EACVvG,MAAO6O,EAAKxI,EAAIwI,EAAK7I,MACrBjG,OAAQ8O,EAAKtI,EAAIsI,EAAK3I,QAE1B,CCqBA,SAAS4I,GAA2BpN,EAASqN,EAAgBlL,GAC3D,OAAOkL,IAAmBxO,EAAWqO,GCzBxB,SAAyBlN,EAASmC,GAC/C,IAAI8H,EAAM9J,EAAUH,GAChBsN,EAAOzH,EAAmB7F,GAC1ByE,EAAiBwF,EAAIxF,eACrBH,EAAQgJ,EAAKhF,YACb9D,EAAS8I,EAAKjF,aACd1D,EAAI,EACJE,EAAI,EAER,GAAIJ,EAAgB,CAClBH,EAAQG,EAAeH,MACvBE,EAASC,EAAeD,OACxB,IAAI+I,EAAiB1J,KAEjB0J,IAAmBA,GAA+B,UAAbpL,KACvCwC,EAAIF,EAAeG,WACnBC,EAAIJ,EAAeK,UAEvB,CAEA,MAAO,CACLR,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EAAIyH,GAAoBpM,GAC3B6E,EAAGA,EAEP,CDDwD2I,CAAgBxN,EAASmC,IAAa1B,EAAU4M,GAdxG,SAAoCrN,EAASmC,GAC3C,IAAIgL,EAAOpJ,EAAsB/D,GAAS,EAAoB,UAAbmC,GASjD,OARAgL,EAAK/K,IAAM+K,EAAK/K,IAAMpC,EAAQyN,UAC9BN,EAAK5O,KAAO4O,EAAK5O,KAAOyB,EAAQ0N,WAChCP,EAAK9O,OAAS8O,EAAK/K,IAAMpC,EAAQqI,aACjC8E,EAAK7O,MAAQ6O,EAAK5O,KAAOyB,EAAQsI,YACjC6E,EAAK7I,MAAQtE,EAAQsI,YACrB6E,EAAK3I,OAASxE,EAAQqI,aACtB8E,EAAKxI,EAAIwI,EAAK5O,KACd4O,EAAKtI,EAAIsI,EAAK/K,IACP+K,CACT,CAG0HQ,CAA2BN,EAAgBlL,GAAY+K,GEtBlK,SAAyBlN,GACtC,IAAI8M,EAEAQ,EAAOzH,EAAmB7F,GAC1B4N,EAAY7B,GAAgB/L,GAC5B2M,EAA0D,OAAlDG,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,KAChGrI,EAAQ,EAAIgJ,EAAKO,YAAaP,EAAKhF,YAAaqE,EAAOA,EAAKkB,YAAc,EAAGlB,EAAOA,EAAKrE,YAAc,GACvG9D,EAAS,EAAI8I,EAAKQ,aAAcR,EAAKjF,aAAcsE,EAAOA,EAAKmB,aAAe,EAAGnB,EAAOA,EAAKtE,aAAe,GAC5G1D,GAAKiJ,EAAU5B,WAAaI,GAAoBpM,GAChD6E,GAAK+I,EAAU1B,UAMnB,MAJiD,QAA7C,EAAiBS,GAAQW,GAAMS,YACjCpJ,GAAK,EAAI2I,EAAKhF,YAAaqE,EAAOA,EAAKrE,YAAc,GAAKhE,GAGrD,CACLA,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EACHE,EAAGA,EAEP,CFCkMmJ,CAAgBnI,EAAmB7F,IACrO,CG1Be,SAASiO,GAAe9M,GACrC,IAOIkI,EAPAtK,EAAYoC,EAAKpC,UACjBiB,EAAUmB,EAAKnB,QACfb,EAAYgC,EAAKhC,UACjBqI,EAAgBrI,EAAYuD,EAA
iBvD,GAAa,KAC1DiK,EAAYjK,EAAY4J,EAAa5J,GAAa,KAClD+O,EAAUnP,EAAU4F,EAAI5F,EAAUuF,MAAQ,EAAItE,EAAQsE,MAAQ,EAC9D6J,EAAUpP,EAAU8F,EAAI9F,EAAUyF,OAAS,EAAIxE,EAAQwE,OAAS,EAGpE,OAAQgD,GACN,KAAK,EACH6B,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI7E,EAAQwE,QAE3B,MAEF,KAAKnG,EACHgL,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI9F,EAAUyF,QAE7B,MAEF,KAAKlG,EACH+K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI5F,EAAUuF,MAC3BO,EAAGsJ,GAEL,MAEF,KAAK5P,EACH8K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI3E,EAAQsE,MACzBO,EAAGsJ,GAEL,MAEF,QACE9E,EAAU,CACR1E,EAAG5F,EAAU4F,EACbE,EAAG9F,EAAU8F,GAInB,IAAIuJ,EAAW5G,EAAgBV,EAAyBU,GAAiB,KAEzE,GAAgB,MAAZ4G,EAAkB,CACpB,IAAI1G,EAAmB,MAAb0G,EAAmB,SAAW,QAExC,OAAQhF,GACN,KAAK1K,EACH2K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAC7E,MAEF,KAAK/I,EACH0K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAKnF,CAEA,OAAO2B,CACT,CC3De,SAASgF,GAAejN,EAAOc,QAC5B,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACXqM,EAAqBD,EAASnP,UAC9BA,OAAmC,IAAvBoP,EAAgCnN,EAAMjC,UAAYoP,EAC9DC,EAAoBF,EAASnM,SAC7BA,OAAiC,IAAtBqM,EAA+BpN,EAAMe,SAAWqM,EAC3DC,EAAoBH,EAASI,SAC7BA,OAAiC,IAAtBD,EAA+B7P,EAAkB6P,EAC5DE,EAAwBL,EAASM,aACjCA,OAAyC,IAA1BD,EAAmC9P,EAAW8P,EAC7DE,EAAwBP,EAASQ,eACjCA,OAA2C,IAA1BD,EAAmC/P,EAAS+P,EAC7DE,EAAuBT,EAASU,YAChCA,OAAuC,IAAzBD,GAA0CA,EACxDE,EAAmBX,EAAS3G,QAC5BA,OAA+B,IAArBsH,EAA8B,EAAIA,EAC5ChI,EAAgBD,EAAsC,iBAAZW,EAAuBA,EAAUT,EAAgBS,EAASlJ,IACpGyQ,EAAaJ,IAAmBhQ,EAASC,EAAYD,EACrDqK,EAAa/H,EAAMwG,MAAM9I,OACzBkB,EAAUoB,EAAME,SAAS0N,EAAcE,EAAaJ,GACpDK,EJkBS,SAAyBnP,EAAS0O,EAAUE,EAAczM,GACvE,IAAIiN,EAAmC,oBAAbV,EAlB5B,SAA4B1O,GAC1B,IAAIpB,EAAkBgO,GAAkB5G,EAAchG,IAElDqP,EADoB,CAAC,WAAY,SAASzJ,QAAQ,EAAiB5F,GAASiC,WAAa,GACnDtB,EAAcX,GAAWoG,EAAgBpG,GAAWA,EAE9F,OAAKS,EAAU4O,GAKRzQ,EAAgBgI,QAAO,SAAUyG,GACtC,OAAO5M,EAAU4M,IAAmBpI,EAASoI,EAAgBgC,IAAmD,SAAhCtP,EAAYsN,EAC9F,IANS,EAOX,CAK6DiC,CAAmBtP,GAAW,GAAGZ,OAAOsP,GAC/F9P,EAAkB,GAAGQ,OAAOgQ,EAAqB,CAACR,IAClDW,EAAsB3Q,EAAgB,GACtC4Q,EAAe5Q,EAAgBK,QAAO,SAAUwQ,EAASpC,GAC3D,IAAIF,EAAOC,GAA2BpN,EAASqN,EAAgBlL,GAK/D,OAJAsN,EAAQ
rN,IAAM,EAAI+K,EAAK/K,IAAKqN,EAAQrN,KACpCqN,EAAQnR,MAAQ,EAAI6O,EAAK7O,MAAOmR,EAAQnR,OACxCmR,EAAQpR,OAAS,EAAI8O,EAAK9O,OAAQoR,EAAQpR,QAC1CoR,EAAQlR,KAAO,EAAI4O,EAAK5O,KAAMkR,EAAQlR,MAC/BkR,CACT,GAAGrC,GAA2BpN,EAASuP,EAAqBpN,IAK5D,OAJAqN,EAAalL,MAAQkL,EAAalR,MAAQkR,EAAajR,KACvDiR,EAAahL,OAASgL,EAAanR,OAASmR,EAAapN,IACzDoN,EAAa7K,EAAI6K,EAAajR,KAC9BiR,EAAa3K,EAAI2K,EAAapN,IACvBoN,CACT,CInC2BE,CAAgBjP,EAAUT,GAAWA,EAAUA,EAAQ2P,gBAAkB9J,EAAmBzE,EAAME,SAASxC,QAAS4P,EAAUE,EAAczM,GACjKyN,EAAsB7L,EAAsB3C,EAAME,SAASvC,WAC3DuI,EAAgB2G,GAAe,CACjClP,UAAW6Q,EACX5P,QAASmJ,EACThH,SAAU,WACVhD,UAAWA,IAET0Q,EAAmB3C,GAAiBzP,OAAOkE,OAAO,CAAC,EAAGwH,EAAY7B,IAClEwI,EAAoBhB,IAAmBhQ,EAAS+Q,EAAmBD,EAGnEG,EAAkB,CACpB3N,IAAK+M,EAAmB/M,IAAM0N,EAAkB1N,IAAM6E,EAAc7E,IACpE/D,OAAQyR,EAAkBzR,OAAS8Q,EAAmB9Q,OAAS4I,EAAc5I,OAC7EE,KAAM4Q,EAAmB5Q,KAAOuR,EAAkBvR,KAAO0I,EAAc1I,KACvED,MAAOwR,EAAkBxR,MAAQ6Q,EAAmB7Q,MAAQ2I,EAAc3I,OAExE0R,EAAa5O,EAAMmG,cAAckB,OAErC,GAAIqG,IAAmBhQ,GAAUkR,EAAY,CAC3C,IAAIvH,EAASuH,EAAW7Q,GACxB1B,OAAO4D,KAAK0O,GAAiBxO,SAAQ,SAAUhE,GAC7C,IAAI0S,EAAW,CAAC3R,EAAOD,GAAQuH,QAAQrI,IAAQ,EAAI,GAAK,EACpDkK,EAAO,CAAC,EAAKpJ,GAAQuH,QAAQrI,IAAQ,EAAI,IAAM,IACnDwS,EAAgBxS,IAAQkL,EAAOhB,GAAQwI,CACzC,GACF,CAEA,OAAOF,CACT,CCyEA,UACEhP,KAAM,OACNC,SAAS,EACTC,MAAO,OACPC,GA5HF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KAEhB,IAAIK,EAAMmG,cAAcxG,GAAMmP,MAA9B,CAoCA,IAhCA,IAAIC,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAqCA,EACpDG,EAA8BtO,EAAQuO,mBACtC9I,EAAUzF,EAAQyF,QAClB+G,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtB0B,EAAwBxO,EAAQyO,eAChCA,OAA2C,IAA1BD,GAA0CA,EAC3DE,EAAwB1O,EAAQ0O,sBAChCC,EAAqBzP,EAAMc,QAAQ/C,UACnCqI,EAAgB9E,EAAiBmO,GAEjCJ,EAAqBD,IADHhJ,IAAkBqJ,GACqCF,EAjC/E,SAAuCxR,GACrC,GAAIuD,EAAiBvD,KAAeX,EAClC,MAAO,GAGT,IAAIsS,EAAoBnF,GAAqBxM,GAC7C,MAAO,CAAC2M,GAA8B3M,GAAY2R,EAAmBhF,GAA8BgF,GACrG,CA0B6IC,CAA8BF,GAA3E,CAAClF,GAAqBkF,KAChHG,EAAa,CAACH,GAAoBzR,OAAOqR,GAAoBxR,QAAO,SAAUC,EAAKC,GACrF,OAAOD,EAAIE,OAAOsD,
EAAiBvD,KAAeX,ECvCvC,SAA8B4C,EAAOc,QAClC,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACX/C,EAAYmP,EAASnP,UACrBuP,EAAWJ,EAASI,SACpBE,EAAeN,EAASM,aACxBjH,EAAU2G,EAAS3G,QACnBgJ,EAAiBrC,EAASqC,eAC1BM,EAAwB3C,EAASsC,sBACjCA,OAAkD,IAA1BK,EAAmC,EAAgBA,EAC3E7H,EAAYL,EAAa5J,GACzB6R,EAAa5H,EAAYuH,EAAiB3R,EAAsBA,EAAoB4H,QAAO,SAAUzH,GACvG,OAAO4J,EAAa5J,KAAeiK,CACrC,IAAK3K,EACDyS,EAAoBF,EAAWpK,QAAO,SAAUzH,GAClD,OAAOyR,EAAsBhL,QAAQzG,IAAc,CACrD,IAEiC,IAA7B+R,EAAkBC,SACpBD,EAAoBF,GAItB,IAAII,EAAYF,EAAkBjS,QAAO,SAAUC,EAAKC,GAOtD,OANAD,EAAIC,GAAakP,GAAejN,EAAO,CACrCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,IACRjF,EAAiBvD,IACbD,CACT,GAAG,CAAC,GACJ,OAAOzB,OAAO4D,KAAK+P,GAAWC,MAAK,SAAUC,EAAGC,GAC9C,OAAOH,EAAUE,GAAKF,EAAUG,EAClC,GACF,CDC6DC,CAAqBpQ,EAAO,CACnFjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTgJ,eAAgBA,EAChBC,sBAAuBA,IACpBzR,EACP,GAAG,IACCsS,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzB4S,EAAY,IAAIC,IAChBC,GAAqB,EACrBC,EAAwBb,EAAW,GAE9Bc,EAAI,EAAGA,EAAId,EAAWG,OAAQW,IAAK,CAC1C,IAAI3S,EAAY6R,EAAWc,GAEvBC,EAAiBrP,EAAiBvD,GAElC6S,EAAmBjJ,EAAa5J,KAAeT,EAC/CuT,EAAa,CAAC,EAAK5T,GAAQuH,QAAQmM,IAAmB,EACtDrK,EAAMuK,EAAa,QAAU,SAC7B1F,EAAW8B,GAAejN,EAAO,CACnCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdI,YAAaA,EACbrH,QAASA,IAEPuK,EAAoBD,EAAaD,EAAmB1T,EAAQC,EAAOyT,EAAmB3T,EAAS,EAE/FoT,EAAc/J,GAAOyB,EAAWzB,KAClCwK,EAAoBvG,GAAqBuG,IAG3C,IAAIC,EAAmBxG,GAAqBuG,GACxCE,EAAS,GAUb,GARIhC,GACFgC,EAAOC,KAAK9F,EAASwF,IAAmB,GAGtCxB,GACF6B,EAAOC,KAAK9F,EAAS2F,IAAsB,EAAG3F,EAAS4F,IAAqB,GAG1EC,EAAOE,OAAM,SAAUC,GACzB,OAAOA,CACT,IAAI,CACFV,EAAwB1S,EACxByS,GAAqB,EACrB,KACF,CAEAF,EAAUc,IAAIrT,EAAWiT,EAC3B,CAEA,GAAIR,EAqBF,IAnBA,IAEIa,EAAQ,SAAeC,GACzB,IAAIC,EAAmB3B,EAAW4B,MAAK,SAAUzT,GAC/C,IAAIiT,EAASV,EAAU9T,IAAIuB,GAE3B,GAAIiT,EACF,OAAOA,EAAOS,MAAM,EAAGH,GAAIJ,OAAM,SAAUC,GACzC,OAAOA,CACT,GAEJ,IAEA,GAAII,EAEF,OADAd,EAAwBc,EACjB,OAEX,EAESD,EAnBY/B,EAAiB,EAAI,EAmBZ+B,EAAK,GAGpB,UAFFD,EAAMC,GADmBA,KAOpCtR,EAAMjC,YAAc0S,IACtBzQ,EAAMmG,cAAcxG,GAAMmP,OAAQ,EAClC9O,EAAMjC,UAAY0S,EAClBzQ,EAAM0R,OAAQ,EA5
GhB,CA8GF,EAQEhK,iBAAkB,CAAC,UACnBgC,KAAM,CACJoF,OAAO,IE7IX,SAAS6C,GAAexG,EAAUY,EAAM6F,GAQtC,YAPyB,IAArBA,IACFA,EAAmB,CACjBrO,EAAG,EACHE,EAAG,IAIA,CACLzC,IAAKmK,EAASnK,IAAM+K,EAAK3I,OAASwO,EAAiBnO,EACnDvG,MAAOiO,EAASjO,MAAQ6O,EAAK7I,MAAQ0O,EAAiBrO,EACtDtG,OAAQkO,EAASlO,OAAS8O,EAAK3I,OAASwO,EAAiBnO,EACzDtG,KAAMgO,EAAShO,KAAO4O,EAAK7I,MAAQ0O,EAAiBrO,EAExD,CAEA,SAASsO,GAAsB1G,GAC7B,MAAO,CAAC,EAAKjO,EAAOD,EAAQE,GAAM2U,MAAK,SAAUC,GAC/C,OAAO5G,EAAS4G,IAAS,CAC3B,GACF,CA+BA,UACEpS,KAAM,OACNC,SAAS,EACTC,MAAO,OACP6H,iBAAkB,CAAC,mBACnB5H,GAlCF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZ0Q,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBkU,EAAmB5R,EAAMmG,cAAc6L,gBACvCC,EAAoBhF,GAAejN,EAAO,CAC5C0N,eAAgB,cAEdwE,EAAoBjF,GAAejN,EAAO,CAC5C4N,aAAa,IAEXuE,EAA2BR,GAAeM,EAAmB5B,GAC7D+B,EAAsBT,GAAeO,EAAmBnK,EAAY6J,GACpES,EAAoBR,GAAsBM,GAC1CG,EAAmBT,GAAsBO,GAC7CpS,EAAMmG,cAAcxG,GAAQ,CAC1BwS,yBAA0BA,EAC1BC,oBAAqBA,EACrBC,kBAAmBA,EACnBC,iBAAkBA,GAEpBtS,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,+BAAgC2U,EAChC,sBAAuBC,GAE3B,GCJA,IACE3S,KAAM,SACNC,SAAS,EACTC,MAAO,OACPwB,SAAU,CAAC,iBACXvB,GA5BF,SAAgBa,GACd,IAAIX,EAAQW,EAAMX,MACdc,EAAUH,EAAMG,QAChBnB,EAAOgB,EAAMhB,KACb4S,EAAkBzR,EAAQuG,OAC1BA,OAA6B,IAApBkL,EAA6B,CAAC,EAAG,GAAKA,EAC/C7I,EAAO,EAAW7L,QAAO,SAAUC,EAAKC,GAE1C,OADAD,EAAIC,GA5BD,SAAiCA,EAAWyI,EAAOa,GACxD,IAAIjB,EAAgB9E,EAAiBvD,GACjCyU,EAAiB,CAACrV,EAAM,GAAKqH,QAAQ4B,IAAkB,GAAK,EAAI,EAEhErG,EAAyB,mBAAXsH,EAAwBA,EAAOhL,OAAOkE,OAAO,CAAC,EAAGiG,EAAO,CACxEzI,UAAWA,KACPsJ,EACFoL,EAAW1S,EAAK,GAChB2S,EAAW3S,EAAK,GAIpB,OAFA0S,EAAWA,GAAY,EACvBC,GAAYA,GAAY,GAAKF,EACtB,CAACrV,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAAI,CACjD7C,EAAGmP,EACHjP,EAAGgP,GACD,CACFlP,EAAGkP,EACHhP,EAAGiP,EAEP,CASqBC,CAAwB5U,EAAWiC,EAAMwG,MAAOa,GAC1DvJ,CACT,GAAG,CAAC,GACA8U,EAAwBlJ,EAAK1J,EAAMjC,WACnCwF,EAAIqP,EAAsBrP,EAC1BE,EAAImP,EAAsBnP,EAEW,MAArCzD,EAAMmG,cAAcD,gBACtBlG,EAAMmG,cAAcD,cAAc3C,GAAKA,EACvCvD,EAAMmG,cAAcD,cAAczC,GAAKA,GAGzCzD,EAAMmG,cAAcxG,GAAQ+J,CAC9B,GC1BA,IACE/J,KAAM,gBACNC,SAAS
,EACTC,MAAO,OACPC,GApBF,SAAuBC,GACrB,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KAKhBK,EAAMmG,cAAcxG,GAAQkN,GAAe,CACzClP,UAAWqC,EAAMwG,MAAM7I,UACvBiB,QAASoB,EAAMwG,MAAM9I,OACrBqD,SAAU,WACVhD,UAAWiC,EAAMjC,WAErB,EAQE2L,KAAM,CAAC,GCgHT,IACE/J,KAAM,kBACNC,SAAS,EACTC,MAAO,OACPC,GA/HF,SAAyBC,GACvB,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KACZoP,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAsCA,EACrD3B,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtBrH,EAAUzF,EAAQyF,QAClBsM,EAAkB/R,EAAQgS,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAwBjS,EAAQkS,aAChCA,OAAyC,IAA1BD,EAAmC,EAAIA,EACtD5H,EAAW8B,GAAejN,EAAO,CACnCsN,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTqH,YAAaA,IAEXxH,EAAgB9E,EAAiBtB,EAAMjC,WACvCiK,EAAYL,EAAa3H,EAAMjC,WAC/BkV,GAAmBjL,EACnBgF,EAAWtH,EAAyBU,GACpC8I,ECrCY,MDqCSlC,ECrCH,IAAM,IDsCxB9G,EAAgBlG,EAAMmG,cAAcD,cACpCmK,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBwV,EAA4C,mBAAjBF,EAA8BA,EAAa3W,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CACvGzI,UAAWiC,EAAMjC,aACbiV,EACFG,EAA2D,iBAAtBD,EAAiC,CACxElG,SAAUkG,EACVhE,QAASgE,GACP7W,OAAOkE,OAAO,CAChByM,SAAU,EACVkC,QAAS,GACRgE,GACCE,EAAsBpT,EAAMmG,cAAckB,OAASrH,EAAMmG,cAAckB,OAAOrH,EAAMjC,WAAa,KACjG2L,EAAO,CACTnG,EAAG,EACHE,EAAG,GAGL,GAAKyC,EAAL,CAIA,GAAI8I,EAAe,CACjB,IAAIqE,EAEAC,EAAwB,MAAbtG,EAAmB,EAAM7P,EACpCoW,EAAuB,MAAbvG,EAAmB/P,EAASC,EACtCoJ,EAAmB,MAAb0G,EAAmB,SAAW,QACpC3F,EAASnB,EAAc8G,GACvBtL,EAAM2F,EAAS8D,EAASmI,GACxB7R,EAAM4F,EAAS8D,EAASoI,GACxBC,EAAWV,GAAU/K,EAAWzB,GAAO,EAAI,EAC3CmN,EAASzL,IAAc1K,EAAQ+S,EAAc/J,GAAOyB,EAAWzB,GAC/DoN,EAAS1L,IAAc1K,GAASyK,EAAWzB,IAAQ+J,EAAc/J,GAGjEL,EAAejG,EAAME,SAASgB,MAC9BwF,EAAYoM,GAAU7M,EAAetC,EAAcsC,GAAgB,CACrE/C,MAAO,EACPE,OAAQ,GAENuQ,GAAqB3T,EAAMmG,cAAc,oBAAsBnG,EAAMmG,cAAc,oBAAoBI,QxBhFtG,CACLvF,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GwB6EFyW,GAAkBD,GAAmBL,GACrCO,GAAkBF,GAAmBJ,GAMrCO,GAAWnO,EAAO,EAAG0K,EAAc/J,GAAMI,EAAUJ,IACnDyN,GAAYd,EAAkB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWF,GAAkBT,EAA4BnG,SAAWyG,EAASK,GAAWF,GAAkBT,EAA4BnG,SACxM
gH,GAAYf,GAAmB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWD,GAAkBV,EAA4BnG,SAAW0G,EAASI,GAAWD,GAAkBV,EAA4BnG,SACzMjG,GAAoB/G,EAAME,SAASgB,OAAS8D,EAAgBhF,EAAME,SAASgB,OAC3E+S,GAAelN,GAAiC,MAAbiG,EAAmBjG,GAAkBsF,WAAa,EAAItF,GAAkBuF,YAAc,EAAI,EAC7H4H,GAAwH,OAAjGb,EAA+C,MAAvBD,OAA8B,EAASA,EAAoBpG,IAAqBqG,EAAwB,EAEvJc,GAAY9M,EAAS2M,GAAYE,GACjCE,GAAkBzO,EAAOmN,EAAS,EAAQpR,EAF9B2F,EAAS0M,GAAYG,GAAsBD,IAEKvS,EAAK2F,EAAQyL,EAAS,EAAQrR,EAAK0S,IAAa1S,GAChHyE,EAAc8G,GAAYoH,GAC1B1K,EAAKsD,GAAYoH,GAAkB/M,CACrC,CAEA,GAAI8H,EAAc,CAChB,IAAIkF,GAEAC,GAAyB,MAAbtH,EAAmB,EAAM7P,EAErCoX,GAAwB,MAAbvH,EAAmB/P,EAASC,EAEvCsX,GAAUtO,EAAcgJ,GAExBuF,GAAmB,MAAZvF,EAAkB,SAAW,QAEpCwF,GAAOF,GAAUrJ,EAASmJ,IAE1BK,GAAOH,GAAUrJ,EAASoJ,IAE1BK,IAAuD,IAAxC,CAAC,EAAKzX,GAAMqH,QAAQ4B,GAEnCyO,GAAyH,OAAjGR,GAAgD,MAAvBjB,OAA8B,EAASA,EAAoBlE,IAAoBmF,GAAyB,EAEzJS,GAAaF,GAAeF,GAAOF,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAEzI6F,GAAaH,GAAeJ,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAAUyF,GAE5IK,GAAmBlC,GAAU8B,G1BzH9B,SAAwBlT,EAAK1E,EAAOyE,GACzC,IAAIwT,EAAItP,EAAOjE,EAAK1E,EAAOyE,GAC3B,OAAOwT,EAAIxT,EAAMA,EAAMwT,CACzB,C0BsHoDC,CAAeJ,GAAYN,GAASO,IAAcpP,EAAOmN,EAASgC,GAAaJ,GAAMF,GAAS1B,EAASiC,GAAaJ,IAEpKzO,EAAcgJ,GAAW8F,GACzBtL,EAAKwF,GAAW8F,GAAmBR,EACrC,CAEAxU,EAAMmG,cAAcxG,GAAQ+J,CAvE5B,CAwEF,EAQEhC,iBAAkB,CAAC,WE1HN,SAASyN,GAAiBC,EAAyBrQ,EAAcsD,QAC9D,IAAZA,IACFA,GAAU,GAGZ,ICnBoCrJ,ECJOJ,EFuBvCyW,EAA0B9V,EAAcwF,GACxCuQ,EAAuB/V,EAAcwF,IAf3C,SAAyBnG,GACvB,IAAImN,EAAOnN,EAAQ+D,wBACfI,EAASpB,EAAMoK,EAAK7I,OAAStE,EAAQqE,aAAe,EACpDD,EAASrB,EAAMoK,EAAK3I,QAAUxE,EAAQuE,cAAgB,EAC1D,OAAkB,IAAXJ,GAA2B,IAAXC,CACzB,CAU4DuS,CAAgBxQ,GACtEJ,EAAkBF,EAAmBM,GACrCgH,EAAOpJ,EAAsByS,EAAyBE,EAAsBjN,GAC5EyB,EAAS,CACXc,WAAY,EACZE,UAAW,GAET7C,EAAU,CACZ1E,EAAG,EACHE,EAAG,GAkBL,OAfI4R,IAA4BA,IAA4BhN,MACxB,SAA9B1J,EAAYoG,IAChBkG,GAAetG,MACbmF,GCnCgC9K,EDmCT+F,KClCdhG,EAAUC,IAAUO,EAAcP,GCJxC,CACL4L,YAFyChM,EDQbI,GCNR4L,WACpBE,UAAWlM,EAAQkM,WDGZH,GAAgB3L,IDoCnBO,EAAcwF,KAChBkD,EAAUtF,EAAsBoC,GAAc,IACtCxB,GAAKwB,EAAauH,WAC1BrE,EAAQxE,GAAK
sB,EAAasH,WACjB1H,IACTsD,EAAQ1E,EAAIyH,GAAoBrG,KAI7B,CACLpB,EAAGwI,EAAK5O,KAAO2M,EAAOc,WAAa3C,EAAQ1E,EAC3CE,EAAGsI,EAAK/K,IAAM8I,EAAOgB,UAAY7C,EAAQxE,EACzCP,MAAO6I,EAAK7I,MACZE,OAAQ2I,EAAK3I,OAEjB,CGvDA,SAASoS,GAAMC,GACb,IAAItT,EAAM,IAAIoO,IACVmF,EAAU,IAAIC,IACdC,EAAS,GAKb,SAAS3F,EAAK4F,GACZH,EAAQI,IAAID,EAASlW,MACN,GAAG3B,OAAO6X,EAASxU,UAAY,GAAIwU,EAASnO,kBAAoB,IACtEvH,SAAQ,SAAU4V,GACzB,IAAKL,EAAQM,IAAID,GAAM,CACrB,IAAIE,EAAc9T,EAAI3F,IAAIuZ,GAEtBE,GACFhG,EAAKgG,EAET,CACF,IACAL,EAAO3E,KAAK4E,EACd,CAQA,OAzBAJ,EAAUtV,SAAQ,SAAU0V,GAC1B1T,EAAIiP,IAAIyE,EAASlW,KAAMkW,EACzB,IAiBAJ,EAAUtV,SAAQ,SAAU0V,GACrBH,EAAQM,IAAIH,EAASlW,OAExBsQ,EAAK4F,EAET,IACOD,CACT,CCvBA,IAAIM,GAAkB,CACpBnY,UAAW,SACX0X,UAAW,GACX1U,SAAU,YAGZ,SAASoV,KACP,IAAK,IAAI1B,EAAO2B,UAAUrG,OAAQsG,EAAO,IAAIpU,MAAMwS,GAAO6B,EAAO,EAAGA,EAAO7B,EAAM6B,IAC/ED,EAAKC,GAAQF,UAAUE,GAGzB,OAAQD,EAAKvE,MAAK,SAAUlT,GAC1B,QAASA,GAAoD,mBAAlCA,EAAQ+D,sBACrC,GACF,CAEO,SAAS4T,GAAgBC,QACL,IAArBA,IACFA,EAAmB,CAAC,GAGtB,IAAIC,EAAoBD,EACpBE,EAAwBD,EAAkBE,iBAC1CA,OAA6C,IAA1BD,EAAmC,GAAKA,EAC3DE,EAAyBH,EAAkBI,eAC3CA,OAA4C,IAA3BD,EAAoCV,GAAkBU,EAC3E,OAAO,SAAsBjZ,EAAWD,EAAQoD,QAC9B,IAAZA,IACFA,EAAU+V,GAGZ,ICxC6B/W,EAC3BgX,EDuCE9W,EAAQ,CACVjC,UAAW,SACXgZ,iBAAkB,GAClBjW,QAASzE,OAAOkE,OAAO,CAAC,EAAG2V,GAAiBW,GAC5C1Q,cAAe,CAAC,EAChBjG,SAAU,CACRvC,UAAWA,EACXD,OAAQA,GAEV4C,WAAY,CAAC,EACbD,OAAQ,CAAC,GAEP2W,EAAmB,GACnBC,GAAc,EACdrN,EAAW,CACb5J,MAAOA,EACPkX,WAAY,SAAoBC,GAC9B,IAAIrW,EAAsC,mBAArBqW,EAAkCA,EAAiBnX,EAAMc,SAAWqW,EACzFC,IACApX,EAAMc,QAAUzE,OAAOkE,OAAO,CAAC,EAAGsW,EAAgB7W,EAAMc,QAASA,GACjEd,EAAMiK,cAAgB,CACpBtM,UAAW0B,EAAU1B,GAAa6N,GAAkB7N,GAAaA,EAAU4Q,eAAiB/C,GAAkB7N,EAAU4Q,gBAAkB,GAC1I7Q,OAAQ8N,GAAkB9N,IAI5B,IElE4B+X,EAC9B4B,EFiEMN,EDhCG,SAAwBtB,GAErC,IAAIsB,EAAmBvB,GAAMC,GAE7B,OAAO/W,EAAeb,QAAO,SAAUC,EAAK+B,GAC1C,OAAO/B,EAAIE,OAAO+Y,EAAiBvR,QAAO,SAAUqQ,GAClD,OAAOA,EAAShW,QAAUA,CAC5B,IACF,GAAG,GACL,CCuB+ByX,EElEK7B,EFkEsB,GAAGzX,OAAO2Y,EAAkB3W,EAAMc,QAAQ2U,WEjE9F4B,EAAS5B,EAAU5X,QAAO,SAAUwZ,EAAQE,GAC9C,IAAIC,EAAWH,EAAOE,EAAQ5X,MAK9B
,OAJA0X,EAAOE,EAAQ5X,MAAQ6X,EAAWnb,OAAOkE,OAAO,CAAC,EAAGiX,EAAUD,EAAS,CACrEzW,QAASzE,OAAOkE,OAAO,CAAC,EAAGiX,EAAS1W,QAASyW,EAAQzW,SACrD4I,KAAMrN,OAAOkE,OAAO,CAAC,EAAGiX,EAAS9N,KAAM6N,EAAQ7N,QAC5C6N,EACEF,CACT,GAAG,CAAC,GAEGhb,OAAO4D,KAAKoX,GAAQlV,KAAI,SAAUhG,GACvC,OAAOkb,EAAOlb,EAChB,MF4DM,OAJA6D,EAAM+W,iBAAmBA,EAAiBvR,QAAO,SAAUiS,GACzD,OAAOA,EAAE7X,OACX,IA+FFI,EAAM+W,iBAAiB5W,SAAQ,SAAUJ,GACvC,IAAIJ,EAAOI,EAAKJ,KACZ+X,EAAe3X,EAAKe,QACpBA,OAA2B,IAAjB4W,EAA0B,CAAC,EAAIA,EACzChX,EAASX,EAAKW,OAElB,GAAsB,mBAAXA,EAAuB,CAChC,IAAIiX,EAAYjX,EAAO,CACrBV,MAAOA,EACPL,KAAMA,EACNiK,SAAUA,EACV9I,QAASA,IAKXkW,EAAiB/F,KAAK0G,GAFT,WAAmB,EAGlC,CACF,IA/GS/N,EAASQ,QAClB,EAMAwN,YAAa,WACX,IAAIX,EAAJ,CAIA,IAAIY,EAAkB7X,EAAME,SACxBvC,EAAYka,EAAgBla,UAC5BD,EAASma,EAAgBna,OAG7B,GAAKyY,GAAiBxY,EAAWD,GAAjC,CAKAsC,EAAMwG,MAAQ,CACZ7I,UAAWwX,GAAiBxX,EAAWqH,EAAgBtH,GAAoC,UAA3BsC,EAAMc,QAAQC,UAC9ErD,OAAQiG,EAAcjG,IAOxBsC,EAAM0R,OAAQ,EACd1R,EAAMjC,UAAYiC,EAAMc,QAAQ/C,UAKhCiC,EAAM+W,iBAAiB5W,SAAQ,SAAU0V,GACvC,OAAO7V,EAAMmG,cAAc0P,EAASlW,MAAQtD,OAAOkE,OAAO,CAAC,EAAGsV,EAASnM,KACzE,IAEA,IAAK,IAAIoO,EAAQ,EAAGA,EAAQ9X,EAAM+W,iBAAiBhH,OAAQ+H,IACzD,IAAoB,IAAhB9X,EAAM0R,MAAV,CAMA,IAAIqG,EAAwB/X,EAAM+W,iBAAiBe,GAC/ChY,EAAKiY,EAAsBjY,GAC3BkY,EAAyBD,EAAsBjX,QAC/CoM,OAAsC,IAA3B8K,EAAoC,CAAC,EAAIA,EACpDrY,EAAOoY,EAAsBpY,KAEf,mBAAPG,IACTE,EAAQF,EAAG,CACTE,MAAOA,EACPc,QAASoM,EACTvN,KAAMA,EACNiK,SAAUA,KACN5J,EAdR,MAHEA,EAAM0R,OAAQ,EACdoG,GAAS,CAzBb,CATA,CAqDF,EAGA1N,QC1I2BtK,ED0IV,WACf,OAAO,IAAImY,SAAQ,SAAUC,GAC3BtO,EAASgO,cACTM,EAAQlY,EACV,GACF,EC7IG,WAUL,OATK8W,IACHA,EAAU,IAAImB,SAAQ,SAAUC,GAC9BD,QAAQC,UAAUC,MAAK,WACrBrB,OAAUsB,EACVF,EAAQpY,IACV,GACF,KAGKgX,CACT,GDmIIuB,QAAS,WACPjB,IACAH,GAAc,CAChB,GAGF,IAAKd,GAAiBxY,EAAWD,GAC/B,OAAOkM,EAmCT,SAASwN,IACPJ,EAAiB7W,SAAQ,SAAUL,GACjC,OAAOA,GACT,IACAkX,EAAmB,EACrB,CAEA,OAvCApN,EAASsN,WAAWpW,GAASqX,MAAK,SAAUnY,IACrCiX,GAAenW,EAAQwX,eAC1BxX,EAAQwX,cAActY,EAE1B,IAmCO4J,CACT,CACF,CACO,IAAI2O,GAA4BhC,KGzLnC,GAA4BA,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,EAAa,GAAQ,GAAM,GAA
iB,EAAO,MCJrH,GAA4BjC,GAAgB,CAC9CI,iBAFqB,CAAC6B,GAAgB,GAAe,GAAe,KCatE,MAAMC,GAAa,IAAIlI,IACjBmI,GAAO,CACX,GAAAtH,CAAIxS,EAASzC,EAAKyN,GACX6O,GAAWzC,IAAIpX,IAClB6Z,GAAWrH,IAAIxS,EAAS,IAAI2R,KAE9B,MAAMoI,EAAcF,GAAWjc,IAAIoC,GAI9B+Z,EAAY3C,IAAI7Z,IAA6B,IAArBwc,EAAYC,KAKzCD,EAAYvH,IAAIjV,EAAKyN,GAHnBiP,QAAQC,MAAM,+EAA+E7W,MAAM8W,KAAKJ,EAAY1Y,QAAQ,MAIhI,EACAzD,IAAG,CAACoC,EAASzC,IACPsc,GAAWzC,IAAIpX,IACV6Z,GAAWjc,IAAIoC,GAASpC,IAAIL,IAE9B,KAET,MAAA6c,CAAOpa,EAASzC,GACd,IAAKsc,GAAWzC,IAAIpX,GAClB,OAEF,MAAM+Z,EAAcF,GAAWjc,IAAIoC,GACnC+Z,EAAYM,OAAO9c,GAGM,IAArBwc,EAAYC,MACdH,GAAWQ,OAAOra,EAEtB,GAYIsa,GAAiB,gBAOjBC,GAAgBC,IAChBA,GAAYna,OAAOoa,KAAOpa,OAAOoa,IAAIC,SAEvCF,EAAWA,EAAS5O,QAAQ,iBAAiB,CAAC+O,EAAOC,IAAO,IAAIH,IAAIC,OAAOE,QAEtEJ,GA4CHK,GAAuB7a,IAC3BA,EAAQ8a,cAAc,IAAIC,MAAMT,IAAgB,EAE5C,GAAYU,MACXA,GAA4B,iBAAXA,UAGO,IAAlBA,EAAOC,SAChBD,EAASA,EAAO,SAEgB,IAApBA,EAAOE,UAEjBC,GAAaH,GAEb,GAAUA,GACLA,EAAOC,OAASD,EAAO,GAAKA,EAEf,iBAAXA,GAAuBA,EAAO7J,OAAS,EACzCrL,SAAS+C,cAAc0R,GAAcS,IAEvC,KAEHI,GAAYpb,IAChB,IAAK,GAAUA,IAAgD,IAApCA,EAAQqb,iBAAiBlK,OAClD,OAAO,EAET,MAAMmK,EAAgF,YAA7D5V,iBAAiB1F,GAASub,iBAAiB,cAE9DC,EAAgBxb,EAAQyb,QAAQ,uBACtC,IAAKD,EACH,OAAOF,EAET,GAAIE,IAAkBxb,EAAS,CAC7B,MAAM0b,EAAU1b,EAAQyb,QAAQ,WAChC,GAAIC,GAAWA,EAAQlW,aAAegW,EACpC,OAAO,EAET,GAAgB,OAAZE,EACF,OAAO,CAEX,CACA,OAAOJ,CAAgB,EAEnBK,GAAa3b,IACZA,GAAWA,EAAQkb,WAAaU,KAAKC,gBAGtC7b,EAAQ8b,UAAU7W,SAAS,mBAGC,IAArBjF,EAAQ+b,SACV/b,EAAQ+b,SAEV/b,EAAQgc,aAAa,aAAoD,UAArChc,EAAQic,aAAa,aAE5DC,GAAiBlc,IACrB,IAAK8F,SAASC,gBAAgBoW,aAC5B,OAAO,KAIT,GAAmC,mBAAxBnc,EAAQqF,YAA4B,CAC7C,MAAM+W,EAAOpc,EAAQqF,cACrB,OAAO+W,aAAgBtb,WAAasb,EAAO,IAC7C,CACA,OAAIpc,aAAmBc,WACdd,EAIJA,EAAQwF,WAGN0W,GAAelc,EAAQwF,YAFrB,IAEgC,EAErC6W,GAAO,OAUPC,GAAStc,IACbA,EAAQuE,YAAY,EAGhBgY,GAAY,IACZlc,OAAOmc,SAAW1W,SAAS6G,KAAKqP,aAAa,qBACxC3b,OAAOmc,OAET,KAEHC,GAA4B,GAgB5BC,GAAQ,IAAuC,QAAjC5W,SAASC,gBAAgB4W,IACvCC,GAAqBC,IAhBAC,QAiBN,KACjB,MAAMC,EAAIR,KAEV,GAAIQ,EAAG,CACL,MAAMhc,EAAO8b,EAAOG,KACdC,EAAqBF,EAAE7b,GAAGH,GAChCgc,EAAE7b,GAAGH,GAAQ8b
,EAAOK,gBACpBH,EAAE7b,GAAGH,GAAMoc,YAAcN,EACzBE,EAAE7b,GAAGH,GAAMqc,WAAa,KACtBL,EAAE7b,GAAGH,GAAQkc,EACNJ,EAAOK,gBAElB,GA5B0B,YAAxBpX,SAASuX,YAENZ,GAA0BtL,QAC7BrL,SAASyF,iBAAiB,oBAAoB,KAC5C,IAAK,MAAMuR,KAAYL,GACrBK,GACF,IAGJL,GAA0BpK,KAAKyK,IAE/BA,GAkBA,EAEEQ,GAAU,CAACC,EAAkB9F,EAAO,GAAI+F,EAAeD,IACxB,mBAArBA,EAAkCA,KAAoB9F,GAAQ+F,EAExEC,GAAyB,CAACX,EAAUY,EAAmBC,GAAoB,KAC/E,IAAKA,EAEH,YADAL,GAAQR,GAGV,MACMc,EAhKiC5d,KACvC,IAAKA,EACH,OAAO,EAIT,IAAI,mBACF6d,EAAkB,gBAClBC,GACEzd,OAAOqF,iBAAiB1F,GAC5B,MAAM+d,EAA0BC,OAAOC,WAAWJ,GAC5CK,EAAuBF,OAAOC,WAAWH,GAG/C,OAAKC,GAA4BG,GAKjCL,EAAqBA,EAAmBlb,MAAM,KAAK,GACnDmb,EAAkBA,EAAgBnb,MAAM,KAAK,GAtDf,KAuDtBqb,OAAOC,WAAWJ,GAAsBG,OAAOC,WAAWH,KANzD,CAMoG,EA2IpFK,CAAiCT,GADlC,EAExB,IAAIU,GAAS,EACb,MAAMC,EAAU,EACdrR,aAEIA,IAAW0Q,IAGfU,GAAS,EACTV,EAAkBjS,oBAAoB6O,GAAgB+D,GACtDf,GAAQR,GAAS,EAEnBY,EAAkBnS,iBAAiB+O,GAAgB+D,GACnDC,YAAW,KACJF,GACHvD,GAAqB6C,EACvB,GACCE,EAAiB,EAYhBW,GAAuB,CAAC1R,EAAM2R,EAAeC,EAAeC,KAChE,MAAMC,EAAa9R,EAAKsE,OACxB,IAAI+H,EAAQrM,EAAKjH,QAAQ4Y,GAIzB,OAAe,IAAXtF,GACMuF,GAAiBC,EAAiB7R,EAAK8R,EAAa,GAAK9R,EAAK,IAExEqM,GAASuF,EAAgB,GAAK,EAC1BC,IACFxF,GAASA,EAAQyF,GAAcA,GAE1B9R,EAAKjK,KAAKC,IAAI,EAAGD,KAAKE,IAAIoW,EAAOyF,EAAa,KAAI,EAerDC,GAAiB,qBACjBC,GAAiB,OACjBC,GAAgB,SAChBC,GAAgB,CAAC,EACvB,IAAIC,GAAW,EACf,MAAMC,GAAe,CACnBC,WAAY,YACZC,WAAY,YAERC,GAAe,IAAIrI,IAAI,CAAC,QAAS,WAAY,UAAW,YAAa,cAAe,aAAc,iBAAkB,YAAa,WAAY,YAAa,cAAe,YAAa,UAAW,WAAY,QAAS,oBAAqB,aAAc,YAAa,WAAY,cAAe,cAAe,cAAe,YAAa,eAAgB,gBAAiB,eAAgB,gBAAiB,aAAc,QAAS,OAAQ,SAAU,QAAS,SAAU,SAAU,UAAW,WAAY,OAAQ,SAAU,eAAgB,SAAU,OAAQ,mBAAoB,mBAAoB,QAAS,QAAS,WAM/lB,SAASsI,GAAarf,EAASsf,GAC7B,OAAOA,GAAO,GAAGA,MAAQN,QAAgBhf,EAAQgf,UAAYA,IAC/D,CACA,SAASO,GAAiBvf,GACxB,MAAMsf,EAAMD,GAAarf,GAGzB,OAFAA,EAAQgf,SAAWM,EACnBP,GAAcO,GAAOP,GAAcO,IAAQ,CAAC,EACrCP,GAAcO,EACvB,CAiCA,SAASE,GAAYC,EAAQC,EAAUC,EAAqB,MAC1D,OAAOliB,OAAOmiB,OAAOH,GAAQ7M,MAAKiN,GAASA,EAAMH,WAAaA,GAAYG,EAAMF,qBAAuBA,GACzG,CACA,SAASG,GAAoBC,EAAmB1B,EAAS2B,GACvD,MAAMC,EAAiC,iBAAZ5B,EAErBqB,EAAWO,EAAcD,EAAqB3B,GAA
W2B,EAC/D,IAAIE,EAAYC,GAAaJ,GAI7B,OAHKX,GAAahI,IAAI8I,KACpBA,EAAYH,GAEP,CAACE,EAAaP,EAAUQ,EACjC,CACA,SAASE,GAAWpgB,EAAS+f,EAAmB1B,EAAS2B,EAAoBK,GAC3E,GAAiC,iBAAtBN,IAAmC/f,EAC5C,OAEF,IAAKigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GAIzF,GAAID,KAAqBd,GAAc,CACrC,MAAMqB,EAAepf,GACZ,SAAU2e,GACf,IAAKA,EAAMU,eAAiBV,EAAMU,gBAAkBV,EAAMW,iBAAmBX,EAAMW,eAAevb,SAAS4a,EAAMU,eAC/G,OAAOrf,EAAGjD,KAAKwiB,KAAMZ,EAEzB,EAEFH,EAAWY,EAAaZ,EAC1B,CACA,MAAMD,EAASF,GAAiBvf,GAC1B0gB,EAAWjB,EAAOS,KAAeT,EAAOS,GAAa,CAAC,GACtDS,EAAmBnB,GAAYkB,EAAUhB,EAAUO,EAAc5B,EAAU,MACjF,GAAIsC,EAEF,YADAA,EAAiBN,OAASM,EAAiBN,QAAUA,GAGvD,MAAMf,EAAMD,GAAaK,EAAUK,EAAkBnU,QAAQgT,GAAgB,KACvE1d,EAAK+e,EA5Db,SAAoCjgB,EAASwa,EAAUtZ,GACrD,OAAO,SAASmd,EAAQwB,GACtB,MAAMe,EAAc5gB,EAAQ6gB,iBAAiBrG,GAC7C,IAAK,IAAI,OACPxN,GACE6S,EAAO7S,GAAUA,IAAWyT,KAAMzT,EAASA,EAAOxH,WACpD,IAAK,MAAMsb,KAAcF,EACvB,GAAIE,IAAe9T,EASnB,OANA+T,GAAWlB,EAAO,CAChBW,eAAgBxT,IAEdqR,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAM1G,EAAUtZ,GAE3CA,EAAGigB,MAAMnU,EAAQ,CAAC6S,GAG/B,CACF,CAwC2BuB,CAA2BphB,EAASqe,EAASqB,GAvExE,SAA0B1f,EAASkB,GACjC,OAAO,SAASmd,EAAQwB,GAOtB,OANAkB,GAAWlB,EAAO,CAChBW,eAAgBxgB,IAEdqe,EAAQgC,QACVW,GAAaC,IAAIjhB,EAAS6f,EAAMqB,KAAMhgB,GAEjCA,EAAGigB,MAAMnhB,EAAS,CAAC6f,GAC5B,CACF,CA6DoFwB,CAAiBrhB,EAAS0f,GAC5Gxe,EAAGye,mBAAqBM,EAAc5B,EAAU,KAChDnd,EAAGwe,SAAWA,EACdxe,EAAGmf,OAASA,EACZnf,EAAG8d,SAAWM,EACdoB,EAASpB,GAAOpe,EAChBlB,EAAQuL,iBAAiB2U,EAAWhf,EAAI+e,EAC1C,CACA,SAASqB,GAActhB,EAASyf,EAAQS,EAAW7B,EAASsB,GAC1D,MAAMze,EAAKse,GAAYC,EAAOS,GAAY7B,EAASsB,GAC9Cze,IAGLlB,EAAQyL,oBAAoByU,EAAWhf,EAAIqgB,QAAQ5B,WAC5CF,EAAOS,GAAWhf,EAAG8d,UAC9B,CACA,SAASwC,GAAyBxhB,EAASyf,EAAQS,EAAWuB,GAC5D,MAAMC,EAAoBjC,EAAOS,IAAc,CAAC,EAChD,IAAK,MAAOyB,EAAY9B,KAAUpiB,OAAOmkB,QAAQF,GAC3CC,EAAWE,SAASJ,IACtBH,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAGtE,CACA,SAASQ,GAAaN,GAGpB,OADAA,EAAQA,EAAMjU,QAAQiT,GAAgB,IAC/BI,GAAaY,IAAUA,CAChC,CACA,MAAMmB,GAAe,CACnB,EAAAc,CAAG9hB,EAAS6f,EAAOxB,EAAS2B,GAC1BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAA+B,C
AAI/hB,EAAS6f,EAAOxB,EAAS2B,GAC3BI,GAAWpgB,EAAS6f,EAAOxB,EAAS2B,GAAoB,EAC1D,EACA,GAAAiB,CAAIjhB,EAAS+f,EAAmB1B,EAAS2B,GACvC,GAAiC,iBAAtBD,IAAmC/f,EAC5C,OAEF,MAAOigB,EAAaP,EAAUQ,GAAaJ,GAAoBC,EAAmB1B,EAAS2B,GACrFgC,EAAc9B,IAAcH,EAC5BN,EAASF,GAAiBvf,GAC1B0hB,EAAoBjC,EAAOS,IAAc,CAAC,EAC1C+B,EAAclC,EAAkBmC,WAAW,KACjD,QAAwB,IAAbxC,EAAX,CAQA,GAAIuC,EACF,IAAK,MAAME,KAAgB1kB,OAAO4D,KAAKoe,GACrC+B,GAAyBxhB,EAASyf,EAAQ0C,EAAcpC,EAAkBlN,MAAM,IAGpF,IAAK,MAAOuP,EAAavC,KAAUpiB,OAAOmkB,QAAQF,GAAoB,CACpE,MAAMC,EAAaS,EAAYxW,QAAQkT,GAAe,IACjDkD,IAAejC,EAAkB8B,SAASF,IAC7CL,GAActhB,EAASyf,EAAQS,EAAWL,EAAMH,SAAUG,EAAMF,mBAEpE,CAXA,KAPA,CAEE,IAAKliB,OAAO4D,KAAKqgB,GAAmBvQ,OAClC,OAEFmQ,GAActhB,EAASyf,EAAQS,EAAWR,EAAUO,EAAc5B,EAAU,KAE9E,CAYF,EACA,OAAAgE,CAAQriB,EAAS6f,EAAOpI,GACtB,GAAqB,iBAAVoI,IAAuB7f,EAChC,OAAO,KAET,MAAM+c,EAAIR,KAGV,IAAI+F,EAAc,KACdC,GAAU,EACVC,GAAiB,EACjBC,GAAmB,EAJH5C,IADFM,GAAaN,IAMZ9C,IACjBuF,EAAcvF,EAAEhC,MAAM8E,EAAOpI,GAC7BsF,EAAE/c,GAASqiB,QAAQC,GACnBC,GAAWD,EAAYI,uBACvBF,GAAkBF,EAAYK,gCAC9BF,EAAmBH,EAAYM,sBAEjC,MAAMC,EAAM9B,GAAW,IAAIhG,MAAM8E,EAAO,CACtC0C,UACAO,YAAY,IACVrL,GAUJ,OATIgL,GACFI,EAAIE,iBAEFP,GACFxiB,EAAQ8a,cAAc+H,GAEpBA,EAAIJ,kBAAoBH,GAC1BA,EAAYS,iBAEPF,CACT,GAEF,SAAS9B,GAAWljB,EAAKmlB,EAAO,CAAC,GAC/B,IAAK,MAAOzlB,EAAKa,KAAUX,OAAOmkB,QAAQoB,GACxC,IACEnlB,EAAIN,GAAOa,CACb,CAAE,MAAO6kB,GACPxlB,OAAOC,eAAeG,EAAKN,EAAK,CAC9B2lB,cAAc,EACdtlB,IAAG,IACMQ,GAGb,CAEF,OAAOP,CACT,CASA,SAASslB,GAAc/kB,GACrB,GAAc,SAAVA,EACF,OAAO,EAET,GAAc,UAAVA,EACF,OAAO,EAET,GAAIA,IAAU4f,OAAO5f,GAAOkC,WAC1B,OAAO0d,OAAO5f,GAEhB,GAAc,KAAVA,GAA0B,SAAVA,EAClB,OAAO,KAET,GAAqB,iBAAVA,EACT,OAAOA,EAET,IACE,OAAOglB,KAAKC,MAAMC,mBAAmBllB,GACvC,CAAE,MAAO6kB,GACP,OAAO7kB,CACT,CACF,CACA,SAASmlB,GAAiBhmB,GACxB,OAAOA,EAAIqO,QAAQ,UAAU4X,GAAO,IAAIA,EAAItjB,iBAC9C,CACA,MAAMujB,GAAc,CAClB,gBAAAC,CAAiB1jB,EAASzC,EAAKa,GAC7B4B,EAAQ6B,aAAa,WAAW0hB,GAAiBhmB,KAAQa,EAC3D,EACA,mBAAAulB,CAAoB3jB,EAASzC,GAC3ByC,EAAQ4B,gBAAgB,WAAW2hB,GAAiBhmB,KACtD,EACA,iBAAAqmB,CAAkB5jB,GAChB,IAAKA,EACH,MAAO,CAAC,EAEV,MAAM0B,
EAAa,CAAC,EACdmiB,EAASpmB,OAAO4D,KAAKrB,EAAQ8jB,SAASld,QAAOrJ,GAAOA,EAAI2kB,WAAW,QAAU3kB,EAAI2kB,WAAW,cAClG,IAAK,MAAM3kB,KAAOsmB,EAAQ,CACxB,IAAIE,EAAUxmB,EAAIqO,QAAQ,MAAO,IACjCmY,EAAUA,EAAQC,OAAO,GAAG9jB,cAAgB6jB,EAAQlR,MAAM,EAAGkR,EAAQ5S,QACrEzP,EAAWqiB,GAAWZ,GAAcnjB,EAAQ8jB,QAAQvmB,GACtD,CACA,OAAOmE,CACT,EACAuiB,iBAAgB,CAACjkB,EAASzC,IACjB4lB,GAAcnjB,EAAQic,aAAa,WAAWsH,GAAiBhmB,QAgB1E,MAAM2mB,GAEJ,kBAAWC,GACT,MAAO,CAAC,CACV,CACA,sBAAWC,GACT,MAAO,CAAC,CACV,CACA,eAAWpH,GACT,MAAM,IAAIqH,MAAM,sEAClB,CACA,UAAAC,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAChB,OAAOA,CACT,CACA,eAAAC,CAAgBD,EAAQvkB,GACtB,MAAM2kB,EAAa,GAAU3kB,GAAWyjB,GAAYQ,iBAAiBjkB,EAAS,UAAY,CAAC,EAE3F,MAAO,IACFygB,KAAKmE,YAAYT,WACM,iBAAfQ,EAA0BA,EAAa,CAAC,KAC/C,GAAU3kB,GAAWyjB,GAAYG,kBAAkB5jB,GAAW,CAAC,KAC7C,iBAAXukB,EAAsBA,EAAS,CAAC,EAE/C,CACA,gBAAAG,CAAiBH,EAAQM,EAAcpE,KAAKmE,YAAYR,aACtD,IAAK,MAAO7hB,EAAUuiB,KAAkBrnB,OAAOmkB,QAAQiD,GAAc,CACnE,MAAMzmB,EAAQmmB,EAAOhiB,GACfwiB,EAAY,GAAU3mB,GAAS,UAjiBrC4c,OADSA,EAkiB+C5c,GAhiBnD,GAAG4c,IAELvd,OAAOM,UAAUuC,SAASrC,KAAK+c,GAAQL,MAAM,eAAe,GAAGza,cA+hBlE,IAAK,IAAI8kB,OAAOF,GAAehhB,KAAKihB,GAClC,MAAM,IAAIE,UAAU,GAAGxE,KAAKmE,YAAY5H,KAAKkI,0BAA0B3iB,qBAA4BwiB,yBAAiCD,MAExI,CAtiBW9J,KAuiBb,EAqBF,MAAMmK,WAAsBjB,GAC1B,WAAAU,CAAY5kB,EAASukB,GACnBa,SACAplB,EAAUmb,GAAWnb,MAIrBygB,KAAK4E,SAAWrlB,EAChBygB,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/BzK,GAAKtH,IAAIiO,KAAK4E,SAAU5E,KAAKmE,YAAYW,SAAU9E,MACrD,CAGA,OAAA+E,GACE1L,GAAKM,OAAOqG,KAAK4E,SAAU5E,KAAKmE,YAAYW,UAC5CvE,GAAaC,IAAIR,KAAK4E,SAAU5E,KAAKmE,YAAYa,WACjD,IAAK,MAAMC,KAAgBjoB,OAAOkoB,oBAAoBlF,MACpDA,KAAKiF,GAAgB,IAEzB,CACA,cAAAE,CAAe9I,EAAU9c,EAAS6lB,GAAa,GAC7CpI,GAAuBX,EAAU9c,EAAS6lB,EAC5C,CACA,UAAAvB,CAAWC,GAIT,OAHAA,EAAS9D,KAAK+D,gBAAgBD,EAAQ9D,KAAK4E,UAC3Cd,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CAGA,kBAAOuB,CAAY9lB,GACjB,OAAO8Z,GAAKlc,IAAIud,GAAWnb,GAAUygB,KAAK8E,SAC5C,CACA,0BAAOQ,CAAoB/lB,EAASukB,EAAS,CAAC,GAC5C,OAAO9D,KAAKqF
,YAAY9lB,IAAY,IAAIygB,KAAKzgB,EAA2B,iBAAXukB,EAAsBA,EAAS,KAC9F,CACA,kBAAWyB,GACT,MA5CY,OA6Cd,CACA,mBAAWT,GACT,MAAO,MAAM9E,KAAKzD,MACpB,CACA,oBAAWyI,GACT,MAAO,IAAIhF,KAAK8E,UAClB,CACA,gBAAOU,CAAUllB,GACf,MAAO,GAAGA,IAAO0f,KAAKgF,WACxB,EAUF,MAAMS,GAAclmB,IAClB,IAAIwa,EAAWxa,EAAQic,aAAa,kBACpC,IAAKzB,GAAyB,MAAbA,EAAkB,CACjC,IAAI2L,EAAgBnmB,EAAQic,aAAa,QAMzC,IAAKkK,IAAkBA,EAActE,SAAS,OAASsE,EAAcjE,WAAW,KAC9E,OAAO,KAILiE,EAActE,SAAS,OAASsE,EAAcjE,WAAW,OAC3DiE,EAAgB,IAAIA,EAAcxjB,MAAM,KAAK,MAE/C6X,EAAW2L,GAAmC,MAAlBA,EAAwB5L,GAAc4L,EAAcC,QAAU,IAC5F,CACA,OAAO5L,CAAQ,EAEX6L,GAAiB,CACrBzT,KAAI,CAAC4H,EAAUxa,EAAU8F,SAASC,kBACzB,GAAG3G,UAAUsB,QAAQ3C,UAAU8iB,iBAAiB5iB,KAAK+B,EAASwa,IAEvE8L,QAAO,CAAC9L,EAAUxa,EAAU8F,SAASC,kBAC5BrF,QAAQ3C,UAAU8K,cAAc5K,KAAK+B,EAASwa,GAEvD+L,SAAQ,CAACvmB,EAASwa,IACT,GAAGpb,UAAUY,EAAQumB,UAAU3f,QAAOzB,GAASA,EAAMqhB,QAAQhM,KAEtE,OAAAiM,CAAQzmB,EAASwa,GACf,MAAMiM,EAAU,GAChB,IAAIC,EAAW1mB,EAAQwF,WAAWiW,QAAQjB,GAC1C,KAAOkM,GACLD,EAAQpU,KAAKqU,GACbA,EAAWA,EAASlhB,WAAWiW,QAAQjB,GAEzC,OAAOiM,CACT,EACA,IAAAE,CAAK3mB,EAASwa,GACZ,IAAIoM,EAAW5mB,EAAQ6mB,uBACvB,KAAOD,GAAU,CACf,GAAIA,EAASJ,QAAQhM,GACnB,MAAO,CAACoM,GAEVA,EAAWA,EAASC,sBACtB,CACA,MAAO,EACT,EAEA,IAAAvhB,CAAKtF,EAASwa,GACZ,IAAIlV,EAAOtF,EAAQ8mB,mBACnB,KAAOxhB,GAAM,CACX,GAAIA,EAAKkhB,QAAQhM,GACf,MAAO,CAAClV,GAEVA,EAAOA,EAAKwhB,kBACd,CACA,MAAO,EACT,EACA,iBAAAC,CAAkB/mB,GAChB,MAAMgnB,EAAa,CAAC,IAAK,SAAU,QAAS,WAAY,SAAU,UAAW,aAAc,4BAA4BzjB,KAAIiX,GAAY,GAAGA,2BAAiC7W,KAAK,KAChL,OAAO8c,KAAK7N,KAAKoU,EAAYhnB,GAAS4G,QAAOqgB,IAAOtL,GAAWsL,IAAO7L,GAAU6L,IAClF,EACA,sBAAAC,CAAuBlnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAIwa,GACK6L,GAAeC,QAAQ9L,GAAYA,EAErC,IACT,EACA,sBAAA2M,CAAuBnnB,GACrB,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW6L,GAAeC,QAAQ9L,GAAY,IACvD,EACA,+BAAA4M,CAAgCpnB,GAC9B,MAAMwa,EAAW0L,GAAYlmB,GAC7B,OAAOwa,EAAW6L,GAAezT,KAAK4H,GAAY,EACpD,GAUI6M,GAAuB,CAACC,EAAWC,EAAS,UAChD,MAAMC,EAAa,gBAAgBF,EAAU7B,YACvC1kB,EAAOumB,EAAUtK,KACvBgE,GAAac,GAAGhc,SAAU0hB,EAAY,qBAAqBzmB,OAAU,SAAU8e,GAI7E,GAHI,CAAC,IAAK,QAAQgC,S
AASpB,KAAKgH,UAC9B5H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEF,MAAMzT,EAASqZ,GAAec,uBAAuB1G,OAASA,KAAKhF,QAAQ,IAAI1a,KAC9DumB,EAAUvB,oBAAoB/Y,GAGtCua,IACX,GAAE,EAiBEG,GAAc,YACdC,GAAc,QAAQD,KACtBE,GAAe,SAASF,KAQ9B,MAAMG,WAAc1C,GAElB,eAAWnI,GACT,MAfW,OAgBb,CAGA,KAAA8K,GAEE,GADmB9G,GAAaqB,QAAQ5B,KAAK4E,SAAUsC,IACxClF,iBACb,OAEFhC,KAAK4E,SAASvJ,UAAU1B,OAlBF,QAmBtB,MAAMyL,EAAapF,KAAK4E,SAASvJ,UAAU7W,SApBrB,QAqBtBwb,KAAKmF,gBAAe,IAAMnF,KAAKsH,mBAAmBtH,KAAK4E,SAAUQ,EACnE,CAGA,eAAAkC,GACEtH,KAAK4E,SAASjL,SACd4G,GAAaqB,QAAQ5B,KAAK4E,SAAUuC,IACpCnH,KAAK+E,SACP,CAGA,sBAAOtI,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO+c,GAAM9B,oBAAoBtF,MACvC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOF4G,GAAqBQ,GAAO,SAM5BjL,GAAmBiL,IAcnB,MAKMI,GAAyB,4BAO/B,MAAMC,WAAe/C,GAEnB,eAAWnI,GACT,MAfW,QAgBb,CAGA,MAAAmL,GAEE1H,KAAK4E,SAASxjB,aAAa,eAAgB4e,KAAK4E,SAASvJ,UAAUqM,OAjB3C,UAkB1B,CAGA,sBAAOjL,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOod,GAAOnC,oBAAoBtF,MACzB,WAAX8D,GACFzZ,EAAKyZ,IAET,GACF,EAOFvD,GAAac,GAAGhc,SAjCe,2BAiCmBmiB,IAAwBpI,IACxEA,EAAMkD,iBACN,MAAMqF,EAASvI,EAAM7S,OAAOyO,QAAQwM,IACvBC,GAAOnC,oBAAoBqC,GACnCD,QAAQ,IAOfvL,GAAmBsL,IAcnB,MACMG,GAAc,YACdC,GAAmB,aAAaD,KAChCE,GAAkB,YAAYF,KAC9BG,GAAiB,WAAWH,KAC5BI,GAAoB,cAAcJ,KAClCK,GAAkB,YAAYL,KAK9BM,GAAY,CAChBC,YAAa,KACbC,aAAc,KACdC,cAAe,MAEXC,GAAgB,CACpBH,YAAa,kBACbC,aAAc,kBACdC,cAAe,mBAOjB,MAAME,WAAc9E,GAClB,WAAAU,CAAY5kB,EAASukB,GACnBa,QACA3E,KAAK4E,SAAWrlB,EACXA,GAAYgpB,GAAMC,gBAGvBxI,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKyI,QAAU,EACfzI,KAAK0I,sBAAwB5H,QAAQlhB,OAAO+oB,cAC5C3I,KAAK4I,cACP,CAGA,kBAAWlF,GACT,OAAOwE,EACT,CACA,sBAAWvE,GACT,OAAO2E,EACT,CACA,eAAW/L,GACT,MA/CW,OAgDb,CAGA,OAAAwI,GACExE,GAAaC,IAAIR,KAAK4E,SAAUgD,GAClC,CAGA,MAAAiB,CAAOzJ,GACAY,KAAK0I,sBAIN1I,KAAK8I,wBAAwB1J,KAC/BY,KAAKyI,QAAUrJ,EAAM2J,SAJrB/I,KAAKyI,QAAUrJ,EAAM4J,QAAQ,GAAGD,OAMpC,CACA,IAAAE,CAAK7J,GACCY,KAAK8I,wBAAwB1J,KAC/BY,KAAKyI,QAAUrJ,EAAM2J,QAAU/I,KAAKy
I,SAEtCzI,KAAKkJ,eACLrM,GAAQmD,KAAK6E,QAAQsD,YACvB,CACA,KAAAgB,CAAM/J,GACJY,KAAKyI,QAAUrJ,EAAM4J,SAAW5J,EAAM4J,QAAQtY,OAAS,EAAI,EAAI0O,EAAM4J,QAAQ,GAAGD,QAAU/I,KAAKyI,OACjG,CACA,YAAAS,GACE,MAAME,EAAYjnB,KAAKoC,IAAIyb,KAAKyI,SAChC,GAAIW,GAnEgB,GAoElB,OAEF,MAAM9b,EAAY8b,EAAYpJ,KAAKyI,QACnCzI,KAAKyI,QAAU,EACVnb,GAGLuP,GAAQvP,EAAY,EAAI0S,KAAK6E,QAAQwD,cAAgBrI,KAAK6E,QAAQuD,aACpE,CACA,WAAAQ,GACM5I,KAAK0I,uBACPnI,GAAac,GAAGrB,KAAK4E,SAAUoD,IAAmB5I,GAASY,KAAK6I,OAAOzJ,KACvEmB,GAAac,GAAGrB,KAAK4E,SAAUqD,IAAiB7I,GAASY,KAAKiJ,KAAK7J,KACnEY,KAAK4E,SAASvJ,UAAU5E,IAlFG,mBAoF3B8J,GAAac,GAAGrB,KAAK4E,SAAUiD,IAAkBzI,GAASY,KAAK6I,OAAOzJ,KACtEmB,GAAac,GAAGrB,KAAK4E,SAAUkD,IAAiB1I,GAASY,KAAKmJ,MAAM/J,KACpEmB,GAAac,GAAGrB,KAAK4E,SAAUmD,IAAgB3I,GAASY,KAAKiJ,KAAK7J,KAEtE,CACA,uBAAA0J,CAAwB1J,GACtB,OAAOY,KAAK0I,wBA3FS,QA2FiBtJ,EAAMiK,aA5FrB,UA4FyDjK,EAAMiK,YACxF,CAGA,kBAAOb,GACL,MAAO,iBAAkBnjB,SAASC,iBAAmB7C,UAAU6mB,eAAiB,CAClF,EAeF,MAEMC,GAAc,eACdC,GAAiB,YAKjBC,GAAa,OACbC,GAAa,OACbC,GAAiB,OACjBC,GAAkB,QAClBC,GAAc,QAAQN,KACtBO,GAAa,OAAOP,KACpBQ,GAAkB,UAAUR,KAC5BS,GAAqB,aAAaT,KAClCU,GAAqB,aAAaV,KAClCW,GAAmB,YAAYX,KAC/BY,GAAwB,OAAOZ,KAAcC,KAC7CY,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAsB,WACtBC,GAAsB,SAMtBC,GAAkB,UAClBC,GAAgB,iBAChBC,GAAuBF,GAAkBC,GAKzCE,GAAmB,CACvB,UAAoBd,GACpB,WAAqBD,IAEjBgB,GAAY,CAChBC,SAAU,IACVC,UAAU,EACVC,MAAO,QACPC,MAAM,EACNC,OAAO,EACPC,MAAM,GAEFC,GAAgB,CACpBN,SAAU,mBAEVC,SAAU,UACVC,MAAO,mBACPC,KAAM,mBACNC,MAAO,UACPC,KAAM,WAOR,MAAME,WAAiBzG,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKoL,UAAY,KACjBpL,KAAKqL,eAAiB,KACtBrL,KAAKsL,YAAa,EAClBtL,KAAKuL,aAAe,KACpBvL,KAAKwL,aAAe,KACpBxL,KAAKyL,mBAAqB7F,GAAeC,QArCjB,uBAqC8C7F,KAAK4E,UAC3E5E,KAAK0L,qBACD1L,KAAK6E,QAAQkG,OAASV,IACxBrK,KAAK2L,OAET,CAGA,kBAAWjI,GACT,OAAOiH,EACT,CACA,sBAAWhH,GACT,OAAOuH,EACT,CACA,eAAW3O,GACT,MAnFW,UAoFb,CAGA,IAAA1X,GACEmb,KAAK4L,OAAOnC,GACd,CACA,eAAAoC,IAIOxmB,SAASymB,QAAUnR,GAAUqF,KAAK4E,WACrC5E,KAAKnb,MAET,CACA,IAAAqhB,GACElG,KAAK4L,OAAOlC,GACd,CACA,KAAAoB,GACM9K,KAAKsL,YACPlR,GAAqB4F,KAAK4
E,UAE5B5E,KAAK+L,gBACP,CACA,KAAAJ,GACE3L,KAAK+L,iBACL/L,KAAKgM,kBACLhM,KAAKoL,UAAYa,aAAY,IAAMjM,KAAK6L,mBAAmB7L,KAAK6E,QAAQ+F,SAC1E,CACA,iBAAAsB,GACOlM,KAAK6E,QAAQkG,OAGd/K,KAAKsL,WACP/K,GAAae,IAAItB,KAAK4E,SAAUkF,IAAY,IAAM9J,KAAK2L,UAGzD3L,KAAK2L,QACP,CACA,EAAAQ,CAAG1T,GACD,MAAM2T,EAAQpM,KAAKqM,YACnB,GAAI5T,EAAQ2T,EAAM1b,OAAS,GAAK+H,EAAQ,EACtC,OAEF,GAAIuH,KAAKsL,WAEP,YADA/K,GAAae,IAAItB,KAAK4E,SAAUkF,IAAY,IAAM9J,KAAKmM,GAAG1T,KAG5D,MAAM6T,EAActM,KAAKuM,cAAcvM,KAAKwM,cAC5C,GAAIF,IAAgB7T,EAClB,OAEF,MAAMtC,EAAQsC,EAAQ6T,EAAc7C,GAAaC,GACjD1J,KAAK4L,OAAOzV,EAAOiW,EAAM3T,GAC3B,CACA,OAAAsM,GACM/E,KAAKwL,cACPxL,KAAKwL,aAAazG,UAEpBJ,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAEhB,OADAA,EAAO2I,gBAAkB3I,EAAO8G,SACzB9G,CACT,CACA,kBAAA4H,GACM1L,KAAK6E,QAAQgG,UACftK,GAAac,GAAGrB,KAAK4E,SAAUmF,IAAiB3K,GAASY,KAAK0M,SAAStN,KAE9C,UAAvBY,KAAK6E,QAAQiG,QACfvK,GAAac,GAAGrB,KAAK4E,SAAUoF,IAAoB,IAAMhK,KAAK8K,UAC9DvK,GAAac,GAAGrB,KAAK4E,SAAUqF,IAAoB,IAAMjK,KAAKkM,uBAE5DlM,KAAK6E,QAAQmG,OAASzC,GAAMC,eAC9BxI,KAAK2M,yBAET,CACA,uBAAAA,GACE,IAAK,MAAMC,KAAOhH,GAAezT,KArIX,qBAqImC6N,KAAK4E,UAC5DrE,GAAac,GAAGuL,EAAK1C,IAAkB9K,GAASA,EAAMkD,mBAExD,MAmBMuK,EAAc,CAClBzE,aAAc,IAAMpI,KAAK4L,OAAO5L,KAAK8M,kBAAkBnD,KACvDtB,cAAe,IAAMrI,KAAK4L,OAAO5L,KAAK8M,kBAAkBlD,KACxDzB,YAtBkB,KACS,UAAvBnI,KAAK6E,QAAQiG,QAYjB9K,KAAK8K,QACD9K,KAAKuL,cACPwB,aAAa/M,KAAKuL,cAEpBvL,KAAKuL,aAAe1N,YAAW,IAAMmC,KAAKkM,qBAjLjB,IAiL+DlM,KAAK6E,QAAQ+F,UAAS,GAOhH5K,KAAKwL,aAAe,IAAIjD,GAAMvI,KAAK4E,SAAUiI,EAC/C,CACA,QAAAH,CAAStN,GACP,GAAI,kBAAkB/b,KAAK+b,EAAM7S,OAAOya,SACtC,OAEF,MAAM1Z,EAAYod,GAAiBtL,EAAMtiB,KACrCwQ,IACF8R,EAAMkD,iBACNtC,KAAK4L,OAAO5L,KAAK8M,kBAAkBxf,IAEvC,CACA,aAAAif,CAAchtB,GACZ,OAAOygB,KAAKqM,YAAYlnB,QAAQ5F,EAClC,CACA,0BAAAytB,CAA2BvU,GACzB,IAAKuH,KAAKyL,mBACR,OAEF,MAAMwB,EAAkBrH,GAAeC,QAAQ0E,GAAiBvK,KAAKyL,oBACrEwB,EAAgB5R,UAAU1B,OAAO2Q,IACjC2C,EAAgB9rB,gBAAgB,gBAChC,MAAM+rB,EAAqBtH,GAAeC,QAAQ,sBAAsBpN,MAAWuH,KAAKyL,oBACpFyB,IACFA,EAAmB7R,UAAU5E,IAAI6T,IACjC4C,EAAmB9rB,aAAa,eAAgB,QAEpD,CACA,eAAA4qB,GACE,MAAMzsB,EAAUygB,KAAK
qL,gBAAkBrL,KAAKwM,aAC5C,IAAKjtB,EACH,OAEF,MAAM4tB,EAAkB5P,OAAO6P,SAAS7tB,EAAQic,aAAa,oBAAqB,IAClFwE,KAAK6E,QAAQ+F,SAAWuC,GAAmBnN,KAAK6E,QAAQ4H,eAC1D,CACA,MAAAb,CAAOzV,EAAO5W,EAAU,MACtB,GAAIygB,KAAKsL,WACP,OAEF,MAAMvN,EAAgBiC,KAAKwM,aACrBa,EAASlX,IAAUsT,GACnB6D,EAAc/tB,GAAWue,GAAqBkC,KAAKqM,YAAatO,EAAesP,EAAQrN,KAAK6E,QAAQoG,MAC1G,GAAIqC,IAAgBvP,EAClB,OAEF,MAAMwP,EAAmBvN,KAAKuM,cAAce,GACtCE,EAAehI,GACZjF,GAAaqB,QAAQ5B,KAAK4E,SAAUY,EAAW,CACpD1F,cAAewN,EACfhgB,UAAW0S,KAAKyN,kBAAkBtX,GAClCuD,KAAMsG,KAAKuM,cAAcxO,GACzBoO,GAAIoB,IAIR,GADmBC,EAAa3D,IACjB7H,iBACb,OAEF,IAAKjE,IAAkBuP,EAGrB,OAEF,MAAMI,EAAY5M,QAAQd,KAAKoL,WAC/BpL,KAAK8K,QACL9K,KAAKsL,YAAa,EAClBtL,KAAKgN,2BAA2BO,GAChCvN,KAAKqL,eAAiBiC,EACtB,MAAMK,EAAuBN,EA3OR,sBADF,oBA6ObO,EAAiBP,EA3OH,qBACA,qBA2OpBC,EAAYjS,UAAU5E,IAAImX,GAC1B/R,GAAOyR,GACPvP,EAAc1C,UAAU5E,IAAIkX,GAC5BL,EAAYjS,UAAU5E,IAAIkX,GAQ1B3N,KAAKmF,gBAPoB,KACvBmI,EAAYjS,UAAU1B,OAAOgU,EAAsBC,GACnDN,EAAYjS,UAAU5E,IAAI6T,IAC1BvM,EAAc1C,UAAU1B,OAAO2Q,GAAqBsD,EAAgBD,GACpE3N,KAAKsL,YAAa,EAClBkC,EAAa1D,GAAW,GAEY/L,EAAeiC,KAAK6N,eACtDH,GACF1N,KAAK2L,OAET,CACA,WAAAkC,GACE,OAAO7N,KAAK4E,SAASvJ,UAAU7W,SAhQV,QAiQvB,CACA,UAAAgoB,GACE,OAAO5G,GAAeC,QAAQ4E,GAAsBzK,KAAK4E,SAC3D,CACA,SAAAyH,GACE,OAAOzG,GAAezT,KAAKqY,GAAexK,KAAK4E,SACjD,CACA,cAAAmH,GACM/L,KAAKoL,YACP0C,cAAc9N,KAAKoL,WACnBpL,KAAKoL,UAAY,KAErB,CACA,iBAAA0B,CAAkBxf,GAChB,OAAI2O,KACK3O,IAAcqc,GAAiBD,GAAaD,GAE9Cnc,IAAcqc,GAAiBF,GAAaC,EACrD,CACA,iBAAA+D,CAAkBtX,GAChB,OAAI8F,KACK9F,IAAUuT,GAAaC,GAAiBC,GAE1CzT,IAAUuT,GAAaE,GAAkBD,EAClD,CAGA,sBAAOlN,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO8gB,GAAS7F,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,GAIX,GAAsB,iBAAXA,EAAqB,CAC9B,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,OAREzZ,EAAK8hB,GAAGrI,EASZ,GACF,EAOFvD,GAAac,GAAGhc,SAAU+kB,GAvSE,uCAuS2C,SAAUhL,GAC/E,MAAM7S,EAASqZ,GAAec,uBAAuB1G,MACrD,IAAKzT,IAAWA,EAAO8O,UAAU7W,SAAS6lB,IACxC,OAEFjL,EAAMkD,iBACN,MAAMyL,EAAW5C,GAAS7F,oBAAoB/Y,GACxCyhB,EAAahO,KAAKxE,aAAa,o
BACrC,OAAIwS,GACFD,EAAS5B,GAAG6B,QACZD,EAAS7B,qBAGyC,SAAhDlJ,GAAYQ,iBAAiBxD,KAAM,UACrC+N,EAASlpB,YACTkpB,EAAS7B,sBAGX6B,EAAS7H,YACT6H,EAAS7B,oBACX,IACA3L,GAAac,GAAGzhB,OAAQuqB,IAAuB,KAC7C,MAAM8D,EAAYrI,GAAezT,KA5TR,6BA6TzB,IAAK,MAAM4b,KAAYE,EACrB9C,GAAS7F,oBAAoByI,EAC/B,IAOF5R,GAAmBgP,IAcnB,MAEM+C,GAAc,eAEdC,GAAe,OAAOD,KACtBE,GAAgB,QAAQF,KACxBG,GAAe,OAAOH,KACtBI,GAAiB,SAASJ,KAC1BK,GAAyB,QAAQL,cACjCM,GAAoB,OACpBC,GAAsB,WACtBC,GAAwB,aAExBC,GAA6B,WAAWF,OAAwBA,KAKhEG,GAAyB,8BACzBC,GAAY,CAChBpqB,OAAQ,KACRijB,QAAQ,GAEJoH,GAAgB,CACpBrqB,OAAQ,iBACRijB,OAAQ,WAOV,MAAMqH,WAAiBrK,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKgP,kBAAmB,EACxBhP,KAAKiP,cAAgB,GACrB,MAAMC,EAAatJ,GAAezT,KAAKyc,IACvC,IAAK,MAAMO,KAAQD,EAAY,CAC7B,MAAMnV,EAAW6L,GAAea,uBAAuB0I,GACjDC,EAAgBxJ,GAAezT,KAAK4H,GAAU5T,QAAOkpB,GAAgBA,IAAiBrP,KAAK4E,WAChF,OAAb7K,GAAqBqV,EAAc1e,QACrCsP,KAAKiP,cAAcrd,KAAKud,EAE5B,CACAnP,KAAKsP,sBACAtP,KAAK6E,QAAQpgB,QAChBub,KAAKuP,0BAA0BvP,KAAKiP,cAAejP,KAAKwP,YAEtDxP,KAAK6E,QAAQ6C,QACf1H,KAAK0H,QAET,CAGA,kBAAWhE,GACT,OAAOmL,EACT,CACA,sBAAWlL,GACT,OAAOmL,EACT,CACA,eAAWvS,GACT,MA9DW,UA+Db,CAGA,MAAAmL,GACM1H,KAAKwP,WACPxP,KAAKyP,OAELzP,KAAK0P,MAET,CACA,IAAAA,GACE,GAAI1P,KAAKgP,kBAAoBhP,KAAKwP,WAChC,OAEF,IAAIG,EAAiB,GAQrB,GALI3P,KAAK6E,QAAQpgB,SACfkrB,EAAiB3P,KAAK4P,uBAhEH,wCAgE4CzpB,QAAO5G,GAAWA,IAAYygB,KAAK4E,WAAU9hB,KAAIvD,GAAWwvB,GAASzJ,oBAAoB/lB,EAAS,CAC/JmoB,QAAQ,OAGRiI,EAAejf,QAAUif,EAAe,GAAGX,iBAC7C,OAGF,GADmBzO,GAAaqB,QAAQ5B,KAAK4E,SAAUuJ,IACxCnM,iBACb,OAEF,IAAK,MAAM6N,KAAkBF,EAC3BE,EAAeJ,OAEjB,MAAMK,EAAY9P,KAAK+P,gBACvB/P,KAAK4E,SAASvJ,UAAU1B,OAAO8U,IAC/BzO,KAAK4E,SAASvJ,UAAU5E,IAAIiY,IAC5B1O,KAAK4E,SAAS7jB,MAAM+uB,GAAa,EACjC9P,KAAKuP,0BAA0BvP,KAAKiP,eAAe,GACnDjP,KAAKgP,kBAAmB,EACxB,MAQMgB,EAAa,SADUF,EAAU,GAAGrL,cAAgBqL,EAAU1d,MAAM,KAE1E4N,KAAKmF,gBATY,KACfnF,KAAKgP,kBAAmB,EACxBhP,KAAK4E,SAASvJ,UAAU1B,OAAO+U,IAC/B1O,KAAK4E,SAASvJ,UAAU5E,IAAIgY,GAAqBD,IACjDxO,KAAK4E,SAAS7jB,MAAM+uB,GAAa,GACjCvP,GAAaqB,QAAQ5B,KAAK4E,SAAUwJ,GAAc,GAItBpO,KAAK4E,UAAU,GAC7C5E,KAAK4
E,SAAS7jB,MAAM+uB,GAAa,GAAG9P,KAAK4E,SAASoL,MACpD,CACA,IAAAP,GACE,GAAIzP,KAAKgP,mBAAqBhP,KAAKwP,WACjC,OAGF,GADmBjP,GAAaqB,QAAQ5B,KAAK4E,SAAUyJ,IACxCrM,iBACb,OAEF,MAAM8N,EAAY9P,KAAK+P,gBACvB/P,KAAK4E,SAAS7jB,MAAM+uB,GAAa,GAAG9P,KAAK4E,SAASthB,wBAAwBwsB,OAC1EjU,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIiY,IAC5B1O,KAAK4E,SAASvJ,UAAU1B,OAAO8U,GAAqBD,IACpD,IAAK,MAAM5M,KAAW5B,KAAKiP,cAAe,CACxC,MAAM1vB,EAAUqmB,GAAec,uBAAuB9E,GAClDriB,IAAYygB,KAAKwP,SAASjwB,IAC5BygB,KAAKuP,0BAA0B,CAAC3N,IAAU,EAE9C,CACA5B,KAAKgP,kBAAmB,EAOxBhP,KAAK4E,SAAS7jB,MAAM+uB,GAAa,GACjC9P,KAAKmF,gBAPY,KACfnF,KAAKgP,kBAAmB,EACxBhP,KAAK4E,SAASvJ,UAAU1B,OAAO+U,IAC/B1O,KAAK4E,SAASvJ,UAAU5E,IAAIgY,IAC5BlO,GAAaqB,QAAQ5B,KAAK4E,SAAU0J,GAAe,GAGvBtO,KAAK4E,UAAU,EAC/C,CACA,QAAA4K,CAASjwB,EAAUygB,KAAK4E,UACtB,OAAOrlB,EAAQ8b,UAAU7W,SAASgqB,GACpC,CAGA,iBAAAxK,CAAkBF,GAGhB,OAFAA,EAAO4D,OAAS5G,QAAQgD,EAAO4D,QAC/B5D,EAAOrf,OAASiW,GAAWoJ,EAAOrf,QAC3Bqf,CACT,CACA,aAAAiM,GACE,OAAO/P,KAAK4E,SAASvJ,UAAU7W,SA3IL,uBAChB,QACC,QA0Ib,CACA,mBAAA8qB,GACE,IAAKtP,KAAK6E,QAAQpgB,OAChB,OAEF,MAAMqhB,EAAW9F,KAAK4P,uBAAuBhB,IAC7C,IAAK,MAAMrvB,KAAWumB,EAAU,CAC9B,MAAMmK,EAAWrK,GAAec,uBAAuBnnB,GACnD0wB,GACFjQ,KAAKuP,0BAA0B,CAAChwB,GAAUygB,KAAKwP,SAASS,GAE5D,CACF,CACA,sBAAAL,CAAuB7V,GACrB,MAAM+L,EAAWF,GAAezT,KAAKwc,GAA4B3O,KAAK6E,QAAQpgB,QAE9E,OAAOmhB,GAAezT,KAAK4H,EAAUiG,KAAK6E,QAAQpgB,QAAQ0B,QAAO5G,IAAYumB,EAAS1E,SAAS7hB,IACjG,CACA,yBAAAgwB,CAA0BW,EAAcC,GACtC,GAAKD,EAAaxf,OAGlB,IAAK,MAAMnR,KAAW2wB,EACpB3wB,EAAQ8b,UAAUqM,OArKK,aAqKyByI,GAChD5wB,EAAQ6B,aAAa,gBAAiB+uB,EAE1C,CAGA,sBAAO1T,CAAgBqH,GACrB,MAAMe,EAAU,CAAC,EAIjB,MAHsB,iBAAXf,GAAuB,YAAYzgB,KAAKygB,KACjDe,EAAQ6C,QAAS,GAEZ1H,KAAKuH,MAAK,WACf,MAAMld,EAAO0kB,GAASzJ,oBAAoBtF,KAAM6E,GAChD,GAAsB,iBAAXf,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IACP,CACF,GACF,EAOFvD,GAAac,GAAGhc,SAAUkpB,GAAwBK,IAAwB,SAAUxP,IAErD,MAAzBA,EAAM7S,OAAOya,SAAmB5H,EAAMW,gBAAmD,MAAjCX,EAAMW,eAAeiH,UAC/E5H,EAAMkD,iBAER,IAAK,MAAM/iB,KAAWqmB,GAAee,gCAAgC3G,MACnE+O,GAASzJ,oBAAoB/
lB,EAAS,CACpCmoB,QAAQ,IACPA,QAEP,IAMAvL,GAAmB4S,IAcnB,MAAMqB,GAAS,WAETC,GAAc,eACdC,GAAiB,YAGjBC,GAAiB,UACjBC,GAAmB,YAGnBC,GAAe,OAAOJ,KACtBK,GAAiB,SAASL,KAC1BM,GAAe,OAAON,KACtBO,GAAgB,QAAQP,KACxBQ,GAAyB,QAAQR,KAAcC,KAC/CQ,GAAyB,UAAUT,KAAcC,KACjDS,GAAuB,QAAQV,KAAcC,KAC7CU,GAAoB,OAMpBC,GAAyB,4DACzBC,GAA6B,GAAGD,MAA0BD,KAC1DG,GAAgB,iBAIhBC,GAAgBnV,KAAU,UAAY,YACtCoV,GAAmBpV,KAAU,YAAc,UAC3CqV,GAAmBrV,KAAU,aAAe,eAC5CsV,GAAsBtV,KAAU,eAAiB,aACjDuV,GAAkBvV,KAAU,aAAe,cAC3CwV,GAAiBxV,KAAU,cAAgB,aAG3CyV,GAAY,CAChBC,WAAW,EACX1jB,SAAU,kBACV2jB,QAAS,UACT5pB,OAAQ,CAAC,EAAG,GACZ6pB,aAAc,KACdvzB,UAAW,UAEPwzB,GAAgB,CACpBH,UAAW,mBACX1jB,SAAU,mBACV2jB,QAAS,SACT5pB,OAAQ,0BACR6pB,aAAc,yBACdvzB,UAAW,2BAOb,MAAMyzB,WAAiBrN,GACrB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKgS,QAAU,KACfhS,KAAKiS,QAAUjS,KAAK4E,SAAS7f,WAE7Bib,KAAKkS,MAAQtM,GAAe/gB,KAAKmb,KAAK4E,SAAUuM,IAAe,IAAMvL,GAAeM,KAAKlG,KAAK4E,SAAUuM,IAAe,IAAMvL,GAAeC,QAAQsL,GAAenR,KAAKiS,SACxKjS,KAAKmS,UAAYnS,KAAKoS,eACxB,CAGA,kBAAW1O,GACT,OAAOgO,EACT,CACA,sBAAW/N,GACT,OAAOmO,EACT,CACA,eAAWvV,GACT,OAAO6T,EACT,CAGA,MAAA1I,GACE,OAAO1H,KAAKwP,WAAaxP,KAAKyP,OAASzP,KAAK0P,MAC9C,CACA,IAAAA,GACE,GAAIxU,GAAW8E,KAAK4E,WAAa5E,KAAKwP,WACpC,OAEF,MAAM1P,EAAgB,CACpBA,cAAeE,KAAK4E,UAGtB,IADkBrE,GAAaqB,QAAQ5B,KAAK4E,SAAU+L,GAAc7Q,GACtDkC,iBAAd,CASA,GANAhC,KAAKqS,gBAMD,iBAAkBhtB,SAASC,kBAAoB0a,KAAKiS,QAAQjX,QAzExC,eA0EtB,IAAK,MAAMzb,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK4Z,UAC/CvF,GAAac,GAAG9hB,EAAS,YAAaqc,IAG1CoE,KAAK4E,SAAS0N,QACdtS,KAAK4E,SAASxjB,aAAa,iBAAiB,GAC5C4e,KAAKkS,MAAM7W,UAAU5E,IAAIua,IACzBhR,KAAK4E,SAASvJ,UAAU5E,IAAIua,IAC5BzQ,GAAaqB,QAAQ5B,KAAK4E,SAAUgM,GAAe9Q,EAhBnD,CAiBF,CACA,IAAA2P,GACE,GAAIvU,GAAW8E,KAAK4E,YAAc5E,KAAKwP,WACrC,OAEF,MAAM1P,EAAgB,CACpBA,cAAeE,KAAK4E,UAEtB5E,KAAKuS,cAAczS,EACrB,CACA,OAAAiF,GACM/E,KAAKgS,SACPhS,KAAKgS,QAAQhZ,UAEf2L,MAAMI,SACR,CACA,MAAAha,GACEiV,KAAKmS,UAAYnS,KAAKoS,gBAClBpS,KAAKgS,SACPhS,KAAKgS,QAAQjnB,QAEjB,CAGA,aAAAwnB,CAAczS,GAEZ,IADkBS,GAAaqB,QAAQ5B,KAAK4E,SAAU6L,GAAc3Q,GACtDkC,iBAAd,CAMA,GAAI,iBAAkB3c,
SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK4Z,UAC/CvF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAGvCoE,KAAKgS,SACPhS,KAAKgS,QAAQhZ,UAEfgH,KAAKkS,MAAM7W,UAAU1B,OAAOqX,IAC5BhR,KAAK4E,SAASvJ,UAAU1B,OAAOqX,IAC/BhR,KAAK4E,SAASxjB,aAAa,gBAAiB,SAC5C4hB,GAAYE,oBAAoBlD,KAAKkS,MAAO,UAC5C3R,GAAaqB,QAAQ5B,KAAK4E,SAAU8L,GAAgB5Q,EAhBpD,CAiBF,CACA,UAAA+D,CAAWC,GAET,GAAgC,iBADhCA,EAASa,MAAMd,WAAWC,IACRxlB,YAA2B,GAAUwlB,EAAOxlB,YAAgE,mBAA3CwlB,EAAOxlB,UAAUgF,sBAElG,MAAM,IAAIkhB,UAAU,GAAG4L,GAAO3L,+GAEhC,OAAOX,CACT,CACA,aAAAuO,GACE,QAAsB,IAAX,EACT,MAAM,IAAI7N,UAAU,gEAEtB,IAAIgO,EAAmBxS,KAAK4E,SACG,WAA3B5E,KAAK6E,QAAQvmB,UACfk0B,EAAmBxS,KAAKiS,QACf,GAAUjS,KAAK6E,QAAQvmB,WAChCk0B,EAAmB9X,GAAWsF,KAAK6E,QAAQvmB,WACA,iBAA3B0hB,KAAK6E,QAAQvmB,YAC7Bk0B,EAAmBxS,KAAK6E,QAAQvmB,WAElC,MAAMuzB,EAAe7R,KAAKyS,mBAC1BzS,KAAKgS,QAAU,GAAoBQ,EAAkBxS,KAAKkS,MAAOL,EACnE,CACA,QAAArC,GACE,OAAOxP,KAAKkS,MAAM7W,UAAU7W,SAASwsB,GACvC,CACA,aAAA0B,GACE,MAAMC,EAAiB3S,KAAKiS,QAC5B,GAAIU,EAAetX,UAAU7W,SArKN,WAsKrB,OAAOgtB,GAET,GAAImB,EAAetX,UAAU7W,SAvKJ,aAwKvB,OAAOitB,GAET,GAAIkB,EAAetX,UAAU7W,SAzKA,iBA0K3B,MA5JsB,MA8JxB,GAAImuB,EAAetX,UAAU7W,SA3KE,mBA4K7B,MA9JyB,SAkK3B,MAAMouB,EAAkF,QAA1E3tB,iBAAiB+a,KAAKkS,OAAOpX,iBAAiB,iBAAiB6K,OAC7E,OAAIgN,EAAetX,UAAU7W,SArLP,UAsLbouB,EAAQvB,GAAmBD,GAE7BwB,EAAQrB,GAAsBD,EACvC,CACA,aAAAc,GACE,OAAkD,OAA3CpS,KAAK4E,SAAS5J,QAnLD,UAoLtB,CACA,UAAA6X,GACE,MAAM,OACJ7qB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAO6P,SAASzvB,EAAO,MAEzC,mBAAXqK,EACF8qB,GAAc9qB,EAAO8qB,EAAY9S,KAAK4E,UAExC5c,CACT,CACA,gBAAAyqB,GACE,MAAMM,EAAwB,CAC5Br0B,UAAWshB,KAAK0S,gBAChBtc,UAAW,CAAC,CACV9V,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAK6S,iBAanB,OAPI7S,KAAKmS,WAAsC,WAAzBnS,KAAK6E,QAAQ+M,WACjC5O,GAAYC,iBAAiBjD,KAAKkS,MAAO,SAAU,UACnDa,EAAsB3c,UAAY,CAAC,CACjC9V,KAAM,cACNC,SAAS,KAGN,IACFwyB,KACAlW,GAAQmD,KAAK6E,QAAQgN,aAAc,CAACkB,IAE3C,CACA,eAAAC,EAAgB,IACdl2B,EAAG,OACHyP,IAEA,MAAM6f,EAAQxG,GAAezT,KAhOF,8DAgO+B6N,KAAKkS,OAAO/rB,QAAO5G,
GAAWob,GAAUpb,KAC7F6sB,EAAM1b,QAMXoN,GAAqBsO,EAAO7f,EAAQzP,IAAQ0zB,IAAmBpE,EAAMhL,SAAS7U,IAAS+lB,OACzF,CAGA,sBAAO7V,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO0nB,GAASzM,oBAAoBtF,KAAM8D,GAChD,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,CACA,iBAAOmP,CAAW7T,GAChB,GA5QuB,IA4QnBA,EAAMuI,QAAgD,UAAfvI,EAAMqB,MA/QnC,QA+QuDrB,EAAMtiB,IACzE,OAEF,MAAMo2B,EAActN,GAAezT,KAAK+e,IACxC,IAAK,MAAMxJ,KAAUwL,EAAa,CAChC,MAAMC,EAAUpB,GAAS1M,YAAYqC,GACrC,IAAKyL,IAAyC,IAA9BA,EAAQtO,QAAQ8M,UAC9B,SAEF,MAAMyB,EAAehU,EAAMgU,eACrBC,EAAeD,EAAahS,SAAS+R,EAAQjB,OACnD,GAAIkB,EAAahS,SAAS+R,EAAQvO,WAA2C,WAA9BuO,EAAQtO,QAAQ8M,YAA2B0B,GAA8C,YAA9BF,EAAQtO,QAAQ8M,WAA2B0B,EACnJ,SAIF,GAAIF,EAAQjB,MAAM1tB,SAAS4a,EAAM7S,UAA2B,UAAf6S,EAAMqB,MA/RvC,QA+R2DrB,EAAMtiB,KAAqB,qCAAqCuG,KAAK+b,EAAM7S,OAAOya,UACvJ,SAEF,MAAMlH,EAAgB,CACpBA,cAAeqT,EAAQvO,UAEN,UAAfxF,EAAMqB,OACRX,EAAciH,WAAa3H,GAE7B+T,EAAQZ,cAAczS,EACxB,CACF,CACA,4BAAOwT,CAAsBlU,GAI3B,MAAMmU,EAAU,kBAAkBlwB,KAAK+b,EAAM7S,OAAOya,SAC9CwM,EAjTW,WAiTKpU,EAAMtiB,IACtB22B,EAAkB,CAAClD,GAAgBC,IAAkBpP,SAAShC,EAAMtiB,KAC1E,IAAK22B,IAAoBD,EACvB,OAEF,GAAID,IAAYC,EACd,OAEFpU,EAAMkD,iBAGN,MAAMoR,EAAkB1T,KAAK+F,QAAQkL,IAA0BjR,KAAO4F,GAAeM,KAAKlG,KAAMiR,IAAwB,IAAMrL,GAAe/gB,KAAKmb,KAAMiR,IAAwB,IAAMrL,GAAeC,QAAQoL,GAAwB7R,EAAMW,eAAehb,YACpPwF,EAAWwnB,GAASzM,oBAAoBoO,GAC9C,GAAID,EAIF,OAHArU,EAAMuU,kBACNppB,EAASmlB,YACTnlB,EAASyoB,gBAAgB5T,GAGvB7U,EAASilB,aAEXpQ,EAAMuU,kBACNppB,EAASklB,OACTiE,EAAgBpB,QAEpB,EAOF/R,GAAac,GAAGhc,SAAUyrB,GAAwBG,GAAwBc,GAASuB,uBACnF/S,GAAac,GAAGhc,SAAUyrB,GAAwBK,GAAeY,GAASuB,uBAC1E/S,GAAac,GAAGhc,SAAUwrB,GAAwBkB,GAASkB,YAC3D1S,GAAac,GAAGhc,SAAU0rB,GAAsBgB,GAASkB,YACzD1S,GAAac,GAAGhc,SAAUwrB,GAAwBI,IAAwB,SAAU7R,GAClFA,EAAMkD,iBACNyP,GAASzM,oBAAoBtF,MAAM0H,QACrC,IAMAvL,GAAmB4V,IAcnB,MAAM6B,GAAS,WAETC,GAAoB,OACpBC,GAAkB,gBAAgBF,KAClCG,GAAY,CAChBC,UAAW,iBACXC,cAAe,KACf7O,YAAY,EACZzK,WAAW,EAEXuZ,YAAa,QAGTC,GAAgB,CACpBH,UAAW,SACXC,cAAe,kBACf7O,WAAY,UACZzK,UAAW,UACXuZ,YAAa,oBAOf,MAAME,W
AAiB3Q,GACrB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKqU,aAAc,EACnBrU,KAAK4E,SAAW,IAClB,CAGA,kBAAWlB,GACT,OAAOqQ,EACT,CACA,sBAAWpQ,GACT,OAAOwQ,EACT,CACA,eAAW5X,GACT,OAAOqX,EACT,CAGA,IAAAlE,CAAKrT,GACH,IAAK2D,KAAK6E,QAAQlK,UAEhB,YADAkC,GAAQR,GAGV2D,KAAKsU,UACL,MAAM/0B,EAAUygB,KAAKuU,cACjBvU,KAAK6E,QAAQO,YACfvJ,GAAOtc,GAETA,EAAQ8b,UAAU5E,IAAIod,IACtB7T,KAAKwU,mBAAkB,KACrB3X,GAAQR,EAAS,GAErB,CACA,IAAAoT,CAAKpT,GACE2D,KAAK6E,QAAQlK,WAIlBqF,KAAKuU,cAAclZ,UAAU1B,OAAOka,IACpC7T,KAAKwU,mBAAkB,KACrBxU,KAAK+E,UACLlI,GAAQR,EAAS,KANjBQ,GAAQR,EAQZ,CACA,OAAA0I,GACO/E,KAAKqU,cAGV9T,GAAaC,IAAIR,KAAK4E,SAAUkP,IAChC9T,KAAK4E,SAASjL,SACdqG,KAAKqU,aAAc,EACrB,CAGA,WAAAE,GACE,IAAKvU,KAAK4E,SAAU,CAClB,MAAM6P,EAAWpvB,SAASqvB,cAAc,OACxCD,EAAST,UAAYhU,KAAK6E,QAAQmP,UAC9BhU,KAAK6E,QAAQO,YACfqP,EAASpZ,UAAU5E,IArFD,QAuFpBuJ,KAAK4E,SAAW6P,CAClB,CACA,OAAOzU,KAAK4E,QACd,CACA,iBAAAZ,CAAkBF,GAGhB,OADAA,EAAOoQ,YAAcxZ,GAAWoJ,EAAOoQ,aAChCpQ,CACT,CACA,OAAAwQ,GACE,GAAItU,KAAKqU,YACP,OAEF,MAAM90B,EAAUygB,KAAKuU,cACrBvU,KAAK6E,QAAQqP,YAAYS,OAAOp1B,GAChCghB,GAAac,GAAG9hB,EAASu0B,IAAiB,KACxCjX,GAAQmD,KAAK6E,QAAQoP,cAAc,IAErCjU,KAAKqU,aAAc,CACrB,CACA,iBAAAG,CAAkBnY,GAChBW,GAAuBX,EAAU2D,KAAKuU,cAAevU,KAAK6E,QAAQO,WACpE,EAeF,MAEMwP,GAAc,gBACdC,GAAkB,UAAUD,KAC5BE,GAAoB,cAAcF,KAGlCG,GAAmB,WACnBC,GAAY,CAChBC,WAAW,EACXC,YAAa,MAGTC,GAAgB,CACpBF,UAAW,UACXC,YAAa,WAOf,MAAME,WAAkB3R,GACtB,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,GAC/B9D,KAAKqV,WAAY,EACjBrV,KAAKsV,qBAAuB,IAC9B,CAGA,kBAAW5R,GACT,OAAOsR,EACT,CACA,sBAAWrR,GACT,OAAOwR,EACT,CACA,eAAW5Y,GACT,MAtCW,WAuCb,CAGA,QAAAgZ,GACMvV,KAAKqV,YAGLrV,KAAK6E,QAAQoQ,WACfjV,KAAK6E,QAAQqQ,YAAY5C,QAE3B/R,GAAaC,IAAInb,SAAUuvB,IAC3BrU,GAAac,GAAGhc,SAAUwvB,IAAiBzV,GAASY,KAAKwV,eAAepW,KACxEmB,GAAac,GAAGhc,SAAUyvB,IAAmB1V,GAASY,KAAKyV,eAAerW,KAC1EY,KAAKqV,WAAY,EACnB,CACA,UAAAK,GACO1V,KAAKqV,YAGVrV,KAAKqV,WAAY,EACjB9U,GAAaC,IAAInb,SAAUuvB,IAC7B,CAGA,cAAAY,CAAepW,GACb,MAAM,YACJ8V,GACElV,KAAK6E,QACT,GAAIzF,EAAM7S,SAAWlH,UAAY+Z,EAAM7S,SAAW2oB,GAAeA,EAAY1w
B,SAAS4a,EAAM7S,QAC1F,OAEF,MAAM1L,EAAW+kB,GAAeU,kBAAkB4O,GAC1B,IAApBr0B,EAAS6P,OACXwkB,EAAY5C,QACHtS,KAAKsV,uBAAyBP,GACvCl0B,EAASA,EAAS6P,OAAS,GAAG4hB,QAE9BzxB,EAAS,GAAGyxB,OAEhB,CACA,cAAAmD,CAAerW,GA1ED,QA2ERA,EAAMtiB,MAGVkjB,KAAKsV,qBAAuBlW,EAAMuW,SAAWZ,GA7EzB,UA8EtB,EAeF,MAAMa,GAAyB,oDACzBC,GAA0B,cAC1BC,GAAmB,gBACnBC,GAAkB,eAMxB,MAAMC,GACJ,WAAA7R,GACEnE,KAAK4E,SAAWvf,SAAS6G,IAC3B,CAGA,QAAA+pB,GAEE,MAAMC,EAAgB7wB,SAASC,gBAAgBuC,YAC/C,OAAO1F,KAAKoC,IAAI3E,OAAOu2B,WAAaD,EACtC,CACA,IAAAzG,GACE,MAAM5rB,EAAQmc,KAAKiW,WACnBjW,KAAKoW,mBAELpW,KAAKqW,sBAAsBrW,KAAK4E,SAAUkR,IAAkBQ,GAAmBA,EAAkBzyB,IAEjGmc,KAAKqW,sBAAsBT,GAAwBE,IAAkBQ,GAAmBA,EAAkBzyB,IAC1Gmc,KAAKqW,sBAAsBR,GAAyBE,IAAiBO,GAAmBA,EAAkBzyB,GAC5G,CACA,KAAAwO,GACE2N,KAAKuW,wBAAwBvW,KAAK4E,SAAU,YAC5C5E,KAAKuW,wBAAwBvW,KAAK4E,SAAUkR,IAC5C9V,KAAKuW,wBAAwBX,GAAwBE,IACrD9V,KAAKuW,wBAAwBV,GAAyBE,GACxD,CACA,aAAAS,GACE,OAAOxW,KAAKiW,WAAa,CAC3B,CAGA,gBAAAG,GACEpW,KAAKyW,sBAAsBzW,KAAK4E,SAAU,YAC1C5E,KAAK4E,SAAS7jB,MAAM+K,SAAW,QACjC,CACA,qBAAAuqB,CAAsBtc,EAAU2c,EAAera,GAC7C,MAAMsa,EAAiB3W,KAAKiW,WAS5BjW,KAAK4W,2BAA2B7c,GARHxa,IAC3B,GAAIA,IAAYygB,KAAK4E,UAAYhlB,OAAOu2B,WAAa52B,EAAQsI,YAAc8uB,EACzE,OAEF3W,KAAKyW,sBAAsBl3B,EAASm3B,GACpC,MAAMJ,EAAkB12B,OAAOqF,iBAAiB1F,GAASub,iBAAiB4b,GAC1En3B,EAAQwB,MAAM81B,YAAYH,EAAe,GAAGra,EAASkB,OAAOC,WAAW8Y,QAAsB,GAGjG,CACA,qBAAAG,CAAsBl3B,EAASm3B,GAC7B,MAAMI,EAAcv3B,EAAQwB,MAAM+Z,iBAAiB4b,GAC/CI,GACF9T,GAAYC,iBAAiB1jB,EAASm3B,EAAeI,EAEzD,CACA,uBAAAP,CAAwBxc,EAAU2c,GAWhC1W,KAAK4W,2BAA2B7c,GAVHxa,IAC3B,MAAM5B,EAAQqlB,GAAYQ,iBAAiBjkB,EAASm3B,GAEtC,OAAV/4B,GAIJqlB,GAAYE,oBAAoB3jB,EAASm3B,GACzCn3B,EAAQwB,MAAM81B,YAAYH,EAAe/4B,IAJvC4B,EAAQwB,MAAMg2B,eAAeL,EAIgB,GAGnD,CACA,0BAAAE,CAA2B7c,EAAUid,GACnC,GAAI,GAAUjd,GACZid,EAASjd,QAGX,IAAK,MAAMkd,KAAOrR,GAAezT,KAAK4H,EAAUiG,KAAK4E,UACnDoS,EAASC,EAEb,EAeF,MAEMC,GAAc,YAGdC,GAAe,OAAOD,KACtBE,GAAyB,gBAAgBF,KACzCG,GAAiB,SAASH,KAC1BI,GAAe,OAAOJ,KACtBK,GAAgB,QAAQL,KACxBM,GAAiB,SAASN,KAC1BO,GAAsB,gBAAgBP,KACtCQ,GAA0B,oBAAoBR,KAC9CS,GAA0B,kBAAkBT,KAC5CU,GAAyB,Q
AAQV,cACjCW,GAAkB,aAElBC,GAAoB,OACpBC,GAAoB,eAKpBC,GAAY,CAChBvD,UAAU,EACVnC,OAAO,EACPzH,UAAU,GAENoN,GAAgB,CACpBxD,SAAU,mBACVnC,MAAO,UACPzH,SAAU,WAOZ,MAAMqN,WAAcxT,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKmY,QAAUvS,GAAeC,QArBV,gBAqBmC7F,KAAK4E,UAC5D5E,KAAKoY,UAAYpY,KAAKqY,sBACtBrY,KAAKsY,WAAatY,KAAKuY,uBACvBvY,KAAKwP,UAAW,EAChBxP,KAAKgP,kBAAmB,EACxBhP,KAAKwY,WAAa,IAAIxC,GACtBhW,KAAK0L,oBACP,CAGA,kBAAWhI,GACT,OAAOsU,EACT,CACA,sBAAWrU,GACT,OAAOsU,EACT,CACA,eAAW1b,GACT,MA1DW,OA2Db,CAGA,MAAAmL,CAAO5H,GACL,OAAOE,KAAKwP,SAAWxP,KAAKyP,OAASzP,KAAK0P,KAAK5P,EACjD,CACA,IAAA4P,CAAK5P,GACCE,KAAKwP,UAAYxP,KAAKgP,kBAGRzO,GAAaqB,QAAQ5B,KAAK4E,SAAU0S,GAAc,CAClExX,kBAEYkC,mBAGdhC,KAAKwP,UAAW,EAChBxP,KAAKgP,kBAAmB,EACxBhP,KAAKwY,WAAW/I,OAChBpqB,SAAS6G,KAAKmP,UAAU5E,IAAIohB,IAC5B7X,KAAKyY,gBACLzY,KAAKoY,UAAU1I,MAAK,IAAM1P,KAAK0Y,aAAa5Y,KAC9C,CACA,IAAA2P,GACOzP,KAAKwP,WAAYxP,KAAKgP,mBAGTzO,GAAaqB,QAAQ5B,KAAK4E,SAAUuS,IACxCnV,mBAGdhC,KAAKwP,UAAW,EAChBxP,KAAKgP,kBAAmB,EACxBhP,KAAKsY,WAAW5C,aAChB1V,KAAK4E,SAASvJ,UAAU1B,OAAOme,IAC/B9X,KAAKmF,gBAAe,IAAMnF,KAAK2Y,cAAc3Y,KAAK4E,SAAU5E,KAAK6N,gBACnE,CACA,OAAA9I,GACExE,GAAaC,IAAI5gB,OAAQs3B,IACzB3W,GAAaC,IAAIR,KAAKmY,QAASjB,IAC/BlX,KAAKoY,UAAUrT,UACf/E,KAAKsY,WAAW5C,aAChB/Q,MAAMI,SACR,CACA,YAAA6T,GACE5Y,KAAKyY,eACP,CAGA,mBAAAJ,GACE,OAAO,IAAIjE,GAAS,CAClBzZ,UAAWmG,QAAQd,KAAK6E,QAAQ4P,UAEhCrP,WAAYpF,KAAK6N,eAErB,CACA,oBAAA0K,GACE,OAAO,IAAInD,GAAU,CACnBF,YAAalV,KAAK4E,UAEtB,CACA,YAAA8T,CAAa5Y,GAENza,SAAS6G,KAAK1H,SAASwb,KAAK4E,WAC/Bvf,SAAS6G,KAAKyoB,OAAO3U,KAAK4E,UAE5B5E,KAAK4E,SAAS7jB,MAAM6wB,QAAU,QAC9B5R,KAAK4E,SAASzjB,gBAAgB,eAC9B6e,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASnZ,UAAY,EAC1B,MAAMotB,EAAYjT,GAAeC,QA7GT,cA6GsC7F,KAAKmY,SAC/DU,IACFA,EAAUptB,UAAY,GAExBoQ,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAIqhB,IAU5B9X,KAAKmF,gBATsB,KACrBnF,KAAK6E,QAAQyN,OACftS,KAAKsY,WAAW/C,WAElBvV,KAAKgP,kBAAmB,EACxBzO,GAAaqB,QAAQ5B,KAAK4E,SAAU2S,GAAe,CACjDzX,iBACA,GAEoCE,KAAKmY,QAASnY,KAAK6N,cAC7D,CACA,
kBAAAnC,GACEnL,GAAac,GAAGrB,KAAK4E,SAAU+S,IAAyBvY,IAhJvC,WAiJXA,EAAMtiB,MAGNkjB,KAAK6E,QAAQgG,SACf7K,KAAKyP,OAGPzP,KAAK8Y,6BAA4B,IAEnCvY,GAAac,GAAGzhB,OAAQ43B,IAAgB,KAClCxX,KAAKwP,WAAaxP,KAAKgP,kBACzBhP,KAAKyY,eACP,IAEFlY,GAAac,GAAGrB,KAAK4E,SAAU8S,IAAyBtY,IAEtDmB,GAAae,IAAItB,KAAK4E,SAAU6S,IAAqBsB,IAC/C/Y,KAAK4E,WAAaxF,EAAM7S,QAAUyT,KAAK4E,WAAamU,EAAOxsB,SAGjC,WAA1ByT,KAAK6E,QAAQ4P,SAIbzU,KAAK6E,QAAQ4P,UACfzU,KAAKyP,OAJLzP,KAAK8Y,6BAKP,GACA,GAEN,CACA,UAAAH,GACE3Y,KAAK4E,SAAS7jB,MAAM6wB,QAAU,OAC9B5R,KAAK4E,SAASxjB,aAAa,eAAe,GAC1C4e,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QAC9B6e,KAAKgP,kBAAmB,EACxBhP,KAAKoY,UAAU3I,MAAK,KAClBpqB,SAAS6G,KAAKmP,UAAU1B,OAAOke,IAC/B7X,KAAKgZ,oBACLhZ,KAAKwY,WAAWnmB,QAChBkO,GAAaqB,QAAQ5B,KAAK4E,SAAUyS,GAAe,GAEvD,CACA,WAAAxJ,GACE,OAAO7N,KAAK4E,SAASvJ,UAAU7W,SAjLT,OAkLxB,CACA,0BAAAs0B,GAEE,GADkBvY,GAAaqB,QAAQ5B,KAAK4E,SAAUwS,IACxCpV,iBACZ,OAEF,MAAMiX,EAAqBjZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3EsxB,EAAmBlZ,KAAK4E,SAAS7jB,MAAMiL,UAEpB,WAArBktB,GAAiClZ,KAAK4E,SAASvJ,UAAU7W,SAASuzB,MAGjEkB,IACHjZ,KAAK4E,SAAS7jB,MAAMiL,UAAY,UAElCgU,KAAK4E,SAASvJ,UAAU5E,IAAIshB,IAC5B/X,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAASvJ,UAAU1B,OAAOoe,IAC/B/X,KAAKmF,gBAAe,KAClBnF,KAAK4E,SAAS7jB,MAAMiL,UAAYktB,CAAgB,GAC/ClZ,KAAKmY,QAAQ,GACfnY,KAAKmY,SACRnY,KAAK4E,SAAS0N,QAChB,CAMA,aAAAmG,GACE,MAAMQ,EAAqBjZ,KAAK4E,SAASvX,aAAehI,SAASC,gBAAgBsC,aAC3E+uB,EAAiB3W,KAAKwY,WAAWvC,WACjCkD,EAAoBxC,EAAiB,EAC3C,GAAIwC,IAAsBF,EAAoB,CAC5C,MAAMn3B,EAAWma,KAAU,cAAgB,eAC3C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAG60B,KACrC,CACA,IAAKwC,GAAqBF,EAAoB,CAC5C,MAAMn3B,EAAWma,KAAU,eAAiB,cAC5C+D,KAAK4E,SAAS7jB,MAAMe,GAAY,GAAG60B,KACrC,CACF,CACA,iBAAAqC,GACEhZ,KAAK4E,SAAS7jB,MAAMq4B,YAAc,GAClCpZ,KAAK4E,SAAS7jB,MAAMs4B,aAAe,EACrC,CAGA,sBAAO5c,CAAgBqH,EAAQhE,GAC7B,OAAOE,KAAKuH,MAAK,WACf,MAAMld,EAAO6tB,GAAM5S,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQhE,EAJb,CAKF,GACF,EAOFS,GAAac,GAAGhc,SAAUuyB,GA9OK,4BA8O2C,SAAUxY,GAClF,MAAM7S,EAASq
Z,GAAec,uBAAuB1G,MACjD,CAAC,IAAK,QAAQoB,SAASpB,KAAKgH,UAC9B5H,EAAMkD,iBAER/B,GAAae,IAAI/U,EAAQ+qB,IAAcgC,IACjCA,EAAUtX,kBAIdzB,GAAae,IAAI/U,EAAQ8qB,IAAgB,KACnC1c,GAAUqF,OACZA,KAAKsS,OACP,GACA,IAIJ,MAAMiH,EAAc3T,GAAeC,QAnQb,eAoQlB0T,GACFrB,GAAM7S,YAAYkU,GAAa9J,OAEpByI,GAAM5S,oBAAoB/Y,GAClCmb,OAAO1H,KACd,IACA4G,GAAqBsR,IAMrB/b,GAAmB+b,IAcnB,MAEMsB,GAAc,gBACdC,GAAiB,YACjBC,GAAwB,OAAOF,KAAcC,KAE7CE,GAAoB,OACpBC,GAAuB,UACvBC,GAAoB,SAEpBC,GAAgB,kBAChBC,GAAe,OAAOP,KACtBQ,GAAgB,QAAQR,KACxBS,GAAe,OAAOT,KACtBU,GAAuB,gBAAgBV,KACvCW,GAAiB,SAASX,KAC1BY,GAAe,SAASZ,KACxBa,GAAyB,QAAQb,KAAcC,KAC/Ca,GAAwB,kBAAkBd,KAE1Ce,GAAY,CAChB9F,UAAU,EACV5J,UAAU,EACVpgB,QAAQ,GAEJ+vB,GAAgB,CACpB/F,SAAU,mBACV5J,SAAU,UACVpgB,OAAQ,WAOV,MAAMgwB,WAAkB/V,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKwP,UAAW,EAChBxP,KAAKoY,UAAYpY,KAAKqY,sBACtBrY,KAAKsY,WAAatY,KAAKuY,uBACvBvY,KAAK0L,oBACP,CAGA,kBAAWhI,GACT,OAAO6W,EACT,CACA,sBAAW5W,GACT,OAAO6W,EACT,CACA,eAAWje,GACT,MApDW,WAqDb,CAGA,MAAAmL,CAAO5H,GACL,OAAOE,KAAKwP,SAAWxP,KAAKyP,OAASzP,KAAK0P,KAAK5P,EACjD,CACA,IAAA4P,CAAK5P,GACCE,KAAKwP,UAGSjP,GAAaqB,QAAQ5B,KAAK4E,SAAUmV,GAAc,CAClEja,kBAEYkC,mBAGdhC,KAAKwP,UAAW,EAChBxP,KAAKoY,UAAU1I,OACV1P,KAAK6E,QAAQpa,SAChB,IAAIurB,IAAkBvG,OAExBzP,KAAK4E,SAASxjB,aAAa,cAAc,GACzC4e,KAAK4E,SAASxjB,aAAa,OAAQ,UACnC4e,KAAK4E,SAASvJ,UAAU5E,IAAImjB,IAW5B5Z,KAAKmF,gBAVoB,KAClBnF,KAAK6E,QAAQpa,SAAUuV,KAAK6E,QAAQ4P,UACvCzU,KAAKsY,WAAW/C,WAElBvV,KAAK4E,SAASvJ,UAAU5E,IAAIkjB,IAC5B3Z,KAAK4E,SAASvJ,UAAU1B,OAAOigB,IAC/BrZ,GAAaqB,QAAQ5B,KAAK4E,SAAUoV,GAAe,CACjDla,iBACA,GAEkCE,KAAK4E,UAAU,GACvD,CACA,IAAA6K,GACOzP,KAAKwP,WAGQjP,GAAaqB,QAAQ5B,KAAK4E,SAAUqV,IACxCjY,mBAGdhC,KAAKsY,WAAW5C,aAChB1V,KAAK4E,SAAS8V,OACd1a,KAAKwP,UAAW,EAChBxP,KAAK4E,SAASvJ,UAAU5E,IAAIojB,IAC5B7Z,KAAKoY,UAAU3I,OAUfzP,KAAKmF,gBAToB,KACvBnF,KAAK4E,SAASvJ,UAAU1B,OAAOggB,GAAmBE,IAClD7Z,KAAK4E,SAASzjB,gBAAgB,cAC9B6e,KAAK4E,SAASzjB,gBAAgB,QACzB6e,KAAK6E,QAAQpa,SAChB,IAAIurB,IAAkB3jB,QAExBkO,GAAaqB,QAAQ5B,KAAK4E,SAAUuV,GAAe,GAEfna,KAAK4E,UAAU,IACvD,CACA,OAAAG,
GACE/E,KAAKoY,UAAUrT,UACf/E,KAAKsY,WAAW5C,aAChB/Q,MAAMI,SACR,CAGA,mBAAAsT,GACE,MASM1d,EAAYmG,QAAQd,KAAK6E,QAAQ4P,UACvC,OAAO,IAAIL,GAAS,CAClBJ,UA3HsB,qBA4HtBrZ,YACAyK,YAAY,EACZ8O,YAAalU,KAAK4E,SAAS7f,WAC3BkvB,cAAetZ,EAfK,KACU,WAA1BqF,KAAK6E,QAAQ4P,SAIjBzU,KAAKyP,OAHHlP,GAAaqB,QAAQ5B,KAAK4E,SAAUsV,GAG3B,EAUgC,MAE/C,CACA,oBAAA3B,GACE,OAAO,IAAInD,GAAU,CACnBF,YAAalV,KAAK4E,UAEtB,CACA,kBAAA8G,GACEnL,GAAac,GAAGrB,KAAK4E,SAAU0V,IAAuBlb,IA5IvC,WA6ITA,EAAMtiB,MAGNkjB,KAAK6E,QAAQgG,SACf7K,KAAKyP,OAGPlP,GAAaqB,QAAQ5B,KAAK4E,SAAUsV,IAAqB,GAE7D,CAGA,sBAAOzd,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOowB,GAAUnV,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KAJb,CAKF,GACF,EAOFO,GAAac,GAAGhc,SAAUg1B,GA7JK,gCA6J2C,SAAUjb,GAClF,MAAM7S,EAASqZ,GAAec,uBAAuB1G,MAIrD,GAHI,CAAC,IAAK,QAAQoB,SAASpB,KAAKgH,UAC9B5H,EAAMkD,iBAEJpH,GAAW8E,MACb,OAEFO,GAAae,IAAI/U,EAAQ4tB,IAAgB,KAEnCxf,GAAUqF,OACZA,KAAKsS,OACP,IAIF,MAAMiH,EAAc3T,GAAeC,QAAQiU,IACvCP,GAAeA,IAAgBhtB,GACjCkuB,GAAUpV,YAAYkU,GAAa9J,OAExBgL,GAAUnV,oBAAoB/Y,GACtCmb,OAAO1H,KACd,IACAO,GAAac,GAAGzhB,OAAQ85B,IAAuB,KAC7C,IAAK,MAAM3f,KAAY6L,GAAezT,KAAK2nB,IACzCW,GAAUnV,oBAAoBvL,GAAU2V,MAC1C,IAEFnP,GAAac,GAAGzhB,OAAQw6B,IAAc,KACpC,IAAK,MAAM76B,KAAWqmB,GAAezT,KAAK,gDACG,UAAvClN,iBAAiB1F,GAASiC,UAC5Bi5B,GAAUnV,oBAAoB/lB,GAASkwB,MAE3C,IAEF7I,GAAqB6T,IAMrBte,GAAmBse,IAUnB,MACME,GAAmB,CAEvB,IAAK,CAAC,QAAS,MAAO,KAAM,OAAQ,OAHP,kBAI7B9pB,EAAG,CAAC,SAAU,OAAQ,QAAS,OAC/B+pB,KAAM,GACN9pB,EAAG,GACH+pB,GAAI,GACJC,IAAK,GACLC,KAAM,GACNC,IAAK,GACLC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJC,GAAI,GACJnqB,EAAG,GACHub,IAAK,CAAC,MAAO,SAAU,MAAO,QAAS,QAAS,UAChD6O,GAAI,GACJC,GAAI,GACJC,EAAG,GACHC,IAAK,GACLC,EAAG,GACHC,MAAO,GACPC,KAAM,GACNC,IAAK,GACLC,IAAK,GACLC,OAAQ,GACRC,EAAG,GACHC,GAAI,IAIAC,GAAgB,IAAI/lB,IAAI,CAAC,aAAc,OAAQ,OAAQ,WAAY,WAAY,SAAU,MAAO,eAShGgmB,GAAmB,0DACnBC,GAAmB,CAACx6B,EAAWy6B,KACnC,MAAMC,EAAgB16B,EAAUvC,SA
ASC,cACzC,OAAI+8B,EAAqBpb,SAASqb,IAC5BJ,GAAc1lB,IAAI8lB,IACb3b,QAAQwb,GAAiBj5B,KAAKtB,EAAU26B,YAM5CF,EAAqBr2B,QAAOw2B,GAAkBA,aAA0BpY,SAAQ9R,MAAKmqB,GAASA,EAAMv5B,KAAKo5B,IAAe,EA0C3HI,GAAY,CAChBC,UAAWnC,GACXoC,QAAS,CAAC,EAEVC,WAAY,GACZnwB,MAAM,EACNowB,UAAU,EACVC,WAAY,KACZC,SAAU,eAENC,GAAgB,CACpBN,UAAW,SACXC,QAAS,SACTC,WAAY,oBACZnwB,KAAM,UACNowB,SAAU,UACVC,WAAY,kBACZC,SAAU,UAENE,GAAqB,CACzBC,MAAO,iCACPvjB,SAAU,oBAOZ,MAAMwjB,WAAwB9Z,GAC5B,WAAAU,CAAYL,GACVa,QACA3E,KAAK6E,QAAU7E,KAAK6D,WAAWC,EACjC,CAGA,kBAAWJ,GACT,OAAOmZ,EACT,CACA,sBAAWlZ,GACT,OAAOyZ,EACT,CACA,eAAW7gB,GACT,MA3CW,iBA4Cb,CAGA,UAAAihB,GACE,OAAOxgC,OAAOmiB,OAAOa,KAAK6E,QAAQkY,SAASj6B,KAAIghB,GAAU9D,KAAKyd,yBAAyB3Z,KAAS3d,OAAO2a,QACzG,CACA,UAAA4c,GACE,OAAO1d,KAAKwd,aAAa9sB,OAAS,CACpC,CACA,aAAAitB,CAAcZ,GAMZ,OALA/c,KAAK4d,cAAcb,GACnB/c,KAAK6E,QAAQkY,QAAU,IAClB/c,KAAK6E,QAAQkY,WACbA,GAEE/c,IACT,CACA,MAAA6d,GACE,MAAMC,EAAkBz4B,SAASqvB,cAAc,OAC/CoJ,EAAgBC,UAAY/d,KAAKge,eAAehe,KAAK6E,QAAQsY,UAC7D,IAAK,MAAOpjB,EAAUkkB,KAASjhC,OAAOmkB,QAAQnB,KAAK6E,QAAQkY,SACzD/c,KAAKke,YAAYJ,EAAiBG,EAAMlkB,GAE1C,MAAMojB,EAAWW,EAAgBhY,SAAS,GACpCkX,EAAahd,KAAKyd,yBAAyBzd,KAAK6E,QAAQmY,YAI9D,OAHIA,GACFG,EAAS9hB,UAAU5E,OAAOumB,EAAW96B,MAAM,MAEtCi7B,CACT,CAGA,gBAAAlZ,CAAiBH,GACfa,MAAMV,iBAAiBH,GACvB9D,KAAK4d,cAAc9Z,EAAOiZ,QAC5B,CACA,aAAAa,CAAcO,GACZ,IAAK,MAAOpkB,EAAUgjB,KAAY//B,OAAOmkB,QAAQgd,GAC/CxZ,MAAMV,iBAAiB,CACrBlK,WACAujB,MAAOP,GACNM,GAEP,CACA,WAAAa,CAAYf,EAAUJ,EAAShjB,GAC7B,MAAMqkB,EAAkBxY,GAAeC,QAAQ9L,EAAUojB,GACpDiB,KAGLrB,EAAU/c,KAAKyd,yBAAyBV,IAKpC,GAAUA,GACZ/c,KAAKqe,sBAAsB3jB,GAAWqiB,GAAUqB,GAG9Cpe,KAAK6E,QAAQhY,KACfuxB,EAAgBL,UAAY/d,KAAKge,eAAejB,GAGlDqB,EAAgBE,YAAcvB,EAX5BqB,EAAgBzkB,SAYpB,CACA,cAAAqkB,CAAeG,GACb,OAAOne,KAAK6E,QAAQoY,SApJxB,SAAsBsB,EAAYzB,EAAW0B,GAC3C,IAAKD,EAAW7tB,OACd,OAAO6tB,EAET,GAAIC,GAAgD,mBAArBA,EAC7B,OAAOA,EAAiBD,GAE1B,MACME,GADY,IAAI7+B,OAAO8+B,WACKC,gBAAgBJ,EAAY,aACxD19B,EAAW,GAAGlC,UAAU8/B,EAAgBvyB,KAAKkU,iBAAiB,MACpE,IAAK,MAAM7gB,KAAWsB,EAAU,CAC9B,MAAM+9B,EAAcr/B,EAAQC,SAASC,cACrC,IAAKzC,OAAO
4D,KAAKk8B,GAAW1b,SAASwd,GAAc,CACjDr/B,EAAQoa,SACR,QACF,CACA,MAAMklB,EAAgB,GAAGlgC,UAAUY,EAAQ0B,YACrC69B,EAAoB,GAAGngC,OAAOm+B,EAAU,MAAQ,GAAIA,EAAU8B,IAAgB,IACpF,IAAK,MAAM78B,KAAa88B,EACjBtC,GAAiBx6B,EAAW+8B,IAC/Bv/B,EAAQ4B,gBAAgBY,EAAUvC,SAGxC,CACA,OAAOi/B,EAAgBvyB,KAAK6xB,SAC9B,CA2HmCgB,CAAaZ,EAAKne,KAAK6E,QAAQiY,UAAW9c,KAAK6E,QAAQqY,YAAciB,CACtG,CACA,wBAAAV,CAAyBU,GACvB,OAAOthB,GAAQshB,EAAK,CAACne,MACvB,CACA,qBAAAqe,CAAsB9+B,EAAS6+B,GAC7B,GAAIpe,KAAK6E,QAAQhY,KAGf,OAFAuxB,EAAgBL,UAAY,QAC5BK,EAAgBzJ,OAAOp1B,GAGzB6+B,EAAgBE,YAAc/+B,EAAQ++B,WACxC,EAeF,MACMU,GAAwB,IAAI1oB,IAAI,CAAC,WAAY,YAAa,eAC1D2oB,GAAoB,OAEpBC,GAAoB,OAEpBC,GAAiB,SACjBC,GAAmB,gBACnBC,GAAgB,QAChBC,GAAgB,QAahBC,GAAgB,CACpBC,KAAM,OACNC,IAAK,MACLC,MAAOzjB,KAAU,OAAS,QAC1B0jB,OAAQ,SACRC,KAAM3jB,KAAU,QAAU,QAEtB4jB,GAAY,CAChB/C,UAAWnC,GACXmF,WAAW,EACX7xB,SAAU,kBACV8xB,WAAW,EACXC,YAAa,GACbC,MAAO,EACPjwB,mBAAoB,CAAC,MAAO,QAAS,SAAU,QAC/CnD,MAAM,EACN7E,OAAQ,CAAC,EAAG,GACZtJ,UAAW,MACXmzB,aAAc,KACdoL,UAAU,EACVC,WAAY,KACZnjB,UAAU,EACVojB,SAAU,+GACV+C,MAAO,GACPte,QAAS,eAELue,GAAgB,CACpBrD,UAAW,SACXgD,UAAW,UACX7xB,SAAU,mBACV8xB,UAAW,2BACXC,YAAa,oBACbC,MAAO,kBACPjwB,mBAAoB,QACpBnD,KAAM,UACN7E,OAAQ,0BACRtJ,UAAW,oBACXmzB,aAAc,yBACdoL,SAAU,UACVC,WAAY,kBACZnjB,SAAU,mBACVojB,SAAU,SACV+C,MAAO,4BACPte,QAAS,UAOX,MAAMwe,WAAgB1b,GACpB,WAAAP,CAAY5kB,EAASukB,GACnB,QAAsB,IAAX,EACT,MAAM,IAAIU,UAAU,+DAEtBG,MAAMplB,EAASukB,GAGf9D,KAAKqgB,YAAa,EAClBrgB,KAAKsgB,SAAW,EAChBtgB,KAAKugB,WAAa,KAClBvgB,KAAKwgB,eAAiB,CAAC,EACvBxgB,KAAKgS,QAAU,KACfhS,KAAKygB,iBAAmB,KACxBzgB,KAAK0gB,YAAc,KAGnB1gB,KAAK2gB,IAAM,KACX3gB,KAAK4gB,gBACA5gB,KAAK6E,QAAQ9K,UAChBiG,KAAK6gB,WAET,CAGA,kBAAWnd,GACT,OAAOmc,EACT,CACA,sBAAWlc,GACT,OAAOwc,EACT,CACA,eAAW5jB,GACT,MAxGW,SAyGb,CAGA,MAAAukB,GACE9gB,KAAKqgB,YAAa,CACpB,CACA,OAAAU,GACE/gB,KAAKqgB,YAAa,CACpB,CACA,aAAAW,GACEhhB,KAAKqgB,YAAcrgB,KAAKqgB,UAC1B,CACA,MAAA3Y,GACO1H,KAAKqgB,aAGVrgB,KAAKwgB,eAAeS,OAASjhB,KAAKwgB,eAAeS,MAC7CjhB,KAAKwP,WACPxP,KAAKkhB,SAGPlhB,KAAKmhB,SACP,CACA,OAAApc,GACEgI,aAAa/M,KAAKsgB,UAClB/f
,GAAaC,IAAIR,KAAK4E,SAAS5J,QAAQmkB,IAAiBC,GAAkBpf,KAAKohB,mBAC3EphB,KAAK4E,SAASpJ,aAAa,2BAC7BwE,KAAK4E,SAASxjB,aAAa,QAAS4e,KAAK4E,SAASpJ,aAAa,2BAEjEwE,KAAKqhB,iBACL1c,MAAMI,SACR,CACA,IAAA2K,GACE,GAAoC,SAAhC1P,KAAK4E,SAAS7jB,MAAM6wB,QACtB,MAAM,IAAIhO,MAAM,uCAElB,IAAM5D,KAAKshB,mBAAoBthB,KAAKqgB,WAClC,OAEF,MAAM/G,EAAY/Y,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAlItD,SAoIX+b,GADa9lB,GAAeuE,KAAK4E,WACL5E,KAAK4E,SAAS9kB,cAAcwF,iBAAiBd,SAASwb,KAAK4E,UAC7F,GAAI0U,EAAUtX,mBAAqBuf,EACjC,OAIFvhB,KAAKqhB,iBACL,MAAMV,EAAM3gB,KAAKwhB,iBACjBxhB,KAAK4E,SAASxjB,aAAa,mBAAoBu/B,EAAInlB,aAAa,OAChE,MAAM,UACJukB,GACE/f,KAAK6E,QAYT,GAXK7E,KAAK4E,SAAS9kB,cAAcwF,gBAAgBd,SAASwb,KAAK2gB,OAC7DZ,EAAUpL,OAAOgM,GACjBpgB,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhJpC,cAkJnBxF,KAAKgS,QAAUhS,KAAKqS,cAAcsO,GAClCA,EAAItlB,UAAU5E,IAAIyoB,IAMd,iBAAkB75B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK4Z,UAC/CvF,GAAac,GAAG9hB,EAAS,YAAaqc,IAU1CoE,KAAKmF,gBAPY,KACf5E,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAhKrC,WAiKQ,IAApBxF,KAAKugB,YACPvgB,KAAKkhB,SAEPlhB,KAAKugB,YAAa,CAAK,GAEKvgB,KAAK2gB,IAAK3gB,KAAK6N,cAC/C,CACA,IAAA4B,GACE,GAAKzP,KAAKwP,aAGQjP,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UA/KtD,SAgLHxD,iBAAd,CAQA,GALYhC,KAAKwhB,iBACbnmB,UAAU1B,OAAOulB,IAIjB,iBAAkB75B,SAASC,gBAC7B,IAAK,MAAM/F,IAAW,GAAGZ,UAAU0G,SAAS6G,KAAK4Z,UAC/CvF,GAAaC,IAAIjhB,EAAS,YAAaqc,IAG3CoE,KAAKwgB,eAA4B,OAAI,EACrCxgB,KAAKwgB,eAAelB,KAAiB,EACrCtf,KAAKwgB,eAAenB,KAAiB,EACrCrf,KAAKugB,WAAa,KAYlBvgB,KAAKmF,gBAVY,KACXnF,KAAKyhB,yBAGJzhB,KAAKugB,YACRvgB,KAAKqhB,iBAEPrhB,KAAK4E,SAASzjB,gBAAgB,oBAC9Bof,GAAaqB,QAAQ5B,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAzMpC,WAyM8D,GAEnDxF,KAAK2gB,IAAK3gB,KAAK6N,cA1B7C,CA2BF,CACA,MAAA9iB,GACMiV,KAAKgS,SACPhS,KAAKgS,QAAQjnB,QAEjB,CAGA,cAAAu2B,GACE,OAAOxgB,QAAQd,KAAK0hB,YACtB,CACA,cAAAF,GAIE,OAHKxhB,KAAK2gB,MACR3gB,KAAK2gB,IAAM3gB,KAAK2hB,kBAAkB3hB,KAAK0gB,aAAe1gB,KAAK4hB,2BAEtD5hB,KAAK2gB,GACd,CACA,iBAAAgB,CAAkB5E,GAChB,MAAM4D,EAAM3gB,KAAK6hB,oBAAoB9E,GAASc,SAG9C,IAAK8C,EACH,OAAO,KAETA,EAAItlB,UAAU1
B,OAAOslB,GAAmBC,IAExCyB,EAAItlB,UAAU5E,IAAI,MAAMuJ,KAAKmE,YAAY5H,aACzC,MAAMulB,EAvuGKC,KACb,GACEA,GAAU5/B,KAAK6/B,MA/BH,IA+BS7/B,KAAK8/B,gBACnB58B,SAAS68B,eAAeH,IACjC,OAAOA,CAAM,EAmuGGI,CAAOniB,KAAKmE,YAAY5H,MAAM1c,WAK5C,OAJA8gC,EAAIv/B,aAAa,KAAM0gC,GACnB9hB,KAAK6N,eACP8S,EAAItlB,UAAU5E,IAAIwoB,IAEb0B,CACT,CACA,UAAAyB,CAAWrF,GACT/c,KAAK0gB,YAAc3D,EACf/c,KAAKwP,aACPxP,KAAKqhB,iBACLrhB,KAAK0P,OAET,CACA,mBAAAmS,CAAoB9E,GAYlB,OAXI/c,KAAKygB,iBACPzgB,KAAKygB,iBAAiB9C,cAAcZ,GAEpC/c,KAAKygB,iBAAmB,IAAIlD,GAAgB,IACvCvd,KAAK6E,QAGRkY,UACAC,WAAYhd,KAAKyd,yBAAyBzd,KAAK6E,QAAQmb,eAGpDhgB,KAAKygB,gBACd,CACA,sBAAAmB,GACE,MAAO,CACL,iBAA0B5hB,KAAK0hB,YAEnC,CACA,SAAAA,GACE,OAAO1hB,KAAKyd,yBAAyBzd,KAAK6E,QAAQqb,QAAUlgB,KAAK4E,SAASpJ,aAAa,yBACzF,CAGA,4BAAA6mB,CAA6BjjB,GAC3B,OAAOY,KAAKmE,YAAYmB,oBAAoBlG,EAAMW,eAAgBC,KAAKsiB,qBACzE,CACA,WAAAzU,GACE,OAAO7N,KAAK6E,QAAQib,WAAa9f,KAAK2gB,KAAO3gB,KAAK2gB,IAAItlB,UAAU7W,SAASy6B,GAC3E,CACA,QAAAzP,GACE,OAAOxP,KAAK2gB,KAAO3gB,KAAK2gB,IAAItlB,UAAU7W,SAAS06B,GACjD,CACA,aAAA7M,CAAcsO,GACZ,MAAMjiC,EAAYme,GAAQmD,KAAK6E,QAAQnmB,UAAW,CAACshB,KAAM2gB,EAAK3gB,KAAK4E,WAC7D2d,EAAahD,GAAc7gC,EAAU+lB,eAC3C,OAAO,GAAoBzE,KAAK4E,SAAU+b,EAAK3gB,KAAKyS,iBAAiB8P,GACvE,CACA,UAAA1P,GACE,MAAM,OACJ7qB,GACEgY,KAAK6E,QACT,MAAsB,iBAAX7c,EACFA,EAAO9F,MAAM,KAAKY,KAAInF,GAAS4f,OAAO6P,SAASzvB,EAAO,MAEzC,mBAAXqK,EACF8qB,GAAc9qB,EAAO8qB,EAAY9S,KAAK4E,UAExC5c,CACT,CACA,wBAAAy1B,CAAyBU,GACvB,OAAOthB,GAAQshB,EAAK,CAACne,KAAK4E,UAC5B,CACA,gBAAA6N,CAAiB8P,GACf,MAAMxP,EAAwB,CAC5Br0B,UAAW6jC,EACXnsB,UAAW,CAAC,CACV9V,KAAM,OACNmB,QAAS,CACPuO,mBAAoBgQ,KAAK6E,QAAQ7U,qBAElC,CACD1P,KAAM,SACNmB,QAAS,CACPuG,OAAQgY,KAAK6S,eAEd,CACDvyB,KAAM,kBACNmB,QAAS,CACPwM,SAAU+R,KAAK6E,QAAQ5W,WAExB,CACD3N,KAAM,QACNmB,QAAS,CACPlC,QAAS,IAAIygB,KAAKmE,YAAY5H,eAE/B,CACDjc,KAAM,kBACNC,SAAS,EACTC,MAAO,aACPC,GAAI4J,IAGF2V,KAAKwhB,iBAAiBpgC,aAAa,wBAAyBiJ,EAAK1J,MAAMjC,UAAU,KAIvF,MAAO,IACFq0B,KACAlW,GAAQmD,KAAK6E,QAAQgN,aAAc,CAACkB,IAE3C,CACA,aAAA6N,GACE,MAAM4B,EAAWxiB,KAAK6E,QAAQjD,QAAQ1f,MAAM,KAC5C,IAAK,MAAM0f,
KAAW4gB,EACpB,GAAgB,UAAZ5gB,EACFrB,GAAac,GAAGrB,KAAK4E,SAAU5E,KAAKmE,YAAYqB,UAjVlC,SAiV4DxF,KAAK6E,QAAQ9K,UAAUqF,IAC/EY,KAAKqiB,6BAA6BjjB,GAC1CsI,QAAQ,SAEb,GA3VU,WA2VN9F,EAA4B,CACrC,MAAM6gB,EAAU7gB,IAAYyd,GAAgBrf,KAAKmE,YAAYqB,UAnV5C,cAmV0ExF,KAAKmE,YAAYqB,UArV5F,WAsVVkd,EAAW9gB,IAAYyd,GAAgBrf,KAAKmE,YAAYqB,UAnV7C,cAmV2ExF,KAAKmE,YAAYqB,UArV5F,YAsVjBjF,GAAac,GAAGrB,KAAK4E,SAAU6d,EAASziB,KAAK6E,QAAQ9K,UAAUqF,IAC7D,MAAM+T,EAAUnT,KAAKqiB,6BAA6BjjB,GAClD+T,EAAQqN,eAA8B,YAAfphB,EAAMqB,KAAqB6e,GAAgBD,KAAiB,EACnFlM,EAAQgO,QAAQ,IAElB5gB,GAAac,GAAGrB,KAAK4E,SAAU8d,EAAU1iB,KAAK6E,QAAQ9K,UAAUqF,IAC9D,MAAM+T,EAAUnT,KAAKqiB,6BAA6BjjB,GAClD+T,EAAQqN,eAA8B,aAAfphB,EAAMqB,KAAsB6e,GAAgBD,IAAiBlM,EAAQvO,SAASpgB,SAAS4a,EAAMU,eACpHqT,EAAQ+N,QAAQ,GAEpB,CAEFlhB,KAAKohB,kBAAoB,KACnBphB,KAAK4E,UACP5E,KAAKyP,MACP,EAEFlP,GAAac,GAAGrB,KAAK4E,SAAS5J,QAAQmkB,IAAiBC,GAAkBpf,KAAKohB,kBAChF,CACA,SAAAP,GACE,MAAMX,EAAQlgB,KAAK4E,SAASpJ,aAAa,SACpC0kB,IAGAlgB,KAAK4E,SAASpJ,aAAa,eAAkBwE,KAAK4E,SAAS0Z,YAAY3Y,QAC1E3F,KAAK4E,SAASxjB,aAAa,aAAc8+B,GAE3ClgB,KAAK4E,SAASxjB,aAAa,yBAA0B8+B,GACrDlgB,KAAK4E,SAASzjB,gBAAgB,SAChC,CACA,MAAAggC,GACMnhB,KAAKwP,YAAcxP,KAAKugB,WAC1BvgB,KAAKugB,YAAa,GAGpBvgB,KAAKugB,YAAa,EAClBvgB,KAAK2iB,aAAY,KACX3iB,KAAKugB,YACPvgB,KAAK0P,MACP,GACC1P,KAAK6E,QAAQob,MAAMvQ,MACxB,CACA,MAAAwR,GACMlhB,KAAKyhB,yBAGTzhB,KAAKugB,YAAa,EAClBvgB,KAAK2iB,aAAY,KACV3iB,KAAKugB,YACRvgB,KAAKyP,MACP,GACCzP,KAAK6E,QAAQob,MAAMxQ,MACxB,CACA,WAAAkT,CAAY/kB,EAASglB,GACnB7V,aAAa/M,KAAKsgB,UAClBtgB,KAAKsgB,SAAWziB,WAAWD,EAASglB,EACtC,CACA,oBAAAnB,GACE,OAAOzkC,OAAOmiB,OAAOa,KAAKwgB,gBAAgBpf,UAAS,EACrD,CACA,UAAAyC,CAAWC,GACT,MAAM+e,EAAiB7f,GAAYG,kBAAkBnD,KAAK4E,UAC1D,IAAK,MAAMke,KAAiB9lC,OAAO4D,KAAKiiC,GAClC7D,GAAsBroB,IAAImsB,WACrBD,EAAeC,GAU1B,OAPAhf,EAAS,IACJ+e,KACmB,iBAAX/e,GAAuBA,EAASA,EAAS,CAAC,GAEvDA,EAAS9D,KAAK+D,gBAAgBD,GAC9BA,EAAS9D,KAAKgE,kBAAkBF,GAChC9D,KAAKiE,iBAAiBH,GACfA,CACT,CACA,iBAAAE,CAAkBF,GAchB,OAbAA,EAAOic,WAAiC,IAArBjc,EAAOic,UAAsB16B,SAAS6G,KAAOwO,GAAWoJ,EAAOic,WACtD,iBAAjBjc,EAAOmc,QAChB
nc,EAAOmc,MAAQ,CACbvQ,KAAM5L,EAAOmc,MACbxQ,KAAM3L,EAAOmc,QAGW,iBAAjBnc,EAAOoc,QAChBpc,EAAOoc,MAAQpc,EAAOoc,MAAMrgC,YAEA,iBAAnBikB,EAAOiZ,UAChBjZ,EAAOiZ,QAAUjZ,EAAOiZ,QAAQl9B,YAE3BikB,CACT,CACA,kBAAAwe,GACE,MAAMxe,EAAS,CAAC,EAChB,IAAK,MAAOhnB,EAAKa,KAAUX,OAAOmkB,QAAQnB,KAAK6E,SACzC7E,KAAKmE,YAAYT,QAAQ5mB,KAASa,IACpCmmB,EAAOhnB,GAAOa,GASlB,OANAmmB,EAAO/J,UAAW,EAClB+J,EAAOlC,QAAU,SAKVkC,CACT,CACA,cAAAud,GACMrhB,KAAKgS,UACPhS,KAAKgS,QAAQhZ,UACbgH,KAAKgS,QAAU,MAEbhS,KAAK2gB,MACP3gB,KAAK2gB,IAAIhnB,SACTqG,KAAK2gB,IAAM,KAEf,CAGA,sBAAOlkB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO+1B,GAAQ9a,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmBikB,IAcnB,MAGM2C,GAAY,IACb3C,GAAQ1c,QACXqZ,QAAS,GACT/0B,OAAQ,CAAC,EAAG,GACZtJ,UAAW,QACXy+B,SAAU,8IACVvb,QAAS,SAELohB,GAAgB,IACjB5C,GAAQzc,YACXoZ,QAAS,kCAOX,MAAMkG,WAAgB7C,GAEpB,kBAAW1c,GACT,OAAOqf,EACT,CACA,sBAAWpf,GACT,OAAOqf,EACT,CACA,eAAWzmB,GACT,MA7BW,SA8Bb,CAGA,cAAA+kB,GACE,OAAOthB,KAAK0hB,aAAe1hB,KAAKkjB,aAClC,CAGA,sBAAAtB,GACE,MAAO,CACL,kBAAkB5hB,KAAK0hB,YACvB,gBAAoB1hB,KAAKkjB,cAE7B,CACA,WAAAA,GACE,OAAOljB,KAAKyd,yBAAyBzd,KAAK6E,QAAQkY,QACpD,CAGA,sBAAOtgB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO44B,GAAQ3d,oBAAoBtF,KAAM8D,GAC/C,GAAsB,iBAAXA,EAAX,CAGA,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOF3H,GAAmB8mB,IAcnB,MAEME,GAAc,gBAEdC,GAAiB,WAAWD,KAC5BE,GAAc,QAAQF,KACtBG,GAAwB,OAAOH,cAE/BI,GAAsB,SAEtBC,GAAwB,SAExBC,GAAqB,YAGrBC,GAAsB,GAAGD,mBAA+CA,uBAGxEE,GAAY,CAChB37B,OAAQ,KAER47B,WAAY,eACZC,cAAc,EACdt3B,OAAQ,KACRu3B,UAAW,CAAC,GAAK,GAAK,IAElBC,GAAgB,CACpB/7B,OAAQ,gBAER47B,WAAY,SACZC,aAAc,UACdt3B,OAAQ,UACRu3B,UAAW,SAOb,MAAME,WAAkBtf,GACtB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GAGf9D,KAAKikB,aAAe,IAAI/yB,IACxB8O,KAAKkkB,oBAAsB,IAAIhzB,IAC/B8O,KAAKmkB,aAA6D,YAA9Cl/B,iBAAiB+a,KAAK4E,UAAU5Y,UAA0B,KAAOgU,KAAK4E,SAC1F5E,KAAKokB,cAAgB,KACrBpkB,KAAKqkB,UAAY,KACjBrkB,KAAKskB,oBAAsB,CACzBC,gBAAiB,EAC
jBC,gBAAiB,GAEnBxkB,KAAKykB,SACP,CAGA,kBAAW/gB,GACT,OAAOigB,EACT,CACA,sBAAWhgB,GACT,OAAOogB,EACT,CACA,eAAWxnB,GACT,MAhEW,WAiEb,CAGA,OAAAkoB,GACEzkB,KAAK0kB,mCACL1kB,KAAK2kB,2BACD3kB,KAAKqkB,UACPrkB,KAAKqkB,UAAUO,aAEf5kB,KAAKqkB,UAAYrkB,KAAK6kB,kBAExB,IAAK,MAAMC,KAAW9kB,KAAKkkB,oBAAoB/kB,SAC7Ca,KAAKqkB,UAAUU,QAAQD,EAE3B,CACA,OAAA/f,GACE/E,KAAKqkB,UAAUO,aACfjgB,MAAMI,SACR,CAGA,iBAAAf,CAAkBF,GAShB,OAPAA,EAAOvX,OAASmO,GAAWoJ,EAAOvX,SAAWlH,SAAS6G,KAGtD4X,EAAO8f,WAAa9f,EAAO9b,OAAS,GAAG8b,EAAO9b,oBAAsB8b,EAAO8f,WAC3C,iBAArB9f,EAAOggB,YAChBhgB,EAAOggB,UAAYhgB,EAAOggB,UAAU5hC,MAAM,KAAKY,KAAInF,GAAS4f,OAAOC,WAAW7f,MAEzEmmB,CACT,CACA,wBAAA6gB,GACO3kB,KAAK6E,QAAQgf,eAKlBtjB,GAAaC,IAAIR,KAAK6E,QAAQtY,OAAQ82B,IACtC9iB,GAAac,GAAGrB,KAAK6E,QAAQtY,OAAQ82B,GAAaG,IAAuBpkB,IACvE,MAAM4lB,EAAoBhlB,KAAKkkB,oBAAoB/mC,IAAIiiB,EAAM7S,OAAOtB,MACpE,GAAI+5B,EAAmB,CACrB5lB,EAAMkD,iBACN,MAAM3G,EAAOqE,KAAKmkB,cAAgBvkC,OAC5BmE,EAASihC,EAAkB3gC,UAAY2b,KAAK4E,SAASvgB,UAC3D,GAAIsX,EAAKspB,SAKP,YAJAtpB,EAAKspB,SAAS,CACZtjC,IAAKoC,EACLmhC,SAAU,WAMdvpB,EAAKlQ,UAAY1H,CACnB,KAEJ,CACA,eAAA8gC,GACE,MAAMpjC,EAAU,CACdka,KAAMqE,KAAKmkB,aACXL,UAAW9jB,KAAK6E,QAAQif,UACxBF,WAAY5jB,KAAK6E,QAAQ+e,YAE3B,OAAO,IAAIuB,sBAAqBhkB,GAAWnB,KAAKolB,kBAAkBjkB,IAAU1f,EAC9E,CAGA,iBAAA2jC,CAAkBjkB,GAChB,MAAMkkB,EAAgB/H,GAAStd,KAAKikB,aAAa9mC,IAAI,IAAImgC,EAAM/wB,OAAO4N,MAChEob,EAAW+H,IACftd,KAAKskB,oBAAoBC,gBAAkBjH,EAAM/wB,OAAOlI,UACxD2b,KAAKslB,SAASD,EAAc/H,GAAO,EAE/BkH,GAAmBxkB,KAAKmkB,cAAgB9+B,SAASC,iBAAiBmG,UAClE85B,EAAkBf,GAAmBxkB,KAAKskB,oBAAoBE,gBACpExkB,KAAKskB,oBAAoBE,gBAAkBA,EAC3C,IAAK,MAAMlH,KAASnc,EAAS,CAC3B,IAAKmc,EAAMkI,eAAgB,CACzBxlB,KAAKokB,cAAgB,KACrBpkB,KAAKylB,kBAAkBJ,EAAc/H,IACrC,QACF,CACA,MAAMoI,EAA2BpI,EAAM/wB,OAAOlI,WAAa2b,KAAKskB,oBAAoBC,gBAEpF,GAAIgB,GAAmBG,GAGrB,GAFAnQ,EAAS+H,IAEJkH,EACH,YAMCe,GAAoBG,GACvBnQ,EAAS+H,EAEb,CACF,CACA,gCAAAoH,GACE1kB,KAAKikB,aAAe,IAAI/yB,IACxB8O,KAAKkkB,oBAAsB,IAAIhzB,IAC/B,MAAMy0B,EAAc/f,GAAezT,KAAKqxB,GAAuBxjB,KAAK6E,QAAQtY,QAC5E,IAAK,MAAMq5B,KAAUD,EAAa,CAEhC,IAAKC,EAAO36B,
MAAQiQ,GAAW0qB,GAC7B,SAEF,MAAMZ,EAAoBpf,GAAeC,QAAQggB,UAAUD,EAAO36B,MAAO+U,KAAK4E,UAG1EjK,GAAUqqB,KACZhlB,KAAKikB,aAAalyB,IAAI8zB,UAAUD,EAAO36B,MAAO26B,GAC9C5lB,KAAKkkB,oBAAoBnyB,IAAI6zB,EAAO36B,KAAM+5B,GAE9C,CACF,CACA,QAAAM,CAAS/4B,GACHyT,KAAKokB,gBAAkB73B,IAG3ByT,KAAKylB,kBAAkBzlB,KAAK6E,QAAQtY,QACpCyT,KAAKokB,cAAgB73B,EACrBA,EAAO8O,UAAU5E,IAAI8sB,IACrBvjB,KAAK8lB,iBAAiBv5B,GACtBgU,GAAaqB,QAAQ5B,KAAK4E,SAAUwe,GAAgB,CAClDtjB,cAAevT,IAEnB,CACA,gBAAAu5B,CAAiBv5B,GAEf,GAAIA,EAAO8O,UAAU7W,SA9LQ,iBA+L3BohB,GAAeC,QArLc,mBAqLsBtZ,EAAOyO,QAtLtC,cAsLkEK,UAAU5E,IAAI8sB,SAGtG,IAAK,MAAMwC,KAAangB,GAAeI,QAAQzZ,EA9LnB,qBAiM1B,IAAK,MAAMxJ,KAAQ6iB,GAAeM,KAAK6f,EAAWrC,IAChD3gC,EAAKsY,UAAU5E,IAAI8sB,GAGzB,CACA,iBAAAkC,CAAkBhhC,GAChBA,EAAO4W,UAAU1B,OAAO4pB,IACxB,MAAMyC,EAAcpgB,GAAezT,KAAK,GAAGqxB,MAAyBD,KAAuB9+B,GAC3F,IAAK,MAAM9E,KAAQqmC,EACjBrmC,EAAK0b,UAAU1B,OAAO4pB,GAE1B,CAGA,sBAAO9mB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAO25B,GAAU1e,oBAAoBtF,KAAM8D,GACjD,GAAsB,iBAAXA,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGzhB,OAAQ0jC,IAAuB,KAC7C,IAAK,MAAM2C,KAAOrgB,GAAezT,KApOT,0BAqOtB6xB,GAAU1e,oBAAoB2gB,EAChC,IAOF9pB,GAAmB6nB,IAcnB,MAEMkC,GAAc,UACdC,GAAe,OAAOD,KACtBE,GAAiB,SAASF,KAC1BG,GAAe,OAAOH,KACtBI,GAAgB,QAAQJ,KACxBK,GAAuB,QAAQL,KAC/BM,GAAgB,UAAUN,KAC1BO,GAAsB,OAAOP,KAC7BQ,GAAiB,YACjBC,GAAkB,aAClBC,GAAe,UACfC,GAAiB,YACjBC,GAAW,OACXC,GAAU,MACVC,GAAoB,SACpBC,GAAoB,OACpBC,GAAoB,OAEpBC,GAA2B,mBAE3BC,GAA+B,QAAQD,MAIvCE,GAAuB,2EACvBC,GAAsB,YAFOF,uBAAiDA,mBAA6CA,OAE/EC,KAC5CE,GAA8B,IAAIP,8BAA6CA,+BAA8CA,4BAMnI,MAAMQ,WAAY9iB,GAChB,WAAAP,CAAY5kB,GACVolB,MAAMplB,GACNygB,KAAKiS,QAAUjS,KAAK4E,SAAS5J,QAdN,uCAelBgF,KAAKiS,UAOVjS,KAAKynB,sBAAsBznB,KAAKiS,QAASjS,KAAK0nB,gBAC9CnnB,GAAac,GAAGrB,KAAK4E,SAAU4hB,IAAepnB,GAASY,KAAK0M,SAAStN,KACvE,CAGA,eAAW7C,GACT,MAnDW,KAoDb,CAGA,IAAAmT,GAEE,MAAMiY,EAAY3nB,KAAK4E,SACvB,GAAI5E,KAAK4nB,cAAcD,GACrB,OAIF,MAAME,EAAS7nB,KAAK8nB,iBACdC,EAAYF,EAAStnB,GAAaqB
,QAAQimB,EAAQ1B,GAAc,CACpErmB,cAAe6nB,IACZ,KACapnB,GAAaqB,QAAQ+lB,EAAWtB,GAAc,CAC9DvmB,cAAe+nB,IAEH7lB,kBAAoB+lB,GAAaA,EAAU/lB,mBAGzDhC,KAAKgoB,YAAYH,EAAQF,GACzB3nB,KAAKioB,UAAUN,EAAWE,GAC5B,CAGA,SAAAI,CAAU1oC,EAAS2oC,GACZ3oC,IAGLA,EAAQ8b,UAAU5E,IAAIuwB,IACtBhnB,KAAKioB,UAAUriB,GAAec,uBAAuBnnB,IAcrDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ4B,gBAAgB,YACxB5B,EAAQ6B,aAAa,iBAAiB,GACtC4e,KAAKmoB,gBAAgB5oC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAAS+mC,GAAe,CAC3CxmB,cAAeooB,KAPf3oC,EAAQ8b,UAAU5E,IAAIywB,GAQtB,GAE0B3nC,EAASA,EAAQ8b,UAAU7W,SAASyiC,KACpE,CACA,WAAAe,CAAYzoC,EAAS2oC,GACd3oC,IAGLA,EAAQ8b,UAAU1B,OAAOqtB,IACzBznC,EAAQm7B,OACR1a,KAAKgoB,YAAYpiB,GAAec,uBAAuBnnB,IAcvDygB,KAAKmF,gBAZY,KACsB,QAAjC5lB,EAAQic,aAAa,SAIzBjc,EAAQ6B,aAAa,iBAAiB,GACtC7B,EAAQ6B,aAAa,WAAY,MACjC4e,KAAKmoB,gBAAgB5oC,GAAS,GAC9BghB,GAAaqB,QAAQriB,EAAS6mC,GAAgB,CAC5CtmB,cAAeooB,KAPf3oC,EAAQ8b,UAAU1B,OAAOutB,GAQzB,GAE0B3nC,EAASA,EAAQ8b,UAAU7W,SAASyiC,KACpE,CACA,QAAAva,CAAStN,GACP,IAAK,CAACsnB,GAAgBC,GAAiBC,GAAcC,GAAgBC,GAAUC,IAAS3lB,SAAShC,EAAMtiB,KACrG,OAEFsiB,EAAMuU,kBACNvU,EAAMkD,iBACN,MAAMwD,EAAW9F,KAAK0nB,eAAevhC,QAAO5G,IAAY2b,GAAW3b,KACnE,IAAI6oC,EACJ,GAAI,CAACtB,GAAUC,IAAS3lB,SAAShC,EAAMtiB,KACrCsrC,EAAoBtiB,EAAS1G,EAAMtiB,MAAQgqC,GAAW,EAAIhhB,EAASpV,OAAS,OACvE,CACL,MAAM2c,EAAS,CAACsZ,GAAiBE,IAAgBzlB,SAAShC,EAAMtiB,KAChEsrC,EAAoBtqB,GAAqBgI,EAAU1G,EAAM7S,OAAQ8gB,GAAQ,EAC3E,CACI+a,IACFA,EAAkB9V,MAAM,CACtB+V,eAAe,IAEjBb,GAAIliB,oBAAoB8iB,GAAmB1Y,OAE/C,CACA,YAAAgY,GAEE,OAAO9hB,GAAezT,KAAKm1B,GAAqBtnB,KAAKiS,QACvD,CACA,cAAA6V,GACE,OAAO9nB,KAAK0nB,eAAev1B,MAAKzN,GAASsb,KAAK4nB,cAAcljC,MAAW,IACzE,CACA,qBAAA+iC,CAAsBhjC,EAAQqhB,GAC5B9F,KAAKsoB,yBAAyB7jC,EAAQ,OAAQ,WAC9C,IAAK,MAAMC,KAASohB,EAClB9F,KAAKuoB,6BAA6B7jC,EAEtC,CACA,4BAAA6jC,CAA6B7jC,GAC3BA,EAAQsb,KAAKwoB,iBAAiB9jC,GAC9B,MAAM+jC,EAAWzoB,KAAK4nB,cAAcljC,GAC9BgkC,EAAY1oB,KAAK2oB,iBAAiBjkC,GACxCA,EAAMtD,aAAa,gBAAiBqnC,GAChCC,IAAchkC,GAChBsb,KAAKsoB,yBAAyBI,EAAW,OAAQ,gBAE9CD,GACH/jC,EAAMtD,aAAa,WAAY,MAEjC4e,KAAKsoB,yBAAyB5jC,EAAO,OAAQ,OAG7C
sb,KAAK4oB,mCAAmClkC,EAC1C,CACA,kCAAAkkC,CAAmClkC,GACjC,MAAM6H,EAASqZ,GAAec,uBAAuBhiB,GAChD6H,IAGLyT,KAAKsoB,yBAAyB/7B,EAAQ,OAAQ,YAC1C7H,EAAMyV,IACR6F,KAAKsoB,yBAAyB/7B,EAAQ,kBAAmB,GAAG7H,EAAMyV,MAEtE,CACA,eAAAguB,CAAgB5oC,EAASspC,GACvB,MAAMH,EAAY1oB,KAAK2oB,iBAAiBppC,GACxC,IAAKmpC,EAAUrtB,UAAU7W,SApKN,YAqKjB,OAEF,MAAMkjB,EAAS,CAAC3N,EAAUia,KACxB,MAAMz0B,EAAUqmB,GAAeC,QAAQ9L,EAAU2uB,GAC7CnpC,GACFA,EAAQ8b,UAAUqM,OAAOsM,EAAW6U,EACtC,EAEFnhB,EAAOyf,GAA0BH,IACjCtf,EA5K2B,iBA4KIwf,IAC/BwB,EAAUtnC,aAAa,gBAAiBynC,EAC1C,CACA,wBAAAP,CAAyB/oC,EAASwC,EAAWpE,GACtC4B,EAAQgc,aAAaxZ,IACxBxC,EAAQ6B,aAAaW,EAAWpE,EAEpC,CACA,aAAAiqC,CAAczY,GACZ,OAAOA,EAAK9T,UAAU7W,SAASwiC,GACjC,CAGA,gBAAAwB,CAAiBrZ,GACf,OAAOA,EAAKpJ,QAAQuhB,IAAuBnY,EAAOvJ,GAAeC,QAAQyhB,GAAqBnY,EAChG,CAGA,gBAAAwZ,CAAiBxZ,GACf,OAAOA,EAAKnU,QA5LO,gCA4LoBmU,CACzC,CAGA,sBAAO1S,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOm9B,GAAIliB,oBAAoBtF,MACrC,GAAsB,iBAAX8D,EAAX,CAGA,QAAqB/K,IAAjB1O,EAAKyZ,IAAyBA,EAAOrC,WAAW,MAAmB,gBAAXqC,EAC1D,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,IAJL,CAKF,GACF,EAOFvD,GAAac,GAAGhc,SAAUkhC,GAAsBc,IAAsB,SAAUjoB,GAC1E,CAAC,IAAK,QAAQgC,SAASpB,KAAKgH,UAC9B5H,EAAMkD,iBAEJpH,GAAW8E,OAGfwnB,GAAIliB,oBAAoBtF,MAAM0P,MAChC,IAKAnP,GAAac,GAAGzhB,OAAQ6mC,IAAqB,KAC3C,IAAK,MAAMlnC,KAAWqmB,GAAezT,KAAKo1B,IACxCC,GAAIliB,oBAAoB/lB,EAC1B,IAMF4c,GAAmBqrB,IAcnB,MAEMxiB,GAAY,YACZ8jB,GAAkB,YAAY9jB,KAC9B+jB,GAAiB,WAAW/jB,KAC5BgkB,GAAgB,UAAUhkB,KAC1BikB,GAAiB,WAAWjkB,KAC5BkkB,GAAa,OAAOlkB,KACpBmkB,GAAe,SAASnkB,KACxBokB,GAAa,OAAOpkB,KACpBqkB,GAAc,QAAQrkB,KAEtBskB,GAAkB,OAClBC,GAAkB,OAClBC,GAAqB,UACrB7lB,GAAc,CAClBmc,UAAW,UACX2J,SAAU,UACVxJ,MAAO,UAEHvc,GAAU,CACdoc,WAAW,EACX2J,UAAU,EACVxJ,MAAO,KAOT,MAAMyJ,WAAchlB,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKsgB,SAAW,KAChBtgB,KAAK2pB,sBAAuB,EAC5B3pB,KAAK4pB,yBAA0B,EAC/B5pB,KAAK4gB,eACP,CAGA,kBAAWld,GACT,OAAOA,EACT,CACA,sBAAWC,GACT,OAAOA,EACT,CACA,eAAWpH,GACT,MA/CS,OAgDX,CAGA,IAAAmT,GACoBnP,GAAaqB,QAAQ5B,KAAK4E,SAAUwkB,IACxCpnB,mBAGdhC,KAAK6pB,gBACD7pB,KAAK6E
,QAAQib,WACf9f,KAAK4E,SAASvJ,UAAU5E,IA/CN,QAsDpBuJ,KAAK4E,SAASvJ,UAAU1B,OAAO2vB,IAC/BztB,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAI8yB,GAAiBC,IAC7CxpB,KAAKmF,gBARY,KACfnF,KAAK4E,SAASvJ,UAAU1B,OAAO6vB,IAC/BjpB,GAAaqB,QAAQ5B,KAAK4E,SAAUykB,IACpCrpB,KAAK8pB,oBAAoB,GAKG9pB,KAAK4E,SAAU5E,KAAK6E,QAAQib,WAC5D,CACA,IAAArQ,GACOzP,KAAK+pB,YAGQxpB,GAAaqB,QAAQ5B,KAAK4E,SAAUskB,IACxClnB,mBAQdhC,KAAK4E,SAASvJ,UAAU5E,IAAI+yB,IAC5BxpB,KAAKmF,gBANY,KACfnF,KAAK4E,SAASvJ,UAAU5E,IAAI6yB,IAC5BtpB,KAAK4E,SAASvJ,UAAU1B,OAAO6vB,GAAoBD,IACnDhpB,GAAaqB,QAAQ5B,KAAK4E,SAAUukB,GAAa,GAGrBnpB,KAAK4E,SAAU5E,KAAK6E,QAAQib,YAC5D,CACA,OAAA/a,GACE/E,KAAK6pB,gBACD7pB,KAAK+pB,WACP/pB,KAAK4E,SAASvJ,UAAU1B,OAAO4vB,IAEjC5kB,MAAMI,SACR,CACA,OAAAglB,GACE,OAAO/pB,KAAK4E,SAASvJ,UAAU7W,SAAS+kC,GAC1C,CAIA,kBAAAO,GACO9pB,KAAK6E,QAAQ4kB,WAGdzpB,KAAK2pB,sBAAwB3pB,KAAK4pB,0BAGtC5pB,KAAKsgB,SAAWziB,YAAW,KACzBmC,KAAKyP,MAAM,GACVzP,KAAK6E,QAAQob,QAClB,CACA,cAAA+J,CAAe5qB,EAAO6qB,GACpB,OAAQ7qB,EAAMqB,MACZ,IAAK,YACL,IAAK,WAEDT,KAAK2pB,qBAAuBM,EAC5B,MAEJ,IAAK,UACL,IAAK,WAEDjqB,KAAK4pB,wBAA0BK,EAIrC,GAAIA,EAEF,YADAjqB,KAAK6pB,gBAGP,MAAMvc,EAAclO,EAAMU,cACtBE,KAAK4E,WAAa0I,GAAetN,KAAK4E,SAASpgB,SAAS8oB,IAG5DtN,KAAK8pB,oBACP,CACA,aAAAlJ,GACErgB,GAAac,GAAGrB,KAAK4E,SAAUkkB,IAAiB1pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KACpFmB,GAAac,GAAGrB,KAAK4E,SAAUmkB,IAAgB3pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KACnFmB,GAAac,GAAGrB,KAAK4E,SAAUokB,IAAe5pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KAClFmB,GAAac,GAAGrB,KAAK4E,SAAUqkB,IAAgB7pB,GAASY,KAAKgqB,eAAe5qB,GAAO,IACrF,CACA,aAAAyqB,GACE9c,aAAa/M,KAAKsgB,UAClBtgB,KAAKsgB,SAAW,IAClB,CAGA,sBAAO7jB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOq/B,GAAMpkB,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KACf,CACF,GACF,ECr0IK,SAASkqB,GAAc7tB,GACD,WAAvBhX,SAASuX,WAAyBP,IACjChX,SAASyF,iBAAiB,mBAAoBuR,EACrD,CDy0IAuK,GAAqB8iB,IAMrBvtB,GAAmButB,IEtyInBQ,IAvCA,WAC2B,GAAG93B,MAAM5U,KAChC6H,SAAS+a,iBAAiB,+BAETtd,KAAI,SAAUqnC,GAC/B,OAAO,IAAI/J,GAAQ+J,EAAkB,C
AAElK,MAAO,CAAEvQ,KAAM,IAAKD,KAAM,MACnE,GACF,IAiCAya,IA5BA,WACY7kC,SAAS68B,eAAe,mBAC9Bp3B,iBAAiB,SAAS,WAC5BzF,SAAS6G,KAAKT,UAAY,EAC1BpG,SAASC,gBAAgBmG,UAAY,CACvC,GACF,IAuBAy+B,IArBA,WACE,IAAIE,EAAM/kC,SAAS68B,eAAe,mBAC9BmI,EAAShlC,SACVilC,uBAAuB,aAAa,GACpChnC,wBACH1D,OAAOkL,iBAAiB,UAAU,WAC5BkV,KAAKuqB,UAAYvqB,KAAKwqB,SAAWxqB,KAAKwqB,QAAUH,EAAOzsC,OACzDwsC,EAAIrpC,MAAM6wB,QAAU,QAEpBwY,EAAIrpC,MAAM6wB,QAAU,OAEtB5R,KAAKuqB,UAAYvqB,KAAKwqB,OACxB,GACF","sources":["webpack://pydata_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runtime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand","webpack://pydata_sphinx_theme/webpack/runtime/make namespace object","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/enums.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/instanceOf.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/applyStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getBasePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/math.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/userAgent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isLayoutViewport.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getBoundingClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getLayoutRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/contains.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getComputedStyle.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isTableElement.js","webpack://
pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getParentNode.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getOffsetParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getMainAxisFromPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/within.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergePaddingObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getFreshSideObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/expandToHashMap.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/arrow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getVariation.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/computeStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/eventListeners.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositeVariationPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScrollBarX.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/listScrollParents.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/rectToClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getClippingRect.js","webpack://pydata_sphinx_theme/./nod
e_modules/@popperjs/core/lib/dom-utils/getViewportRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/detectOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/flip.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeAutoPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/hide.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/offset.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/popperOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/preventOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getAltAxis.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getCompositeRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getHTMLElementScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/orderModifiers.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/createPopper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/debounce.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergeByName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper-lite.js","webpack://pydata_sphinx_theme/./node_modules/bootstrap/dist/js/bootstrap.esm.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/mixin.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/bootstrap.js"],"s
ourcesContent":["// The require scope\nvar __webpack_require__ = {};\n\n","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","export var top = 'top';\nexport var bottom = 'bottom';\nexport var right = 'right';\nexport var left = 'left';\nexport var auto = 'auto';\nexport var basePlacements = [top, bottom, right, left];\nexport var start = 'start';\nexport var end = 'end';\nexport var clippingParents = 'clippingParents';\nexport var viewport = 'viewport';\nexport var popper = 'popper';\nexport var reference = 'reference';\nexport var variationPlacements = /*#__PURE__*/basePlacements.reduce(function (acc, placement) {\n return acc.concat([placement + \"-\" + start, placement + \"-\" + end]);\n}, []);\nexport var placements = /*#__PURE__*/[].concat(basePlacements, [auto]).reduce(function (acc, placement) {\n return acc.concat([placement, placement + \"-\" + start, placement + \"-\" + end]);\n}, []); // modifiers that need to read the DOM\n\nexport var beforeRead = 'beforeRead';\nexport var read = 'read';\nexport var afterRead = 'afterRead'; // pure-logic modifiers\n\nexport var beforeMain = 'beforeMain';\nexport var main = 'main';\nexport var afterMain = 'afterMain'; // modifier with the purpose to write to the DOM (or write into a framework state)\n\nexport var beforeWrite = 'beforeWrite';\nexport var write = 
'write';\nexport var afterWrite = 'afterWrite';\nexport var modifierPhases = [beforeRead, read, afterRead, beforeMain, main, afterMain, beforeWrite, write, afterWrite];","export default function getNodeName(element) {\n return element ? (element.nodeName || '').toLowerCase() : null;\n}","export default function getWindow(node) {\n if (node == null) {\n return window;\n }\n\n if (node.toString() !== '[object Window]') {\n var ownerDocument = node.ownerDocument;\n return ownerDocument ? ownerDocument.defaultView || window : window;\n }\n\n return node;\n}","import getWindow from \"./getWindow.js\";\n\nfunction isElement(node) {\n var OwnElement = getWindow(node).Element;\n return node instanceof OwnElement || node instanceof Element;\n}\n\nfunction isHTMLElement(node) {\n var OwnElement = getWindow(node).HTMLElement;\n return node instanceof OwnElement || node instanceof HTMLElement;\n}\n\nfunction isShadowRoot(node) {\n // IE 11 has no ShadowRoot\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n\n var OwnElement = getWindow(node).ShadowRoot;\n return node instanceof OwnElement || node instanceof ShadowRoot;\n}\n\nexport { isElement, isHTMLElement, isShadowRoot };","import getNodeName from \"../dom-utils/getNodeName.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // This modifier takes the styles prepared by the `computeStyles` modifier\n// and applies them to the HTMLElements such as popper and arrow\n\nfunction applyStyles(_ref) {\n var state = _ref.state;\n Object.keys(state.elements).forEach(function (name) {\n var style = state.styles[name] || {};\n var attributes = state.attributes[name] || {};\n var element = state.elements[name]; // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n } // Flow doesn't support to extend this property, but it's the most\n // effective way to apply styles to an HTMLElement\n // $FlowFixMe[cannot-write]\n\n\n Object.assign(element.style, 
style);\n Object.keys(attributes).forEach(function (name) {\n var value = attributes[name];\n\n if (value === false) {\n element.removeAttribute(name);\n } else {\n element.setAttribute(name, value === true ? '' : value);\n }\n });\n });\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state;\n var initialStyles = {\n popper: {\n position: state.options.strategy,\n left: '0',\n top: '0',\n margin: '0'\n },\n arrow: {\n position: 'absolute'\n },\n reference: {}\n };\n Object.assign(state.elements.popper.style, initialStyles.popper);\n state.styles = initialStyles;\n\n if (state.elements.arrow) {\n Object.assign(state.elements.arrow.style, initialStyles.arrow);\n }\n\n return function () {\n Object.keys(state.elements).forEach(function (name) {\n var element = state.elements[name];\n var attributes = state.attributes[name] || {};\n var styleProperties = Object.keys(state.styles.hasOwnProperty(name) ? state.styles[name] : initialStyles[name]); // Set all values to an empty string to unset them\n\n var style = styleProperties.reduce(function (style, property) {\n style[property] = '';\n return style;\n }, {}); // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n }\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (attribute) {\n element.removeAttribute(attribute);\n });\n });\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'applyStyles',\n enabled: true,\n phase: 'write',\n fn: applyStyles,\n effect: effect,\n requires: ['computeStyles']\n};","import { auto } from \"../enums.js\";\nexport default function getBasePlacement(placement) {\n return placement.split('-')[0];\n}","export var max = Math.max;\nexport var min = Math.min;\nexport var round = Math.round;","export default function getUAString() {\n var uaData = navigator.userAgentData;\n\n if (uaData != null && uaData.brands && Array.isArray(uaData.brands)) {\n return 
uaData.brands.map(function (item) {\n return item.brand + \"/\" + item.version;\n }).join(' ');\n }\n\n return navigator.userAgent;\n}","import getUAString from \"../utils/userAgent.js\";\nexport default function isLayoutViewport() {\n return !/^((?!chrome|android).)*safari/i.test(getUAString());\n}","import { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport { round } from \"../utils/math.js\";\nimport getWindow from \"./getWindow.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getBoundingClientRect(element, includeScale, isFixedStrategy) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n\n var clientRect = element.getBoundingClientRect();\n var scaleX = 1;\n var scaleY = 1;\n\n if (includeScale && isHTMLElement(element)) {\n scaleX = element.offsetWidth > 0 ? round(clientRect.width) / element.offsetWidth || 1 : 1;\n scaleY = element.offsetHeight > 0 ? round(clientRect.height) / element.offsetHeight || 1 : 1;\n }\n\n var _ref = isElement(element) ? getWindow(element) : window,\n visualViewport = _ref.visualViewport;\n\n var addVisualOffsets = !isLayoutViewport() && isFixedStrategy;\n var x = (clientRect.left + (addVisualOffsets && visualViewport ? visualViewport.offsetLeft : 0)) / scaleX;\n var y = (clientRect.top + (addVisualOffsets && visualViewport ? visualViewport.offsetTop : 0)) / scaleY;\n var width = clientRect.width / scaleX;\n var height = clientRect.height / scaleY;\n return {\n width: width,\n height: height,\n top: y,\n right: x + width,\n bottom: y + height,\n left: x,\n x: x,\n y: y\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\"; // Returns the layout rect of an element relative to its offsetParent. 
Layout\n// means it doesn't take into account transforms.\n\nexport default function getLayoutRect(element) {\n var clientRect = getBoundingClientRect(element); // Use the clientRect sizes if it's not been transformed.\n // Fixes https://github.com/popperjs/popper-core/issues/1223\n\n var width = element.offsetWidth;\n var height = element.offsetHeight;\n\n if (Math.abs(clientRect.width - width) <= 1) {\n width = clientRect.width;\n }\n\n if (Math.abs(clientRect.height - height) <= 1) {\n height = clientRect.height;\n }\n\n return {\n x: element.offsetLeft,\n y: element.offsetTop,\n width: width,\n height: height\n };\n}","import { isShadowRoot } from \"./instanceOf.js\";\nexport default function contains(parent, child) {\n var rootNode = child.getRootNode && child.getRootNode(); // First, attempt with faster native method\n\n if (parent.contains(child)) {\n return true;\n } // then fallback to custom implementation with Shadow DOM support\n else if (rootNode && isShadowRoot(rootNode)) {\n var next = child;\n\n do {\n if (next && parent.isSameNode(next)) {\n return true;\n } // $FlowFixMe[prop-missing]: need a better way to handle this...\n\n\n next = next.parentNode || next.host;\n } while (next);\n } // Give up, the result is false\n\n\n return false;\n}","import getWindow from \"./getWindow.js\";\nexport default function getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}","import getNodeName from \"./getNodeName.js\";\nexport default function isTableElement(element) {\n return ['table', 'td', 'th'].indexOf(getNodeName(element)) >= 0;\n}","import { isElement } from \"./instanceOf.js\";\nexport default function getDocumentElement(element) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return ((isElement(element) ? 
element.ownerDocument : // $FlowFixMe[prop-missing]\n element.document) || window.document).documentElement;\n}","import getNodeName from \"./getNodeName.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport { isShadowRoot } from \"./instanceOf.js\";\nexport default function getParentNode(element) {\n if (getNodeName(element) === 'html') {\n return element;\n }\n\n return (// this is a quicker (but less type safe) way to save quite some bytes from the bundle\n // $FlowFixMe[incompatible-return]\n // $FlowFixMe[prop-missing]\n element.assignedSlot || // step into the shadow DOM of the parent of a slotted node\n element.parentNode || ( // DOM Element detected\n isShadowRoot(element) ? element.host : null) || // ShadowRoot detected\n // $FlowFixMe[incompatible-call]: HTMLElement is a Node\n getDocumentElement(element) // fallback\n\n );\n}","import getWindow from \"./getWindow.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isHTMLElement, isShadowRoot } from \"./instanceOf.js\";\nimport isTableElement from \"./isTableElement.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getUAString from \"../utils/userAgent.js\";\n\nfunction getTrueOffsetParent(element) {\n if (!isHTMLElement(element) || // https://github.com/popperjs/popper-core/issues/837\n getComputedStyle(element).position === 'fixed') {\n return null;\n }\n\n return element.offsetParent;\n} // `.offsetParent` reports `null` for fixed elements, while absolute elements\n// return the containing block\n\n\nfunction getContainingBlock(element) {\n var isFirefox = /firefox/i.test(getUAString());\n var isIE = /Trident/i.test(getUAString());\n\n if (isIE && isHTMLElement(element)) {\n // In IE 9, 10 and 11 fixed elements containing block is always established by the viewport\n var elementCss = getComputedStyle(element);\n\n if (elementCss.position === 'fixed') {\n return null;\n }\n }\n\n var currentNode = 
getParentNode(element);\n\n if (isShadowRoot(currentNode)) {\n currentNode = currentNode.host;\n }\n\n while (isHTMLElement(currentNode) && ['html', 'body'].indexOf(getNodeName(currentNode)) < 0) {\n var css = getComputedStyle(currentNode); // This is non-exhaustive but covers the most common CSS properties that\n // create a containing block.\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n\n if (css.transform !== 'none' || css.perspective !== 'none' || css.contain === 'paint' || ['transform', 'perspective'].indexOf(css.willChange) !== -1 || isFirefox && css.willChange === 'filter' || isFirefox && css.filter && css.filter !== 'none') {\n return currentNode;\n } else {\n currentNode = currentNode.parentNode;\n }\n }\n\n return null;\n} // Gets the closest ancestor positioned element. Handles some edge cases,\n// such as table ancestors and cross browser bugs.\n\n\nexport default function getOffsetParent(element) {\n var window = getWindow(element);\n var offsetParent = getTrueOffsetParent(element);\n\n while (offsetParent && isTableElement(offsetParent) && getComputedStyle(offsetParent).position === 'static') {\n offsetParent = getTrueOffsetParent(offsetParent);\n }\n\n if (offsetParent && (getNodeName(offsetParent) === 'html' || getNodeName(offsetParent) === 'body' && getComputedStyle(offsetParent).position === 'static')) {\n return window;\n }\n\n return offsetParent || getContainingBlock(element) || window;\n}","export default function getMainAxisFromPlacement(placement) {\n return ['top', 'bottom'].indexOf(placement) >= 0 ? 'x' : 'y';\n}","import { max as mathMax, min as mathMin } from \"./math.js\";\nexport function within(min, value, max) {\n return mathMax(min, mathMin(value, max));\n}\nexport function withinMaxClamp(min, value, max) {\n var v = within(min, value, max);\n return v > max ? 
max : v;\n}","import getFreshSideObject from \"./getFreshSideObject.js\";\nexport default function mergePaddingObject(paddingObject) {\n return Object.assign({}, getFreshSideObject(), paddingObject);\n}","export default function getFreshSideObject() {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0\n };\n}","export default function expandToHashMap(value, keys) {\n return keys.reduce(function (hashMap, key) {\n hashMap[key] = value;\n return hashMap;\n }, {});\n}","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport contains from \"../dom-utils/contains.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport { within } from \"../utils/within.js\";\nimport mergePaddingObject from \"../utils/mergePaddingObject.js\";\nimport expandToHashMap from \"../utils/expandToHashMap.js\";\nimport { left, right, basePlacements, top, bottom } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar toPaddingObject = function toPaddingObject(padding, state) {\n padding = typeof padding === 'function' ? padding(Object.assign({}, state.rects, {\n placement: state.placement\n })) : padding;\n return mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n};\n\nfunction arrow(_ref) {\n var _state$modifiersData$;\n\n var state = _ref.state,\n name = _ref.name,\n options = _ref.options;\n var arrowElement = state.elements.arrow;\n var popperOffsets = state.modifiersData.popperOffsets;\n var basePlacement = getBasePlacement(state.placement);\n var axis = getMainAxisFromPlacement(basePlacement);\n var isVertical = [left, right].indexOf(basePlacement) >= 0;\n var len = isVertical ? 
'height' : 'width';\n\n if (!arrowElement || !popperOffsets) {\n return;\n }\n\n var paddingObject = toPaddingObject(options.padding, state);\n var arrowRect = getLayoutRect(arrowElement);\n var minProp = axis === 'y' ? top : left;\n var maxProp = axis === 'y' ? bottom : right;\n var endDiff = state.rects.reference[len] + state.rects.reference[axis] - popperOffsets[axis] - state.rects.popper[len];\n var startDiff = popperOffsets[axis] - state.rects.reference[axis];\n var arrowOffsetParent = getOffsetParent(arrowElement);\n var clientSize = arrowOffsetParent ? axis === 'y' ? arrowOffsetParent.clientHeight || 0 : arrowOffsetParent.clientWidth || 0 : 0;\n var centerToReference = endDiff / 2 - startDiff / 2; // Make sure the arrow doesn't overflow the popper if the center point is\n // outside of the popper bounds\n\n var min = paddingObject[minProp];\n var max = clientSize - arrowRect[len] - paddingObject[maxProp];\n var center = clientSize / 2 - arrowRect[len] / 2 + centerToReference;\n var offset = within(min, center, max); // Prevents breaking syntax highlighting...\n\n var axisProp = axis;\n state.modifiersData[name] = (_state$modifiersData$ = {}, _state$modifiersData$[axisProp] = offset, _state$modifiersData$.centerOffset = offset - center, _state$modifiersData$);\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state,\n options = _ref2.options;\n var _options$element = options.element,\n arrowElement = _options$element === void 0 ? 
'[data-popper-arrow]' : _options$element;\n\n if (arrowElement == null) {\n return;\n } // CSS selector\n\n\n if (typeof arrowElement === 'string') {\n arrowElement = state.elements.popper.querySelector(arrowElement);\n\n if (!arrowElement) {\n return;\n }\n }\n\n if (!contains(state.elements.popper, arrowElement)) {\n return;\n }\n\n state.elements.arrow = arrowElement;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'arrow',\n enabled: true,\n phase: 'main',\n fn: arrow,\n effect: effect,\n requires: ['popperOffsets'],\n requiresIfExists: ['preventOverflow']\n};","export default function getVariation(placement) {\n return placement.split('-')[1];\n}","import { top, left, right, bottom, end } from \"../enums.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getWindow from \"../dom-utils/getWindow.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getComputedStyle from \"../dom-utils/getComputedStyle.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport { round } from \"../utils/math.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar unsetSides = {\n top: 'auto',\n right: 'auto',\n bottom: 'auto',\n left: 'auto'\n}; // Round the offsets to the nearest suitable subpixel based on the DPR.\n// Zooming can change the DPR, but it seems to report a value that will\n// cleanly divide the values into the appropriate subpixels.\n\nfunction roundOffsetsByDPR(_ref, win) {\n var x = _ref.x,\n y = _ref.y;\n var dpr = win.devicePixelRatio || 1;\n return {\n x: round(x * dpr) / dpr || 0,\n y: round(y * dpr) / dpr || 0\n };\n}\n\nexport function mapToStyles(_ref2) {\n var _Object$assign2;\n\n var popper = _ref2.popper,\n popperRect = _ref2.popperRect,\n placement = _ref2.placement,\n variation = _ref2.variation,\n offsets = _ref2.offsets,\n position = _ref2.position,\n gpuAcceleration = 
_ref2.gpuAcceleration,\n adaptive = _ref2.adaptive,\n roundOffsets = _ref2.roundOffsets,\n isFixed = _ref2.isFixed;\n var _offsets$x = offsets.x,\n x = _offsets$x === void 0 ? 0 : _offsets$x,\n _offsets$y = offsets.y,\n y = _offsets$y === void 0 ? 0 : _offsets$y;\n\n var _ref3 = typeof roundOffsets === 'function' ? roundOffsets({\n x: x,\n y: y\n }) : {\n x: x,\n y: y\n };\n\n x = _ref3.x;\n y = _ref3.y;\n var hasX = offsets.hasOwnProperty('x');\n var hasY = offsets.hasOwnProperty('y');\n var sideX = left;\n var sideY = top;\n var win = window;\n\n if (adaptive) {\n var offsetParent = getOffsetParent(popper);\n var heightProp = 'clientHeight';\n var widthProp = 'clientWidth';\n\n if (offsetParent === getWindow(popper)) {\n offsetParent = getDocumentElement(popper);\n\n if (getComputedStyle(offsetParent).position !== 'static' && position === 'absolute') {\n heightProp = 'scrollHeight';\n widthProp = 'scrollWidth';\n }\n } // $FlowFixMe[incompatible-cast]: force type refinement, we compare offsetParent with window above, but Flow doesn't detect it\n\n\n offsetParent = offsetParent;\n\n if (placement === top || (placement === left || placement === right) && variation === end) {\n sideY = bottom;\n var offsetY = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.height : // $FlowFixMe[prop-missing]\n offsetParent[heightProp];\n y -= offsetY - popperRect.height;\n y *= gpuAcceleration ? 1 : -1;\n }\n\n if (placement === left || (placement === top || placement === bottom) && variation === end) {\n sideX = right;\n var offsetX = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.width : // $FlowFixMe[prop-missing]\n offsetParent[widthProp];\n x -= offsetX - popperRect.width;\n x *= gpuAcceleration ? 1 : -1;\n }\n }\n\n var commonStyles = Object.assign({\n position: position\n }, adaptive && unsetSides);\n\n var _ref4 = roundOffsets === true ? 
roundOffsetsByDPR({\n x: x,\n y: y\n }, getWindow(popper)) : {\n x: x,\n y: y\n };\n\n x = _ref4.x;\n y = _ref4.y;\n\n if (gpuAcceleration) {\n var _Object$assign;\n\n return Object.assign({}, commonStyles, (_Object$assign = {}, _Object$assign[sideY] = hasY ? '0' : '', _Object$assign[sideX] = hasX ? '0' : '', _Object$assign.transform = (win.devicePixelRatio || 1) <= 1 ? \"translate(\" + x + \"px, \" + y + \"px)\" : \"translate3d(\" + x + \"px, \" + y + \"px, 0)\", _Object$assign));\n }\n\n return Object.assign({}, commonStyles, (_Object$assign2 = {}, _Object$assign2[sideY] = hasY ? y + \"px\" : '', _Object$assign2[sideX] = hasX ? x + \"px\" : '', _Object$assign2.transform = '', _Object$assign2));\n}\n\nfunction computeStyles(_ref5) {\n var state = _ref5.state,\n options = _ref5.options;\n var _options$gpuAccelerat = options.gpuAcceleration,\n gpuAcceleration = _options$gpuAccelerat === void 0 ? true : _options$gpuAccelerat,\n _options$adaptive = options.adaptive,\n adaptive = _options$adaptive === void 0 ? true : _options$adaptive,\n _options$roundOffsets = options.roundOffsets,\n roundOffsets = _options$roundOffsets === void 0 ? 
true : _options$roundOffsets;\n var commonStyles = {\n placement: getBasePlacement(state.placement),\n variation: getVariation(state.placement),\n popper: state.elements.popper,\n popperRect: state.rects.popper,\n gpuAcceleration: gpuAcceleration,\n isFixed: state.options.strategy === 'fixed'\n };\n\n if (state.modifiersData.popperOffsets != null) {\n state.styles.popper = Object.assign({}, state.styles.popper, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.popperOffsets,\n position: state.options.strategy,\n adaptive: adaptive,\n roundOffsets: roundOffsets\n })));\n }\n\n if (state.modifiersData.arrow != null) {\n state.styles.arrow = Object.assign({}, state.styles.arrow, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.arrow,\n position: 'absolute',\n adaptive: false,\n roundOffsets: roundOffsets\n })));\n }\n\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-placement': state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'computeStyles',\n enabled: true,\n phase: 'beforeWrite',\n fn: computeStyles,\n data: {}\n};","import getWindow from \"../dom-utils/getWindow.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar passive = {\n passive: true\n};\n\nfunction effect(_ref) {\n var state = _ref.state,\n instance = _ref.instance,\n options = _ref.options;\n var _options$scroll = options.scroll,\n scroll = _options$scroll === void 0 ? true : _options$scroll,\n _options$resize = options.resize,\n resize = _options$resize === void 0 ? 
true : _options$resize;\n var window = getWindow(state.elements.popper);\n var scrollParents = [].concat(state.scrollParents.reference, state.scrollParents.popper);\n\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.addEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.addEventListener('resize', instance.update, passive);\n }\n\n return function () {\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.removeEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.removeEventListener('resize', instance.update, passive);\n }\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'eventListeners',\n enabled: true,\n phase: 'write',\n fn: function fn() {},\n effect: effect,\n data: {}\n};","var hash = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nexport default function getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, function (matched) {\n return hash[matched];\n });\n}","var hash = {\n start: 'end',\n end: 'start'\n};\nexport default function getOppositeVariationPlacement(placement) {\n return placement.replace(/start|end/g, function (matched) {\n return hash[matched];\n });\n}","import getWindow from \"./getWindow.js\";\nexport default function getWindowScroll(node) {\n var win = getWindow(node);\n var scrollLeft = win.pageXOffset;\n var scrollTop = win.pageYOffset;\n return {\n scrollLeft: scrollLeft,\n scrollTop: scrollTop\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nexport default function getWindowScrollBarX(element) {\n // If has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n // Popper 1 is broken in this case and never had a bug report so let's assume\n 
// it's not an issue. I don't think anyone ever specifies width on \n // anyway.\n // Browsers where the left scrollbar doesn't cause an issue report `0` for\n // this (e.g. Edge 2019, IE11, Safari)\n return getBoundingClientRect(getDocumentElement(element)).left + getWindowScroll(element).scrollLeft;\n}","import getComputedStyle from \"./getComputedStyle.js\";\nexport default function isScrollParent(element) {\n // Firefox wants us to check `-x` and `-y` variations as well\n var _getComputedStyle = getComputedStyle(element),\n overflow = _getComputedStyle.overflow,\n overflowX = _getComputedStyle.overflowX,\n overflowY = _getComputedStyle.overflowY;\n\n return /auto|scroll|overlay|hidden/.test(overflow + overflowY + overflowX);\n}","import getParentNode from \"./getParentNode.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nexport default function getScrollParent(node) {\n if (['html', 'body', '#document'].indexOf(getNodeName(node)) >= 0) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return node.ownerDocument.body;\n }\n\n if (isHTMLElement(node) && isScrollParent(node)) {\n return node;\n }\n\n return getScrollParent(getParentNode(node));\n}","import getScrollParent from \"./getScrollParent.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getWindow from \"./getWindow.js\";\nimport isScrollParent from \"./isScrollParent.js\";\n/*\ngiven a DOM element, return the list of all scroll parents, up the list of ancesors\nuntil we get to the top window object. 
This list is what we attach scroll listeners\nto, because if any of these parent elements scroll, we'll need to re-calculate the\nreference element's position.\n*/\n\nexport default function listScrollParents(element, list) {\n var _element$ownerDocumen;\n\n if (list === void 0) {\n list = [];\n }\n\n var scrollParent = getScrollParent(element);\n var isBody = scrollParent === ((_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body);\n var win = getWindow(scrollParent);\n var target = isBody ? [win].concat(win.visualViewport || [], isScrollParent(scrollParent) ? scrollParent : []) : scrollParent;\n var updatedList = list.concat(target);\n return isBody ? updatedList : // $FlowFixMe[incompatible-call]: isBody tells us target will be an HTMLElement here\n updatedList.concat(listScrollParents(getParentNode(target)));\n}","export default function rectToClientRect(rect) {\n return Object.assign({}, rect, {\n left: rect.x,\n top: rect.y,\n right: rect.x + rect.width,\n bottom: rect.y + rect.height\n });\n}","import { viewport } from \"../enums.js\";\nimport getViewportRect from \"./getViewportRect.js\";\nimport getDocumentRect from \"./getDocumentRect.js\";\nimport listScrollParents from \"./listScrollParents.js\";\nimport getOffsetParent from \"./getOffsetParent.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport contains from \"./contains.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport rectToClientRect from \"../utils/rectToClientRect.js\";\nimport { max, min } from \"../utils/math.js\";\n\nfunction getInnerBoundingClientRect(element, strategy) {\n var rect = getBoundingClientRect(element, false, strategy === 'fixed');\n rect.top = rect.top + 
element.clientTop;\n rect.left = rect.left + element.clientLeft;\n rect.bottom = rect.top + element.clientHeight;\n rect.right = rect.left + element.clientWidth;\n rect.width = element.clientWidth;\n rect.height = element.clientHeight;\n rect.x = rect.left;\n rect.y = rect.top;\n return rect;\n}\n\nfunction getClientRectFromMixedType(element, clippingParent, strategy) {\n return clippingParent === viewport ? rectToClientRect(getViewportRect(element, strategy)) : isElement(clippingParent) ? getInnerBoundingClientRect(clippingParent, strategy) : rectToClientRect(getDocumentRect(getDocumentElement(element)));\n} // A \"clipping parent\" is an overflowable container with the characteristic of\n// clipping (or hiding) overflowing elements with a position different from\n// `initial`\n\n\nfunction getClippingParents(element) {\n var clippingParents = listScrollParents(getParentNode(element));\n var canEscapeClipping = ['absolute', 'fixed'].indexOf(getComputedStyle(element).position) >= 0;\n var clipperElement = canEscapeClipping && isHTMLElement(element) ? getOffsetParent(element) : element;\n\n if (!isElement(clipperElement)) {\n return [];\n } // $FlowFixMe[incompatible-return]: https://github.com/facebook/flow/issues/1414\n\n\n return clippingParents.filter(function (clippingParent) {\n return isElement(clippingParent) && contains(clippingParent, clipperElement) && getNodeName(clippingParent) !== 'body';\n });\n} // Gets the maximum area that the element is visible in due to any number of\n// clipping parents\n\n\nexport default function getClippingRect(element, boundary, rootBoundary, strategy) {\n var mainClippingParents = boundary === 'clippingParents' ? 
getClippingParents(element) : [].concat(boundary);\n var clippingParents = [].concat(mainClippingParents, [rootBoundary]);\n var firstClippingParent = clippingParents[0];\n var clippingRect = clippingParents.reduce(function (accRect, clippingParent) {\n var rect = getClientRectFromMixedType(element, clippingParent, strategy);\n accRect.top = max(rect.top, accRect.top);\n accRect.right = min(rect.right, accRect.right);\n accRect.bottom = min(rect.bottom, accRect.bottom);\n accRect.left = max(rect.left, accRect.left);\n return accRect;\n }, getClientRectFromMixedType(element, firstClippingParent, strategy));\n clippingRect.width = clippingRect.right - clippingRect.left;\n clippingRect.height = clippingRect.bottom - clippingRect.top;\n clippingRect.x = clippingRect.left;\n clippingRect.y = clippingRect.top;\n return clippingRect;\n}","import getWindow from \"./getWindow.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getViewportRect(element, strategy) {\n var win = getWindow(element);\n var html = getDocumentElement(element);\n var visualViewport = win.visualViewport;\n var width = html.clientWidth;\n var height = html.clientHeight;\n var x = 0;\n var y = 0;\n\n if (visualViewport) {\n width = visualViewport.width;\n height = visualViewport.height;\n var layoutViewport = isLayoutViewport();\n\n if (layoutViewport || !layoutViewport && strategy === 'fixed') {\n x = visualViewport.offsetLeft;\n y = visualViewport.offsetTop;\n }\n }\n\n return {\n width: width,\n height: height,\n x: x + getWindowScrollBarX(element),\n y: y\n };\n}","import getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nimport { max } from \"../utils/math.js\"; // 
Gets the entire size of the scrollable document area, even extending outside\n// of the `` and `` rect bounds if horizontally scrollable\n\nexport default function getDocumentRect(element) {\n var _element$ownerDocumen;\n\n var html = getDocumentElement(element);\n var winScroll = getWindowScroll(element);\n var body = (_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body;\n var width = max(html.scrollWidth, html.clientWidth, body ? body.scrollWidth : 0, body ? body.clientWidth : 0);\n var height = max(html.scrollHeight, html.clientHeight, body ? body.scrollHeight : 0, body ? body.clientHeight : 0);\n var x = -winScroll.scrollLeft + getWindowScrollBarX(element);\n var y = -winScroll.scrollTop;\n\n if (getComputedStyle(body || html).direction === 'rtl') {\n x += max(html.clientWidth, body ? body.clientWidth : 0) - width;\n }\n\n return {\n width: width,\n height: height,\n x: x,\n y: y\n };\n}","import getBasePlacement from \"./getBasePlacement.js\";\nimport getVariation from \"./getVariation.js\";\nimport getMainAxisFromPlacement from \"./getMainAxisFromPlacement.js\";\nimport { top, right, bottom, left, start, end } from \"../enums.js\";\nexport default function computeOffsets(_ref) {\n var reference = _ref.reference,\n element = _ref.element,\n placement = _ref.placement;\n var basePlacement = placement ? getBasePlacement(placement) : null;\n var variation = placement ? 
getVariation(placement) : null;\n var commonX = reference.x + reference.width / 2 - element.width / 2;\n var commonY = reference.y + reference.height / 2 - element.height / 2;\n var offsets;\n\n switch (basePlacement) {\n case top:\n offsets = {\n x: commonX,\n y: reference.y - element.height\n };\n break;\n\n case bottom:\n offsets = {\n x: commonX,\n y: reference.y + reference.height\n };\n break;\n\n case right:\n offsets = {\n x: reference.x + reference.width,\n y: commonY\n };\n break;\n\n case left:\n offsets = {\n x: reference.x - element.width,\n y: commonY\n };\n break;\n\n default:\n offsets = {\n x: reference.x,\n y: reference.y\n };\n }\n\n var mainAxis = basePlacement ? getMainAxisFromPlacement(basePlacement) : null;\n\n if (mainAxis != null) {\n var len = mainAxis === 'y' ? 'height' : 'width';\n\n switch (variation) {\n case start:\n offsets[mainAxis] = offsets[mainAxis] - (reference[len] / 2 - element[len] / 2);\n break;\n\n case end:\n offsets[mainAxis] = offsets[mainAxis] + (reference[len] / 2 - element[len] / 2);\n break;\n\n default:\n }\n }\n\n return offsets;\n}","import getClippingRect from \"../dom-utils/getClippingRect.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getBoundingClientRect from \"../dom-utils/getBoundingClientRect.js\";\nimport computeOffsets from \"./computeOffsets.js\";\nimport rectToClientRect from \"./rectToClientRect.js\";\nimport { clippingParents, reference, popper, bottom, top, right, basePlacements, viewport } from \"../enums.js\";\nimport { isElement } from \"../dom-utils/instanceOf.js\";\nimport mergePaddingObject from \"./mergePaddingObject.js\";\nimport expandToHashMap from \"./expandToHashMap.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport default function detectOverflow(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n _options$placement = _options.placement,\n placement = _options$placement === void 0 ? 
state.placement : _options$placement,\n _options$strategy = _options.strategy,\n strategy = _options$strategy === void 0 ? state.strategy : _options$strategy,\n _options$boundary = _options.boundary,\n boundary = _options$boundary === void 0 ? clippingParents : _options$boundary,\n _options$rootBoundary = _options.rootBoundary,\n rootBoundary = _options$rootBoundary === void 0 ? viewport : _options$rootBoundary,\n _options$elementConte = _options.elementContext,\n elementContext = _options$elementConte === void 0 ? popper : _options$elementConte,\n _options$altBoundary = _options.altBoundary,\n altBoundary = _options$altBoundary === void 0 ? false : _options$altBoundary,\n _options$padding = _options.padding,\n padding = _options$padding === void 0 ? 0 : _options$padding;\n var paddingObject = mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n var altContext = elementContext === popper ? reference : popper;\n var popperRect = state.rects.popper;\n var element = state.elements[altBoundary ? altContext : elementContext];\n var clippingClientRect = getClippingRect(isElement(element) ? element : element.contextElement || getDocumentElement(state.elements.popper), boundary, rootBoundary, strategy);\n var referenceClientRect = getBoundingClientRect(state.elements.reference);\n var popperOffsets = computeOffsets({\n reference: referenceClientRect,\n element: popperRect,\n strategy: 'absolute',\n placement: placement\n });\n var popperClientRect = rectToClientRect(Object.assign({}, popperRect, popperOffsets));\n var elementClientRect = elementContext === popper ? 
popperClientRect : referenceClientRect; // positive = overflowing the clipping rect\n // 0 or negative = within the clipping rect\n\n var overflowOffsets = {\n top: clippingClientRect.top - elementClientRect.top + paddingObject.top,\n bottom: elementClientRect.bottom - clippingClientRect.bottom + paddingObject.bottom,\n left: clippingClientRect.left - elementClientRect.left + paddingObject.left,\n right: elementClientRect.right - clippingClientRect.right + paddingObject.right\n };\n var offsetData = state.modifiersData.offset; // Offsets can be applied only to the popper element\n\n if (elementContext === popper && offsetData) {\n var offset = offsetData[placement];\n Object.keys(overflowOffsets).forEach(function (key) {\n var multiply = [right, bottom].indexOf(key) >= 0 ? 1 : -1;\n var axis = [top, bottom].indexOf(key) >= 0 ? 'y' : 'x';\n overflowOffsets[key] += offset[axis] * multiply;\n });\n }\n\n return overflowOffsets;\n}","import getOppositePlacement from \"../utils/getOppositePlacement.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getOppositeVariationPlacement from \"../utils/getOppositeVariationPlacement.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport computeAutoPlacement from \"../utils/computeAutoPlacement.js\";\nimport { bottom, top, start, right, left, auto } from \"../enums.js\";\nimport getVariation from \"../utils/getVariation.js\"; // eslint-disable-next-line import/no-unused-modules\n\nfunction getExpandedFallbackPlacements(placement) {\n if (getBasePlacement(placement) === auto) {\n return [];\n }\n\n var oppositePlacement = getOppositePlacement(placement);\n return [getOppositeVariationPlacement(placement), oppositePlacement, getOppositeVariationPlacement(oppositePlacement)];\n}\n\nfunction flip(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n\n if (state.modifiersData[name]._skip) {\n return;\n }\n\n var _options$mainAxis = options.mainAxis,\n 
checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? true : _options$altAxis,\n specifiedFallbackPlacements = options.fallbackPlacements,\n padding = options.padding,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n _options$flipVariatio = options.flipVariations,\n flipVariations = _options$flipVariatio === void 0 ? true : _options$flipVariatio,\n allowedAutoPlacements = options.allowedAutoPlacements;\n var preferredPlacement = state.options.placement;\n var basePlacement = getBasePlacement(preferredPlacement);\n var isBasePlacement = basePlacement === preferredPlacement;\n var fallbackPlacements = specifiedFallbackPlacements || (isBasePlacement || !flipVariations ? [getOppositePlacement(preferredPlacement)] : getExpandedFallbackPlacements(preferredPlacement));\n var placements = [preferredPlacement].concat(fallbackPlacements).reduce(function (acc, placement) {\n return acc.concat(getBasePlacement(placement) === auto ? computeAutoPlacement(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n flipVariations: flipVariations,\n allowedAutoPlacements: allowedAutoPlacements\n }) : placement);\n }, []);\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var checksMap = new Map();\n var makeFallbackChecks = true;\n var firstFittingPlacement = placements[0];\n\n for (var i = 0; i < placements.length; i++) {\n var placement = placements[i];\n\n var _basePlacement = getBasePlacement(placement);\n\n var isStartVariation = getVariation(placement) === start;\n var isVertical = [top, bottom].indexOf(_basePlacement) >= 0;\n var len = isVertical ? 
'width' : 'height';\n var overflow = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n altBoundary: altBoundary,\n padding: padding\n });\n var mainVariationSide = isVertical ? isStartVariation ? right : left : isStartVariation ? bottom : top;\n\n if (referenceRect[len] > popperRect[len]) {\n mainVariationSide = getOppositePlacement(mainVariationSide);\n }\n\n var altVariationSide = getOppositePlacement(mainVariationSide);\n var checks = [];\n\n if (checkMainAxis) {\n checks.push(overflow[_basePlacement] <= 0);\n }\n\n if (checkAltAxis) {\n checks.push(overflow[mainVariationSide] <= 0, overflow[altVariationSide] <= 0);\n }\n\n if (checks.every(function (check) {\n return check;\n })) {\n firstFittingPlacement = placement;\n makeFallbackChecks = false;\n break;\n }\n\n checksMap.set(placement, checks);\n }\n\n if (makeFallbackChecks) {\n // `2` may be desired in some cases – research later\n var numberOfChecks = flipVariations ? 3 : 1;\n\n var _loop = function _loop(_i) {\n var fittingPlacement = placements.find(function (placement) {\n var checks = checksMap.get(placement);\n\n if (checks) {\n return checks.slice(0, _i).every(function (check) {\n return check;\n });\n }\n });\n\n if (fittingPlacement) {\n firstFittingPlacement = fittingPlacement;\n return \"break\";\n }\n };\n\n for (var _i = numberOfChecks; _i > 0; _i--) {\n var _ret = _loop(_i);\n\n if (_ret === \"break\") break;\n }\n }\n\n if (state.placement !== firstFittingPlacement) {\n state.modifiersData[name]._skip = true;\n state.placement = firstFittingPlacement;\n state.reset = true;\n }\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'flip',\n enabled: true,\n phase: 'main',\n fn: flip,\n requiresIfExists: ['offset'],\n data: {\n _skip: false\n }\n};","import getVariation from \"./getVariation.js\";\nimport { variationPlacements, basePlacements, placements as allPlacements } from \"../enums.js\";\nimport 
detectOverflow from \"./detectOverflow.js\";\nimport getBasePlacement from \"./getBasePlacement.js\";\nexport default function computeAutoPlacement(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n placement = _options.placement,\n boundary = _options.boundary,\n rootBoundary = _options.rootBoundary,\n padding = _options.padding,\n flipVariations = _options.flipVariations,\n _options$allowedAutoP = _options.allowedAutoPlacements,\n allowedAutoPlacements = _options$allowedAutoP === void 0 ? allPlacements : _options$allowedAutoP;\n var variation = getVariation(placement);\n var placements = variation ? flipVariations ? variationPlacements : variationPlacements.filter(function (placement) {\n return getVariation(placement) === variation;\n }) : basePlacements;\n var allowedPlacements = placements.filter(function (placement) {\n return allowedAutoPlacements.indexOf(placement) >= 0;\n });\n\n if (allowedPlacements.length === 0) {\n allowedPlacements = placements;\n } // $FlowFixMe[incompatible-type]: Flow seems to have problems with two array unions...\n\n\n var overflows = allowedPlacements.reduce(function (acc, placement) {\n acc[placement] = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding\n })[getBasePlacement(placement)];\n return acc;\n }, {});\n return Object.keys(overflows).sort(function (a, b) {\n return overflows[a] - overflows[b];\n });\n}","import { top, bottom, left, right } from \"../enums.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\n\nfunction getSideOffsets(overflow, rect, preventedOffsets) {\n if (preventedOffsets === void 0) {\n preventedOffsets = {\n x: 0,\n y: 0\n };\n }\n\n return {\n top: overflow.top - rect.height - preventedOffsets.y,\n right: overflow.right - rect.width + preventedOffsets.x,\n bottom: overflow.bottom - rect.height + preventedOffsets.y,\n left: overflow.left - rect.width - preventedOffsets.x\n 
};\n}\n\nfunction isAnySideFullyClipped(overflow) {\n return [top, right, bottom, left].some(function (side) {\n return overflow[side] >= 0;\n });\n}\n\nfunction hide(_ref) {\n var state = _ref.state,\n name = _ref.name;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var preventedOffsets = state.modifiersData.preventOverflow;\n var referenceOverflow = detectOverflow(state, {\n elementContext: 'reference'\n });\n var popperAltOverflow = detectOverflow(state, {\n altBoundary: true\n });\n var referenceClippingOffsets = getSideOffsets(referenceOverflow, referenceRect);\n var popperEscapeOffsets = getSideOffsets(popperAltOverflow, popperRect, preventedOffsets);\n var isReferenceHidden = isAnySideFullyClipped(referenceClippingOffsets);\n var hasPopperEscaped = isAnySideFullyClipped(popperEscapeOffsets);\n state.modifiersData[name] = {\n referenceClippingOffsets: referenceClippingOffsets,\n popperEscapeOffsets: popperEscapeOffsets,\n isReferenceHidden: isReferenceHidden,\n hasPopperEscaped: hasPopperEscaped\n };\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-reference-hidden': isReferenceHidden,\n 'data-popper-escaped': hasPopperEscaped\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'hide',\n enabled: true,\n phase: 'main',\n requiresIfExists: ['preventOverflow'],\n fn: hide\n};","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport { top, left, right, placements } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport function distanceAndSkiddingToXY(placement, rects, offset) {\n var basePlacement = getBasePlacement(placement);\n var invertDistance = [left, top].indexOf(basePlacement) >= 0 ? -1 : 1;\n\n var _ref = typeof offset === 'function' ? 
offset(Object.assign({}, rects, {\n placement: placement\n })) : offset,\n skidding = _ref[0],\n distance = _ref[1];\n\n skidding = skidding || 0;\n distance = (distance || 0) * invertDistance;\n return [left, right].indexOf(basePlacement) >= 0 ? {\n x: distance,\n y: skidding\n } : {\n x: skidding,\n y: distance\n };\n}\n\nfunction offset(_ref2) {\n var state = _ref2.state,\n options = _ref2.options,\n name = _ref2.name;\n var _options$offset = options.offset,\n offset = _options$offset === void 0 ? [0, 0] : _options$offset;\n var data = placements.reduce(function (acc, placement) {\n acc[placement] = distanceAndSkiddingToXY(placement, state.rects, offset);\n return acc;\n }, {});\n var _data$state$placement = data[state.placement],\n x = _data$state$placement.x,\n y = _data$state$placement.y;\n\n if (state.modifiersData.popperOffsets != null) {\n state.modifiersData.popperOffsets.x += x;\n state.modifiersData.popperOffsets.y += y;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'offset',\n enabled: true,\n phase: 'main',\n requires: ['popperOffsets'],\n fn: offset\n};","import computeOffsets from \"../utils/computeOffsets.js\";\n\nfunction popperOffsets(_ref) {\n var state = _ref.state,\n name = _ref.name;\n // Offsets are the actual position the popper needs to have to be\n // properly positioned near its reference element\n // This is the most basic placement, and will be adjusted by\n // the modifiers in the next step\n state.modifiersData[name] = computeOffsets({\n reference: state.rects.reference,\n element: state.rects.popper,\n strategy: 'absolute',\n placement: state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'popperOffsets',\n enabled: true,\n phase: 'read',\n fn: popperOffsets,\n data: {}\n};","import { top, left, right, bottom, start } from \"../enums.js\";\nimport getBasePlacement from 
\"../utils/getBasePlacement.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport getAltAxis from \"../utils/getAltAxis.js\";\nimport { within, withinMaxClamp } from \"../utils/within.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport getFreshSideObject from \"../utils/getFreshSideObject.js\";\nimport { min as mathMin, max as mathMax } from \"../utils/math.js\";\n\nfunction preventOverflow(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? false : _options$altAxis,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n padding = options.padding,\n _options$tether = options.tether,\n tether = _options$tether === void 0 ? true : _options$tether,\n _options$tetherOffset = options.tetherOffset,\n tetherOffset = _options$tetherOffset === void 0 ? 0 : _options$tetherOffset;\n var overflow = detectOverflow(state, {\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n altBoundary: altBoundary\n });\n var basePlacement = getBasePlacement(state.placement);\n var variation = getVariation(state.placement);\n var isBasePlacement = !variation;\n var mainAxis = getMainAxisFromPlacement(basePlacement);\n var altAxis = getAltAxis(mainAxis);\n var popperOffsets = state.modifiersData.popperOffsets;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var tetherOffsetValue = typeof tetherOffset === 'function' ? 
tetherOffset(Object.assign({}, state.rects, {\n placement: state.placement\n })) : tetherOffset;\n var normalizedTetherOffsetValue = typeof tetherOffsetValue === 'number' ? {\n mainAxis: tetherOffsetValue,\n altAxis: tetherOffsetValue\n } : Object.assign({\n mainAxis: 0,\n altAxis: 0\n }, tetherOffsetValue);\n var offsetModifierState = state.modifiersData.offset ? state.modifiersData.offset[state.placement] : null;\n var data = {\n x: 0,\n y: 0\n };\n\n if (!popperOffsets) {\n return;\n }\n\n if (checkMainAxis) {\n var _offsetModifierState$;\n\n var mainSide = mainAxis === 'y' ? top : left;\n var altSide = mainAxis === 'y' ? bottom : right;\n var len = mainAxis === 'y' ? 'height' : 'width';\n var offset = popperOffsets[mainAxis];\n var min = offset + overflow[mainSide];\n var max = offset - overflow[altSide];\n var additive = tether ? -popperRect[len] / 2 : 0;\n var minLen = variation === start ? referenceRect[len] : popperRect[len];\n var maxLen = variation === start ? -popperRect[len] : -referenceRect[len]; // We need to include the arrow in the calculation so the arrow doesn't go\n // outside the reference bounds\n\n var arrowElement = state.elements.arrow;\n var arrowRect = tether && arrowElement ? getLayoutRect(arrowElement) : {\n width: 0,\n height: 0\n };\n var arrowPaddingObject = state.modifiersData['arrow#persistent'] ? state.modifiersData['arrow#persistent'].padding : getFreshSideObject();\n var arrowPaddingMin = arrowPaddingObject[mainSide];\n var arrowPaddingMax = arrowPaddingObject[altSide]; // If the reference length is smaller than the arrow length, we don't want\n // to include its full size in the calculation. If the reference is small\n // and near the edge of a boundary, the popper can overflow even if the\n // reference is not overflowing as well (e.g. virtual elements with no\n // width or height)\n\n var arrowLen = within(0, referenceRect[len], arrowRect[len]);\n var minOffset = isBasePlacement ? 
referenceRect[len] / 2 - additive - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis : minLen - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis;\n var maxOffset = isBasePlacement ? -referenceRect[len] / 2 + additive + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis : maxLen + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis;\n var arrowOffsetParent = state.elements.arrow && getOffsetParent(state.elements.arrow);\n var clientOffset = arrowOffsetParent ? mainAxis === 'y' ? arrowOffsetParent.clientTop || 0 : arrowOffsetParent.clientLeft || 0 : 0;\n var offsetModifierValue = (_offsetModifierState$ = offsetModifierState == null ? void 0 : offsetModifierState[mainAxis]) != null ? _offsetModifierState$ : 0;\n var tetherMin = offset + minOffset - offsetModifierValue - clientOffset;\n var tetherMax = offset + maxOffset - offsetModifierValue;\n var preventedOffset = within(tether ? mathMin(min, tetherMin) : min, offset, tether ? mathMax(max, tetherMax) : max);\n popperOffsets[mainAxis] = preventedOffset;\n data[mainAxis] = preventedOffset - offset;\n }\n\n if (checkAltAxis) {\n var _offsetModifierState$2;\n\n var _mainSide = mainAxis === 'x' ? top : left;\n\n var _altSide = mainAxis === 'x' ? bottom : right;\n\n var _offset = popperOffsets[altAxis];\n\n var _len = altAxis === 'y' ? 'height' : 'width';\n\n var _min = _offset + overflow[_mainSide];\n\n var _max = _offset - overflow[_altSide];\n\n var isOriginSide = [top, left].indexOf(basePlacement) !== -1;\n\n var _offsetModifierValue = (_offsetModifierState$2 = offsetModifierState == null ? void 0 : offsetModifierState[altAxis]) != null ? _offsetModifierState$2 : 0;\n\n var _tetherMin = isOriginSide ? _min : _offset - referenceRect[_len] - popperRect[_len] - _offsetModifierValue + normalizedTetherOffsetValue.altAxis;\n\n var _tetherMax = isOriginSide ? 
_offset + referenceRect[_len] + popperRect[_len] - _offsetModifierValue - normalizedTetherOffsetValue.altAxis : _max;\n\n var _preventedOffset = tether && isOriginSide ? withinMaxClamp(_tetherMin, _offset, _tetherMax) : within(tether ? _tetherMin : _min, _offset, tether ? _tetherMax : _max);\n\n popperOffsets[altAxis] = _preventedOffset;\n data[altAxis] = _preventedOffset - _offset;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'preventOverflow',\n enabled: true,\n phase: 'main',\n fn: preventOverflow,\n requiresIfExists: ['offset']\n};","export default function getAltAxis(axis) {\n return axis === 'x' ? 'y' : 'x';\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getNodeScroll from \"./getNodeScroll.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport { round } from \"../utils/math.js\";\n\nfunction isElementScaled(element) {\n var rect = element.getBoundingClientRect();\n var scaleX = round(rect.width) / element.offsetWidth || 1;\n var scaleY = round(rect.height) / element.offsetHeight || 1;\n return scaleX !== 1 || scaleY !== 1;\n} // Returns the composite rect of an element relative to its offsetParent.\n// Composite means it takes into account transforms as well as layout.\n\n\nexport default function getCompositeRect(elementOrVirtualElement, offsetParent, isFixed) {\n if (isFixed === void 0) {\n isFixed = false;\n }\n\n var isOffsetParentAnElement = isHTMLElement(offsetParent);\n var offsetParentIsScaled = isHTMLElement(offsetParent) && isElementScaled(offsetParent);\n var documentElement = getDocumentElement(offsetParent);\n var rect = getBoundingClientRect(elementOrVirtualElement, offsetParentIsScaled, isFixed);\n 
var scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n var offsets = {\n x: 0,\n y: 0\n };\n\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || // https://github.com/popperjs/popper-core/issues/1078\n isScrollParent(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n\n if (isHTMLElement(offsetParent)) {\n offsets = getBoundingClientRect(offsetParent, true);\n offsets.x += offsetParent.clientLeft;\n offsets.y += offsetParent.clientTop;\n } else if (documentElement) {\n offsets.x = getWindowScrollBarX(documentElement);\n }\n }\n\n return {\n x: rect.left + scroll.scrollLeft - offsets.x,\n y: rect.top + scroll.scrollTop - offsets.y,\n width: rect.width,\n height: rect.height\n };\n}","import getWindowScroll from \"./getWindowScroll.js\";\nimport getWindow from \"./getWindow.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getHTMLElementScroll from \"./getHTMLElementScroll.js\";\nexport default function getNodeScroll(node) {\n if (node === getWindow(node) || !isHTMLElement(node)) {\n return getWindowScroll(node);\n } else {\n return getHTMLElementScroll(node);\n }\n}","export default function getHTMLElementScroll(element) {\n return {\n scrollLeft: element.scrollLeft,\n scrollTop: element.scrollTop\n };\n}","import { modifierPhases } from \"../enums.js\"; // source: https://stackoverflow.com/questions/49875255\n\nfunction order(modifiers) {\n var map = new Map();\n var visited = new Set();\n var result = [];\n modifiers.forEach(function (modifier) {\n map.set(modifier.name, modifier);\n }); // On visiting object, check for its dependencies and visit them recursively\n\n function sort(modifier) {\n visited.add(modifier.name);\n var requires = [].concat(modifier.requires || [], modifier.requiresIfExists || []);\n requires.forEach(function (dep) {\n if (!visited.has(dep)) {\n var depModifier = map.get(dep);\n\n if (depModifier) {\n sort(depModifier);\n }\n }\n });\n 
result.push(modifier);\n }\n\n modifiers.forEach(function (modifier) {\n if (!visited.has(modifier.name)) {\n // check for visited object\n sort(modifier);\n }\n });\n return result;\n}\n\nexport default function orderModifiers(modifiers) {\n // order based on dependencies\n var orderedModifiers = order(modifiers); // order based on phase\n\n return modifierPhases.reduce(function (acc, phase) {\n return acc.concat(orderedModifiers.filter(function (modifier) {\n return modifier.phase === phase;\n }));\n }, []);\n}","import getCompositeRect from \"./dom-utils/getCompositeRect.js\";\nimport getLayoutRect from \"./dom-utils/getLayoutRect.js\";\nimport listScrollParents from \"./dom-utils/listScrollParents.js\";\nimport getOffsetParent from \"./dom-utils/getOffsetParent.js\";\nimport orderModifiers from \"./utils/orderModifiers.js\";\nimport debounce from \"./utils/debounce.js\";\nimport mergeByName from \"./utils/mergeByName.js\";\nimport detectOverflow from \"./utils/detectOverflow.js\";\nimport { isElement } from \"./dom-utils/instanceOf.js\";\nvar DEFAULT_OPTIONS = {\n placement: 'bottom',\n modifiers: [],\n strategy: 'absolute'\n};\n\nfunction areValidElements() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return !args.some(function (element) {\n return !(element && typeof element.getBoundingClientRect === 'function');\n });\n}\n\nexport function popperGenerator(generatorOptions) {\n if (generatorOptions === void 0) {\n generatorOptions = {};\n }\n\n var _generatorOptions = generatorOptions,\n _generatorOptions$def = _generatorOptions.defaultModifiers,\n defaultModifiers = _generatorOptions$def === void 0 ? [] : _generatorOptions$def,\n _generatorOptions$def2 = _generatorOptions.defaultOptions,\n defaultOptions = _generatorOptions$def2 === void 0 ? 
DEFAULT_OPTIONS : _generatorOptions$def2;\n return function createPopper(reference, popper, options) {\n if (options === void 0) {\n options = defaultOptions;\n }\n\n var state = {\n placement: 'bottom',\n orderedModifiers: [],\n options: Object.assign({}, DEFAULT_OPTIONS, defaultOptions),\n modifiersData: {},\n elements: {\n reference: reference,\n popper: popper\n },\n attributes: {},\n styles: {}\n };\n var effectCleanupFns = [];\n var isDestroyed = false;\n var instance = {\n state: state,\n setOptions: function setOptions(setOptionsAction) {\n var options = typeof setOptionsAction === 'function' ? setOptionsAction(state.options) : setOptionsAction;\n cleanupModifierEffects();\n state.options = Object.assign({}, defaultOptions, state.options, options);\n state.scrollParents = {\n reference: isElement(reference) ? listScrollParents(reference) : reference.contextElement ? listScrollParents(reference.contextElement) : [],\n popper: listScrollParents(popper)\n }; // Orders the modifiers based on their dependencies and `phase`\n // properties\n\n var orderedModifiers = orderModifiers(mergeByName([].concat(defaultModifiers, state.options.modifiers))); // Strip out disabled modifiers\n\n state.orderedModifiers = orderedModifiers.filter(function (m) {\n return m.enabled;\n });\n runModifierEffects();\n return instance.update();\n },\n // Sync update – it will always be executed, even if not necessary. This\n // is useful for low frequency updates where sync behavior simplifies the\n // logic.\n // For high frequency updates (e.g. 
`resize` and `scroll` events), always\n // prefer the async Popper#update method\n forceUpdate: function forceUpdate() {\n if (isDestroyed) {\n return;\n }\n\n var _state$elements = state.elements,\n reference = _state$elements.reference,\n popper = _state$elements.popper; // Don't proceed if `reference` or `popper` are not valid elements\n // anymore\n\n if (!areValidElements(reference, popper)) {\n return;\n } // Store the reference and popper rects to be read by modifiers\n\n\n state.rects = {\n reference: getCompositeRect(reference, getOffsetParent(popper), state.options.strategy === 'fixed'),\n popper: getLayoutRect(popper)\n }; // Modifiers have the ability to reset the current update cycle. The\n // most common use case for this is the `flip` modifier changing the\n // placement, which then needs to re-run all the modifiers, because the\n // logic was previously ran for the previous placement and is therefore\n // stale/incorrect\n\n state.reset = false;\n state.placement = state.options.placement; // On each update cycle, the `modifiersData` property for each modifier\n // is filled with the initial data specified by the modifier. This means\n // it doesn't persist and is fresh on each update.\n // To ensure persistent data, use `${name}#persistent`\n\n state.orderedModifiers.forEach(function (modifier) {\n return state.modifiersData[modifier.name] = Object.assign({}, modifier.data);\n });\n\n for (var index = 0; index < state.orderedModifiers.length; index++) {\n if (state.reset === true) {\n state.reset = false;\n index = -1;\n continue;\n }\n\n var _state$orderedModifie = state.orderedModifiers[index],\n fn = _state$orderedModifie.fn,\n _state$orderedModifie2 = _state$orderedModifie.options,\n _options = _state$orderedModifie2 === void 0 ? 
{} : _state$orderedModifie2,\n name = _state$orderedModifie.name;\n\n if (typeof fn === 'function') {\n state = fn({\n state: state,\n options: _options,\n name: name,\n instance: instance\n }) || state;\n }\n }\n },\n // Async and optimistically optimized update – it will not be executed if\n // not necessary (debounced to run at most once-per-tick)\n update: debounce(function () {\n return new Promise(function (resolve) {\n instance.forceUpdate();\n resolve(state);\n });\n }),\n destroy: function destroy() {\n cleanupModifierEffects();\n isDestroyed = true;\n }\n };\n\n if (!areValidElements(reference, popper)) {\n return instance;\n }\n\n instance.setOptions(options).then(function (state) {\n if (!isDestroyed && options.onFirstUpdate) {\n options.onFirstUpdate(state);\n }\n }); // Modifiers have the ability to execute arbitrary code before the first\n // update cycle runs. They will be executed in the same order as the update\n // cycle. This is useful when a modifier adds some persistent data that\n // other modifiers need to use, but the modifier is run after the dependent\n // one.\n\n function runModifierEffects() {\n state.orderedModifiers.forEach(function (_ref) {\n var name = _ref.name,\n _ref$options = _ref.options,\n options = _ref$options === void 0 ? 
{} : _ref$options,\n effect = _ref.effect;\n\n if (typeof effect === 'function') {\n var cleanupFn = effect({\n state: state,\n name: name,\n instance: instance,\n options: options\n });\n\n var noopFn = function noopFn() {};\n\n effectCleanupFns.push(cleanupFn || noopFn);\n }\n });\n }\n\n function cleanupModifierEffects() {\n effectCleanupFns.forEach(function (fn) {\n return fn();\n });\n effectCleanupFns = [];\n }\n\n return instance;\n };\n}\nexport var createPopper = /*#__PURE__*/popperGenerator(); // eslint-disable-next-line import/no-unused-modules\n\nexport { detectOverflow };","export default function debounce(fn) {\n var pending;\n return function () {\n if (!pending) {\n pending = new Promise(function (resolve) {\n Promise.resolve().then(function () {\n pending = undefined;\n resolve(fn());\n });\n });\n }\n\n return pending;\n };\n}","export default function mergeByName(modifiers) {\n var merged = modifiers.reduce(function (merged, current) {\n var existing = merged[current.name];\n merged[current.name] = existing ? 
Object.assign({}, existing, current, {\n options: Object.assign({}, existing.options, current.options),\n data: Object.assign({}, existing.data, current.data)\n }) : current;\n return merged;\n }, {}); // IE11 does not support Object.values\n\n return Object.keys(merged).map(function (key) {\n return merged[key];\n });\n}","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nimport offset from \"./modifiers/offset.js\";\nimport flip from \"./modifiers/flip.js\";\nimport preventOverflow from \"./modifiers/preventOverflow.js\";\nimport arrow from \"./modifiers/arrow.js\";\nimport hide from \"./modifiers/hide.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles, offset, flip, preventOverflow, arrow, hide];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow }; // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper as createPopperLite } from \"./popper-lite.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport * from \"./modifiers/index.js\";","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line 
import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow };","/*!\n * Bootstrap v5.3.2 (https://getbootstrap.com/)\n * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\nimport * as Popper from '@popperjs/core';\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/data.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n/**\n * Constants\n */\n\nconst elementMap = new Map();\nconst Data = {\n set(element, key, instance) {\n if (!elementMap.has(element)) {\n elementMap.set(element, new Map());\n }\n const instanceMap = elementMap.get(element);\n\n // make it clear we only want one instance per element\n // can be removed later when multiple key/instances are fine to be used\n if (!instanceMap.has(key) && instanceMap.size !== 0) {\n // eslint-disable-next-line no-console\n console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(instanceMap.keys())[0]}.`);\n return;\n }\n instanceMap.set(key, instance);\n },\n get(element, key) {\n if (elementMap.has(element)) {\n return elementMap.get(element).get(key) || null;\n }\n return null;\n },\n remove(element, key) {\n if (!elementMap.has(element)) {\n return;\n }\n const instanceMap = elementMap.get(element);\n instanceMap.delete(key);\n\n // free up element references if there are no instances left for an element\n if (instanceMap.size === 0) {\n elementMap.delete(element);\n }\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/index.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst MAX_UID = 1000000;\nconst MILLISECONDS_MULTIPLIER = 1000;\nconst TRANSITION_END = 'transitionend';\n\n/**\n * Properly escape IDs selectors to handle weird IDs\n * @param {string} selector\n * @returns {string}\n */\nconst parseSelector = selector => {\n if (selector && window.CSS && window.CSS.escape) {\n // document.querySelector needs escaping to handle IDs (html5+) containing for instance /\n selector = selector.replace(/#([^\\s\"#']+)/g, (match, id) => `#${CSS.escape(id)}`);\n }\n return selector;\n};\n\n// Shout-out Angus Croll (https://goo.gl/pxwQGp)\nconst toType = object => {\n if (object === null || object === undefined) {\n return `${object}`;\n }\n return Object.prototype.toString.call(object).match(/\\s([a-z]+)/i)[1].toLowerCase();\n};\n\n/**\n * Public Util API\n */\n\nconst getUID = prefix => {\n do {\n prefix += Math.floor(Math.random() * MAX_UID);\n } while (document.getElementById(prefix));\n return prefix;\n};\nconst getTransitionDurationFromElement = element => {\n if (!element) {\n return 0;\n }\n\n // Get transition-duration of the element\n let {\n transitionDuration,\n transitionDelay\n } = window.getComputedStyle(element);\n const 
floatTransitionDuration = Number.parseFloat(transitionDuration);\n const floatTransitionDelay = Number.parseFloat(transitionDelay);\n\n // Return 0 if element or transition duration is not found\n if (!floatTransitionDuration && !floatTransitionDelay) {\n return 0;\n }\n\n // If multiple durations are defined, take the first\n transitionDuration = transitionDuration.split(',')[0];\n transitionDelay = transitionDelay.split(',')[0];\n return (Number.parseFloat(transitionDuration) + Number.parseFloat(transitionDelay)) * MILLISECONDS_MULTIPLIER;\n};\nconst triggerTransitionEnd = element => {\n element.dispatchEvent(new Event(TRANSITION_END));\n};\nconst isElement = object => {\n if (!object || typeof object !== 'object') {\n return false;\n }\n if (typeof object.jquery !== 'undefined') {\n object = object[0];\n }\n return typeof object.nodeType !== 'undefined';\n};\nconst getElement = object => {\n // it's a jQuery object or a node element\n if (isElement(object)) {\n return object.jquery ? 
object[0] : object;\n }\n if (typeof object === 'string' && object.length > 0) {\n return document.querySelector(parseSelector(object));\n }\n return null;\n};\nconst isVisible = element => {\n if (!isElement(element) || element.getClientRects().length === 0) {\n return false;\n }\n const elementIsVisible = getComputedStyle(element).getPropertyValue('visibility') === 'visible';\n // Handle `details` element as its content may falsie appear visible when it is closed\n const closedDetails = element.closest('details:not([open])');\n if (!closedDetails) {\n return elementIsVisible;\n }\n if (closedDetails !== element) {\n const summary = element.closest('summary');\n if (summary && summary.parentNode !== closedDetails) {\n return false;\n }\n if (summary === null) {\n return false;\n }\n }\n return elementIsVisible;\n};\nconst isDisabled = element => {\n if (!element || element.nodeType !== Node.ELEMENT_NODE) {\n return true;\n }\n if (element.classList.contains('disabled')) {\n return true;\n }\n if (typeof element.disabled !== 'undefined') {\n return element.disabled;\n }\n return element.hasAttribute('disabled') && element.getAttribute('disabled') !== 'false';\n};\nconst findShadowRoot = element => {\n if (!document.documentElement.attachShadow) {\n return null;\n }\n\n // Can find the shadow root otherwise it'll return the document\n if (typeof element.getRootNode === 'function') {\n const root = element.getRootNode();\n return root instanceof ShadowRoot ? 
root : null;\n }\n if (element instanceof ShadowRoot) {\n return element;\n }\n\n // when we don't find a shadow root\n if (!element.parentNode) {\n return null;\n }\n return findShadowRoot(element.parentNode);\n};\nconst noop = () => {};\n\n/**\n * Trick to restart an element's animation\n *\n * @param {HTMLElement} element\n * @return void\n *\n * @see https://www.charistheo.io/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation\n */\nconst reflow = element => {\n element.offsetHeight; // eslint-disable-line no-unused-expressions\n};\n\nconst getjQuery = () => {\n if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {\n return window.jQuery;\n }\n return null;\n};\nconst DOMContentLoadedCallbacks = [];\nconst onDOMContentLoaded = callback => {\n if (document.readyState === 'loading') {\n // add listener on the first call when the document is in loading state\n if (!DOMContentLoadedCallbacks.length) {\n document.addEventListener('DOMContentLoaded', () => {\n for (const callback of DOMContentLoadedCallbacks) {\n callback();\n }\n });\n }\n DOMContentLoadedCallbacks.push(callback);\n } else {\n callback();\n }\n};\nconst isRTL = () => document.documentElement.dir === 'rtl';\nconst defineJQueryPlugin = plugin => {\n onDOMContentLoaded(() => {\n const $ = getjQuery();\n /* istanbul ignore if */\n if ($) {\n const name = plugin.NAME;\n const JQUERY_NO_CONFLICT = $.fn[name];\n $.fn[name] = plugin.jQueryInterface;\n $.fn[name].Constructor = plugin;\n $.fn[name].noConflict = () => {\n $.fn[name] = JQUERY_NO_CONFLICT;\n return plugin.jQueryInterface;\n };\n }\n });\n};\nconst execute = (possibleCallback, args = [], defaultValue = possibleCallback) => {\n return typeof possibleCallback === 'function' ? 
possibleCallback(...args) : defaultValue;\n};\nconst executeAfterTransition = (callback, transitionElement, waitForTransition = true) => {\n if (!waitForTransition) {\n execute(callback);\n return;\n }\n const durationPadding = 5;\n const emulatedDuration = getTransitionDurationFromElement(transitionElement) + durationPadding;\n let called = false;\n const handler = ({\n target\n }) => {\n if (target !== transitionElement) {\n return;\n }\n called = true;\n transitionElement.removeEventListener(TRANSITION_END, handler);\n execute(callback);\n };\n transitionElement.addEventListener(TRANSITION_END, handler);\n setTimeout(() => {\n if (!called) {\n triggerTransitionEnd(transitionElement);\n }\n }, emulatedDuration);\n};\n\n/**\n * Return the previous/next element of a list.\n *\n * @param {array} list The list of elements\n * @param activeElement The active element\n * @param shouldGetNext Choose to get next or previous element\n * @param isCycleAllowed\n * @return {Element|elem} The proper element\n */\nconst getNextActiveElement = (list, activeElement, shouldGetNext, isCycleAllowed) => {\n const listLength = list.length;\n let index = list.indexOf(activeElement);\n\n // if the element does not exist in the list return an element\n // depending on the direction and if cycle is allowed\n if (index === -1) {\n return !shouldGetNext && isCycleAllowed ? list[listLength - 1] : list[0];\n }\n index += shouldGetNext ? 
1 : -1;\n if (isCycleAllowed) {\n index = (index + listLength) % listLength;\n }\n return list[Math.max(0, Math.min(index, listLength - 1))];\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/event-handler.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst namespaceRegex = /[^.]*(?=\\..*)\\.|.*/;\nconst stripNameRegex = /\\..*/;\nconst stripUidRegex = /::\\d+$/;\nconst eventRegistry = {}; // Events storage\nlet uidEvent = 1;\nconst customEvents = {\n mouseenter: 'mouseover',\n mouseleave: 'mouseout'\n};\nconst nativeEvents = new Set(['click', 'dblclick', 'mouseup', 'mousedown', 'contextmenu', 'mousewheel', 'DOMMouseScroll', 'mouseover', 'mouseout', 'mousemove', 'selectstart', 'selectend', 'keydown', 'keypress', 'keyup', 'orientationchange', 'touchstart', 'touchmove', 'touchend', 'touchcancel', 'pointerdown', 'pointermove', 'pointerup', 'pointerleave', 'pointercancel', 'gesturestart', 'gesturechange', 'gestureend', 'focus', 'blur', 'change', 'reset', 'select', 'submit', 'focusin', 'focusout', 'load', 'unload', 'beforeunload', 'resize', 'move', 'DOMContentLoaded', 'readystatechange', 'error', 'abort', 'scroll']);\n\n/**\n * Private methods\n */\n\nfunction makeEventUid(element, uid) {\n return uid && `${uid}::${uidEvent++}` || element.uidEvent || uidEvent++;\n}\nfunction getElementEvents(element) {\n const uid = makeEventUid(element);\n element.uidEvent = uid;\n eventRegistry[uid] = eventRegistry[uid] || {};\n return eventRegistry[uid];\n}\nfunction bootstrapHandler(element, fn) {\n return function handler(event) {\n hydrateObj(event, {\n delegateTarget: element\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, fn);\n }\n return fn.apply(element, [event]);\n };\n}\nfunction bootstrapDelegationHandler(element, selector, fn) {\n return function 
handler(event) {\n const domElements = element.querySelectorAll(selector);\n for (let {\n target\n } = event; target && target !== this; target = target.parentNode) {\n for (const domElement of domElements) {\n if (domElement !== target) {\n continue;\n }\n hydrateObj(event, {\n delegateTarget: target\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, selector, fn);\n }\n return fn.apply(target, [event]);\n }\n }\n };\n}\nfunction findHandler(events, callable, delegationSelector = null) {\n return Object.values(events).find(event => event.callable === callable && event.delegationSelector === delegationSelector);\n}\nfunction normalizeParameters(originalTypeEvent, handler, delegationFunction) {\n const isDelegated = typeof handler === 'string';\n // TODO: tooltip passes `false` instead of selector, so we need to check\n const callable = isDelegated ? delegationFunction : handler || delegationFunction;\n let typeEvent = getTypeEvent(originalTypeEvent);\n if (!nativeEvents.has(typeEvent)) {\n typeEvent = originalTypeEvent;\n }\n return [isDelegated, callable, typeEvent];\n}\nfunction addHandler(element, originalTypeEvent, handler, delegationFunction, oneOff) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n let [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n\n // in case of mouseenter or mouseleave wrap the handler within a function that checks for its DOM position\n // this prevents the handler from being dispatched the same way as mouseover or mouseout does\n if (originalTypeEvent in customEvents) {\n const wrapFunction = fn => {\n return function (event) {\n if (!event.relatedTarget || event.relatedTarget !== event.delegateTarget && !event.delegateTarget.contains(event.relatedTarget)) {\n return fn.call(this, event);\n }\n };\n };\n callable = wrapFunction(callable);\n }\n const events = getElementEvents(element);\n const handlers = events[typeEvent] || 
(events[typeEvent] = {});\n const previousFunction = findHandler(handlers, callable, isDelegated ? handler : null);\n if (previousFunction) {\n previousFunction.oneOff = previousFunction.oneOff && oneOff;\n return;\n }\n const uid = makeEventUid(callable, originalTypeEvent.replace(namespaceRegex, ''));\n const fn = isDelegated ? bootstrapDelegationHandler(element, handler, callable) : bootstrapHandler(element, callable);\n fn.delegationSelector = isDelegated ? handler : null;\n fn.callable = callable;\n fn.oneOff = oneOff;\n fn.uidEvent = uid;\n handlers[uid] = fn;\n element.addEventListener(typeEvent, fn, isDelegated);\n}\nfunction removeHandler(element, events, typeEvent, handler, delegationSelector) {\n const fn = findHandler(events[typeEvent], handler, delegationSelector);\n if (!fn) {\n return;\n }\n element.removeEventListener(typeEvent, fn, Boolean(delegationSelector));\n delete events[typeEvent][fn.uidEvent];\n}\nfunction removeNamespacedHandlers(element, events, typeEvent, namespace) {\n const storeElementEvent = events[typeEvent] || {};\n for (const [handlerKey, event] of Object.entries(storeElementEvent)) {\n if (handlerKey.includes(namespace)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n}\nfunction getTypeEvent(event) {\n // allow to get the native events from namespaced events ('click.bs.button' --> 'click')\n event = event.replace(stripNameRegex, '');\n return customEvents[event] || event;\n}\nconst EventHandler = {\n on(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, false);\n },\n one(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, true);\n },\n off(element, originalTypeEvent, handler, delegationFunction) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n const [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, 
delegationFunction);\n const inNamespace = typeEvent !== originalTypeEvent;\n const events = getElementEvents(element);\n const storeElementEvent = events[typeEvent] || {};\n const isNamespace = originalTypeEvent.startsWith('.');\n if (typeof callable !== 'undefined') {\n // Simplest case: handler is passed, remove that listener ONLY.\n if (!Object.keys(storeElementEvent).length) {\n return;\n }\n removeHandler(element, events, typeEvent, callable, isDelegated ? handler : null);\n return;\n }\n if (isNamespace) {\n for (const elementEvent of Object.keys(events)) {\n removeNamespacedHandlers(element, events, elementEvent, originalTypeEvent.slice(1));\n }\n }\n for (const [keyHandlers, event] of Object.entries(storeElementEvent)) {\n const handlerKey = keyHandlers.replace(stripUidRegex, '');\n if (!inNamespace || originalTypeEvent.includes(handlerKey)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n },\n trigger(element, event, args) {\n if (typeof event !== 'string' || !element) {\n return null;\n }\n const $ = getjQuery();\n const typeEvent = getTypeEvent(event);\n const inNamespace = event !== typeEvent;\n let jQueryEvent = null;\n let bubbles = true;\n let nativeDispatch = true;\n let defaultPrevented = false;\n if (inNamespace && $) {\n jQueryEvent = $.Event(event, args);\n $(element).trigger(jQueryEvent);\n bubbles = !jQueryEvent.isPropagationStopped();\n nativeDispatch = !jQueryEvent.isImmediatePropagationStopped();\n defaultPrevented = jQueryEvent.isDefaultPrevented();\n }\n const evt = hydrateObj(new Event(event, {\n bubbles,\n cancelable: true\n }), args);\n if (defaultPrevented) {\n evt.preventDefault();\n }\n if (nativeDispatch) {\n element.dispatchEvent(evt);\n }\n if (evt.defaultPrevented && jQueryEvent) {\n jQueryEvent.preventDefault();\n }\n return evt;\n }\n};\nfunction hydrateObj(obj, meta = {}) {\n for (const [key, value] of Object.entries(meta)) {\n try {\n obj[key] = value;\n } catch (_unused) 
{\n Object.defineProperty(obj, key, {\n configurable: true,\n get() {\n return value;\n }\n });\n }\n }\n return obj;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/manipulator.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nfunction normalizeData(value) {\n if (value === 'true') {\n return true;\n }\n if (value === 'false') {\n return false;\n }\n if (value === Number(value).toString()) {\n return Number(value);\n }\n if (value === '' || value === 'null') {\n return null;\n }\n if (typeof value !== 'string') {\n return value;\n }\n try {\n return JSON.parse(decodeURIComponent(value));\n } catch (_unused) {\n return value;\n }\n}\nfunction normalizeDataKey(key) {\n return key.replace(/[A-Z]/g, chr => `-${chr.toLowerCase()}`);\n}\nconst Manipulator = {\n setDataAttribute(element, key, value) {\n element.setAttribute(`data-bs-${normalizeDataKey(key)}`, value);\n },\n removeDataAttribute(element, key) {\n element.removeAttribute(`data-bs-${normalizeDataKey(key)}`);\n },\n getDataAttributes(element) {\n if (!element) {\n return {};\n }\n const attributes = {};\n const bsKeys = Object.keys(element.dataset).filter(key => key.startsWith('bs') && !key.startsWith('bsConfig'));\n for (const key of bsKeys) {\n let pureKey = key.replace(/^bs/, '');\n pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1, pureKey.length);\n attributes[pureKey] = normalizeData(element.dataset[key]);\n }\n return attributes;\n },\n getDataAttribute(element, key) {\n return normalizeData(element.getAttribute(`data-bs-${normalizeDataKey(key)}`));\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/config.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * 
--------------------------------------------------------------------------\n */\n\n\n/**\n * Class definition\n */\n\nclass Config {\n // Getters\n static get Default() {\n return {};\n }\n static get DefaultType() {\n return {};\n }\n static get NAME() {\n throw new Error('You have to implement the static method \"NAME\", for each component!');\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n return config;\n }\n _mergeConfigObj(config, element) {\n const jsonConfig = isElement(element) ? Manipulator.getDataAttribute(element, 'config') : {}; // try to parse\n\n return {\n ...this.constructor.Default,\n ...(typeof jsonConfig === 'object' ? jsonConfig : {}),\n ...(isElement(element) ? Manipulator.getDataAttributes(element) : {}),\n ...(typeof config === 'object' ? config : {})\n };\n }\n _typeCheckConfig(config, configTypes = this.constructor.DefaultType) {\n for (const [property, expectedTypes] of Object.entries(configTypes)) {\n const value = config[property];\n const valueType = isElement(value) ? 
'element' : toType(value);\n if (!new RegExp(expectedTypes).test(valueType)) {\n throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option \"${property}\" provided type \"${valueType}\" but expected type \"${expectedTypes}\".`);\n }\n }\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap base-component.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst VERSION = '5.3.2';\n\n/**\n * Class definition\n */\n\nclass BaseComponent extends Config {\n constructor(element, config) {\n super();\n element = getElement(element);\n if (!element) {\n return;\n }\n this._element = element;\n this._config = this._getConfig(config);\n Data.set(this._element, this.constructor.DATA_KEY, this);\n }\n\n // Public\n dispose() {\n Data.remove(this._element, this.constructor.DATA_KEY);\n EventHandler.off(this._element, this.constructor.EVENT_KEY);\n for (const propertyName of Object.getOwnPropertyNames(this)) {\n this[propertyName] = null;\n }\n }\n _queueCallback(callback, element, isAnimated = true) {\n executeAfterTransition(callback, element, isAnimated);\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config, this._element);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n\n // Static\n static getInstance(element) {\n return Data.get(getElement(element), this.DATA_KEY);\n }\n static getOrCreateInstance(element, config = {}) {\n return this.getInstance(element) || new this(element, typeof config === 'object' ? 
config : null);\n }\n static get VERSION() {\n return VERSION;\n }\n static get DATA_KEY() {\n return `bs.${this.NAME}`;\n }\n static get EVENT_KEY() {\n return `.${this.DATA_KEY}`;\n }\n static eventName(name) {\n return `${name}${this.EVENT_KEY}`;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/selector-engine.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst getSelector = element => {\n let selector = element.getAttribute('data-bs-target');\n if (!selector || selector === '#') {\n let hrefAttribute = element.getAttribute('href');\n\n // The only valid content that could double as a selector are IDs or classes,\n // so everything starting with `#` or `.`. If a \"real\" URL is used as the selector,\n // `document.querySelector` will rightfully complain it is invalid.\n // See https://github.com/twbs/bootstrap/issues/32273\n if (!hrefAttribute || !hrefAttribute.includes('#') && !hrefAttribute.startsWith('.')) {\n return null;\n }\n\n // Just in case some CMS puts out a full URL with the anchor appended\n if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) {\n hrefAttribute = `#${hrefAttribute.split('#')[1]}`;\n }\n selector = hrefAttribute && hrefAttribute !== '#' ? 
parseSelector(hrefAttribute.trim()) : null;\n }\n return selector;\n};\nconst SelectorEngine = {\n find(selector, element = document.documentElement) {\n return [].concat(...Element.prototype.querySelectorAll.call(element, selector));\n },\n findOne(selector, element = document.documentElement) {\n return Element.prototype.querySelector.call(element, selector);\n },\n children(element, selector) {\n return [].concat(...element.children).filter(child => child.matches(selector));\n },\n parents(element, selector) {\n const parents = [];\n let ancestor = element.parentNode.closest(selector);\n while (ancestor) {\n parents.push(ancestor);\n ancestor = ancestor.parentNode.closest(selector);\n }\n return parents;\n },\n prev(element, selector) {\n let previous = element.previousElementSibling;\n while (previous) {\n if (previous.matches(selector)) {\n return [previous];\n }\n previous = previous.previousElementSibling;\n }\n return [];\n },\n // TODO: this is now unused; remove later along with prev()\n next(element, selector) {\n let next = element.nextElementSibling;\n while (next) {\n if (next.matches(selector)) {\n return [next];\n }\n next = next.nextElementSibling;\n }\n return [];\n },\n focusableChildren(element) {\n const focusables = ['a', 'button', 'input', 'textarea', 'select', 'details', '[tabindex]', '[contenteditable=\"true\"]'].map(selector => `${selector}:not([tabindex^=\"-\"])`).join(',');\n return this.find(focusables, element).filter(el => !isDisabled(el) && isVisible(el));\n },\n getSelectorFromElement(element) {\n const selector = getSelector(element);\n if (selector) {\n return SelectorEngine.findOne(selector) ? selector : null;\n }\n return null;\n },\n getElementFromSelector(element) {\n const selector = getSelector(element);\n return selector ? SelectorEngine.findOne(selector) : null;\n },\n getMultipleElementsFromSelector(element) {\n const selector = getSelector(element);\n return selector ? 
SelectorEngine.find(selector) : [];\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/component-functions.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst enableDismissTrigger = (component, method = 'hide') => {\n const clickEvent = `click.dismiss${component.EVENT_KEY}`;\n const name = component.NAME;\n EventHandler.on(document, clickEvent, `[data-bs-dismiss=\"${name}\"]`, function (event) {\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n const target = SelectorEngine.getElementFromSelector(this) || this.closest(`.${name}`);\n const instance = component.getOrCreateInstance(target);\n\n // Method argument is left, for Alert and only, as it doesn't implement the 'hide' method\n instance[method]();\n });\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap alert.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$f = 'alert';\nconst DATA_KEY$a = 'bs.alert';\nconst EVENT_KEY$b = `.${DATA_KEY$a}`;\nconst EVENT_CLOSE = `close${EVENT_KEY$b}`;\nconst EVENT_CLOSED = `closed${EVENT_KEY$b}`;\nconst CLASS_NAME_FADE$5 = 'fade';\nconst CLASS_NAME_SHOW$8 = 'show';\n\n/**\n * Class definition\n */\n\nclass Alert extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$f;\n }\n\n // Public\n close() {\n const closeEvent = EventHandler.trigger(this._element, EVENT_CLOSE);\n if (closeEvent.defaultPrevented) {\n return;\n }\n this._element.classList.remove(CLASS_NAME_SHOW$8);\n const isAnimated = this._element.classList.contains(CLASS_NAME_FADE$5);\n this._queueCallback(() => this._destroyElement(), this._element, 
isAnimated);\n }\n\n // Private\n _destroyElement() {\n this._element.remove();\n EventHandler.trigger(this._element, EVENT_CLOSED);\n this.dispose();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Alert.getOrCreateInstance(this);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nenableDismissTrigger(Alert, 'close');\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Alert);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap button.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$e = 'button';\nconst DATA_KEY$9 = 'bs.button';\nconst EVENT_KEY$a = `.${DATA_KEY$9}`;\nconst DATA_API_KEY$6 = '.data-api';\nconst CLASS_NAME_ACTIVE$3 = 'active';\nconst SELECTOR_DATA_TOGGLE$5 = '[data-bs-toggle=\"button\"]';\nconst EVENT_CLICK_DATA_API$6 = `click${EVENT_KEY$a}${DATA_API_KEY$6}`;\n\n/**\n * Class definition\n */\n\nclass Button extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$e;\n }\n\n // Public\n toggle() {\n // Toggle class and sync the `aria-pressed` attribute with the return value of the `.toggle()` method\n this._element.setAttribute('aria-pressed', this._element.classList.toggle(CLASS_NAME_ACTIVE$3));\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Button.getOrCreateInstance(this);\n if (config === 'toggle') {\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$6, SELECTOR_DATA_TOGGLE$5, event => {\n event.preventDefault();\n const button = 
event.target.closest(SELECTOR_DATA_TOGGLE$5);\n const data = Button.getOrCreateInstance(button);\n data.toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Button);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/swipe.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$d = 'swipe';\nconst EVENT_KEY$9 = '.bs.swipe';\nconst EVENT_TOUCHSTART = `touchstart${EVENT_KEY$9}`;\nconst EVENT_TOUCHMOVE = `touchmove${EVENT_KEY$9}`;\nconst EVENT_TOUCHEND = `touchend${EVENT_KEY$9}`;\nconst EVENT_POINTERDOWN = `pointerdown${EVENT_KEY$9}`;\nconst EVENT_POINTERUP = `pointerup${EVENT_KEY$9}`;\nconst POINTER_TYPE_TOUCH = 'touch';\nconst POINTER_TYPE_PEN = 'pen';\nconst CLASS_NAME_POINTER_EVENT = 'pointer-event';\nconst SWIPE_THRESHOLD = 40;\nconst Default$c = {\n endCallback: null,\n leftCallback: null,\n rightCallback: null\n};\nconst DefaultType$c = {\n endCallback: '(function|null)',\n leftCallback: '(function|null)',\n rightCallback: '(function|null)'\n};\n\n/**\n * Class definition\n */\n\nclass Swipe extends Config {\n constructor(element, config) {\n super();\n this._element = element;\n if (!element || !Swipe.isSupported()) {\n return;\n }\n this._config = this._getConfig(config);\n this._deltaX = 0;\n this._supportPointerEvents = Boolean(window.PointerEvent);\n this._initEvents();\n }\n\n // Getters\n static get Default() {\n return Default$c;\n }\n static get DefaultType() {\n return DefaultType$c;\n }\n static get NAME() {\n return NAME$d;\n }\n\n // Public\n dispose() {\n EventHandler.off(this._element, EVENT_KEY$9);\n }\n\n // Private\n _start(event) {\n if (!this._supportPointerEvents) {\n this._deltaX = event.touches[0].clientX;\n return;\n }\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX;\n }\n }\n _end(event) {\n if 
(this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX - this._deltaX;\n }\n this._handleSwipe();\n execute(this._config.endCallback);\n }\n _move(event) {\n this._deltaX = event.touches && event.touches.length > 1 ? 0 : event.touches[0].clientX - this._deltaX;\n }\n _handleSwipe() {\n const absDeltaX = Math.abs(this._deltaX);\n if (absDeltaX <= SWIPE_THRESHOLD) {\n return;\n }\n const direction = absDeltaX / this._deltaX;\n this._deltaX = 0;\n if (!direction) {\n return;\n }\n execute(direction > 0 ? this._config.rightCallback : this._config.leftCallback);\n }\n _initEvents() {\n if (this._supportPointerEvents) {\n EventHandler.on(this._element, EVENT_POINTERDOWN, event => this._start(event));\n EventHandler.on(this._element, EVENT_POINTERUP, event => this._end(event));\n this._element.classList.add(CLASS_NAME_POINTER_EVENT);\n } else {\n EventHandler.on(this._element, EVENT_TOUCHSTART, event => this._start(event));\n EventHandler.on(this._element, EVENT_TOUCHMOVE, event => this._move(event));\n EventHandler.on(this._element, EVENT_TOUCHEND, event => this._end(event));\n }\n }\n _eventIsPointerPenTouch(event) {\n return this._supportPointerEvents && (event.pointerType === POINTER_TYPE_PEN || event.pointerType === POINTER_TYPE_TOUCH);\n }\n\n // Static\n static isSupported() {\n return 'ontouchstart' in document.documentElement || navigator.maxTouchPoints > 0;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap carousel.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$c = 'carousel';\nconst DATA_KEY$8 = 'bs.carousel';\nconst EVENT_KEY$8 = `.${DATA_KEY$8}`;\nconst DATA_API_KEY$5 = '.data-api';\nconst ARROW_LEFT_KEY$1 = 'ArrowLeft';\nconst ARROW_RIGHT_KEY$1 = 'ArrowRight';\nconst TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to 
fire after touch\n\nconst ORDER_NEXT = 'next';\nconst ORDER_PREV = 'prev';\nconst DIRECTION_LEFT = 'left';\nconst DIRECTION_RIGHT = 'right';\nconst EVENT_SLIDE = `slide${EVENT_KEY$8}`;\nconst EVENT_SLID = `slid${EVENT_KEY$8}`;\nconst EVENT_KEYDOWN$1 = `keydown${EVENT_KEY$8}`;\nconst EVENT_MOUSEENTER$1 = `mouseenter${EVENT_KEY$8}`;\nconst EVENT_MOUSELEAVE$1 = `mouseleave${EVENT_KEY$8}`;\nconst EVENT_DRAG_START = `dragstart${EVENT_KEY$8}`;\nconst EVENT_LOAD_DATA_API$3 = `load${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst EVENT_CLICK_DATA_API$5 = `click${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst CLASS_NAME_CAROUSEL = 'carousel';\nconst CLASS_NAME_ACTIVE$2 = 'active';\nconst CLASS_NAME_SLIDE = 'slide';\nconst CLASS_NAME_END = 'carousel-item-end';\nconst CLASS_NAME_START = 'carousel-item-start';\nconst CLASS_NAME_NEXT = 'carousel-item-next';\nconst CLASS_NAME_PREV = 'carousel-item-prev';\nconst SELECTOR_ACTIVE = '.active';\nconst SELECTOR_ITEM = '.carousel-item';\nconst SELECTOR_ACTIVE_ITEM = SELECTOR_ACTIVE + SELECTOR_ITEM;\nconst SELECTOR_ITEM_IMG = '.carousel-item img';\nconst SELECTOR_INDICATORS = '.carousel-indicators';\nconst SELECTOR_DATA_SLIDE = '[data-bs-slide], [data-bs-slide-to]';\nconst SELECTOR_DATA_RIDE = '[data-bs-ride=\"carousel\"]';\nconst KEY_TO_DIRECTION = {\n [ARROW_LEFT_KEY$1]: DIRECTION_RIGHT,\n [ARROW_RIGHT_KEY$1]: DIRECTION_LEFT\n};\nconst Default$b = {\n interval: 5000,\n keyboard: true,\n pause: 'hover',\n ride: false,\n touch: true,\n wrap: true\n};\nconst DefaultType$b = {\n interval: '(number|boolean)',\n // TODO:v6 remove boolean support\n keyboard: 'boolean',\n pause: '(string|boolean)',\n ride: '(boolean|string)',\n touch: 'boolean',\n wrap: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Carousel extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._interval = null;\n this._activeElement = null;\n this._isSliding = false;\n this.touchTimeout = null;\n this._swipeHelper = null;\n this._indicatorsElement 
= SelectorEngine.findOne(SELECTOR_INDICATORS, this._element);\n this._addEventListeners();\n if (this._config.ride === CLASS_NAME_CAROUSEL) {\n this.cycle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$b;\n }\n static get DefaultType() {\n return DefaultType$b;\n }\n static get NAME() {\n return NAME$c;\n }\n\n // Public\n next() {\n this._slide(ORDER_NEXT);\n }\n nextWhenVisible() {\n // FIXME TODO use `document.visibilityState`\n // Don't call next when the page isn't visible\n // or the carousel or its parent isn't visible\n if (!document.hidden && isVisible(this._element)) {\n this.next();\n }\n }\n prev() {\n this._slide(ORDER_PREV);\n }\n pause() {\n if (this._isSliding) {\n triggerTransitionEnd(this._element);\n }\n this._clearInterval();\n }\n cycle() {\n this._clearInterval();\n this._updateInterval();\n this._interval = setInterval(() => this.nextWhenVisible(), this._config.interval);\n }\n _maybeEnableCycle() {\n if (!this._config.ride) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.cycle());\n return;\n }\n this.cycle();\n }\n to(index) {\n const items = this._getItems();\n if (index > items.length - 1 || index < 0) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.to(index));\n return;\n }\n const activeIndex = this._getItemIndex(this._getActive());\n if (activeIndex === index) {\n return;\n }\n const order = index > activeIndex ? 
ORDER_NEXT : ORDER_PREV;\n this._slide(order, items[index]);\n }\n dispose() {\n if (this._swipeHelper) {\n this._swipeHelper.dispose();\n }\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n config.defaultInterval = config.interval;\n return config;\n }\n _addEventListeners() {\n if (this._config.keyboard) {\n EventHandler.on(this._element, EVENT_KEYDOWN$1, event => this._keydown(event));\n }\n if (this._config.pause === 'hover') {\n EventHandler.on(this._element, EVENT_MOUSEENTER$1, () => this.pause());\n EventHandler.on(this._element, EVENT_MOUSELEAVE$1, () => this._maybeEnableCycle());\n }\n if (this._config.touch && Swipe.isSupported()) {\n this._addTouchEventListeners();\n }\n }\n _addTouchEventListeners() {\n for (const img of SelectorEngine.find(SELECTOR_ITEM_IMG, this._element)) {\n EventHandler.on(img, EVENT_DRAG_START, event => event.preventDefault());\n }\n const endCallBack = () => {\n if (this._config.pause !== 'hover') {\n return;\n }\n\n // If it's a touch-enabled device, mouseenter/leave are fired as\n // part of the mouse compatibility events on first tap - the carousel\n // would stop cycling until user tapped out of it;\n // here, we listen for touchend, explicitly pause the carousel\n // (as if it's the second time we tap on it, mouseenter compat event\n // is NOT fired) and after a timeout (to allow for mouse compatibility\n // events to fire) we explicitly restart cycling\n\n this.pause();\n if (this.touchTimeout) {\n clearTimeout(this.touchTimeout);\n }\n this.touchTimeout = setTimeout(() => this._maybeEnableCycle(), TOUCHEVENT_COMPAT_WAIT + this._config.interval);\n };\n const swipeConfig = {\n leftCallback: () => this._slide(this._directionToOrder(DIRECTION_LEFT)),\n rightCallback: () => this._slide(this._directionToOrder(DIRECTION_RIGHT)),\n endCallback: endCallBack\n };\n this._swipeHelper = new Swipe(this._element, swipeConfig);\n }\n _keydown(event) {\n if (/input|textarea/i.test(event.target.tagName)) {\n return;\n 
}\n const direction = KEY_TO_DIRECTION[event.key];\n if (direction) {\n event.preventDefault();\n this._slide(this._directionToOrder(direction));\n }\n }\n _getItemIndex(element) {\n return this._getItems().indexOf(element);\n }\n _setActiveIndicatorElement(index) {\n if (!this._indicatorsElement) {\n return;\n }\n const activeIndicator = SelectorEngine.findOne(SELECTOR_ACTIVE, this._indicatorsElement);\n activeIndicator.classList.remove(CLASS_NAME_ACTIVE$2);\n activeIndicator.removeAttribute('aria-current');\n const newActiveIndicator = SelectorEngine.findOne(`[data-bs-slide-to=\"${index}\"]`, this._indicatorsElement);\n if (newActiveIndicator) {\n newActiveIndicator.classList.add(CLASS_NAME_ACTIVE$2);\n newActiveIndicator.setAttribute('aria-current', 'true');\n }\n }\n _updateInterval() {\n const element = this._activeElement || this._getActive();\n if (!element) {\n return;\n }\n const elementInterval = Number.parseInt(element.getAttribute('data-bs-interval'), 10);\n this._config.interval = elementInterval || this._config.defaultInterval;\n }\n _slide(order, element = null) {\n if (this._isSliding) {\n return;\n }\n const activeElement = this._getActive();\n const isNext = order === ORDER_NEXT;\n const nextElement = element || getNextActiveElement(this._getItems(), activeElement, isNext, this._config.wrap);\n if (nextElement === activeElement) {\n return;\n }\n const nextElementIndex = this._getItemIndex(nextElement);\n const triggerEvent = eventName => {\n return EventHandler.trigger(this._element, eventName, {\n relatedTarget: nextElement,\n direction: this._orderToDirection(order),\n from: this._getItemIndex(activeElement),\n to: nextElementIndex\n });\n };\n const slideEvent = triggerEvent(EVENT_SLIDE);\n if (slideEvent.defaultPrevented) {\n return;\n }\n if (!activeElement || !nextElement) {\n // Some weirdness is happening, so we bail\n // TODO: change tests that use empty divs to avoid this check\n return;\n }\n const isCycling = 
Boolean(this._interval);\n this.pause();\n this._isSliding = true;\n this._setActiveIndicatorElement(nextElementIndex);\n this._activeElement = nextElement;\n const directionalClassName = isNext ? CLASS_NAME_START : CLASS_NAME_END;\n const orderClassName = isNext ? CLASS_NAME_NEXT : CLASS_NAME_PREV;\n nextElement.classList.add(orderClassName);\n reflow(nextElement);\n activeElement.classList.add(directionalClassName);\n nextElement.classList.add(directionalClassName);\n const completeCallBack = () => {\n nextElement.classList.remove(directionalClassName, orderClassName);\n nextElement.classList.add(CLASS_NAME_ACTIVE$2);\n activeElement.classList.remove(CLASS_NAME_ACTIVE$2, orderClassName, directionalClassName);\n this._isSliding = false;\n triggerEvent(EVENT_SLID);\n };\n this._queueCallback(completeCallBack, activeElement, this._isAnimated());\n if (isCycling) {\n this.cycle();\n }\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_SLIDE);\n }\n _getActive() {\n return SelectorEngine.findOne(SELECTOR_ACTIVE_ITEM, this._element);\n }\n _getItems() {\n return SelectorEngine.find(SELECTOR_ITEM, this._element);\n }\n _clearInterval() {\n if (this._interval) {\n clearInterval(this._interval);\n this._interval = null;\n }\n }\n _directionToOrder(direction) {\n if (isRTL()) {\n return direction === DIRECTION_LEFT ? ORDER_PREV : ORDER_NEXT;\n }\n return direction === DIRECTION_LEFT ? ORDER_NEXT : ORDER_PREV;\n }\n _orderToDirection(order) {\n if (isRTL()) {\n return order === ORDER_PREV ? DIRECTION_LEFT : DIRECTION_RIGHT;\n }\n return order === ORDER_PREV ? 
DIRECTION_RIGHT : DIRECTION_LEFT;\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Carousel.getOrCreateInstance(this, config);\n if (typeof config === 'number') {\n data.to(config);\n return;\n }\n if (typeof config === 'string') {\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$5, SELECTOR_DATA_SLIDE, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (!target || !target.classList.contains(CLASS_NAME_CAROUSEL)) {\n return;\n }\n event.preventDefault();\n const carousel = Carousel.getOrCreateInstance(target);\n const slideIndex = this.getAttribute('data-bs-slide-to');\n if (slideIndex) {\n carousel.to(slideIndex);\n carousel._maybeEnableCycle();\n return;\n }\n if (Manipulator.getDataAttribute(this, 'slide') === 'next') {\n carousel.next();\n carousel._maybeEnableCycle();\n return;\n }\n carousel.prev();\n carousel._maybeEnableCycle();\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$3, () => {\n const carousels = SelectorEngine.find(SELECTOR_DATA_RIDE);\n for (const carousel of carousels) {\n Carousel.getOrCreateInstance(carousel);\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Carousel);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap collapse.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$b = 'collapse';\nconst DATA_KEY$7 = 'bs.collapse';\nconst EVENT_KEY$7 = `.${DATA_KEY$7}`;\nconst DATA_API_KEY$4 = '.data-api';\nconst EVENT_SHOW$6 = `show${EVENT_KEY$7}`;\nconst EVENT_SHOWN$6 = `shown${EVENT_KEY$7}`;\nconst EVENT_HIDE$6 = 
`hide${EVENT_KEY$7}`;\nconst EVENT_HIDDEN$6 = `hidden${EVENT_KEY$7}`;\nconst EVENT_CLICK_DATA_API$4 = `click${EVENT_KEY$7}${DATA_API_KEY$4}`;\nconst CLASS_NAME_SHOW$7 = 'show';\nconst CLASS_NAME_COLLAPSE = 'collapse';\nconst CLASS_NAME_COLLAPSING = 'collapsing';\nconst CLASS_NAME_COLLAPSED = 'collapsed';\nconst CLASS_NAME_DEEPER_CHILDREN = `:scope .${CLASS_NAME_COLLAPSE} .${CLASS_NAME_COLLAPSE}`;\nconst CLASS_NAME_HORIZONTAL = 'collapse-horizontal';\nconst WIDTH = 'width';\nconst HEIGHT = 'height';\nconst SELECTOR_ACTIVES = '.collapse.show, .collapse.collapsing';\nconst SELECTOR_DATA_TOGGLE$4 = '[data-bs-toggle=\"collapse\"]';\nconst Default$a = {\n parent: null,\n toggle: true\n};\nconst DefaultType$a = {\n parent: '(null|element)',\n toggle: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Collapse extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isTransitioning = false;\n this._triggerArray = [];\n const toggleList = SelectorEngine.find(SELECTOR_DATA_TOGGLE$4);\n for (const elem of toggleList) {\n const selector = SelectorEngine.getSelectorFromElement(elem);\n const filterElement = SelectorEngine.find(selector).filter(foundElement => foundElement === this._element);\n if (selector !== null && filterElement.length) {\n this._triggerArray.push(elem);\n }\n }\n this._initializeChildren();\n if (!this._config.parent) {\n this._addAriaAndCollapsedClass(this._triggerArray, this._isShown());\n }\n if (this._config.toggle) {\n this.toggle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$a;\n }\n static get DefaultType() {\n return DefaultType$a;\n }\n static get NAME() {\n return NAME$b;\n }\n\n // Public\n toggle() {\n if (this._isShown()) {\n this.hide();\n } else {\n this.show();\n }\n }\n show() {\n if (this._isTransitioning || this._isShown()) {\n return;\n }\n let activeChildren = [];\n\n // find active children\n if (this._config.parent) {\n activeChildren = 
this._getFirstLevelChildren(SELECTOR_ACTIVES).filter(element => element !== this._element).map(element => Collapse.getOrCreateInstance(element, {\n toggle: false\n }));\n }\n if (activeChildren.length && activeChildren[0]._isTransitioning) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_SHOW$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n for (const activeInstance of activeChildren) {\n activeInstance.hide();\n }\n const dimension = this._getDimension();\n this._element.classList.remove(CLASS_NAME_COLLAPSE);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.style[dimension] = 0;\n this._addAriaAndCollapsedClass(this._triggerArray, true);\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n this._element.style[dimension] = '';\n EventHandler.trigger(this._element, EVENT_SHOWN$6);\n };\n const capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1);\n const scrollSize = `scroll${capitalizedDimension}`;\n this._queueCallback(complete, this._element, true);\n this._element.style[dimension] = `${this._element[scrollSize]}px`;\n }\n hide() {\n if (this._isTransitioning || !this._isShown()) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_HIDE$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n const dimension = this._getDimension();\n this._element.style[dimension] = `${this._element.getBoundingClientRect()[dimension]}px`;\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.classList.remove(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n for (const trigger of this._triggerArray) {\n const element = SelectorEngine.getElementFromSelector(trigger);\n if (element && !this._isShown(element)) {\n this._addAriaAndCollapsedClass([trigger], false);\n }\n }\n 
this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE);\n EventHandler.trigger(this._element, EVENT_HIDDEN$6);\n };\n this._element.style[dimension] = '';\n this._queueCallback(complete, this._element, true);\n }\n _isShown(element = this._element) {\n return element.classList.contains(CLASS_NAME_SHOW$7);\n }\n\n // Private\n _configAfterMerge(config) {\n config.toggle = Boolean(config.toggle); // Coerce string values\n config.parent = getElement(config.parent);\n return config;\n }\n _getDimension() {\n return this._element.classList.contains(CLASS_NAME_HORIZONTAL) ? WIDTH : HEIGHT;\n }\n _initializeChildren() {\n if (!this._config.parent) {\n return;\n }\n const children = this._getFirstLevelChildren(SELECTOR_DATA_TOGGLE$4);\n for (const element of children) {\n const selected = SelectorEngine.getElementFromSelector(element);\n if (selected) {\n this._addAriaAndCollapsedClass([element], this._isShown(selected));\n }\n }\n }\n _getFirstLevelChildren(selector) {\n const children = SelectorEngine.find(CLASS_NAME_DEEPER_CHILDREN, this._config.parent);\n // remove children if greater depth\n return SelectorEngine.find(selector, this._config.parent).filter(element => !children.includes(element));\n }\n _addAriaAndCollapsedClass(triggerArray, isOpen) {\n if (!triggerArray.length) {\n return;\n }\n for (const element of triggerArray) {\n element.classList.toggle(CLASS_NAME_COLLAPSED, !isOpen);\n element.setAttribute('aria-expanded', isOpen);\n }\n }\n\n // Static\n static jQueryInterface(config) {\n const _config = {};\n if (typeof config === 'string' && /show|hide/.test(config)) {\n _config.toggle = false;\n }\n return this.each(function () {\n const data = Collapse.getOrCreateInstance(this, _config);\n if (typeof config === 'string') {\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named 
\"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$4, SELECTOR_DATA_TOGGLE$4, function (event) {\n // preventDefault only for elements (which change the URL) not inside the collapsible element\n if (event.target.tagName === 'A' || event.delegateTarget && event.delegateTarget.tagName === 'A') {\n event.preventDefault();\n }\n for (const element of SelectorEngine.getMultipleElementsFromSelector(this)) {\n Collapse.getOrCreateInstance(element, {\n toggle: false\n }).toggle();\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Collapse);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dropdown.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$a = 'dropdown';\nconst DATA_KEY$6 = 'bs.dropdown';\nconst EVENT_KEY$6 = `.${DATA_KEY$6}`;\nconst DATA_API_KEY$3 = '.data-api';\nconst ESCAPE_KEY$2 = 'Escape';\nconst TAB_KEY$1 = 'Tab';\nconst ARROW_UP_KEY$1 = 'ArrowUp';\nconst ARROW_DOWN_KEY$1 = 'ArrowDown';\nconst RIGHT_MOUSE_BUTTON = 2; // MouseEvent.button value for the secondary button, usually the right button\n\nconst EVENT_HIDE$5 = `hide${EVENT_KEY$6}`;\nconst EVENT_HIDDEN$5 = `hidden${EVENT_KEY$6}`;\nconst EVENT_SHOW$5 = `show${EVENT_KEY$6}`;\nconst EVENT_SHOWN$5 = `shown${EVENT_KEY$6}`;\nconst EVENT_CLICK_DATA_API$3 = `click${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYDOWN_DATA_API = `keydown${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYUP_DATA_API = `keyup${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst CLASS_NAME_SHOW$6 = 'show';\nconst CLASS_NAME_DROPUP = 'dropup';\nconst CLASS_NAME_DROPEND = 'dropend';\nconst CLASS_NAME_DROPSTART = 'dropstart';\nconst CLASS_NAME_DROPUP_CENTER = 'dropup-center';\nconst CLASS_NAME_DROPDOWN_CENTER = 'dropdown-center';\nconst 
SELECTOR_DATA_TOGGLE$3 = '[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)';\nconst SELECTOR_DATA_TOGGLE_SHOWN = `${SELECTOR_DATA_TOGGLE$3}.${CLASS_NAME_SHOW$6}`;\nconst SELECTOR_MENU = '.dropdown-menu';\nconst SELECTOR_NAVBAR = '.navbar';\nconst SELECTOR_NAVBAR_NAV = '.navbar-nav';\nconst SELECTOR_VISIBLE_ITEMS = '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)';\nconst PLACEMENT_TOP = isRTL() ? 'top-end' : 'top-start';\nconst PLACEMENT_TOPEND = isRTL() ? 'top-start' : 'top-end';\nconst PLACEMENT_BOTTOM = isRTL() ? 'bottom-end' : 'bottom-start';\nconst PLACEMENT_BOTTOMEND = isRTL() ? 'bottom-start' : 'bottom-end';\nconst PLACEMENT_RIGHT = isRTL() ? 'left-start' : 'right-start';\nconst PLACEMENT_LEFT = isRTL() ? 'right-start' : 'left-start';\nconst PLACEMENT_TOPCENTER = 'top';\nconst PLACEMENT_BOTTOMCENTER = 'bottom';\nconst Default$9 = {\n autoClose: true,\n boundary: 'clippingParents',\n display: 'dynamic',\n offset: [0, 2],\n popperConfig: null,\n reference: 'toggle'\n};\nconst DefaultType$9 = {\n autoClose: '(boolean|string)',\n boundary: '(string|element)',\n display: 'string',\n offset: '(array|string|function)',\n popperConfig: '(null|object|function)',\n reference: '(string|element|object)'\n};\n\n/**\n * Class definition\n */\n\nclass Dropdown extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._popper = null;\n this._parent = this._element.parentNode; // dropdown wrapper\n // TODO: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.3/forms/input-group/\n this._menu = SelectorEngine.next(this._element, SELECTOR_MENU)[0] || SelectorEngine.prev(this._element, SELECTOR_MENU)[0] || SelectorEngine.findOne(SELECTOR_MENU, this._parent);\n this._inNavbar = this._detectNavbar();\n }\n\n // Getters\n static get Default() {\n return Default$9;\n }\n static get DefaultType() {\n return DefaultType$9;\n }\n static get NAME() {\n return NAME$a;\n }\n\n // Public\n toggle() {\n return 
this._isShown() ? this.hide() : this.show();\n }\n show() {\n if (isDisabled(this._element) || this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$5, relatedTarget);\n if (showEvent.defaultPrevented) {\n return;\n }\n this._createPopper();\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement && !this._parent.closest(SELECTOR_NAVBAR_NAV)) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n this._element.focus();\n this._element.setAttribute('aria-expanded', true);\n this._menu.classList.add(CLASS_NAME_SHOW$6);\n this._element.classList.add(CLASS_NAME_SHOW$6);\n EventHandler.trigger(this._element, EVENT_SHOWN$5, relatedTarget);\n }\n hide() {\n if (isDisabled(this._element) || !this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n this._completeHide(relatedTarget);\n }\n dispose() {\n if (this._popper) {\n this._popper.destroy();\n }\n super.dispose();\n }\n update() {\n this._inNavbar = this._detectNavbar();\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Private\n _completeHide(relatedTarget) {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$5, relatedTarget);\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n if (this._popper) {\n this._popper.destroy();\n }\n 
this._menu.classList.remove(CLASS_NAME_SHOW$6);\n this._element.classList.remove(CLASS_NAME_SHOW$6);\n this._element.setAttribute('aria-expanded', 'false');\n Manipulator.removeDataAttribute(this._menu, 'popper');\n EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);\n }\n _getConfig(config) {\n config = super._getConfig(config);\n if (typeof config.reference === 'object' && !isElement(config.reference) && typeof config.reference.getBoundingClientRect !== 'function') {\n // Popper virtual elements require a getBoundingClientRect method\n throw new TypeError(`${NAME$a.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);\n }\n return config;\n }\n _createPopper() {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s dropdowns require Popper (https://popper.js.org)');\n }\n let referenceElement = this._element;\n if (this._config.reference === 'parent') {\n referenceElement = this._parent;\n } else if (isElement(this._config.reference)) {\n referenceElement = getElement(this._config.reference);\n } else if (typeof this._config.reference === 'object') {\n referenceElement = this._config.reference;\n }\n const popperConfig = this._getPopperConfig();\n this._popper = Popper.createPopper(referenceElement, this._menu, popperConfig);\n }\n _isShown() {\n return this._menu.classList.contains(CLASS_NAME_SHOW$6);\n }\n _getPlacement() {\n const parentDropdown = this._parent;\n if (parentDropdown.classList.contains(CLASS_NAME_DROPEND)) {\n return PLACEMENT_RIGHT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPSTART)) {\n return PLACEMENT_LEFT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP_CENTER)) {\n return PLACEMENT_TOPCENTER;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPDOWN_CENTER)) {\n return PLACEMENT_BOTTOMCENTER;\n }\n\n // We need to trim the value because custom properties can also include spaces\n const isEnd = 
getComputedStyle(this._menu).getPropertyValue('--bs-position').trim() === 'end';\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP)) {\n return isEnd ? PLACEMENT_TOPEND : PLACEMENT_TOP;\n }\n return isEnd ? PLACEMENT_BOTTOMEND : PLACEMENT_BOTTOM;\n }\n _detectNavbar() {\n return this._element.closest(SELECTOR_NAVBAR) !== null;\n }\n _getOffset() {\n const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _getPopperConfig() {\n const defaultBsPopperConfig = {\n placement: this._getPlacement(),\n modifiers: [{\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }]\n };\n\n // Disable Popper if we have a static display or Dropdown is in Navbar\n if (this._inNavbar || this._config.display === 'static') {\n Manipulator.setDataAttribute(this._menu, 'popper', 'static'); // TODO: v6 remove\n defaultBsPopperConfig.modifiers = [{\n name: 'applyStyles',\n enabled: false\n }];\n }\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _selectMenuItem({\n key,\n target\n }) {\n const items = SelectorEngine.find(SELECTOR_VISIBLE_ITEMS, this._menu).filter(element => isVisible(element));\n if (!items.length) {\n return;\n }\n\n // if target isn't included in items (e.g. 
when expanding the dropdown)\n // allow cycling to get the last item in case key equals ARROW_UP_KEY\n getNextActiveElement(items, target, key === ARROW_DOWN_KEY$1, !items.includes(target)).focus();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Dropdown.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n static clearMenus(event) {\n if (event.button === RIGHT_MOUSE_BUTTON || event.type === 'keyup' && event.key !== TAB_KEY$1) {\n return;\n }\n const openToggles = SelectorEngine.find(SELECTOR_DATA_TOGGLE_SHOWN);\n for (const toggle of openToggles) {\n const context = Dropdown.getInstance(toggle);\n if (!context || context._config.autoClose === false) {\n continue;\n }\n const composedPath = event.composedPath();\n const isMenuTarget = composedPath.includes(context._menu);\n if (composedPath.includes(context._element) || context._config.autoClose === 'inside' && !isMenuTarget || context._config.autoClose === 'outside' && isMenuTarget) {\n continue;\n }\n\n // Tab navigation through the dropdown menu or events from contained inputs shouldn't close the menu\n if (context._menu.contains(event.target) && (event.type === 'keyup' && event.key === TAB_KEY$1 || /input|select|option|textarea|form/i.test(event.target.tagName))) {\n continue;\n }\n const relatedTarget = {\n relatedTarget: context._element\n };\n if (event.type === 'click') {\n relatedTarget.clickEvent = event;\n }\n context._completeHide(relatedTarget);\n }\n }\n static dataApiKeydownHandler(event) {\n // If not an UP | DOWN | ESCAPE key => not a dropdown command\n // If input/textarea && if key is other than ESCAPE => not a dropdown command\n\n const isInput = /input|textarea/i.test(event.target.tagName);\n const isEscapeEvent = event.key === ESCAPE_KEY$2;\n const isUpOrDownEvent = 
[ARROW_UP_KEY$1, ARROW_DOWN_KEY$1].includes(event.key);\n if (!isUpOrDownEvent && !isEscapeEvent) {\n return;\n }\n if (isInput && !isEscapeEvent) {\n return;\n }\n event.preventDefault();\n\n // TODO: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.3/forms/input-group/\n const getToggleButton = this.matches(SELECTOR_DATA_TOGGLE$3) ? this : SelectorEngine.prev(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.next(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.findOne(SELECTOR_DATA_TOGGLE$3, event.delegateTarget.parentNode);\n const instance = Dropdown.getOrCreateInstance(getToggleButton);\n if (isUpOrDownEvent) {\n event.stopPropagation();\n instance.show();\n instance._selectMenuItem(event);\n return;\n }\n if (instance._isShown()) {\n // else is escape and we check if it is shown\n event.stopPropagation();\n instance.hide();\n getToggleButton.focus();\n }\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_DATA_TOGGLE$3, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_MENU, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_KEYUP_DATA_API, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, SELECTOR_DATA_TOGGLE$3, function (event) {\n event.preventDefault();\n Dropdown.getOrCreateInstance(this).toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Dropdown);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/backdrop.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$9 = 'backdrop';\nconst CLASS_NAME_FADE$4 = 'fade';\nconst CLASS_NAME_SHOW$5 = 'show';\nconst EVENT_MOUSEDOWN = `mousedown.bs.${NAME$9}`;\nconst Default$8 = 
{\n className: 'modal-backdrop',\n clickCallback: null,\n isAnimated: false,\n isVisible: true,\n // if false, we use the backdrop helper without adding any element to the dom\n rootElement: 'body' // give the choice to place backdrop under different elements\n};\n\nconst DefaultType$8 = {\n className: 'string',\n clickCallback: '(function|null)',\n isAnimated: 'boolean',\n isVisible: 'boolean',\n rootElement: '(element|string)'\n};\n\n/**\n * Class definition\n */\n\nclass Backdrop extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isAppended = false;\n this._element = null;\n }\n\n // Getters\n static get Default() {\n return Default$8;\n }\n static get DefaultType() {\n return DefaultType$8;\n }\n static get NAME() {\n return NAME$9;\n }\n\n // Public\n show(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._append();\n const element = this._getElement();\n if (this._config.isAnimated) {\n reflow(element);\n }\n element.classList.add(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n execute(callback);\n });\n }\n hide(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._getElement().classList.remove(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n this.dispose();\n execute(callback);\n });\n }\n dispose() {\n if (!this._isAppended) {\n return;\n }\n EventHandler.off(this._element, EVENT_MOUSEDOWN);\n this._element.remove();\n this._isAppended = false;\n }\n\n // Private\n _getElement() {\n if (!this._element) {\n const backdrop = document.createElement('div');\n backdrop.className = this._config.className;\n if (this._config.isAnimated) {\n backdrop.classList.add(CLASS_NAME_FADE$4);\n }\n this._element = backdrop;\n }\n return this._element;\n }\n _configAfterMerge(config) {\n // use getElement() with the default \"body\" to get a fresh Element on each instantiation\n config.rootElement = getElement(config.rootElement);\n return 
config;\n }\n _append() {\n if (this._isAppended) {\n return;\n }\n const element = this._getElement();\n this._config.rootElement.append(element);\n EventHandler.on(element, EVENT_MOUSEDOWN, () => {\n execute(this._config.clickCallback);\n });\n this._isAppended = true;\n }\n _emulateAnimation(callback) {\n executeAfterTransition(callback, this._getElement(), this._config.isAnimated);\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/focustrap.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$8 = 'focustrap';\nconst DATA_KEY$5 = 'bs.focustrap';\nconst EVENT_KEY$5 = `.${DATA_KEY$5}`;\nconst EVENT_FOCUSIN$2 = `focusin${EVENT_KEY$5}`;\nconst EVENT_KEYDOWN_TAB = `keydown.tab${EVENT_KEY$5}`;\nconst TAB_KEY = 'Tab';\nconst TAB_NAV_FORWARD = 'forward';\nconst TAB_NAV_BACKWARD = 'backward';\nconst Default$7 = {\n autofocus: true,\n trapElement: null // The element to trap focus inside of\n};\n\nconst DefaultType$7 = {\n autofocus: 'boolean',\n trapElement: 'element'\n};\n\n/**\n * Class definition\n */\n\nclass FocusTrap extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isActive = false;\n this._lastTabNavDirection = null;\n }\n\n // Getters\n static get Default() {\n return Default$7;\n }\n static get DefaultType() {\n return DefaultType$7;\n }\n static get NAME() {\n return NAME$8;\n }\n\n // Public\n activate() {\n if (this._isActive) {\n return;\n }\n if (this._config.autofocus) {\n this._config.trapElement.focus();\n }\n EventHandler.off(document, EVENT_KEY$5); // guard against infinite focus loop\n EventHandler.on(document, EVENT_FOCUSIN$2, event => this._handleFocusin(event));\n EventHandler.on(document, EVENT_KEYDOWN_TAB, event => this._handleKeydown(event));\n this._isActive = true;\n }\n 
deactivate() {\n if (!this._isActive) {\n return;\n }\n this._isActive = false;\n EventHandler.off(document, EVENT_KEY$5);\n }\n\n // Private\n _handleFocusin(event) {\n const {\n trapElement\n } = this._config;\n if (event.target === document || event.target === trapElement || trapElement.contains(event.target)) {\n return;\n }\n const elements = SelectorEngine.focusableChildren(trapElement);\n if (elements.length === 0) {\n trapElement.focus();\n } else if (this._lastTabNavDirection === TAB_NAV_BACKWARD) {\n elements[elements.length - 1].focus();\n } else {\n elements[0].focus();\n }\n }\n _handleKeydown(event) {\n if (event.key !== TAB_KEY) {\n return;\n }\n this._lastTabNavDirection = event.shiftKey ? TAB_NAV_BACKWARD : TAB_NAV_FORWARD;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/scrollBar.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst SELECTOR_FIXED_CONTENT = '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top';\nconst SELECTOR_STICKY_CONTENT = '.sticky-top';\nconst PROPERTY_PADDING = 'padding-right';\nconst PROPERTY_MARGIN = 'margin-right';\n\n/**\n * Class definition\n */\n\nclass ScrollBarHelper {\n constructor() {\n this._element = document.body;\n }\n\n // Public\n getWidth() {\n // https://developer.mozilla.org/en-US/docs/Web/API/Window/innerWidth#usage_notes\n const documentWidth = document.documentElement.clientWidth;\n return Math.abs(window.innerWidth - documentWidth);\n }\n hide() {\n const width = this.getWidth();\n this._disableOverFlow();\n // give padding to element to balance the hidden scrollbar width\n this._setElementAttributes(this._element, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n // trick: We adjust positive paddingRight and negative marginRight to sticky-top elements to keep showing fullwidth\n 
this._setElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n this._setElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN, calculatedValue => calculatedValue - width);\n }\n reset() {\n this._resetElementAttributes(this._element, 'overflow');\n this._resetElementAttributes(this._element, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN);\n }\n isOverflowing() {\n return this.getWidth() > 0;\n }\n\n // Private\n _disableOverFlow() {\n this._saveInitialAttribute(this._element, 'overflow');\n this._element.style.overflow = 'hidden';\n }\n _setElementAttributes(selector, styleProperty, callback) {\n const scrollbarWidth = this.getWidth();\n const manipulationCallBack = element => {\n if (element !== this._element && window.innerWidth > element.clientWidth + scrollbarWidth) {\n return;\n }\n this._saveInitialAttribute(element, styleProperty);\n const calculatedValue = window.getComputedStyle(element).getPropertyValue(styleProperty);\n element.style.setProperty(styleProperty, `${callback(Number.parseFloat(calculatedValue))}px`);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _saveInitialAttribute(element, styleProperty) {\n const actualValue = element.style.getPropertyValue(styleProperty);\n if (actualValue) {\n Manipulator.setDataAttribute(element, styleProperty, actualValue);\n }\n }\n _resetElementAttributes(selector, styleProperty) {\n const manipulationCallBack = element => {\n const value = Manipulator.getDataAttribute(element, styleProperty);\n // We only want to remove the property if the value is `null`; the value can also be zero\n if (value === null) {\n element.style.removeProperty(styleProperty);\n return;\n }\n Manipulator.removeDataAttribute(element, styleProperty);\n element.style.setProperty(styleProperty, value);\n };\n 
this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _applyManipulationCallback(selector, callBack) {\n if (isElement(selector)) {\n callBack(selector);\n return;\n }\n for (const sel of SelectorEngine.find(selector, this._element)) {\n callBack(sel);\n }\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap modal.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$7 = 'modal';\nconst DATA_KEY$4 = 'bs.modal';\nconst EVENT_KEY$4 = `.${DATA_KEY$4}`;\nconst DATA_API_KEY$2 = '.data-api';\nconst ESCAPE_KEY$1 = 'Escape';\nconst EVENT_HIDE$4 = `hide${EVENT_KEY$4}`;\nconst EVENT_HIDE_PREVENTED$1 = `hidePrevented${EVENT_KEY$4}`;\nconst EVENT_HIDDEN$4 = `hidden${EVENT_KEY$4}`;\nconst EVENT_SHOW$4 = `show${EVENT_KEY$4}`;\nconst EVENT_SHOWN$4 = `shown${EVENT_KEY$4}`;\nconst EVENT_RESIZE$1 = `resize${EVENT_KEY$4}`;\nconst EVENT_CLICK_DISMISS = `click.dismiss${EVENT_KEY$4}`;\nconst EVENT_MOUSEDOWN_DISMISS = `mousedown.dismiss${EVENT_KEY$4}`;\nconst EVENT_KEYDOWN_DISMISS$1 = `keydown.dismiss${EVENT_KEY$4}`;\nconst EVENT_CLICK_DATA_API$2 = `click${EVENT_KEY$4}${DATA_API_KEY$2}`;\nconst CLASS_NAME_OPEN = 'modal-open';\nconst CLASS_NAME_FADE$3 = 'fade';\nconst CLASS_NAME_SHOW$4 = 'show';\nconst CLASS_NAME_STATIC = 'modal-static';\nconst OPEN_SELECTOR$1 = '.modal.show';\nconst SELECTOR_DIALOG = '.modal-dialog';\nconst SELECTOR_MODAL_BODY = '.modal-body';\nconst SELECTOR_DATA_TOGGLE$2 = '[data-bs-toggle=\"modal\"]';\nconst Default$6 = {\n backdrop: true,\n focus: true,\n keyboard: true\n};\nconst DefaultType$6 = {\n backdrop: '(boolean|string)',\n focus: 'boolean',\n keyboard: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Modal extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._dialog = 
SelectorEngine.findOne(SELECTOR_DIALOG, this._element);\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._isShown = false;\n this._isTransitioning = false;\n this._scrollBar = new ScrollBarHelper();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$6;\n }\n static get DefaultType() {\n return DefaultType$6;\n }\n static get NAME() {\n return NAME$7;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown || this._isTransitioning) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$4, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._isTransitioning = true;\n this._scrollBar.hide();\n document.body.classList.add(CLASS_NAME_OPEN);\n this._adjustDialog();\n this._backdrop.show(() => this._showElement(relatedTarget));\n }\n hide() {\n if (!this._isShown || this._isTransitioning) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$4);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._isShown = false;\n this._isTransitioning = true;\n this._focustrap.deactivate();\n this._element.classList.remove(CLASS_NAME_SHOW$4);\n this._queueCallback(() => this._hideModal(), this._element, this._isAnimated());\n }\n dispose() {\n EventHandler.off(window, EVENT_KEY$4);\n EventHandler.off(this._dialog, EVENT_KEY$4);\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n handleUpdate() {\n this._adjustDialog();\n }\n\n // Private\n _initializeBackDrop() {\n return new Backdrop({\n isVisible: Boolean(this._config.backdrop),\n // 'static' option will be translated to true, and booleans will keep their value,\n isAnimated: this._isAnimated()\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n 
_showElement(relatedTarget) {\n // try to append dynamic modal\n if (!document.body.contains(this._element)) {\n document.body.append(this._element);\n }\n this._element.style.display = 'block';\n this._element.removeAttribute('aria-hidden');\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.scrollTop = 0;\n const modalBody = SelectorEngine.findOne(SELECTOR_MODAL_BODY, this._dialog);\n if (modalBody) {\n modalBody.scrollTop = 0;\n }\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_SHOW$4);\n const transitionComplete = () => {\n if (this._config.focus) {\n this._focustrap.activate();\n }\n this._isTransitioning = false;\n EventHandler.trigger(this._element, EVENT_SHOWN$4, {\n relatedTarget\n });\n };\n this._queueCallback(transitionComplete, this._dialog, this._isAnimated());\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS$1, event => {\n if (event.key !== ESCAPE_KEY$1) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n this._triggerBackdropTransition();\n });\n EventHandler.on(window, EVENT_RESIZE$1, () => {\n if (this._isShown && !this._isTransitioning) {\n this._adjustDialog();\n }\n });\n EventHandler.on(this._element, EVENT_MOUSEDOWN_DISMISS, event => {\n // a bad trick to segregate clicks that may start inside dialog but end outside, and avoid listen to scrollbar clicks\n EventHandler.one(this._element, EVENT_CLICK_DISMISS, event2 => {\n if (this._element !== event.target || this._element !== event2.target) {\n return;\n }\n if (this._config.backdrop === 'static') {\n this._triggerBackdropTransition();\n return;\n }\n if (this._config.backdrop) {\n this.hide();\n }\n });\n });\n }\n _hideModal() {\n this._element.style.display = 'none';\n this._element.setAttribute('aria-hidden', true);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n this._isTransitioning = false;\n 
this._backdrop.hide(() => {\n document.body.classList.remove(CLASS_NAME_OPEN);\n this._resetAdjustments();\n this._scrollBar.reset();\n EventHandler.trigger(this._element, EVENT_HIDDEN$4);\n });\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_FADE$3);\n }\n _triggerBackdropTransition() {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED$1);\n if (hideEvent.defaultPrevented) {\n return;\n }\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const initialOverflowY = this._element.style.overflowY;\n // return if the following background transition hasn't yet completed\n if (initialOverflowY === 'hidden' || this._element.classList.contains(CLASS_NAME_STATIC)) {\n return;\n }\n if (!isModalOverflowing) {\n this._element.style.overflowY = 'hidden';\n }\n this._element.classList.add(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.classList.remove(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.style.overflowY = initialOverflowY;\n }, this._dialog);\n }, this._dialog);\n this._element.focus();\n }\n\n /**\n * The following methods are used to handle overflowing modals\n */\n\n _adjustDialog() {\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const scrollbarWidth = this._scrollBar.getWidth();\n const isBodyOverflowing = scrollbarWidth > 0;\n if (isBodyOverflowing && !isModalOverflowing) {\n const property = isRTL() ? 'paddingLeft' : 'paddingRight';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n if (!isBodyOverflowing && isModalOverflowing) {\n const property = isRTL() ? 
'paddingRight' : 'paddingLeft';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n }\n _resetAdjustments() {\n this._element.style.paddingLeft = '';\n this._element.style.paddingRight = '';\n }\n\n // Static\n static jQueryInterface(config, relatedTarget) {\n return this.each(function () {\n const data = Modal.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](relatedTarget);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$2, SELECTOR_DATA_TOGGLE$2, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n EventHandler.one(target, EVENT_SHOW$4, showEvent => {\n if (showEvent.defaultPrevented) {\n // only register focus restorer if modal will actually get shown\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$4, () => {\n if (isVisible(this)) {\n this.focus();\n }\n });\n });\n\n // avoid conflict when clicking modal toggler while another one is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR$1);\n if (alreadyOpen) {\n Modal.getInstance(alreadyOpen).hide();\n }\n const data = Modal.getOrCreateInstance(target);\n data.toggle(this);\n});\nenableDismissTrigger(Modal);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Modal);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap offcanvas.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$6 = 'offcanvas';\nconst DATA_KEY$3 = 'bs.offcanvas';\nconst EVENT_KEY$3 = `.${DATA_KEY$3}`;\nconst DATA_API_KEY$1 = '.data-api';\nconst EVENT_LOAD_DATA_API$2 = `load${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst 
ESCAPE_KEY = 'Escape';\nconst CLASS_NAME_SHOW$3 = 'show';\nconst CLASS_NAME_SHOWING$1 = 'showing';\nconst CLASS_NAME_HIDING = 'hiding';\nconst CLASS_NAME_BACKDROP = 'offcanvas-backdrop';\nconst OPEN_SELECTOR = '.offcanvas.show';\nconst EVENT_SHOW$3 = `show${EVENT_KEY$3}`;\nconst EVENT_SHOWN$3 = `shown${EVENT_KEY$3}`;\nconst EVENT_HIDE$3 = `hide${EVENT_KEY$3}`;\nconst EVENT_HIDE_PREVENTED = `hidePrevented${EVENT_KEY$3}`;\nconst EVENT_HIDDEN$3 = `hidden${EVENT_KEY$3}`;\nconst EVENT_RESIZE = `resize${EVENT_KEY$3}`;\nconst EVENT_CLICK_DATA_API$1 = `click${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst EVENT_KEYDOWN_DISMISS = `keydown.dismiss${EVENT_KEY$3}`;\nconst SELECTOR_DATA_TOGGLE$1 = '[data-bs-toggle=\"offcanvas\"]';\nconst Default$5 = {\n backdrop: true,\n keyboard: true,\n scroll: false\n};\nconst DefaultType$5 = {\n backdrop: '(boolean|string)',\n keyboard: 'boolean',\n scroll: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Offcanvas extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isShown = false;\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$5;\n }\n static get DefaultType() {\n return DefaultType$5;\n }\n static get NAME() {\n return NAME$6;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? 
this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$3, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._backdrop.show();\n if (!this._config.scroll) {\n new ScrollBarHelper().hide();\n }\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.classList.add(CLASS_NAME_SHOWING$1);\n const completeCallBack = () => {\n if (!this._config.scroll || this._config.backdrop) {\n this._focustrap.activate();\n }\n this._element.classList.add(CLASS_NAME_SHOW$3);\n this._element.classList.remove(CLASS_NAME_SHOWING$1);\n EventHandler.trigger(this._element, EVENT_SHOWN$3, {\n relatedTarget\n });\n };\n this._queueCallback(completeCallBack, this._element, true);\n }\n hide() {\n if (!this._isShown) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$3);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._focustrap.deactivate();\n this._element.blur();\n this._isShown = false;\n this._element.classList.add(CLASS_NAME_HIDING);\n this._backdrop.hide();\n const completeCallback = () => {\n this._element.classList.remove(CLASS_NAME_SHOW$3, CLASS_NAME_HIDING);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n if (!this._config.scroll) {\n new ScrollBarHelper().reset();\n }\n EventHandler.trigger(this._element, EVENT_HIDDEN$3);\n };\n this._queueCallback(completeCallback, this._element, true);\n }\n dispose() {\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n\n // Private\n _initializeBackDrop() {\n const clickCallback = () => {\n if (this._config.backdrop === 'static') {\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n return;\n }\n this.hide();\n };\n\n // 'static' option will be translated to true, and booleans will keep their 
value\n const isVisible = Boolean(this._config.backdrop);\n return new Backdrop({\n className: CLASS_NAME_BACKDROP,\n isVisible,\n isAnimated: true,\n rootElement: this._element.parentNode,\n clickCallback: isVisible ? clickCallback : null\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS, event => {\n if (event.key !== ESCAPE_KEY) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n });\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Offcanvas.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$1, SELECTOR_DATA_TOGGLE$1, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$3, () => {\n // focus on trigger when it is closed\n if (isVisible(this)) {\n this.focus();\n }\n });\n\n // avoid conflict when clicking a toggler of an offcanvas, while another is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR);\n if (alreadyOpen && alreadyOpen !== target) {\n Offcanvas.getInstance(alreadyOpen).hide();\n }\n const data = Offcanvas.getOrCreateInstance(target);\n data.toggle(this);\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$2, () => {\n for (const selector of SelectorEngine.find(OPEN_SELECTOR)) {\n Offcanvas.getOrCreateInstance(selector).show();\n }\n});\nEventHandler.on(window, EVENT_RESIZE, () => {\n for 
(const element of SelectorEngine.find('[aria-modal][class*=show][class*=offcanvas-]')) {\n if (getComputedStyle(element).position !== 'fixed') {\n Offcanvas.getOrCreateInstance(element).hide();\n }\n }\n});\nenableDismissTrigger(Offcanvas);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Offcanvas);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/sanitizer.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n// js-docs-start allow-list\nconst ARIA_ATTRIBUTE_PATTERN = /^aria-[\\w-]*$/i;\nconst DefaultAllowlist = {\n // Global attributes allowed on any supplied element below.\n '*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],\n a: ['target', 'href', 'title', 'rel'],\n area: [],\n b: [],\n br: [],\n col: [],\n code: [],\n div: [],\n em: [],\n hr: [],\n h1: [],\n h2: [],\n h3: [],\n h4: [],\n h5: [],\n h6: [],\n i: [],\n img: ['src', 'srcset', 'alt', 'title', 'width', 'height'],\n li: [],\n ol: [],\n p: [],\n pre: [],\n s: [],\n small: [],\n span: [],\n sub: [],\n sup: [],\n strong: [],\n u: [],\n ul: []\n};\n// js-docs-end allow-list\n\nconst uriAttributes = new Set(['background', 'cite', 'href', 'itemtype', 'longdesc', 'poster', 'src', 'xlink:href']);\n\n/**\n * A pattern that recognizes URLs that are safe wrt. 
XSS in URL navigation\n * contexts.\n *\n * Shout-out to Angular https://github.com/angular/angular/blob/15.2.8/packages/core/src/sanitization/url_sanitizer.ts#L38\n */\n// eslint-disable-next-line unicorn/better-regex\nconst SAFE_URL_PATTERN = /^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i;\nconst allowedAttribute = (attribute, allowedAttributeList) => {\n const attributeName = attribute.nodeName.toLowerCase();\n if (allowedAttributeList.includes(attributeName)) {\n if (uriAttributes.has(attributeName)) {\n return Boolean(SAFE_URL_PATTERN.test(attribute.nodeValue));\n }\n return true;\n }\n\n // Check if a regular expression validates the attribute.\n return allowedAttributeList.filter(attributeRegex => attributeRegex instanceof RegExp).some(regex => regex.test(attributeName));\n};\nfunction sanitizeHtml(unsafeHtml, allowList, sanitizeFunction) {\n if (!unsafeHtml.length) {\n return unsafeHtml;\n }\n if (sanitizeFunction && typeof sanitizeFunction === 'function') {\n return sanitizeFunction(unsafeHtml);\n }\n const domParser = new window.DOMParser();\n const createdDocument = domParser.parseFromString(unsafeHtml, 'text/html');\n const elements = [].concat(...createdDocument.body.querySelectorAll('*'));\n for (const element of elements) {\n const elementName = element.nodeName.toLowerCase();\n if (!Object.keys(allowList).includes(elementName)) {\n element.remove();\n continue;\n }\n const attributeList = [].concat(...element.attributes);\n const allowedAttributes = [].concat(allowList['*'] || [], allowList[elementName] || []);\n for (const attribute of attributeList) {\n if (!allowedAttribute(attribute, allowedAttributes)) {\n element.removeAttribute(attribute.nodeName);\n }\n }\n }\n return createdDocument.body.innerHTML;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/template-factory.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * 
--------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$5 = 'TemplateFactory';\nconst Default$4 = {\n allowList: DefaultAllowlist,\n content: {},\n // { selector : text , selector2 : text2 , }\n extraClass: '',\n html: false,\n sanitize: true,\n sanitizeFn: null,\n template: '
'\n};\nconst DefaultType$4 = {\n allowList: 'object',\n content: 'object',\n extraClass: '(string|function)',\n html: 'boolean',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n template: 'string'\n};\nconst DefaultContentType = {\n entry: '(string|element|function|null)',\n selector: '(string|element)'\n};\n\n/**\n * Class definition\n */\n\nclass TemplateFactory extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n }\n\n // Getters\n static get Default() {\n return Default$4;\n }\n static get DefaultType() {\n return DefaultType$4;\n }\n static get NAME() {\n return NAME$5;\n }\n\n // Public\n getContent() {\n return Object.values(this._config.content).map(config => this._resolvePossibleFunction(config)).filter(Boolean);\n }\n hasContent() {\n return this.getContent().length > 0;\n }\n changeContent(content) {\n this._checkContent(content);\n this._config.content = {\n ...this._config.content,\n ...content\n };\n return this;\n }\n toHtml() {\n const templateWrapper = document.createElement('div');\n templateWrapper.innerHTML = this._maybeSanitize(this._config.template);\n for (const [selector, text] of Object.entries(this._config.content)) {\n this._setContent(templateWrapper, text, selector);\n }\n const template = templateWrapper.children[0];\n const extraClass = this._resolvePossibleFunction(this._config.extraClass);\n if (extraClass) {\n template.classList.add(...extraClass.split(' '));\n }\n return template;\n }\n\n // Private\n _typeCheckConfig(config) {\n super._typeCheckConfig(config);\n this._checkContent(config.content);\n }\n _checkContent(arg) {\n for (const [selector, content] of Object.entries(arg)) {\n super._typeCheckConfig({\n selector,\n entry: content\n }, DefaultContentType);\n }\n }\n _setContent(template, content, selector) {\n const templateElement = SelectorEngine.findOne(selector, template);\n if (!templateElement) {\n return;\n }\n content = this._resolvePossibleFunction(content);\n if 
(!content) {\n templateElement.remove();\n return;\n }\n if (isElement(content)) {\n this._putElementInTemplate(getElement(content), templateElement);\n return;\n }\n if (this._config.html) {\n templateElement.innerHTML = this._maybeSanitize(content);\n return;\n }\n templateElement.textContent = content;\n }\n _maybeSanitize(arg) {\n return this._config.sanitize ? sanitizeHtml(arg, this._config.allowList, this._config.sanitizeFn) : arg;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this]);\n }\n _putElementInTemplate(element, templateElement) {\n if (this._config.html) {\n templateElement.innerHTML = '';\n templateElement.append(element);\n return;\n }\n templateElement.textContent = element.textContent;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap tooltip.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$4 = 'tooltip';\nconst DISALLOWED_ATTRIBUTES = new Set(['sanitize', 'allowList', 'sanitizeFn']);\nconst CLASS_NAME_FADE$2 = 'fade';\nconst CLASS_NAME_MODAL = 'modal';\nconst CLASS_NAME_SHOW$2 = 'show';\nconst SELECTOR_TOOLTIP_INNER = '.tooltip-inner';\nconst SELECTOR_MODAL = `.${CLASS_NAME_MODAL}`;\nconst EVENT_MODAL_HIDE = 'hide.bs.modal';\nconst TRIGGER_HOVER = 'hover';\nconst TRIGGER_FOCUS = 'focus';\nconst TRIGGER_CLICK = 'click';\nconst TRIGGER_MANUAL = 'manual';\nconst EVENT_HIDE$2 = 'hide';\nconst EVENT_HIDDEN$2 = 'hidden';\nconst EVENT_SHOW$2 = 'show';\nconst EVENT_SHOWN$2 = 'shown';\nconst EVENT_INSERTED = 'inserted';\nconst EVENT_CLICK$1 = 'click';\nconst EVENT_FOCUSIN$1 = 'focusin';\nconst EVENT_FOCUSOUT$1 = 'focusout';\nconst EVENT_MOUSEENTER = 'mouseenter';\nconst EVENT_MOUSELEAVE = 'mouseleave';\nconst AttachmentMap = {\n AUTO: 'auto',\n TOP: 'top',\n RIGHT: isRTL() ? 
'left' : 'right',\n BOTTOM: 'bottom',\n LEFT: isRTL() ? 'right' : 'left'\n};\nconst Default$3 = {\n allowList: DefaultAllowlist,\n animation: true,\n boundary: 'clippingParents',\n container: false,\n customClass: '',\n delay: 0,\n fallbackPlacements: ['top', 'right', 'bottom', 'left'],\n html: false,\n offset: [0, 6],\n placement: 'top',\n popperConfig: null,\n sanitize: true,\n sanitizeFn: null,\n selector: false,\n template: '
' + '
' + '
' + '
',\n title: '',\n trigger: 'hover focus'\n};\nconst DefaultType$3 = {\n allowList: 'object',\n animation: 'boolean',\n boundary: '(string|element)',\n container: '(string|element|boolean)',\n customClass: '(string|function)',\n delay: '(number|object)',\n fallbackPlacements: 'array',\n html: 'boolean',\n offset: '(array|string|function)',\n placement: '(string|function)',\n popperConfig: '(null|object|function)',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n selector: '(string|boolean)',\n template: 'string',\n title: '(string|element|function)',\n trigger: 'string'\n};\n\n/**\n * Class definition\n */\n\nclass Tooltip extends BaseComponent {\n constructor(element, config) {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s tooltips require Popper (https://popper.js.org)');\n }\n super(element, config);\n\n // Private\n this._isEnabled = true;\n this._timeout = 0;\n this._isHovered = null;\n this._activeTrigger = {};\n this._popper = null;\n this._templateFactory = null;\n this._newContent = null;\n\n // Protected\n this.tip = null;\n this._setListeners();\n if (!this._config.selector) {\n this._fixTitle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$3;\n }\n static get DefaultType() {\n return DefaultType$3;\n }\n static get NAME() {\n return NAME$4;\n }\n\n // Public\n enable() {\n this._isEnabled = true;\n }\n disable() {\n this._isEnabled = false;\n }\n toggleEnabled() {\n this._isEnabled = !this._isEnabled;\n }\n toggle() {\n if (!this._isEnabled) {\n return;\n }\n this._activeTrigger.click = !this._activeTrigger.click;\n if (this._isShown()) {\n this._leave();\n return;\n }\n this._enter();\n }\n dispose() {\n clearTimeout(this._timeout);\n EventHandler.off(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n if (this._element.getAttribute('data-bs-original-title')) {\n this._element.setAttribute('title', this._element.getAttribute('data-bs-original-title'));\n }\n 
this._disposePopper();\n super.dispose();\n }\n show() {\n if (this._element.style.display === 'none') {\n throw new Error('Please use show on visible elements');\n }\n if (!(this._isWithContent() && this._isEnabled)) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOW$2));\n const shadowRoot = findShadowRoot(this._element);\n const isInTheDom = (shadowRoot || this._element.ownerDocument.documentElement).contains(this._element);\n if (showEvent.defaultPrevented || !isInTheDom) {\n return;\n }\n\n // TODO: v6 remove this or make it optional\n this._disposePopper();\n const tip = this._getTipElement();\n this._element.setAttribute('aria-describedby', tip.getAttribute('id'));\n const {\n container\n } = this._config;\n if (!this._element.ownerDocument.documentElement.contains(this.tip)) {\n container.append(tip);\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_INSERTED));\n }\n this._popper = this._createPopper(tip);\n tip.classList.add(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n const complete = () => {\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOWN$2));\n if (this._isHovered === false) {\n this._leave();\n }\n this._isHovered = false;\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n hide() {\n if (!this._isShown()) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDE$2));\n if (hideEvent.defaultPrevented) {\n return;\n }\n const tip = this._getTipElement();\n 
tip.classList.remove(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n this._activeTrigger[TRIGGER_CLICK] = false;\n this._activeTrigger[TRIGGER_FOCUS] = false;\n this._activeTrigger[TRIGGER_HOVER] = false;\n this._isHovered = null; // it is a trick to support manual triggering\n\n const complete = () => {\n if (this._isWithActiveTrigger()) {\n return;\n }\n if (!this._isHovered) {\n this._disposePopper();\n }\n this._element.removeAttribute('aria-describedby');\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDDEN$2));\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n update() {\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Protected\n _isWithContent() {\n return Boolean(this._getTitle());\n }\n _getTipElement() {\n if (!this.tip) {\n this.tip = this._createTipElement(this._newContent || this._getContentForTemplate());\n }\n return this.tip;\n }\n _createTipElement(content) {\n const tip = this._getTemplateFactory(content).toHtml();\n\n // TODO: remove this check in v6\n if (!tip) {\n return null;\n }\n tip.classList.remove(CLASS_NAME_FADE$2, CLASS_NAME_SHOW$2);\n // TODO: v6 the following can be achieved with CSS only\n tip.classList.add(`bs-${this.constructor.NAME}-auto`);\n const tipId = getUID(this.constructor.NAME).toString();\n tip.setAttribute('id', tipId);\n if (this._isAnimated()) {\n tip.classList.add(CLASS_NAME_FADE$2);\n }\n return tip;\n }\n setContent(content) {\n this._newContent = content;\n if (this._isShown()) {\n this._disposePopper();\n this.show();\n }\n }\n _getTemplateFactory(content) {\n if (this._templateFactory) {\n this._templateFactory.changeContent(content);\n } else {\n this._templateFactory = new 
TemplateFactory({\n ...this._config,\n // the `content` var has to be after `this._config`\n // to override config.content in case of popover\n content,\n extraClass: this._resolvePossibleFunction(this._config.customClass)\n });\n }\n return this._templateFactory;\n }\n _getContentForTemplate() {\n return {\n [SELECTOR_TOOLTIP_INNER]: this._getTitle()\n };\n }\n _getTitle() {\n return this._resolvePossibleFunction(this._config.title) || this._element.getAttribute('data-bs-original-title');\n }\n\n // Private\n _initializeOnDelegatedTarget(event) {\n return this.constructor.getOrCreateInstance(event.delegateTarget, this._getDelegateConfig());\n }\n _isAnimated() {\n return this._config.animation || this.tip && this.tip.classList.contains(CLASS_NAME_FADE$2);\n }\n _isShown() {\n return this.tip && this.tip.classList.contains(CLASS_NAME_SHOW$2);\n }\n _createPopper(tip) {\n const placement = execute(this._config.placement, [this, tip, this._element]);\n const attachment = AttachmentMap[placement.toUpperCase()];\n return Popper.createPopper(this._element, tip, this._getPopperConfig(attachment));\n }\n _getOffset() {\n const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this._element]);\n }\n _getPopperConfig(attachment) {\n const defaultBsPopperConfig = {\n placement: attachment,\n modifiers: [{\n name: 'flip',\n options: {\n fallbackPlacements: this._config.fallbackPlacements\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }, {\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'arrow',\n options: {\n element: `.${this.constructor.NAME}-arrow`\n }\n }, {\n name: 'preSetPlacement',\n enabled: true,\n phase: 'beforeMain',\n fn: data 
=> {\n // Pre-set Popper's placement attribute in order to read the arrow sizes properly.\n // Otherwise, Popper mixes up the width and height dimensions since the initial arrow style is for top placement\n this._getTipElement().setAttribute('data-popper-placement', data.state.placement);\n }\n }]\n };\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _setListeners() {\n const triggers = this._config.trigger.split(' ');\n for (const trigger of triggers) {\n if (trigger === 'click') {\n EventHandler.on(this._element, this.constructor.eventName(EVENT_CLICK$1), this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context.toggle();\n });\n } else if (trigger !== TRIGGER_MANUAL) {\n const eventIn = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSEENTER) : this.constructor.eventName(EVENT_FOCUSIN$1);\n const eventOut = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSELEAVE) : this.constructor.eventName(EVENT_FOCUSOUT$1);\n EventHandler.on(this._element, eventIn, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusin' ? TRIGGER_FOCUS : TRIGGER_HOVER] = true;\n context._enter();\n });\n EventHandler.on(this._element, eventOut, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusout' ? 
TRIGGER_FOCUS : TRIGGER_HOVER] = context._element.contains(event.relatedTarget);\n context._leave();\n });\n }\n }\n this._hideModalHandler = () => {\n if (this._element) {\n this.hide();\n }\n };\n EventHandler.on(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n }\n _fixTitle() {\n const title = this._element.getAttribute('title');\n if (!title) {\n return;\n }\n if (!this._element.getAttribute('aria-label') && !this._element.textContent.trim()) {\n this._element.setAttribute('aria-label', title);\n }\n this._element.setAttribute('data-bs-original-title', title); // DO NOT USE IT. Is only for backwards compatibility\n this._element.removeAttribute('title');\n }\n _enter() {\n if (this._isShown() || this._isHovered) {\n this._isHovered = true;\n return;\n }\n this._isHovered = true;\n this._setTimeout(() => {\n if (this._isHovered) {\n this.show();\n }\n }, this._config.delay.show);\n }\n _leave() {\n if (this._isWithActiveTrigger()) {\n return;\n }\n this._isHovered = false;\n this._setTimeout(() => {\n if (!this._isHovered) {\n this.hide();\n }\n }, this._config.delay.hide);\n }\n _setTimeout(handler, timeout) {\n clearTimeout(this._timeout);\n this._timeout = setTimeout(handler, timeout);\n }\n _isWithActiveTrigger() {\n return Object.values(this._activeTrigger).includes(true);\n }\n _getConfig(config) {\n const dataAttributes = Manipulator.getDataAttributes(this._element);\n for (const dataAttribute of Object.keys(dataAttributes)) {\n if (DISALLOWED_ATTRIBUTES.has(dataAttribute)) {\n delete dataAttributes[dataAttribute];\n }\n }\n config = {\n ...dataAttributes,\n ...(typeof config === 'object' && config ? config : {})\n };\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n config.container = config.container === false ? 
document.body : getElement(config.container);\n if (typeof config.delay === 'number') {\n config.delay = {\n show: config.delay,\n hide: config.delay\n };\n }\n if (typeof config.title === 'number') {\n config.title = config.title.toString();\n }\n if (typeof config.content === 'number') {\n config.content = config.content.toString();\n }\n return config;\n }\n _getDelegateConfig() {\n const config = {};\n for (const [key, value] of Object.entries(this._config)) {\n if (this.constructor.Default[key] !== value) {\n config[key] = value;\n }\n }\n config.selector = false;\n config.trigger = 'manual';\n\n // In the future can be replaced with:\n // const keysWithDifferentValues = Object.entries(this._config).filter(entry => this.constructor.Default[entry[0]] !== this._config[entry[0]])\n // `Object.fromEntries(keysWithDifferentValues)`\n return config;\n }\n _disposePopper() {\n if (this._popper) {\n this._popper.destroy();\n this._popper = null;\n }\n if (this.tip) {\n this.tip.remove();\n this.tip = null;\n }\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Tooltip.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Tooltip);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap popover.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$3 = 'popover';\nconst SELECTOR_TITLE = '.popover-header';\nconst SELECTOR_CONTENT = '.popover-body';\nconst Default$2 = {\n ...Tooltip.Default,\n content: '',\n offset: [0, 8],\n placement: 'right',\n template: '
' + '
' + '

' + '
' + '
',\n trigger: 'click'\n};\nconst DefaultType$2 = {\n ...Tooltip.DefaultType,\n content: '(null|string|element|function)'\n};\n\n/**\n * Class definition\n */\n\nclass Popover extends Tooltip {\n // Getters\n static get Default() {\n return Default$2;\n }\n static get DefaultType() {\n return DefaultType$2;\n }\n static get NAME() {\n return NAME$3;\n }\n\n // Overrides\n _isWithContent() {\n return this._getTitle() || this._getContent();\n }\n\n // Private\n _getContentForTemplate() {\n return {\n [SELECTOR_TITLE]: this._getTitle(),\n [SELECTOR_CONTENT]: this._getContent()\n };\n }\n _getContent() {\n return this._resolvePossibleFunction(this._config.content);\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Popover.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Popover);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap scrollspy.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$2 = 'scrollspy';\nconst DATA_KEY$2 = 'bs.scrollspy';\nconst EVENT_KEY$2 = `.${DATA_KEY$2}`;\nconst DATA_API_KEY = '.data-api';\nconst EVENT_ACTIVATE = `activate${EVENT_KEY$2}`;\nconst EVENT_CLICK = `click${EVENT_KEY$2}`;\nconst EVENT_LOAD_DATA_API$1 = `load${EVENT_KEY$2}${DATA_API_KEY}`;\nconst CLASS_NAME_DROPDOWN_ITEM = 'dropdown-item';\nconst CLASS_NAME_ACTIVE$1 = 'active';\nconst SELECTOR_DATA_SPY = '[data-bs-spy=\"scroll\"]';\nconst SELECTOR_TARGET_LINKS = '[href]';\nconst SELECTOR_NAV_LIST_GROUP = '.nav, .list-group';\nconst SELECTOR_NAV_LINKS = '.nav-link';\nconst SELECTOR_NAV_ITEMS = '.nav-item';\nconst 
SELECTOR_LIST_ITEMS = '.list-group-item';\nconst SELECTOR_LINK_ITEMS = `${SELECTOR_NAV_LINKS}, ${SELECTOR_NAV_ITEMS} > ${SELECTOR_NAV_LINKS}, ${SELECTOR_LIST_ITEMS}`;\nconst SELECTOR_DROPDOWN = '.dropdown';\nconst SELECTOR_DROPDOWN_TOGGLE$1 = '.dropdown-toggle';\nconst Default$1 = {\n offset: null,\n // TODO: v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: '0px 0px -25%',\n smoothScroll: false,\n target: null,\n threshold: [0.1, 0.5, 1]\n};\nconst DefaultType$1 = {\n offset: '(number|null)',\n // TODO v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: 'string',\n smoothScroll: 'boolean',\n target: 'element',\n threshold: 'array'\n};\n\n/**\n * Class definition\n */\n\nclass ScrollSpy extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n\n // this._element is the observablesContainer and config.target the menu links wrapper\n this._targetLinks = new Map();\n this._observableSections = new Map();\n this._rootElement = getComputedStyle(this._element).overflowY === 'visible' ? 
null : this._element;\n this._activeTarget = null;\n this._observer = null;\n this._previousScrollData = {\n visibleEntryTop: 0,\n parentScrollTop: 0\n };\n this.refresh(); // initialize\n }\n\n // Getters\n static get Default() {\n return Default$1;\n }\n static get DefaultType() {\n return DefaultType$1;\n }\n static get NAME() {\n return NAME$2;\n }\n\n // Public\n refresh() {\n this._initializeTargetsAndObservables();\n this._maybeEnableSmoothScroll();\n if (this._observer) {\n this._observer.disconnect();\n } else {\n this._observer = this._getNewObserver();\n }\n for (const section of this._observableSections.values()) {\n this._observer.observe(section);\n }\n }\n dispose() {\n this._observer.disconnect();\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n // TODO: on v6 target should be given explicitly & remove the {target: 'ss-target'} case\n config.target = getElement(config.target) || document.body;\n\n // TODO: v6 Only for backwards compatibility reasons. Use rootMargin only\n config.rootMargin = config.offset ? 
`${config.offset}px 0px -30%` : config.rootMargin;\n if (typeof config.threshold === 'string') {\n config.threshold = config.threshold.split(',').map(value => Number.parseFloat(value));\n }\n return config;\n }\n _maybeEnableSmoothScroll() {\n if (!this._config.smoothScroll) {\n return;\n }\n\n // unregister any previous listeners\n EventHandler.off(this._config.target, EVENT_CLICK);\n EventHandler.on(this._config.target, EVENT_CLICK, SELECTOR_TARGET_LINKS, event => {\n const observableSection = this._observableSections.get(event.target.hash);\n if (observableSection) {\n event.preventDefault();\n const root = this._rootElement || window;\n const height = observableSection.offsetTop - this._element.offsetTop;\n if (root.scrollTo) {\n root.scrollTo({\n top: height,\n behavior: 'smooth'\n });\n return;\n }\n\n // Chrome 60 doesn't support `scrollTo`\n root.scrollTop = height;\n }\n });\n }\n _getNewObserver() {\n const options = {\n root: this._rootElement,\n threshold: this._config.threshold,\n rootMargin: this._config.rootMargin\n };\n return new IntersectionObserver(entries => this._observerCallback(entries), options);\n }\n\n // The logic of selection\n _observerCallback(entries) {\n const targetElement = entry => this._targetLinks.get(`#${entry.target.id}`);\n const activate = entry => {\n this._previousScrollData.visibleEntryTop = entry.target.offsetTop;\n this._process(targetElement(entry));\n };\n const parentScrollTop = (this._rootElement || document.documentElement).scrollTop;\n const userScrollsDown = parentScrollTop >= this._previousScrollData.parentScrollTop;\n this._previousScrollData.parentScrollTop = parentScrollTop;\n for (const entry of entries) {\n if (!entry.isIntersecting) {\n this._activeTarget = null;\n this._clearActiveClass(targetElement(entry));\n continue;\n }\n const entryIsLowerThanPrevious = entry.target.offsetTop >= this._previousScrollData.visibleEntryTop;\n // if we are scrolling down, pick the bigger offsetTop\n if (userScrollsDown 
&& entryIsLowerThanPrevious) {\n activate(entry);\n // if parent isn't scrolled, let's keep the first visible item, breaking the iteration\n if (!parentScrollTop) {\n return;\n }\n continue;\n }\n\n // if we are scrolling up, pick the smallest offsetTop\n if (!userScrollsDown && !entryIsLowerThanPrevious) {\n activate(entry);\n }\n }\n }\n _initializeTargetsAndObservables() {\n this._targetLinks = new Map();\n this._observableSections = new Map();\n const targetLinks = SelectorEngine.find(SELECTOR_TARGET_LINKS, this._config.target);\n for (const anchor of targetLinks) {\n // ensure that the anchor has an id and is not disabled\n if (!anchor.hash || isDisabled(anchor)) {\n continue;\n }\n const observableSection = SelectorEngine.findOne(decodeURI(anchor.hash), this._element);\n\n // ensure that the observableSection exists & is visible\n if (isVisible(observableSection)) {\n this._targetLinks.set(decodeURI(anchor.hash), anchor);\n this._observableSections.set(anchor.hash, observableSection);\n }\n }\n }\n _process(target) {\n if (this._activeTarget === target) {\n return;\n }\n this._clearActiveClass(this._config.target);\n this._activeTarget = target;\n target.classList.add(CLASS_NAME_ACTIVE$1);\n this._activateParents(target);\n EventHandler.trigger(this._element, EVENT_ACTIVATE, {\n relatedTarget: target\n });\n }\n _activateParents(target) {\n // Activate dropdown parents\n if (target.classList.contains(CLASS_NAME_DROPDOWN_ITEM)) {\n SelectorEngine.findOne(SELECTOR_DROPDOWN_TOGGLE$1, target.closest(SELECTOR_DROPDOWN)).classList.add(CLASS_NAME_ACTIVE$1);\n return;\n }\n for (const listGroup of SelectorEngine.parents(target, SELECTOR_NAV_LIST_GROUP)) {\n // Set triggered links parents as active\n // With both
    and