From fc84f09f22ef2705f42894fb01a197a18914fb35 Mon Sep 17 00:00:00 2001 From: lucasweber Date: Fri, 14 Jul 2023 18:28:14 +0200 Subject: [PATCH 01/57] Add ICL consistency test --- .../tasks/icl_consistency_test/__init__.py | 0 .../tasks/icl_consistency_test/config.jsonnet | 52 +++++++++++++++++++ .../tasks/icl_consistency_test/doc.md | 19 +++++++ .../tasks/icl_consistency_test/task.py | 5 ++ 4 files changed, 76 insertions(+) create mode 100644 src/genbench/tasks/icl_consistency_test/__init__.py create mode 100644 src/genbench/tasks/icl_consistency_test/config.jsonnet create mode 100644 src/genbench/tasks/icl_consistency_test/doc.md create mode 100644 src/genbench/tasks/icl_consistency_test/task.py diff --git a/src/genbench/tasks/icl_consistency_test/__init__.py b/src/genbench/tasks/icl_consistency_test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/icl_consistency_test/config.jsonnet b/src/genbench/tasks/icl_consistency_test/config.jsonnet new file mode 100644 index 0000000..3b5d222 --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/config.jsonnet @@ -0,0 +1,52 @@ +{ + name: 'ICL consistency test', + + // @TODO: Add a description of the task + description: 'ICL consistency test aims to measure the consistency of LLM predictions across many different settings on the same datapoint', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'keyword1', + 'keyword2', + ], + + authors: [ + 'Lucas Weber', + ' Elia Bruni', + ' Dieuwke Hupkes', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the 
task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} diff --git a/src/genbench/tasks/icl_consistency_test/doc.md b/src/genbench/tasks/icl_consistency_test/doc.md new file mode 100644 index 0000000..92811d6 --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/doc.md @@ -0,0 +1,19 @@ +# ICL consistency test + +## Abstract +*Copy the abstract of your accompanying paper for this task here ICL consistency test.* + +## Examples +*Give some examples of the ICL consistency test.* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this ICL consistency test.* + +## Limitations and Bias +*Note any known limitations or biases that the ICL consistency test has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py new file mode 100644 index 0000000..a55ad15 --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class IclConsistencyTestTask(Task): + pass From 3f9a01016721ec78368686808bcc8018a85b8a05 Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 17 Jul 2023 13:59:20 +0200 Subject: [PATCH 02/57] Add NL Codesearch Classification --- .../tasks/nl_codesearch_clf/__init__.py | 5 ++ .../codesearchnet_adv/__init__.py | 0 .../codesearchnet_adv/config.jsonnet | 56 +++++++++++++++++++ .../codesearchnet_adv/doc.md | 19 +++++++ .../codesearchnet_adv/task.py | 5 ++ .../codesearchnet_go/__init__.py | 0 .../codesearchnet_go/config.jsonnet | 55 ++++++++++++++++++ .../nl_codesearch_clf/codesearchnet_go/doc.md | 19 +++++++ .../codesearchnet_go/task.py | 5 ++ .../codesearchnet_java/__init__.py | 0 .../codesearchnet_java/config.jsonnet | 55 ++++++++++++++++++ .../codesearchnet_java/doc.md | 19 +++++++ .../codesearchnet_java/task.py | 5 ++ .../codesearchnet_javascript/__init__.py | 0 .../codesearchnet_javascript/config.jsonnet | 55 ++++++++++++++++++ .../codesearchnet_javascript/doc.md | 19 +++++++ .../codesearchnet_javascript/task.py | 5 ++ .../codesearchnet_php/__init__.py | 0 .../codesearchnet_php/config.jsonnet | 55 ++++++++++++++++++ .../codesearchnet_php/doc.md | 19 +++++++ .../codesearchnet_php/task.py | 5 ++ .../codesearchnet_ruby/__init__.py | 0 .../codesearchnet_ruby/config.jsonnet | 55 ++++++++++++++++++ .../codesearchnet_ruby/doc.md | 19 +++++++ .../codesearchnet_ruby/task.py | 5 ++ .../tasks/nl_codesearch_clf/config.jsonnet | 32 +++++++++++ src/genbench/tasks/nl_codesearch_clf/doc.md | 17 ++++++ .../statcodesearch/__init__.py | 0 .../statcodesearch/config.jsonnet | 56 +++++++++++++++++++ .../nl_codesearch_clf/statcodesearch/doc.md | 19 +++++++ .../nl_codesearch_clf/statcodesearch/task.py | 5 ++ 
.../nl_codesearch_clf/webquery/__init__.py | 0 .../nl_codesearch_clf/webquery/config.jsonnet | 55 ++++++++++++++++++ .../tasks/nl_codesearch_clf/webquery/doc.md | 19 +++++++ .../tasks/nl_codesearch_clf/webquery/task.py | 5 ++ 35 files changed, 688 insertions(+) create mode 100644 src/genbench/tasks/nl_codesearch_clf/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md create mode 100644 
src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/webquery/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/webquery/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/webquery/task.py diff --git a/src/genbench/tasks/nl_codesearch_clf/__init__.py b/src/genbench/tasks/nl_codesearch_clf/__init__.py new file mode 100644 index 0000000..b8d3157 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/__init__.py @@ -0,0 +1,5 @@ +from genbench import TaskDict + + +class NlCodesearchClf(TaskDict): + pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet new file mode 100644 index 0000000..67ce7ce --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet @@ -0,0 +1,56 @@ +{ + name: 
'Natural Language Codesearch Classification (codesearchnet_adv)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_adv) aims to measure the generalization capabilities of language models in code understanding. This subtask measures robustness in covariate shift', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'python', + 'robustness', + 'covariate shift' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multi_choice', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md new file mode 100644 index 0000000..8193db4 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_adv) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_adv).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_adv).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_adv).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_adv) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py new file mode 100644 index 0000000..d9d9062 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfCodesearchnetAdv(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet new file mode 100644 index 0000000..26ffa93 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet @@ -0,0 +1,55 @@ +{ + name: 'Natural Language Codesearch Classification (codesearchnet_go)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_go) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'go', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multi_choice', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. 
+ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md new file mode 100644 index 0000000..aa3720e --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_go) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_go).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_go).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_go).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_go) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py new file mode 100644 index 0000000..12e66f7 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfCodesearchnetGo(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet new file mode 100644 index 0000000..427e0b6 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet @@ -0,0 +1,55 @@ +{ + name: 'Natural Language Codesearch Classification (codesearchnet_java)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_java) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'java', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multi_choice', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. 
+ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md new file mode 100644 index 0000000..16abaa2 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_java) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_java).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_java).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_java).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_java) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py new file mode 100644 index 0000000..6855c0e --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfCodesearchnetJava(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet new file mode 100644 index 0000000..af49e87 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet @@ -0,0 +1,55 @@ +{ + name: 'Natural Language Codesearch Classification (codesearchnet_javascript)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_javascript) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'javascript', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multi_choice', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md new file mode 100644 index 0000000..86806bc --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_javascript) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_javascript).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_javascript).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_javascript).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_javascript) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py new file mode 100644 index 0000000..86cbe4d --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfCodesearchnetJavascript(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet new file mode 100644 index 0000000..349a4a8 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet @@ -0,0 +1,55 @@ +{ + name: 'Natural Language Codesearch Classification (codesearchnet_php)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_php) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'php', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multi_choice', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. 
+ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md new file mode 100644 index 0000000..024058f --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_php) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_php).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_php).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_php).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_php) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py new file mode 100644 index 0000000..53da09e --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfCodesearchnetPhp(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet new file mode 100644 index 0000000..1cf2cc4 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet @@ -0,0 +1,55 @@ +{ + name: 'Natural Language Codesearch Classification (codesearchnet_ruby)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_ruby) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'ruby', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multi_choice', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. 
+ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md new file mode 100644 index 0000000..012e885 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_ruby) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_ruby).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_ruby).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_ruby).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_ruby) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py new file mode 100644 index 0000000..a53da4a --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfCodesearchnetRuby(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_clf/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/config.jsonnet new file mode 100644 index 0000000..b657f43 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/config.jsonnet @@ -0,0 +1,32 @@ +{ + name: 'Natural Language Codesearch Classification', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification aims to measure the generalization capabilites of language models in code understanding. It includes multiple subtasks to measure three different types of generalizations', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + subtasks_order: [ + 'codesearchnet_ruby', + 'codesearchnet_go', + 'codesearchnet_java', + 'codesearchnet_javascript', + 'codesearchnet_php', + 'codesearchnet_adv', + 'webquery', + 'statcodesearch', + + ], +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md new file mode 100644 index 0000000..1b2e9dc --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/doc.md @@ -0,0 +1,17 @@ +## Motivation +*Describe the motivation for this Natural Language Codesearch Classification.* + +## Examples +*Give examples of the Natural Language Codesearch Classification.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification.* + +## Limitations and Bias +*Note any known limitations or biases 
that the Natural Language Codesearch Classification has, with links and references if possible.* + +## Citation +*Cite the source where this Natural Language Codesearch Classification was introduced.* + +## Further References +*Add any useful further references.* \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet new file mode 100644 index 0000000..dfb9c42 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet @@ -0,0 +1,56 @@ +{ + name: 'Natural Language Codesearch Classification (statcodesearch)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_go) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual and domain generalization', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'r', + 'cross-lingual', + 'domain-shift' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multi_choice', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
+ // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md new file mode 100644 index 0000000..0826a5c --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (statcodesearch) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (statcodesearch).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (statcodesearch).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (statcodesearch).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (statcodesearch) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py new file mode 100644 index 0000000..f7089b5 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfStatcodesearch(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/__init__.py b/src/genbench/tasks/nl_codesearch_clf/webquery/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet new file mode 100644 index 0000000..49abde9 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet @@ -0,0 +1,55 @@ +{ + name: 'Natural Language Codesearch Classification (webquery)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (webquery) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'python', + 'robustness', + 'covariate shift' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_train_set: true, + + task_type: 'multi_choice', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
+ // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/doc.md b/src/genbench/tasks/nl_codesearch_clf/webquery/doc.md new file mode 100644 index 0000000..8973fdb --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/webquery/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (webquery) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (webquery).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (webquery).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (webquery).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (webquery) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/task.py b/src/genbench/tasks/nl_codesearch_clf/webquery/task.py new file mode 100644 index 0000000..a5e5f21 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/webquery/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfWebquery(Task): + pass From ac4fc0cad88181ef2fc41cbd9f2a08f5957b063e Mon Sep 17 00:00:00 2001 From: drndr Date: Wed, 19 Jul 2023 14:50:07 +0200 Subject: [PATCH 03/57] update adv webquery clf configs --- .../codesearchnet_adv/config.jsonnet | 11 ++++++----- .../nl_codesearch_clf/webquery/config.jsonnet | 14 ++++++++------ 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet index 67ce7ce..1583456 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet @@ -23,7 +23,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/test_sample_cbt.jsonl', }, has_validation_set: false, @@ -33,10 +33,7 @@ evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + hf_id: 'accuracy', ], preparation_strategies: { @@ -44,6 +41,10 @@ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. 
+ finetuning: { + objective: 'binary_crossentropy', + }, + prompt_based_testing: { prompt_builder: { instruction_zero_shot: 'Add two numbers together\n\n', diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet index 49abde9..29171f4 100644 --- a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet @@ -23,22 +23,24 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/webquery/test_sample_cbt.jsonl', }, - + + has_validation_set: false, has_train_set: true, task_type: 'multi_choice', evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + hf_id: 'accuracy', ], preparation_strategies: { + + finetuning: { + objective: 'binary_crossentropy', + }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. 
But, the task creator can From 4071f1fffa919d06d2ee14fbcd613a6e9f402394 Mon Sep 17 00:00:00 2001 From: lucasweber Date: Thu, 20 Jul 2023 16:52:13 +0200 Subject: [PATCH 04/57] Implement kappa, write doc, update config, create eval card, create test-script --- example_evaluation.py | 37 ++++ .../GenBench_eval_card.pdf | Bin 0 -> 46680 bytes .../tasks/icl_consistency_test/config.jsonnet | 23 ++- .../tasks/icl_consistency_test/doc.md | 52 +++++- .../tasks/icl_consistency_test/task.py | 161 +++++++++++++++++- 5 files changed, 259 insertions(+), 14 deletions(-) create mode 100644 example_evaluation.py create mode 100644 src/genbench/tasks/icl_consistency_test/GenBench_eval_card.pdf diff --git a/example_evaluation.py b/example_evaluation.py new file mode 100644 index 0000000..d15782b --- /dev/null +++ b/example_evaluation.py @@ -0,0 +1,37 @@ +from genbench import load_task +from genbench.api import PreparationStrategy + +from transformers import AutoModelForCausalLM +from transformers import pipeline + +from tqdm import tqdm + +n_datapoints = 10 + +task = load_task("icl_consistency_test") +ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] + +# selecting a subset of example for illustration purposes +subset = list(set(ds['data_ID']))[:n_samples] +ds = ds.filter(lambda x: x['data_ID'] in subset) + +generator = pipeline('text-generation', model='DistilGPT2') + +predictions = {} +for datapoint in tqdm(ds): + prediction = generator(datapoint['input'], + max_new_tokens=1, + num_return_sequences=1, + do_sample=False, + return_full_text=False, + pad_token_id=generator.tokenizer.eos_token_id + ) + current_setup = str(datapoint['setup_ID']) + current_data_ID = str(datapoint['data_ID']) + + if current_setup in predictions.keys(): + predictions[current_setup].update({current_data_ID: prediction[0]['generated_text'].strip()}) + else: + predictions[current_setup] = {current_data_ID: prediction[0]['generated_text'].strip()} + +results = 
task.evaluate_predictions(predictions=predictions, gold=ds) diff --git a/src/genbench/tasks/icl_consistency_test/GenBench_eval_card.pdf b/src/genbench/tasks/icl_consistency_test/GenBench_eval_card.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3f7530200f507a9d1b6591c7712255cd8a3fc707 GIT binary patch literal 46680 zcma%?Q;aUq(qP-RZQC|a+qUgKZQHhO+qV6+ZR4~xckm~Z%p@~AsY>B_)zjMKieeJ< z%na->oBZDOhgXG)-Zg0FpRQh_7<*|MBGelME~=_FiKe2x|%r?F-q7PxtfWY znK+o5!3YSzxVSo-8QH;jY}RVXscbPJ^`2>*QxVd&e5^H=hEgxPSx&|}GYyvHQ6M}+ z;+OgN(QZLgFc%^m8%{smChsmUOP`v`b8+Y1*M8nX@8_Zt72q`!MKFhX-c8zaclI0} zLm1>C&y~0zE1-6fDN~nq4!;EH%J$g>z5x}HvsyDkg~>qR@W9R4Vxj2gBTTUZogal_ zDof$f)*wfvY(qrEEq`7dpNnZ9W^xWCT+k!yv&?X8vfQ9&?k325n5mh)CMvO2HmMQl z$<0#`miN}7pE?3VDq<&y!kmWe#1nIkMIaO(6mU1r=pY1wYR-lvk5CPl1g4F6B3sEN zYo*R+jgdsfj*UIg@XbBJahJX29|ZYLRg+B1#KgothvkXMFOktcw$Ooo7o(bed=uL- z{(;M(KJWS*?A$J1u}lM%4zw7{YG<7z-4z~d3>a1$fl?K9rO7Gt)Fz9rvCT!5S!s+S zTL|a%0m`~fv}Ehmi)50>r?l#O;%LF$blttky z$5d0AujV2b zE;xJQ$>jV%rpI~}@*>v3isDu+j(;#}m2K*og@Bl-=M1`&d;F`XK941U5XU+Afa7xV z%jNf_3MWgsrem|3&D|I7u35ynoxfq%`p{W`s08;=V4iG(7eULdtWog?-s|N z*oJc9NOUotp6fa$+HD3P=KELA@jbsxp(a>%>!sqz2P(xqm1|QY>0Tr zB}KCEiePJ#!M?uogJs~L8cX*gfGcqKFR8Crwe^lx{$6LnAmf`N}2dm>91oCQ8yt2 zFz7ea>F`OU64)KK_@OnhF8$n*kxn~W0roEsq~{kX!q-ChQ(K67hQz_?&6n1o@Q}2!^df#=v{#xv2&yaoU^sW7@Sch@q~YqidoMCi4+^6nNG1Kq+;q-_|fVc}F*KrEwfEAsb zAcPmOP4#qpi4Apl*In}b@quO4XMpUDjD*_+cYv4ViJ;bD&fydxhpvU>=do)8;(+Wh zX_=r-zYT~pMy?H+G?3-x6%-T@3Q$vq!HsDLXCUsuhOdFNiKw6sftw({7FmQq&%r*r z*%4eJ0^;zoKGH?DMj=k2Ljr*2K_nZ{C$K-R4{vSZgMjjyL0eK+f$H^y=lrp&zs&|9 z-n_Yh>=ADL!#}q^+tjq=9n?riC=4v)c{ z!-N-qD-;mmfVKGNce0=ErvEy+J-EJnV>SWv>TG>2hP8Inme;Ya&*GQRy|&LbLwyUf z=pexj%#V(eQxSmybpi|NtGng+lPx^Bfqz#X|A>BV2lv-kN7D|RcETqJu0#C$3ciH} z_X+@Fir}5td(Dsfl{nwt2C`{Dhckq30_QFMrTl2dGJV4I>+^?Nhc%pemKC%I0ubi= z=ga0}o8KH>pMMiR3D#$*JhbK%b+KIS(f{gDlO7#{-{I)BTc8{idG(>Ldnr7a!$&mG#5@2znKut}lK%L|dG84d=BB?VNaG=KKOw z3I72ur#iT_f9vJ3A$)ehtt@oL{o|40)PTGLYxV++)ZP3SPZ&FI38PP;+#q_L0)u`% 
zw1KX-b#;G>y>}UC!_Uq@KV~Kc)e6o|-tST#=hPY2{B5!4pWs1>kqS6Jp2!7B{Q?+Y zpLn!`dwxur1Coa~z+bLGJnZKP4Ua&Ce~%=IIh(U%+BZ{xZ-u>;7TrXQE<>yPs8{BZ49|UXn9msL=Cq9QXjf>vgmfA$C>!l;% z?NS-va_S1=rR3i53q{I(oayYGcR%ElSZ(Ev{J(dpLq=5c@TFML6ic*g9%Wv?czZvu zK?KIlgKjk}nCtZ8bsJZeRV}&$5Rik6@YMUpfFMY>XrI4F~ay;h5@s z)c$u!V7nge)U=6YDgJ<5?9u#d_fc+H|MBAC=<^n=+vw=jYlyZeK}z?D;UHpG3TqzK z!8v;WotMLm00sN{s7kkxI5kGCPD_N6DwHAUr{pxZbr2bTbR~f7WyZuF;8c2CbPD~0 zyvO{9R+}1Q*p(uRbeVm@$2mrXh0;UjoJ?WF%%DW=wbVOOw<)nvr40KFsln6SiqbKT z5x;fgA<2QtD$N78i~{36N?VY&Wm5&7$~O8Jq7^V}ug6Md(-Y7!zFX`T#r3Lz7qS!aI>0w+}rCZqr24z`#2^~M?$K$>_)*JGuPw2yHV32Ng z14*v_*{2RQVt9p-CT=)iXug74yAm>&4x7Re=&6mGwR|Bj%O>jz^)?Lo1!0h$OA<

?UKNI}6t(G}eOF&VKZ52tsO(_AYbuE}Om7E2cy-DEhwUL|x z#FZwje==}gs@PE<{I?MvCyZ=~LbKietxviIx?!iULmFo|TR{3l64}WQ}e@$3Yk}c1Lj+vtQ!)KgBfGG27azUw=&EnlGeEdw{ zjdJo?XsKwth%W(zM1#cN%-CMboK`ZuAX5730VQ;^eEK2zp1T`#g6W&7Nhh8SGzj4A zwo?oa1uKh1-)Gt}=QA7@)MyBm*>iVd67O}~=^I~=IMX@Zv>_w;GQ#<}4FqoglzvX; z5=;-P8JrqgLh;v9dWqiqXJphhG0^bs;V$hR3G&KG^y#jZSVTq>t&8eO)xafp3%(1P z38fvlKQ*$I=~@WF81N;|)e1TAqEM9xvTwsuP7es!9d&=#nWAl)eKY6X4@z*G31w4) zKoQ`eXXOSqzAwdn--!PaQS69eBYrqG|dE9tdF%_?Ri=jf96%hJYd1>Ja+8ybn)$!!m zPZbkF8b321RXRH{$J*vvV{*LIP)ACV>g9L!4;c`iBAzXIIX!drA4V|RM?|%P8)6T0;re0P;o+%O&BIy-*$RFG_fAur-Y3Wb3Pj#4xljO5(cUT zP8XVXdKHp-*laIFmyiPJpH%Ddv#yqtKN{%mLebYFOaHJ3YyA_SnQAMhzifmI@9cmfUh+ln~%1%fw-sCl%a(?V_&kt_V5hqn)2!=hu zd^%wgO!|M`ZAHboQ*!L9Qp%+IL?vdZRKZr=^7r)f*J`>tV-TFo^_U?3w(2=5s2as& zhX5J)@&!C&yI`6#Mg&gr7W7^B9;kfShui5bo!6*YpUFqx$HHkI8rwYHe_t#XNrvPN z)kL=B4nwBLq-mG1Z{rb*S(}E)jx4KS9N#jgB;rglvhz=cyd(MW_)9+e{+^)4fI4Cr zm!(YeYqqlC>rRNi)4TDMO`CT6vio-rsTx0IQSx_vH-@+_jwfM8+Mpg?q4QS8>G$4^ z^R(c)lrD!@Hai|MAO#lB?2kdHd*Pjc8pMowQW;jCF$8t!i4^JJr5KD=XYx(vjZU2K zw|eOcXAw%emj+cK?u+Q@#Svk$bs~<2=RN=!5+BCOB2>nG34Z@@>;?W&j$7tDv#3dJ z{Sgil)Z|Xv*E3@%d4^er1=Ow0Wt%Sx-&MaT)*5!OjK#+7ASqJ5M>%esIC8xKy1B}< zWqoZb>K`K~H^qqcuEE6F>3soZatmB}kr zTrmAi0bbKB^Ai6qFU!FBdT)aFVd}Ek)|OyP_rm|I5-&I3rw8*4**++K$Zi)HcSL70Tp~Dbx zW?ld7?Pu~Do>0WXG2peVeOeAE8B0c%Iy$9Da{6efHx@0-DY|LQm~G+JMq`(s_HBLN zm}v!|hip7LQ6v!i+RyA}R+6Qsye5M=?q<6r!p^W1D_KzJDGS;LznV~}lH~$D+S|iY z-!X)~&777=a5#3AA3y3)1t1OuWn`1v@hA7O51VbgsY$TOonl-k5XQki(3R)f-#au* zJ+)C`@SGOI7-d9QH{G`_pOeg`(O4q8TjPJ*KQO955O=?PnST#4(y+Ju!%=f|vjGzp zvk^qi!NrPOqr8NM>o4g-96SYCuE0Ph{qxq>BJL2?xp+BjDk}zCI_;1kOBfZMSb%q&gv-)67#VZcy9qMHk_sWz@PY!4=+gQD{v?y9v0>tx``psYYEsaHgi-fTY9b>%EkkOkBET}p6UOJAv`w+V?KO9kAaE+*C5 zOQh)1Nd=Tl)*?J*tKVl~f$MGM@OQ9sgz^9mjvA$+4Hy57qdupd_`HzdJEBZ+YY3Xo z2!y`K498HLCcJf~z=Ba26B!L(eO8q2XTw@N;X0M%ol9AK3;J?hG?55O+2W4pxYPTj zT>ZDC-%Go8d(W!hpvcFUX(*__w-Y;1kx}&A=JxYl6LUp5WEay>$U-PEDlhZt9=&*0 zk||$}5ZW0SInnOCD3oIqTt9wd|Rw#KXwSRxgdhCWy%suMgNZ3A=&Z`+Ugoh}I)Y9lQ) 
zP8Mzk%fnezlz|jl;_IvSnDb~|_482H$7PTp{o^@p-V8bNC;Vy}2afZFFXEZAXPgo% z5bH=l(q#KdF~eWssFO=7ys(+FO#TJc<2X7m`BVk+QXG(X1@Xt|V<07(jA|9Ma4L;T zqjOFX9kuKYF>pmn<&@`+x_n@bISUZwvz#AFkZ7##?gG=HDVcoWrj5j!0E@;2Te{doH5uqDu9?? zbBqAcvZB8$D*fg27@+84DoY8fUHlEf{ zpXh$=sCk2lTUH}2i3g@kbrxxz=_j(gl;rk~IX7bx07qbd{(HJg}!x=Ws6Ig_#i zmXF*~9qQs|Mz}M(TVT?L#MIb>&@We?zE7p2RD#y=f)^KRuy$f=Q$=<7fnf2S_)SID z@4@w`a-3dSP^hi)d~Gt}TEqvlWzuNYF}!n_{GnUbHMM}ZuQk>0YTFjCP$+1YDLh#D zz>m5VOn^2YSU~)NRSL4u7juiY1J!s(<7jg=RhJ7g^O#u;dz0OyO^+hm&i7R*GkjVl zqQ*@G?;wP2*i0@dvj6=nQ4>-koDr5)zSd0g3`V>ju@ddG`!YIMx(^O(QHqm15ks9V zPyQ%q`*@4o5vi+ng=TSA0K9TeX7Q7L;sRhQF|X^~w`eKGku7^-cA(t53A20Wuluny zbuIW*q;6)gljfF{_bY$C-80=LjWI>KMK@8Fl^xHTO&Q6v%Plad+mMVi*dF8=-a8@( zxp%gtl)APnEm9(DXPEkV3h)s!rR6N}AVhzAx=Gvk1n!e8G6J5;L^JoE0+b2{hz0l& zrDaCHn$&0sul}Po3R?X)LA$auNE23M&&)g@>+A$Mneu=ct?;6}>hMK{R~HHT9)+G4 zH&pOL2N9otrY3VU&B9otD+db75<22l!h!0=>Y)>Lv(@hDJiNzJG6V~29h1NK2}YLj z{47EIhpW<8eR&=2=LFw#eO(5_Ed&M<$j?F|uVBsBFzqM-FFQCJ^(RBeVz~V8tcoWN zr`f`;Kvd|Zlw_z$-HB+NHQVLfbz9#lYzJw_Sf9vA>(+{Z_Ux{&XiFF+CriR{=nP#5 zkIPDY)(lhnZhLNl70t_RssD;O3ydQI{WYOIUD`{ zm`M>HMn@XKaw8b$ny;z|E;0DA*{vkwKmY(~TLsy+MB=M$j$X5ozkKs}n1oTf%Zryq zBFf@m2&j|vg(F;@~Nj!l*ow$rLy`*rWwT<|or zDSY^M)62zZc*f)SVE~g|X_dCV2$Rd6Zu{n+J{s{MR8j!Ea6u$Eek^_@hi9f-eh8ga zh-r-Ztp?>L>}5K^f*GegCGmVgi4UO_9>I0OqKYYuE~rBymIHPAg-oy4dxGyqmt zW)1mzYv^Ef!Q>)_N*(VI6T-zxX!WfgV}I-wb+v+VyLkO*ANt;Q(Qamog;mXZJiBg5N63W583KjvS|fZ;>(J z&}rm?t*U(B^$sj8W=K&omFsjjYdoFlylLAU_}nXg?TUKu(&CGOKD42|DwAXvz0`dy*V$R5JtL8_ za3(TQUmGG`o)s@@`~~ym&3-Xgx(Z|tjJ>3w)M$IX)oz@*6_F5B5!AP*))ymd=Vsfi8=jX45PTmZ zjH3GF(CPuP>=u~y8@3H#ocdG4uG>iOO-!u&VB-uI&<%#&TnLR!{Ud+ec^V`4Es9po zkKVvY?pq#}Ms9@*2(L&y$8bxCn}7rmg-mY~)<5~d*$u$dLHc*{iOO{Qiu^T|JzfZ` ze+zPy!eyWJ7f#WjvwJWs(YhwWFQ~N%V50+dL%4#dNOo@gT1%blvfiY>lnIu+Y7+rTlb&Jf_!aa87fL z^;~A8Unw5@t>aM+ikUfNBb1%rN-g=q#???MMOEf(L%P?Qfu$JFY@p0SnmFYblqbGA zz-XzO{oP5zgV|RW;kclHB6r8%ykc(1SY4cpzeA^PsknrEPHn8PYJ~%zpZb^2lU{aU zj&RzyLMV}EZtvVE9m_d)NoR8oC#|0}l0ugU2Fj~e)zbsI89EWRt(P7B)xbmcu5dN! 
zw~WgwO2}R_>U?o{Ndb8ITfNfH3@vA@WTrjkZbLwG;6Qpb#Pwkd=4L=Ji|)5Zj;D7} zONI)Gk^L78Vn1ohK1)E~HWgCoNj7O%^^$DmZkzqj^ED@5C8O`_nLvdZD{le zA(9wJtT~VqE6z4tW|UF;^`>q^(+~WMrwHpmI}yXfB6R-|eCW zvfs-pwqo{ltr2q}^x(;uF`)}g1qMh^kjn(bU){S}S#4};QajFvXmgu7(h9*1S(qi) z;{-@#Rx$O6xGjcQDp(J7i!ZmYe!F)^o0oGF&N)e9WH!2!PR#nE6s@bYrfg5$2 zl+pn^51IFgWRW);nCu~0P>zOamrEm?CA4X0c!*;svNJC13bs(*bfxMGoBhfHt^h|WepFw z-65h^W5vQ!G3g^+Zz0f-y&nm6yREbPPL3t2rZ$i(cYVUz@eOgz&pVTmvq!p74iidr z$i+kkxtNemoSacn%OjtY*0ksF$?pS}U~;`z7{7D^<|ItIBv*+JliX_{RL!eI?IwgY zxImV#Q13}PQN*uQE@Xk))6a1(B4|3GUxj5t@Wt?t7~D04{oYIx6hsJ-d@g%vhAsCu zOoSd$dFF2nOkNkbdOdxt+AfDI2yZ)T;KMw7yKC+ik=;epJzXaTnXs9H@Bot7le+tq zn9eEm#RPrWc@`vZC393Ozp^niXe9Tyj`sO&sW)$b%I`@Cs#Zs&XYwosq@XH>y}FOI zczu#?M!b6Kf3%NXxhCagS@f;RlyC;OAZgTV~8z7*?py#iydr8r$gx_TumlQPzhb{-ajo_G7 z-mN)gYAsnedq0q0XnJf?bII&Fr}X84O1EI>(}E=4ja)8URSQ7ug1`s;eh(}abZ(Is z0?e)U7Az-QuFvMFl{v2J7xbZV8e<#TCWF$i0M+j#pIYj|#nlA(#NCo@gFPbl>?X_0 zCQ@{!M54pUcj~1pg_iBEa*+{s@4;1zCVoY+MN043f}p=U0P<%nfM7_3c49HsMkEp= zubZGXPJK4O1aK>&(a^@uR;9MX7!+jfBj=j(s8?{C1qLFCd*E^^@ZT~W9tOzcpTj-L88Tu!BexctzHM=L<8E=b`9`yX9p!c;8OHUhuU<&6q+2lEn|N2 zm2gBv#lpsLE@?#NU7oW&$0cWCw`(|HCb>yU;yG5rw7 z%8`DI-27CQ=k3R{RW4AU7zOP@LH4Hn;&UHEFxAtIxa5aIbQ*OLFciboT30Uj)+ zedsLBInsyWcQ)`8Tip>5{&d@)|ks^&^n z-$GGDGeI6lK_{5uWf72WP$?(TpUv4MY~5nT7`YTk?^gn zZ6e#0@)~IDqWObFNFUYxB5c!;D=!$up);N&FPmM*mWRy|dbiWw4)5dtpn&AI^H+Lx z5MC{kbcjh{rD=QnJocAHtC8`3;Ll~T^G;*I6ngeJV7(i^+AZ|P>M^zNicfD|ZR zpMhC}I@*A7xB5cq8pG!EvHuZ zwZ|=oC-4YpTU;+;z+!&Zc-6~Io$k^c7HxpqB7FqcywwKe@R6Sxab6H1hea&<{}bg6 zjv=6v>Gcz8@bMt8==8GN840RS=ZKh9)1mpt0^n`rAzC(1aJZwtK=qz2i1jm^kDS5E z``(^At+5jx(QIq6jB#OXGK9cLYn$5gZjN|rFfr~fi|kIy0o#6M5g&&mcL@>J_B$&U zpEFnPX@Dt&LW;Sobs(zJy2LZ$zl6Y0{QUxVjib;2WS#{MK*<9iGl~C(6>BWp5G%B& zFsJ2OrHmQ}idW#;6r)&IpV|Z`uv|lLD4(beX+_3|zgh}<9A_d$iStxKELx%luJwCe zdx_R~^N^w_E+(%TB-FXF)%!g1Ty`L!2p?e~X8bx0ErMY|W z=)*rD>##%hU4@tF-1>YA<`LtVivNCi^8q3B-YoPl3|hC`S97)8p%vx32g@cd+*`_V1x)uRG8(m%>obnN@%*zI67W3qiZdA*sN^S 
ztaY>eQ!N+|8IaP#k0pmD$a?h`@J*6zJ{e-#!>YKC1fw^-*voEOz$(5s7+rdHEOl|BbKc+NgwV0UF`(k{e`q~J1ICgWzpCu&QrKu zQOX9@TvH985J6eVt4xQ#fuFqfjiTr*|$CO`L3hkYekkH`P9 zmIFSonU6sq!}(aqIW%{C`H;hCY=Ghn)HAixII(sVAh=sF)IPl~iRgHrZKu;9iKm;6 zMS#Am45ZT;Y1a2$1(Itz!5V{%8<>dE)A=Zsfw#QXC*1DjoQ7TtzW>Zx$AJ&Sqv}8C zrH`y&$7~y+hDh(2!RVeL$6=Zi0kT=cjzS@4F-&P+JtRf6yB@gbphqmM@Q>de&$=A| zX~dRf>;sZXyhK&{sCi}i>#i?ba+j}*d0}Y0TG?WM9DP7d0+4A?IE1hAa$EOJz;B{( zWh?IVg(j3WRrPMTCasEh_W&-Q%DTwL7N_`ZIb|kkyGau>F+p0?ou+QFg;84+-bV08 z0p5EvrZ66K8`)S-jCXPSIth!?4ijd&IRVXQBmtRYi=~A}#Yq_p?Oajth`(y4Vw@bR zLY|IsQ+%kn{yM|<416qFf+Puw$+Y3##D&O8Xk)Ve=rvu00LP4X4qSg*8NYio=cfIa z+#_i(p3*Hwmi5;I!Y-`gZE!^aZWK)gjaM)TM4Ihw>oEKUeDG`oOOt~%l;cYo#X;KZ z)&XIO?q6yD+%X#ld{`LytAm}Q{dL6|6LyDEne#L8;v%Zx>;Qg}o}XoU9e@7&DVny_ zt!VrE3O_BEmUjNe%DvxdYsmAu4^~!{jsclV{#tMBL&L}D;Ud{Rc&oqK4b0@uaIRa) z+w}f6ylHhzdlv2~N3Ox)^)WlbI-hJ3xIoCdgwPb!oj|LjP>3U3kI`!+H;+QW&s@kU z44LdmEd`$_g7-+if-IoFwnN)oaPj3Sd952W=?`Kr?UZZC!m_5dPt30X*c_hFwydna-TH#N>fp^w7q1zeF_Ykz$1 z;XS`(TboTrBIR73dd=pv@c>R z&Fp2sV`oUvrLsVBudsSga6579pB_iD9(>NyvIr6*d~lX?vUHu0d^F-f zRX1%TeJdeI*Kq3XL{{0FOFF#w^IAU0X;=R0NMKUck-e=|J^tgpT-ynr;0nH`kdvDz zN6DGZzc__#WGq#4=a41l6feX}Z-UECZKb{ZQns5hpx769dxR%pJ*07o|6rOQ(R8z} zEiANEmJrsvovst&J~m+_5wVJ*6}PtZt#%5Ern6>qd;Bb`G+R0oc!wIpeBs@4sIgnO zaZYkEAL8QoRm6~R=*EAoiKdWFekVm@P7{GuAO&3~#Y-MhNVh_-5=U~5&j>QB7}*Ao zEE&`Earhx_^J1V@MJKSbHiWZhHG7j1c0>ZCUuLQJO?4sX@C-CjK)@RBmXffrbpM<; zkI@N-f0Fg%x;rMKF7XJTq{qH%=xIPl;H#KKsz9L5-qI5iUnTSc) zn0C^SJ||6|q%_8d^~t_NGT4N}y^5qfRHeQfBJa&a>Q(@2kc zCjHAR0NGqI4cnkb8TDyHbEV|XNVrKD>Q;Wj-6BcGY~7wsB)F$YYwARj>HAj5t=rpRKDeW?@3D*i5|4f30&rA@P>m_=P`k6x zD{cxJK21uc`$eEeh6+BsJ(}7W@9S~7ZDm^0)-qMOI6Vi$B0pP4TZ)m3j1D(bHP0Qn z=Wa&q%M>Jaw0r*fjq(y(u;DST^U-;1#H!!3BZ;_7373G^;yiNi3=Df9Kimq)fq5th z%PVoPE3_Q1dN_;uh>g^=g!S(_tLS`P*1ey{tnTBDI3M4O?9vkP)jL~-&aWWbF}AOg zeX!2-e2sV+Lh;+X!=a5MxNi9>Y2~K%;V#V1E49t|L1H8OG~BId;ntl8HT)^feKuB5 z&8%shwuY;vQjKcgpMcFI$?Tug_ce54z_$F!)P9ec`<62%6!e#9CQC+hk?hnArI_Lq zeqR4FO*B8U!pc>9&KOl{>-HqOU>G#tsW`GFH4`z>1od%iS&x2gO%Y^tlqG_N-vSu^ 
ztxQbST9LYdYu%%99)hkz3xGz9Id4YLv4L>VDXjA>%o22#We;b(W zo6y9XIo0O6>ddEg+7KWW?mZ{;AAJdOkP=JuLbhgxC3Bf4hz8&}UC>Zd>{haoJD})C zZ6fmL@5H0)-p)swj+92@B2~NA&lV`J;CZ=&jPAoCTgoDO*av!Z-|L?Px6CXakzQQG z&+X}KHsn#ck^_#t)>(IO*(PitZ4NaNVUsr)}Gy@ z`(B73En{OE6hLo1IY)iKA%Wx(8|&mL@8;57yRPTnPf9Q#Q*D)=%S6>}$V!kBp6ryX z==e+ZbjIhsV7_LPSW}rc`NnQgpt5dM-8=>M#F%;)PKELI$64lQ)3j3rNIZNboz5&X zH)p%y1g5@>{8L`ayLWWUPu=Kcy_uc^W}WHI(Sc$bmiar)>=|20TCr)rp|l~RVI%ab zxzqUp@P`^>Quap%V(c^(B*Ss{4sUs}hX*KenhBUsDXU6Nb2e3Oe9gxZPN|2Md|3dI%THGX0oKC(iAxKuy{a*$6Xbn@W)W6HXP6}EaEEL;Q2@ArWAETm zvWNr)|EZ;E5E zOm_F}w_tc+_~ZY@+2{DbIs2>}|C_VV&dtX9U!MN|uIzJgas5wYpXmQX*>{ChP2O7K zh#-PMx)7g&U9u5zSc2}AO2;7*Vj@ElfdsA$F9L}o5w8+KLMp*5l}1k3%~npOp^}@z zobG(h`~0=>-J8;Oc**lLf0@ZU+t|jFpctZ=vy|jQiyeXy6&r94$oi3k!U75&6(2BU zM&fLz28j;&We)C`J-~7R!y$hq1Y(E73me^9G1BCdCyWJg^+^B{!wdK`DO_?gU?>C! zm-dYtF*X5w$z%}1TEGZ$UZCXYfYgq&$qE$g_;>g){@ok;p8Ek5NlVMB(BL2-3KB2a zF{lv87i$gvHc)U8It)y9K^}^WW1o=v4Q;zYQ-NDaNl;J_IU~Fs3g@V5f)VVExWRrv zu7eZr2-Y3Kj{^>CfN#H_7ZunEw9FprZqE(11$GTFED+QK6y*R+(({PaM{&*(Ekn{j{l%y&#-q7hKygkmkg0?dhY;~8lsJJ&l5bz*gbAq17;fMm{>N&53p)6I#4Fl^G(k`Y?K=~~ ze;S9%g1sdi5-;=_;=4uxKQNH@pq^jo->a!Z@q%APfgmm>*wn^}1?rqYUXeLfNIIgf z&Nm{M1IYK*Da<%Xk|C3(wmM#bH?X5WCFf%Pg&Zi~L4Qs{zPS%1v2dQH!T|Ul7$4XH z$oOwyP>)fcBT)21U^v)!`B6SE5*P>Hi1@I7SBpvSE+ z(Ea>gzj~X87k9DJM1cRW|E$J}*p4zlTJ7F_{P(f4s^~kA$OxK=h>ijpf=Ez`BGw?} zi!~M*?x8&9H)2(yvjBKwZ}qVL;x5Yf7b)N_p6e~p|CB}ONDCJ7?MKcTw}EUO`Zw?^ zzw(Fn_;>t1U+af6>DOIYk&x^$J^L>G_t$Bd4{0Jm5Qxvpx4{GWy!e4d=$mr|s?$i^P|YI*`$q z<{7j786jST2dcuLDe+R0i zGu#as;e|f(hR2)qNml;7^KY!ttywhX*N|{>21Wi6Z92h2`hR{IjbB&uUE25ZG*=A1 z>UlN2y4u9WT-_M+eZs(e9j8-i60M&PM24Yle{R@JDdOUGO+0kv|81}$h`|B zaTAA$Q-ke%S9#6Ay{8~T>?IM~lU|!`B+VfFcrligSdZ_|4neJQEyZtg9P)`w?QcKa z9OdTvP;ffIhsRj<<+#SC^*f*5KH*Gj3{*>yI2Q53lK&&Q6bHWcn#?Do>~nKQ|6^&> zMjxN3*n-Ts-L8(lTZ)6c%Fun+^J3NCp}87&%ewhx zSw6z3-uk&Ksd6!q)4#h7=e>4qmr+)U;(zcycxbt0Hhz^|R@H|`!2U{2FYAEE_`)tI z_<%ZORqgVGA$#e5?x!9Bo!649$Y^Qeg778L+_jHzyZ?O;_ei>%w? 
z{0qqN=QmOBH8=Lk@aA_6EU@#ba7aA*-Y3yOY8p8W74xPiV$4jC2$4NJz%qdI?&9W|{;k7KTCe%oB;DkB%e zeqA4?i+d1G$io5_?fko@Qsod0UigIv&7hR#E|t4haVQ0ifiB@`)lG9-?$;}2LRObJ zj7&|ZAwMe9>c~;xEFRt@8{*N_Div`v5Hr{K|an8QH_Y{mY1wl{(q}jw>Fk{YrGaX&?hM zSHFynnD8I|ce?RSOrVRvq}2v9sWONHE?Az5Yl|`(V%~J$4P%am_58B$pSzLyx1Zrb z$!vUp#X1T>Y0;=R;b#B0P+~^iCwplPo3j}&?s~;)+1aZC_HDecB0~exlZ8R@!{LGp z0gYZyn0|jV7&E5axpHrz?$3=!GHA}WacVFE-{B@74%a!{kv*d)@4DRCtJ7_920add z12JVYpT?@YxBQ>hw9@l>P!iLGns8l$djlVpCu{IYN{wtu5Is z_w*Y&*Se0X+_0r$!k#8=!! znP3a`*ND}lpzC$z^gK$JSMi9JZreP$trU+Qp8AAu#K1Vx&Y3uoyfmk_t4egC202U` zy$*#_C{5OLzx#(VQUJCfDJlzIpWD$FIqMJYkDxLLOJhppx=AV((^{#!L&J>UgVD2? zm$mP-(?(UeT<^V@=4mR~*tz~s2~)zreSVS2JPVZ>z~}MB$kd zNXMh#M#z^b1~}!z8SN(troBNSie>?xPmTEx%e@_%umDwz_UHi&)P$elo(P))sP?DW?iDr?g@>c+M=IkKNOw3uM_LtfBj=R|G}6&&lbY zf){ZI;Dl>CcW+j}*1JB>rL>_k>_ur(&#F%+Ge&QX5XF|aDlZqDmaZ@Z!w^8|Tdt?C ztc5PUjo>hbQrv9lc~cTinI?cq;_T74!f3TF==S8~Z*iZ`us01&41eDub)k#sgl^ty zm1yn=J!|2C$xc!+Jd%DYB^k!5194kCq z_q3=J@KS!e$M3Jqidq4yplfq-cIi*^HRbaPt-Ou-l~QYUe4p)iys_~zk+_;d?Cl_~ znTfZE>vt%7dUewVn`d3JGqTBLX`T;47ZNPnQq)G9v@WM&LgsCC>iCv>xp(Pt||lIPiXLb`>u- zbevR8`2-T{1ap^n#;1K<5Lq&a?8zyZu2!+sqfl6OfMoW=4X`V$?9R=e<0>W84&eB2 zjGRNPFifyuuWj45ZQHi>UE8*8+qP}nwr$LxnY?5cvzS#Q)#+`ePSvrp`eTtmMVt)r znjhw*pxbpfAmOI#a-d$&W($p9Gi(TsF#={bbs#anuQw=~@_P3=8^UrmMM{HsgyB|; z+MvK=gyg>@G9lQ2S_@AfVUrEnvtI6k71kVk>Nb@dI-9fb{G{0g$zN`5DxAXwvWFZ! 
zNi*)k-^Adz>H$OTpCORB=p(D$zy$c3ef9W+L;P4E+b*qxy>uzLOG|*-mBHSsA4Mt7 zbJp~^%im~z3=CsQm9LLSAI6u_7{S2x1AY;PoMCZff1O2g4Rs;T6sK%Ka;L6DfwaS< zep9xRr!(kyN}W|^&a8p{DB1GZ%{J+JDPo$K%HY#}h?N=As*>I6ebh1%5clYkfI2zf^Z5Rq7SYwGUY|;8B@v>&9Pb?cLfE-N__JlK9uI=sbV);{%=} zcU6v)jKUx$Hd#v20Z)ovm|mL_Jbiul1MB5lNNWNSsfO$I z!*tU0by-Fq?tVc_7U%gu_i^LnsD>!UBi-d%H%*w>FM^?_+`4n0K8;3>>ZFWyW7MKf zYr#IU$~P%{n_H9fyWDh$Q^#7?5km4te#<$Lg(G>ygDO(;ilsdFR@Ea4$!0{g>(Ph$ zv5Xn5Gf=ge?uTS4t?*(;5u0wzH%WR`Ei#q-b)7gr8DDV(&x)@uu_d-`BN6l}rbeL-vWx{PEml|OB;%QEiN zlq1+fNut$}fDQF{pj52C4M+_?I6XN&%gxj_jw?AdpKfm_$hhlzu-0gom7~))5(8UR z+GguhiB^J<@N4mQF}z8I8BtEcVINsVRm^dfP_ClLC-fN%4~bhVK55#6ddrk<>c2u) z`rlkaLGaZZ#C!Vk>^9df2YQNULtCZYF5H^xCrv1L?z+OVG#|mn37%H*C{wNijd350 z7vaUx&(6*vPgJ9G^o*xt^6AdSto`Srm^M|5I zL>s4D!!vDNXy4MetD<(DF*>_`nJe;96Vyg30vZ|{v*FwD)8q)ZPCS&RYN@N`czs%+ z{rc~i+CgAt+{!Pi6(@C7L=ng5gUyBR8rNwu(~gF@Ukym9 zZ~y^gq8#Zyf7E|pdU zbB&1li51d9P``Y*-X$48Sn28Hxnkf8Xo7agbBBWy)4w;SaxFhu5jV*Tqfc_CQp-E` z!eh6*T7&RTlP*wrHTo3oA-9@|qwyWI+PY_r6E>Mq`z$=&r+15tk{g6VH)U~ky^Vt( zBOM;EkWp+P*@qoDue!24y>A*mjiNFaAPuswTmPa4XRq46Vh@$STJ|b=wHv;htCfFb zG1mbid5)y<{e!+4%yewP?`s7YGZ8KMx6^pgYu9?gy&%&s#)!~vG1h*s$I9r>J>CDT zY}4`%JE}DG0$L&lua$Ew?0I1LFb=FXeAu4)I2~l429H?Q_RlV|y3CLNHyQ&%+{mQ`*yrAZ>^*3QCct;_p^tQL#gi*3r z|IBk}gw!T_tyQX^J!*2tgPJOMZmNEDqoj4Gbl@h2(29C`Wv=>i@9pH4T#mCzw^LJq z1-iK0pkjP2!~s6o;)8!>$o^yJ#Ws&`Tq~~oo|Bwa(=>TS)-1BvihYGPP}7n2qrEZwhYEm#~i*_|dfu3X==cu6I@PeBY_uJAm{3a0=G7vms->WwPwOtnL%n_YCeUjc(JCizvo??cljZ*rHU!*I(N`Bh)36h zKfKii7X>hEQs`DKYuAb2BZAulH-Y*0f(5KDKt;-Ke$0E$lm=fYU=j%OU7b_g60y?NV?x=QREkh> z$Rj^SS-^Z#-xa!nxw~>-9@}|%Hl?=Mt+f4ZX_Rj@Sz4qJ?c%@oiefas){RFMD$m7V@|8u zYB)YkOeFc7Rkd2h39Rr@B?QJ3(#;ZH$dTm;e1vwRTa(ah2YfvvRqB{Ei9Bx;sv6|f zC{311+qCZ(lJ*U23yo$>av+OD=c4d})%LYSgj^>}f4`N101wq9TJS9-7cy>Shq-Ju z4_l7tOU+HtL?qoU2~!7Ujk~djXou4Xs48#3J~vy4}yEq+Psbz4Iogn1Jb8fR zuEB1bFz9YkRgKZy!MS86sKi{RhpV?nsPlRtIL(c^h>wRU&+DE5`1zP*hof=P(@bul zdPFJqbb$vZee98HcGGGNN5ptYSYYrL zpg<*m#_O2Q^v+Tzv=*dEK)=0~Vd=eEG+?4p(ObU)N-l}LFsIk&H2vVEU!iWab$!@B 
zHNhEL7;~$1B~i4^9<;e_xRy|?MRC$@posX*pGiFE5B6u_QeAdp{F4!n~;9v&EaI*g4tjp6Zeq@i<#L`moXl2I{h9;%;BH%wn@KOzU zM`vz8K;JJWMD$6fldS-eqU}+tnxvFqk21sHA?gsi8U)cp??lkeGMXqrplHE3@iY$G zBvi_Dt-6Vh2<<#Pwg!V|_()IfTxPij@QIRs5 zF_O#MyC@W<@UDhN#%3qr;nbB=k3brloavgH8;IrQ%Yt;R{e2@4DwzW0=JCAgfBImD zastR{>>0rkQ&~BMych&$R?r4V0QC+}O%L}?jX>*~njgPo^ScrF1crxTOyCquz{l4E z0_Gr84qo8$riB%ig?d@3{fPi*D+VPkO*WafHP;B5_fV*(us|JJg@j+3{P0Zu3aY+9S8$GR>O4fI@ z_vGLrTpvc?i~AxGkmqk`Z}DpWc4}hR_i(R&+0_FC%+UT)3~sLkhyuenI)hG1ecL>e z3B8M(K{^B4|CiH0HaY?SCkV?A%RrsNKUm?#_4lLN_(kYb-M_jqxC5wnrv^HQZUpH4 z5x#e2a{va?$<_(z-ThPhW+$v~0Gt*yqXSURCxwW7KqBXwCgg|MTdh$#mRIq|2?9HZmkEu|4F&g&iW-cZtDa0f4w`&2mW@ej(fFJh6b7cWj_9_ z)~PnLiJsIZiU6wJ3(Jg*P0mbxrtcWvgfb3nQ48b9*!20& z-^@uK)&AD!_ghDz0khW=|7&zk z6t=IZsKz=s;kQH0whjQ=0>6kK zBD}xoH~t}Ly~KBzy9OYQ(jNl0zvw6aAwd23ukbbiT8Q5{?c~@W!ksqCiC;Kwf8i&5 zkEr5T7zZHD(4XLIoA7R@#i8{JeNr0> z&2NYuS?b?lo5-)t_bTIj-#sm3mfwN4S?hmy%h$hxt!>Nw|9ku2YfFxA=)GB&PxRiL zt8aSme~nZBg~P7>7mj87NA-C_FvtJU7dke*GI4Q(_~SIT=lRJ0eHH}^;t4Q=ka~JE z10q!YPk8gLQWSD0!XPRV8p3^v=)8`=^^?^XF*o>`OM9_%(5j z4BG}r4SNVSuX*m9L;B^00j-oD)2Ean7%w&(RcFI*!p-FZ%gKL+)?qeNHEg?1#O%Tz z6PMogj0wINv?7bzu1_;(V1WgXnOIOx#4mK{=;TBo5at45=DHlE3Nhzrfl~Uyo093j z%vj@cpRdL*Ip?33K}R$qP;-iyyL-4f-@131)3@L3RTpOOof075CnmMWxw=Ru~F{H z-{MoD#6=eX%tDfhdf_9@g_43xuwp({G?0U$)=a>}9`DgIof}3k?H`KS zPeLcvg0i?4YS+Gp{U3FLocODr`5Z|!1oj$^;}{hFj`D#r@d;>hZ)(e6VSO2ZU49nc zXg}s@t``wAo2%B247cRB&@S?-$miqczX|IQ51biBQ|giX3{gO`vd=&=Yu6MHI05G( z$u0D$+Kk}N;WuH)N-k}d!P^9ELJ>8&vWKEV|7wo&@OZ8O#FNlOnXf94((7l#CHQqS z2-dz*VJb?bkzB#~24Y+#kYsV2Fpb+|hnQKJkbk#;VdR%wV|Qlx)Io;LG{&H%#Jg#1 zsDzfQuev|6T4JyQt2xP3^8@YMm_hnu+fni5cR;JO%Cv?4rC>$_g&8vytIreHb%e68`(J%$5I$POy;)(fH&N!CW2ao@~}h7iYQL* zi~kHWeQ$>!usajq$$()qPDr}Ayr-GRSQ`TUfx#W3zS8E#Kalll9QeCe$x}Fo-8N2Z zyv{;=)jL+_ZuqMj(bb9Nm6x^=%EW0~DxG@h391# z0t>=p-8KpV>tfT#I?CKdtMq0w#lTR5&!f%Al6;tg<3jMKV)x0hA^WdkFeI)Nzc4V= z4{9c`$q*9ler9vp@7fI%ht*faqrjzJ18q&G$?(z)xy)-|%wlu`Ff*VZmP#X+k~a1DPaR9V_3aA!J;b+3w?pD5 z3McOlJuUC|mN-bPtycNRBklJ`iJu^)DQ$K|4dOPzZ@T(AsNr2n2&~zj11D 
zs#F2VBlpC%s74=lVDwRQ9E1!B`iNGN@KrHQioriHHuS_L`a)In2__UPvCL{3V=fN9 zykuAd7B2V@^_M@mAxBV)`OKXVPG64-XSvcz^jVEngRZq%VZt-JsCsskw7Q^;-iyN+ z<}Y#2_U?{A+aejlgVlJ7r>15c&Is=kjVh>Ea_W*re_dxE+2d6tia82mHEG>%cAZh| zutKrFKx{%MFj*2W z7vxH>+jBh^N)N#TorrL~J1vQ>Hedlur0wp0TeLM_7|E@Tn5qOkbFx2&lR)tYdOx6; zmLNFY`mzfN!IU=kNLcz-7d2>2l8_Y-gJ0$F`<#5{shJeDyD4UAs23-Vw+eOYEsCjh z=nT6dc{gnOLYE{o94sN)`MUijS(#%ij($F!ebMia!;Umu!ct#VT<4@&rE?9!=Jdwv zk{C!#p;(Dn^_!y!)h3|vtBw)iS(4_*6w@=`(3N@OWvl&fM3OQrG6`KH%#!5=&<7j? z2sUc{QyxA+laM&yZjf8=Os{pBdcjxgQlTZiR=Yt|9ttLGZOdXLC<)%lDP^G>6YokU z%|VvxkYRsSC%?I{wbQ6H^3P}3;K6LgcTWL~qV5-r2IaJNZ6tK% z5K%Aj)GB3k;+lzL;k+2(`MYr^V|c^4&K1RZiNE^eLkQSq`KsPKB7;0LgCvDRG`A|# z9&>kiBVqccH>{CFGl35ZML;B@j8Q31zAENr6f%(-9O_)4WQCRxf`GI=|K!6^>G2bu zV7Ds1DK)b}_11Mz{CtBAv*PcZ87cscOPRl(kD)io-Ja#R%Pp|yVM;41(Sk)v4`P;- zoax%>u9XWWof?8X!&P!38E8#t z&at0ZHU-tF|h4vhFB*w z+I-)Cxd}d5crEZU(9-G4BuTF4WlfzQvDN_PJ6JTsLx=({=GxegAEuP~Um$vQ%08!^ zJG&4F=;R9h%I?=c9*W^-yA~xQ^L|!$8#otTKt{4YjB3c^voQ?L-DTFGU2nb`Q;e}r-*?1p4Y+3|19k|1bkJP%q*@SR)Jm+Ek0 za3VN<&JQ$OM`AiiO~uwOux?WuAG= z5C-VHzzp|>J2pzel-@^|uXNL1luR*#I+b&()n+2FB7FwBCc2uzxJ;}zzymmg8P;mr z>!=5yI-oJ0uGa2l`iOFkCcN|w-(}2LN}9zzEV%s6ZG1llPlmO-SU8KH70U%OgA7_# zH&|9mGXkGQQF2pcn4rXiK<9{Yr23Lh%bxvd$e)ESk>t%txJ0$_z;{e65#YJ4_OH^i z4;srVZ?{UB+{qmF_Yt~YoBW;y`U}+(KM2__tJ#~b`f-sX1qwBVD=vIC5QU(nx+^N| zmUM`i#sIaRqz47$>&-X4W}~QPk`3M1$)0Xa4!Oq5dT2>-lqn8OI~7ll*5`e-c!-Z% z?lm&>K9`;8k#qMUwkW73$6|94eXB zU8BbAlw{h%^~3YE*%{4D^jY|;D{ljtApWV`@+Ks4qHOM%@?JHvx?&mEvOdy8aGTSN z#-8w->H9S(3wiYG*8xL2A;tVWWHg?6-&*t%COUg{JOToeOJRH|8Lrpe_i)n>UVGc2Nam!+lLfrOkSAjvQ(y_wj`57T&FIGjlog;NNw;V@IJLogww zs+PrK+9~C?tu;!JV(4(pEfh69T}BCNRL4K@K!7eMdo(AZ;t!5YJ^FHFm7XcjJ4hgg zUD^DnM(p4|?4b)#gmaxIi%mMrAb{@UErNVnq)OfrmVq&K5SECeoG?l$1>bu_(MeNv zX^@|D@!wp)uOywDto*F`$Sr#dx71$ZNON(_B`xPVRuvfzE@wj4B#O#$biu-z3`JEkVG10S&rR&v7QE7X z#2^DFtEzag_>9O-jt{h}oZj5CU6xhNp$I4HF&km`jx=DGaIjmjI zl>9)SSR^vPTq_^aH4!M4fpo3Udw-k<4`h?B*BAy32a?`P!d_kM&SI9{`@(BwscrL~pV8atl`9$%urdE{qUtE7wI#-x*6I}C}bD#S3n 
zC^2)rvOSAJ*mNv=w{G}%#9po$r-;Q0h`q{Y`edMFCJ}PE-8;X6wOv|hm0%`K6NZF) z*G@z(0gEJBh^{fvfb(st9BDgL!wFq_i1cmZ3Z*~3tuCi@&fEdJnOp%mu{`lbgX`$c z<;kTUh|cA`wzw6?_vt7TVv9^=P1L8wl5~Qj{L!i0usY?9nbF83+tTQ)SRFkpbUB@- zb&m2zA0N)jaIzJD?hrd6)dF2RdjB{0RM-|5@`H~p-{v|}@cUk1G;=BFN5FF$LPiXQ$>YH@M&F;7)#-*eET zC7^TkPuK9-l+o<2P%Ctg@jp(pF8Rbmhe!|K{&>i-7S*lN>=rno)BM}(B-EgsbWf~^ z%C{80GX6I9O5z4b5gWA$$9F<2dFj%xj%=;MdAw`|@1+nVoPD3>WoBDK?puTjV5vIo1ls72@%?)9d=p@Godsmf- z!tB|Bt`gh#0ycbh^LRmfyx;JT;n)w_ewgvpua?c7H2lF!UXYBAwcYHsepLlD(wrH1 zZ9PZAvjjyxZwByNjvw#%Dx3))B7v1!Uxwf;u@4IEBTA{AQD7(;5eh=R+pz zGD6bbIyV^;a(U*YMg)W6=3#?>=^={Z!jHTsB*KxonMVA@J}jQ(Bn?5EOqn`Jvay8U zCg|;tz@%lY%RW+REF_26BR~V|DdvpEgj#kCsQ^L`$isu`A9gg}yq1vP!~ET*7yk`t z{HKuu)8@kFQJ|85pQfx8#T93qxG;Lp+&1JST=HcvtW&EMD?UT>MVT^-X*Qk6i0Kv~ zGS0%%j$+gjjF@q_oM$t<+aAA}VHJO~>rLDn?X#lv#c-ldh@ed~lD;h@7bSLEZASZ@ zk?|#)k9-&KrihSI6*LxgV_tdvgi;q;nOd?n0AW!mxR}4CI%Q~|;fkGyOQx*PVkM$Q z!wgF`44;Ux!ZdrcbfAHUmdQ6EX87E2woVc;aF{^HfQrkKQT7#Tm26ly)pb|aX?0-5 zn@)LrbeTJCVfDg+N>CK|D*dc-2K23&BTu-w;imUPOsDbYea~D@KZjMmr8|=F_Pg zP%~mkaOWJ!emHWck9wt2bOF4H=Zv?Rdw_|Ao6Bx2oNVZ2-8eYP=mij?QlJ|ch(8&0 zMEwMprd8P?yrbXYl~K$%FfadC60({YoSAW6U6)Mj8BB8~RigATnP|d;n=E(V)DF35 z>YJw$hGVsPvXT1{qo1fb@_yV{X3M{lgB9hd#%-QJSvWwSuz{kwg6u5JXJ8(P5 zE@YW-tLEIx3>YWr^p(;;xNWs^-=`#BPJyW+1vAmWBi#C0H?R?{eJczfX=M1140X#v z{)`L>L*sy+y+*-Xt?H#O+5HAOxrLnH+$>s*xdm|LILX>%#1ct<@kuaN5uu$c0nOJ} z@~%TKj>DYi+i3V}`>-mS2N^%?B|_+a7VM)M9`Ymhvf^u9C6vPiZ1A`wk50%RV6>>- zp1{5R^c#58Quk&Esol1b@yrDQ#kaM$92_Gyz&CJ&9OvfVUdbhj@ferbR@8GA0>7na*+HWS z(^@;*G+^hdN?CmYst{h2(t~aA4X309Fhb9V@`aa}Sm9={mbM5f3Y6@kKsmnKtj&~` zffLklPG{yG^TbCuDzaZ35b+^nuknmV0`t7aSR2COH5YTq+E))-l+RLwKNF)Y#$jlC zkQ-S)rz$K$Cx32{5SlW>6b5(gz&G9_RwKqWe>j;d979Qf?~AHqG2HPf$p`9-Ey$7k z;RhMp+QdHs_Pv2;`Ut`D?kd`OU+`y&R0F=alPj`f+t!>)n6;dD@3_;&$wkSaI7c4ZcMtCx8M$9UyIbC>zbKR<0c{ z$qRby0zKMN6KEIpY#L*a2Cmj;vjzC?X9}VN`{%%o?L!ced$~%jGody*adoY(uzDaH z#@hgm$jOJ{>{={0P}JZfvELhj&T3Vl?MQC(tpo61e|!qhA|yi8r~D2#>5!!N`M~^# zC$}_<*zUJ6;V#QI3%-J$AbMW1(xnKiixO~M9A-Phm_Cq(@# 
z>A##K>p`ed@vlVNJZ9e2D{jPW;$%4ylgAwo01_^Hw{ zO#x@DK={r@`%h{C<0uPgkSNim7d3WVQpP|h@kQ5ddPmMG&HA>=O6amCq2CSXb+JzI z<5`a1qNY$!wlY`J?cU*9O8)>NIRA-S22cd}R7F~MPiPRF2=)Di6L6`b+V&NLOYV&r zB5S664(~iw#EKh%kT#PP+hY75{opZitHIc#Cbt)#0>=2Xg`#?9+!UIH5Bh1%I57aB z_nKG}>GRBuC4EK0r1-xw!k8Idf99zEb7 za7_~@#?I~0JHI!iY0A*qD3oE5MMP>bw|xT=bhI^y&%i_kQwutF?XdgtRE%V;yp$T% zP%TJ7XHkpvX1w{3zCVK@-dwu!Jvp6?e(Qn5#Vtjo`VALUo>oDm5*gQ%P(F{Z2=BeS z1>q0Vo*8&BUfDUpZoAMA=TRScln^Xyrr*s&WVOq>dlsR6lW>n6Xbg>z1i7mXdpo65 z$Cr3{%>K16P|5tFhxBIBG#%4HY4iOcBbZSGf7P`vUIk2qgGC2?v1z7^xzFVNVV{+{22Vgd5$z7ohsy0( zgO6tRGkP|-blW0m_88*(vV*Y*Ih(@L75Dae7S0A&cx1e!3CnAC0l`6uy0LJKY&{hF zOp@wHb#oMH&8#GIuz0h|0Mv)SUMubo$~lor`>D4K63JZoj2Bl3!ZaLo+fX#B^fm$o zXyN{}j7JEGuvh}614!>r9nbRPr~5b(LL*P7ad$2jfrk;#kuma$-ic*7oRERWGvu)n zMic_S0)O)Y@s=0(el{fF03jTSNP zURI`FBa_cr_FgIOG=dKzJedbv+i=!-dcGR|oMK?|B>2NPhlDulvTKn3$DY7i@Wx%I zfwOwGv=veTavQnK|CTwor@xCeUTja&fk-M`vI8He;oPV5LCXpOOXao3b={wGge3xM z3iCl$N>lvX|FhP~Wp$*S--CI&6`!?jZedPt>t+7wUdPN%1$jb)nh!NJ1eq*`CS9=l z5=1JP0YuXi3K?yLD)urL#+O4f6cM$mB;PwHnfi_~M-Ep^pcl#PfYBXl&>E2s59& zzy*28!*AY}VR-z*kR{mYRgtn9$~-cMLJ|XQm4lUYRN-Q$_^|P^kdl!Rb>dttj7 z=F(~Z)7O?_eJEcr{`aJA(yS?dmw<;jbQjFl5f@>3i5RxYbGf=}7ze;?0l%)lu`Dw9 zhB_@|?y^esWfcV8s|RcP=v(4q!H)B(+h@{KkNz(s7RDQ}Pgp}J1%49a&Gu@(2kz5m zFc47!$_OS(sLfx)7&sH~SdNb`wG|1@4>4f{zj8j%8~3{%5oIWpqgO4ytSp(HGW`-=wwAi zW8wMiiz2Tle#t+iHOa?qLZ(hG*XWXnYjdMQb%r_mW+-+q(OW_xG5GQrozi~DIS=hk zzIZgZ%WkwT)r9COhC*3v-|2lK!a>=)waZOeK^AyK zc&o!F;jKy-mRwkva%_`T`9L%I+!koQPOV0=m5;yJIk#hy95Y#w#cA1ekj0M)jLawH zq(ZWJ#+n7fWm_@Wb4FKO$*p;SPl?$?-Vh@^#FWJCfrPpcmLB{y^|*>c%Szt3}Pok}cjgtye@gIw_Ts;1$BLjK{u% z<-=L|-k}bGY63lOy|X7L_5tR&t|D(3jv7AXNwjLwY3Ul739(wonl-(M9fFThz~89w}!eL4}6 z%XQ@??|~kn*V)_0drQ1JeI|y6Wu|(ZDxKfH5}d>BCS#R93a`IEH2(>157w;mcU=c1 z;p1roET5@LrC65OopvWj9wrS@m3~hY*P?cmXxtm}r`S7l{#=N-#vvvCw&fM> zdX=omOLlCQ&=@ykyh`x>CY_{>FBIkWCb@KKLLOr8_7s6KvowOAO`ztwMSJjs6 zxJ0)gQ)ipOv1%%Ww)d(9?-q{rF~DME@h&dtnQxZtndcpqgT+biC+z{k6f{YvxrOKR zD%Jtnu=T`ThkD1es5=mHlxeQqrjl~#!H`H#BG48;Q`DV)_l(8sje4qB_R~F^Pxg#C 
z37bMU*%4D|;zN0{Q4D17t^{G9^lAqlV7)T5g&tTiwQO-AR7V#|jfA4U2e;b1XVtF; z$=GU2QBF3b0N{H3mHQA`4;O1*(+E<6N-4K(7fB5jb)F$zeV$PEmA(XsvgY z`r>`dcO8Z_9<@@g1@gn>bK%S37Kqu8FY8Hl$`omGGyU<;F-chZISJydObI8~e!+(*#K0%K25#!3!_{X!v z#p`Z;@ZXWkiJJZ~b4U!*h@9^)tf9J5F!_B5$xMN;RMMD>*a3Wh`(mMf-ZVYh6tSXL z#@&GEY;fw2Q=D?!vw@}XSk@Y&;ZgXQoo+D0z^=X=J*?OXXNOWFcg`)%6#uJWH$6$9 zR$}M!-s7t;!%DEvqIhMc93D62XrO^%^`q~NnUW&=5aseF`WOz&=BENtzxTgG8dG_* zl}ZgOo`V1-d+E}<$wxGJxc>c;zpMrDpsEOt6-P&Yn~})ZYD;n#=cuzy$QYa^*I55A zZ~n5$Wv)1p!`^0lXU!}gHpG1Vk@pADtRtjpsUeY0mw4FA_`Gb1NNc5P??sL&bERh= z8m6z+=BF~5@iqy8r=y{-U|rO*7vjIQEM2p(S^aVne43FXUE<1;vylg$*Sy!SoATQR z$PuwHKR+CQn5?Jyz*Dj#Tmm4Ii15VB;AD#Kqzrht~JW);I4Ph;Hg}cgvI!|x4TD*X;uvy>SeDPlkbS1 zBNvx}OP%iBNu=;FT&e0$LEHNTnmE`sB8?#xpV8G)S;}_>)MrTE?Ckyv zI5}|V*u4K(%KN6ghSffsVa#Pj(efJrTSux`Se*SYMX{ymr}bt!1tzcf9&vYt!Y}Qnm#klNc+!+%UUS$ZwvK+oqTzYX3oa?Nj8+*31t_RJNL6>^vZ|* zqpiReyxZ;WiAS;28agB-|{w zq<<8g{TC1rMfW)Lc?-wxbN8$gJ#Gg*YjZ5VLkB@CS5!u&#}xI)UFFarLTae|Ns$^{ zk+y$py}TIm$c9yx6|2^CPWTXB==ZpkQoirl-JG-OqTAJO32}Qc6{w55kFb)h{Y96A07!G?O zE?tp6G7#;pHRQ2cM3ok{4nEV9DQ}usyPUalbIDoz7BHQn>S^o*%MMGv#jwa@4SGhsarjOQ0_*(K`F)1^RV*qpo@ZZ3J%BI#H1`ni=bi zR}LB64BM-bwyM}MSZBjuiZ}hn(=IVZuA2ES=+&`mP|a?aBWRYjLHCnl#i42u&|d7h zpdxE?#wo1l{a0J_Umx%tr*flOa`Xm%hn4pYi}i$W{%q+nvt_j&w5sqt3aunZ)Hwsi z!lqGV7kt{_Ov)63zZ%9GBqF01Q>69&_aU&|n&Ny$h318&uJDhyVbdc7~mvpe@QOT|jWW$XEH!qb{3o?@*0I`y|v z%jbEKa^c2oNcGPBH@J|pkVUgCXUUbsnTQmj83MUa+2h;9U8CwG4y7Xc1QsiIsMLj5 z2$Fr-XUtHflw&IN+3O(4IqL$)!z@-EMP9;dP1j*gJ|SVr*)ZJo(Ed^H@8{x|kDq-D zBn@k6ctDA=Y|rbWfRJ`O6sRzdVMiwW7+b&)l~`Fzh}*twW8+IrPS}kA8`f-?Yv*_K z&9HO@#w6~_C*L6-i3TI~*kl!<`8mgrYOekGHM8q&mwZH{(p8IBK^+dpRPT&9rg9*q z4|sr~S)W$z4xb}5RR=q&EPjz5uXo%9Lrw3}Va_XC`6H7FL5KxPMiLOC+Z%!T-EXZb zzAs&la|)h{e+c1;^qFwt(2OUp!`-Wf4Ey$Gq_TBHz#%LyV!5V%9vW^^pN^r-UV8^6 zGg2HMK8ma#rjE)vMv}Do?{x5dpy!>Ejv78iUCz^nzf9q(pl7=W8!gSPt3R5Fgpr#A zcw}rTQ7o>ukO{!D+fW1l2%6MBt`ci&j5e5;lWi*PgdsOw7Nq>Bgtd{4v7v*Eet7TH zF9Kb-ZU%Bz%}hT$N9R*!n|lLqH$XMIewkTlN=vGe<3p|`rY_JijvA+!4{V+dh;>g< 
z+E;wU7rt4at+c};3yg*Y%Jw7V;mYkP8?4NRRs?!>?YWUai4M9Pe{%zx%gmsE-x-BT;x|+@;#P? zXr(I~M3Wd8Ca~*s_TC10?im4bgNkd6u?ZaFj+6neXDv%^O(ZR8^4v#$WmV?H6QJ}y z&jG2r>mv5fkCMHyze9;ek;5ERTfjFlCf8L`Mm4(XBni~LD$mG|N4%`xpSzoZpdBS+ zOp*xW5lHH~TVKpD4oka?bQV&Yt5(*siF{4VcIEBEyYhnC+P~y0;hRNjyO(U-tcII? z>d$g9_gn&8F^mra+{W8=+1)tA$JEE~)+cWyeInw^JK)G85C#hYt~AZ^2#zsmBWUabgc?>iiJeSrPyFP2 z@V=+&{Ho4*>e;pa*xjpZudY?=b9e7`_th7HRfNW+Vm~sw0cO}7Pm@tVLhWQwO119bH=mP5yyo#6#8BC{KrW|Ujhf&2MPgwIZqc> zu^0-PuXzln=QEF)lHc^7RbeRF@L4HGiOF3Hdp6(aXo)d z3e;wUtHr7qPrp%`64B|-FXa>vB^*W6jN9e3*95C&zss&i1IMZ0l5sXcdf#K)oh0MH z3u>AMh#E-;ch>J0f$!Y>Jt&Ehzq^UkWArE>iYQ95{=UC2i`aJqpej%LiQjz$T?&W!3L$VsTBo7xu!>*M}{})*Rd-~Sj0up z15c6Zu4-*Or{Liyv?6PyGGXSR&_KS-tF&cHT;iDedMs9KBU<6R7ZWNr*k{HA4~a z*+43ezv1M50NHP5#pD_u$LMarFXH{gm1lwb8F*2GIpJ{(TSeSnf@oRXeRgad&iDGW5$-wXaPLHa~ zl1LXtb7$r6&#ZDrX8=0PEUSN^inx0>aA$`yIj|3*sMK(`Dwr$7XzhMaV7g*>gv}`# z`Xd9tMftbnVu_yjw+>wb@Hy~;<}EXxQ|Hw6uV-Ws{0%N}Pv+N3VQnrhl+{LN=Ah!f zV|#b{PxI(Eesh6GHBgJHT->RP%-Nm!4y*A$0*>vCqN}o$G%xS_6@AT|(8qX;DZCO~ zjLU0=-u;VHG^oYS!WaF6on+pvo&jSK-1DHU?5pG2C9IpD-nki}9Z9OdD=0@EmAS4A zy2w?NE?JphDIeo$qY>~XS3;z?Y4_iQr(`o?g~0|Td0 z(9KTG<6(m}m7TXFLEhV-Rhf{o`2$|=CRgqhWfm4NU)@W*X*bZk&Rf!tJd#}oG4+xT_pR> znUwsO0gvQL(y)`15W%*IR?lsS|Jn-U(kCd1_f7bJ*Qc>E|0{hOkeT~G1!=4R4%Yv_ z^k3y^oGdKNfPee^FXU-%kg6#cOB^JdBqZ!E$Rgnp3zEnr(h=amJ`^A`kPW7yZw?VH z32k9Pk``uR6Cf!Q5d@$ietF4r-n#o|)5ok9^Ssx!<*lbU8$lZnZp2uz)Iv%83@Eff zR7~g~3=pr839mf>2n;zaL?F4Or>EQwj>XD07tIwv4jiHcDaJ=|VN{S1vqlCU#9efR zC`j{WF6dqoh$AkfqZT9z3;+~t{8LV}2m{0#1(^$@02;!Q43(LUrjB-D5g)S6y^k8? 
z;{gVx9up#Ba&j{2%?+Bmvro}M2L`MFx}U30x7MyXlnXp2vDE=*23zZiDF|jq!5L)l+l_zd>;5|(3c<}f zuCYzDi(@FDYZt~lfN%pBig|7h(zuJ66a)tBB^t(2oUHDRgb3v)aOg1Vn%5QPn+PsK zfUer#a)D+BxXUOZ@Og(L1NNwQIOj!kR|r+ilRYpI!?}{)Tb1Bq!&VL7C#+YXE)~QA z`tIw~I8t1|+B+@y=_RdsR6u9@;APeOuC7qY=g7GcP5_&vvaY^70f;krU)OiSAwNCC zy$iTk+c!jm#ht6OC>O{Ty;RV<@b&&$KS|$t6y&d+I-SCwUq8xs^Qn{_9YHYcqCr>s z&UaxJ{3UvWjNJTl>ki|Cyg})|Y22bi2l)AZJeazcGE<}Np1<3_T%ks19$Re!yQtpE z|M=sosV3-y7;pzvq)esiQcw(o?rC&D{rv>SkU)G;#eO<;N7svhnSM?@R>}EWoZm=* zf%!j*pj^*tzKqd*8U2BMF11rg;+c9sP<(t@lh~(!=8yS{JsY@wY~xz}wk`d{j=%G0KwJW0_U_B*1W#=Q4JX?w6pWA?ST=>sRe8T#{0;>1vzdhz`MU7%ps;` z(17+icCdEnQ^CMBgaPfvMuxn^3lA_5duaw-+R zZ;@~xxn875(mz%d4G`w${MjV?GNh=4+wIa9N%rDiwr@tAM2>F(Ji&RdJ;88xrYi%1QUH z9p@5CXpa;KXYO;Z&M0ndHG85wGSCIFdqcg}Z&rMO*4oKp^J-xFNMda)$^vpGR3OIOmUaXK5jZ1mXc98h8n*CSnN*Wv#kU2I+#!Ncl6Ddfdn0jw13n88$yu86DPz7l_=%jgJ38V*3e`|CKLl`rn>{*Bet;(es)VI#jaaF1sb!lh5XF z+(Ajeb=6zXU-MN)#f8PWYqtT#+RV(JQUc22_mG+7Rg3`|n;>Zw%7+zwOik)AHO*R6 zQoJ6qi#mh!q2~-zcxm71<}%;;P!Bm232FP zrE*uC^>R)=3Y&8u+czjeQI1!4%f$wQ1swT4u466833UHR zcxmf5Bd7^pw~W%E$(p^@*_C=`xi-S<9?_ekGUrY<_K*>gkEA>^$%Lb7Bwq??Q18HD$Y+1vnzG zc7Mp#>~X0vA+oN1Br|I1QxB0*!tlz;YjNEvNq)NDKXV(6sU*6B+r|H$T!?oDM6xVO z7R572$e%_!A}aL03+WkcmCclj>2_?P<@ZmspGPtK_vIdr`-aJ!SQg^2!-BE=21Lfi zU$?>9#GR2S^v1VuIRGksGoHqb%y*8k3*p=*kn`{7okf1?NxA=6ZD_ly>AVu?7w93G zBP_{TY>18XxdNILHWQLHehhl#Ucf>Qy#Xxx`@S7sVnl5^22NeDOBF5{@?Vzf`GBk6 zq0g%5Mk-$&kGAhjP~--*LlslZ&&sJ@f>9M+t*)zM##u{pFFEqZNCmV8a6WivZ!N;c z0Eb2bLn>Ff5Q1Zg><=<5-k#`b70}iz%8@JSv!^4r8yJ5WIZmng`c4(T4(^EO;2u>O zt5z5U82ZmSS2mAZ|9#+GIc&>Xpef8Ui5fS0B203nr){xa)KAt#izxrZIrt6o0jP&b z9pxm(P$p9fSLQl=tjG-0x3iY|mzI=P;AOl#h>t%;EF zk-N8PSf%F|JB|ilg!}zKxjrYvl1dRz8Zr zoeOemD8`K_5 z3sk(SvsRUOy9`{xWX88=LMn)jL?2W^hjckNsU{v$6XS2p+xXam`ey8<`pJ^Q+rW_^ zwgd@ldJ$~D9J6r(7x{tte!s>P>@-Y0zWsQC?sbZsZWMx~$KxPIE|yEQ+|N_K7-?P% z)&qLd{G(M-(sIQ_d4puv6B|j_c(Kx5GbP(~(*ajh)d3n}e7io}>%v&qB;`@EyX@_L z5^-E?jNv1t7>_u_S5dWUPD|Q@HK;*i>T~6Z!1?BirsdI+SJtNOG8r$9ss1Z{dYgj> 
zZbG~;X<}82Z49T)r5t&|kKY(g`N5KMHDiewsPlia*Ixy^^uWuh!&;o(3L^~Lrn86E zc0hA6es)DYm%#4x&$cDeRfTM|!SK}MsO#VEI?8cwM$>3A`4%iw)5grY_{HG_Q=&HD zE(7QWrCT{&1^L^StS$EtQ_K|n=?j(&Ia;c>{cEu-8N|_?!;|$xZ_*mTjTo21Q{52M zWDR?qiUW@NNI!nufTz<7Po_VnGew;cQ9iwtG?(K|m8U|}rCCjgaui8B_E?q1C*Wjg z@pepDoua&61yhlQm_1Yq()p-?X;M;}in0qMZ6MX`0 zcbsu>QQq}hgVN+Xd|nYQ@ba~;X}Ar>$)F6BPWW#a<7*a(47}S>w1%Ui4n7=P^Gh;{ zVp6w>Z7_a}+au?1o=!>Up1;C*Iin5N)k|9ssgaF7n>O58t>K&D~u#Q~jfyJI~&IqLd6W#a`2s0AjZ8!C%e_{Zc2WlOx){ZF5C-b&_1_Z+t-H z8W@C_X;@3#_vUokb4ec4lKATz-d;n|D2o&&@70fkSAxp5*VF0de^pO2DR!Rajp1z*K>_K=47wAWOcQlWvb*8 zWOC9QxEd}x1{0VwJiB(EIFn;a{2IO6;&HFgu)(lRGd06xpZ2BiIf^0nDer}6u0|!R z%rlyJF;g7*bjki1oW=25NAY!z9yP&<^CV@^l50mxxvQv>qH8ZnMDrBiRHV@L>Xx?z zplyaror&n>_TssIV3fPv9dY_y?RKyjSM;cUm~qwFaR@9<+4;+~m|e5C#Yk=++_cuA zp3hoo4wl@{1K~|8_34>U>|rC5KRd>VwrIZlDDl zx~jmJYUTokaQQ^lpMKPr?XG6{N8i0UUVe%a&G1t$^G_pKN%cYui|}I#XoFh+uRYt; zd;HX+mz-{!gX&q@wP&IdSw-S1QIiz>F5jrb_t4g#zk~0qRz|=9_}*^~ZzFzim2HVU zbh>+%uVsC&C=6Xg)ZTcAa9Ws;OQS^OqkTnqjD>;QP0hOQUvaO6RL%~*Jju^P&6n~~ zQ-N)mY{olcIv(OeZ}}b4NKB0oc_oWlOC5?6s48yzzU6Le+l#^DmD%B+RiG%gI4JgJ zw`ZOqesDsSG~Zlv&7)lP;nKiNfAZp;?5RN_+{w{Zg}AIJ=v(|Mi%J~Xa)4`}Y$8?N zBW)PFrD4Uqn|+U*UgN(`y^reW)HkklIMg5`L4>KS(T(~8U(jRnHA6}Cw-T8BMqy~c zm}`+61f5A$0@V-H#)n)3Zp@Bc7fnHd<{THeaOMkkvuuUZJ+@Q-TLnT=k+ylHKUgdxC+lTzL*Q!X6!@1AnYo)WShUtP%r;L z)X&b8O!5r(ug10xF4Z#dh+)%gh9%Q0!c5$-=HB9WL&8h{O_7;9QB=(7abaWiYdB5n z#n6;VEN?ue>|n`Qkrnz29Gr>SO|U?uJ$ta z|1Z?-$$1o%mmvb zjmCFdB5j7^9Ckt)lF(Sf(YEWUS!KZ?dP6o6EidOH2-@55Gtex=@^2)hNE=jhtn}8c z3FJq&k5piBuqh4;j^mh?w0G${GvqxUz=Lh*z`5mYA%DAOh37j!0$cj1Yf(30w;TDH zx@bCL1w)LR*(+lD@yhXGv;OD@_fZ6AV1TS{wr{HIBTx<#vdY@&fn zp5_<6U!_aPZ{K(Rm_Sdl!IjGHsGb61m=NmSt#zv96tf5y2P!-96 z?d+3LzaF)avgw@dQZQI1i49@pT;^4NGo@!yoFA`-Zv~!>%qHE$HdXiqo$OfHK(tk9 za1#>cKk$l65=o0CY9UGWY-;z>s?0pt#9<6z>gA=vtxyr*6i==xlzO!4FUj{9$yFJ^?V==q_E$cg~TowArQ$<`4*0ipgtNXGZ((Ni`A9AV9<0!+7|HEF$@1f z=lhnx2_rz9n->(sPC~lsV;*L61sC(A+2c*M7mgARB{-Hv5iuPPruQSJ|K!A)uO3wNxN*5IABWTf^HB 
z)HW>V_Mw9!Pg1D;HWxD8`QkxczKqMHBP!blrLcU05cp;r?K8A3yWEx~G4h@GCa$Dv ztrRg$ofvVr@tOOVB-@vMQe>%Zgpm^HUIY8iT0+bx1f{IEgmg}+6LU>H-M3Cww=*YL zf@}4ZSco7Uu2;rBt+QGqB85 z>6cMlE>iTx7ats5wQY<)y%}i2?a*czzl(>J=BZO3-;( zur(PKk6VT>2S2d;_Rg&w2Jjk!sXxoUC#YEb9!&V23maIP>o*X>c<`y>LF7gTu~wA;+RBE>(SzIH%2Y7Y>RZ{4+UY@^5S=T1y){Ga`_vz zXH`B0d;yi?I@0U1C9+|Rm+NORfhjyZmpDOg5^bKp%^3>|=FBvWc$5cRFViKi!a1ij zwovqM_#PmFzogM4{i&v34t6|XSl-6~^8qIA@xRUvlfcw+TRlk#!bnq`g{hv7;sk0= z1#(Nv}hu~AAv1CBKA9VM?d;O{9Esl;}P06QdvL{Zy6?mI{p5KJ4hH#Wpg zjOHAjoM#3%rf9Pr-=aU*Yp|!c&aw**r*$c2)`dp{(I(cs__Dp7cV}qiyXYk`Wqax2 zv4bj4i#sM)nxZy9P)X#v?WW{5`|8#IxMD`57#t;_ENUv~9pmGNpsT)#uFGa_bkXlj z)4bo)$!rcKucU=>Xu7@r9dz=!bkZ}?4Q1jht#~EA3SF$3^T(B?EK}Pa@UR@AqD}uX ze{aP@LFq&$SbpFHVne_mHl<-5dPpR#M6ND%riR#i2tUuwfw*M6h4l~ld@zAlQx2K=4qTFO&iD2dYgKFISnJ6i`gbe~4|O3O%7;k0U%@Sv z!Gyw8?vZYjkLtYbfGDpVY5#SS!h+crUC z{FCZM&-kYc)IS%)@^-Daty7iDI*yy~7T7%_%v(5nttX8<*~zzEhc8=0B0Gu=jicXy z(FDq-@jNm3UBNazVjYYDWDendeInzD! zS5)4FA8i_D8RB*%?Z^$V0{SUT4w?Ib-KQ&g`NC+dovwny*#^b!NnlxdXlx-AkPZ+6 zh*=}qkKg`4Vhg$MO4pc}3+#pKv2##v6I}iV zB}8YX<5z0EUv^Rl60MjCel+mYdI07=EaP6B6XT?hN9$PYXUvWq(MR;#_?2QRSax+9 zJH~dzOYyQ+-!pF7*I#zs5~G3-|F9zEc^$OjH0N={P@Y_Eja|ftZE9T>S{E!i-iGw^ zm&Zp`!`Jb*viJ|49mXynBsx@_v0dU!@a}}i9+Fx`Cy}q#kDo6&%vl5<6~zU4wc{kQ zxL>VzrZg^^$AOUHYjN1-2^h}bJKvrEFtW)VoAD3*DM3Vcy_;bl`g2i4tp(znhRe3g zO_*7|vqC(mf}Vm{z5Et?_GHrwR+-c-San38l?7Z)T(lpI8tXxGvWti;+ELK=-AHew z^_ELQAR4Li0#5(jS!#5JGWhSfVik7^2iElnI9?rDC&I2*7b<0@4H97*>>!3U<&-0) zeXrM+x0V-8Cfi1qP=AK^P57Z~-&ET@31LP6%^UmSSgm{*+a0nT>F)Eet(`)}UQSpk zX-T?c|8yr`j84+drS9PGRayP6!Xy%tM^0%%%Crm=44*E~-RajJ`J?>SUj`*CNOC8G zM!^>&+-j{!-bIJQ!c1smtX03AgSURvye?FsIz!uozg+G&_4t`!IiO%Cep5Iv*+N1% zopkpTU_^Q(q+X9}@<*9Qv#6YcpVdUDPicxC-7dB&q0>$~^yr@VeOTgkqrKXKtTF*Z zmKG)N-I|WCJaj|D*|X5B;K*b2VaD&=&LCfhA3Z`Z#7UU1tr9s7C}9=m<|!1g>2qSy zaziGn8As`Ed?6(e5>K%O3!jB?jw#SW^2^5&(|$1WI1A6PG*<`1*Bptd@Rndf)h5fo zu(*03Pi@xh_xvOGurJU6U$8Dv>ty6vBQ;e4z{b>ScKOuylrTe^G!0Qj_^y;7Ro~Z_ zy%3D^n(RH!n#a>Fo6^jyqIHy$utTydc2HDt5MjlHQ~JU!qdJVbDSYlJF6tuWhsEW^ 
z(R)>q&P$tTI_J?T(E9bj5VSXNd^wiapPW=W$~UTT1N9tIe)%;Mj5Y3#BAcO)`#fC( z&wm6;`(m_lCxz*#FcoelRrHhmK=)>HZ5iKJGMIH>3$7DQ-JGNjbJ#wsY;3Ga8PoQH zgUD3+qchW=*u$7RUomSasBF>Sxf`>|n~vH4laoQtvhLku#`$Vq)61cAgySI(Yk}4& zjVj>ol*yT~$j+YmHnEKKDVnqPw+X+i;DS}B1O37@g+d#8@`XdxDd=4eainZ-HE|LL z>&>H}1*dQ$BrT=tf%)r?9UPrt9U*(BY*|bb8^0Wi9tScu!k4m82nU0~ZXX%(b#b$4 zf3n0J&}U^vYC+g)Mu*tgmV|7YQ>p4h2Hzb{(g~ts99Ejl`K>ew{OsIfkO(chGGjWA4iq&^1Bb4wr{yqQhGX9vS$V@nCFo?4$=%V^ha66 z7uwr-gnDc9JGrr@X9poZ0^qFxy@zS}G$NIq>X6s(=Umy5A?trj9s)SoSpGdJ_`k4&vsII`-($w;I@0h!PF({Zc9R6d6mGmD zaa^%gt6J12VcC>SC_pK>=rxEp2uim_{?ZQq^&BCZ+H6ao7FD~LksXkj(VuSbb}7v) z*={S}xn#7NoSbKSM>e5ViETQfdR%BZ7`Z8&Ro8 z?(_%tQXV)yNnDR#DignSWI7~lpMDrh8O#86Oxwf`vEJD>+wHQcD2cd_S`#?c)`*Pu z31p#ZzVk$_FUu_=daWRxf{|#crN1*fZ6D0%L1Wnq%ye_tX8YpPr+xi%7sh0shLpQSTpt9 z%plihyCqlu$7eWAtN~6|<()HqE^5=Sgfea7=r5dFGh7;Lv*rl3L$obua$(uP2G+p9 zUIZK#^rWU{4z)%-mnLO~tTNjk7(a$r#jq>(4;$5lDG9NID_c&>qg}8`EKzgzL_V?{aEWGc=s4I;fx#I&1{Y}(Jq1?O(Z7Z{FE?QOQ}HS>cm99L%74}bQsqFS z?$JZ=tt86^P&gJUa3Sl?2IN9#^2FT%fR>mph;ZTbXjODO+%>KQN3j-hBJK%;(GFXp z5T8Llm~jeu%A@0vWP#)1>l$z!w#Vs7wJhNBFoW)r{Fq!M4+#CRCoydIq9PCul&Fqh zwpTfC!Xl~(O!qO|H-#kQ6-a)wJ}a912<6hf>kAo(s3mc`nCyB^Ck-Jhz?;4YSGe zK6&>WM^a2adCgxHYe!P$U#IRo7usi&8GZ7UFYi*|^2=3U_)443a@!4hUQ73F2V}f_ zA1A)va$(Q&w_O$vqHAy-zL4gxp{uvYdw=cnh!m3-y~Ffvs!H=;K=YMD2iO~1!}0O` zXO11f3FQ8NfkrLF^hji{{=Y(l^!{{qL*}vi2!baVpKo#Azwz#&r=YrNJ5BEM=X^S+ z!`UEH>e1O5;qvmfmWNoDmk(bh+bf5kzhSxO$CA0>(<-e&%k;J3_>b1Ld>de650%y3 zd~;rq@5+1UGk+yg_fGOf`zo>=@9s_y=iru8PqumMh@JHm!?W3o%C8m$Sw5$kFSFLO zYTmv;aNFr)F%mK|adz_IY_>f?IaN7&yZ{uraNx7mR` z5(?ph2St7)M>mwxJw~4aUj`Z;37N<3^NkT`L+S+&kVJ=^!gM-MvC&?{82yPIrklG8#eabe+rL1D z?f(`TlSvO}KwR1v{@muw)SU-%6 zl$dFk`6HLwN;Ss}aR-mgk)J_$T6W50x^w3F9|y2QN6^U^M$qt?)od0IYKd^bQtQ3AUGb&UGKd3^nrKjBDKc$VkDShQlHB zOD*^-Du(X-=3BpouWy82f+u6IQlnc6+TMazw+T=JFGYkmnNOKB_P%A&I{CPMXF#%^ zm#MKj0pP^z6n)pM?6I#o=@k&&J3q7ZDO1ZcANS&GQRP#nRQIgxp|82q6;Q<^zjWi3 z9&^j0@?Jm&dzo`EG5{UK9BXR%jE#QQ_X>&v$%r6;`Q>y(|Hx(8O)*rE;QH4#Y_)#W 
z4pZ(xE$6?0hyB09!@|t^zxM34RhP40=R)Z|)38JaT?n>G+6!03fs!1Da$0G%3>wxa zjFdtgY9v_?e|x>1UO6;R42~_I_vylo%X4{Jo*m63ew=kw_r*M@^VrBQ7noV=c-f#CTE588Q!MZD zkNTW(8=aPqTD|jpbz6%Af*zG$oG@_vdiBbV+!i;K6!`DCY+R1om%h=4NQNzPHNO(B zabexTr#ksjJT(%ApA3}yKy?aNIjuaFPh!45b+REt2=)*wI6g7=oJS8A<$OwW#+RSu za{1953YL+`uivzEeM&NHdhrn>*`u;aE77MPpcp8R{Qi8JW=y+N@a}r%>n1j?Jl1Bd z^AjA}H~KYd1?(9yCK+nftzMoajT`*Dr0FM_WzyJYe2!9kQJLVJG$si=z=OgSWu=DH zP3(up!824KNtF+Sd!UtC8tDs2h!R$9lZHY>mLsZ?AHsrKp?is z!_~&gs03q)cJQtg4#R^oF4qpx3|apsrGwH0)Al>3k^1c06l#NXD>ES6W(DLF3EWAy zRY>xiWP+zLgToIybQzW2IE|?FuGj7C6DKwr`7*A4Zah^~KDc-eh!)co2_R)BWNYO` z2%!l)!o)-k-A)+G^W3#*bgQUgOl3sc*&YGJ?7%XYXTL>^cG6F&J<9eLBnbvjWS&3i zSH-IwNCs1ok*iRaxoqZMpP+501E9}zhkXrG{X9FEM-cf@vorm*Co!^ssn=gc*wY(e)>emD z%p%jk!d2@-^L~TnoN$N13?IIuA-m(ai=AqXn0~D~+~B5Ne$EK?3c3(z=)unUdGP7Z z{&>NwdQtg7aeOGfj*4 zC3J^U8RivMZJQK=hlgCffY!f$`a5EhBjF!|iLxnw~C#@MHT~PNt zuIV8Zx5K%T?>e!sJNV#}JBgLc2frsBq4VG!S;rnU^8beMeypNnnz=EFni*Re+0g)O9E@Bnoz08@ZvXKc9E?Clwtt$?zzGQab9erk z$p2T12USl8GXRrN@dj;uwBU zbs3}TsCln)+G5%T-&5q-{~CtwrN`eaS)hO0T&KL7K45wLgoJ+{qw!Nzyp*Yn!4Ah>CLTp>6bg4xPfq zfcjaD9SyO|J4E7p?e)q*NkoF%-a+a@&Y;u}rUV37i<$*gp+bUh07z4|E+!7x>c$Bw z!pMXzp3}4Fg_qjRzb8)Bv{b^|+bhdet5;Gj{lo}05oj5L%ceEP70FEC@}gm6wkA`S z$Wa{9OvKwmlu+H@4T14A{!Yi6C~cg9@MGxhBFa(p0yoK3O`I1(a#ux&Eu%HbNwX@k z6YqdxFT3G~l~2cu-S=XN-i9RjP)ksbJm^f zM*1Z!kyzpajr~2-VxMeHgtt6li3=^VCC;|S!Dx*L+-4VetcZQP-1MNP6B%|PNEL(Z zQ2$POjL#hxJ@X8k?VF)7sduBB-f4d4Kb2?L{%q?3?C}NSJP~sR@3QDLDKTGhn=Oj8xDSXR#wdb=gPXzDJW)XXo0;_dh z6&`F{$_K-r@?Q#gxkuF-(iluWFk5d08`yh9)NdJAowA7)S<)5TKOc6}70?wkH#>+` z`omjnb1Xi?rJ3%3keD23u`auO3wH@Y+FC%qC+jXocX0~S_Y>#TIJW4~Gh2kR)f*l8 z2wf8BUeI-y5;Na-e{z4aANh55lX=Orpd!9kmc+-V=|)G|f!Y6bQ%ZU+Yk|qD6LWgS zf0@IG+3zO{IsTY`#)T+tdSanQWM7^<=-J1o+kCTxkJ(v;B(vX{Iftypy9?GN7^T)- zIj~5eXK`?~Zmd-cBB6_PbbL)v*f46Da#aAEY;U*%i{r_iQGDSKF+v>7@>0Nd3mMBL zRLm&h-1UI83*(0%;Wp<|e~nK6f+6!4w`yQ}j))F?_|;e~(X*t1>mT7l>j96Iu>Obt z2c$u{&%gQ?^WWoyrm~qi9FvrtshI~rhZ(@itp~@XV&(M}iN4m~YXEexi zEIngHlb`~y{0B(-|h)w^CH+Yz1orvE+aKv`KySKyUGX^-snFHm^6C^P( 
zIO+ptps?olKZetj2|)6LBfyG6vLq?nOEwt2L%H^%s023G{rary!@d*Q4Ar)c5GXj@ u_P0Z&FX@jzfIMLf>Qo`a{P*F<*~Q4o#lz|AuCZ|dfoyOT6k-bEaQ_DgvYAf+ literal 0 HcmV?d00001 diff --git a/src/genbench/tasks/icl_consistency_test/config.jsonnet b/src/genbench/tasks/icl_consistency_test/config.jsonnet index 3b5d222..ce9e921 100644 --- a/src/genbench/tasks/icl_consistency_test/config.jsonnet +++ b/src/genbench/tasks/icl_consistency_test/config.jsonnet @@ -6,20 +6,25 @@ // @TODO: Add a list of keywords that describe the task keywords: [ - 'keyword1', - 'keyword2', + 'consistency', + 'LLM', + 'robustness', + 'in-context learning', + 'icl', + ], authors: [ 'Lucas Weber', - ' Elia Bruni', - ' Dieuwke Hupkes', + 'Elia Bruni', + 'Dieuwke Hupkes', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + // 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all.jsonl', + test: 'https://drive.google.com/file/d/12K-qg66PTmlvzmpID_kijMjbixFT_juN/view?usp=sharing', }, has_validation_set: false, @@ -42,10 +47,10 @@ // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', + instruction_zero_shot: '', + instruction_few_shot: '', + input_prefix: '', + output_prefix: '', } }, }, diff --git a/src/genbench/tasks/icl_consistency_test/doc.md b/src/genbench/tasks/icl_consistency_test/doc.md index 92811d6..c2d5f78 100644 --- a/src/genbench/tasks/icl_consistency_test/doc.md +++ b/src/genbench/tasks/icl_consistency_test/doc.md @@ -1,19 +1,63 @@ # ICL consistency test ## Abstract -*Copy the abstract of your accompanying paper for this task here ICL consistency test.* +Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. +Just like the previous generation of task-tuned models (TT), models that are adapted to tasks via in-context-learning (ICL) or instruction tuning (IT) are robust in some setups, but not in others. +Here, we present a detailed analysis of which design choices cause instabilities and inconsistencies in LLM predictions. +First, we show how spurious correlations between input distributions and labels -- a known issue in TT models -- form only a minor problem for prompted models. +Then we engage in a systematic, holistic evaluation of different factors that have been found to influence predictions in a prompting setup. +We test all possible combinations of a range of factors on both vanilla and instruction-tuned LLMs of different scale, and statistically analyse the results to show which factors are the most influential, the most interactive or the most stable. +From our results, we deduce which factors can be used without precautions, should be avoided or handled with care in most settings. 
## Examples *Give some examples of the ICL consistency test.* ## Usage -*Describe how to load your task and what is required for evaluation, if anything.* +#### Dataloading +The task can loaded through the default GenBench interface as a zero-shot task: +```python +from genbench import load_task +from genbench.api import PreparationStrategy + +task = load_task("icl_consistency_test") +ds = task.get_prepared_datasets( + PreparationStrategy.PROMPT_BASED_TESTING, + shot_list=[0] + )[0] +``` +#### Evaluation +Provide the evaluation function with the model outputs as strings, accompanied by the corresponding setup-ids and data-ids +from the original dataset. +For the predictions, please follow the following format: + +`predictions: Dict[setup_ID, Dict[data_ID, model_output]]` + +For the gold labels, please provide the original dataset ds: + +`gold: datasets.Dataset` + +With this input, run the task evaluation like so: +```python +results = task.evaluate_predictions(predictions=predictions, + gold=ds) +``` ## Data Source -*Describe the data source for this ICL consistency test.* +The original data stems from the ANLI dataset (Nie et al., 2019). ## Limitations and Bias +- the number of factors in limited and does not cover all possible factors that might influence the predictions +- currently only works for ANLI +- factors such as _Instruction tuning_ or _calibration_ are dependent of the model inference process (Which model is evaluated? How is it evalauted?) These factors have to be manually added by the user. + *Note any known limitations or biases that the ICL consistency test has, with links and references if possible.* ## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. +- The task is evaluating the consistency of LLM predictions across different setups. 
It evaluates to which degree predictions change if we change certain factors in the prompt design. + + +[Genbench Eval Card](GenBench_eval_card.pdf) + +## References + +Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., & Kiela, D. (2019). Adversarial NLI: A new benchmark for natural language understanding. arXiv preprint arXiv:1910.14599. \ No newline at end of file diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index a55ad15..573275f 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -1,5 +1,164 @@ +from typing import Any, Dict, List from genbench import Task +from sklearn.metrics import cohen_kappa_score +from pandas import DataFrame +import datasets + +label_map_to_numeric = {'Impossible': 2, + 'Inconclusive': 1, + 'Correct': 0, + 'True': 0, + 'Always': 0, + 'Yes': 0, + 'Possible': 1, + 'Never': 2, + 'Incorrect': 2, + 'False': 2, + 'Sometimes': 1, + 'No': 2, + 'Maybe': 1, + 'Guaranteed': 0, + 'Neither': 1, + 'no': 2, + 'yes': 0, + 'Not Duplicates': 2, + 'Duplicates': 0, + 'not duplicates': 2, + 'duplicates': 0, + } + +factors = ['dataset', 'balanced_labels', 'cross_task', 'diverse_context', 'n_shots', 'template', 'calibrate', ] + + +# @Task.register(IclConsistencyTestTask) class IclConsistencyTestTask(Task): - pass + """Python implementation of the ICL consistency test task.""" + + def evaluate_predictions( + self, + *, + predictions: Dict[str, Dict[str, Any]], + gold: datasets.Dataset, + ) -> Dict[str, Any]: + """Evaluate the predictions of the model against the gold data. + Calculating exact match accuracy plus consistency across all setups (Cohen's kappa). + + Args: + predictions: A dictionary of dictionary, where the keys of the outer dictionary contains + the setup_IDs and the inner dictionary the data_IDs. The values of the inner dictionary + are the predictions for the example. 
The keys are strings and the values can be any type. + gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. + + Returns: + A dictionary containing key-value pairs for the evaluation metric(s) computed on the predicted + values. The keys are strings representing the name of the evaluation metric and the values are + floating-point numbers. + """ + # TODO: + # 1. Insert some assert statements, assuming that we have the same data_IDs for all setups. + # 2. For consistency metric (Cohen's kappa): mask out out-of-label-space predictions (-1) + # 3. For consistency metric (Cohen's kappa): make sure that prediction vectors are aligned + # (according to data_IDs) [Order results_df similar to what I did in the analysis notebook.] + + gold_labels_numeric = {} + gold_pandas = gold.to_pandas() + for data_id in set(gold_pandas['data_ID']): + gold_labels_numeric[str(data_id)] = gold_pandas.loc[ + gold_pandas['data_ID'] == data_id]['target_numeric'].to_list()[0] + + results_df = self._create_df(predictions, gold_labels_numeric) + + em = {factor: [] for factor in factors} + em.update({ + 'accuracy': [], + }) + + # Compute the exact match accuracy for each setup. + for setup_ID in predictions: + used_data = results_df.loc[results_df['setup_ID'] == setup_ID] + temp = self._convert_numeric_id_to_dict(setup_ID, n_repititions=1) + for factor in factors: + em[factor] += temp[factor] + + em['accuracy'] += [(used_data['predictions_numeric'] == used_data['target_numeric']).mean()] + + # Compute the Cohen's kappa for consistency. + try: + kappas = {} + for factor in factors: + if factor == 'dataset': + continue + factor_present = results_df.loc[results_df[factor] == '1']['predictions_numeric'] + factor_absent = results_df.loc[results_df[factor] == '0']['predictions_numeric'] + kappas[factor] = cohen_kappa_score(factor_present, factor_absent) + except: + breakpoint() + + # Return the evaluation metrics. 
+ return {"exact_match_accuracy": em, + "kappas": kappas} + + def _create_df(self, predictions: Dict[str, Dict[str, Any]], gold_labels: Dict[str, int]) -> DataFrame: + """Create a dataframe containing all predictions, gold labels and labels. + + Args: + predictions: A dictionary of dictionary, where the keys of the outer dictionary contains + the setup_IDs and the inner dictionary the data_IDs. The values of the inner dictionary + are the predictions for the example. The keys are strings and the values can be any type. + gold: A dictionary, where the keys are the data_IDs and the values are the gold labels for the example. + The keys are strings and the values can be any type. + + Returns: + A pandas dataframe containing the predictions and gold data. + """ + results_dict = {factor: [] for factor in factors} + results_dict.update({ + 'predictions_numeric': [], + 'target_numeric': [], + 'setup_ID': [], + }) + + for setup_ID in predictions: + n_datapoints = len(predictions[setup_ID]) + results_dict['predictions_numeric'] += [self._label_to_numeric(predictions[setup_ID][data_ID]) for + data_ID in predictions[setup_ID].keys()] + results_dict['target_numeric'] += [gold_labels[data_ID] for data_ID in predictions[setup_ID].keys()] + results_dict['setup_ID'] += [setup_ID] * n_datapoints + temp = self._convert_numeric_id_to_dict(setup_ID, n_repititions=n_datapoints) + for factor in factors: + results_dict[factor] += temp[factor] + + return DataFrame(results_dict) + + @staticmethod + def _convert_numeric_id_to_dict(setup_id: str, n_repititions: int = 1) -> Dict[str, Any]: + """Convert a numeric setup_ID to a interpretable dict. + + Args: + id: A numeric ID. + + Returns: + A dict containing factors as keys and the factor realisation as value. 
+ """ + setup_dict = {} + for factor, value in zip(factors, setup_id): + setup_dict[factor] = [value] * n_repititions + + return setup_dict + + @staticmethod + def _label_to_numeric(label: str) -> int: + """Convert a label to a numeric value. + + Args: + label: A label. + + Returns: + A numeric label. + """ + if label in label_map_to_numeric: + return label_map_to_numeric[label] + else: + return -1 From c9a97e21db449a20608ff5373114fb776a19aaaf Mon Sep 17 00:00:00 2001 From: lucasweber Date: Thu, 20 Jul 2023 17:30:38 +0200 Subject: [PATCH 05/57] .. --- example_evaluation.py | 4 ++-- src/genbench/tasks/icl_consistency_test/config.jsonnet | 4 ++-- src/genbench/tasks/icl_consistency_test/task.py | 4 +--- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/example_evaluation.py b/example_evaluation.py index d15782b..f2ce80b 100644 --- a/example_evaluation.py +++ b/example_evaluation.py @@ -6,13 +6,13 @@ from tqdm import tqdm -n_datapoints = 10 +n_datapoints = 2 task = load_task("icl_consistency_test") ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] # selecting a subset of example for illustration purposes -subset = list(set(ds['data_ID']))[:n_samples] +subset = list(set(ds['data_ID']))[:n_datapoints] ds = ds.filter(lambda x: x['data_ID'] in subset) generator = pipeline('text-generation', model='DistilGPT2') diff --git a/src/genbench/tasks/icl_consistency_test/config.jsonnet b/src/genbench/tasks/icl_consistency_test/config.jsonnet index ce9e921..e10f89e 100644 --- a/src/genbench/tasks/icl_consistency_test/config.jsonnet +++ b/src/genbench/tasks/icl_consistency_test/config.jsonnet @@ -23,8 +23,8 @@ data_source: { type: 'manual', - // 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all.jsonl', - test: 'https://drive.google.com/file/d/12K-qg66PTmlvzmpID_kijMjbixFT_juN/view?usp=sharing', + // 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all.jsonl', 
'https://drive.google.com/file/d/12K-qg66PTmlvzmpID_kijMjbixFT_juN/view?usp=sharing', + test: 'https://github.com/LucWeber/icl_consistency_data/raw/main/data/genbench_all.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index 573275f..89f4880 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -29,7 +29,7 @@ 'duplicates': 0, } -factors = ['dataset', 'balanced_labels', 'cross_task', 'diverse_context', 'n_shots', 'template', 'calibrate', ] +factors = ['balanced_labels', 'cross_task', 'diverse_context', 'n_shots', 'template', 'calibrate', ] # @Task.register(IclConsistencyTestTask) @@ -88,8 +88,6 @@ def evaluate_predictions( try: kappas = {} for factor in factors: - if factor == 'dataset': - continue factor_present = results_df.loc[results_df[factor] == '1']['predictions_numeric'] factor_absent = results_df.loc[results_df[factor] == '0']['predictions_numeric'] kappas[factor] = cohen_kappa_score(factor_present, factor_absent) From 5b13c7ab0e26e7bb093f0e97522280253411788b Mon Sep 17 00:00:00 2001 From: drndr Date: Tue, 25 Jul 2023 13:36:21 +0200 Subject: [PATCH 06/57] update clf configs --- .../nl_codesearch_clf/codesearchnet_go/config.jsonnet | 11 ++++++----- .../codesearchnet_java/config.jsonnet | 11 ++++++----- .../codesearchnet_javascript/config.jsonnet | 11 ++++++----- .../codesearchnet_php/config.jsonnet | 11 ++++++----- .../codesearchnet_ruby/config.jsonnet | 11 ++++++----- .../nl_codesearch_clf/statcodesearch/config.jsonnet | 11 ++++++----- 6 files changed, 36 insertions(+), 30 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet index 26ffa93..c20b68c 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet +++ 
b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_go/test_sample_cbt.json', }, has_validation_set: false, @@ -32,13 +32,14 @@ evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + hf_id: 'accuracy', ], preparation_strategies: { + + finetuning: { + objective: 'binary_crossentropy', + }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet index 427e0b6..3abf69a 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_java/test_sample_cbt.json', }, has_validation_set: false, @@ -32,13 +32,14 @@ evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + hf_id: 'accuracy', ], preparation_strategies: { + + finetuning: { + objective: 'binary_crossentropy', + }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
// We provide a few options for configuring the prompt. But, the task creator can diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet index af49e87..ec48eb3 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_javascript/test_sample_cbt.json', }, has_validation_set: false, @@ -32,13 +32,14 @@ evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + hf_id: 'accuracy', ], preparation_strategies: { + + finetuning: { + objective: 'binary_crossentropy', + }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. 
But, the task creator can diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet index 349a4a8..7e8e71c 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_php/test_sample_cbt.json', }, has_validation_set: false, @@ -32,13 +32,14 @@ evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + hf_id: 'accuracy', ], preparation_strategies: { + + finetuning: { + objective: 'binary_crossentropy', + }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. 
But, the task creator can diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet index 1cf2cc4..bc4ecd2 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_ruby/test_sample_cbt.json', }, has_validation_set: false, @@ -32,13 +32,14 @@ evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + hf_id: 'accuracy', ], preparation_strategies: { + + finetuning: { + objective: 'binary_crossentropy', + }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. 
But, the task creator can diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet index dfb9c42..67c6695 100644 --- a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet @@ -23,7 +23,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/statcodesearch/test_sample_cbt.json', }, has_validation_set: false, @@ -33,13 +33,14 @@ evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + hf_id: 'accuracy', ], preparation_strategies: { + + finetuning: { + objective: 'binary_crossentropy', + }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. 
But, the task creator can From fd82cb405d418e98a5244308a34c1915ebb206e1 Mon Sep 17 00:00:00 2001 From: drndr Date: Tue, 25 Jul 2023 16:26:37 +0200 Subject: [PATCH 07/57] fix clf configs --- .../codesearchnet_adv/config.jsonnet | 23 +++++++++++++------ .../codesearchnet_go/config.jsonnet | 8 +++++-- .../codesearchnet_java/config.jsonnet | 8 +++++-- .../codesearchnet_javascript/config.jsonnet | 8 +++++-- .../codesearchnet_php/config.jsonnet | 8 +++++-- .../codesearchnet_ruby/config.jsonnet | 8 +++++-- .../statcodesearch/config.jsonnet | 8 +++++-- .../nl_codesearch_clf/webquery/config.jsonnet | 8 +++++-- 8 files changed, 58 insertions(+), 21 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet index 1583456..276829a 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet @@ -2,7 +2,7 @@ name: 'Natural Language Codesearch Classification (codesearchnet_adv)', // @TODO: Add a description of the task - description: 'Natural Language Codesearch Classification (codesearchnet_adv) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift'', + description: 'Natural Language Codesearch Classification (codesearchnet_adv) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures robustness in covariate shift', // @TODO: Add a list of keywords that describe the task keywords: [ @@ -11,7 +11,7 @@ 'binary classification', 'python', 'robustness', - 'covariate shift' + 'covariate shift', ], authors: [ @@ -24,16 +24,20 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, has_train_set: true, - task_type: 'multi_choice', + task_type: 'multiple_choice', evaluation_metrics: [ { hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { @@ -42,15 +46,20 @@ // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. finetuning: { - objective: 'binary_crossentropy', + objective: 'maximum_likelihood', }, prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', + // Currently, we follow BIG-bench options for prompt construction: + // https://github.com/google/BIG-bench/blob/main/docs/doc.md#optional-fields + instruction_zero_shot: 'Add two numbers together', input_prefix: 'Q: ', - output_prefix: '\nA: ', + output_prefix: 'A: ', + choices_prefix: '\n choice: ', + append_choices_to_input: true, + few_shot_example_separator: '\n', + stop_string: '\n\n', } }, }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet index c20b68c..aa6154c 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet @@ -23,22 +23,26 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_go/test_sample_cbt.json', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, has_train_set: true, - task_type: 'multi_choice', + task_type: 'multiple_choice', evaluation_metrics: [ { hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { finetuning: { - objective: 'binary_crossentropy', + objective: 'maximum_likelihood', }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet index 3abf69a..5ca8d57 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet @@ -23,22 +23,26 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_java/test_sample_cbt.json', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, has_train_set: true, - task_type: 'multi_choice', + task_type: 'multiple_choice', evaluation_metrics: [ { hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { finetuning: { - objective: 'binary_crossentropy', + objective: 'maximum_likelihood', }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet index ec48eb3..851c047 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet @@ -23,22 +23,26 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_javascript/test_sample_cbt.json', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, has_train_set: true, - task_type: 'multi_choice', + task_type: 'multiple_choice', evaluation_metrics: [ { hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { finetuning: { - objective: 'binary_crossentropy', + objective: 'maximum_likelihood', }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet index 7e8e71c..e5face3 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet @@ -23,22 +23,26 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_php/test_sample_cbt.json', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, has_train_set: true, - task_type: 'multi_choice', + task_type: 'multiple_choice', evaluation_metrics: [ { hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { finetuning: { - objective: 'binary_crossentropy', + objective: 'maximum_likelihood', }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet index bc4ecd2..23486f8 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet @@ -23,22 +23,26 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_ruby/test_sample_cbt.json', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, has_train_set: true, - task_type: 'multi_choice', + task_type: 'multiple_choice', evaluation_metrics: [ { hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { finetuning: { - objective: 'binary_crossentropy', + objective: 'maximum_likelihood', }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet index 67c6695..43dd2fa 100644 --- a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet @@ -24,22 +24,26 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/statcodesearch/test_sample_cbt.json', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, has_train_set: true, - task_type: 'multi_choice', + task_type: 'multiple_choice', evaluation_metrics: [ { hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { finetuning: { - objective: 'binary_crossentropy', + objective: 'maximum_likelihood', }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet index 29171f4..e4d2d8a 100644 --- a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet @@ -24,22 +24,26 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/webquery/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, has_train_set: true, - task_type: 'multi_choice', + task_type: 'multiple_choice', evaluation_metrics: [ { hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { finetuning: { - objective: 'binary_crossentropy', + objective: 'maximum_likelihood', }, // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. From 7978de2f071d5746f4fff3acabf75a36d1bfc209 Mon Sep 17 00:00:00 2001 From: lucas Date: Tue, 25 Jul 2023 17:01:05 +0200 Subject: [PATCH 08/57] .. 
--- example_evaluation.py | 82 +++++--- .../tasks/icl_consistency_test/config.jsonnet | 3 +- .../tasks/icl_consistency_test/task.py | 189 ++++++++++-------- 3 files changed, 163 insertions(+), 111 deletions(-) diff --git a/example_evaluation.py b/example_evaluation.py index f2ce80b..53f796a 100644 --- a/example_evaluation.py +++ b/example_evaluation.py @@ -6,32 +6,60 @@ from tqdm import tqdm -n_datapoints = 2 - -task = load_task("icl_consistency_test") -ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] - -# selecting a subset of example for illustration purposes -subset = list(set(ds['data_ID']))[:n_datapoints] -ds = ds.filter(lambda x: x['data_ID'] in subset) - -generator = pipeline('text-generation', model='DistilGPT2') - -predictions = {} -for datapoint in tqdm(ds): - prediction = generator(datapoint['input'], - max_new_tokens=1, - num_return_sequences=1, - do_sample=False, - return_full_text=False, - pad_token_id=generator.tokenizer.eos_token_id - ) - current_setup = str(datapoint['setup_ID']) - current_data_ID = str(datapoint['data_ID']) - - if current_setup in predictions.keys(): - predictions[current_setup].update({current_data_ID: prediction[0]['generated_text'].strip()}) +# delete after testing +import torch +import os + +N_DATAPOINTS = 10 + + +def make_predictions(generator, dataset): + predictions = {} + for datapoint in tqdm(dataset): + prediction = generator(datapoint['input'], + max_new_tokens=1, + num_return_sequences=1, + do_sample=False, + return_full_text=False, + pad_token_id=generator.tokenizer.eos_token_id + ) + current_setup = str(datapoint['setup_ID']) + current_data_ID = str(datapoint['data_ID']) + + if current_setup in predictions.keys(): + predictions[current_setup].update({current_data_ID: prediction[0]['generated_text'].strip()}) + else: + predictions[current_setup] = {current_data_ID: prediction[0]['generated_text'].strip()} + + return predictions + + +if __name__ == '__main__': + # Load the 
task + task = load_task("icl_consistency_test") + + if not os.path.exists('cache.p'): + ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] + + # Selecting a subset of example for illustration purposes + subset = list(set(ds['data_ID']))[:N_DATAPOINTS] + ds = ds.filter(lambda x: x['data_ID'] in subset) + + # Generate predictions for the dataset + generator = pipeline('text-generation', model='gpt2') + predictions = make_predictions(generator, ds) + + # OPTIONAL: The ICL-consistency test provides the option to add factors to the analysis by using the `add_factor` + # method (here exemplified with distillation). + generator_distil = pipeline('text-generation', model='DistilGPT2') + predictions_distil = make_predictions(generator_distil, ds) + torch.save((predictions, predictions_distil, ds), 'cache.p') else: - predictions[current_setup] = {current_data_ID: prediction[0]['generated_text'].strip()} + predictions, predictions_distil, ds = torch.load('cache.p') + predictions = task.add_factor(data=(predictions, predictions_distil), factor='distillation') + # Evaluate the predictions + results = task.evaluate_predictions(predictions=predictions, gold=ds) -results = task.evaluate_predictions(predictions=predictions, gold=ds) + print('EVALUATED SUCCESSFULLY!') + print(f'Exact-match accuracies: \n{results["exact_match_accuracy"]}') + print(f'Consistency: \n{results["kappas"]}') diff --git a/src/genbench/tasks/icl_consistency_test/config.jsonnet b/src/genbench/tasks/icl_consistency_test/config.jsonnet index e10f89e..80746d5 100644 --- a/src/genbench/tasks/icl_consistency_test/config.jsonnet +++ b/src/genbench/tasks/icl_consistency_test/config.jsonnet @@ -23,8 +23,7 @@ data_source: { type: 'manual', - // 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all.jsonl', 'https://drive.google.com/file/d/12K-qg66PTmlvzmpID_kijMjbixFT_juN/view?usp=sharing', - test: 
'https://github.com/LucWeber/icl_consistency_data/raw/main/data/genbench_all.jsonl', + test: 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index 89f4880..fdbbf8f 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, List +from typing import Any, Dict, List, Tuple from genbench import Task from sklearn.metrics import cohen_kappa_score @@ -6,33 +6,25 @@ import datasets -label_map_to_numeric = {'Impossible': 2, - 'Inconclusive': 1, - 'Correct': 0, - 'True': 0, - 'Always': 0, - 'Yes': 0, - 'Possible': 1, - 'Never': 2, - 'Incorrect': 2, - 'False': 2, - 'Sometimes': 1, - 'No': 2, - 'Maybe': 1, - 'Guaranteed': 0, - 'Neither': 1, - 'no': 2, - 'yes': 0, - 'Not Duplicates': 2, - 'Duplicates': 0, - 'not duplicates': 2, - 'duplicates': 0, - } - -factors = ['balanced_labels', 'cross_task', 'diverse_context', 'n_shots', 'template', 'calibrate', ] - - -# @Task.register(IclConsistencyTestTask) +LABELS = [['Correct', 'True', 'Always', 'Yes', 'Guaranteed', 'Duplicates'], # `correct` labels + ['Inconclusive', 'Possible', 'Sometimes', 'Maybe', 'Neither'], # `neutral` labels + ['Impossible', 'Never', 'Incorrect', 'False', 'No', 'Not Duplicates'], ] # `incorrect` labels + +LABEL_TO_NUMERIC = {} +LABEL_TO_NUMERIC.update(dict([(label, i) for i, label_subset in enumerate(LABELS) for label in label_subset])) +LABEL_TO_NUMERIC.update(dict([(label.lower(), i) for i, label_subset in enumerate(LABELS) for label in label_subset])) + +factors = [ + 'balanced_labels', + 'one_label', + 'cross_task', + 'cross_instructions', + 'n_shots', + 'instructions', + 'instruction_quality', +] + + class IclConsistencyTestTask(Task): """Python implementation of the ICL consistency test task.""" @@ -56,48 +48,65 @@ def 
evaluate_predictions( values. The keys are strings representing the name of the evaluation metric and the values are floating-point numbers. """ - # TODO: - # 1. Insert some assert statements, assuming that we have the same data_IDs for all setups. - # 2. For consistency metric (Cohen's kappa): mask out out-of-label-space predictions (-1) - # 3. For consistency metric (Cohen's kappa): make sure that prediction vectors are aligned - # (according to data_IDs) [Order results_df similar to what I did in the analysis notebook.] + self._set_factors() - gold_labels_numeric = {} gold_pandas = gold.to_pandas() - for data_id in set(gold_pandas['data_ID']): - gold_labels_numeric[str(data_id)] = gold_pandas.loc[ - gold_pandas['data_ID'] == data_id]['target_numeric'].to_list()[0] + gold_pandas['data_ID'] = gold_pandas['data_ID'].astype(str) + gold_labels_numeric = gold_pandas.set_index('data_ID')['target_numeric'].to_dict() results_df = self._create_df(predictions, gold_labels_numeric) - - em = {factor: [] for factor in factors} - em.update({ - 'accuracy': [], - }) + results_df = results_df.sort_values(by=['setup_ID', 'data_ID']) + self._assert_equal_data_ids(results_df) # Compute the exact match accuracy for each setup. - for setup_ID in predictions: - used_data = results_df.loc[results_df['setup_ID'] == setup_ID] - temp = self._convert_numeric_id_to_dict(setup_ID, n_repititions=1) - for factor in factors: - em[factor] += temp[factor] - - em['accuracy'] += [(used_data['predictions_numeric'] == used_data['target_numeric']).mean()] + em = {factor: [] for factor in self.factors + ['accuracy']} + for setup_ID, setup_predictions in results_df.groupby('setup_ID'): + temp = self._convert_numeric_id_to_dict(setup_ID, n_repetitions=1) + for factor in self.factors: + em[factor].extend(temp[factor]) + em['accuracy'].append((setup_predictions['predictions_numeric'] == setup_predictions['target_numeric']).mean()) # Compute the Cohen's kappa for consistency. 
- try: - kappas = {} - for factor in factors: - factor_present = results_df.loc[results_df[factor] == '1']['predictions_numeric'] - factor_absent = results_df.loc[results_df[factor] == '0']['predictions_numeric'] - kappas[factor] = cohen_kappa_score(factor_present, factor_absent) - except: - breakpoint() + kappas = {} + for factor in self.factors: + factor_present = results_df.loc[results_df[factor] == '1']['predictions_numeric'] + factor_absent = results_df.loc[results_df[factor] == '0']['predictions_numeric'] + mask = [(f1 != -1 and f2 != -1) for f1, f2 in zip(factor_absent, factor_present)] + factor_present, factor_absent = factor_present[mask], factor_absent[mask] + + kappas[factor] = cohen_kappa_score(factor_present, factor_absent) # Return the evaluation metrics. return {"exact_match_accuracy": em, "kappas": kappas} + def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str, Any]]: + """Concatenate the data with the factor present and absent and update the setup_IDs accordingly. Also add the + respective factor to the list of factors. + + Args: + data: A tuple containing predictions, where the first element are predictions with factor absent and the + second element are predictions with factor present. + factor: A string representing a factor. + + """ + + # Update the setup_IDs of the data by appending a 0 when the factor is absent or 1 when the factor is present. + setup_ids0 = list(data[0].keys()) + setup_ids1 = list(data[1].keys()) + + for setup_id0, setup_id1 in zip(setup_ids0, setup_ids1): + updated_id0 = setup_id0 + '0' + updated_id1 = setup_id1 + '1' + data[0][updated_id0] = data[0].pop(setup_id0) + data[1][updated_id1] = data[1].pop(setup_id1) + + # Add factor to list of factors. 
+ self._set_factors() + self.factors.append(factor) + + return {**data[0], **data[1]} + def _create_df(self, predictions: Dict[str, Dict[str, Any]], gold_labels: Dict[str, int]) -> DataFrame: """Create a dataframe containing all predictions, gold labels and labels. @@ -111,38 +120,43 @@ def _create_df(self, predictions: Dict[str, Dict[str, Any]], gold_labels: Dict[s Returns: A pandas dataframe containing the predictions and gold data. """ - results_dict = {factor: [] for factor in factors} - results_dict.update({ - 'predictions_numeric': [], - 'target_numeric': [], - 'setup_ID': [], - }) - - for setup_ID in predictions: - n_datapoints = len(predictions[setup_ID]) - results_dict['predictions_numeric'] += [self._label_to_numeric(predictions[setup_ID][data_ID]) for - data_ID in predictions[setup_ID].keys()] - results_dict['target_numeric'] += [gold_labels[data_ID] for data_ID in predictions[setup_ID].keys()] - results_dict['setup_ID'] += [setup_ID] * n_datapoints - temp = self._convert_numeric_id_to_dict(setup_ID, n_repititions=n_datapoints) - for factor in factors: - results_dict[factor] += temp[factor] + additional_keys = ['predictions_numeric', 'target_numeric', 'setup_ID', 'data_ID'] + results_dict = {factor: [] for factor in self.factors + additional_keys} + breakpoint() + for setup_ID, predictions_setup in predictions.items(): + data_ids = list(predictions_setup.keys()) + n_datapoints = len(data_ids) + + results_dict['data_ID'].extend(data_ids) + results_dict['setup_ID'].extend([setup_ID] * n_datapoints) + results_dict['target_numeric'].extend(gold_labels[data_id] for data_id in data_ids) + results_dict['predictions_numeric'].extend(self._label_to_numeric(predictions_setup[data_id]) for + data_id in data_ids) + + temp = self._convert_numeric_id_to_dict(setup_ID, n_repetitions=n_datapoints) + for factor in self.factors: + results_dict[factor].extend(temp[factor]) return DataFrame(results_dict) - @staticmethod - def _convert_numeric_id_to_dict(setup_id: str, 
n_repititions: int = 1) -> Dict[str, Any]: + def _set_factors(self): + if not hasattr(self, 'factors'): + self.factors = factors + + def _convert_numeric_id_to_dict(self, setup_id: str, n_repetitions: int = 1) -> Dict[str, Any]: """Convert a numeric setup_ID to a interpretable dict. Args: - id: A numeric ID. + id: A numeric ID of the form `id_1010101' where each digit represents a factor. Returns: A dict containing factors as keys and the factor realisation as value. """ + setup_id = setup_id.split('_')[1] + setup_dict = {} - for factor, value in zip(factors, setup_id): - setup_dict[factor] = [value] * n_repititions + for factor, value in zip(self.factors, setup_id): + setup_dict[factor] = [value] * n_repetitions return setup_dict @@ -156,7 +170,18 @@ def _label_to_numeric(label: str) -> int: Returns: A numeric label. """ - if label in label_map_to_numeric: - return label_map_to_numeric[label] - else: - return -1 + return LABEL_TO_NUMERIC[label] if label in LABEL_TO_NUMERIC else -1 + + + @staticmethod + def _assert_equal_data_ids(results_df: DataFrame) -> None: + """Assert that all data_IDs are the same for all setups. + + Args: + results_df: A pandas dataframe containing the predictions and gold data. + """ + used_data_ids = results_df['data_ID'].unique() + for setup_ID in results_df['setup_ID'].unique(): + assert used_data_ids.sort() == results_df.loc[results_df['setup_ID'] == setup_ID][ + 'data_ID'].unique().sort(), \ + "Not all data_IDs are the same for all setups. Check for missing predictions!" From ad279158ee6fdeacd0faff79b0a6e33634164693 Mon Sep 17 00:00:00 2001 From: lucas Date: Wed, 26 Jul 2023 16:51:37 +0200 Subject: [PATCH 09/57] .. 
--- example_evaluation.py | 12 +-- .../GenBench_eval_card.pdf | Bin 46680 -> 43069 bytes .../GenBench_eval_card.png | Bin 0 -> 91665 bytes .../tasks/icl_consistency_test/config.jsonnet | 14 +-- .../tasks/icl_consistency_test/doc.md | 87 ++++++++++++++++-- .../tasks/icl_consistency_test/task.py | 39 +++++++- 6 files changed, 121 insertions(+), 31 deletions(-) create mode 100644 src/genbench/tasks/icl_consistency_test/GenBench_eval_card.png diff --git a/example_evaluation.py b/example_evaluation.py index 53f796a..69ed445 100644 --- a/example_evaluation.py +++ b/example_evaluation.py @@ -10,7 +10,7 @@ import torch import os -N_DATAPOINTS = 10 +N_DATAPOINTS = 1 def make_predictions(generator, dataset): @@ -38,7 +38,7 @@ def make_predictions(generator, dataset): # Load the task task = load_task("icl_consistency_test") - if not os.path.exists('cache.p'): + if not os.path.exists(f'cache_{N_DATAPOINTS}.p'): ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] # Selecting a subset of example for illustration purposes @@ -49,13 +49,13 @@ def make_predictions(generator, dataset): generator = pipeline('text-generation', model='gpt2') predictions = make_predictions(generator, ds) - # OPTIONAL: The ICL-consistency test provides the option to add factors to the analysis by using the `add_factor` - # method (here exemplified with distillation). + # OPTIONAL: The ICL-consistency test provides the option to add factors to the analysis by using the + # `add_factor` method (here exemplified with distillation). 
generator_distil = pipeline('text-generation', model='DistilGPT2') predictions_distil = make_predictions(generator_distil, ds) - torch.save((predictions, predictions_distil, ds), 'cache.p') + torch.save((predictions, predictions_distil, ds), f'cache_{N_DATAPOINTS}.p') else: - predictions, predictions_distil, ds = torch.load('cache.p') + predictions, predictions_distil, ds = torch.load(f'cache_{N_DATAPOINTS}.p') predictions = task.add_factor(data=(predictions, predictions_distil), factor='distillation') # Evaluate the predictions results = task.evaluate_predictions(predictions=predictions, gold=ds) diff --git a/src/genbench/tasks/icl_consistency_test/GenBench_eval_card.pdf b/src/genbench/tasks/icl_consistency_test/GenBench_eval_card.pdf index 3f7530200f507a9d1b6591c7712255cd8a3fc707..037573426f0d8c61be742604deb230985c2aade9 100644 GIT binary patch delta 2205 zcmV;O2x9lx>;k>e0+1w=PN*t>tyfKR(>fI0=T~ScFRZ6~^}U1w9T;GEG&^8nQa5;L z9PkHB=l%J2Wjn1&oG?zCg)Q4xM>xp)7$0J=*C473*xkXdU*2!X9O zi%(x9yQYtCm=r2__PYz+Fr~FH(o_4ET`m6I0|^ntC}H(U5DHfU3=9~5Qn)xJy8bz- zAOK*D7e6SiXs)nBsB(4_AqGy*my3U1DaC>a7A;%eFbqmiZ)=<|L9^vG`^4XEo8nhu zHP4!2TP9zYZx%0?qoFnRkqBo{nUF)F@aRrsprjZaFggy}GADwQ5IKM|0=;5ZTcIrU zBtBG$F+5u}#j0!ECaE=l)VSEj$8}i{$DH0-iTrihR7F`+WNKZo{8i!=&x)$d>pIe> z@o+pI(GJK`kV3;S+-E6OlA?6p^zGxS&1O8I8U$gTeL538;GJb62Q*#HERr%fC~}#X zd6nH3b?>57k)}Irea)ANN1oN+BXb7c3+*-2!YLfE@55t2 z>V%e#r9o&r?98_vdbw`1btfM)C}aRO3faTZ!jl+dj2GKYR&=|X^cEWZcm^N4&R^TQ zIp%$mLI(1_5unw7$ooeZbM`_^tfjL8)zjt75#D(@(+X)7F_QxPAnVn);-HAisSpkg0K4S0@6OvT-H}$YKB3BNMu)0sPZiL?BNH1^ zb)%goK}b2&@U9N`Rw&BN3!+_X% z!fG@#MKhXVwfTY^{)vegH1wQ5F@G4vk9?wNw*Wa3CsufCdIr!pkeUv@-V_%dCe45*7y#hycG=A;4OoDjDb0rm12=8Wef&9 z!F4V!I(+)&f3aZvCSMgD-rsGjVq29=V;$oT9~b5BuwN7<7L^pEV!6qCyU%PUpm121 zk80jQ_#SacPhPKIr^G`fV;2{U54C27ED(*uiW*U2RkawfT9OdSZ}z*zq^Q^oiy90W zpBA4hHm9UBE5y7r^WJBH-qB^2d26!BybBrXCSvB9e^)*;%nN}kp$y5(NQKcll?4`* zVa}_}vFIeCYLB24!P98{*66Pi#`{su9PRC*+9tDFr8&?;t%S#+S0OaN3Zw>2he z#Mfh6Q~R{_?buqcya7_6LNp7kJ&!V5qG)sg6>S|^{a2fpI^{Bip(M%4FU6y@Q5fhV 
zN@%nLe{l;>gSo`$qReRLr5Cvbr&G1av6h3QYP*y&kVi1^4K8N1qeD`sNj-W&ZPAis z*OD!^Z!I-*8=It7QsWvKEt6mw}_P>^G0qZeQ>H_xS@XSAPtye@54@`lH^R6foT9tCE@8qnA!n*E-f! z*e_B#6vJYV^UHEU=gCEQ_VdR##b&j}6i1d54B5JpjMr6uzgb*vSN8=&`bkwh+#(Sy zcTJEnlw+Om=}`T`ulR^hc+MBR;D7Qp-|&0BjBx3K5@StAC2Ei!HKRw#n|c#Ss@M9|Y z7B`KH;{Qvd*H~X?8XnZ$2{9WEmq{8gOW?8LE zx|v@LesWY!u>C2vi7jlJe1-lGyid0F33NP#cJ_$2e!^BS$3ERe$KXnyg6kG`DGOPC zk4*nCCH=%4fCJ38n`OynX^Hb=&wh^Wwp(@Tkf-04e*ub3XYiAOg%guGs0@<}wHALh zT?#K#X?kTKGBpY>S0Gz4ATl5^T?#K!Z*O!UGBO}AAW{l1Np5CuATl!`Fd$M2FG)lo zTRcKUMm94>FgQd;Mm91-I7T)@LpVk@Mny(KGdMvpLPb6xJVHc9HZw*rI7CK9HZnsv zMm9r3I7T)`MMgq1I6*N&MLt~$FH8w#ZfA68AUH7~lTWBAPZh|y4S+Bd1kqia!<;V0 zebAMAaE2hDBXV*wA~M(?JdHE}K8HX75ux@n$|M8#%ib(byi`)t-}W1()!K4-t%12% fJBi#*@Obb8i#!VAlQ6a&3o delta 5733 zcmajjbySq!p9gU129ZW$29WL}@ z&B|Yg`B7c)j%UixaUjN9xGO)4ll(UUb0MOgZ=Q%#B$xcB)zI-l;?mUA)6M6aQ1Q&8 z^2M&N2jC8#8*a%{IGx@5j%Cc^K)ClWGzjsJAOxi z(n53?GAFuEleQGEhOyJPLnmm1OKS94%r(Y1S&P~#Xf{2u2%a&-%M)*gF2(_&s0i{; zt2yG;!=~gRI7TC(iM`xC`60=@0hCS}iN;AIA5m<`7!>Ll!3!HbB^59HlfC)|KKPk! 
zNG&=#=66_@g61R#-$@-m&h;lMJIpoEO-O|_gJ-+>va4y~(>KRtoTniUsE>`HQT8QS zRQ2l~{a!*nd0*a)!T=MFs4~|~Dz$IbNRIJv$)I~54-y=~ZRU}6eaA#?tnz{OV)Qun zld(h@?mYq^#9Uo#tl?0LRl0r}o=unf16UEAzV=vYvyr6wN*oKFnuJ^D!?(3Bo2uJ~?hLY?b}smu3t@%HeSMkV0rhmqf^ z-fA{SWzV|RA2;PtXp^2PR>pBb0I`$9F2{r2k*B}F6O83+dvm*J)r3bhw90(4e&;RZ z)6@b7>ZSm^@?)ag-p_q06LM%v=prsfHpCP2dadEXjwfKJKPA%_CEsE<4TPOlLaxWA zIQ3-;4+3>PVK}PdgPFE=3zFBJoMrHsvHDo8o`wzf z-ec^&)8S$>P2ba8{uV4?K&O)q@KXDQXDgtERS*m3T6nW(BT^gpkU??H8Ouyybluvt zk&HTr;Z}km;0lNQ>iA>rEY5W%ohU#U;9=uT41*C1soS|b_&Nf_z#{)}b<@}ISQDmh z-7@s2NSedx^Hag5k*hfcdd;}$mwvYZiY%(c=iJZV@34%se3Rlvk#l_dCjiAVyx5pB zqM*M&s@|*|l9GMX#P1O!$FKnQ(e#*75_wvR>76Gk?^k|!S zMSAc<{uEb?GuK!!dKD~I5_z-Ig?}eg^d=D^?S$>t2hcj3XD}`JBmvTYrf4>xh#@ z>x@v`o>6XCiMfT{3$)$cYZ1h>1Vctf8Qg|dP$SM%bsg{;wiI<>ao-EN&n|lL!^(;q zhi+nyTdUYZP)3B>fUTA43+@%5H5rUO14p?Ihai@yMY?CyUBLG#&|FKDvj2}&2Id%h zeK<}x`V{epz35AM2v-IHO<#0LYQXmy9@^|3N9= zig@`8rmYxfUaCmZ zK!LwWyNGt8!DkBcxSpJpUTmIu8CP=hdhtSM(c;Isz=2|NxZD+K@Po;n10Ao)$Wy)U zEhW|LhcuXl;_ z6Q3;K?Nda)gS3{!Nx)?FSfXZ7*4e_As!&i?-QB|6mTJ-6)@i_G(^#TlP?pZY=?CKM zB0UVOpmsO2$+G>jVBM`-Eim|eP;GjaZbGW@Ah(CgP;3*!mOV#h&=mJ-u>Y1kIxA{L z=*e)Y5wV^7b01%CJ8QSU)n5&wpgR^+|LOuH{(rjA4}HERzgG2MU65a+caPcqXV333 z%EV~$?%`a-Ygs>XWHs=)VJ_PRRMXDobUIO&^(GiGbKPb)Y;^bYXWPAGbvbbXEW<|~hf%L>t!Y$%iji{hMv%1YI2n`#ti8f3L4YahD~`^sd3 zXa2Su7!~@9*k99yxj$gNX?>iT`iLzP<(?ZJpHKN?F~q^{--fr86s5&@&zrO($6|D(#gH|5Im+gDDh^Kbk#{Oi{zFCO zoeGz~D)Jrd|8Et4!Jq>s-`%MQ&Jw*-L6lvjj)B?z7Zs9$!M{s=mgfm3q#FLJn6BHz zu&}nU|5fp|zk@8=VsJ(1__X+0;z}Va$$v(|f4lmBC%Atift4eK_n$~;;Qj7g8K3IT zp_0eJggKlB$sg71hd9FEAov)Z@c`cH?$GRv_A6u&e^o30>Of6W4RO%??~!mi09miC zPbx25T#alycWbI=Z_yH)EiCCLAXox#9GlG&{g(PV6W4rvABqTH?3iD!Jpun1n!Pb& z?Qc%=>TI80pRjJ9w-@Yp_a;??&%%r6HzOmJN1N1sY9uvqjSJ(Rl2AahQ<;Ai{t|jV z!%~bVS5qq3KSbQIF<_2Q?I9CBkp&saC3_z_vb?Yh9`S8YAo|Dsrc8S|yDkx0)+7CpWXkoL&ME`m+Xp+kOT6FGs@39*j=&^B`YX zrT^+^OXWQeCt{T3w(M5k@nq%@^{tKubUos<(q(wcZ!Sk8NY1mm28axevW+34BTywx z_act@@=P!{H{8Np%d3?WUrkzs_jV6M^0LC<87jB>0}S!TNQZ^qQ?WY4U)GD?|f8%YI#DVohJzlAh>kfTQ 
zFh%XixE**h7gqp4-0G7IF2P-b_9*uO*r9PJa*7GqhgJ7DCO8uMNYW}RK6@k1R_3IzeM9!WDT>8z$ zg0QxV_ICutg9AI%Q750O^K%euz6%Ha9tBstp(>zcb5Pn-k|b~VBXkXO_)d3G z5PpkXsrSrz(Gblwx&=&uE7Q)Z<%Je*-;3(Y%eZ|i7FV>B35D})$`k@M=a1`KZxJd2 zi-9m!at})IQ*Hi~jr*25sBgFE5y9kBtyirVur^kkl5G>wO1Mn#y7g4Q6Q~_!1MICa zE1Mo-i|x5R;OPR62^lU6ULf@`x`SdvHo%Y#MtlYZQEoi5gf0SlMk_5~l4cn3Z(h|O zsLt2%NIAX6r}$(P8Z4!ny|nl<2=4PE;m&!-LG4dSS(+YwYz7nh)DmowD9=|Va$$`4 zHbo|(MxpahR88;K5;lIzsNvpyGE7-_9HV}} zlgioH<{E2BsM_WZ5lBnxM&L3>c~%6UpbyqUo*EF?{qa(+1NkRk_MR!~YbBEhy?5!y z6p|~N@A~*4Ll~=kfJCgK5CWOf8Io?|!!qYrvAbgFWiF9|{1r&xo@C2fqie3M%`fx? ztMOMi)&<{YXZ3KG#0t$8ZZ>jkw5wt#S{*QaIh98@vViUEv`H&0KLW~~b>6h^vYgHc z_v}dxf^GG>Z^!Phlo>{M8CNpAtzDH8i+Ehj7fL|T(=suEGt$x(hmbj}Qqb(7D3gr~ zd-dU~6OTi|itR;$ez8kaf9&1@`+iZ|Sp;*{f^Aervcukvs7J&#uK#)YH10aGRLGa%riQn>Ma2|vH8Bz-jOylO1 zsNv|!)P4iXtV<0OMUx!Y2BIC}Z9n$jw>W}Ei+!f9r9YR^>D7sk@7E~?AN?6_8bN1* z)~f;0o@=mGe^U)8*TO2LzbsE=a+s$^MkuO#sGIO@6_PN*q)FkOil!@}wgOdGJ}17t zOC*;Zo2C_Sy;eoV1Bpva27)RurTjFMe9$e^V;8BilFH%x-f#onIL!ANiS%mct$|;> z8)l%!|58R5g}W-*3k*IAyo7MF02!K^*?c(!w7amiK>%k3yVs?ZcsL6Hwhv4DgB#$= z0jA%s#65%cSm@TK7z+)P-jD%{FuIhwLqVEuq0umO@u4#ri&rx%| zepQeO?HpS`yoT}*C|1n3(RzR{2In@_&a zmnlgRui3zrbNiuqY}DulQF?Hy;gI>USxSrjDPlOws_DYb9|39)2GOHgp>J13Obrzt zvt0Xs2%L-0>V&T@|Ew3nfh^J|67Iob8U0 zwQGlznZ3~Zjd1ZT>Ip|@V{#}{W@e7(ag;7VeK+YasK zcHi;c%==L{?|w+PO`5$_RkUA!iGGRpKuzr)ryWR4>LQfZ3g5%PMt*#3%`{!;;{vul zbq5+r)97D^QU@7=G+l6Yyn*xVT`1Yp=Q|GiEFMLPJpr9Cv%2FS4{2IT$<@|d(#9!_ zC0DU)Wsv%9CEeed%pEok=WUED?kSs*Z*3nj-&ldlrONtOM^eXEYB_%YOKe;7);m&aN7aQZH7^b-Q|tD$F1 z&u1DoB%I_dZV)$kmPJ9&{+A{KO*|_EBE-#!g>;<)@A`r`v5+CaR0JRj5V>2^@$m2k zh(hj06CGk9Rd;(2fT-AiJSYH6VGtz+F_4%rNI?W7Ck_%9lT%a_hAM%@!QvuNhzJNG z4fy|$+`UDe@Sk5q;*i9QB7Q^**^_(pimR{Mmcu4eyCF)#A(}<;D%8}Fs@J3VwBzeH zeJP1d_p;wcVCRK?RN-vOUl4rt4!VZql3tvLcRE*vxw5$QRy3lRv^-C`n($M8SPcBW mho+M;Eu|p*pI^Yo*V^0H-`mcf7z_b{z{HR69w}-m5&s*oU*zxr diff --git a/src/genbench/tasks/icl_consistency_test/GenBench_eval_card.png b/src/genbench/tasks/icl_consistency_test/GenBench_eval_card.png new file mode 
100644 index 0000000000000000000000000000000000000000..ff647bb05e0f1641567d6c4029d3cc6058c03aab GIT binary patch literal 91665 zcmd432UJztwk?Q9J!UutL=f>P2oeMV$;m7rNpeO+Kyn7j92F5I2#Vw+IU_lvq97TG zl2rthoOAen;k~c^`&F-6)oay$E84s7=yvbTT64}ZM(=&}u{>p@ZXDQ4wU>m1m-;O~^y*Oaa0%=E17v@LZ>^i9o7 zbkAAoSnBGUTHQ0Vp4?t8OhWQEiP+Uk^7q5WdhMO$+txRy7w>BQ{g>z;+x&KaI_(%^ zyf-Q9;or}$-P(5WLvP;R+;e%VDIZUCD5~B%LLq5-br!lwKu=*Yi8JVd1P@d67*A1(*{iMI&L?TnJd-v|o>_%ZZ1A`>G_DBEz z^wM8q_x|-3x5nrH$IJWw+b{2ET_C=Y^}-Dw>d!;gui{n-op^L2@pG=Ga#^lz7=nUtYs^2J(u6}`pXyPjIq@iUt!s+S07ngS%v3$Q*vJSrsB$?$v5roVqQ)4IdGgi zF(*fU&z?P5oyIh|k`fX*{z_Vz1lvyI_Hn@)0V_QlVKi90B}^7{^Q4co-iRC6uQ zuJ&(kB$Sj0+D!Io6x)tx)p)b7jAb`k4~N?+I+pi4FNTv+^G1c57Ku%kE=II0XjeQ= z(ye{f*5{l5V|aKpSUX#8?GCw}A7Q6BmoI3icl z%T^4zOnbgW8@DAZux9u#$ltm1`u+R&7p#9a87?XuZfac^u1>th(NA(-xe+&!z!a^Hq(d3g0SI<#mkx@=518KYUwSR-?$K zx=xjkf{ctzz>0)~#LMz>*HUr%bd~IQ)$s~kqTPf#R!bb`!mzg}_s;`Vv2Jd&t49j> zXKkY!DDk~c&$7ixNZk12u|8D1=1~=GjeLB3s>QY@0k4{QZAMP}utgkXNG1dI4 zw6gj9MyDzV7jL+Dn3_7(kM@Rx_SN}^JXo~Qtwp1$wZONwUec-bvM7pH$4Ysz(=6s% z|B?+Ca#X~%&bLoFC1ca~9c4*uexvy*h=+K;0$nujZX^naW`4Z8BYN!T=NRJP-B>?z z;%@rABW6HKQoBmygA=v)o3s~RG%yK(!t#x>wHn8(vlkAWYhZPjZVPJ zmp^0{su>~^l`=E~CUgdC0{ER*aw@%#508HnThR@Ej@>d?oap7ZpNX}!w8Vi(z{zB! 
zr@xMWQ0Pe4U@oEj9?YqqV>hXJPP0V0*lyCctL$#|q2*;hEfMxRA9e?hlAHAu+`?I9 zI(t^e#ieZC75}f&i!#P`yo;|@tI#rzj`8omFaGn-KQoJqfpoYe;;8F4MkrWVyy+g4 zQA|~ij*9ww=gytg{ry^J&YWRlVUgF?j>aC{O&fIn{$Y3hkq6=eSg*#eP!4DSZB8E=y^x0KEZHKyOQ~^v$zz5GuPv+0midun^IM>smsop ze|@`cyuVxvXP~CK`j))>%R6`O965P11Ia|ma13iUbzm!)ZRBwZ+urEh81F6aU|QW+ zoy>LJbjdaAFMGcb7Z*oF*AWVe1R5cS`!>b~1_mo%ACl#m_VVM_ME!~HJbpZ-xmk9+ zGy6s;k6F8Zq;vf9=YM(Qybe_PDj^BTX;D&AhH#sx7OsYhxR%w<{Ih*mkwp8(sMe;e zscCvGHxhxAE)IsdT0bit-3OCHE4dw-*+bvd*H^KG&P(4s#3dx;Z{6~{eEBl*+dh2~ z-MMQQGZ&WvgH%{%`d!m6nbDPqB%IMA$?J}eSeI-o`&qtIVugi;Ap&;k-Rt#Xd~)2O zk{+Zdn$^ro`I}k{u=D#Goafk&Z6*{QwroZPAa&?uiw| zm-AT-@xDAmPd^gvL6e+mg7x+}DU!a6=Kk9hot2qdw)y6pY$`mwV_)9HOcV@|dNAvH zvv-v_7h|~-a&j&>&r~z0mb;YFIQ%?iGtyceT|PBs-fLcQpnYI|w1Zhls5iRd(VoL$ z?Vn!7zI}T!P^V;NBqiX*ix{)=bwvj0h}5Yd;*`CAe|d0d$Z&aPkhGhM%P;$@YrhdqyNYLYoX$#HdgdAY05Dq&`3CTwJ^EBB_a$VLIS zeeP$BZ}QDR*1e>pikg}cdW{ijS3F3Q3JR2&6;owy-;UC&3;Ooif=G!1zf+>N^Xh_v zv2iLAXKGe#EL|K@Z^4I)$QAfWWwt2A%^YwT3f51p3g@?J4&$>*9b_Ie<_`TnFfc#Y zB0c)gE*cpOSu}8``fdGTEIkjqqw82n(auuxV33;k4@B$m=$3!Hp$mly9ga2{-o? 
zT9H#z8=GumXHSeUoH>&e74`O8O;14quh8^|!$nxNv`3>6LXLA8uIo3+gqNaPgiD3Y zA{v6j*&+@%vnf67x9fNIkIgme7+D^UwEGg_@Bg}r#!;fegRG-9N$$jgez|{%acJDl zK*Oo8@8<8ASmV6S9{62*Dsa=-?Ex7*Exj1Q1zmybzBC$1x$exgIP+{}ORukV{-}Nt z?(eOO0;PPIULuo*f}H%9Cqo30l9Vdt{gCq4+8aLBUBdULZhKTYugqDjgm)Dd$=aM}CS*RI+rfXPO?Y$7D)1)1)5@=enc3ME#Byog=6H9$z?t2>8r7rX_wFTD42+E# zd)NeyhM*4cX^5?sZ>~2Ewhec7cB+;-v{jV|5z08bjG;InKK`gSk9hJY#wjapzZ9_@6Izx;bPU=vv+SZt~%HJ z+vSwhsHlsZ>yxfF1KvFRtr}T(JsTo~Qr73ipIs)Ag?@3fRjdV)U|fxvv`%r*SB zn)>>*H}hF3DS45H4<0-i&_{ubGo;wsIfpw)sH(Y!0Tpdwo}Qjmg7-6XStpv~ZX##@ zu;#27scC4iWn!KjvKqFuvB^jg9`!#+dmX7sSoYGTe-suFdq(&M#Xq=2M6_yOoqJKQ z*HU0HP&3YkzmIlgbd0p6pwtPEF8%}<$TR6q26Qm&%!+&Z^yysW=315AuAwuz>tTsC z4*j^Q#1p>(izjmIFQkSJz@)US)>e4}*D0x6xc>%w9P>XvUoq**nI0)yUkg*RU}j`Y zvUXIe6)PCYW;JMv`seY1#a<+_4REv0n9cy3F7_)Ho##@MCmR1`TAMU0!nPG z{p=85>bTSNM~YE|Gp??by8Xt+uY^b^^9y};r-T`BSu$Ky@B8OC z%p0OmDBizjs&MuCb^ZWya&m>D(v+g~mATk*w(_;+Q>P~P;lzfwtkh#+na-Uf0E$ia zd!XXt@quS-Wlc!H)Q6golUwk&#FUJ(E+Zb_)cs}+Y8sAG_^nYB-^GdOa)>qRNH^@x zOG8Py7ckyes)pcfNr-ewMz#c4X&GG|M^%(lF*P-f>!Q+8?`vvQhzyvYZ4?>BeJG+N zq!)aD%Csowhhw(B(ox=?tdNqL1%74%tmW?RUQ<&e8_25B3h)y+E+;29-({4+_Y!4P z3`y25jfLS`y?u&N;~rd8tA_fX16Yy9f}-Z-q=X1rRn?GM=D5;m$Hj@gd-hnIy(uMS zTNwMkP*zSZ+%%5@$Oe>2!Cp|evQLt$mYy9JaVp?pnTF=wZTmLyzyP`(& zBE`|8iaI%ivqL3qxx*j{BqOX_5Nf_IKuI&{=g)f{S#@<1Tqa%e+yC4@RV{qw^5w8Y zJ=d;X6PJ=o&&0|&hS$(bh5gKPL28uiEcBFIJupTfqi-82DGDiyIE;Z;I-{?6ybK8; z7Vl$KRk4ZV&!$reSx*n8-SDMJYvwg_=rf$*!*08+c>m75vcb#>>+9<_$N+o+zBEEz z2J3gssCZ1nS`LtjY_82(b|2Hp0TTDfa#>$V(kgc`?andoY))bU==e4bEKj%Y|9ASH ziF8}mNhTDBlSf4@mBk0uk$+XooC??0*H6?i&z2;5+ovS`zH6SXyUtBaOcZ5gWNcS0 z+C@f#qL#p<-hBJh^HZceIr|PAXhVW^=5GfUTt{xP5w%TM%a?j~^qef%wA}jZ*QbSW zZKO(-wxjKmWP(!{3s(z=LX3d>f5_zB`M7sffAzpN6YkJINvW0FDyBsVkXmS^?~xT0 zbd0FmgL%g}?ml_&@ZpJ?qvup(Q7;mTkm(#ok`jOwMEEqc*Lv;xe}NVZ(#II-^cjx) zUbK3>ny_kC^#Yj}4ASXSBRjE+@)`sgZU|@AktiJ9{PWL0nLuyBu}W&k0TknNS*L^8 zbuO56T?m}-b6(VNMxrLw1%j8FCw_f9(?oxH`9%4qOM(Ml4B;`00MODoL#1=)PHVB9 z*|i%tQetDT{`&PxTwFZMRG8w(kyzB(k@!$k|46Nm9hZ!OP+T`x`_t~0)a%J1z*_;v 
zo#)$>y7J8;iA>=*QCKy&L@yaaCq~36fN;eEI0TX@BR{|D=Gw3bGxf7xB(J&r^7VQL zCvaiIAD{fl^KjPN5oH-KUmi(e`CS5K0D+JE^;Z6eG!2^<|BZd8Afv6Sq8^YdH zD!ht}j0{LT$*JdMTDGj0u>J?K#L7`N&D8uphnrvlPknq|O_pU4ff|U_kXKbr{`&Q{ zyN5?@W1}DK004NJwv#`0=>`Z_t8fueI(SXdJYxENWmusq|=*XPvo&YnJfnNzQ>De+cJPo9Y5Lh2vex04?_ z^cn;~S*!ujiMUGq^c30*q6#R1QUd6)<{GqIf4GY*UA~yxr0d>9Poe6YJMMXA{pnC$ zQcxF>XBESNw+K`XqI~NGy(FC&vGenQvOlX%y0-pmNl;K%XTdpCs#enPOXk!syD#;{ zw@o-6A6s5st<}p;ijDoSqQ=O?WLYrM6_HB1f4@8cLM`{8P&tyqSLdzt=VpinRkF3s zg8(y#wR-Yo_cc_3b7Z&3K}FjTs@5Z|jF5Id4D#1<#et!MNy*@Pl1nyhQ3s^L`Qtzi z2=4y*^XI1kf=e=Qj~hDnIW31_?=JRM1`s;XR1r7=*eMe?cl)UGE(8v!G&?)H=47`V zKvNsdjf)rEUdO~ZM;u8Xx1Sxf!g(N>-?s&F@h6Ba|9#Eu{}bWuzxa|JFP;CQqDp@B zXa_k3MGC50E*JgQ_2Slr+Ol+So}r`rq?awm&#)&vGBN`Mju6#Q0!`BP|Mjmo%D56l zZGB~)qf+z^-@i3QH?l2Cx%Hm^CpW*P47Tpq@iOLCGa=i4ac6T&O8}?@A{uou38=Sd zuK8y82*LEDqoWNWRh{!XM0~wl4ecK1ZRab0_Bl=}T)%!jZo~sJ0>XZzDf%IiRq(%+ z`;VsF0Wf}*F80ZRS#fZ1SkL?rBczXkO7E2M=xw}Js77-Q0+Tjf~ zUDjW|dKHgY14PsHkDcf(Hv0a-(-4SCAs+!&<$JQj|IE!Cn^8p+-Cu03%g-i?Cxz2r zpQGjElyhCGI0;UY2*orb&trf<6c920=j7kMFTpu@cZGohj z3Vq06q%}zuKo;rPtPO$J3@DUrlwNp!cDS(vDdK|V4^dq`J*HEqqL1FtvT~xd8oeU8 zOD6k$MFrRGw^i`ep8YN&dNl9d&o*n zEAylCfE}X#tYu4g2=x@Gm~?*MzI_3~!AVFkhS(gODu0xki5GMirgtaqkgM!AD>tDF z2A`w~i`8&Qy*Vf7C=rig{EPIbSIm%LMF17 z$>j^_d3yS1@-UThgt@m(=^)}CQ27IX{+zvy?ac@>s#d`iXiXfJmUUHzgS5(%TWm4 zs<LQ;V+!^F=1Mt0s~Kjmv2i!E{B8imG>fzJxQ>vj>(pTBJz zyLe(65ZY*I>RU}+U2CXWS<+&^tCnQQ1;+gPPkyvq)<0!NlYMatJGjm_7rlECu7t2#rDJqh!~^8*fa&@ax93$)Vp10s`VG;PSI>5 z*WvToS65^+N+$-cs|dN*eLkb$``*&&%HycvZ8!{VrH*zdeKUv1p>zUd`^N&EDI`m7 zZmj2`ZfTL$8jY3D?qk&6*{TaQr)@i#;NJ)fp1%86uv|uQ-NH*>9}>qhcHgL3Q5O56 zlkYHzVZ^#%GT!~v(=+L_v(lM(1qEks_j#KhlH@9yP`2W+Eb*!#BzLhHjYA>#cgTA& zN3;FyL3UD6T>NQl4Fg*Z>&oVUugJ;>|m= zZCPXjQ70w9R$kK8M+kMt?8u=5>8V%=E;rC0w!-nBT^R39n47Z}%bk69lKlWl7?GlI zr2GX1PLl4~<42b+`={YSrOR3u$0Co42Z;Ff<(Y)e&n`R26(bPER|c~r z|8km^whgbU(i||?CP2*x&%+AHA~#tJuV(8vE)@8pz!9(;R9gWM)9HN8%D9sc)}>KACrr-7Y-<%ve^lm{s#UJp%r<&y5eNAF3T~Dlnp^TMPmlRS 
zX&kMF;X$Ucs#njS|1Bc2(eqE&1*;*6(e5*BrE{_Y(r$qF4Yl_N5B$F08hU=FqQIwY zi_#L?-8SWdzX&=m7&C8bSLKVlbyWp4QT!#{@z7;+eG$BcQLGs%ZCF{JKgtK4_0b5s~$|7s4w)X;1K)U+gWiX+i!; zo+C$(tUW`EC`6+m;@JJqZriqpnV!zJpVdPXOVppx&gVO{Zu&!65=K4{lO<$|*Bf8o zJygxS{ZCeQHkV=RSzl8rxz0C*vf<0$>i5a;V14?El3E7?wbG3;O7m)*2L}fYpjHjG zvsV&FRyE%=+~hB$TJYiE-2WBv)Z5T(x!O?^Jj5tTV3 z5zmykkFK1q3?$aT=;KpzaY;!9+(28tnHKVECI2g{wst-~x1x76p#rvJiO_5XOd4xy znn5JeQqqfRqSq>qo3(oL^!=1C=by|^IdntFyQ9qKo$P#rw2=3=KEr1#o;PXP{sJ2! z^o`h>Gjt$g$sRNgqW%{g=I#kCe9BSS4KWU*;EcRIWc0p>vF&*tF-^g7U>x^MeFSv~ML_1f9dQ7@T zG1y|k`)QYr&EwudOg#)x`E~9o9Xadzh z`;3g$Oq;_Zo4=Yiv5QG4@=V|ggy_X@oe23@>sx`~iBO*5;kW;6C%O`J{!JH696^H8 zA`>KnWz)(QCVErAoC|w5u%}A6%8mm(JyAgG1kfdJ3gQ{ihTg}Wc2beTX#x`kSFmj@ zXv+pXe;(D-qYj=silQoe`SOD+V|A472iB0^M9;@~TU5vPJ8Q z`{T98SQIALa+uhAj+@+S4Cd%_Sv8@pX3;2W{kk^Ou4YC_lXnJu2!RD!X&EF=kLKmSFqd$MX2?Xi5 zju#*LT+3}4F1l-LYjwxXO1?EM>i7!jUA}U~o9^(TL*FeoZ9$obfRVWU>reqDrKEf+ z7Op)zN+~u(EN|xq0ipcvqK*yOo`h;1kM;=UlH7)4a2EXb0&w9{0A=*(f|r)==bI!8 zaGTXNLY_3Et%mRv;_vToS+)1A9YlKxGzG|aadL7xXGP+0SfN>~!p`hrn=9gBKFQ*Fm3gR z4<8DsuPErG=MO9ctx8y1=cqswWv>hZ&S((jIKgjip6u9dlCP5S1`yAtE5grD8=~fI zx?oC9y`V_fjZW?2b*`1EZxw?yv~D3fBCY{4Cbkb}vr$_lt5Y_S$I`t{#KbGXI6f3tW8c#T$Dg7t^9&^rIgSTpSNZM~~cfb#Zbs zd;QD6nu3zX;6gprlooqPn^<ST5q~pXhZ@g#orzuSEq55jC`>@(GkMjy^le}@5?djXxRE6;%djvohscMQywA( z;}ysMWBbU7?NbdnHsPI@=qh?yD~jMjIPll51*@4Be*D${l2NX%E|{}^G^5;9R#vuV z99s8BQI39>)ij(-lriR}btD|iIc*|S1qjRvaCy6{m9)t810gxH2v~Bc56%1rmF?` zZD}dz8rq6%jIe2_T$MpHT4XNho)wG@AYUqmTVq`5LHrC&g!u87Em{&f&~dKPZ=E_U}l zjUoSNq*eKKBqGv*s-m>*=sX#S<8z_=VjSR`8Mdxk&hr73NZs>G$Zp*J5@4m(H8o86 zCQwQ|eSB;k>{C`J)QLYFo5UqySG1BKuGY&V*=Htyjm_L5>^>`=4LZ`-zQ z2>8d&(TT*c9a?nSeY-}XMc)4uAt-hy64rCsA?TEVs?b9s)?O`C8f=>*YMXw4nX{sC z8S<4z`*M}YhBl62_^aMGvz4nRW*ZDr8iNx}a8h8|nFAv4buyOW$^RFk6k&;1A+e)T>HlYWb$)G8<4SRh`GW z^BZ-$0koS$%bjeRdIOplIv%1aRea)MsZ{3~p`Ij(I<88)>7IrS@>-m+VxfgShv|d5 zO^fTJ$B;KOhiuUL9Qu6YB$q4Vv>u65(#ffW2ubv@c(f2oaa9N2E&@5MsBeMJ3(^fA 
z8wbT$=7ub~GGy{LvpbFBA>gCC9bpkFIAh(@cfTARl(WS&bKgHc!*W7!P@4c^btu6F72DaSW&m4}5sS~%R`h%RlmiWcuJrQ-)4|eXO6GNE#MMjQ_M2>jX ztIg_&uw~)_Km$r`%L)>Iqw1SX45^mRL!L*5f`x!Bybyl9CmoH!5gH^`g z#Zy|Bkiz|A3H%S#XV=+BqXg|D;p^A0mZL8zlz?JRiBX|+bo1K=coqiqnklTliX}L= z|6FMQTIJxI<2pG8E%e^he9h5L@HF_xLds8sLW%C1Y>Hyq1vqX>mxn_6VMuy)PEGM! zZ}+y=)U4qKv{w?Xoy|T9OY4L%41XVQwvccvkYE8!3k}O35FVTDYi&AJgnz@^wfz zejLIa-6)N9WpCa*XF4ev930%P#Mhp#p=R0(Qajp`4h^*^&8|~7Fkp2)2;r9!iOe4- zKEPVsuBn@Dj?e~NtL&PDiKIoOwdGhdNV<0n5KBe~xnEA}F_e4IG_;?83*-h46U zo<(p9ymUNdg{Go@+qu=YVl;hIoSI|BOuOwSdOF8D7Q8H+gv{**KL9$XIjq37E%_a&Jh^Jgq>Gjsx5+&wl|hK%v-83 zK!y`pn|erBCB1}9#xObzYnY12A{0Di1Ax(Z}l9ravck%J{wd>3?9I?=Tu zr(m|482dl1Hc0sea4^Ey064(^7$i*I=--)Ol_tN0`Jdz5#)+unf!!tZF2$+IA3i64 zFf#ji+bl9t@yuh#m~$g+{!PP$Sz0R7*A8{{x$2fBAA+>EZcNrROgI7PU;z8H_cDxF zif~7!9-e`-F2x1qKbdO+dno~Np0u0j%@6{o)mhm4njI%g8N>)tQegur{Z@XzOYwXG zzs_v?+85t6(7JXdvVPZ1XAsoJtrbSY5F({$^XB>i$2-;jXI8#HW1FkvY~(dD`uy4* z`oes+01QinzQ!m<7-ib>Ow^3jI*;A2J`-4Q;>tCKGF>gQu`0D!?AHO%)9GL^6;oW^ z$5s+`E9N?1YnpN0xneD|Dyw3s0dVaVzR1ixpD<4JEnZ)HW(@&F;N`9zJJ29!^`}E~ zO(1u>43E9T#8~|orM&W*N+R>t&RjU^Fu?ol*)tn-aZ~U`U2k+e>o-sjh1S zCrJmfC$D}8G5{Dg1F~3Gov`D5>58^K(cCsS&m=IE_u~3ue+OEYX%1VpP9N;TKNcB8 zZ5^6?fvgeLA5&%6(fT0vEB2_<1zjs1Ifid78xOb zT@VzNa6v*q7(GHpOG}$})uX(;T&^3*Wi&Pu?O;}Er?{sbNvoiNsXA=Mq&<+5)1Vpf z5?c}AMA10z#qR&(c>&ufV6_ZtFX2|;H$f!|jnx{;6r9jZA!!vf%h)w8Hugf$*FZr- ztF=Y>r}uz5y2rvBOFBfNB0OfbE@SK+y$vWuwgBRFP$bN~z2n%VyO`9fOw zb6S4ih>RV|wSO!m%7Wg0baGQs&F`Vz+5YT-27DnEE_6wmnKI?8y_42pp?oiIX8Ft# zl>&`Hy7fa%4<9_R1_5!@HSPpd93@m#OXo8)g(W9e=^F2ZBn*GowLi44)H#gM zlUfQ(dx_RCq%O7TZ2p1tiPfDXHV@e=pVp(4D}f58wEFVQ9+Yj{O8DLE1x3RNZJyuS z44%zi?c5nt$gaJP{A&mkq^Zl=^bK$dY9ot9NKqj`Wg)1Ma!a$K_{RdQWAAuI6lUrocUJW`t!AT1l|xqAgQXZkO|$T3x-z{{R4tnQi`~gPzpKZ zFv9ocmR=fMDbiC{#j@83fIFj2h;Fc^V zC6#At%>j2?Hax+B^JU9J$2p;twv^agWrOAJT|LLbaslpPE}&~t_JqX5Pl8xp`oI5v zI|n(Oj)9?goR*EP5w^E?1Um$T3c-QOHtytr+3p7Cy{5lMbMGRiSi8G*?&A2VSIX#k z5%PuKT?oF0UhMR!6zZ1MXNDe<37dCo>E20a8z^M2guoMO+FL{!TCYdA4FVV#7>K_I z4@WpQ=n530dsa`-#?9d72f?8x-Cu5XZvO>VSNm 
zNcM=2ImuzqBD?cA-ZZII1;2Qq1C7^&@UvnAzBUO%tm(0=Jx9;NQkSZlTf_YO*yO_C z+Im+{F9fzgfe%9jW`L)mcMTjH-J;q7m(l>=^YMljIs8Bl8C(i{B56|l7q73~Sht?= z-XnH7QXjja2Z=`l3n7W4Cz-8R4_mvJL03)^SRlb%Ot|F>RT#xUUfZw8a1uBCc=rO&ZV2?V;DM8bR z&=NhDznSclRx~RoFMk(et_ds?_tHxjR1Y6Mtb^{_cUxByLn&}x`>+4(l0csxRyhs` z&zuOC3ys37slgC|L6Ci64PYn8J|FbqxYhdVYIeIm`-DCF_g^6R1^f)>k;^VX$vThT zgTIZxMg-dPR;Y!cXtRXEdBo|oWS$MDdiK4~r@`tUKQ0oQ&Ml{TX_sK3;}wkR4wE%I z;rH_N_U3~BE34Ka;sc~pE5r#DYl0PnRB{6wZ~aE(x;M)J{k10g{r$AQf_I=9N>PQqcNz^RQr%yRBH)FTdQDC7%gmr||j98#a>4{aZX_k0}dG(%Sjd!AXd}pB? z4?&ycf<#4_QhA`qa!gN8)1N;5p4h?hT{#Mr`Rr{147+3;VHYt*m`g(MA*8l|?k7Gz z+~|Q2hYo5?Fx+;&s|$+4ubC`zn5MfZaG->U1C?P)94OO6U3zt!TKBIt6O%zulFn%q z$-_cAH^bGz+kXTuSrm+0lYOOWxdBXsFNYWzfjHTUZXBd)KI4ASQZ(7yGPFcsG7s!l zLcJ*4+;GGJNP_w~(o>iX+{Z-yko`JZ1%w~D$aY*cDfAWLjV3HEzP?JhJ*HHxwm0@V zE@fzPq63p?H`#|a^(FV!9Wqu@5rRqN%!)su9W>*RlEZ2M@R%rKAZ4Vf7dGpL!$O6B6a%nMp3*x_z-k|AzSr>SC!*L4 zwxf8bd7@%KGa|Z4uxz)csqy$~u0pKN)u|#47j{;?b?a7uix7#7-7?dH0{52AnU2t> zsIp&MvN$1lUq(em1)d$n1gY@UjBH1v9$kg`FzUC<)uGEzYAQb`gYcKOiz74=s0G?rB7=;D70|^Jc#4Uq@!`B|V3tL_gR%e`KfLkC#P~wIbYwbms$*Oul&yO}|C+yu z!MjAsFt+U^ZbeV%E{w#1P$=Q>E{bar28npd(2{1t(JMn?UWSpbkwgE}B;Ef%?LYs8 zE9k#?nXu@%B`ag{2a{)C1Ilm+h5}r0!)`?nW_5AjfzKaZF9pXrOqlgi*=7vOg&gLl z3|;Y_izZ*4Fpt%GjV}6D2KpsLXERrbn0$+p`u_W0QVBon*3T5>mhW=w&ylD8_rJX7 z?CRg>42BEX5k5ai@$zaqgrkf2X_Y&5>J*V2K>%Z|{F~5CU%@Dmf~@Roq zA&n#gcplR2fZ-%@Y3U9S?i7rkC==aUC_ovIC2#tp88iow23bP^Rf&tssr3Pb2E66d z_yQVMg*^)h$d%qnmjqmkm@Inbd8WO|=mlMaon?ANd6apG$fGutgo|DVYQq`TlKXEC z9X@P>c=s?t(N;nYBrGUk&=`)Dg_#Fd`u43En6ee|<2RA7UtM&B+5dV1Arg<^mbXY`;$ICP)#c&LtY8-IzyXxMMJ zoI+8r`-K}^(aGh+z7%YYrpja3;|hU;p#2WW^d&1WgZxMW8L)Pv0isS^CS>%jZa9j{ zXJV-^50yv%*qi?}rtE-=o)fl{D}*R3Wax^b6*!26X`@loAemNx` zL*4dhWQ`af{z|n~bq0x6J%Rf9&qQSuPwOd zype3F@C(BSt+-8(EW#p+E=?bmVz@ab6qxc$Wrh;z<%bt>fg~MUWuibH(F4y`ok8yq z@%EV?)d~|l=g-TyuCzqX75A@8A#1mtpl2n~*48HG+%T082Y8fg+N=8T&jYd--!9uQ z6{BDKzkE3_GhENC=d{Yy(fgMgM10OK?fq$mgJ$sl(Vi7dLCdANn19<%O4^R5u#w|2 zVXLt(>==@eIN42$R{aeW^j1D>&xKUCG1ZxG=mW}|pNi}Rcbx~6iGye0m=f6L= 
z<)r+27p*VW+HMRZwXLHOC`WiUnu~HFfw#bqk^_b__~XY}Ha2Ns`KM2w$Y36!En8n2 zz3xI2B7V^}#s)FNT7gzNsYqi8SM8~9F8_A}lY>(!Hv5U`Z1hct!3BX6kRcu*Z=4}! z3<(E{|BDy8Ubi5M*wk}%5Kg{9pK;ifh!H^i6EU4fe)Q-F;LqLz2NbY7){uTu;NK)B zT%|is^x-QoVwbAJyy{4WW)aM7!+9pK;u5Knn5`uAPPD0zUWvIs3`%H#4G~k{z&jWW zj~CgPh8p}a)XEYOj!{k}h?MPc!iDFtsbqZ^%;F*DpGJ!Lt*q0b3qj(%Nq{9VA*4f? zOhn-S5rsN9(v_P6K%1PInQ6J2w9i#5;G1`U1x8fPh=GOe-M=5R#^teE?QLvuv7v>d z@J8}t>A$@I>guUzz!yzO^$QMzU9^&=8>!pF~C%RqXU2Y{f zlvqt;V+S_Q8V0j)L3>#oM8noZKNuk52{elw)E1I4QmH`#pXh0#>``MN)d;)InyMa3Th_sW(VC;<;VCJ$NQ;wvI*~GjA zBM(o?Y6{y4W;Un=1e=ePL&2$Pb;6bzwvA+8`DHWLTlY1z_)8>gGq_g%M@KOTPo zq7vZP1Ndd9f||IsGee&<@u^NJS7t?tyl1jrzjojtQ!J z;JSz$^G+?34QGW{`7UI{e6GNzEvmffi>8dyg%Gac;SmfzJb3V+X5G;j4I3;nF^SQZ zqBM$QZd&!3sG?!Kw@{UI3kQS(^S`qq@0)KKph-cwQg<{J_M8bz#=(#vNR0LRhYwc) z)-U2h{awTodtm=iY*z5c+#HI5ph;)eHFO7CA$vL!_D?{i7gq?=Gtps!O_6X-2G;r# zH0=Myi0Un!swNEH6Ve?aS|e45x4-FygilaeoIFXp9t#iRX%P@sq%hZ+Bn&-OwZJ@D z?vB~U+Ole%aqxTrtVcMkudalHtdF`=Xf~mS2LuKtiefp+w6CRV?*XLo=1k00{CXBfI7sN4QRT32<*H13s3NK#?cP{ga_1#t!;PftG( z_*p z_}YF94Sh#@={j~ru^SlW2=c+>tEb;VBsqBWD7}|d0PX28UW-2fN5~)8jtu>AbT#P9 zmoK;BwR_)>*s6dl;7Id+_?-q@d1RC14T4##60=~qxQ#~LSth1GNS2nCZlfgarA_qQ zO~I=E7H#qAsc%g%_yWt8gc>kq6g&+fJ1Vq+tN5MBZNF@;VGEep$j zq_2m$xm++8nPXyp4Uz(h8wwu;K@zBs@amOBx0S5jiLiq)lD+-DSfVVM9%PqeFrqby zX+vC89TaK`Fd8bpen8aiiy9 zlXTC4mGK>1ByAHSBt2!$#|i(}`z*pU{}WldavUBrH$pV(8b{k~3i`?^9v%wB@&{t+ z+552)cOaFKxM5ur_s>wR1mS#83^zt9Dd%BYC7|Wvz1QRH$zw~?1IjqiQ~4PgY_Ota z$oCK8;Rxq-D)+;OO+te0cys&RANL&{Q`Wt@47FAc4LIIk9nZHIZ9nI(i(&6X+ow-c zlrp{}m0yGMF@^scP|ph?WWyjaXa~Ey?*wi1K`$_|X9z=KOXyENy2wES{f_*k%Tjcu z{l)?NK3!s0D(EL~#3m$ER9AayNqmBa%YF?I$FRe5owp;Sp*#B&e90pkIM2|F@*l8_{+4fgi%7^wdvD` zH4gJ5XLfvR-8tYjA0!_)&hC#IN(1pSKGPV--f(-2Vd01@Ba z-bmpVd26ZbW?9$FG0T(5uci^AKbzzB_4M>S(gpT_h`CI?I&_OT=!_Dj zv;pGe@yap@hNU>kAFx@5@~m&V1K<3F_n?`25d%gSL3_GPU@m(I`Q}|$7ZuvmZ7z?K zb>i!583^Nkjxq4l{h1%0HFJ-JCpmD-sTSY(qKP1mzu0oG=KJ^WswWRPZ*DlN7g}D^ zRzc0mb=aecW{^3cKgmTG7h$A?oQRklw3W@Rt>ud~+j0)fFz!uGe&)OzlTu5?sEo>Q 
z?(76v)7IDbK>Q~4m}2Xu=H@=2aDB#X5B4E>eLu2KoNiH_7)yLj%!S&TnoJ@D92~yG zZdC>R=S!3WMz>Vc70SXD7YX_5hKB0^gzvM6*L6$NYx??VxVgFazN;mo5II$K*ZYsg zT9sE%`~6K%|7UF=8^tSB2wdig`{+|3bB2T_}!=cP9kBUFwV74Yt^Cn98Lz>PNwcoa;O<)b?USzH>=@4v0By)8+8 zFGgiW=?(=21)be_^T}?ww}FXosHjjPT{J=8)TY{_5F?t)xSb^Kpl7~We>?xN*M=|6 zZCXpeq_ZwWgBOU`N=Qlu#PcPJpZ1G_g!vQjJ~uL&Nk$o5dffIgF^6~LZy1a&VSwQL zyxYTv+tFuy22AoA?`|tI&n`Q8SV6wDE{oo3QB_5Sk(zky0=gnGD6Og{Y8n&w|v79@{4!^V$dRc}K zUqKVa6LOBggY1sG(EgICeGIPccDAS#v#P5BC_{0&Gu#OW6j~NyTPTdQe_Y&Jm0jbHdsm?c8o#CMISrKX<8yj9#F*-2sfrXxwV}WHGvgn8j%c1X6iwDVlxP^S`Ph7`rUhAi`}+cMU|#|+z3uNmxmAE&mS=SD>g#vZ=nPmU z4<)9v$164GCa#v>j-=*9JMzbX{N`^B+ zh^N!6tfZg|@3YXjJjKCrh$DDER&X0hn#-5N&ypFo%R8IR4~BgtPTg zlH}z171ho#mm1Rn;wt;}^(_!XO$51;`P z3lAsf6&SkyI&|m({0mNKuA{rghNqC6jz#LM1lD|1?)@uB(0S#EmeaKRJvnEr*4c9{ zwiBACWhv|A1N5)Ry_kI0-hLD-;o;tB>U@P873UFt2G?A+edI5a3*QP@|BT8> z6%NZLQlnz|zbYq_y@LauSo2xD7PCGczz;uPCKOS8Gys8*nd3;q1k`slVpNCyA|iay zH$Q#uTv}x$+vAl?GGezv?r38>sD>xf1mCwP6L7G6eIZoQs$*qk>^Tv;iLiEi3>Ezej`YEOABExC@|{GoRrA#alQ`YBo#d1K4S1a9#0^SAT5D&Y7843%RF>=M9WZg=wD zyUC#~rNZnV7V&^7PDorgw1WkMMh6BSf-{XOeqoZYpI2`~C4MD9Nk5fLG7Fj~R) zj6g2)`*!`V;-8^!v!jFMt_wQ=2H42VfrF?@>&C!?`2(UM1QBrIeHN6zn5d|zE)y~u z!6#^zzJ2#@8!Bo0ui1~c$5v%*T4_lL-eBT@+=&yxTo<|jiO3x#rK^uJ%2txQX8!V< z8hDbQpa1^n*Dii~8X6k$S|;gzf2^4?JpxC)SC>MMC)2#XK>jG1!b9=;)%WU^mMCpX z$=&mQ(PbWZ#6UpI@JH&MrPwC#`P9V3-FQ}rl5#sld~}y@jdNGJ{O1iaxNB(m6k>`F zgv`u0Zw|h~^LVuJD4ZZ}6Dsuf9;K$TNH;vFoCFsHc1`-2n{Csjhepx(q;lh!lN008Ug(l#IQ_B>0j+wb) zIMUSzyMER?zSL=%1K9QUk#|2`H5mgV_WVp*1Q!PfvdD2Z#y%03{dkJqb- z)}+IDzSAky;1PeU7oaXJEMeN67}a2|*+)N!M&tv+5#^-)gJ5lV$_G=c8h(hg+tAQp zID28i>L`&EM=16vj2tlglDQ4lFwJ(yj;+#8Jm8NYbzh3@GHNJZK*c$P195{O7ciRr zjY^K}{{ejLmZm1@@#DwqLb&$9+Vvjr*6;aq5)$}RPvEIKvxXv(PXMJo@Zo({&Ej;2 zN~YfLo0^$X8Ms5c23U2 zLzXa{#Gf}CfqpxM%uho8&r%I63XlF)#FN!zW#sCdzZ6s^KtSX!^!d?Xh$$NP#L&rZ~lY>WZq>P7!g-xMlu$=@GSj;67GN=9A zp4>Jw^99v<+ueN}W&Z5BbJq+FnULrFUcTG`5b~k2@-z!eYP*kTT3XusBGlEdFijoI z!%4)GK;3+O$F@_kV0j=-J$)I96OLoh=F%=XemHM-*I)kt{WxK8(LeBl$Di15Wfx&NAt6H 
zZcpEe14qZoG9pa^v2Xv4v_IYD!2i7r2#AK{sV$!<7(^c+uC1#ZXBnD&N&Cv!6@2C{ z2PGIa1p96HOf<{YM>8{)(Q92&!Odt3u&8A1Ks>%h{x%=1A%|Vz6dpw)ssnBcga`nM zHqADYDLgLXHkd4c3GTe)a0r#GbtPszfFXHI~e`tAY0Nq7ZmMWjc+fmBH<& zNlA?00{h^(#dpWRp=dapnz9hb=3KT*vZS>1+wialw5NBSos*@Ab^8x(h(mYA9Fjq z8%S6R4W3Mo5mzt{on}LE~w3 zNeMM3pOiwfc|=4`VqO_i*8_Pb_v~y=yu&EW)r9&5RvM|C79S@iC1o0p=gtAo|MBAo zk(gji%FNDwNN5q()o7yH0F+O3jF&kriUyQvztkIzFH;qdmHsL2|{_Z$A>C=91 zKYzY8`it9IJ9U_ah#*R~--HTTImpMfzs#F}$1t=YExd(96cGPEhcjb@Quwb@d!;%!K7R!U?#15q zN3?O&-e$;XO?S7+oti&BP5#MaqyIIFDG82r`b2 zTiXfhQr*w}@I^&LIYw1=`>|uaVq#*Jty!~^la6`7$c4wm%>9@ta7g)a-T!lQilnRF zXbQf8xqc5ofhyN>JG1J+f}Y=oPb2q?%eqm9r7_x{79uVG&D$r z2W);3_(!I5*F7!Yy?b|Z+XV5-qw?QUcl@*FzgFhAoxd_m@l)|6(*C#nLt0FAKfV}b zjys=<>Vajzo_{{S`AbYS>#*mySXf@A6VQKrm#IViw(s82^53gye$~;$jQjV!Cyl%L z#Ho49{QC*x`v2EjHNQ@Noc#a#A1+q-{qrHF#b3%E55gT_pyWtGt)C~3#1I8!7hQ2` zK>}(GF00`0tJ=6;lij^$Sl=E0Zc{mN96~(5wt&_Mw;~+Ps=ERb< zXiTB(HYuXkI% z`qvP|JT||usvUAtEs4xYrB)PPaWZq9f^Nd=roP8fJ*reEt4#hUui=ZsDb#6+08Sc% zi|O6!ofHZ>c)Q8{gvFYeU#mAqW9UCO8uJ*ISQP*|S@ZA9um{ zYb+QwPTwMv(~+yD*sd8K11Y;CV0xT{bd$%lT*!}Up`@)bLuQ=We|xPz*wxM}S0+h1 z$is{wQwgr2IynpFnH?!8uzY55Us2uq^!4jq3UW`uuR@?3r>Y$cs0ExJ%pPlqgiHWw~sO@uD#vru3M(8 z(WscN+2&4UrMR#MZl7m2r+;FZYB6GMo3dnj*eL_OSk4iL&F{Dw~cpv(HECGec7`90D1c zi7NH3U*~dVOc)9)N|A>8(rlFg1Gs?J?(ycA4{B&?MxH&pd-v|GTer5AsJR`*WTE_{ zH~Xq>u1eT%REc$UmHg7nXf)_AjR$HG(#7|h2#Q;J0pH9X;%77BK3z2TQd7M_AEM7` zj2wA@(`dudJ?U_F|31}xmw}%vCg&3qj?;oUK2-8Qabmu%u142p1f60~nMk{d_$4nI zD(NJjd+C}GqPd1vH<5-UQ8$y7P%_+2uWm=^+C8!%w1h}rxQ_NUwKsT~@KKuqoQO_O z;!)ez)8N#C?C~y9-rZ-E^Q#faPC`58q_$*cv?!Lk@8brozZO4eTx{&t36=# zN?yozu0RXqTw-DxSDEujkF30fByixI!-#iy%19A66{Sg&Cf$7WsFUFST0d7UUKvx@ zw{v`)3#R1!_wD~WgYz?S*HdmcwUCw7z5f+IB(^i5ri_GvY9op2709Z7xu%B3nB})y zPEuE=U%v%S4>Ai?r{#1UIUnZMFI|A5BFm~$q~TjgKYunjCfL9_l`(87<;R-uf`z`k zh~0PqESPO-d8z8?XVtXLopf^k`;sWspPd)FVy|QgRMin0YS*71@?!gR>Zdj0hX+O+ZJzb@4@yoOw}ZBzc*6NSo)PrstSz~Jgq#p9Liss#7+Gi{J3 z9}L!$bJ{O`)$Q>e1dFL)13$b=iOV~MlIZBAcT#@Vjm9dsAdC{*(?>PWMW^t;-(+xx 
zPm>Vv^`C`vnSXrJc7e57G=2NLk~&8#ow{2|^bBA~TS(k%jA|7?!=i-?C5fgrqo7B3 z?zAKuO9fIASnL22arq*5vWEvD+E&f?m$7W4vWa$2zwghdP zO@FStyMNdDT$h{ce;J2f6aR^eV+!dFaZ_nuzmF!g9T40y=BUV26)_IX-&+WZ* z_+W%;_i^A?z!tjV*_dn)NM>98HM6BfPut(W8XYygQe;a+wG$&N-K5-aJ6>P3?#ho_ z^BX)jFZACd=b8AqJ5wWjU0&W!y7%y*Q4qC5cLGWs>RO*^7*mdYnX*kAk9UxgQ^rXcue{EmTm#i6tZ}vUDi8pk=%jdh#c2?6{Ohu^-J;w_ z2nA&G=aC~l1;$Fy*uGqV;8dS&>fHLQ?3ga9@f50P_-X+o?p{X5ZIazFtL`029@V?s zRBl(u0L7S|mn9Md@TdFXZ*syclF%57q^@vRZemNu(2hW2bMryWA`2B$xvitlS{9l_ zdI-=QAZSMV{oA)?$=h}ss>lu2wsg80n}n)ucYvuhFvibRq+fFwavfZ-h4B1aLr9rL zT2Qcoc3QQ-p*9y-mYVh?H4S93yF!R*|LE#v9ZeI$6q?ApRr#iq=RjnuSOL!moFcH4stu!tJE-Z z&MfE$<^D9b_X(=AJ68VYg_E>KR#kLE!*ywkmn7>wa!yeyqGL90)11{;8xeM>Og5c4 zT3!7Z-#<$lrlhnZ&MGBtY!N3^T%m=frO-mneJ2jFtPUWekis)s0)s3P=#9GgOe}oQ z!*n)g!G5^+;KBaMn?B5?dBC=YogBet$mR**zGpADBqTkcs3~rx9MP1M>+V*xSs7KT z`#s+^u5@lNf%2Xnqbz1q-MyWVm6_>-jFS$U4X5x^*_;@mHW)r&`p!cT1D-v7nuP?y z3=-|uBl}pLS+a7R$M+4gI&4{8-JE5AW`VT#NxE{Sv$Po@a%I|wSAK`#Al?dm)J?Jl zq8b_CpPSRB-`4)e&%tSpR;>a(88&%xAiTsun_qM+LQJ@=q&{iVLfE~QJ$IFtbAHI* zXQ8TBTU)+3p?t`K6C*BdXQ86!nLJ6%T{u3i@tw=|*Xyk!KHU3r2qxh)42p$l2N?v? 
zl{0HnKjTh^3==V5W7X6K6XXz0>>QPt_qHnU@#DXq#~!9^XiWqU^e_X1A^4|ai+OMZ zOfQ&2iT&;S_j3tyBUo{}fKteEZ6($-dFJYLRvKq>h#d&6q&Jv~yPMlWGFBT%H-_XW z_KIkbixv>@v2c*8Y|`6%FJFedfBzn!Lf>98$geVOTsF7v`0OU(XKXGIz`veMEygpU zoc$h&20=?Nw#XURqWoFYljl!NYZ!F;`phmnetvoGn9frN)U+m;FmvqLtC#ZeQ!vCj zn&_N90i_|B79>g2e6w`&n|)#u)X3>Rgp?4yFG+~v`q zWWvTlai{I0!>so=JS>&{`_&EoBHExequP&Y9}WG*dfkj%~e{GLEoWFjOJ zCdXzTNzCm$K2PktZ&%_CE(7eH1AY5JM}8`>di2zxecLc@u zKRH`bj*Jc#+`9?n$Na&`^OM1Yy#RI~9?&mvC2rs1c#1V0_-Tuq_F)6)f#@hz-I%jS zSC+0tdh((@1}tA`Z+^fjrQV{y)`#ev>qOIkKX~vPA{C2*(3lhmy%hEPTdmEtZMC$t zR`ggjI^g-p=8}@yOlsn{V7tgzzqC?OR_^jMw-WvbuUN zkt(^ntK%J-bInpHAZ>5}lO|8TT~t&gMXX9Fhe3lZZGF9BkDT=}m-nXTTuxv|&j={5IpJAd!ji9$z$&D%l#UKNg= z0eKIov>oYqnd}tNiRlVQb^>Qadj!N67qkqcDCa|VD4o6HNiDH=l=W@bWPE`2%Ny5k zX`PucUK~zw@tX;{Z^s(}XzyYv=GU8=ndLQDg`2kW>~6HFdSyN`(?^evKl(&B+&?&Y z5i>z-2%9sVY%%ay55$l{3*7o45Y2&C`bw176t(EQAduifh!Cl|?o^f~I; z`_Ak#oC%n9Cn*abc;J>_g@C&-8o8$j4eYD!*%$khiiGK_k`iwq`d2jC8eE-+{}U8} z5zBEG{eyn@1gE+JtCi34m}!}&7n0Uv2B z`~6;e`cz%rmZZc1+02A!j}MYmWNi>0XthxF`b`*3l0@-J-{r84-{qrsr!Lm)KajM$ ztq5Mo1t}yPgvl31xMFss-C4IE)&zaC_4TJ=k2)jw6RsZnZAJF_D@jSWh}d~LZdAb^ z(^dejC~BS~k^dzHb;|RjyT9kB0izJoNTQcQ%>EId;|(%ML<4Ln(C^1oe$Vh|Jp}RAjqqS(pQld&49cDvqTl& zTbQ!V$|eZx6-=%xx(J-Y7E8BOsfl`z?hYzlEnBvH1%=I;#<3-AXC2gAb`tJiL<$Z1DcE7+QhB%DGSl~HgrL3H4Q zMDNp6p_qJk_L0W;7|JNLtmv?GFtEf+I7lnW5HQfXIHqaeq1X0@7EE*|@Ka!<^YoTR zY=wm2)2c&{WkBrTX!`etK!gvXFw^#a%Fpk1!PPw~Cs{|e;VFBkq-cwIBBo203{X;P zNua(RRwoHZoY65lm{bw57YrZhx;i0R9s&v!EKHL)i(9PgZo%6Enr;ptWY~m6 zxNh4C!~-3mE~awaJ|k$+UtN7kw1#a#VpLwu{~B_7ip5qW!x%GhBF1a5z%n6eo*iM% zt`?w`U<$_MJ98t$L1Vp@1qtpoXNhM07WAj`JW<>{D)z1H_mJPeS@a(@{fp6^v!ivY ztX`%TB%U7MDs+3`!!{jtl%T44B&>$Y89+8-i6g+BE(O2=J!2{xwZO z##gTo_~2)oU7Gdv#=`gCc2WN%D0o#~z6T`eR(5t*1iz;X#P&CNFW?}%!ln5?jqSgf z_vst8iXXM~f*&xwZpNB5LkKuzbqpf?*(|?(SwfQV!mYTYtPTcB=yrJ5!A4h-P}BS%keTX4%D=PHCks{df z1LjdAlj%>{7eIZnieqkOZC%}9X*|Pps=s_W4VvB7=KlTrLUvUIs}Qt$cr*>~17nMj zI}0Z)clb<1#9!gzgXlb)ZF>0l@wvFMT9S=!T^@6o^(10XIs|le;hGJQ3560DIWOGq 
z#m=*Ba8*MYu2FDq`JdCilLyJ_U^;u>*VU!)HiO{;vJ|ees7b}2`@pgy6EIv>hfqjN z1eg$lE>36Z!sW~P-!w0LFD4TbL8WYHZ9JsNTA;LS4t_$$oS?*rxD&){{}YP%`iG^d z>1=Lg2*Jf&U!1w|md+Y3kge zwbIE+b;it@*|;J!`gafm-+TT%NXo)?*j*Ca?&$djI-tEPzoD>dcp6Y~o0wBGkh*ns zZSARMw%*p%veD@c%OzaV9%}Y-d7ysBr7S9Or!U2Zeel~*EZt2qBiFk6}g;n0@vp6O9>AVoFU}+)Gze|_ghW@pL{N6JG%GPCN_SW{) zkO}1$qOLo^wwWRm%Caht`1m-yjnMPD{gOCvS`x-2cI;@#6$7MC6cEnNEoNUyWyH1m zl~JB+s7-md4nBT7#{VZc;}B{5>WuCIArYBB@1ye^B4r7&luHiSW3mM{_VQ;WpX9xU z-^@J++7RAk{3t=(G@v?4KmS@squ&h`E)r2KZ^V!09M&7+w+QdiRuVclHXa-n@PaN< z1v2AljeXQ?{X1V<*6}Vtwl$d8Djwcgd3zfhJv8_uC-k!~IGnuMfxmJCMze(?Y8PoU zMB3eFPg>r9A8>oK`}(K%@6Rb&J408uvm|=0(wu!1rW}@-rr4R9sdCH|*qwSxU1fDZ zA|AV^q>i|%b$4M*4e9!wm1Q^fYSnq4b%^`8!rrd!zo|>Wqn?4gbHCK8D^_pp)2-Wd zT{)RD&@M5*$<)Z^0=DJO<`F$q3tTpWqta z+x6k|g>}o|&#KfUxmF%NH1o-5H&FMHGW~J=5mr*kTWwPqLniiGy1G=WdjIJZCYAq(7uTTu^-4L>GoL*D?gXL@~iVp<-6|L^ea$$X7c#* zjhs{wD5V-NT-d)h7AIwQDc00}K}hic$IEm*y%ymlKDbUVlSy_EjE{*Do=t<)D7GIMG8_uTEKFYHGN}al}yLZRr#=q%3aADDEY?ji08wt=^fu zAUh#5?4w86jn6&^pmvo+&d(_cu=&Y=Ek9DWj3q60Z+N+BWyR&ZHXTQ`RgjZg{poRE z;#L7~NM(FW&e4sWk=vskoLO}-m+|#Sa-|M3{aicF`6(|ef$<#A7?1z&l5ObmTWG~v z9N=FP@QU}RZ}vvjv*utjbc>oY-o89S6-mf60A~;Kt_#?93sI@@4DE!BdE&$lMWWoZQoBM?^-I0;BjJI~I9icTmR&%DQ5x`6x6NifoiZm+D(>`n3&l`ut|bf%<#>{$^%uM8Wv#Wsagz`!qVc zS~C5utgI4jkgAyG_5JQ>m3;>e43-35m|(M%sT;EXrN4D`X(}1iwWLX@YdhBug~x#5 z&W@jDX9RXSM0yKJ-f6ymeW5NoGFeBh=G9Cv)PxVhM~b&6*r;S$x)Tuw*bqt^R{3<+ zZMs^9s=j}zm%T0lL?pD%mR`*zk0%lKHZC_7&(fMPpEJF~{f>%x@&;uYx$sW8VC@I< zs*DvZznJg8a6k90{9adahOXF##}72|`@UjC{w9w^xuCe3sr!Dd`wgdrbLT=aUU}a? 
ziyC=_K}mgRiObOFLH+bBEVo(2{LH3SG*2hB_H2c0faCSL!=IK^%>2aJj1RL5nI6(G z&7-&<`lg*Trhrz{20iL})6a(Sv{EvGbttEsxHp`@+IoafT!X{h#=P&Lw_1hKzPn>| z6G-6XQc(vH`ga+w$f-n52%T-Hs=4z={`h$=S{vU#T~s`~*T9q5%Wk2bo@)l4BnRjs z1wzeEbMDgU*e^%ZVW4074y&(GhbMBbn&8RRHLvi*MsYp|4-N@kp*Y6%ZB=4&PiO0s_dWM5+L#pB zcl!B9ZMKf;Y+_$)^w!qEYJbW1xXi>APh7RERpV2W6@PcXT31sOj(zqE4pzXuKBhi0 zm*~;#V+S6Bu~qsReFAoqDQbrcp^Of+u&Sx4$vxdIIz?$)`77*t?z=AX^0RePx7-@v zrE=-?{C}Mi{BLimfh5TW_N%aSS>!n7-2Z8kme`E&=IA6r%X}~jmE`emg?QC)^y>{;w7^%_1X+KYTahUDq-rfI_?8gR3 z((KHUlx1PSw<`IS*@-IoH9#rTD=&05y0&hb=fUSG+fuT>6MH*v*r24-xKGXg)BS1slp3_DudLaXQQ`&k}xNr5>%S0>%qmk=wzA^3N*_jczk=K1WpRupHE*n~U3SDy3v{w;s z*=o;vu%%;CV6A%X_0c&)e@@k%7Bjc#TDDq*UDL(5hS<6p#02-PWW(cRY(?Hv^-z>* zynpnhb62%reRvDFJ1D%K8hc=?Vqds@0Y39OD?e%zOo#c{hXP4I>*hb*^Y#e%!1b*c z*()~rYp)nc=a=kxmR$ubu8ZgA#S`VcG(JMyg%%f4zv-<@x5&vGVcMyb4$mIqI6b!o z(LRI6jE~LyX&p~H`MDVAlCNj94QNsJa%;_jjID4xy~n3C{k(eaT7`z#mD2=KwezCX z_#~s!K5NtBuU&)sdfXcD)y(lCXv2Y?^DGMvUpry)wJb@68c&x;dQ9eAWVVeqi!hDq zSuaaX9Kw;& zR>r17`>7hLee-}TeB$yUlE*(6xoednx2LnR013shs;UEYYP$KY_;Q4}s-48-TK$qQ zfHqyU?7&TbJr1L&N)fl8b^H;|sI9i&jK3;WPRswf*0R>Ta$nR`>uxzNg~GX{kH#m zfi>}eEU?;m0IM)$Vyz??f!-y^o+v`~Id#a`bL(@?%ZB%2l!bt-bCsPJccI<;7Z-qM zF667jenPbubxdj5gQKdqtEo2vAx-;ct`g9`DgMWGqaltx@${e}EhPcKp-$Zw5q}R&xgJ$@ar*SalKtdE-Up{Rb6EI<<+egZtm^^Au{_o z6f3&sfvvNGw$q!II8Pbr7eQbJ`gyn z1?v~r)TNv0>3Yf`6=lgE)1ErU##eZBE_nE`BXE=CMT-0iriPj07UBg_TNP9>Bwr0r zpHE2eK67TM)F1P0zf|ycrcRYX#_s?ZFxP&vdvPUEXoAi6FICab9zYeeJV_GGpmK-f ztx^;Ti;ROpwrbNgw7dlgr9d@>e!=o!`Gyp#NC^8WAhv&pCRIa+1-g(+Y0^}r7w`Y$VT#@eFz1u<}Z}irGGp!?w zeqVF^t+qxZiI7=@HtmCgge-J$N`7CW^YD3(Pbr(1`Yk(nT*E6a?xL9XX?Q}k%e}s4 zvv%#!4F>22sCh1B(~FH8u@Q;V|H*xZgf*x*t3{4>_3+5Q?6|CLb@5sVnJpa>2F!f^ zKr_&C%6k%D+!iXtOA_P#t`&rM2giHLH;b5yMO$qepLKZ&n|qP0#IFXAibJ zA-S`Y)8k3`#lW-Pv)XW6;>OapxreFZAV`L%)i;zK8WXYW$D*ASY7b$EENMm>U|-_i zUQ<&upqdi676f}g0)>K)mM3*O_U(G^6OiA)0G*@CQBgvsYdn z9CdV}8f9Z{Cg4$+T9Kpb{kZ$8-HW&0q-Z&2gVl^9-wlosyf(C6K|gTUcP4$uEYmq~ zIsbB1`4$b{)J}o~IM>Fq8M%mMw3KNcno`MaY~CmI*}| 
z#V}|i3KX{!>S4=uKFr?sJ9FmD?JXDX>}_!k+2xjAK$xe`{eT-G_z1F>w)ETgU;C>2 zpr4-wj2BuzBeUYe#|=fBR#sll8`{xfzID>ux2s-zkEQ~uzrAG=VbfxYB1y@~Dr%mt z|6vQ9&a%cd3(BlW@X5$e^EUF9(p&V^?)8T0ffNuL2IlUOkQnq{SL-)0dN zrWX_h3XH^gOTCjAO}=M0QV4b;v#B1^fbam|=C+?(*&yi)=g$2NI4(-#om%?U36(eo4fY~a@Vpt;%{BL*uS`T z?09gnkRBfpGI-8mhzJPe?LNEWAdNPTzUixaf8EuqAB!*e9WpIi9|{i-FGvL08Nh)X zK)xmve2<=qv!8RMK6KA+O7Q7LMZxRVt?QoAqi4@q6lHJs*(3C|2M#p5=B8Rt*JExz zPy$mLTL2M9%+I9kTgDifP~vDrSPx~W*V!e%Tyfa)$jV`9^}=bbC+u7o4V6q2_www)5p_+7 z@3xhgh8Re^?YqI*`6Q%*-U=5BJ^|1QHBABEJ!M|c6tOpj%-1DorpcS@Y%P2Goj0qY zgz{Ltim{H?-RKX!L|kTuD&8kYx`t~RZ?wqTdYC}HRhAai(YTrX!h7>zU`!f*W>M0M zsXRGmBz(aqSg3OD_4cc=?)@tH0|d^vFGX#@!@FnuFbvW3224M|VUxC7_sX?_@H~im zE3kOy@3}txv`_Pp1&}k6`AgA_d0&@Kf2nme?yL8TRPEinX)T}8bOQ`9&RK2o9&Lx^k@Nb!@AUJ!t% zuYLsWcrz(Y4yA=gY;Yl}RrTC(xH%kAnZTFFM>Um1B`=yJCZY>+1>w$F0xBWhZN560 zoELQM`Q_1wWBS45LRc+}g-K6K+eu~e(q{AI8x3@;>!KDlaN@*?h=UGeTG>vz1%cNN z@{*hvySp(j7HQ9Wya&;(xxi$XoQh`#*pRL%PnvX;&I4-zwp!>C2J6<1fT~;;d|CeP z_SsiA>*$X6{NZ-t(xp=X7=A%PK?Rq~x^`jU+)=ULDdSR(n*rQE9-b#)_DPy)daFAt zVXGH$v@i(SLOiM(`fk|0d$TVjBt#&e@fUqEqEoJ*ad}!vb5%kVc2$NC4+j_U#jn8H zc_Iiu_`;La`i?H$Tt1jX5l*??rmxIM_&0;&OLMQ!F!Q79^ANQwZ1jF~dS?P#oxz}G zQ&r(GdLg1edN8F;*X`w#J%%p4oxy<^9r*w~THxN6@?{R=$z zE!k-46Lv%7VDg84kid9;J%7;As;iAu_oBc)j<7H2UN`lmhhAZ{6CC zU-90Ckq}-IAJ==^gAmn~t!4U~n3!PqdSWzv;1->Bz7!|#Bn>=CmUM{Nk6cSkEWZaG zi6TM7m(|`CWyF^Y`pwO6n5J6XkK!{U`ub7Vl4H;1l>MBa?-76B)6TPF6xAcPFI%wp`j-p$F-A=OqIAztg&=h&^-aQ?v;0c=t97}Y+-z!=1p5^t;azI!!C2tl|pifjrllIT~ z3v^^hk8t&-LsWp_s%V6SeiV&+deL4Urh481D3-p3an4gUfg|QmzFKN zmd`8CHpg{L^V*^0SJUw4%R0$2eQ~mZk1~U)2_PSjyjt>K2mH0;{m2*^QM>MI8?T?u zPhH6q_?4cYKQZ$>33dYfwbZmUvxy&R6uGhHH}7MM3%hJt6c;w-U5j_Jna*F=2AsU{ zZf{NF^;G#*P?}q+D8kzlRtljv{`#>)uPNc&Ne5qm70XXpI74 zszRr@KS3qz2yZ4c!j2n_69WKINqdm6ZISk9JrmDaXe#+oZ;ZgEN1}V^y|+jqWvJYj zai&wZjjq-GIX&5+=VrE5^7_|*sF=hZ|M>FbsoNo2Y`zz6*4(c!#aJM6qQZvuvca(( zX>t(h^NRGh5mIP4?iA9Qg>6AQPGkBh6ItUvxu}KjEAnEa@l|Ijn8ML~l&=M?%$O95 zvQY%WP>Xo5U)KAhGn_oRo#nR@n=7wlmt36bOBpwmb?WW{J6_Ux!9fb;gZ0D&;=L2) 
zU$qwMH45s^x(2tFcB4>XjA*b@IsG2Bx~O+{Z>v{k@ZiJ_dfpCTruL~LsVwz(O!-m0 zuWp>4b*Odx9s);wUopF!Q#EmBD@o_pUpq!cz__o&DeL*t`DXs$G4GP{50862Q(660 z(^BD~sV1vr-T1OO0V*CR?aJL>KpsI=|d;!et>0S1)p$Z1M~#pu9gwy!^V#F z)lNnA^=8NWC)V#nBJO_2cKF>m_z7h=FIyzLVNt{*QfT3l0@ME?EOcgGG>`K^#DD;2 zcCz#I(=>0}v2c@_uqD8eK$C`XjNA zcumJR58|9*K@Sg?c1(0udGDHRHZ?OnfVz*E=nV5mC~*_c0g#6TZCrk!QSC9`WcT9G zCtD!-yiACc`R^U~KgN#M;pNLHzC5n_P|ZJ_Fhk^dHLYidMIaK`pW3Fk6ja}aC*rf5_$hQNXDWTtG$nXB(wO2ev5 z+gWCS5Quuas5{>HQ9bA3v2nA6Tp|7}oJ_38CO00F*PIVA&5#>S2WBV+JfWNzVisDdgifS4iap+ee)`;`e71b>x_AQm=8LQyz1 zoWC*+%YjU+i6P^zS=DwfQRhG% z9-if%ymG$e6|^(`HR-@a-v{`TX1QgH(QNzke=$q~4y4@X<4A@+frL<8Rb`Ku*@tsg zLX|d`A>O5?fZ(~sO0l(7R!i{Tq+ ztW>UWRmo6K>KzV;VWzM9n>|Ax0I2m%%tXy}_E>=Ihc(Cb`!u-E84DL<^rxRJ@wqPu z#kf4X%K%A`vfg^9A?6LiG%w%6%*?-5iCGWmi~gNhxO6xlszwSZW*oSdr4R-TN;f>a z@D_?UFKOovf(LyUKV$B}Z5&W4uB^0s6rvqcS}#|^G=Wdc7e^WlzDP?KAQ(`ns_$HO zRPe##bafd?|01}*yq=gGF2C|0{qIx-JsbF3)huF+Xj2NEZINIXMGJ6Ez+~UrYVjsf|Fm)2Uxk3G)rVT!CVzdX4_! zykY<=ZWxa-%I66&%b;|m4K?yj3!Q@ZI?q>+ICbhMb7}vcJ$Cxe-<)R8E+BztvN={H zDb1iax}F)c$6_(xk>Io{n8-*Dj&U!&A(JLOEpdJ5z8`D#_WR(e^fuft-L^D7fXMb~ zi78l)n(tipt2_05i!WkYobLn7jf-Z=Nv&P8_RBA)jm$`^SZcEiE&#XJ|Ka=H?Y_Z(`Y z=%zzQ#i=J+bW@)91PFelmMGGX0(u27&gB;_>5`A3MLVRtNq_X6)KnY1xDdUL^+e+H zp4v}9Z8V`vlZ)cc=0^@I-H<>tk_~ivHy3GY5CzLLNvo?h3p40vG}^ogw=mM_Offk^wFf&yl0?|$7FqEzV6 z6K=O{j^isL&YYR!>yXm85o9Da(g*ouj?Lhb#?2}jE>C|Zi;=*&iKwgUIVabL4=rse zU3s>C$B|#qmifsp_z6sa>2TFF$fAcYtH8n$(uep6iu=xb& zeBd*!%%Ym_>%2hmYVl;blH<+=)b244qx8PlgfzYPx7NhH31ks6jwONZUp%&cB-@}%S_ z;Gn-!g5%4jV2-02c&alXxA=+#vb2{uc2HQ5RB`a5Gl8o_=1#{g)$OH82N%VfsWDXJ zWr@>2M@}FJdA3E6kSpNE7}F#w=3;#lt6Z=#aTMU2OyD*BGvtpNnc8*6Hp1ie#GN_ zbZjLbZ6|yAZ074|D-$6$Mfypg>?+3BN5&7uQD5oRtWhg1jA`$;z5$ZfCAZHj#2iB3 zpN=JKL8W7>WlSy(JxyE{ee>qct#<=V=6;d$|6&vnw-sSb-#Dc_@nlQSlcHP1sT@8< zd)@5h3m0;Y`Zd&-$i-ic^PBRh90%#YVX%!SgTQ9{(h!@2tdbz63F%{s?9Q4;vLAR> zkz(b7o6A-;i1x9;gJ=Aj`&E(>dtzt@@-n@CaoF*kb zT5KM;TqB*gyt00(!3VwEV1ubBFw8Ja5t#8cRY@BYAtt zgVLP-+_@VjrC=ywTV+=i$5p;P=)!A=9gZmJk)>=B!lk< 
zTvrbX`65@ARFAmPV9O<8TCkVdl<@&n#}e$H=dBLm1eZZO#ikhI-NgJA=nIPxdsDKk zcP>FC%eddBl)S3wP$6{2eA0QooU#w=U2a8ZUObEcD2(|vF=5z*Ri+A_4fTy%V>RPF!+Hj12pv&cx2NPJU!NRDjG6KB1r>iVMXiS;>`C=bIEsgzN=2> zoSE9T>C>lM9(1~>GWWm+J%OEx)*UfEfW$88aOPR#*g$$QVnST}xKR8AnL&%2g$~`V zeJ-=#k`DWxk6(YID)D*W6WZz?5OIA^ktdiX_zTiANK0Ww@uU3B7!IR1Ysg8B8yG*o zsWN_qMey21!Z|GY=NPYZ9#Uu#`{`l2d^oX38HCK<2sY+=ijm;MwP1oA|H63I?|^c{ z%QFt7QLj39|5&__R9M55;=#7_PVN~x$@i!E!U*cX>vv}yU7eg;! zA-1lAH>L0FsaeF}mAsXkZscuavZiQgWC)o`@bZh#YDvs(UftZ@N-Oinqju8zi_IeH zQm834Dmh<*8v{2tw>S?3B%CVw4j5 zH84fVX-S(2s;ZBp8f`~+Sf4>KmcGHFA(ub3vl%ZkRU zZLG>K#v$eioMi1wa%j!!dxONoB&F%Dpy2Uvw2RV`h;^u=y?1Uv+9jIb=0;J5k2zA7 zL$*fzxNeNKC!QLlCr0Al*AmLXsOpnf<12>indmrI2XUgx8vT9lx&+a=Yc`bpwd7XDGDH4op-W(R zTq7SWBAAN&c*n-_AFxyG1|N8TdhW4-6r~B!n*1KF@!K$XJG13yk6dy=x9Qtm?U)IA zf!KLOZ7p8Bb;Rk7O*_k=T)G|)+^;!zWXQQAEfJZxLgEolb@zet@~@(gXZhbdH{%us z%0fg{iKEG3imy$;j6=>=gKb~pGt@+VikRYJ0sWQcfXZoR?#B)v9-{Z1BXou+Wy;{j z!e;9&U>%VmvxU^xUFBgA@kM~g3jB%yK_wv}{D3O|ZWGp9y8O8i-`?EYXH7BZNdjMO-W6aVA;a-6{qtQSaww=dj%_7T^KU zmvSs;4D&)XE``k^@Y2k8s;)76Mo>!|ky5{YAr^jFsIj7{dxTJVFsf68^w#(XLDhPa za`ozozrQ$DMSn;C@eEXb8@QtLqrVnu4wY`sOMm-`@^K%%HH@5YjSrCu?fG#URMXii zIooJmc!!9Bllm7F!`6uz=A>;85uunHp&#&osKb`WGZ&flUg*$^jF7k_M8x_e52NlL zgIM<0DnP1$(xm;Q5iMJ{4&LO@LKN5Ou1)!V2p6!{M~c?2Qk*$DH*5t)s1{zQj5&27t; z(grm|2G;Rx)(8tA*foA9PAolJe3}$Vo1yh-nVFd+C3k;Q)OczbzqDxlx~Ey!nEwHX zzcO&;+tP#!yk$^2Vl`7T&oi=bwa5;Egd82N*BRFox@49V;p7&&vIu5rf_`jdbamIPHf=Sw_Lbb07P)IahaB zi;eq#!dP`j0w{pl*cK%xWE>tDRQj0@5}d%ZB==DU5&GMUJt`v42u!>@?&^nZdMo0d zX-}DA4kOf%t&B}8T{y_ZSIpSq(J;tCpF}yfXE8 zadNTxHB@NfBuA4&Mj<#7miZ)3xOe{^*wtK-R-wo3y>sW@U!#(``{5NtFSgk2L*^eh zEqr}thKaqMCQg8AivbcsuY;7(1_L~QqT$uQ0-CSs z#F2(7Uy-^<-ig$QJ($Wl+3i#4Qm%=1jwPVOCIm&bqB8*+NUFk$?f<;*-+K|0s<4Uc zoOZo7j`&Q3=vBJtKeN|Vf8|Bu^A-Px*=zr&{<+w^u=(Ej`t?5Ss3?FK3dK&9S1@}| zs5=%;9^#<*?=^ZEAlN&f_J+rRqjHLh9}cUEq7h-()zu>kkN?+*tO)$=%I>{-l^;G2 zB77R)?(%-`pl)i-Dd7KKxB9<*b#J69hMoY%wuGVQlg1chi3Dbx9gFnCcA%FzHRF7VCB9HE(O)=XLi#bKczk 
z!KeOD{_}pvM{U4bDQMFoav2cI#6tDoy`n#C%ZX?7u4EhOl|g+Ia4?7~saR^){#>K@ z75Y{>rLrILIYY-I6!>Xx2CQ8rieduw=gj96|R{i zRD=dx)7Z4K!{!c)*wAuax`Yu2C@U%5DJUg{5#zyx{4N>#uFp_!&w?2n)3uPd4H<~! zt@Rh6CydAVGh6X_RjCsxoIuGb9~y$)sKKu`iZqDOS&4AsFwie?E+{T<`0{=U(M~kh zoYQmzm6#|@ZnEY^ae4L4yD+pRuJnQkV6du7qCNO!6gI8Zvvm_h*9>GSI3xSTghN7# zvh#Np7c*83V|tdVipWk^ciqfxaaYVQ0#a%ozRWfIo}&6MDjAyrhK$ayNVWSwO#$K1 zI*?OklrVh_BlP7&p9I_~F#%fSv5ffuI@bfrcWzJ@*LP(?^JKrlx_qPH;!}k;^)&>P z(ERzWm;~QH{$qfTE1y8q#U~Ivc5|~{1nIf@h26W%D#SU=MnAA^Hp^;dzNr<() zYD`^7jP&Ch_Ii$;v3`)K`$%rG!UcGBy}9OMIZ)EYu30n(X7N}{K7Ra&+(+gDiZ0oL z!Hu-_LvRORh`-hDj^^qJWw3Lrfqve}i$c8fwE z&;7#@CNKfbPPt_1@#GGL%Z%Prl$3M@|I>9T#i&s$-hOcTXD!>8h*_u(>nT5q@lJJ< z`u7jjY1rcKZbPO)Eywrj^x?i^S7(&rJ@dpMb1EQF1n|t{yBhO7FG-#LoyE^{qc77J zVeIkk^{&b2)e4mHkf{|q6IhjBC{xy_8xAP$kp3wGJn{6?j=t$svU4vJ=cG^Ln|JxdEBXN{>IojyG6LzN zgw$NE`AA_?;Q)em?_ImDj=R5Ci&4iFd7DyLLDlHlR9$lt=E%Q}G+Z zT|zyww12PNP+wopXugZ5h7Ye5?w`yIE31dJ zn2ObnYf*-gCm~CLtC`J67P~ADB5VR>GtwY2MUqZMf>L8Kcn-jEQqdtuztY4Z|2z9EW+;zekfe-^IlKI=SGy*`D1NCWfWV>^D^7|W0K8Gp z#5PzPJ0XtM_Jg=uugZNQX4PqjT2^QKr{@Obw>)WSVR0O*e+k%7&%`$y8H|OXqIeJQ zfvM+UmtIE~5TNdfn3;)f9|q}#ye^O_L}Dg0kdHv-*$A5{(>W_dvoQrP5WjagK4|bx zZ|`z?zxz}?r7D6EIVX@p@o|Z2Pmc5Z>RIlx&;dbu6()Ey#muf3jc9T0L z4?YYSRR)|gDt;>RGt`m&)>dx??Gf-SjsK%~E|*zg73?p}vB6@fnGqa7A|G|SU~QFwa1DvSDCMFjjoFY2PiaZ;tR2HA!FDSOW!G<*5_Y|o|ITcJ3$R%P@9YCH%&1BIT z4pr-tgVqw3U2q0Kw=R80oS+OuB#(qZRF*Z7q1jv>ain{fE=~_Wv&aiIn!{FqNjQr^ zJ{l*nFFmbOP~H?B)`Y})lpf-Fch8D%V(#M5kgX><34(iK+^emgpmd-laQ5P}i6Hx_c8c1}mV>^2?$q$;lkxL!iheF!`G$hb8$KH%cHGO| z^AfNmX~g!|4%jx=IU_`7T~`C(DHxoUpIMn)AV-Zkh8h4(1yD|elmd%g383O+cK!t= z;RNf)(|l_)hR0jO)XhsIE~BtwgM@Jf8TsFys7w*dEhKWO;~AUoc?%3Ex(6}t1}CDc z$q>uV$xeYZ2_P7?c+WBT1fozbY`+lHgJq_N49;UP1F_RfCTnqS_x|2Iz;xIEu>@56 zdq!QGiK7(B5!{F5g?H%)Pexmw3Fs-D+31;zn*wFg5xXh>A{ppXp2- zyYpQgTTHjawRhq4g>%AS(Yruu8w z3)rN=UzmJ;^S~})TW@y!KqbAIi!x}Cz}dHc?1>AGX@Nz2M=* zIMNxx1;bM(6bRuP0V50RbZU5n3i5}eAar!>JLyyM9l7q^-{!ot^-hdVq^@WS5>Y?- zVB@>J_>g2k#11`_xcj&c|9CxWjg{=h#aMFe*;Cn%+Qh~%z-F@?7|Go 
zrlekUFU!w4?m?o9QR`DHQuoeB?7&8G4`_^(`sXiC<;18glAoToFX2q>x|?sOwocCl zRyTF@8HCn0PP+_3O$iInfR(6=u?q94k$xk{Nv+SCR`_Q zn@RaPR!Km%>&@5Y`@)VB0eu2>fV{|9z@GUNRrfPxnd*Lx$2E{F$UO7lD=51mz$<;4 z^y{SF`=eC{F+fB1<5vLRCE72Y?~jC86g=f`l`a+m{nIltoZZ$f$TK1Ixsz&;ayn(bs4vBw*L`^R8oUaTI4pLFcC^?{Z{o1wh@X*eD`NLng{;l7FS|V$h z+xGP@2xg`H-+$fd+g}#i7ju8&qX&%MSfVm>AF`vVM<(tbUPTh~AqN6~>8GblTfYIG z9{JaKF|QQqKWE!br7C4C!jMd(0-oV_1d|jBL=uBF9+B@M8ev4{n+PMNf>V&czZ5Jj z8&^mAI0MdAePAOFa(!cS%06s4#F}_9-&#O>9Ooer-t1mN{H=7{))17Ckx1;DEDWy6 z*@^zB)DSkl45JTebzUZ6=67`LMTkbz+>v{x7^2sgG8jG(2u5!7SoV<^QM0);cvDQD zHSac1pP6?;V+rkL#^y=RCBhgnA#6mxs#r2eHljpoJ4atXTX<&pK}m%}F=iniUye~2 z^B{&5SJY=>A*kP#sqV-6-j`pKc_LN}lO=jc?IkvnvnfnZQS#6eJVE@mD2)pNXy5Mg z@@L6AMEWk`RA6re`bGS@D_emI>=TSZa7F7m29OngGMQ$MuSie2?gxg=^CZ{0Y}-Jf zWq`PpztWkYA)B{sG2fF%U?j-z#@buzTbbOY8~g}}%Czaz>t65bXIAwk_gb1f2l?6Z zey~B{C7R}o={=+3OIT&W(0{I*j6v=oS4;rcY$?KG2`Pl-COkex=pqnnESRWvi*Sy(+|+~eQ(Vf=y7>B0$}9H6 zAlx?|U1t>D^pc$|Ve^TI1|?5jQEaEoBj9JI+0*J-jL_DfQ#4Kh8Q&`Lw5gFWH0+QkR9zoCraW)HqBhu^S)8wI1 zrIW#9FGaivD}l{3IOrjp?leIxc5E2cgo4ZyI$vsPL^sNP53;aeW4pC$)|lN*E;(%e zw#uXA_gZqgRTrOO;a(eVd3wVKzfA5lh&mTY!+rTdEBEvCEER{JVHT6n`p)9Ci7I9V zk(HRGjoRZ`gcvj#wF^SLO|hx>hvG<6hpSZ+=;*N}{1i2D+-qU)uQ=>T42XzA#f0MT z)r?3I?rbsbn(dFvf2pp{X6SSdE&if4gZ0iMxJzKbh=_eFN}2z~L>O1XLJ;W<^{MSy zPKUlRzs$CXy~Nxn3T46ilV+P3_n=G2fb$_zMhZSOA`1KnoMR#BRchzD1EVq9T;3DW z9m!z^pg$aFDj+26;XE<5J%=94X+FiagKI@V3zbbjxw>Qq*cn}zVq$iCITcaho3Xdi zmeWgzRW>2$>Wk01qx&q9bV|Chf*j`6a8BTup*!ddn|s1_dsWFK>WbSG!d2?wj?k2H zcx~((U-|poteChE$!7{CW}lPy(P2Bnj`aWN+{gQMW#yK?e@x-?Lf8L7kq6&Fe4tj$ zYa*JURW1)j$>aKq9t2HX;Jbl-+$$F9zq3RZj?LJ#SJyY>1NnsRgY^j6joEwfxt7`U z%u>YE>Q}E`4GehA>!rf{WYG+{CU~8}XDEJRd=tU?SLHJ>V1rY*xJ8R$imy z@Xy8Q07ke@*SSixH#^xk^e5?WI7vt(#d-I<&ca^DP0Vl-A3OLC#Sb-Fa`X&17MR45 zq5(*7q(u`PzboRmHAVwuwfESM($ZA+${{6~Cbw)FcC_pEwzznZUZCpAnPEO$q*qY0 zyJK<7o}LfiuPOo_F##HPsP8Md==>oHIkXvCX%IdH?kILOi9Va48gX{&$*?FfU$DZB z-WGFpuii~X#2_qjgURlVw)*ey{3GfIf#QLwKY9 z#Aa8h(+=KgEJ1BFWgdrqQ{vxV6pxW~uE=v8FY}*Yc(gPB7r$Qpzpr5w2i0unW0lgM 
zKQ|Xx#$m7KPvk#?^EsJ=OYTi9EwtY6^iBKUe{bdFdg7RK-+%w=(~L1S|Lv1}^OZOM z`TyeX&7-mG_qX9wBaKLfq9{TrQ&B{cLS-%)%TOURnTJYcE;5VA6f%ZFhB6g0Whjy& zWk@1Yg!lN?eeb<~Ydz~->wW)vp2u2y-+SMgx|U)sB$- zEN)|Hb7RroeyK!z4m$VS;a|JqIYbO|asl^X z5M@^TEsjHe5-yaCD(Y7$lmlcy0g{LR1ilosb0+7`{ShQDTh)AMt(3i<;SA)3V&@w- zW5py71~(uiC+Z#?QjPSRtEr_#aearsFP{qXWjIU@ma3 zGgB`x`9XRGv~5>15?5OqWmbK))*#Fa^Ku0#0EXp!?LY;G*31d;3RBYYxT zbRwpZ?ePj~fDYI_ex4<<3~5W#*!ZJ^e;SB#B4i^PcuY1ZhSZlg6zSK(S9;z-Vh_dS zEhPWwt*t*9CP6|YmKq@%56>N6&AeL%Q0khwT?SARi1OcuL&)=lD~_cb*^!B|s$iE@7B5>xka-w_3;0e4w0dv1 z=ly{iWCUrv1IRxBqI6-GQ_P@4)%2D@3qO!&W`h(hb|Fhtrb^-0UReW&nw#G?g-l= zupvY_YPk>)-Rm6sYlBxR*`Y;H%A}nJuQ5RPsrdJSHBI0|!+<#_qo^0q%8@}NuFCc;6*^!i_;F*vgdAN<_ov@g1sBF5e>|cXQ8}#<{+UDJT^=Q<0-|0wM$fr ze?wLil7u`>cps;$EeqAJ@Y06+_l!AHii>9NIG{D&iAvRA7Hi3lPUtf3EC0^(U4U|g z)7=wNZXaQHt}=sLl0c2m`x8&jhn@)S=P@CTB+*-jBq5$};Gq#vKvDh31M0!RAzZ0; z6#QhE0B>xAqd27)iT*}wOS6^tTH_fE!yAw=|y>5{oga9_s)G=AL&VQWMti^^(v8#dQQxKN6Wvv=C>t8N2Nvfl#ySX(Hs zdK;XkZv42x?s9ak)TBz;fg9ay_zWNu0K_(&I@We|nY9$wJ`R(K6(Ax;TswNgyx##Q z1N)Fd_#`YDA$>I|-3I917ThiQ)OSZB5J(e|fP%WnEzH&}Qp1cw8;V0FcJ?f2z4p&> z0d)@`sR23~9u(csNP3n@yLPYVYP!`uMvuo#-)*&?BGP;HjKuk~^Utth?JkmJ=*W-% z*NGq8UfzgVFfRmnM$>i*`Isa%JQ@3@Hc7eU6HO6(;3b=+Qx%kyUSY&76KUQQJv>2N z2~J7gI+`&eHX+ld=v;2hq2mw(o=+MD62#UiOD4E}Ejm)eOx(f{YRHx%a)wZo50k{A zt}JCTT25HfHJR_;y(4@LL=~4Z>$L}+NH8zO+GiI4fK!kc90v)5&c78t9%coeVBw0` zJHd_uSvW~>Uowq=T`Gh$!0B7>|Te zgDr;;--vs|6!3>nBgN!QbdJ%Z3OFfs;hl&g$8EB-)^d9k3TW2Q&2V@ks zPq)=VB6aS|>shBw8${FF0_c|2}JxCgpLt=PsZs+2PLX*jicSROU+^|@f=`1Irx?p2yK98R}ms0ry!ejEF z#$a*~^GcAz3WqchbtX!EOYk2TrsgmL@L(i5Jp3H2CoZC_G{yll2@oPCsPZAE=v(4C zvv|foiDGtS+Y9WJ;&1AIgP;(DG>?HK-yLzgk%?_zAHa9Ez1S?}B5Kf2!}I3A{7)lu zkJ!Cd`oT4}wNK97HR2-ZKbElf=eWbRK|X{~i1l;+!nH*HBp(8j*v6%4k=LfvCJp8;dQGBkk!rqH&%DqH$%vt@t~p-zL13B+>#*ss_lQC2jMDd5QC}&>!`*ek+w3F0UQS#V_4xO`c=J}(1%uMrLP){IQn3cX4?xmY)9tj=S%ii z1X@9(Bp>2?^XA?fW|I4`AUS#}As>wZ`~F5Wn*rNjfpF1Xw*te_@*%%)JTD;S9KE7-XB(&%u0DO#ET zPLBD8pwSnfWJ4SYHYQTs*dut9GDm;BAahQF**Et0y(H!b@*ynyFI|kA3lPFS_3F1a 
zwQTq|5O`K@3g|&J!W18o8>4!1!@yYXyImj$S4ahwd@4XW-W=@-5BkCUIi%8u?;NvE zwn>Ng76>xM@Tbs#<{04O8&g*l>6*Z56&m;U5KIf-doYeWU_NKsFVZAQ(3l(Z z#HoiU*if&w@4Jcna=IHl8IJh`W&l8D2RcTLk8^J&HhvC!#k~8BSDaHHUFtZ*1_-@^3dQj0 zVXG_ZLidTCfF1ihEd3@}o@^~Y^es+RaiBrvA!D&ID}HM#vlEh{o&z3u%;?sv*%Q)W zFh3C?NifgWqr#7RF2%GLVqo`1m^GTtz33D0=wQS|FGvLD9Y7hxQhz|iw2qb4YQ zm6m}rCUYMUQk1gB+iIgM|yPD32t z_FBE~(Q{3UxT4Tq6o0w>9XKPBUcp9(p#5lk#J=JQ{%+GjskqrT_QDd$=FMk_&c!fk zMxMX~sqCf?=;vZziKAD06f^}@`$yJ)s@ua;3$L*+{eISO&-Y^qSFbP~{IZ(Y&+wG! z6|Ntz6KmoOcXLPgh=m&m`o;vBE3{<`AmQn)6AEWm`m)q#wd>QZepl?1cpb#crGLKE zyB>8p>g=**?&Wc{M?Sr?Ym{>Q)cE<`uZAeQ{{H^MNDoz@?Nf3C47*gQKD=k`N14x) zp##w&dol+it_;in8dh(*m8+$Gq7s{e`$c0C5)#y00ehX)`GlWWiwaQ>;0gWAs@vH_ zADwb8IX(H~(ZN=$?+Ti-wz3K$gGa-lt@C}cU0Gz(&GLHU(k z)*!0}kYTu_-eGkutaaaGH5kCQYZDvoWO~ePGr!dy=%!`M8yNQx2w=dzX!}n)&v6xY=8IUx{tn;AM>IM(s@`Ezygs>72?RQK-=9K zy6Tg)FJB%XIMK5LnI~)>{(Qxgj?PAJ@ryJRyk>rL(K19yTJ$+$pAS;Fu$UY2l3iN!vV8VpxL2ry0`hcQ+bv1wCXQtRwXU zwnIB!k+F6yOO&FN7fR=6U0vEHrSywHl4{{Ws@qpRxe>K!aLPQ70>fiiF|UGKZQvYL zH~qZ=z35^J$USzpPl|Qjb_Ts39SR&Qg$aMN zhM{zy-wvUC-)$6W?Z|>Xprv8qEPPq(ZPE6E@R~KT}qiBs4 zC#Slqq{s3lOYRFm!IicrShW>-fQBM_S6tb$e0sZ1RI8rcZ1RNb-UL~&VQsw|5U=j= z`2mc&7y)soLYEJPnV@DRE7R@WcY0TVJhhpdae$HWQgnt_GBPrr4)ZeeZCKnjAe#Sz z3(Xrd)eNyoph=zG>a6LvMF}| z^7t=Rcv6J1&o@)^4S}O<;2cd=VBu{>VF1T$2JAm!97>}i`uWnhbE?na6fp#~E!vDf zRtaK?uC;#DWt?!L$`821Vq%Nt$7uvcmQ-5PCZ0|m%reHh`}-o}-6 zzQ5i$pweEQooGN)eMFAu%9SikZ~EgwO#}d{SbtZ_m3uDpXNuBoJm`yfX1Fh+Ff*&y zeQZqwrwAB0vR+ljEk=>7>G6zl)25wsnO~Qkq;7?VZl7~2ahe^DivTZvkT#)HS&At! 
zz87kbp-;ZBp~VK{-HM)=7EIzjp%~-^mNvw4D@EZ&oqkPlQABZ&I>&kM)GJ63lUCYV z>>4I^TTtQZDvwow`n2xz0o$sos=>D#7=m-x z-#ne{ZVfuG)2ZM38sG{z{ZeoRH#?#M|McY`%oMx%`1v(_Z+6wF*648Z-2`I9DJ8{* zl7R2wgT5MXnHKxa#VZiBK*2I#4+xZCywWOeD^?$B7t!EfHXrc{E67_SUp=-RSfT&K ziAKim-af1o#OJWKvpa-}g@%c%64eb~7;lr< z>t3^BV&sZ`{yNc1nCOvXeOF=Q78^Y^Gc!JT=VJm1C(8%*PP)KG1_qk=3ubF!vKBUl z8~YkQi)7|L%%R$|BjM=p@TAwCFeu}ty>ac@)5^+2rg3KPJ-3PEM-?VM#)9La&mx%n zipvM%EPaYq=)&{)4O?jXKYdDRt3i?U4HSry%SHGezWkukrR;^%oq*E~lm%l0%ZOeF z4UC+&<)HW~nY6h+;1kOzlq?kBj$Hz~a0?_iB1E?`5gQFzG&J^>{yxuNPewHvtgF7^ zO03B{KU9AnKJ{C}1rywSbc-$edHMwyhIS$Y0a2+0WM44zKwpwhujA+-ZY?n>#J;gA z+!>7y7(j0UDML{j=-%H>DGF=w3+i`30y&z`Ua|1$lNFj7MZtq-0lZRPkj9WjqB1R8 zE71@tk?>&|&_u5xCPX*4;=W$V9*l5O6sWt-p_qI4hY?V(*M%0Fh}1O?%{)7@_#&0b zR~L?BH6e)~qA=$ns%X)0^<#$Fyb7g2a2LGeyDba-H7_)@Fyjh zbH1)ke*_vLwdrIWTMz*|r^L41OIMEhCAZ$29xV|oV0*HAGuh#KfVtmF6^HK!4Y2=~ ztn-0q7ZHHxl}QQ-Zr{E{rbTqCpF3(o!SBu-oaw8ItH$zpya!CH8TCj41j*R1`1Mn0_xu3oQ#>o0VNj-M zr^$k}SyZ-ufOz3b!IPe(IU~TDp>{ z36Zksf_}RVt$Ye?Pnv)oe04$pKEoQX=(s6laPdoDn1T8wYMksfw?5=?J)Z9b(?i@k z8X4>^JF3#YZvA>LJQsqL4}RYlg;IuqBdpkjuwxQim;{l5bk3E)@9-Yp9EKld??61H z9TyR?14DR-u>5jJjfXWfG@>jZZuZCRq>DPnjYOW@HjV$|3xFqFdjZlTJg~ZxaNPmR zP0Qi!G~RbD5)#J?P#gV(V4s)r0Z~K8;f;v*2my8-VZTSeV8Df<@95~jlAMwTkBp32 z;0^fg$Btuh_km)IV;Z7mWVu6uW!@x zf1@5YL@2t4kNOO;MbddfE_H4P=c`j%T0EHItU^wzBEn~imNJBdmJ>8B<`4bR1&x1s zBVsfCRnG=U+c+^9dHneCQ4{F%G7BvZRTqHfI%{RM3-IRlkG4hhvOZ|HS{_*$z|vQc zFe_@)s=dsQz_iiG{2sOPL_*JnXs!j3C2cJZiGuoX>{2Phvi6NAKNyg{Tnb`CLO6lG zYy%+44Ifp0fnMOJ7?%!r0^PBc8YdJZEe9zGypabL-P-m93BO|uE(Fkd7|?ShV0fnK z1CUKKY}sO9R*K6%j*sRGP?#3igChS$S^xkSN#^Z+An^|4d8-DVOjeP#7Nh$Rg6Ib; z5+OntKYPoCi-m8)&Iv{!0L_u>XzAz}C>WL(9fi%64mXULP*;C|R%BEO5xN>XvD(5r zr&-^s+~v4$76pul+KPhg8olgpx;a0vmh^xX=nD%ApVieVPC{dcP3h`x5FK)2do?Mt zu_ezJ&9wO{+2N$rRDa^oi}KbF<%>V$W4^$z$XbfMILggPr3!q8j zN!S9(`Z#R8E@&MS3?%<;CTry5ck zjV>+BPEcC`XJ$K;uZP&A8cd)Zl*(1$i{+5PtB}tFx_TkTX@SeopI;B!%nOcjD>9E9 zDBrZX-{`-uWoBj)4FS0qg8-0o9zEKK=DZJu+rF{=44SKfS>G0{n)-F#!a@MEFp0VD zhpNOlyvaF0ief}3VF6SzBUXA!hJUQh6 
zX;$NzCC?FdLzP(D+cV)ITYinzIxLuQrOD-z@j4xoi zaBU*NCZPhU{5Uqm)4{v+!|*WQW;V8K&^EyUKC#OM&@subj7a_)h-FwLM($zS6-!=| z%$$qIEBE5aacMZr84UQTeCUJjfv z?9=NOD0?=bKL`XIOzNTjq=_ycKR;Pxch6m{B z;XqD*EL*6=)wqRabHHjr&Thc9Xtvzp>m#~bPIs~2*B`JR(L2Nl|cj)(gk z(?p}qe!$HKmCNJe;%AUbMBBb%p7Psi#ot(8e+ZN!<}L1z%ln`Q0tDv;|4!USrBP)K z4JLL;CohbgB!VF@`C&k^i~IHX^s|^i&?COG`&v9}lo@I>w_ho#106y-@_-Tw0x%pgQ}{J$?LU%6fM-)}K6u&ln_ z{NL{xiyo6*_n&VDx8Rcg_nZIgU(^`F=7yamEC2kQ@%9a2Jv4Yv_d2m~j1Q^D8u6BR z52N9ir3J;p1|ZJs=FPQO`|?3mTLKv}c^EeDkKL zVMdmgXl5|&LqVQPLuRa=VY~!GlQ$tmU5%{u7zi0+H$<5GnBBJgInDR`{@yU-?f8>F zQN1v%U%!+>nJswW_qRj9OrJixHz>@HGk_MQ60MO9Tx-di79iW#k8vLY{#H|%p_y@= zZXNelTtcJ^8PF9i)8~N(s4dbaw$128Hv!btI5!6{WDP*Q2=<$s_O5~Y^GZ|{OBuXq zOpE9jK?U*@Z!T>3?taHL$nvzSR(T)e-r3vu=X9xF`}=%9r~POv0S6%-u6UGD{@}1k zF`0G;J7zKZ zby6+16L(#@{pW5RTlM#^$Xma!pcxuNT{Ud~y$nhvWPhEJiS=T9P7r61+NU)SSM&`0 zUXwfESW^i>M`(E16VpEY_2fcqklGE`+~he}r<|5>Iq~Ot3G@B?pt5M8{J^LfpA;(b zj=R-s-T)&j`_p0w+=v_-iIuVZO{j?c@+daseSVygUi@HjmF|uRFp5CR~V-%Eg-2F zffWAs?VAeU-H?zgw{Fo>GSIy#x}vGP78@Jeng?xPMNQ2zl<5`V+Zi@*4!}po`gVes z3`r8_%vYg1ig|c=D!`~&W5J}P(>D??fZpPuFTcm215B`FGn3sb!dV}r_a}%jZ}fRXgEt)g z@vxAU%dphG|91DE=VjH^zZK!)hYuef0x_Aq{h|$~lwOxO?t&m_C50mC28{6~rKCQL zj;>Z#Rt}4q2s?uJL;^8{4Cr3l(lu8C3w>T+znId}(t3~GbyXnqDZJVQ{WC=ATXVKpw= zyX4%L&_15QXGVt_5#A1(5UE8Laq{cCQh(4B6i>82@$Q7YIH=|a31mkd|H*n!0WyDs z=tq%G{y2PA0ZQ!RYDdG}0%vI0Ni7GoJTQF7$w?d`ofc0`!(HhunC{6bER@G)DZ*Xg zxkvz9N*)iyc(PTgus!j}T70g4Jo(x^{4H3Hg`~QS!b+Mx%mXNnU?;sMVOERWAB@QG?KNxTDe!esQmPl8P0oO-?+g?YcxobrO;!NyUTbG(mnBax4Lhlnb30taLvBV5x>MMF@!0Pl&AZP0h{y z@f4GE2!Q(t1RN)4JAqJ;H;4oPu7u~I8)}4RTm;||*rlXvBE5~1GY8;MzFGSU40j-$ ztj&P~5_LGSU?b3K3?8J-J?qCvq!w(_L-WI57?W;+=to7h7jRpDLUKh>z~`S492H`Q z`^RU-x*x4YjTUYh^aeiC$KXZ_J*y0&KQi78Om6fclEZFp{xY8I4EH$FvK^KF=f)jG z|1Sh(6QQ?LT~KxqBptdapIv1?7eQ&!Y(9%|@@IGjp)(Z}aRO8HRd;oDUA1Y?Dxe9s z!=eAC#UYjl&fQ+P7oae8*mo3Ab8*8cfW@<*eJDTqx$tAMjw2nbiS$k9lYaPXqX@a z9(Yh_C{#=WWItpkC_9oSkP&YDEjCsVkz4xR+qbV0*fijJkdIFVZa|p4v~uje^$6Lg 
zhAP2YhqBk=Xu!W-(bt5cg|e5mU=&-`7BI%fm-s?YPY=$P2E=;nvBSs){W3RSsLIbk z)(f;~*LMj-KF)i2<2BwSG06+VQ{@bh#4|aRYY#IJG)PY&&+O3eTDa>_b^fE}`_oFJ-OMr)emtuvE{LBH9~5pQ=cA zf$%?c7i8w_bZpcz7uY zC{jUbcwkBpsVPCN6m0i435nCDr!jd{Gx}rb)sZ0^Tw2XCjW?L&Yt`TYc*fUcq<|Xo z8dA;VPh5RPUmNm)TDPS3llD(o7zzU1+l8lS(5*3qt6T2D2D+ou>CodobS=413?sN1 zq*@^NFk-h0YiCA`v;>AAQ!Snh``wHdKtc!nLmChCAIl)LBzlKB`_GqC^8ESH#?_=L zGp7DV10(N&a{a`f-a{k8ol&@_OtElmmW||L)|AmXt1@(Q_`@ z+fc5BeH_EY=J?NXBU61tltIXf2`D*G<+q9VTrH0IIV9Yh1LRaqnlwG@*IxrQI-I8@ zpZb+q3_A^3(jL%n+LR%@qU+hSdky`MfEMC?Thq-QM`!-1yz4~ z&S09q4W$MhcD?|k^+M5o^wmn>aeah9+All_W^x}_f*98!1y`_5krH;?WC%WxZU)oQ zfEhXX`BzELO|Fv<+UyG+D&wuV(7t`z$ytW)j4gpm%Z6?FTiY)S2iAvr{aI7fT4deU;yCQsCWt*i zQ$lnqjwJLlstV23IC42oyX?@it6Z3KG{mk8 z<~-uLKxO3TZiyo zoF)eu2#QGxMIhU0zY%qJ<%GgX1sAY~tGeh<8CAU$S; zib+%3^6c5mXu;w!CICs%Gh(gIe>!goDx;7(NzeJsNSUohpcTV@6tMLOdaLYT?^C>I zO7nNhjrA{Hy}~w#x_(2%1&26~6mjx+m_QcSULr+e3ZL{}Of+CS`W-autr?atetDp- zGxZTH4XNAjewCY)X~ZTYTIn?~Y`EE%ir(EFFX{3I@;XyCO78ICwAgcrNyuwq!B23? z)D&GJPML6-VjF53dpFm*g=7gVa)+9Dn-nBL*CEnTi^8$pzGH`Z&pAUwPR!!^fZfU* zAr~k3+&@J@3+5m2XeQg0qO4H##7Ad?Y2EcNC&LmfSwlj6bq+s!2g929?vo8~+!p5D zGeh!G;1RZO^6}3>2h`Nb68A3O+g`Azwg!n(GaMeY@;$%Pa(dk1Ku*(9k$*1OAWGxNy;kfn z;7D@!wZLO~S-RLubw0PCo>RH~=VuxOl zXzyC{pgV|&5^V&@i{_o8P^7DAcb2+3hJC&OCI#n^@1Zm*=}N%8;CTZuIAn3@GWiU3 z6@QDKM89~GlFppky)S=O3nX@)5rJf8FT%S zE$-e35WE7g!xF-neS5gFqT;hV5_SX@L%m8ycNIrWgjo)N#wSg&c6fBOAE_cSQf=1V zfJzQmDhIlK6c}f5FP?DlkTZArxm+#>8);fV<_1G`Dt4D4ZG#=>mK-$Ql2P2b5xjiU`ALu+< z1F$6|qzOxmi4YY09!Eqyx~iKPAsD*ujkk$#%uKdw3D=#D#k(R66|#GHIIN9mFpJ_< zS}Fz3``fn4Wel^DuT-d8GWIXHE!0$EAgIE3=YE>nC6h93Y$ppmLX^Ey&Su;Ji57y=L_A!bFa z9UQI=?_m8CAU7VKAdozH)Pd+U2Sf!e*^le8q6}#LtKW-uZxBp44n7X ztBcXPYNQzLUQ%t(3X6e*xUU3`L9tGfD4)6=k>8R&A4#tL1czE)Px;)a6*Dq1fXo1d zm2Id#Ot$)9n^#p zs_TpT`rBoc%9D3ECg`xQ1Maq8#jCGgZIe1;&yMH-q(Bz1E!h#<5Ag=D#)Qdir}7eq zk?UB}77qZ;q!43^){~>%8_Cvjf<8mKPsBO6=V5hGxFSeL=;jVVBiH6Jjujkoax?u$ z|L{$VT_FVo+|~^8JS=Uy*S+8zK#V^A{)f=&uUN4Hz5x`P5dxQVbQChW=!xNK_oFv; 
zn*S|vCpH$uqFKHLcGwVshDp{CP(vWOTX*bOj@t(H$kp4o)r$(lVq*iLVt@j(0yk&P znl;w8wrZ}w2e;wpP?M1bf%^LF=wmLUmgNcD8sz5}!r0)q{UJ!|$?}wznwn6wb^xij z0m33LY;W&PUU)`LMa2u1AzoNUFrxD5QySC~D6dG@eBVv{FY5Um@Tp?#T`eIt)b2-? z{upkLKRQ!fRK()01D`1~t4kNGtk$A?xoTODC=-;pBKL5X2 zWWo$ix-iJfaNlNm!jA}4VOKlio$b(0s8@(Z<&GSwY;0Tut;Yo-O)LU1M)D0b&)MGk zA-ub*ryWgx3<14)?EIY@XO@AUo?5xmBOvwE;_r(m_r1@ZmDZg3!WB_Z7z5z2n%dmA zU+g9b3gkk`9dkq+429b3uC8Yd4=%_!Avh6i0OU$Ms?M4(Gv-Ekx?UIA?LZK}6&dM+ z6S-vhawD(;1AIzyVM?@@@suLjt7c5#ctL3CjTN&%X_n*EZJWNOzY+LvEwIXDyCdk4 zJ${cLd4fsEupTkt-TFH;juT7==rIZXcycSMtCy0V7cmF@-3|Pj5fBQe{=mIS6IM^g zmjOq(3^Gl_b}a2)q#^&_ut+j)cM+5`a78(2CxDn*_f^qBQIRA&dr;FKSpoCv*MORq zb_021-XF6y4^AOlDFwNM8khJXhQFBOAtWu6y;xl`HOh}qitPseHh|U@kX)iZ;D8nv z`&^#FI>`@#Uu^_VP3(9`3EzX#;VN9swsLbXLp62*)X36N>_T5%kJXmjB33enH?-8Dwb?=s@%cL;Zw^wg= z*f#BuFjC|O@P()?2z5rXBY4gVB)c3;AK6Y_)p!5Pej>`#u3TA(axE9t6`2V@3_>{5 zn87-Gf9PKUvn?j98~F)9#bEX7;^z!Ky!4l+IGqq-xfcn}W(C!UPjoN+bh$?(P#_r@R4^0LB*8I-Ooj79I}Gp|N~b z_jOHJB`R{B(r}1cc?-T^aK#&Om)eTXJ7&f3*%O7{~$gtdI>H}a$d^BOt2}+f-w6pm_AKxm_T>Mh+VUb zyk6&-_^ZxT{iaxMr&Y<}Olks~Nui@S@qj^g7{*SgN2XX9n3)N9O;zAQD@cz(e-H4& z5avfUU*e3R@WKrA7Ccp_$mIbmLQ!Cn!X-8a37Vu055G!X3-ca={bIm2JFn!>K)h3v zwzf7WOo7|-P#@Hw^`R&LK(*Mm67a5kqE*at#P;Gy>z-bb$;`{eUXrG6?aS*GgAq-kULFwhXbTZUWB+SXsFZb;& zp(7D?zwkEE246+2CH5ku+$XmWrmV|}9!u@Cy7Sy~QFQ)_p5?e@CgIalQ`HzaUPj5s z#N7QwBh0Pt>xHK6JxP&w*ka zy4OC;T0oH{V!L)KRrzhlkJlXqcpMd{i(~gY9Uq^0(xLeRPAG^}tB_TIXs0!}<}*!W zwblm|Th!04lcMT=J5qkm?=ik>FA=dt+G8`7^sDvd%a`MDF3?H7!&~ZM9L-c~4H0Sb zL01#B_B3anY#05$*qA?sX`SvZ<~*AN+J_qFjIfZEMi%n z^?8XAG?z8NgJ_-{_#D(Z_49+nl_6|(*iFWOC};(;PS9PTE?^vZa<4(Rp9$Z16_b(k zc%*uZf;TQI4FyG)mYKw#+XpI<0_jZIyBfTC{Tf3m9`3dOJ>pSUPI1z?qcME_<_!wj zHAr+U;5lh1u;t4M*OdDXH9C>Fx|dwMh>5~w6p;zEwB*Vbnyz4)9sT&x8+;<`1AYe0 z1J#d8ynbwH#Es{ivl!lcVfVSW9(ql5HP-N6OJu5`X7VDh^|Lk`1t{@7G>BY@t9}1_Hbs_Z%Q2F0Gzc>^egOd*3X*Qh3*{tQVI8AVFhlX+ z0q|Gv-|{HSIyAn1mW`QN>r?)Y;!v)8b&lZFsl(q7f)|ZAeE<@3=m03f9iD*zQhwCH zndP4wrXm>ubVUAFjT@;-ov zg^Y^~5o3215RS+zMgTWpgRxe$V-rB$j&A3Pk94GE1EfnBCcqMWQ=fSOw6yMhjefXQ 
zIUNZQh&imf?CjJP-U2{6i}!H!`0;8;NEAn5yAO#o={z(#p!n_wr}7knQPPGSyCcr9 zVZ#QsQo>{cX}k_u6sG5{0gYq5*y}Iy(M`Gdz+U_j#O1d1yT$aff99O;m4sb;cJ@iU7rQo|&3oVQ4oqH($CW`5eIwh8dQPIXA=VEL&U%KvY1M^wZpuTO zlGtWeBfweK0<)ldE{<1B43%&KhAZWn!^)91Q)>X7S!otaHf&S_@Xd4^$MkQ6#B zp3)qY60MTCyhvqZMTZ`FwR(>On4?prnjM{I|FbEWq~2^UbERL z&;>tdX71rN`ML!`i-S(i!GsI`9W>*#NRHR(7a?oP2Bs4wAtiH;VhE70}b%ib`UQ^AckA@&cfNa8H#_@yt!?o;OJOLu}QxO#rk2w zM8XpuwfsGF$KuWt+X+#L@2K6iyN2=$9w|%N+1Vd^%#r;~c`LfF;nGLMH?yXNcB2|aua_=tISJ!9|TdH{n z6y#A)zWw0YTKS>N?p@4dauOa=;T9n3$HJeIHy8T8wGZE4Ct1@MSbYk zDYx8h-nYU9^q(-pLV;&`_@t&} zYS?GiTuC9--tVVRyYKlVky*E{`XbE8D0)&1YF5Mldn>we$nxV_Cs1tD$WZhs9W>Km zio%bWaoEMhC2cen;{br|Rsi$MMJVuULbkM;)$mdK%sczPS2C^TXjy56!?LFJ^d1kkJ*K z#?U%|( zO8Ed!Oz-Ig+DjTpr70zLSHNl;s4CQQtJOL~gHmBK`=PIo6Jd(Wo4yYy&LS_Ous_*- z8pFqAkOV}2jM^{Yg)}nIo22!m_Cd6L7@Ykzpr(kMH(HAUP3j||U6|5OS%5~upXt0Z zR@IulL66~wI+;vd2%!xVzS}=hwdGKiG)KOFcNb`du=c{T(eS+|SZ1Y|v2mO7!7`3< zw6n=me*HrLj1}wa_icEPnYolYYhjT&!8%+S*+i4g%l9?<3LmjD&&kgZ)&Gt(o@k|h zSIf{W_oc==b=eCWmFr4Kz--Qaf`?_s7jMBAb;q9}UwTlfhS-8;Ho|Kd1V9UY-{qfx zc@LjGn~;BC$BrZD8p&)D=pD`B5m&*DvOY*TE2%ytgPjF;va+|7>O&5(LoWlH2y{$i zbp{g-pD!4CDAZ)C`4TE9mmPom6uh;OZKiNU~0HyUs)vC0Br;M9iXwo}kaQ@mc z?sa2Nz6XKCQ;ouznGcNCMr0btkG$IuVy9~^hZ=-xryz{vGoI*p34}olw zWKz6&B~)3gkFr-9NiBbpa{smYigacSK!gS@?0x4{IoSPbZ+N8kMUk-SQ2MUw+Y?`{ zOsZ_J0hqv)7p#f-MiBe;hRb#uuA%;nc3H4M%z!~H0I7Tr`qnAU#KC!P za0;_a%tKmaj}2HEa-|8IM8v1sdtHBAESOsDERgUm{p0ew*YgI*SrBf6C<$@ns#z$Q$+QCo@6TG#|a28kk+A<~%Q2oC&G zip;qq2B8gX{r(Mlll%E(3nnqz6?LgrnI)z(fxem+_2Hk3exXmSdz-Zf<9l2bE#Pei z(CxWz0R``(AU%`k5io!+5w(>JreVXe#_%<7Z(8sls@O6RHZ{|Rl&H`Wfe4{!!nBbF z*g(R`ZWamKq`FH8Lny%1SR8s)_;~co2@l`f9Nq>=5{---*1@oLbC~$4?gMtiglu=} z*Ivkuhyo-RO>Dt7Z&6<5-$HVjhJW-}uOnAa?X<;6x21D`s8H`!vbKoY3cxUPB01+7 zsz;)r1j2hOe8`sn{wx*0)9Ao|js|gwv6sZi*()549e<8S@^t8)@uVlS8Rnj=8S4Gj z-6TpN=?Zf|o>&j$m=h+<7J7#p#((_SfY4%Ak71b=6emWlOpD4s+j4lLSbboB3{a0L zf9eR5)|SJ<%@FEhoc`d9BVY=Ag6j*l@7!yTXeKW#UN{z_aMjye!_SNUsFyeA%7mlT z!6l)5_v}y8I~<-aF{~}*Gdyk6S-1h@0L?)^(e<=aix#QMGMHYLVZAK+e0$W#Uvp2b 
zvP7an(xRfiIhX5~8GpS4)14jD2c{cD92>*P7$ya&*VUHd#?4BD8CJh&T%`@VHYtyE zi@iMEDZDC&NE~{_L-q90Eu) zCbJ|bM}FD~Ocb_ku_h8Q zMy+rTf&u1sF&NwqOd$y^^+JSELMiy>(y+LBFVaauC^4m~w$SDqIohjb*J)w_7G^sf z<*4UIH>J+yewmx@-9;s#0I17D;|v$9G>19Vjfwj=j_P&Z4%(roW8W+IX<&d?x~F|h zWK4IEcIKX#3UCnxRHx@BweM?e2>*pAWgRs!oRJ+Zkl4L|klt3TJo(vcu@{=mO4F%* zte%^t^c^nxb{Bwa?66QS4XWYoxuj>aCM7j>XL-kNhcy7r>VSV#W1QywA^*%;Su?Zr z>bb(}88co+e#LC+Dm^S>W}y>~r_0=mG)Kt2CmTF~U<82tQ6+my(finUC^ZegiZ1wHKfU{0Ki-aHgBDP9N`?<2GcrYjEF#%0FxByMLz9 zl3-~o8B+|4R@)Z-o*S%5l$II-C44O08D?}ZPpA$zj|YQ_vfn}(`1rWf`XLn@W&Zn)o!LVt^X)?jzlr+ankv+$AWhpXl+nxaH%ho{VKjrH8WO2(Oy<+ju<^Z}=T>x>i{ zSr)U+35TaNlX1S*8|4f0B`<^#?l!)kQNG^be*5@!`_7eA9$J{x_K}9yI}I1c&6M93 z{&-zjU}-+=AbKLI?}6PnR70n__5gi$h7a%njM8tz9_=WAyd-2!Vbtw!itJLKx-=fx z@a4mU1Meg6S=KUXI6|QtR*uB)nyHO~;sAy@IH5kRD=uEp;0psd^Bj{w0UZzbhPQqE z`ud!uj)VIN-`5+IpNNHzFc#0$YN74;2*1C=pQlwe+9Be;thobXf2Yk0UBe@_T|!r$ z-*vR=#qNg<^>6H?;y2xxcWV|KYwP@l%V>G&z+m(pq44{=e$gC}HrO{Z!_n8%DSF;u z%Y#7Qpf4FE*{1LApJVJ=tIrR)HrB&l#`)Iygyjmvw`3*t6U;U4=N9r%K86K52uHI^ zrPvThQSiD7gm~k&3DEI1)Zm+kJJ4Ue1SZzKhmt{m!~gYW8I+i;)NZ6S;?mYrDCRI+ zXf>)X5sUjdik+_&wY9bBd$iR%y2@ApAXK|ND8N(zC4;*Q->~x!2h!8izrnkG5k(B^ z`4^y+2r~)q*jpX!Fje&3DcuxW0m=*JjoQC;bnM;%bEBh(Hs_%UTuoO~k8#RcfF1aE z4`U$W#XN^m@qxt`{VfV?&V5<>MOuHUoJesp*nZp0xWQPg4dl)Z6yaS-qDrDF#jQgP?&%0c60YM)2XK~W4( zA?4W(yKt)WCNcPDX-jK*r=@ng#3-idN86M{a=C?hqq|k$0CMp&NDIO)`OxAa35Mkh z^rL4n@{W;EVQuAdtA^@3n8lZeD{BST$w)eKkj!zIh)m!l%iof0}G*+V0VXUu}uBuCKcCLvCZx6?Tjoerk4&} za4&xG0{&~;3BQWQ+yDE)&mTYX^jaQ^+4J&Z5e9i^&p5|zoO3osMcA#~&1dT#V&5b8 z33ZUDOL=jzcGS75wyN(l_UbpOjya}^sM)NTt7h1$QefHHl~*|V5(W+CJ(d=J{{CU} zWHi4z59dj}ZrG#aZ2e5o#coDNc4aZLOY2Q496z-R*z9@>NoaJ4ntc&5e^rlPYAaEA zEc4u(u7?Y>boZQ$w5x>3yw7XTCtd3vWlG^Lc1OFg1*)frk8EaQAp4G~PoL97symE6 zpd!gHwltT>SSy}#e^zK|do1h#cs={U55;sY0DH}NumBWPhu>V8KkY?N?n^b-;h8&m zVsTF9{GD&0`;^3jsa7bflbdM1uie~MeFw8s6(47@&!8^Uxqz*tBZxsx4vTvqM89f_ z8=>jZvahv9dTI#g6%^bvyVcyikyWYf#&0i?0|xv0|9JpCzdq!Gd31K1{qgVIVq@Mv 
zx^xuaYe6)f8D(dqAQPnG@872-22l9d{8^3wh6Z|3!tXxK4MGz_^`&Qi91wI86ZqA0@D!y$``&&z9# z?}f%a)Dv>-m&Q^vwGTytqBb4GR5UzFS637P5PrS#e|b;a%nBY~8fB%n9ysvMuzT{2 z3;*Dj8!16cUB$_5eVmn5!=&+Oa^W1h%RSlUSa3YhORVeO*#4*qiGIu0-B6I4)x=v- z=+yTzS9b>&*HYk)m4F5@Hj_3vjg}#>Zn$={o#P2864_z62X3wgGXJoFl*Yy8wX~Eq zI*5TW!=V!wT(g$j_cu#l7wB0JE0smkh7{JJ(hqsy5EQJ#TT$-q=8D zDEiaOCwu5h5f{OsCPIK#5RM9gAJTprYFF7$PT)1x@Z*gZ$o3p5wTxGJh0=$j0P5D! zp?$v&!Q8<6&VqFmmach$9KN~_-U>csYm2c9*b$HBBs*TVmio zwVpbIso}Z_=oEXGZCNteRD{3!#<-*d7Iq`mF{_3ao-{=%TsU8&KX<9i% z%(U*Ve;?gROuXaH&{TYEihXJGBlpbO^HVgnOr7oRkynO0;1izwyWuB_zI^97aMF|v z7)`i5HiZA1Jkz8coKFl{bw@k(S>)SqIBcnpZOM7zu_}(6GJTZ&BUJM!7*P38N zr|XfGmF*^?^|GG(yntw{(P0kVP+fg}m~CMIgO_w_ABa=2{g;-0DPeC8tbAkSa6W)* zSjnTJr75DZ*n@iKe%!A8b6J<+y=}9@ba$197iS-rg`Xu0bn1m>`q7ZynUocwJJy6-&sVE3=lBmtAm(M{)HPBSqw zUO}l&^VnlYsRYon!=|RZXmYPYmaKQ09U|`~MIj-K{@tx+1(C3j$Sd16GHmyGJZX#! z-9Zq5aaL<=n_v9cL((1X6EoZ`UyMf1ewxQqQTNCf(O5Iw6IBqjL!lGeJe&zZf~>_PLa-W_$2Aw7*IMhnm^@KAZFQZd$C*XQ2{FA)MGXs60{V|1r}M) zxjmGV4w9JF%DM6IKlQF(V4!AD3HC@aK?HQ8eIz9%g|%nsoUc@F61QXR9=p-B5xW&R zQgKe|{JoEDwt?>Og5TyAirVFwGan1+$_j%T2RJlb|%$r~`&tGdKtSsvrD1mF3yLOn1w3 zH(brfP(iG&GZSUKYD=l;>jy2&mM>H$-aKHIqkH7hu>~FbSd>;g(wknzO0otngWO|p z*CpHw;?Sri{n(+S46H9pD6fD30UqPsSmK*VH5@21)_6zMl#z;Ls{-fBipdB;JQc*F zFVRaB)hbni;`60tKZaQVeQqYZepacl>7A;Lt-XDqrx<15L5Q*_-EwMXcGd_J8{ChK zzNQux&x7d`gJHz7_cN7eG3_e^li{m*#U&?*b#cybV6n{mFFmlmW5>5 z9QG9v7x}TzmOR{l^Kv+OxM%LTuYupsCaU>wew$bzj$co#%PIZJhEOnKmlfpM#^OT7{3q&t>cpfA}muUKaND-1uC9h|4tF0jvZf5Mi!S zc09rNms6lHI42q4WH60SP6nXIa91s11N}X;58e{hEE2*A%*J5NkNy+Gp43*L6Kg~A*dGUYnAfyG`;BPpIsSqC+aB@`xQ?xQ=@sckf zz%7y>2EX+5#I6~g6lq-FM?_&CxSnDFK8iF&ZSr3F)Es7#Y@j9}HasIKNgUZs5RR0# zm~olLUAA8LZt(%TDy5#Dp8Ig&gXAR*+#Cb@DtzZ*IK$zeZReK2WdPv2E`oEOvbAw zqy!T%u%OyfMH0T3>IRWU&8#e3<8c>zYLcO2iv=n*CdGo?flM1V$e?WLL9MjjOGl|P&Y-b@~g zcG3A5YVZ6^fTWhAvre5!7sVb{o8GS`l`cvMk{9y>>BW|VQ0geO?1TlaCR%3$03Njf zJ9zbKW>)yX49v`2SXW-C8Nr3~Ey5r%f-clt%;W`FxnkwY`=}4K(L9CebVls*$$v0?V@W~?w1W4Ht=G40&tmo`J;I!@wp9#N;=09yyTQ1T>ClICLhuzX1}z3!eLs?64Dy 
zRW$k9FEwHj>@o5)8vR}d*q))67y#b&TgZQwx?yE3UYHZE{%a^FfMe_ew-krhf8$ob zt2k|$E*fb!`Oc^8EK5kTXl~MPuFo@zQ-3{v_(|7Yt7j;%bR7nElSM~sNQ1d7)MDSK z;tkF(c#gMvvcGXgS!IB>JBEDi0 zL=o8zTc%1G=&SCU&5T%-3GP0xCo&$R;WE)JlZ)x!(I}93!Q=>nB|ju^@*vzBfRjxQ znconZ#Y0J>Fy2@HE8^Ge3Bc?I=sA2q>k4H{jEcJYmVopgEkgR=Z66R6WMc7iN|!Fy zMGL?#g2oD{dm$JxDAh{oq#SleO8iZc!2YnR8v-Ya?*wCeuwh zxuo0bwN6gI)~=H~njks<>xM>-m`@$-#7NrIV&500)+%$RT~$@}Gm;Z%)+I1HgdUac z*gU`{Ul5IO{cU;o>xrV|6}UBl?+fGN^1<8hRMpkHkPjV)0RvJa{R7L^P(tC7Ro9_0||^)czOs-E5ofY(P3OG13~ZeJU|@op8P@!(39%X-hy%DL z3EwemBr!ntl3<+LoIs*(PneUj(n7nu^yjBt%S!&$Kz5@JVB^NOV@Xc zsVD2aL`LC8$Q8h#V`F0_z>B#D<|p_Soq^is7Is^vUNb0qn1x?aXxu^q-eL(fXVL$= ziH1(kk?VOTJaY8(!>##DxKa})gB|B*?a5;TSxvGDmNrf9!G-i19V61>^b?;vSf}PV z?;<&6X}yo8wrEyDKFj5K#-8v+lS9G1i9^>#d1~6`Z5ssMZxNm8k0Kls(s)PL5pp}- zC}htF9&F3Jv4}8YUJ97#3Z|4oJ(B}3}{!}`hLfRUHC(mj=qzu#{~SUb*IlB%^UX*415g61e&Ns{Oi@`3QN34br*@3 z8(U;K2YPRWs6q3hf8Yh-ol7ReZJju!^h`qhMnjQtnfbwz-2x&!;xDQmNM^z*sQ_p$ z2a>qRx{HsL<5ssd7rBa)K$ig~Ka5GQoL*J_ai4b9Y6M)uoRE8tAU~8EG4eTAm}Y)Jh769unDOjs$tR@BHU6S z&F%ol&b{&7LHO{v$MZvEwnbwfQf|%W`D*}V5mE_iLl1Ory3iRSo;XC@8+Cg*QH0Uw zDg#G^S;KY2_2np7U!kw~H>$tTDg)NE`@$28XPsoa4~sqYEs)aG+(fKoF10^rM&=Ce z97%M+#cgTjpOC<<_5(x6EfC%Se>^HE*a~7ADGOoDr!;i>-2&TN=ZNGp41EUPO^b+3 zz8zfIj!{E;`+&Sky0Ndj?~ULx_5DwGFw5Xz8DoXA1FL1cPb-yC;(&pxe;H*q#FM5*!m zV|c;(*a4*2WrWKOofd?#L;gZAroDlCP3Q?XIZ{0T_%Nwdy$cptg=XVb*)s?tf{0rq z{(0Cw{iUx3|I?<;;J--o5B|$eOXt6=*DN>vmfMj{2)C5~z27))WxM##L*x=qW+I3tq0>%@(|-cYcY5S94L6#l(fEI z5QM+@U$na7bHt$%cjqnGWs!+nwweFdS-2OdU}7O!C5Vtzt@Ke-{>|hz!KDr_TZ@y6 z|NWJ395Gyz1~}SHcj}LE&!0_w^q+L-Pt(rhLDW#>g z*=250y}P@cfVQmHkm(VVMdSA8qKH9g<0mIt;X)*X_y}tZ+zm68KEzr=nMXd*Qy7z0!I<*Li1Qao_c}o#0w-Y!lp#+B7vz`@-{HUJxkD`=gz|L{IvjLj30p& zAiQAmMF7adn#>(dGPQ80$PJus{be&phOU#qK5EWJ)F` z1tYG8Q>vK>I1hP>;VM)YC^*4#Mnf+^7(uxr4}>F4GTordCT9Ur%>;$FKP> zzandlDGEKVQhfb5N})*1SIJD?&#LjrCz8!)k)_KSm#>U5Anx+)S>|>%QMjp5vAHAFrzB(*nViv63|RYL8fTjlEVXy;_z^<;I8f4DNGN1DnCTZAg-_DJaaq6=GYHggW2G& z=Q3Rq^T%<_^or15WO?XLsi_%`<)O>~2A!H44iHi}K}I(rFK;u*lQ-OR^4S+2a>?)b 
zFLG2;@F}MQ+bxSQFGcO#;AN-~X23C0~yaBjbsqmMA>tJHq9=b_RkNdp%?-Zq* z2438MWomih4q3*FjBwR7r^* z9+r>|c!7;n7u?^LY088%gX50RA2VBoxCLzIC?@Y7nMOT8#NHG(;sup<^cL1`-FoTS zB@7({Hu@Stj|fN;HBuKrCn+_APha0{y$u^a-Q=xbo*H&9CNp88=%yT}X$&Z1Uo1)#5_*Z$3%>_&0HKy6xKk9AItSp94r(SdE=;zeA81l~ zfd1n51$>z*V@=enMCYDvHUr9uXW%3#uSjtpVjNb<+%!rDd2W`&S0YcaJ%T{3MN2?(RcmbEo5v9|3*@W7QRAjT$G^B-YWci6l z1RV<`-@+7yF60ZKW%TRTMfqb?S1FJn)2mwm`y9xf1sRyUWBKO|7{1~$qJTPp$mnoP zc0jX@yj)sNP7-kwN?cNlgV9`bmRY+`eL5#D9H>9-NxNV)VktB6USHZtla${dIXG*j<1;SG<@M z{@ax85uV5c7kjXddiJ&}&MWT)%k<+u4G#4#{@B0AnI;l?3@Gcvl^&};Q6_37JM=)u zz^R3e4-1*-GC$on4m%ejk}=FCVsU-ER$5zeaX@%*28vZ`#3=Sn1t2e#;)&L>u+VkP z@QkD4T|^Q4NAEQ!x7l^jP!eb^lZz~&zQ&lJxDZG#LBHcAT8Rc}p|Vei!5q2$!R*mi zJw#%CZeX9!Ym#l6Z^vOSMddRbtQ&>24mGq0e)34$JjsZFE8={%?j2u2LBM#>j*IoM zMdytYp)d>EDX;CEoU6#pkJW+l7*X2Yeq;}9et@W))>TLH2KRuyoSb}`^S~E~%yl(G zHJk_7)>Tt-+iv%Izek3jKEhuXXhR_Gp30FX7cPo|v8t?gnCO``O;+6S zIKS)DeM}R$-hc(yNH_E@YrLI1Kyow^ z51{O6>>a`IHCT|n8vYs6d#VyG6no(d{h;xEN;mR(nTGyKLT}<POSF!Lg?$l zjCBFo)hT*KZN|P{W zY)t$L?Q^{iA9=rT{<%~&;KIcNpqotWV7-PT)rDk=_%?fQ6TLXa-3@Yl2T^JOcDdA~ zOHvr~oh&oG#A|i)r2hLG@w;*J(8~0a2^M0`n}I(m1)@ZG+0Sh(E?%Vds~6nq6Uf+; z%smpYT#b7hU7+qFBA%f9JY>l54LkwAJY2*^zB%M3Vg3I3w;`!6`tpxZNI;l#V4LGG zj}r_iejvbG03{0ER|JhpNcPL3z#UvoCo)C>-M(0;?*yA*NRw@tGZ!!_xzC~13p+0oSpanf$a8+ZdYlRz{MQ2A z@;AW5cm)$}XB8!-zkyS+VnKHH3S(y-(uxsgOFS2U>AjG=Sroug)=nGS7l(|ishv`u z#0nFDodq<6$kb`^@?Rmu!b0ERlnZ1-WD9lVMrd?#FK)E=aeVXR5qjz7lE%>r5@fMJpWkRl&4 zRG8NJVntXFc_ZC_unqf%xOExvE3)hhVR|S(ZHtVr1}#IQgB7L^gKtF}@~S_02J7=! 
zzk_vb2M(-7=sl>RrlfSP&f*I4#V7-iwhw96*G?d@O=!x2UlVEkBCQaZejewN!qw;s z05~CQ&8*;wDQG~FWOXsd>Jc9It?4|k8#(RHe~QQhKrM^3v}~izQxkX$>*Yfpg*Sd;B~=3OvqrgeEmi~L(d~0)=MPO#jmH267ECM z>V^J7$UeZIju=|>`m82QaHqbsU$ziN0=h7Gic~chf?(_wb$bW8|LHcmdDbf+is#YN zZCGQo*@ z%-bQ((8nwfn>rOAD@;ax+1}oMwh@`PzpH7c76CLl0%U`ZeOiZ1uM=!Ie(A@Tkvm}V zj@#H|Dox+LyAq4p-eEofJpVIp!t9(da_Z4grL7>w%!mbL7p75OiOhZbNbJ4Mi{@Ym zRa1N+7uLd2`Cs14PZB*h+Q5&{Ix+@>_@yJd)B?d;F)_$=$c<;%_nwiS5bo6-p6JNcI+QKNk)juDKq)j1bt6qqxU?M1Aqf%!Bzkd9c%&!H zTqGrzpeqH<*9~9_v-3Z{(8|0apfaYp?46yJ6L%Ft8sYl0xt*dAT1gf7w&rl9*x2h7Q zlK0TV1$#}sQbHZLp{g_bFe)~5SVyiOUFS*F-8?DEan*2*C21j;$)&`5363@$_fBl% z(Y{@n*lJ5XM2o(r^%M35ku(w_#;yn?qW2Kt(A)q%umkBXY8It!(*0k)_@FPU4>gXI z?amNZ)*q<2Nj86d3d7)OiF2;CH9xBSD}c30+X@-}Mlq+uQ2fxK$DP$SF=1H%t9D@T zrvh!DnnYzMfm99-NEWb}CXNn};Sm4#-pVjj%O-lzxRC5f$!y2{x(yN$9=9P^MfxK! znu0Zy5Bgd}sbF7C%w-9Z4mtKYD2kW!Zv5K5lO^N{nRbhK4aUUWC|9u5(br4>FLoU4lUt#}~ zN_iOR*4aECoHNgDqMI?hHudmvDYVvK>a^vh;WjHpB{q($_B{G-xDqp!#1EnqiO*Dw z`%6xtYT?b;c-SsXL9lm2uD=7L){vnI1j{bUak0IG{207XC1vGBNFO2CBc_yMV%w0? 
zm>Xx>#v)+@>+?#_+YRVQZp7M;n%{>o>n*_mQIf)ncsoD;dNgv`d^R|m@NWj7vJ}PW zsf0RGi^DvxL^vWTEv0l3I)!4%G!5MVngyIojt|{`njrn}ZBmq0b=-_#3FbFhM8U^7Qk$T}|NPSio}(E|6ITFxaN8_njF*pL z>i|nZR>*zw_dj2TE4&bg;$B$T8hV=l_CLD$f8ENNZE2+}-vNd54G$mn&jZzP3?1I1 zNovAR#8~5t2f@Z8PoqNkcz--=gi;OgX-p)S5Qtn?H(a&n`n7As8vuP@eKZ-t z60xvYb;sRY&a+BV<-x-nQ|A$9ablG2OQ7pbfCt2~+KU2`O8_qs0ECK!D4+Brw34I@ z40e-PiW7%yPb$s|KM_?s4>RA_duVGxVDi_G|5-sfw-O{;O&|GQ~M9-)^SZ8vR_iqgb%!gAiWt&8o%1S>;kszZEKnJ*7szI>502s64P*!$jrgu@kh28AV}InDNz zf(cpX%<6+})^fbMbxAYT?biC|oUTU9tleJ_S5z36)Mkz*!6FGTAOLe(9d%4aI>d_T z0(78+xDP8tjbpObVv3ZWgkm~Ld;}3h@uIFrL>RSf6^Hl(71kzV8wId)bhP(V59up| z6a5U{L;rdla>LGxLdC09v4@TAJjgSYTZnmH`GrlFy=g=uw)V+8_rEWT_dYb4eJC?P zt{HGAC6T1I6T`yKH$tXJk_5^PMeHUpHWDhDjW{n0v298jRCDgXoR@k)EML*tovE$1 zy{)!>rj(D@`$9UVbrWAjV$2Q9ZGlRr?rZhVWSV%>z%Uhs%H{^;O3Bve&|AHgoPz`qyWE_wfuLnrZY~ zx9XTqWF`S_mi(NX^1A4v+9z4B7$8Z%U4d-w0ZjgH>T)X30iuSU1L z+3qSU_;CRyT}+XpcG3aYCDoDH{w<{VYq`a6abA!7+z7txBl0m1G?J)HA9xp2k7 z#Xvo&2Z>~={Z~VZOg$S#wecXUN=<}qi1BC08=H+0<@A3xk2_Am4iNW9XA!B8VZ|q2 z7PQBtWMozV-GqPA4cz1eJtGqXCL&(}vp)|iAMrNGusg4AX|x=o49d; za{UTCsic|mIRIzG(;ntV=i%)Jl;K&qB|t~gt^@jco75GA&p&@>xeGkpkczM%OdTU| z$+=fc&@o7^9}#9nI)Q|ba)Y>ppgmNiavXt`DG6(9>m(*?FRANb$dWjSuOi~ATYoX% zh4P}l=@vfwIs7ovVL)O^>|78*H;G>QplP;y(R_jX9vCkkJXqg92aKQePIK_THG%C2 zYdL6vtPZqSUj1xJC*!-55Nw7q+rb?Lf|UXL5nCm0Jm!@6^%+3X?q)#a#s1MR}OyNl3FxS+Iwew)0L5j zq2sSy!ViQmN!)VUQe@zqWEIXnJ9PnBQ6U?k9-9D| zdk5B_gpLT(Y7bDC2gvg6;mO|v%-jzZ^vMesqR9!Pwb9AHDnCD!U34cRB0}!rfg@xD zDvFs{4CQ+ZdW-?U>-!HM?!_ULYHo*r0ycIz4176!M5i87zOcm5sH;R|l15$&-jyVg z?<3*&CFOV;Mm*$V=}Ity4`zDjT#dBO;dOsVH&Psc{s*b26y+}aL^*JBxbZX)iVIhr zlhinYgAt-0;lCDuM@T5T3j{~Jq6ug}9-dK;sv%U!Xym{XkO>m1qXD{}_!rYaeNqVN zti_q_{xFTl4mf<8Z(BcMBzD609f=_0(1icSvqQPYRnyo;+yIDtVcDk&14b^Y8-)O0 zV>obVX*lH*)UHH9acGreXoIcj&t97={Zp;(R8qMlE-rkH9p^9Gr zTC*wjMYw*@_}lcZ4dPX68_Pa+rn?W7b$c>-TlCo}zs};>UFJ6mjB6jwc5F!ikh`o! 
zC-DRvO!u)P>$K(}Ux`4=T^5REcXw2C%PFEU2g6SeBD*;MIiS=_-EgIWqPz}7o(KCY z5E)qE2RMQ1dSmSzSeMeLPWe*4NIIjoJE2=m9uP3`UH3Pbhr&ul{5?Rqb^-e;W3gk=|X!@Dy^kHfR&T*wy&p02Cfwp;ncoGO0c?35BDzRiwOY1I<81l zAfMd{C(%P1uz2T0={$i!wuh6FBxl5?yVvcm1jW3zqY#NUkq)CnbrSB2`|7*cd0y05 z)dYVXK>a`ntAv$Je-CCXtEf9ZfLi834x*<%;I4}LE|R-{SXG-v$RUa0^Q0_Gin_`J z>#}83V}X5oNg8;w*q9qvVDiJ$aEG`3I@bJ*hj*6p=-TTRboOmx{b{G{#26RCBiMEG zYwaY~f1a$rjgL(K7ReOd!X@MlyQ7<$=>-3WZgBQa>q46^xgr>yG=aUsuv4Si4!Rbr zrD$U>TXgMm1PWqQPi0q&}?7)3j!&VG{|8cbO zV+#QB#8JKr`lo3e;8hf1)7ChKw=hk`C($wIlAWx&j;tk+nwYI!00i#9+g)<{GGGP7 zqlh=_6h(Fb8-1)C*NypLemD>h6D#g%bR=#pv+J=RzZ+rXKHSAVt{FXAip^bl`azcr zeOamhR>gWzwnvvKP$Xrmf#+)^^}u*$sT(mGXJrMSJY`Apf+oTVgIJxA`n^5!4e8nn z$^@QuHQ5?MKrRwO$9Egd!z$2)QOAJqrs3g=G?!N^w z2qV#EoSpCcID2N@q zU_H_@zpuwIkQWnGR&)}#CFBPi3oZ|a9CvJzRq=xW95oWJW-nx|g!T_=Jg>G-zP_8g zRpoKh$~~>m2SJHBi7J@#MI781AP$p+4`oDWrF15|=|46o3AKKs*ZHX+9{pR#Yy=rr zg>LBDm?E-YxAhIS-`39yOIFr>=wiXx)OfV}oW890p4KQR%Ex%RRrdFXUs^?hZl03q zWNFC@Yqd)mE;#emITrdJL&S{}-bc%*7F_6T6@y!T4X%7h0MhNY<_XhMm4Zx3f)N)-hfr&P7eOYh_$fBmmsH|(6W&ZL zx8leu$GZC4|D+(SwjaW@-E8zvpWJ$9Y2{rDPxWx76!Y#XK1uH}{~xPsM;^{fc`>z# zg)bRqAxX!khc}XMTklavrevPm!Ok=uX|wAqMZz7ATOoHhry_iPumc=7UhQWF2uTLmmVHzOyHs z2rE$^mIx6aHMG1rT$j<4v2zfrli}j9z}f&P=&$U6Shl%c*0`R^c|nTd57NnuMrXPs z)T$=@>i|$GS_+UNEwCM_FGJ^_3fMR(>wT)I7t83;S~})npcrCpSIBx|K#wnU&NgE= z<6%y6ZI~Ct=T>@++249YAI=a#^WCfba8@%)zwd;WTj$QgdSJ<&AighntH_5z(KE@arLA`(OHOj3tI6Z3cdPB8c&tO2?+>)k#dQb2mn$C2=pk@y1s9x(+71dM*e=1o9K zuQnsNET)L0W18I&bqr`p#ui1x0gxG^2|$(F-h$$|6B*oFQ{X|!?`bH^Q)6zsiY__0 zQ@4?$Z0F-UJ>5L;tG28yi=oc?Po4K{R^AdDb9OfI*Gn(q^KN{V#~mx5V&fc5r~|GO zQs6g>q!?U3YRrfQtF@Dpvm!Qe&*aU*OC!g$lU1n{b%Ak*<2J~sR-sEo+wnuO&@>G@ zVo1ZVV4}tDNiohv{W#7=&SkkuyK~~U-g|7CGp@>&k4N{NWylLpwGlJ68q)syk$x^h zw*Q);e4gMPI2{H=`u>(s?_N$x05zTX_zhJydqsqO?Rc9iperQ4{b_OQw0&C%m)F0yY@l9WnuSSKlcfuV5uz8iaA%0=_8 z78qc;7bqRLn*Kxed|U_rnPBesy~%wyt48;P@!7TuH=9-UeKqeJ*7${6v4c{pW$-~%htbj`9UfzOPg+a~p*u*8MFy_Y#d-M^z`i9B;Lt;%n{z=~_EY%q` 
z7CpTMN|~=&&9$}bF*_pl?8|HTt$`oF7a=o@JkjOu4K0iQ#r>nbUhCAz2`$s$Lgw=P#j#%lzr>g zt(MJEwyiem`91`h%5N|O7OQ;MJ;So+t=doIMsY29B1mWLWCy+pJw=DhSqly{n~L)B z6fm(s){<)mwuTJe{s7n~m(nAUBOEp=(siAiTu(m`Y?@qd_7myUX=KjI$Bq>*EzF+E z8_8-`mW)Q9kSluxO*V8b&4v2%B*DtJzw6F_?$wQh&6sv12lkmFs$&z}k?c?W;g=G| z$|?$qWm2WQ2Z~c+{M{2YVXsQ-Jvt5pE?}~<^!HG@H=P@~YOvb8wpXzR~Eu65Y<<{Y;>+=_K#U@)HgH6W1fvo4SQ2mX0CP5g;0bSS+9HQ(8sPp;= z=ed)SKFq?5Ha0e?=p}uEmeBm?bRveXof$fKT(aQXhm$p^xikvw$S7eBR)zM@->Z+& zl^}KR1bwgE%){Ld+}Wr{k9J_J;d8zw6HTVro*H*_)?R}SwFR-#bTUWgq&VQ5EeP|% zMn#x}$Uy(gQO*!-4!BDe5cW>xbdtddf=ujW?F9Pd(h4_*(?!IS1cF~@H$#FiDUFM)QCyStMG0`2fM{G>hp{i7GGz9#400opHswzMtnggiu<$%u^3t{+7NEz1QDXJoF+@vw!`o#bRjD~&ZQ%J(2 z@miztmHnY*Xwh-DqLXX_S`~3=HbE9<0*kOX02ttSkFb1Ec=t4A_Lo+!rNJPgd4doh zMl_RELhKIU&L2+8fU#?VUQ~jq#Pf#pveSBsAj{f7#;XyJdKNjqEDCr~tK0M$$q!3={>RTH!D$v85_5!-jyAi`5Xb<%t1rEu9t}-LJ3;-(O z<>O>viqN*n$w{`t1BVaQ&=}&fFUL6F+-GA$g510B4j0YMy%9!Afy$o;+G3O_z(k)4zP|CLOP+{wU}wrC zba{;HbPpY^I=4e#^~QMo&GEY)XC7rn`jXIgnS{1H-A9@GA6xZy4<7$4qq9OnB#&hleXMf;tZL^Gg zvX1k0pFVBGJlQ+odRQChCDUwh7~)DOuX5tq^erC!>k_f z2nG_u(Oi3Jk3uyw(nusn!IGTAc_-9r)+MkuRQ@rXg5gm5X6<5ih$sAXZOg?({+W!F*$U_VO literal 0 HcmV?d00001 diff --git a/src/genbench/tasks/icl_consistency_test/config.jsonnet b/src/genbench/tasks/icl_consistency_test/config.jsonnet index 80746d5..f868746 100644 --- a/src/genbench/tasks/icl_consistency_test/config.jsonnet +++ b/src/genbench/tasks/icl_consistency_test/config.jsonnet @@ -1,17 +1,15 @@ { name: 'ICL consistency test', - // @TODO: Add a description of the task - description: 'ICL consistency test aims to measure the consistency of LLM predictions across many different settings on the same datapoint', + description: 'The ICL consistency test measures the consistency of LLM predictions on the same datapoints across many different setups. Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. 
the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the number of parameters or instructions tuning). These external factors can be added into analysis by using the task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 strongly changes model predictions. Currently, this test evaluats the ANLI-dataset (Nie et al., 2019).', - // @TODO: Add a list of keywords that describe the task keywords: [ 'consistency', 'LLM', 'robustness', 'in-context learning', 'icl', - + 'anli', ], authors: [ @@ -31,14 +29,6 @@ task_type: 'free_form', - evaluation_metrics: [ - { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } - ], - preparation_strategies: { // A recipe for preparing the model to perform the task by configuring its prompt. // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. diff --git a/src/genbench/tasks/icl_consistency_test/doc.md b/src/genbench/tasks/icl_consistency_test/doc.md index c2d5f78..28f4852 100644 --- a/src/genbench/tasks/icl_consistency_test/doc.md +++ b/src/genbench/tasks/icl_consistency_test/doc.md @@ -1,5 +1,17 @@ # ICL consistency test +The ICL consistency test measures the consistency of LLM predictions on the same datapoints across many different setups. +Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. 
+the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a +specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related +to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the +number of parameters or instructions tuning). These external factors can be added into analysis by using the +task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. +A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 +strongly changes model predictions. Currently, this test evaluats the ANLI-dataset (Nie et al., 2019). + +*Size*: for 600 data_IDs. The user can choose to reduce the number of evaluated data_IDs. + ## Abstract Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. Just like the previous generation of task-tuned models (TT), models that are adapted to tasks via in-context-learning (ICL) or instruction tuning (IT) are robust in some setups, but not in others. @@ -10,9 +22,42 @@ We test all possible combinations of a range of factors on both vanilla and inst From our results, we deduce which factors can be used without precautions, should be avoided or handled with care in most settings. ## Examples -*Give some examples of the ICL consistency test.* +The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data_ID (specifying the original datapoint) and a setup_ID (with each digit specifying the presence or absence of a factor). +Example with data_ID - 1120; setup_ID - id0_0200020: +``` +The city's name derives from the Greek words "άργυρος" ("árgyros" meaning +"silver") and "πόλη" ("poli" meaning "city"). The name's older form was +"Argyroupolis". 
The first name of the settlement was "New Argyroupolis", +given by the refugees from Gümüşhane. Using only the above description +and what you know about the world, "The city's name derives from Greek words." +is definitely correct, incorrect, or inconclusive? + +ANSWER: Correct. + +Undead is a 2003 Australian zombie science fiction horror comedy film +written and directed by Michael and Peter Spierig and starring Felicity +Mason, Mungo McKay and Rob Jenkins. It was then-relatively-unknown "Good Game" +presenter Steven O'Donnell's first film role. Using only the above description +and what you know about the world, "Steven O'Donnell was not a popular actor before +the 2003 Zombie movie." is definitely correct, incorrect, or inconclusive? + +ANSWER: Correct. + +Let the Music Do the Talking is the first of four albums by The Joe Perry +Project. It was their the most successful, selling approximately 250,000 +copies in the United States. The title track was re-recorded by Joe Perry's +more successful band Aerosmith on their album "Done With Mirrors", albeit +with a slightly different melody and Steven Tyler penned lyrics. Using only +the above description and what you know about the world, ""Done With Mirrors" +was an album by The Joe Perry Project." is definitely correct, incorrect, or +inconclusive? + +ANSWER: +``` +_Added line breaks for readability_ ## Usage +For an example script copy `example_evaluation.py` into your genbench root directory (`/genbench_cbt`) and run it. #### Dataloading The task can loaded through the default GenBench interface as a zero-shot task: ```python @@ -42,22 +87,48 @@ results = task.evaluate_predictions(predictions=predictions, gold=ds) ``` +#### Adding factors +External factors can be added via the `task.add_factor()` method. 
+```python +predictions = (predictions_factor_absent, predictions_factor_present) +predictions = task.add_factor(data=predictions, + factor='') +``` +where `predictions_factor_absent` and `predictions_factor_present` are dictionaries of the same format as the original +predictions dictionary. + +#### Removing factors +Factors can be removed from the dataset and the evaluation by using the `task.remove_factor()` method. +```python +predictions = task.remove_factor(data=ds, + factor='') +``` +where `ds` is the original dataset as obtained by the `task.get_prepared_datasets()` method. Note that removing factors +will influence the results on all other factors. + ## Data Source The original data stems from the ANLI dataset (Nie et al., 2019). +Prompting templates are taken from promptsource (Bach et al., 2022). ## Limitations and Bias -- the number of factors in limited and does not cover all possible factors that might influence the predictions -- currently only works for ANLI -- factors such as _Instruction tuning_ or _calibration_ are dependent of the model inference process (Which model is evaluated? How is it evalauted?) These factors have to be manually added by the user. +We identify the following limitations of the consistency test: +1. The number of factors in limited and does not cover all possible factors that might influence the predictions. We limited ourselves to factors we deem relevant, to ensure fast evaluation. + +2. Currently, the test is only implemented for the ANLI-dataset. + +3. External factors such as _Instruction tuning_ or _calibration_ have to be manually added by the user using the `task.add_factor()` method. -*Note any known limitations or biases that the ICL consistency test has, with links and references if possible.* ## GenBench Eval card -- The task is evaluating the consistency of LLM predictions across different setups. It evaluates to which degree predictions change if we change certain factors in the prompt design. 
+This test can be used to test generalisation in LLMs (pretrain - test locus). +It is designed to better understand how LLMs generalise (intrinsic motivation) and to give practical hints on relevant prompt-design decisions (practical motivation). It can be used to assess robustness. +![GenBench Eval Card](GenBench_eval_card.png) -[Genbench Eval Card](GenBench_eval_card.pdf) ## References -Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., & Kiela, D. (2019). Adversarial NLI: A new benchmark for natural language understanding. arXiv preprint arXiv:1910.14599. \ No newline at end of file +Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., & Kiela, D. (2019). Adversarial NLI: A new benchmark for natural language understanding. arXiv preprint arXiv:1910.14599. + +Bach, S. H., Sanh, V., Yong, Z. X., Webson, A., Raffel, C., Nayak, N. V., ... & Rush, A. M. (2022). Promptsource: An integrated development environment and repository for natural language prompts. arXiv preprint arXiv:2202.01279. + diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index fdbbf8f..e1c1fa1 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -64,15 +64,21 @@ def evaluate_predictions( temp = self._convert_numeric_id_to_dict(setup_ID, n_repetitions=1) for factor in self.factors: em[factor].extend(temp[factor]) - em['accuracy'].append((setup_predictions['predictions_numeric'] == setup_predictions['target_numeric']).mean()) + em['accuracy'].append( + (setup_predictions['predictions_numeric'] == setup_predictions['target_numeric']).mean()) # Compute the Cohen's kappa for consistency. 
kappas = {} for factor in self.factors: factor_present = results_df.loc[results_df[factor] == '1']['predictions_numeric'] factor_absent = results_df.loc[results_df[factor] == '0']['predictions_numeric'] + + # mask out predictions that are out-of-label-distribution mask = [(f1 != -1 and f2 != -1) for f1, f2 in zip(factor_absent, factor_present)] - factor_present, factor_absent = factor_present[mask], factor_absent[mask] + try: + factor_present, factor_absent = factor_present[mask], factor_absent[mask] + except: + breakpoint() kappas[factor] = cohen_kappa_score(factor_present, factor_absent) @@ -87,7 +93,7 @@ def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str Args: data: A tuple containing predictions, where the first element are predictions with factor absent and the second element are predictions with factor present. - factor: A string representing a factor. + factor: A string giving the name of the added factor. """ @@ -107,6 +113,30 @@ def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str return {**data[0], **data[1]} + def remove_factor(self, data: datasets.Dataset, factor: str, keep_present: bool = False) -> datasets.Dataset: + """Remove data of factor and update the setup_IDs accordingly. Also remove the + respective factor from the list of factors. Keep_present determines whether to keep data with the factor + present or absent. + + Args: + data: The dataset as obtained by the get_prepared_datasets() method. + factor: A string with the name of the factor to remove. + keep_present: whether to keep data with the factor present or absent. 
+ """ + len_setup_ID_preamble = 4 + index_factor = self.factors.index(factor) + len_setup_ID_preamble + realisation_to_keep = str(int(keep_present)) + + # filter out all unwanted datapoints and adapt setup_IDs to exclude factor + data = data.filter(lambda x: x['setup_ID'][index_factor] == realisation_to_keep) + data = data.map(lambda x: {**x, "setup_ID": x["setup_ID"][:index_factor] + x["setup_ID"][index_factor + 1:]} ) + + # Remove factor from list of factors. + self._set_factors() + self.factors.pop(self.factors.index(factor)) + + return data + def _create_df(self, predictions: Dict[str, Dict[str, Any]], gold_labels: Dict[str, int]) -> DataFrame: """Create a dataframe containing all predictions, gold labels and labels. @@ -122,7 +152,7 @@ def _create_df(self, predictions: Dict[str, Dict[str, Any]], gold_labels: Dict[s """ additional_keys = ['predictions_numeric', 'target_numeric', 'setup_ID', 'data_ID'] results_dict = {factor: [] for factor in self.factors + additional_keys} - breakpoint() + for setup_ID, predictions_setup in predictions.items(): data_ids = list(predictions_setup.keys()) n_datapoints = len(data_ids) @@ -172,7 +202,6 @@ def _label_to_numeric(label: str) -> int: """ return LABEL_TO_NUMERIC[label] if label in LABEL_TO_NUMERIC else -1 - @staticmethod def _assert_equal_data_ids(results_df: DataFrame) -> None: """Assert that all data_IDs are the same for all setups. 
From fe51b6c2ad1fdda688a949ec2ccfb0044068cbbc Mon Sep 17 00:00:00 2001 From: drndr Date: Wed, 26 Jul 2023 17:34:33 +0200 Subject: [PATCH 10/57] add eval card and doc.md --- .../GenBench Evaluation Card.pdf | Bin 0 -> 72032 bytes src/genbench/tasks/nl_codesearch_clf/doc.md | 44 +++++++++++++++--- 2 files changed, 38 insertions(+), 6 deletions(-) create mode 100644 src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf diff --git a/src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf b/src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3d4e16e3e1eb452ad3de5bf0c25dca0cae7c8c2d GIT binary patch literal 72032 zcmc$_Q*>qDx-S~rc4lnbPAV1Kww;P?+p5^MZKq<}wr~B<+WWM%+d2>D;kJ9{Yy+KR z{xH7TAHGf|FDg#QM9&68*7KbI1;YYh1lSo`!tn9}7^E$1O$;0@JWPxMOn*xN4rWFc zW&pzvfEIw0jTOMe$^y^WU_&e_@!|nmL;TIN1RV;uhAnd;H zXyXLKZH-Ob{#LL6 zSh)UGHnscP!U#|VFeuvDIscvEf88tnB^1E=m&d=;x&AMrxc-A221!wX7B4%8DZ2@m z0UL{{2@|J*sevJ*p~+tf7#R%=n3xTX*!X!lxlD~1O&I@jZOqQW#lgg4YGi0^z{bSJ z!f3+A%E`vB^N$8j&WK>q?Eg06|7bHM z0Lxzo{+Aiq+5e|Z`v0h;k6@&)ua72c*7dV=QM9~VXlf`(sq1%=A zkodBYUKXVuV*TaTMZ7Q;N`gcv*k~*cACT8(ocDWRXEG%2#tkdUP~V!6fj17u2z-SY zjmjMgNk_KEED^J19wqiSl!z&!sQO4FGn7&KQ^X|30m-Mgl1Xg50&7{l41sq}S9WLm zqZf_#U{x1!DvQ^TzO32-tbl^(6UGcuR*Dq&2_jNp2+SXHQOwdf=80~4qM8fFx+eEW zhUi1$`$JRY8}#>ZPSUpm1K^)*H4(H7bIj~ZxE{D%l8Ke03-xfPJ*{F9PhX@;SPpPm zR41L@d)lrk`DUkS%n+#N*2-wGC39#GKqUpQoC8#Drj_pe(U-T?^J(mp2Ff6d#N6Fi z^SpEkDyq#Wkz^>kVkjpIYMsCt2#l=U9ZDgM3(*^pOpKma5XzyfKj_1DQ?PB;I&Im{ zYlQSNnPJ1h7bS19WGnJ>)LiP4^Lq)qs5BRt4{A@M9zNiABz4|NSY-CQuSHG&`265tQZ&qUQ6VT-Bz%QEOkoum_N zsqpQ`dEY<PHNltWb)^NMT6B;lf0yxN_n=EgwL#zo)yG&@cRilcZ()yl1q&8)tW7fk03{B4{9)YK*+k4f) zjJMG&aoDm!1Ar|@0e0WAVnx@#kuL0UU_E&Zs-#~acduU@2Yub(j#Cq zw(gwcqxNif-7JzwJHoAjH48r;g+8 zAEe$Nri=>Vr}I2IEK(x2NQd?gW2U<}VjlarExe_Vg`_E&vVgqa=@Go<#kge>^B>>o zt9-6f9{U_5c7dKuIKf>ff1gk5YmzNu-!zQkJpZZ1>{F$N+}Zt!AhC(^WO=(%|Dfdg zZKluD^(|jAq*Rc$AM1ls;44p>qRG_Yjo$B1SAHQNl9prepKx>@0s5 
z0RM_y%m5}PMwY*eo_`hpSuy>0AZKD`{2My|{VzAV1za9?4TByI?N_Q{UqnYoM`Np$ zA5foUfG5?}m8=5|J)J70VwUjl)(&*4t&w=o>$c}_v#rcY(s!-yrLUzNXwWHK8&?Y} zOsTklNS3JV%s?qHKn1-~?-cmX-rnKgL&!{j7I*+p=sTf+<-{K#UmMvlDnM&Y1OQxU zkV6LX;g)xBf+}y|18;N!*#aZm3L(?e10iCj-t@rf0EmIO@*r1m@}`019ULHp=CF-* zw7ZD(wRl#Xa(wZDrBx?^Yz+*Anguq2=VbvtmDLC_Phw+=Q0X9&L- ze5|)r;q@ViBj_MMpcxQ}TJ%xuw~M_iYxn@5oCeUw(G8%a?Ta))oA-?BGk}VS=V*|*l79CC>x*nXT;G5#D0n7Lv)2G`PY6Vt*@=;pA7HF9;*Ed%>7u)o5 z@8aZ>@Lr%MP3g8NE5Duje4FlDi|TIx0>3di#ssRpzi|R?Z3BM+3YdZX{*Jkafcol) z@qMA@S#Jdj`lfz%m;I(*-M2$v{Okfu`hCX~?cPmlBl(>Qh;r=i9oKt&?S1(czvt3_ z`_g`sjDIJed~3xAbQJ98dKCA-eF=Evp{~rm+eMlkwGZU92yPy_V`hCXD-%8}pN+S2 zYJS(qVncXsfm@hqjrhhQ!>Isy`d4oI8K}DY&K)weT@gkeLAgS7Irs;ByJ-SlY-nqL z7kF;b(}W%$gT76U38>^9AHH5E-A${~FZo(y&)ma<5+UVryxo%tkofp9Jl}I`26lcK zGx;SBtb#vXfVkOC6YB4S2z~F1gSX{|--n(skV5S!e{-Phf!kYt<3bCZ`+W=b1IbqS zl3bbDItF|Jy8~shfBSg>$#(b(`T$Y95XwXKp1u&iOzOxU`}Q7>(trES#Or(m=aPtg z{d4*T1_#Qj{o8`D@wf8c;{)=Jm*VaV^=sL$YJ({5*z9Nq-7V*<=O)(<2kHR4URW>9 zsSd^4Z2RVFU#5$-jpF^O^#yLA*$0B9;~L~3@g1N2C$*E#^t#Gulk=H9;njRG??Un) z#50NQfhY2$n;7G%Y0nau1jWZnxT+uBI0tUqCa2vV0@j=l4~Y) zGhoJkZQsqOsoY+>2gKis2y8u4OqHL-&p~t&1$S2HW6mHbo|ETYk3Bbzhj5;0$e07^ zGAVN8zlAU-(X5d!&%>u?9xM8R+7X^oHYLLCRA~r2Dn8<6pSU)>kPRh9*pFPr@F?g+ zqFOs2LO}6u5Id;?m!K`iDcbC)p7@w|lIIXC&_ARuTJc49_ZPv>%N|>3#P3nYHZqP)pQ02MEGhDkqzD}ta`xvR#-PD4@K869mY4tY2 zj?yB<4Y!ya<0e8=fTnRx37*nA@*ARQdCFFYh0?0iuXSXrz%_#F;SbtdpG@SFL%l6y z(iIV~^SsDvhR1uDaUsr&_$e)NejMGClx|V!F27WgmraBS$xbct54++mcomu8MK72| zpJEG8{3qj+-|h0EpU}p8F&Sm9#itk)#i@m~e3)-{JE~YO$U|Piw~PJ(+7-3L*}BK? 
zT2zRkr3UJ_p}fJlaw;tf$edcNa{Hi1Rw|aVzj>I~SpHCLz>u912Ix2?pfswu54Dlj z7Bm{jDWsHLLP_{U*}{Q?(_(*C@m$H7)Qq1UTr) z8k%l$Ha_IvTd3+x4?AFt$nzh9YPT;4L&LW-SZJ=9_Z#Wj_nWjxPy;pip)nNuWjzz3 z9qvVAU#pLo^hO62rP7aT@BuPzmu?n}1&f}ds8A+(gudk-8n^;B`&ud0+*!8c1%!}> zk4*bzjt)%G*4dVr>`#?c;SwY|x$QmudW1)a$MYTzkDv7go(fH8wzQ7}zTS5JuJmBu zE1FyiIpERo`rBPb?JPTebFlZ85@h9qJTi`P*KE$T)6W%?KF2)bkBh52A9 z5lU*MwkRAE2CF>FR0|%cI6;X<^kYpgo1Jg!Shug^LWYQ0?{#*&P?#}sy%qdNv-O)@ zatWQR)@LH~NPcwp$~E{I=L?B%wRASY=quqxKiC2_9>gZc>-Hv2MPZX;n=2J79wQMg z>kdJ}+AIoc{Ru2lgUA*n0|SU8*6kR?`MF5bahPX_gfXh)g0SRxNL81?lJ8)->2}`t z<@q6)pIR9Ag=Tr!HH-Ig?$JY!&ycn-yBJ|cQ#NHJVwVmD2NPmbF13%RB<^w>K)FgD zQ*B03NmUm%ASU>+JfmaW7rCFu+UM97PW^Z|?HT$w-nO}B`qpWQ63Q_ILhfPS9WV*T zeBZCvBVvA0uDbFD8$@LW0qJ@3 z`aNPhVVW|8`H%DDbzgMtD!ti;+UU%mRH|4W%SPTr!>R8XTHW1zp3db<1m*NshBsyp zK&D2eXcn?<;1P*h8V5-aE+}CfTrniY#?3`3}T;2nbMMGd=C>Q@}o2ej%4=WF35=?zz;^N!^VjUMtfd1wn| z5K8L6WG#(vpP=w){oc{|+mL``C^{QO_gIp|=fv#@9N)oO2k3p&G z4@ACuh_46fH)Uz^vPSaJ_yLLm6&+_6ID%(4g!knY0vYiHLshlGDS>yL4 z;3e&153$dZ;xwF(*Lrv_#&)X>O>x$Hz(FHO13=#@{hb)L;M&s*BO=W_k4-CkHLCaB zCR4kagRgr3(8sI^fv_ov8!VHdjIQ^*Z8_t3lnqu~&>x*X_hRZNB z%&rcNwI$qkmNS(j{}nRpm|p;Y{p_+5PlsF8eL0AI%^Us0Nk># zz7g)-C~N%XIzz(M78|)Z(YkAh(t2@CdnK*G9HabMH|fQzIp%A3zEk+A7QNrGWzDCj zkI{2zTs|{}q z$)jR%cKh~{gF7usKg9lkv`jJ^zQk^}0h84i6>(OXBaDkU!Wh_F+LCPB8@mR{`({cE z?xO-2gS0Tq`kUs36XNL*5brl0)`)NG9pI4bt8R$xM+RsyJ5 zxL7eu6lc(IJ%#OveMcY*r5MO0KVG^U#q1*5=FSF;rA1+jChX#*2_quoOL3}4ZGMFE zb#;9vLBV<>_B(@y4dHc2_{*IgC;h-qv)2)FqnI7KzPz{MJW?WjzaB9oLmMu5Unt;B zU>yj(bhyjC(IqC`Ts(q7Vmiio@%<>of3cw$`U+NpP~ykVUa643>g2n+-|es&n-dgx4M-QWgrIHIl4*6)_qC% zKC@}Dbua%82)}!pfP!+l8r_5nkD%i+wVi1nozBl9J)MX^7DRzjdYVaf>%y~;NcyOQ z&`iU~iu{#3?yV3+XBoT{X1`a!@#agpGts5!$#Z7L|31j1trK?u%&WezC5wsv#{|m- z$-AyBgo?z-0mO;&RwD6web@qlIb43e|BYF^BF^33+E0t{s^x&s;f$cRD%?!%aQ3pV zB$Qc65lF5vwx)cWDTl^cHwR^9L<$MgHhp zMY@LsGxQmbDzUKC1Di3!=pmp6$KG+?tK6T5e3z^}fG$I* zzj3>to-0k139hrI79!%mNL;ZPOhh+ekA#AQb5^b0UryElfFg7uJOmAf`2V#7|KDdmQ z5us|~F1;qR+6-dG|3gSn`fw!%rh^t#AtV41woc%P?k6LEtECikqyJ~O_;&xtPLzT# 
zvR1@q0&IEY_K`Gff?y~`cBWo;_MlCA`NP4kR^KPVn$eVnJ29sIr1q~!!wEIj(T?ZV z$`_cJ1r?IQSYV1|N1?_e5?XgiSbiNa{*XT|*Fg7vH_bEKN_>$}v8dCSm!G08@=Tba zCSq^Je9?2JSxM^7V+c#@=M;q3TqAaqJ%kpxK#^u@<-uU6nM8aO?XvU>=@g}~ykz#O zP^Vv$LT#BH{9{(c#)fW$KH0i--Ab)R;xzWBJh)JORio=`O3DMb1aq%MFG|urx6b=z zBXo)ag3VyT&Kct0zohfXMkRk{k} z?FO+9n8+lA_q=|@t3!&1GQhIPR+&f~!-&-&mZ80OoJ9uy?1sadljIk=+m2 zI9MmMM`~~SLp`^}4_-DcHTO<8db(^ZKBMi}J!dY;o+*83va8s&2D5eTtNk`Vej#w5 zuWF*Vnc|v}^DTR_(K*rl6JwlYop!W1BQusIlOmjZi;KTcyEYN0uO+}ev};fXa{G8* zA$e)*r*NUPjehd`(XyAIF%3ta8zK73{bkDPJ8-u|z5(!fI-05H_;QhepQxV?ASFHW z*{D)OXz_u@AYk!fl;+Q7A9YB+EfdpBw4(#$Skf(Kq}-F@qTL52UUfL=YXo{uOn=@N zEktbYv5M5?1T#aWwhSmJbMT-?Av>xEiO+wjOV+dLi{|bl*jAF(;cnr>ru9Gknp0ceB8?#w9L#YC!IQK>+)jUD zGbR~Rw_39EEU2HRihTc=GIQ%slWT3z9V^lN{^_gIgcm($xLjF|_j=Q)a$N&jY*R-J zI^PGT7_-55CRz09N~=q@7bes?3zPg((T!;bW|yFN+O0TbRoAR>TCJvVc2YUOa>%6A z!aG`#p~1k+`Z)XLH6L>%k6$|Jdq{DDam7{f@`F9sJ5{2m7?Z0=VTJAc;DnWKXV@qo z52H1OV4)6-W64`t7?%kAz~oATq1SI2X+sIwx={S1d74hWj<00xV1St6XS)Xvvv`Er zZa+{FG1gLo*N~v}-li0af<<0#b#;sUHG>wDs}$c8+j6sKP5%-4dfeE@#_3Q-8noHB z!mMGU5VAsap3!N8>3!EG-lo5CVZO+F^4E$znTgDx7;9#n!Kyii^^IDRyKIJ6IGA8thpr!K?{~ECta_UZEGE1s=gg+?Zw$uWYeXMY2-4jw$u{x z#d`nl(5%sE6s0QO9wvm7h2Y{#4aUy!GwNa~!$!f%{toz&^%UO~((IzE_SVF5lQ$KN zys52X(JmV&;qymOEO1Hjs}=6{vzi@*HC|G`kdMvdP!N8TG`AjiK`L^L8opUtuU(sg z6SlJAuE#6zPf>mH!trc}>nX#DbjLNlFOk|eL0ms72y)hD!k=|#zJH$Oy<^4M8`bws z^lyR@gjUIv8F5Iek3FF6T=5IEhb%>m(^N9cc?wa}q2s!bVQ96)3i)sXGP-}(Xl*b~ zIka+KE!ZmZJo0NY`PTQmX8eBa61#9ly>V*v#z601)m)THu!)@SIFM;;%h#L~Pntaz z9<8Yg5-Z7w6*2sPx%Xr{oi17gG6lw-ms6;-K6ka$9-M)%z|||n#Cq1C3M%aEVrxR& zl(>;qB8T(JxyyRObnEKY6Aw_aFtp0MnoE>inu~_rLsDEtgh;@QArw)Rcm#Q@>Pp3Y zRk26GDS5(6L1;rLv1M%J;l6B4RatK_OkWR+3n&ff-d5?3lD2WRZqN?RiC-3Y?I(<& zd}r6_1TpX6pYj>7_G1|TQOTxVN9RdIqBqD{(0(UvIH2ws`H7;fpav2abu|ZgK?*nJk3sVj0+r}#*)!{AdVk~_y>tFK{ zU@wWwHsvdnq(*CVt6!*b0l?3zvI=1&z`x$}2w#6#4@uGc$&FKGe!MXseFL z8zXk@!kg>?e=&lW`IKJ8Ro-Ud1gvJRd_-nbbb ze%2#}T|?JU#&%WnkSws31_r!1FO%naBC^likHl`Rd1}?u3{4KQG~4MVX&TN*uB~(d 
zts0$!VOnQu72|#6RQjZT??ykV)IgRbI|3Yn$exlNAAvBu+qVP~n#EVN#L6pfdQ&{Y z2{s~)FDM)|F~)hW)+jUhs##`w$JRRhG|I?NnICA(VhJFkFqg$u&I9c4`dq(Mx0~7- zgNEy0k$yZmgT1;m+Fz;B5YI0vk<6p z1mVSkyAi~0J-Z%wqU;xTArmrKZf-bWhz=$$zDwU<}Wk9pVqP_6sY9fbQ0@QI17bfTwJ+D{9YtaGQ*tP>U-)b1Otoo?bJMZc=v0lRXONG{b)^;zp49u3*h$&CD{!Jh*w#yV7Lh(YuDvU*kN3pt`jo;8SFak?lia{;xk`?>uk5F+G9=ArdSN1Q1* z^Hx#kJg*b^Z(d|%SOz;j5|6u&K~LksLrSe+JGu_3Rl6CwWY%aaC<>a=X$u_ffRqL% z^WC(Utm!(3Fi}i~W-?Y}%){-Fv+Ho;D?RvHN5#X{KL@Yj;YB(EiuHaX+3X74S94~_ zK2M9-@|hD=228)9`woW<37ud{F+c(WoJJvBw6CkAHLVdk9= z;vkV(L{-CL*6E`y1ntuM6NnH9mrNZ6a?N#nS&p12gxLdMD5)V?lo<}or^>JRFIB0N zih6C_q+Ul8l|tIkiZM^FG?uptU~tC;N!J@)kb_dzxOjG9GW(@L*=r|YZhsmlvn@So zvnSUd7qAHmVShYW6L!UYKTOw#wW`8d0yC3 zTmaC^#)Pcr;E0G=7<5~itY*+-e7!gwA z0+~NUy(VZy5Is{mk@{;+JjOT)qiKPD{4N%RFMz+p;Ho6-@njSy2Oxy=I&GurH{M(_ z61qj?n7+_6dYt0wbau07I_)tdylkp~4{&d9E&VbJ@5rC%Y(LaXhfNoNUnY(|tiDN# zY8ywNi_?XjVMg*)Fh#ZSDIPX~M*8*A+A^~t`QqtI@i_)T*<_FONR}ao6i`mTUHz64 zt4rL$fLCMrKy&vyzL%feg*p+m*g{LQY4q814PnuwSVg&0B5Nlqm$E%WF_0oS=W+OVH4Z3p`V7n!HD}s^jcMe>Y~GLne`y#7mTLK3~y){$L~+var)g} zf(o~>w`9WewpirobVC)E5F%t6KyF@**pz>z+%cYc8F(42{f=0V*SF)4ERIh1Ayp+B z(N=&(fcK__jx)8U_ypK@f30*Tb^ETdGw2SF+Mgj~v||YOP6z_%CksB<0g~<=Am~OH zv^trD%l8hMlnh5jVO>?Jn2>YBXPIc4XGH<$$0!CDH-_Z0pd#{yE%mn&Y0-!)W)3MvTji8>^j`#Obf*^Cwz zj3jA|0U`s)*Q!N-ew(*A%Y=v7yatxf8TsT#=PSHo3xIxaE|WcCEeiyNX~q{|t%f5p z@VE+C;?!g=8=)>-QO#Z?yd{HyRgYw(C6xQn>;}%8Xkf6xSi9zRb(fcr7pe;WCNvg2 z$@6Hd=f&G|+Yc(^rpV5IQb%RUQ&v#HDPI4XFF8LtyzhoG9MkZ}YFSnQEa}qu#d}Ra z9yu}987kX`8iD{6vHT8U#4)Ju6u0>l3C`Phf5%skZ580uKqkEF`dRvWUCRE)d8eD1 z0tF8z=uihJE+6}^1V_lQjkN44Y`W&Tt<~$fsg6%x@qGJT-^d15k->9Mkl6E3M&~Fr z!a!fu_hvBCybW&l1WF%u4r_spWf5pcbBV&GuuM5;D_R? 
zhaYkK07_<7`g3p3C)o3nQWTvr?y!;NyIYV1Zlcy4mG>!3LNdDscT%lKaMISFFo(u( zVpv%}@50yK6=ivP@T|EL#dDmjdcs-SQ3k8sd2T5LLJrLDvD-N}A}t~%?SCEfRpxW5 zc8VvSNfQOr8SDb*m~Xv!uHS`d^`WIs?sWxx-ooufMs11Zldp+DSK$@Qj$3s^gB(313&O;-v)` zA~YP!f(-@gMZ68Xjz2NeAea$N)FawSqikJbPu+=jhjGDPkUV*q;~IoZc-*y_F0&|S zOIBP#kw-E@9z;OLnc(EwglsW%GR8gmVR`MnD4*JS(&x%qVo?lZ0;m7Ujc9@1!OdJR z@47P;TIY}LTH5VbBFUQqgOE6tCa|>nMKv9A{)IEJ zSKfuc(tcf4m!>Tawk(<==~tyyVeSl-2S3fIgP*gma0GWc_%gqfU2?2Q$KYF&koPAN zzGa0~c(X!IEwxP~Uw|;lovKfmbqaFXDT5ev+P%bCgY)phfGI-PM#{_HP3#X8kn9$| zGLKfm^Eu*HQE{vkO;4}8o}x$Axla>C!Ifj z8r&B?ZTJy&=X6?xkR5kypd&gDDZ)71CRP3}FB}xzowV<_ z)A_yRHkS+@|J`!)+)5!m7Sp5JvrcyMM7#QcNG;Sl$sM@*mF995FWIpH$0-0gBy7(2 zL4+qTihx$C%SW)*%Z;qG&BJDMFrX%tJ#0!vi~50inWv5$uwWWzcTIPS>N%Ac?V~>v zK8cm{xiNiIX(KeK-rQ&&<-}004}p==JihMP5O!Z{WY|$0-jR?6w(-m?HUdZH6eOhS zb6g-cZK~K=3-cQaDeAh)4p6RfhG)Qc27w{}{Q>SAL$0=*e(c{1B@2AODE1vvptfK| zB-fI}l#*?cG-T*6R*Gv?fMQv3WEB|4d;z_xc&O5^5gr@*Y%bt-kd71~#$5(6XO8N> z)Z=mCAyVndO@bmnm$;-CSMADL;~R0h@?(SZVQTb|Tv-u4fG6N$Ctkc&`%nlqf~@F6 zee1^Fi*HogZjG2B8Ey_L_|8?*34MOm>LGU32TD!zseX+!@M4e+n#uCjy ze0qbAOGhC0dxuSiz8FH&MO>heCJq0!Yi@UW5P&5j#C+Clh6jEwIMHnw87q<2z7#lM zQaohRv{v%25{QSqoYcsNC4(lwa_+MHNt|dp7G&JXBEN$Kqcbtr!VEIn;G9b*f%LrZ&pg6mm*&7+ILu-4b97Fspp zSlWqb1nBEBe_E}<23_w(Aes6@tYOHQ-q9!>t+yg6c=Ib=!i_eL3FxK3oA-g_lzOh>) z8CSg^b=VRN-9S?DXQ&EqmCwvBzjUPwuXD9APxTEK%NlJDB6q2Xm!)du_TbAsTvxo~ z@axH)Sqs{{p$Vmpl|5@ONXlb>xd9i9XPjnYi;=%K9x)L&U#5r}86nMSPf#^l!>G&& zZ6Nrf0B^q-k{kB94z8}m$2z%w9EL<_h6piU9)jjF5Q9v!M$^Ef;v@|RxBZcGi#>0k zWEkr&N1ljsmA@^$_&COP_rJ@Zha?V&O1I)#!-dF*Yi6{3>oT5$0LP4V^j~@TGjjc6 z%0=^#*ePKvmee6in&ILGVG~mO()ULWZU{{YjYl8=t{__>d5?XFD5t+l$g;M(kFFV#i0Kxj9sUsb2g99Ut@5YQEgp zBQ#CPE0LDhKYTQt8k)JQe{Ot^nt~ozys$FLwe(1xa+kWIZ)@L%_U1@$z?*zkE@8$t z2eMrYUnX`o;EgMyS~75t*t7NaE)LibR(Pco!1;q##0AHpuKAnn1%vG2It`w~xwz%> zzNUkYU`VA0tH^mp5IhHS<)oK;s#-No1?Haa6PG$L6Mi6e(TqC>%`T{0dPRNngH7WJ zZV1WeWC1K@aU0Z;Os2B$^6*j%U`$(R0uzF>d6gG%$d-_0&oX}hp(iQ~fv1$PdvKfg z;^-dxQ^XXYxo=f?GDVI^PwBivA)EdK%A)m}jn9xPj0o1j)QP_Q 
z(?KS+&a>fX>GXvPoA1Y!S{f#Mf>Y~Tis)&RpMKUkoz2qn;f(_sgsTe2p5WW>idkIY zY)fB!u7Pd5zga%}ZfjnIjxq@-E@Oiiof{enB;W{>%=eLHCO_8LLTH3hkdi2gtAt|I zQCj9O6(+aS;IY%hX_J{Dx&E+tj&eD0=^h<~v+TZ4(J%`TA$+oXg4;`4WhDKDIfgsT&QXTk8=iJ zlFQ1Dm!aUuy_{JzuLzWx9nFt!@o7o z4XeLg(G(I~FOCc8+DO$3`ZYXiARe}eq7kz+|EY2Wil((>b#?G4tuR$I>3@wH#dPY~ zxTm&Ny?R1?IuqpN^N~*ZEy+U`_M7$(ol*?(2|feJ zlzezIJhDVo{oCG~nAMY>N;xh6pQV1BZHuYPq>z1LAl+hf`A^DI8M{ZI(L4f{Sl6Vu z+4-BtoEeNZIQ+wmFXydcVO8-v_yircEq!-AS^{tR1QIy{Rkp^?Sbhx(-J)J_!mff9 zr(37UeBqg;&pD@dgeU)^*>t)1Be5t)4FL7b8?) z{`c3{Aw+qo%$xIh6Hhu7#hc;g5RyDK}; zw44QVn-`K(XlL`=e5CyLIO;7rT-_YjxBa-Z?Ri1_W(KpOnAnS4w#sALkV12ZJLxHXpXp}lzBP%7u#q~bmuurnL5BK3U_B+ngq_y!YLAkcm3vR=jOuc_kJ4bT ztMliRdDf0w_(BipCq3}&?eEIjQxshVB1#o#%?D(zc-4p^V@#5cu3GhzEn_e>I>D|4 zjd$NG|3vS&dY1I@X6HpmUX7z#Kgm$fX~DngOOSmOSR$v=m6Ocr3)}(J%kC3-wUq^~ zg{#@U@;+2X!arO_@0_Om+f9$%Z zXK)L5;p)F{Ok}bm56KknvTq+`j*VxR_C1<*Q!Z_myVKHeK9XGoju@}eifLy=s!l?x zoL#VVZYA7wK?Gmo3qb-a8Yxe7&QvMowa$pz^lMBpd@XNy1(qE+jvq&#)F6#vssyj z_7>!I+q>dU2?!S2AGF*oZRN_&xAyZud2q@-d_SB*9TMf)?!nMjjuo6(J6v-nE0p~( z$QrUM$v#jjv*YShAk;tf?mx5jnf}?I^naJG&%ycc>G~`zO#gec)&EG>XJuvO`j2$| zto3<%xIVJh^PQ`!D}oJ|+Ft#@j*uPyoj#)04v3bWE2ONQEBn>E?5>&TZGn2!2LJ>$aAHTw~ z%xG}IaBRRe5J)>fQIeC>gL_?FBXE2EV}`_UDGQ-@cw}VYrG!UtlHkPJ$QA~KGCiQm z|H%$}O6C&i4B?1fu=dzDI1jxMA`t&oLw#d&GhOCVCk;QVD(Dmo_=;WMsuwa3c6Jrs z9QdWcz^}fH@1|xt4Y>>mXNjHOcWe=#w(qKVTCg{>cX}8P%F+Ga9$W(`8`y3P_{=c^ z2wehj!eBJi1BV6dI|I=>Jo%Js@9X)EJPPqp(oojc7T8rlHM#&{3|`w<2MVHGh@Rd@ zpfB!6HM)l=qQrw4%HgZTp@hq$)W8uaK*It4BeD!wdK>4XgAdcm3|FESwjnZllaQ|d z1Nq2iToC{Z4e99#fVv2Hk?>AnpBB7tsq?J=aH%I0Lbw84_xKmVkM`filGAGAYp@Ej z|J^9SHP$1uB^vfUTIr_`gh@j~gGdMobOHwCk*3w~fvh{Qfc#RDaEW-U1=+n7gAfBW z(L(Om&7RWRLMUp7sl)*Qy10OOc>HSF>p>>Q#qI4M#)D%7)m-&M`VRjhhtT{A-bUD7 z9R|&tc^l?q2RbLr{pQ8)o}1_gHa>ohea$_d$S>8!(m z%KHpGfT+~2JhIcX{+?0(UO4=Y-SriH?TP>J#U#D7GBF_q z>9d2)JGg0v$osY_hySp$NcO{EuYUU0qJd=h)&gUSOWyPyHs2?<*#~ZvZvxKL`W{N@ zImqy}PS1viDm~jVydP--(f8oV_%8C)qPvf^QwyEICHqze-ey1UQTpjWiBJ2b&g4pO 
z>sQv$P)4!0PSit0@5iyePOk~X_eIL&7mR%XntB>ufLnu`k-z21IM<8}=o< z0fceui-hPWwnsP!sUQD_xMu)bJNgmm224BtE!1hp-QwCCpELJPP7I`hp}aaxB`TpJUqh$l_w@Q2m++BSqM zC8+?-^MaYg+f>9vo~kfdRL;-+*HFVI??EV0X`K8H{ux<*O@G4Q8AQILN+vvQclo79 zhFfjV<8FVRl4)aurY=h_RUK&ne96*GVpihVU>MW#hiN90d+`GS>0!I4Ow+vbX4Q7Z zO!dQhl7~HvfHwj$Cp5Qno7$0$8IP(Ii`ow7J*B<#U1Sj*qbO-b&&X?vvHfXJO!UeJ z1~*n%j5yOSGihA|VWTZGxthjTBxuWvGFM!U3;p$JS+Jm4q$eX=3S~?VLu{VJ3VpR^ z90|jK3NC(-@Mj*{A+>ee#&%kp#PF1tVoN<>mTQ+PM9_@(PQvpn)zFalblZUht*a26 z(3WY_YlFX zGhZt!Jvtfr;z&d(v{(6$nYs@j&BpO@Sx)Xto$d*<+q)P2J z0nf1ru(gXlIXOnouUBv6#Wl1*1A5VuVN9NbGpv1R0GEzBVeHg*`C=Ft$k1w;_*RRGzD1T zKP@YIb7N@7nUyJ^w!8@WPzVgA>{B4DYRTD8KR}DALzNLUpiq6;xZ&T1CxWmdcrD3T zCJpVPgFdi=cw4~&+D9@W*<(6z;)&(0^3C$^?OkPEQy6!d6N=oh#5}gfv$ZqMtt#K! zJUENw4z2P6Gj>+C)R|G{3&ZsS^&y~ft?CfH&MNfJ{XZ>q_CNbunuyLDnl9oV?*f5d z-rmZ~o#TnTP~)X?3Ptcp_v5XxJ_lo@bP-jAWSceiq=;@_vS|lV&OCvy{v46(nSrY{ zgT{=o%0NS5WwK^*YuLL_9W-7?QxJ@8MCNiqC-=24SojG^P0*LXl;EcOwBJ@~OW7`3 z*c_s|epkDb0-tj`oCYU+bSb9LTdn@6%;b3H-dg4g>>%jnIi@Z7&FYBY^;@g71{+7I z9cGdS#;qsxq#et_Bm})vW`OKr(1dv%Su%_-)k1+7&Ai`ZMMb@&DS@W-P??JAk^qrl zJR=Rop5uY%7QxJ~KqUSbDA>&l49WeU3uRTkDe5l%&GFcs66SX97pmmC=RNnTw&4;} z-;CX2Ijf@YukM<6@6!XC?by0}m-sE-;2O>&XE%WnJ74T94HN%~GO>sj!(q=F0-;)* z(5p-64x6M^jByAQnbTJzDQXU<{e1AX3xdM;PFdp!3?+1}AcRn#b!$ypg}^=G{6#gy z5#!2Ropvq+-RGo{XVex@VCAku0rJq=P2Z|dLk?%kQw>OEEP=>Tjc-wx%^Bntf%uT2 z!x}RYf4v{|ehdT;2i@>z+M1w9QaubXeM(>sWqPejm@N3RYAUAVZky=0pTnp)h2h)9 z`0loep7p9erJ46zs(nCtg|$Ld?ddaXMguEg z+S5QpzV#Dap_VffYjb)H-8T~rmSu~mhjIqw^2x+Y=&MKTY19Lp`hzuyCv z^NBeiyH&N)0&8bxZy0Iwxa2sxL4u2q}^AblbU+?ei0Q zA?@`ocT4iCZX2*$F@_@(G#A$8)ag zQ~uSYj#l?bVez*f0>hQ`t>k#;aS)aN9F+D!-Nu1vkB7GN-#O~bfC;wuX!cTn%it0` z#y^>Vpf@#(3&8TEH#1H`|mN@^uG&O;sIc$m!mMHERsg%=I znAoi`w%k3W&cp-bJO>498iQei*@P!b*zr(`?^qnC@sxU<%|S=k7dACioK0vlWKzWH z5?OE3rpac0S>9i~?ItTmL$z2B!6{C*oENFgsc-F+?s!>H%B zrq1$+6{nvE*`o#cs2xNjDaL6bU6oR=!<7>c4vLbMnmr0+5wz-wzCjz0UW30$;t;57 z6#WRx{p+2|IWy~!h{;0PuhCNX``AptM>ZqD|n_g@Ddt{Pcugl2st! 
zYErK?@#KSPC}{`IpMgCvHg6R3jLMn5CanRg(!c5zUhy<@*IlK}lQWOQQg(meCqm=y z&~@&bA%yuz^k|$@8@i&MeMv{aS}vwgr0IK|gQls6D{iAb2ugb^|FR%g$b)9^(8jNQ zc##dz$$ti*p;-`0elQ(!759(U0Y#-X-i{fKzdJUUxqP-l4E)}4L}>Y>6Q+~O)8*`a zfLxgv?n#Mf1Y6=BYuYu#zbG11kw#0P|0{2Ovz09b|58dFZb_1Jv~S+2e4Kg0y%6cT z_X#X!+@K=&@H_4<_L-~uZrL}VH~IA3w<4~fs5W7EWG6PfS&_GNm350Y&A6Vxzk7=D%#|EUc?BkqS=$-mMUDXL>vt3#VRLtMV4Gjt6n$X z&lbDGrUbU7iGE6!4tyF~u3)iD(L!-Iq&~ppEuO`_;6g9vgT2&!Gr+sz1oYXeL=TUL zCbC0!=BwbvD;{ z@bIkY#%-b%7r+!>^#Jc|VVWw_h0WqQYvG*zW45ZK!9SWWOyMztgG1uJNun^E*>jzT zY!QC)GC8~pwfJ&1WhRQhG_%?%(D!2>D}zG)1apuk^Tg^zkoDD_H^{inCWpGFrs?X$ zfn2+;Nb$p%b2FkVr;XIm`9)99?|CKvTu~!9(l$i5W(IemZd$mj$9)#RKS4qwPTb=# zbCA*U^MVyCE8(i)ARvxy8e5=BS4oMMES&_#o?^s&u^br~%a_lXDSyL|;FI~Yc;)_v zpC0C!goQyf2G-3TwH+wohq)H+r2^wO3DsqY?vP~>6z22x)p+Cis}u)UY=MVK|7+dL zW4QQ8&Q1L$!t_u0ohujD0z^ka&PAy2X};R6;^fU0?$8O`1QMcH0@K#G`t2(QaZxWQ zUTqCuHnL<@=4B{|=1!UCAF#1dQ-hE1kOkigB+XR(v_ZjzVWLyGo&&8e@6GN{A}c{2 z-AY)Gkrb8`58Ihy?-8{Fis@N`9jae%wVOczXM}luVGq@7zp9ZiAoP`*^6Oe z=wp&^{xS3MXd#?m{abZ@Q^i=EJvL|kP45C-r1JF-fvxbj!1nU(Sb+DK zq5t198zv?bc}RSM!yOM}Q&(oShE0pH76`rJ}&08$m5 zjKd8`KqTICb0VShLkVpii0pBBg00&1E7W}1%3_3VEy=1*fYAjFSz9Tuk}MWDmdIntZ{B(lI;HGj-Ci-Hien~7cA?6mnvd%*XQjRM;w>$Fn_v_M zW;qisPJXg22doOBFDO11EFnwpJp{`7zY5dq!M~|e?<6faNtyfuC{UskB5lY^rkLOs zaRRkU{cz4*W{0bXxyo9nghr|^1UE^wQ1PSVXNb#=4wQNc)) zqz}mW+P0fU$gsE_F7`vHBs46Ju4}Wu7|X_-&a&()izdHk>=W_$dz^9yN*_`b1TFWF16WpP3v_jP_3MpYdEZSTklkh&#?W zc^1qmLazP|=PtJIM)VC_s)C-WX4Fyslzw1Ci<(&UGDvlt1E#f~7+o^V-+R5Y9^HYA zf{R8^Fy)5KFZF zq%G^2smR)4P|a9yK0oWve(!YOpKDQM2K~8c0p`ly71|BXM)R`|;mENh|D>pDFCKCQ ztz9!edAntK%A-jWnj|>gtQptk}5OVoG*Fi|mHW2y+F;leFzM%oE{%Z6Q? zv!y&rXP*$Yy`dwe@G*FSyA^o~wv~cy*ETFkb9u@!GOv8?A!)2PuB zb#BywzAdfitGAOb^fTT{MO53F3?W7faAv#hbT3bG? 
zuNatI-lZ|4Ii4!mKPpAGCKwZS+y8Rq6V3!;;kq=-!sbtz-OZmitd^a)$Y+}5IXoFu z>p>My`yHFgifj+J0lhan`71NjW#MpL25vZJ3D%qDwx8c3nF?#cNksDnr&^Jx4<-1I zHUi$?AaVDII)1+{VP3Fvv2-mao&!cY7eO% zBYR{8Y7JGf z)_Qmu1VoucDqe!9iE{Tq_aHMToF`NORbf0FCCf!gY$7-_;gGoFI&fyN3Sw&m9 zUVawZ(6pE%9HOB-IOx|*8EN0J#Hpo*Wu18E1=!+O<-E&{UtTLoW3&jw zv^-XN76Quasj~80o5ordwA$}3yl&?!cj)#<>QaintK=OeBH$}-jq-yWh6rtXM)KD7 z_!CHXA3Bn{-)bs)fawsA7i=L}7Lg6~SQ{}Ei*4{=ULbRAqRf!;!yxkFKot-!+gai-|x2K z?yp?{8_kBj58@D=&oxoE7c%y36_GGl6#S zx6Y_$s_BS)4LPRo>@?PtP?^+f^+PoW*^G$ z=$yM3u4-y;vkxbiIk&`f{-m%9%+TTb5}oIlm>@cZVpqmk1{C^!Y z1Tt7^pBV!(LUTY0BB$Q->j{%-0g$w%4ajvtFDIOZ+d*Z6JQoE(Nxwf;ZGv8MRVEan z8j>=x4D|f#p@wg7K~^GGEW3tAZmi_iX-^>;>)hHDl4e~L!cdgdJB3{W8{xAvRWRho z$Zb-BtYp2j{k##>!2TiH;Kbmle4NA1SI4?w<+>I>775Xv%pDG1(E2H1CDcFf_B`t7>*tM3S@^BV!%o9SDWLOhfED_eIVA0gtx*?ptgs`mN3mW-_~ znGZhU@v}!CHN6jzWr!70D*bUue#!S%g#oo! zWh!+bFw}?4b+QA>7W&-At-O04x`(S{!bRmWcNC!V#Zb{LD;{R3tENG{3fV7RZ-#x) zcAb&fftIA$YQ3pAX??rIufz%v^t2_BSgE3SAG5N3u}m16jgs}Ar68BH-2}52yI7|w z3(VW7cdZJAKKrt&-_jc^Qpv ztZl>v@OpG;3fbExV(7k?;FMWSaJqq!-{1U4k=X4KDugTUk1SE^NO?CK5;OEJDf8;%&DaG+-Qpy1&HJ_B}91H$(k-*S>z;A39vfQF4^1JY=)GMj?w zaK*U8;ob0M?_5I3Q6P(RaMyW66U9bkwZ@b7<66K7NtW3c)g*7GmSG?b%49#152)6t zZV*#Ba107IoRs~lrz$=TvGz@qWP%6A!3ZK=XglBs8?JCj0-F^K~*%Q-{-MEU6;ac$Q-F1Gc`=^zUK2U*a9yp=Au=3 z=(tApZ?%==(KHxM?UH0SvAU2QHrk&W66#d@T~i=>z8}eV+ujZs=szW*P(GFTzYuwh zy|j=21opc7f=)LX38)|?hKESU4k3_wgMjTd*;}A?y|uIS2ZHPs+$Gwn8P_2l{C@>8 z$7JtLA1Y-^;{SR9E`vL&IHJA+0EmOWlrNPBt@KM!A0IXiEtT?a-=0TY>lsz}nIUQ6 zlKfVWXRa+b93o(y)0a@##1TJx{)kZ*9fX%hggqr^=3WG!i*so2C5*L%0Ui=lfAOeK zD_0AT3%w#!r^}$QA>=P1SA<8ZDYd(T`e)UWN^KvNF2xYhB$1wXQaHn3yLvN~BOpx| zwH{aMfzGnr7OpO(3Qm_8@#FC#2qFH}k_R|7rzEH~Kq!~Gs~Gfln816#v6l@8{Mc9R zEY7H0YOjIcK=XWvL{E1P+_@gRkIco)r6HtVDAMzfVO~qFt0$CCEynjRGrmffjvbop zFwvS_W?RErHf(*~6omO?Wz{Y<%oE(BKX{pz_LAKIZklN|Iw?F&#}X*K8PAGnh8K?+g+apBaT3Dsl$%s1YVK{N6U% zhuW2)qM8lN2T;e}whkl~6PnO4KZD-ow?CRy>wxaq*LsRU$Nzo42?!xR8U=X+{@qob zoM78k-9v*)+Cm=PdcTh8T5xHye5755?crtTnr;ekMczH>vPT>#CNX!gVp5nAtB0le 
z9;}VmA(JAJj9S%!?$Z2o_HalybJw}l+`A7Odee{-RyXEt7 z;aR6cc<0^x!h2ClLnc)<8l{w*w|z;`Thop_TnLqbBWesKS3~!CZGlkUejb||&dZD; zwm&=5vUa3=ymsR=(ns7}NI z`eJrZJD-DE?qY_d9$As*Y5aLv_1v&%iBj1)Ys($ttzm-wj7fB!vB$I`2e+I%SZbky z2}aFMfThCk42vy-u}e)`6Eckz>cqz+RH%W8ya+#{tXPcR zfn><2;2jBhznMN0Mc}~!`U_!okX~@MNEMC1q_#>u`fT$_O7VAMb$uTiK7V41RCekP zoAXrOf_ck)I=uwm-GniNfBe})4=6Ophs)wS_)Mvk0A`#ki{r_tt@-aWjT8c*d_)r>DokuNA*X+J@Bstv?g+u zcnPEB4c?%fp1TiXTmTMNoR!ZwD^1?D%BWOY5&!4{m|MO3KVopa<31F-$G6D!7Uq$N z|Ik(iMK+me`|s5PQY5rwNTyDIZGBL*dAZ9(6P;(mdfQugh|7B{Rhj0UB$lilEJT93 z>sHuxOqi4)VjJ|T(qeUGh|Gi)qhsQ+1S5hsYh%Sgj}Whb*-iog8aa^4bQF(5NHl0iIoJaAY!+d15jf!oCp7d2 z`$&{$dFiW2T&jNWC&`@*+z=r>q$QYXzd+Y`FJA}?0(NmE!T&h-l|+&CzF+gq_wf+@ zxiGa9hG{Es%h_gewTy|dL=wUt$42S8PBIXt-enE$PG}0MeBFwq)_h)~P+Ff%OmJB? zwS!sQg)f4Ym1u1meZOn8fXV|xA;Rd5t=RYlR6{srC4133Mu}iZLgLR4=O2)g=n;0O zB|V1i<-=FW_mnM;OkwGwy|Ec%?usGxMUTLVv%G5a8}Q@Le>~7z3O_Jz#iequsmgi= zkUOdbj{GnU=%M9Fgu~iDCvo`HEsd?1_AgyN;R*8av!&6CNS)I|&>)`GcINY13gKc`Dbj=w*AhJZ7sK7pRl;J&vd#jR zBPe{9_>OWfCbPTB+`czs`c7dK>jiBAPH!l>$7*qE|sXOX=LbcIxrKW#B=6z*GvD z1Alj-s>ZpodxA>t)Td#j%nr|QU^4iagsz|{QuZ*fK3dR6a)R>vzISyorI z0(?u0Fx5-wkh*(LzXJ8~tgOsXWiXgglnh2nn_&#|l4nI~dhg=fquRb9 zsQ(RSUHRF8?c#xKq>iu8dt=Oe-?6%3-m{{IWCz(N6BD5Y!)?AAd;OD4=6S1^PBtg&{v{izmA4~LaEiY}25L7hBSjsWl+JEXZBg{*WY0_9-^KvE_>8u_1 zlv;*O=+(4 zLRgWDci7h3M_W(#KQdgJR{d@uBt)Kyu(ABz;Q^(?{RvV+aX8liI=FKb3;(O9!w^wP z1_>}r`LIvGA6cYa^MC@+Pw(4v&{`(AnIaVEmQwAK3=Na`rs(9%SI(kv_i;s8bDV7LY;4MyO5J( zQqaP#6Tk?n%%d-{mZtDcvei~0VN>6t30FigvqO?K8zZNTCMa-a-#4jukpu@nH907W z3p6&ZG05=Fc%Z-{)XA=lB>VF5*?viz$si|O$McBkvDVogc5c+JFMhmbM$UE6R&JwIl?lR-C^X67eqiv_3K@^ z^*MnSapd$X9@{e)$K+H=kOMsCi~EV`Alz8Q_l2YKOzJ7hZA;!i0@>;^*N37-1nw%0 z01>=UBib`J>PBABSz4D(*G3kq_#E&hs9;vkL>C-_hB%Hz0ED zXJrfUyrD3?A~85nqyCes`)5NJckyhVMr=_93VgKeVON;B2ZcxY`Kyv%{Sh}olyO=e zQzj}UNwKlp%abFG$GC|!qBg~JCi_;^c;#NuR(RSh(t2A7>Sj{jspzO*j%fu>Y-Y`Wh=gQhMUokv^D45u$$ z_zu0gp^&fag+eA*I1}a#t$N(IoCenhHpb@SuVU(*3=)pssrj<6YdW7(6CGkEi7?)_ z(0Wc;%K`~JN>>R9?{;S8!oG*Fou8FaA*KHOWV;4Fs{z+&N}lCQ7qgEy>M`e#sW|Kt 
zgr(2?=MSpijYcRX-4_MxIK7{V;Q4Bo8^T;ZVQTHI(m?@)lJ@(R=I#sx;mSH+<(^kx zFUQoX-UP+ROtn2*QACYa(JdWyNhHX~JMA#W;OH%>lrS~x$qz_B0$&ptHo2WZa20U^ z!u=8Ilg?2;BZ^^83FqmAatG_?Wk1UB(SyOKJ1R9T>h>?3L6|=W^_)V)yHN1(Cz+A> z!6>(O>2+D6bu* zcL0Z*oSWv03x+MqJ|!sK=yA7TTbtzc#tH-_bR}aHsl486BiwE`al^dF!POz!g;JU}6Pc6%KRA`XQpt>)bjLQh+w_8{G|&AOu3fFlxL&*AgO!d^ zE6zo8>#qn7u0YvSKG&}Cw!aKRbi2My`+hT+9~?8_&_))8RbClS`m_|yVGVqHU2Pi5 z-u+^xs2=g}glG4WLY)MJ62&Y=!Y=p3++(rl0XQ#0y#=afp&9qSvyYE)(;QEBsA=La zGlG^8OcV=>F%VoLrRlo2uC%1mUVGSJC8rM9t#|kE*J(3Pf@Mv*r*Dq0;0(d7=t3$P zvByaonH=Eo;oFN?^llQfy0bhCecB+o(Xq?0h?kru4bY7nIAD(zh932HhpD0?^m8{j z3TC0>HdDueIMNmt7?&CCSBrDhrj^!SjAnDuG*w?o!q}yHVMd(fzc^KA3BHkA_CwYoRNg#`1 zwPVd18?02JK_RJ9XD6hMTJkD+&=FS)d7m9tR(29~EozNG9mONBIVjWdLHsI-X}r_f z*(2q9Fp0f`3q0=bB-(qK)?wwqqpDcHAsRJ8T@J z&DkWOg`&qs=T$z1AeL-Cvh@{c0$Y2e zm=S+)TM8&mz809 z3*)qirgX8*ytY85_$KijMAij*sh|zv1sb8nJ9K2_n{thW=Jiwl=X8lZt$0od>l9*m598C?8xtdiwq;= z)KQYOU(s-Xad(paWB_@_Ihb%R4lwn{_C8bvBXQH_8YNZlZ z=Ah9JLacVrs>Zx9|7hM6Nq-|D{KnA9{ew#pw^HILMy;4AWX$!VVsSzgJYAAA<}!P| zM!@YPDxZz}a|n5Xu6EMe$z}6uD2;AvRhzBF5T9K?P*=Y0BYPq zIG2LniJiY?m2Dx*X?d1W(N6Ar<5d_4#4|9MN)R=TTX2g3VQ7I#aMNfo{%9mcg(!zUwxWI=Hu3b! z24|XNk^`*GA{&Ut@i9b;VXTLM(%=G{8U@ zX^l_nJiXeE9XUAZ;IT3x!_d#oB1s8gtBewQ4~4H{*{PPs%VeKDPMhAKJ#Sep!aKIY zw5~NP{*}F_8OK~IQth;igv*Lr9Drvo8G=-{;B; zz*wl&Xp)GQSttyM1hc+a5s*ehf#{KBd##T@Z){sh~F)DL1fr=eOejob*7? 
zU2Pho(bpmLryV4|R@tcV=W2}Fn%fol!Pd<;RNZl?eWZ`h?A>>liPbL#&{`;)>CaY7 zg_oPErL?_dK}Rwb`+t*BAN!jgd*2vHX`EW21y(i*ch!{LauV^O^vWz~Z+|8$P;Eu>Ffd?OqZBdR3-N?nb%bB|Jq#cC_}b?$*wrZvv!&s;$+1Mzg6sl&ZiKub z;hvF4VxEUSU7;u1wHS0dDC2h4cZlJArh(ItqR&Q_F#=mJGYidfMBBe{HIjA-#v4I+ zw$$2!B%gB-fa~bBJc!$(&dYA|Yqhk=qtbkZTA}jwz^@mNuS9?J6%gH+UPYuQs_oxW3*NqT zYZ=%1q_8zpj#bxL=B`|^&580d_~L`IQsr}FUJj#NS*QdZprK42Omg4)$NQH8@xLEG z>mUjof8Zz?oS$H{Rp8!Al5Lhz2-V!e#eMn~U}Mk>?s#qg6l1=v1E3O=+#I9*l{&JK zLvc-IfzdLBlcg-9*kFiiiG2?QmqJJtxt=kXHO`YNc5oAAUB!5^F*|bUCtzS0i&-kU zw=DMcI|O+DX7v7suAc7RS21h1i78QsmCnnQZZ-#C`&Z(qfWlWW9(=bV)I49`5f*9z zZ9y6-CrhNv-#MY(R>OJ8x@&5krd;INPApDT$kPYDGPk=1@3=BYVDu}nI zM{o|#f`ymgKM?x>4nQCyBO~sz?OqK)Ob&5K zA4a$hcme7R4lEBw0Rj?AC{KvT;8o8Bpn|JkUJE+!0zC8!Wc35O5BR--<4=&c-?#V^ z@tXnx^o0up60l!u%byMpatdMZ&p!a5gG$R=%$^7i5WMjR1XKX=j|%S`GMGQWm7@P{ zH2?tB!v_F?^&sy9fxs;s8)+cSpWkn0{Er&eGAqU4HPT*9jR7X2|7#@=X&4tq@TLp? zFK@^tV4z3e*RKwTfWa$F&uHDN{Q-N3K#sfrQqqrs0(rO3CRU$>UzlH8Sy>nXz!wO> zD=7Qj50&B30r=1Lm#xSY;@&B!3lK-%$RC&hPJbCZ7a!;x5{zJnrytPw@8bOoN=_a@ zA0Chp&{bay0_JXSL&3CuNY85dqf4k8Fcpa66A}o-x99i8IK(iW2M6rsgZ=vr@ky)v z3M1p(@elJuPZkukoxML!O&q76nvMzp0R;sGG&~{#0O&VE3?AZRY0Pi9atLQ1;Gu6* z5R`U}gT8$!I=zZ(@XAO*|#Fpffu z8S3|Vx%~q@R|rASL)g}}AOZndzlKx#t_S)a0YZj)JRHCem!bSZ5Rl)gFeJ=zP?snI z`Q`5$pdyy%xz0%jgzew7^=uaBFlRgetGV% z?jIUA#x*S10qyjFK;pq{-5?kJ)$v8QdtbCcO||P7Nd~-d6u|~B_nXt=;H+wnZ=t@G zo)FKcv|TTeqvdQ~?A5;G_SZw!cKiuU3lK@>V}Iu$qTRv$+Y)=MzNpu~nxo+LrgWFY zA!8pfA}uLtsaNHtjw^@;-GiAly(qg*#4NPKE&w(D3l2slFkB{G-aaq%p9&7-DJsX| zZ$<~8iY}AVu!PQsVc1V4@xc&5Gn^T>JeR59>IAvG-BUO?Ze(7{#p^26nh262!nYOu zNqia1!UFNx6?1|72FmbkYSm(L#--$T(MDRM^~nEV0wvb9I3Cigx#O||Oh*~^d+kOM zARQBV&R&_^Ck+LvucSQX-=N%6yM=6%Q~y zbsxi;pq_Q?5F8A0>o2E-j|FyK4h@BI3LUHu%kxJ_U^!m$g3a=P^MfW8F2CrMWNBG! 
z!O(H{CF7@m*xgnXpm@s0kMjD4>ebe4!frY7zbQcA5ohs44YgO>KUv49&hZZ~BxE`h z1>>GmaQ8S^Geh4)>39JF-cnt$w#wp=?DuaHr@{yZg#yG0aN-)7W@%h)rwE9TeN00^ zrnDfa(6p$P!n<6luvW=3J}5K@?M&{@qT;?ZaqT`|xxTkAohuTqH)3Bx9IKf?TWInv zCR|t8;~^69C)>rB)W8qt@r3nl$ACe(BLeKk$Nn);xCcI@UYlH2T(tBFc3~}kqeais zpj8{)Zlm)?4C(wiZ=Q~|wUON3i<(tumsk&BQ}#kC#$g;?pc+eA6brR)7I8!)YYvFz zOKz(XhdZCMdGA+Tz4=GM#{<#utQXa!*@CUONpdJj07atL4+w{Y7pxn7HU~cGZ}TJq z(m=a7aY+9pnF2v423{8OFw<_>^6FC}lZ9H+sijr*#87ILwxTEAPdVe8m*;ORq27Gx z=tTJxF+T2>U_Red_9axGD`NP&3O9(=qI{6ZG*$n_#1dV&Y zs*C!lvII)Y(#Mk?R;xr<=_s3!I_RPL>(H$`JF?8RHD(vwxGIN#(0 zNPKPjs!QK#?Ux=7o-E5r{lD6>0K-pjNYmQhL}WbJgXnBnxnG16b2^1IXiAgr9AIZ9 z-5SbLY^_jI0U}r*{(;wmpM%RDFbN0jPayCehj9% zTN>?i|NT>uISe+|-;8k^RiRbc7Hr$5YN{JkRV+8+c?FI}EtC+<9c$zYq>7r7A2Hc% zG==JHtSGbBcGxq@4ZpdelmA3S3e(Zj~!Y!U-D^L~~n7}#`X zAH;yH02wmtm1#f{+YhNcMAa$6cQhCv8$ZPEBemiR15NK(~AH zw8zIGIO?l$zi8-IHDP*kJSomX4Zl;g;E7Vd2J=LpUg6H1Uxm?iE-HWVRUMy315%#g z7jWl2?@!95RrGe|_sty=p$vHh8gz^Yx41d1F!A=WK4I)4TK)u-gwWnjT|}Tx5Sx1* zib5C0&~ksn`Ymo|vYN3aEu{VG6AVy(TD}Yn8V^yKh^G-c6&N3?ezt_MW*Wfcc8^P> zmIty-C?=3wQ%;%VmXT36_!cH{7+w|zPDkQfMYB`%(|cOltJmt21$|Naum#`G%78-r zy9LA6)wIk0;J#9m$X7y)bUh!AeAn&pNrvaQ`l_!~lE&k zSkzBwSYd8I;FzPh4ZIUdU9@ZlX`;qI57uuvPh?9Gg{BQAo{T zUB>^e7>=^2;x&?H@ogUFw~IQllFAykS@W7bs0BFzf6^j-y{l0)5SYc~X2QO4b6{`nc7$CADkf8BcfL03Kz>O&Llqx- zJTN5k+K_uOMM*ctF*r3q{6VMVER{K}ds8_rX+;6TVrtdFO8u-?gL&mZGb67KIn{^& zoj<9wd(|G|nm-(kXf61yHMq}c^FoEUcn>KpWr!m&BV9MmxjZ~$n3yTW9;+)S+`ljPbU%!#|TQGS&ru$*fvM7x$Ke-^Z zlctOBA$~OV*HGXw;HFtMN3`8XD|!KUv@fJvcvl59`DHuP(2N$UR#WyURHd-;Zvoa$ z&Eiz+gne;y*O{7C5y6j-sh-D!fcZbPksV<5m*Kvh|216w zxg^-`RP)e#%8}9LWWa?_2fw;b+Loigvt4EM&HI8=p14}j3mR%aF4BZY597P)u{c7< zYO&iA2zt7MeB4J8O^pEkpT~seaX-y_*^ZGLe_&RkPC4|5ECjl?mSDk@c?H^yYjaxb zbnT^^gHQNnL~6u(pClFmoT>X6n|HY5*a1>A0U`8pYFP9%>g9Iz&?0RJG`G8}tJjGX zFBEVDqFR0IeNY7l|(yktbr5!bhf zj){6(l#;szZHb1`p{&vqs5TByIFBJj`Kb@g_sU`)W5C^Kv?MA4_Z1(4HSZs_YzD&H%XdpOho>yMi->@PVT& z-DKIn*A)xLo8WYd*FyRXmZmQ{_3YjxGxg))5Zr9JZL4(FOKIYZDM`&mEIq>Ibsp}G 
zR@K1dJ@r&P34-EB-d3m=Cfp=?Y%!gWvwRco{!cnXq~0qA6P&ImVS1ANL(fgUPc}O2 z7q3z^;WfQFcq`}xT3_wabcFf`(iG^pfUN}?-t@BAvzX>Sr<8bzh(r5w_1QL!@8qHA z%~G?c=V1Zwt#7ImXXhcCA|Me=@u2OMS9NDzQ%`z^USwJs_yXN~r~A?Ds;!9tQIu|l z+FD;);^Wm*b1`2|@pfyOJj4HblNOF?;!ni2JIZsICN!|_DpF|jEve_`u54>y6T$-JwJ=cSAr!Z8qSv&~d73Tszd2C-O z=dus@1I{-*&vZV|ZbwW~C-QdPH+@I%tSkQztvGTzR6pjWIPbtvd&ugJVt@4BiLB*psA@HQfsoO zI0FnQGJdc~!bF=zeP&jfuIXEtc;&yZ3$q*|UHf96UCjy-xnPCWk+*47FwTc=JHz59 znRek2-HaM9MCq6Aw7**ibSsmJHr?Aj*_A>OeDb}oTbfjeDX*2+rA3S8)m#NE_|!Ge zm=0yraHIVRckdumayY_Zo(73)Ysz+pjOb2y2h;9b)}@-`{P+J;$6j;;g-PpLTBWKE z^}p#lNg{Q=&U_~+QHV)>1gQaXjI|dtSA}c_~B|o z)LMpdBhyPU9m$Kz_=2Bi^E!6IF4uVFR{klWbEe#M0UL z%uXZ}J@vbcKkpjsqO@6&zt_D)uQTra__6xwV3EUR}~Y}8C@=$O`o9a3st+f z<#1S2DLbgmb@K5j>vA*;%hGaQS}u-V_33vQdXUP(txL8ivIW_j zrTU`9q)2h`$MnXrS#^=+`Ms@fjt0`1LN7qv=XC3QR{`c%4fCG zAi(O5e&+s8q(scQL3T;={E+MLc(SB|ipk@xQu{FVt{B_?AC#R_j3`mtrQ5b`+wRl0 z&C@nc+qP}nwr$(CZBNf6|0Lh!pJe8$kgA)ysqFo{Ydv|Kc$i~aP)qm4 z`9}O!7v_vDStYsJ^&)x*j~LWWWWod1yFZ-?%(P5-caC~wU>FrTQEEDPHFHIu956i5N>v?V+CDs zErT9ha-l}=3hLv#TwW4ZFCG z7mM`F>C+_^Z~J#K#wx)^0pfCBPJTa)#iSZJK|@%O_wbj3k`I}-Z?KzDv4x~N`yobM zS10~4>`JZXm;Bh{^o6GK3$kwS!l&NhwOK60m_ppxx|s;D3#){Ll+)COC0N}mn4z%Q z^F&*?K+Y6B@mJZZD5zaH(MtJswB@k+*C5f2I#NOQP~F=KDcjTFK1i{TV%27qB^X$~OY#&y=TmMQt;7eFE5uPRZk>4=~$#GxUQ~e2I+=UUE20SKf z0d4^UHCS`F2Q^#R=;6M7RQUox!)337$+CcqkCxYz?~I7eiX2ZOk%N*U1hDLyzs);- z*u&Ojl9Sc?oI90oYe1q$tMOfkh-AT9?!KvswL4P+!w9psQ<4ninvT)BCsJ}=-8F|< z?5u|P+*19aLsE6BbZEp`l9Ysk2>N@e2Cc_x1#qz9xHeI^4&_0;8opyPo_XI@Rtf?> zL{M0P(QO0X>>Qq{G~wFR{1xS3Thh>Zj3ZmEZT;l|-W#lX)?i!n(Vf`bCN>Y*^v!E5 zzk-3>g~Y=m4lmLK&%x)37kw-C+L|+(QnG!IPIhWIO{RPPjz)~Q=T%`a=Sh@dxuBKK z=4qp2(p){5(pi4GJIAtws4=iu zNiY_mUMTx!>&Z&a^>V_aBRb@%zootVvO;Ll9x-CcHNCX{VCJLkb?N3O(--AS)ORDY z`nXu_lwIm%1f7yNfuvK<+$Saw)|7#o6A=!#g0Xr?Z4HGwz731fXVJUC@|zV2!%P%a zkA`oHwyNb!8AaCf(dVG9D)S`qfM_hJoGk`qchrNh(?EH^Ha$ zHE@Id=M2+SaxfRTMrxGxgFXiF2zk3qNqH`x5-53TJgr7`G1r(&RJ&^rwml25%~|2A zp1vpP&YkM%ES@FfTE!$16wot{WfFzpLJBfdl7z}6md$F8WaW4NW#*2*Sc_T`x#`bX 
zZ050U;mn+AzMJH6XTQ33tLa8+rqqanqWy{6;N_TUGfX?UV2)#&@P|aJ)Ui)Pbu+Oc z=rNsOL?Ap#A@N!nQ7%5Aa2GnJYI9x?yT0hP&bT{AoxbTod0!+6;rGTAQ~U$vY=A0H z#&$FKT+m^Us&E6pVCQ-T&6v{8!{&@v@D!crV>}JhuJ^)$aaW=juYvSUIEx{-wDQ*P z{%nlJdSZoEqZ{$hKr8EdGYO$iz|Dp@?ziXaZ|29`J1~c-4pel)m}M4wFgxxW=OjK= z9;dU|Nq4;UQ`-5ThL|M2O)utl-?x4q7c#*XcLbwZ4O@Kb#|lxmd@ApnQ?^r znYN)D69&h&&fE6*G|kd6V0tCYOd|WStseOhiUtn-k|Dwx8|shbH@5{k%^%dOHH9p~ z#KvB?U7T29p^Jzsi2%_~oKCdSak1jF5A~2v1~$(J_b-1%H}DKoTq`0L8KNo~Q1Szk z6WBQb*fdYN#3Hq!w~KEZoc(5{;HDc+(s&g z=8L45hX-V%Fc0qe4ueAT<3)|AlN6g6XV*g!J>qj}s=Bk!w)&MDg9cT|N?(X>QQYb& zHPI_rU&_0}jXDzzbK+t1^_fYnYHEOCixKXbEowQQE50*V;tzI?OxMKZ2m4VE>v4}3 zp1Wbs=(90ZX6v>VbZO(g)aUp<75cM`wfnhk)v=SZm$mlTMcwQ^&(2N*E)n%W2X*DDjvU{rtD< zvi!1tTTXi{?FkK~SLltCpX|6Vj>}BG2z_%deb_%zCY)RZ(CCd={y))A#RMyzv~qKaGTco8^`g$K0t}jzsX<({8Q8t zBM7SUjd7aW&=Xz2bNd^9FV{jUSQr6Y&JpZ?+zH?8FSAwr15T|{)c4;IKc@c=;>YkG z#E*6o%cQ>6esI}TnH zq|&Qv7!*X{aPjeQ@i8c9V3D98<8M&mJYzo#1b7gbQg9%Pf>>J^UnSI|^C;0(Zauq= z&kN*H;3LrR(NS^8Pq^RC>E1aA3J}P}06pAYx>npg1k+qbsBmAyuAex6v2DNzrzBLQ z`^QHR0c{2Xly#jrHGtEwUM_&BT?~$OU^w7!3k+BIeW0HOOo(1c7CU~u@6{*;XLUIU zEVx0;OhkDOq<{!pvHo2%f`1OVb;)${JE+m0z{_922ms%_*trnmhdXEAqCb*>{NHfF z{CO1UY7q0-evRPkguhG8ua0(tN_15K2!VcYK*DJZ6gs?pKmiURD?8tvBq3jyBv?NL z?Es&R0sc7%H5#x$0gfLFg;O=GP09*l{c;L)bPl8lz|V3ySifK)yOC?!N7H&)@kH|Z zmz#b#zreLG3!vj`Dx+}W*0zDB#cx4dvR)snXImP1L3L3@MQtz$7vTP`TsXm=`3%>N zejjcT?=(By*N^smZ9nH$vfXb8ZjNiv+aZ3QU4EK%-0Pc<9^4;oKw)8D1`rhJ#-I)2 zSl=I`k+eN$pXJuQ0z@7DT83y3BK|$ynVz0XyGQ@L*y-&bzn|=y*x>TKuxO^Aoamn` zB?SRDKrhgtKtF#bBqV-BM03d8#6+0cpB!@-g3umgh#zuQX!8)D@y{|e7wMn!)dPA! 
z*YEHE^w$~f7kQ55em|Zc!nUN^Kn9c-;`<+&ryar{_TZoK`yZk=A9Tl+t+lt7jFXn1 zA6p@9gz0S`K#uwLUjgu}(4LX+r%fs6XN=j?E<<3)gpWs4EkY(jV%#$a6LffJK;Y2N zDq?B}1U<(jfL{>TXWk@!#~FPYCvmJTFbde+#lJogNa&vw_!9b3O=UDvk_(2=o| zoR4ziJc?ENtT0J&AVM>iL2QD^f&_|x+ucMQ0{y!>J!vo~K!ghF+7JjU1$?t`L13R) z)Yovx{zS9#rW}tegS9=KJ-A5u{De4&ggw~r#-Ah2@-8e`+nfpc$xpA>#ORu(heJ>n!JzzABq7Yno&v!odn0^}EQO=2xT#zB;hF z0)xRz2_N*cFqb647Rr<2Sm6OI>~H|1S$K7Zh1MVK#%g(Uz4ov{EQ{Xd&f@7Ut!d#+taEe;Q4Uv- zE!q|n5Tzq?6)j3T{#+|gD)8*fw1)nLnzP@KpX!y!4bR(Y@fg zV9&_vhX7p?jS(7O1aw(i7aYlH*%f@%g+d9nMfA25 z!E|TfeQ>YGQ`=L1mY4_X@eM1zWNq&@TggAj(y)WPl!qXlVwF&#QAVzCe5G^L8&QzcBz%&}&9Qk(1M?T4at0gw5 zZ4|cw5!~7n^X_*B_rdlP255z(ngljGhi+~{agAAcR}WnkJLph-2!FQS3_z0~v4WQb<%)DR{Lg8V|;swB{38N;XUVR0Wm~slF)qIlR ztF51K15N^T%V61F2Cgal109m4b}6~lFhx3yB4=NmJb(~t_&2OXKlYdit~jt4|3MJH zD}y?Qra2HM9-P;J*-!AM)IvY`+xD?}7qY0U4Wt9cCFKUa;jripAHTqI1&qu6qI7Rp z3>6V`XBy_)rouyeTXZ!fUS*9Z8w*#Y0M>0tVz<;eu!pPIVPl4{N;+)d115A5_$|%S zN=uzq=90GNtU`7fV3C3;<$G>A;p?g&3;bSOkPN~)u0eXMVS!H@M<4r1@G6lqA$%OK zi|6H5xTu+fJ-C7mhL2xo$FZ2eA#9vTyrRKqWa5&r4i2U7;-&_PHoV}cdIt5)j& z(8#B_JnB&mVohvZ|AzAf#2BdehRtr{tf6JN!te&t7JNO@E!XrlksZr3UPP7SHm+H} z|8794Tob9@|Jc<5kZpjH{{Xf6XHc0%5|MSG z?3*;IdRorhHIIw)!3v2^vZJ}ui;kirQhjnz`Q7-=TB)bYrfc*xfMaGEibitjY_AU7 zoKm@OntPiq^J{rrbVZQ}#Crs^P+(GPNUj9wuMu{gy>>PSt|SCvSh~oJr=&+i`t9JsPE9?PRgt*kB7wxaAYW8M zPQo$%^Os8Rv0Bu)GkFRU+aM~m$Vf>GWL%-6_U6*W+4g|dIJE-kw5pR)yG1k;?2C2+ zD3R3}-S89}mk$@5_Dfq6LXMdW!l0~t)BUP`DCzdzIOZuPl&H{6{B}ym>{~GpMC}1q zqWNK>Y{U}WLMaKnq+cXtp@0=_W2ThDz4*hF+QE786ouC`a@yY=n3kBp#@?)3!iBaO zTTjx&*e%9jExSUhJH^Q7ce!P-^La`UGT@_nFAY7QEO6x_cT!Iramx0KX*7w zxc|*^^cJlL2Q;(K&CXt4aGIC~_=ZDXbaa=9`i?vJBuOnlR442-hux5%sj*xWne4vt z`hmplv~Xoqa}=gm&3M)#NigZx?(hY2!s2|Nd^kp)=8^!px0b#Kvk4xI!j5ZP$;Go( zoXwk2;K^oR>$nT{A`NAz6KrHPT<&X9`j5Uo?jFm@KD2X61LX(lsgkL(mPw}hp!?%( zbw1+!sHMj1{HC69hOKT6)jlRY4AR?01OvYr63Qr28^UTuGX#veNVNMzgG3qSj-hpm z^jy0ar|UqHyj;XZ>V!uv$YWWFxG;qAx9FTpgZlT2lIeqZwktGOT)g_3It^gwtR|!B zq*=@|p3e_7ywy|6z-@mnVo_&xO`S;Dll4xJ8x&(!go_Elfb_#7fncq^hUBq3>M}au5lJpPwuNA 
z2BG~qGM6rVsJPkoRLEZ$){U|N_wo-JNcRyhkDh!WQPIT9_MA(?^vM+-)7vY4E!V3E-9l1jE82kFyr|J?_5@lz&Iy1wDxDg0+1wqHN_{EiG?Hx)qEP2?= zfP;~d!QpN5@2kUAZ54q>M7`nr0+Fr4X3tbwaYHB_RR%!6-%oq#c3|Q%{NC2d4Culf zOXTfjBpxi0&jfCJ;k-`mSd!U<2aC-{Y^ABI=uf>%iG!L-h!dRJztr1Eit>^iUy>AW z4(+It4D<}@6_L7tHf(dW4eJ9W6xA9G>f|xp$7aPv0zfnwQ6m@#JHV zu{P$9@CKrU1pke1se%i{?Er~s=*iO5{zBom8b(-JS54@*1}Mzn(!9J-MLzYgcZm_X z>s68NLVL@P5a4gg8Q8!uB%ldt!Rq*87rnT9#;q(y#nAS;HNDi#k@OI#6^ZkZYR(+Qr5PST<}U;1FOlGqmY zF@H`G82JDsk57a9P4mf+Mq$V7RXpE09f_s3K3Sy;7B11_yi9%RVM{7La+YM4kJ!O^ zWKor~2}@pPhrX=S?wr&EapKk84X7SYFE78_Wx~i=Nyu&_4eJU8d zt$v^uPRg#Y>v_45QCIGA0Okl#hBw3|{RN`z9d*TY-wWd%{?3dyn|e3B8M;*Pn+za& zK57vtz3S z!)qa>Q08-N`WqlG_x|DLfZ)-xSKC<53yuR%dzQHIfHayZIMl6;_PodhmgdvjR6MPy zHw!Fc1=b|9u=X7>wC-&5q?Yo^c~!{n%mzDBV3eTi21OZ%55yInk5Y$%>xj>4{Hf-a zpMguc$92H2$=J#>&+qCRcS-~V)nh9Zn##V56&?fN=tp4gYh&St8>)lY+T@%oqa+>iH&4gA43yo z`8GK>d*E;E%Tl*Yj36hn>R^{AJkj3*qYF+iuuH%)&ndl|k>tfH0x}DZmk!W@wmDQH zU>>q{(L=E!17AZ21Pk2X-zvbHZMl*fP??0hvoHYVd~Mo3A-)(G{OcFXO>H|~pLT~v zQ2l1~O(7ll{#dX25Lh_cvE3rfu~36+bD*l8*YfQn9$#RE8PRB3PEhlN!SV9d_NPW{ z1g3T;WVOc;h$rOX07SvvRLXwck&30=A-2Kk&+`%)zQ%rIB_5kIq{>lx(H3U9^z}>+ z46{Fw5fb!t6wscS4+xExu0XXB;rv57_M$#&#Yf!?mr4T7+lEPBUGr{Jmkn}> zDKbJbY5Ma%c}|=D#c?AmszZ-m*LSZ({&>a%J*f_i2*-;wF(W%75h&J18qYFmCL?#% zV4TaarwPqXXgO1_E1F%otb)@qlAiQ|y~btJp{x7EdJ9m1JW~dnpzrK(D7lD~z^P$1 zrV?BB&N!q{U<|hx%6lhzD47t%lltD}pBEKT_wmg=gr)>r^_-$q3xFf(fZnW{p{&EY zWcP?d(ctyvrBpqsFM zgIwr-u~+W1bQ=q+2YTmyXJ^*6;{FsSIi#1qxmD@_mNXP_zRV6k$tQdgwqg63+M_Y9 z+K{c1H(zNm$OT7QAZO|eRx@eV3VVp>ei$;V+m!99yzwNQU?UF}yKb^2Rf)C4n?|1c zeD*9{HWy3$Iv`U*P$JfNR5>PSC2WZ-A*Lq$EA|vOG@NZQ+$m zFWTA2M_M^n@|=2TdAH3SOVS3<%{UxqW0ag!Vl&x8#8#X3x8ZG(_>b$#2w!)2s0B}t z=7V|o+DBJJFKVYDk=09;O92pps`ts6Rl!`h96y5Os4y3UHEsXxJxcW{ut}5qJ%g9R z`j~JN1?6qYRDqzpnuGOq-^_pkODPssUtKy9CJ zrc~SeXAVylCDv(~X?2HM5+8jT@;LA)HRzsJ^2<~ebp?#gwwEq@XkR@_B?bIj!t9fm zjZ89SEN?n_N8nV7O4hFMg5AGn=BBbkN~kN4s4k`jMw9#ePCg?|B_C^$+zrRGHCCaI zwIA3(6W#~h^MPX7ckYmuM=>DsDCBlBK5^2T^-eXfg`SZHAoz=^TH}#ogw733>aKyS 
zc=SQo5iaDi2sh`Jv*Ho)u%KSY4adyZEo2w+yPkP>D|X5F#LKKxP2DnLap!n9$B?O> zw1hHEDBGfQeo;)K=n)2Y;aEQ0%bO}&i)#7q-D%smtH4+1ZIq)sf{riAXBu9$j1ML0 zupl%traWQ-S)eJy&OSO2);5{Sc6G*&aN|~8eckpi$)@jsQ5NW$SUY(4Fr#RL`7TG_ z4*SB{we)>G2^Uh3sN8_n(`l{at&N5JQZ<_9GfH@Esgsz(RszTx!Yy=ydurFbQK|aP zKSq|zcg5Eo$^TM1hUM0xNo?e^TZ#De?)}?8epTU2*>Yc^4cE#2pc0S4rcCC!eVfPY zDq2U!Tki%2osk!kgfCjF^4+XX^gG$lp_tgI^wKvIshrM}3cl!zFR2TbW7NO|^ER}y zTsebLI`r+7GD52exwJi38+P^$Vl*$${$rdOgER6JLyyV#E*2fw$4`IN7{RvPB!;e< z9TJJ(4yc|x4<{wKgmU#Xs*)5e2UaCHuVZae8Jj(y&m~J}YtZj&vR7tg&-h+n{cNTW zP%Hm=y=@JDZ2LFm)^uSjz>< z84p>*r;bMO_*AWc?UCqsd$~Q2sHddhT1IVki5BNb?=1H`0i4C7sNl}a9!jd0^R$_V z^z;ZV&*L*jz3a#g_f<24cNF{IU}5VjYyEIQNNsVs5TiKQ5szG%K}iM^bc{3PWS8s{ ziIFn~C0gXR`YfJ~U?hc~r)y2xa;*iRwN)t0VsYuVHSP2pqSYg-^WpS{J)0YL*xXma) zoR=iMxtqK|nsV(oyk_B#)id(~yGZP59ib4cV?bz=;C?~_r)-0JRCO79O(CA%>WS^x zge$kO05kmrRq?zrS8Zz*iLJZUHlM82CMettRn0|5?zH38_c>}(>FX54v)4eg#=F-A zEhZICo2TWq67)1*QmM_z0}^hD!m=BL7$9&k-Z2F(f~9vv^3_aqwBJ z{83}c6^>>iRc!K=4b;B7Z z$z%UgMziG)yF{cikGtl*{-LW_s_M#Q zv!mWGa_f#Y*yU{o8z_wGP)T0w1b;PBk$_#{ceogZQjg<2piim=1dR@IzMuWlcgb=8 zIbYj|>0z;}1Z;#*?y!Gd=1vEhnglc&gjyTyBzw)<_6ucgYiJ+Y1)G$r#J)e}Y9&!* zSc^jT>n%V|_mR>z*qv6=hIy$ZVn0)wAMm|QQ;8;>JII#!iaSJLXRTZC8*Z+_GR1$~ zE%N$Y7Y4U|7bS;(n^$3P#V`?Ry6TV+6+kp;7EY0hZ6m+6T2^!1RWLNdQDOe@dqcFw zcFT2CQ)Te&z&B^|a8}^Cjr9THpDblq1q^zrN&XL@Qbv*de}nw~C$IZ|3;A)dviuk1 zV2K9CpV4dP5sdY+n41>PGL_A44cTv%IC!(II)1%I{>J)duX`3ZKwxY z)zEnR9+}+^!za++2V(-KU;;k8?Bh2Dp|bM;mm`lYN}AYU{CR-PWi0@%y}P?(_&$R} zXaeT)o0RSa5J5(#&fVGwGa+jRVD(@|m>qiP5uAn6Y-{DXZK!SS=wQxRY%9$cFGkb7@BiE8c zSV7hTtN{T^Dnd=t;n76`AlJU(2a`2VKihnh{UamLLSFb488Wj2iied0NN+=ZcJP=o zlg9=`FgGxdKF1(wd_z1e8%xx-wRUCUB3$lA-imo65s>GuX>IUoe79?0*LHEQe%RCk z_)XD%Q}k~x`H29-*gJxbOMY74lL@|xnLs)M+Sb+A-`6_;`$hxghNPiR;O{N*;`;hf zt$ic(sO(-`>D>U-x>5t3Le~Rye+k_>v)KUyX=iHtb#MQuezFtR)&Wimn9uP7gup86Fsd-gbJue!E~8BhT)b*M1*RLpRrg-+m=sX=Qwq8@BX- z`##?sd&8t4fvA_$;b-LTBa z*yPOAC%X2r4Jf0)X4NqEj1BLWM&I$4pLG_^1PtdsN%QZ$#mjGj 
z=9b0Y|8@HNwmHWq^v<-?J9>B4#V5U|W&OnOaR-<}NIt%r0uijV6k5M27lGVLb0%Nqj02}5uGwvJCb`>sR`wqR zz43AI{OY7w|2kMX`H+buevV%u!?wmz#U6moX`K1wkb1geKr80M^eScu#EZ^E)n4-% zb8$Mua`2s^wVO^?3E8X>HaWA!#HDvWVS+CNEz6*`>CwpQn`6OaCKiwt_6Z(1I6C6@ zhdD!-x-3PhK+O7@qm(-Hq-3(Y4^lth{Zr+Wm}Tjq*A|Hg)R-jd>Kb}U;FIL`O6b&+ zlEJ_23dJDRv_Kv9f+2?OxP*r!Zh6jAOtxBCjx|wvJG9ezHM?bl?m`KdcOoo2M>D}* zrh?j*F7HTW?j;a>qpYHCr*}*L=havkw)@)NM zl451^Ym2{7HTx(oSF{9Y@kCmna-yROdl?qi-i614@$*gqnE51QwL%9Pb47XQV1<8F z&_MPIn$rQ}yF7=` z+DiLM#Kxe>J*mwDg>w)t3iBYl`Bxt@egtS_3|(p(Z>f;-47!XFPCf5xnW+;FBC zjj4xj(?kHtO5Xj+telhF;P@R2BsS0|s?!472VaCF%DJ>y`mf`#358Wxgj`K@!ERL)5Pi?V_fogDkHB zL&(p$Ms7@UsRQ&IX$(P&iMLbOPzlYKpZ|VEYl_11FJ~oE&Gxl!Vg~4rY(~VE-T*Dr zD$(Zq7K0V^x`(^F?NyTjwO&zGJ7nXHc*O)*6Zk)aJliYIVR&pYt-P6lmNuiFg-F7@u@X|6WTN`{Qu4QJ89ZF*G zFqvNW0bX$@8wqF<%E1mODWEvG&RgoIdtVRUVYkP>k^#e}9g%c!c}_Bqu-5te0)yK{ ze5A~bz9H+<*zt8PlP7Twx~v^nd7K1!tFGK{b501*40aWhbal&_lG8fUkmSR0#ft zv8E#?-V>~nOE9KTj%89+A9c3(;UUfHH+S|2QFrl;8*&J>@Q7$$3C1b2D`N5MlylxBRgmtlHwn4(nZ zMRB1f`-h@R-h?X6#2~H9FpI$&`@##I!uv*Hcu(wJ=x(~E(INPq5NC{VK;-*cg%xv? zJ%GmhQh!@x3Gur3P`}6X%kE%s=G^bDU~9JfT=71bzXK7jXS+Gk#TqPNk(ABNcaxUJ z6C=5m0aFFPdsgQAU;-$9U-ugn(*gviOHXD#A(-OY4hc)o^1M2&Q3A5Ue&Dk#evgCK zEH#sYRwuz__v30CIlvV*U8M{o4o z!;k}Yr;y}lW#<_wR;g^gkQtrPngj+CV<=W4R^7%(Le(*7{E9;ac$S3O5rx$B7jz|_ zSeZ)SE8&DRvvfk|Fq1?%e)K+jKZ3Pt-=w>D&;%sTmn-Dv8{^BrOx@tiHObHt9?P8| z%6EBV|NfOkiBl51l2gh+*T>%!jhlij)F8uttBikgVQZyOspp z8$F^_b#EU77)0F88T89)Zd;50l|@87$5Snr){bc;j)wDKgy-wT9gpG-D=nXn zRA*P?1SKQ5fDizr<^CfVf=Z7c_W-+5_DQLc397TIjpE}SsGkvgxpy)`~N_VN0H||gu;2x@+70y6|YGDWLX+JZE9~Ryjyc7CU|nBSMBOIS31+P?C4wcICN)^}Sq{hEL}hrn zA3yC{ZophRp`XBvL4w@cZWl`6CAKgMs&M%mwVRNNAzp0CGpw0hnwN>~kL#CUmblG; zbTK>rwMha54UPL=QxU#nbLv73P83cU$JgneM)Ocq8>zAA${E&ma{YaW`b(*L0k4!N zS-t#fI9oVE|NXK{=({xC-=+0#VJd_^Iu9`Yox!%XVlc(m;l+!;DNl;V7y<1{S(U0& zVOZf_eH~*RjbL0x7Hi;soPi80RjoDDeNb)C7!MaKx6(aC*#=`Cy85ruCM-pbVs2(! 
zKBv~+Z~aGuS{*E$g^voQ{OJLD%_?gwOT}sak0L19NzzPEVgaBt#5j^Y3CAUmzBJ^I zg6BwbCL~-UT6o}FM&=0c+!nhRDVclqCFR!}#Z0bbcDuU>9nTFukNmy)s_}1xZ0D8i z4Hvz*$l?6?8bW1fUTcVg(2|{HWi|`iL`)-qnh#R_{BpIX>mJh)R8xuiF6?9v*GBtX zqa|ImBsfYGd&cbw#|Nvk-kLnbhfTNYX*%x-%GbPc z37jbFTgJQ>^{mcVhSjY1G-2GPG{ezHd?va+bxMM6y}J0_YV@qC8X_pjD} zwJm{2V-}^$Lbo|_uPHp1wbgqCV`} z@Z9%D|CXR8>cvDgLnko@89KS-ur`>N?O4#Lr7q8={uL}7N0(MOKowc)F71Q-HknA1 zJVrswP}*K3W7xL9pTuzKo~tmKOR|=Ed{7d34l!W!7jqvJBW#h(zP!0%J3lmN|A|YE zam+dDmtRR@fQVi^SVPLqSODhNU>tT#ulsI)Aw^7~9wKQSBfj{P*8KztIZi;5W>kDN zwvij6{y4Wk6)yrOA6Ug@&K+&0J3SDe=tskxd_X}T$_P1b~5Uyo=wGN0^kPxmDeD3zX6wbyHJj2jPR!(WdP z3>pq3o!6j-rtiK7P+qSlL%CAYwmq?^fVV1Z7tz1BLkxkj8ZG%oPy8&+&BRvNxis+j z;=PSSUxS)O9RycK?c`b^NK6$$`k@8!>8s_f85BasBbnPZgWp58vW++eEEYiQ71onS zeMM98kc+LJxn-=aQi96_Qz;rSB;4CJ!m@E#B$0x2_5OOCFB7Fmo5AW1=u!iuFJl)d zy|FDdS;aG^cG!*N^2qU}@lWbp2QN+!PPIUEPOsI4%{bl<2k8(SWXdZdUQOntV;rUT z4yF2)NiWQd21c3Y1}BAT=o!I_sWi*m22b=vtAx-+{+M|8ODS z{;=g*Uxo{O-trHqU7y6r?cgcK!EIxXKf;$LTmk%<%u7I4n!QLgckmo( z_}L_%500@^pJi>rQwVKkPEB9;_U=n5Ld4##x*n^kfUo*Ad`TnvAxPGIaTZU>GV)u0 z*8%J1V~tSO!QVy}nF94aiD$#|kTdN%~X5*TP;-TxTzAtvY7^N@yV`Rs7MGshK~E zmnrYL5QK!Y>(#i(>?ummDt$bcJF5FM{wjY}#9FLK?^^mtS&Tni?CIWj&AnJZB9aKG zClzRjss1_XMN&^=O-MO1!9L8^S$Ql!bGomi=-+D|8$P>ftbi@vcj((-^c!t2%xLmQ z)B08le*ZZyNLt&T?oWdVZ2JmZ+FVENtoG~vV{-tVfhQJKb zHwvu-O3AHZU?^!}b@AGfNq@S#3HCHnC+E!1?BwGXy=pRrooipX@ev2~{D4s^4U&p2 zm;g?nwe13j_un1Yrnk1Ln{>vQIYWzQ#YS|LRWEO{p5_S7^5yo<(L-06mZ-?5IE2dw zcVB32+_=908`Z|kDpUCvc1)ZKa~sqp_nE9p2uZhVT%?W2<(L!d5%dZh2lXsdgA~Ms?s<<$ zgu=7a4fqPZSlr1;>H^l8(zTIfq6t5Z&|B|;NlRE4y(CjuNcORZfcjRGOc@OcHEkG@ z0R-=m2m96DY-l`r%^`oCXPw4plb&x=yw_k_T-ZGFRO0ZHl+_}*VvJ*F2CwOx`kaId z-t74`s@0-}CurU%6DCoO#$#zwox((h8CY5ojG6*rQ+DUGZ2C8wqgPX`VlOt`@jJsk z7L?u?4%BgBv?&Hs*ZJfk#4gK?Xx~%P-ej}kul$}AVUj8Wh9WM^OV966YJy7>3;*;$ zSmg80X0NG^e@C*oqG#a}DQh!WiD=OQ;^ax+-Zm+p*$JCf(mV%^Wwedf-6CDe!+3zgIW{dRL7T^+fKh z_Thk34IcA;onXlY1AhE5(=K$PfD$IS9FEcCe1gkFqY%ZjUY>%Gq|qXr5Dj5u={*SZiZZ`^QJjgS=3aB?}xmi>iDJ_ERkke4%v9&a?{>`uc9zlzK 
zO(c189wk!Wf%){L5GfKG?J&6Uk9JL;ssTfsE9X$=-GM88#50wG6W~=WXRP(i9ZWdf zOlE!lcwIN^+TKB0H-IRWJY8R3>`}ix>N~g;t@0M(4gD6cv_jgRX{lvV&~kiWdfI7a zO(Ly(AkB$nk>cHWyb%v>qU>!$E9AVfcaCxhj>Y=XTJ~F%ZoJ0O>tTJ#Kfa|btO$Em zZqqo*!ZC*UjBp9Hr-79t$oHawy~d?(>Opa4<+ZwogTP<<{av`@ZNzQX7%Uh=j>vc2 z^T6TUxW23k1MVf)=_6&25t_(q2Gf1>hIg#^H(X#EY$kYZ5z~gjiU&??CEmQiDqKGc z!w4*DTGs~D6Ia6S4>MDP|JF5b-*rE`pn2T2s$(}ZV2p&rM^YW(rp3~2kD^>D1*Wnj z%vc?dQ1f$5-&&;BwGe!`fxaaf>V}=%2^kWG`W`)dmAt2F#Zyn>Z+tGXiJZ^WBvO>Q z32^B!!OCdJ97%5eK_FTIp_MBR&D&ezrcEb?!<6UKVDMvezao+c89(GHOz?IZ?5z?W z@-6zj>|<3qn8OIH|F9^BPS6)%xS-aSz_s=G6L{GC@6`ZO>%aQno_HP?IGYld9y^qz z{?M+-g7%vNtDR{#i(UE(9#{IVB=vL{2I()dRNulqYzj?aTi(f@=+zgkK@Ga9cDg6?NCr$_`-p$>m;26<<-u^@6 zI9Io}icS%Xhq%Q5MBJw#@SD08?bHh}t+XQ89IF;A-u zwIJ-CvoRN}ymhce{+O%trDOaK?+tABb0h0!RfL3T=g!O%LQ|$0!{Dy$dB?g%t4BHK z4kmJiVkq+e`Jif>54C?t@Phha3$W+D`$ERHGV%?BeXZk}JU}qNxrlTeI^yb;K~vpB z+UXZtIuW&csppr__RJnn$nEk1Bwqzf9xZ|-Uxpy-&wxwXT9$i3G31P9s~lJ`7DVHl zml`m<^2QhU2QuD_!tXqjXo&bXGTnc3SSw1vx3sDdA+PKz!T-+vEP(N74cPNYrpt87 z)g4iNIz4~zZ(4n?i0sTE_!>nVNSASTIGyB(X@-$LCDVa>ptMu*FRMiQ12t@w(M6!8 z0cmXl;Coy&bAMf7ycG4_NuF7AxY=3M!S1`s#CwqqL4F*#wZ}(#f=3cJ&RDqDu&sw? 
zWHEZJy8#$w&Lk_BbEg`~(EeY_`7?k{eD9&%T3Hwo_X+3k)Wgb{<{P;KVA{J=;#TaU zXt7^8>YsTg2Y@^QZ6GarC~J+4X09y{i8FfaJRRC%BWNeJOd3PCIS*W-82EMpnwJf6*bN5y5tb zLsdC-iNzNy2;B0J62a_2iY=Tf`DA+TQGBd7Jtdq)`Y>=DkbPoc3 zzGg{G@UH9E22pvdD5eJK35kLjah2m8a)`iQ|JKadwEqw8-XTboXkE80+qP{Rt8Cl0 zZQHhO+qP?!ZQHK9PUpVZFZPQ#?bpoAMt0_ioRR ziE;%I#jSrYsYfd)?hLa$p&*>&?_&SFx%6Mmk@X2 z6aS>^Hn}Bdm12EeVI_1~71!&A^EzLn`1UNvZ&8)6CtIE?>2~jMEv3H)5t#o#EdwY5 ze5@j^yCc*OPK5gU#0j`iR%!c;!6o-b43RNaI)itfDq_WrKuDWOifu9aH~ru-cB{eI zr6#u*n*zr8u!W*}V%!j%g%A2}%{bNvq4%0t73G&ng6a6=@ZJ}8Gq@p)zh}{><7P4Z z_Y-;f!LZ(jq*#P{iN7&U&>|vP7Cm~v;m;LKoG3fDNALXZkcJ6EXQN<-c@`0=`P|ku zNYLTtAU*>V5lk)U*pB2B9$+=pz^c|BIU@qo`mvwd_8)Ege1rvE!dkW?K-~1izD{Wy+4)A&$>vj#!b^P9W?$LdR(wy zdzWRp(+j)O=+Y6gX^DmJ>bamHsK9QzN|BUX&6PROOvl;xp5rCv_xo_;D>N#l1j|9T zM&7hu)&XV47K^BLAv1YAf?t4D-Z-V_V&Oe2V7~XY&PGiTQi1lih+BOsXnuEr>Q40t zCTJ3z0V7EegMyO8$}u6}urKQLv!_z{k;Po|C{3ekA2z~wKC{=ZfN|in;Ypr-IE+I6 zDH))o!c^?pxh=&7j|$|RPzBHX`0d`=1&G_C(ZKe$%}Ei8?Jb@9qz!W=Bx z;EPQ&Wz2oXZ})qw)HQg2#1heN@qDPRW+~u}>wazEw6xkycGhG6#z{s`Np9`0F*}exaNb zsk9z@%OH`=lumhZ1tCnrLAMM$Q96Khf7S3T-oLw#A|cfC zv>SKkVi9;4@f;Z=FX^3Fmcj`cXgot6Dq%z*@GB6T_LJluUZ0nU8#^i@hr|qFE1;U& zgqHiJ$EmN?u=rcdG=c(f6Cd?(cvnpj$Li%`wIxBPziEB#Wx-k)Vz1V)Fm1?lpJX=g z?J(7E#8mj-U|v-NSgxNnP1(PB4OD3nA&p>tOc&!wHr9AS4vwUcJl0bJ`D%V{{vzK+|_%^gv{Xo=#=kk1^fG zO}?Z3bgYL-Q)W1mN1BZ??)Pbu(7-<04KG&5KR|sKpj2I<0JDiE$oPkDHSAT=xLm>> z+o(d!2EY!cXcGE(5K27Xrh>+{zYa0;$qQVNhdlh|Z5f8g-wjxT4PO)}tD(#yb0{P* z&{jBDIY;Hsw~G%NFBnqe-S_C9=VDb|Ma1>%2>>n2T`;&%vmh(mY4 zY#nhCmi`jMHhC^pcManJm@eSg^*5G9247RBh0I-4i9D}>zqB~x{7Jc<{r zE`e7LELZdb^m$z?xQey5w7SL*3=thK3#%_Yoqkf}^~C@64{1&Eahs5?lg%}}AmZ9s zuTYs`j=uiq$xHMWmro47I6|kiA9Bt^dzC95&F!)qtxGi~x{RSv65Vrpn}~3b6avo5$84;NEJ9kyK%B62=H)VWu*?wh0MtILUX* z+pD;MO8r?Bl*l0XueLh zMxvFEzt}mqW0D**S%JlA$z+hlj|q&-C*`<8qIt%e1;S-ZA=q<9M@-SJd4Nxm*;vn- zR;NXTELyCqjN+)0FAi!)d*$4A2z`3eyz&RJ`!Asbz#JP)go2#-`JaZnrU`?*sl9@j z?@v?u^RSCzdE}Z8I0}hE7j8ZDWMwXLv*t3&6Z2?Ga{w7`r&wPp3gSHCdH;)*fMG1; 
z)e+u*jRXhj;1M0_f#+5`PnYPOM|`m%comeV_39PZvqjdOcvivIiO87`di9dZDw$xo z_V_t`z&Y)RtgSK(IQQ^$Y`m)jmZS{a1{kYz{@9q`>5!Y{^B z010=XfD@zA=1wv#YuvH+9&{IY5$fV>`2rfuZr08May#W~n9H{$M6%H6lwkdD}E!5ZUM?l}mz`3CA)X`u3L&X61T^I{s7>=yL0xK02`vFvoQjdBbqj z@F7p4Rf|kZ)ksf>);iX#>PBo6ynku19In$EZ(|G06_HR(haB+nX<1>kJU~8l+`^<^ z2i3*Fc0-uLs-9aYys8^xVY-`jl+M=TuDHcVB&+>TX{yeKKi)vRyHBgR1(krhBO zDd$y9c@Gyd_JJLYxYSjPRU9AX?-|xn*OTm({_PErj z_bw!fnq6mcnCEYlMbJ;bVHT z&I|*)@_cx|Y$uc*N{!q(_jjh)s)F6*IDuM`oy&Wduf7Z`!9I)Pg_Ux6+=Qcn28Pv- zzBgt{lI&fC%bVzZI4ql=3PkPB{}yRX`PEi3HLQ3J0+j5zOZPe-QUCtx=Tq*Y7Qln5 zA~;qI9r<-eJYTad$z6=2&Nd-qaGG3w?ccun(;#d}Yi|vgyvv}A5^Wj_0A4H>$ zkfx=EL@HhUemCReq9r1&m8!iLIi}2&o_%PTzE+E$%6P`xI0T-KhQ5MzQPW)(S>Hf(Q;!?(I_UUe$6{FBUJ58{jGCwZXW7R%uT`}rckJ5Y zY|eCJi4+qS`sN<6+YYJr zk8(pfmFyCh*+KCA$av9qv#{d}sg^MkDEDmnk_OW!Qka! z!5ayzYmz4f4!&dzuX|E!5=kBxK$XebfN(vVQMbfJTS*9kI}6ODC@3PJAn6Ih z&r6bmyw3WG;}!|J!1Q_{ORP+EAlipag=YG#u!twfqNvfbGNm1zYQkaU*nR{$_WB*! zWjOUj@RrCnilIV%T?&vVdBKo@AO*Sry_Ph&Mt#g-`1uQ=?-pH3?a~0>tNPuMgVk!= z8XYSYI_>$_sbHi`3qU3@SZ>ogOH(5%$`3l>Pezt)wP%FTq5i}J1eX)=1{{odhbU<=;OR`*0EOXTs+OBYcEr`Qz3g9QQq zd*p;!7&9AE9gKe_fJ)3#vQco1UPKaZmRph)1!w;`1Vqst4t?Ijk^9^ot9Xywe$VP0 zi|^2W(DEgfVd)V?{ZUsrbcmoTDt}U>I#;Ca?`khEh8(g%l|{vhHJuYagcmt04{uzL z#P+V!4iL@^TY6h78CyZjbA!0rs+!Jp)=@U=5W0;WzH;r{KNQFl#^?!ts3 zofCg!h7%Zv4XF)F2V-AOC=uA!9}IgY{k3(aCQpqtiqi&Hqt)0;&WhqO(Kp_DHE7*v z9T*0wZknXwtc;^Etnk?+$DFZ-LN^w#3?Z+zZvY|S%a2P4=uD<}yzlR4IHa*^ed0VY zQboO}8ud0(i0Yn40p7&<1pCTQ!eHk8s7||t6vYBBBz^3~IhKBeGhS6K$qx*>arWNx z^L>nl+)2Vq)QMaiO3r{TQkvz$ij~Kwp|0F%($g=qKVr0V1j~Q{g_ykLo!Cz5IqB4M zj@^FPiC4N?9wTQ}`EjuWW8%McPueGJ!P$a&zmo&o_B$PpT-Atgit%B9zp_-SP z3Na&Ku=17xh1-fj`}Y5}>pEH!fp}~+{S6eM$Tl(Sxb!$bEPJ0i3A!2`KNknhS>0gW zG77#;l+F1LK#xolR*Ej?6xu^DGiaFk zfE~rtzzDAF&D~Vy*DJ~Iq9A?fJt>30iYe8xgz1PXEo>cpq9;?@Ft&C%b>-%gwe~Gw zIziP{-wu`;mUxX}k;57Q3-$U_ZYECmu0od`Y->duUV`LsxD0bgA*~?WM9V6?7XLVG z3@&HsqO-^VgA_5MPvkI3$ zUsR%b`o#-Zg( z-q$SF6TbPgrAN#b)&D%ELh~rJ5*$%y3=|6+hLK(HX@fHK!;= 
zCJfQ@#`?ubkLeyrCS`kSuP|D6StQg^$f%^R6$DHeOf!BhgM`K<@Adb%Ra>brSYc}Q zz93KT+)peO%Mll?XTu4PtDblYv2ttFUxqCoXGKbd>$4%%+jn2!f=Yts&9snDk{gCJ+D3mo^eSalS6 z2`@EW2RZqKge9lLa92ZnhrK@^i=RGz_AQV!tfk=rB}y_qFN=Q!wc4RTg?J1)GTF!2 z0*0tW%UVL*_GB6xpKEf$uK%!M&4#&lel=eYOI2V@;x2#i9q^H;Gh&ZTRuP(=aeS-f z+K*o`yWVukMKmg2ws;lP;b2Vl&WK?u1ycHe2N;<4Y1VG@IYLu)u%pW07wPhP$DK3O z^!`1_d0{JmU@|5MF-OTr0%CM~B`~}Fsa3)ErOR&vW^HifW<{D*U-yD!%gbbHjv(J|Ceh-isQpak@3USRzAZ>k}~_54xSJ6yj9dz z#iyvtd0h9GE?g1tZ1-TJrMYqSM>7^TbaMcYj4dUK#nlot23T?%YTzG1lib5qWNnSn z0`qdRO{JYM;HJxhlp7VdHncG^aFEsu@16QVpbOW@K+dX}>4)d&e9UZfZ{Y0)s7BW- zGYw5?NmX>b&$YnR0Xo7_Mm9wrtp~E z0DCv;Giv(|$(bb=8w}6o#YKgS{0vLJ!!j4CbY+8R5(UEqc74j;T_?{yB_M84c8xJI zhC|$z)W`L#Wy!6Hq$N$Bd(W?|%A9xvl-lFjCslJ@#NPf^v^Vm1C{ZtRn1gBy_#(#S zx=hNbMpv06fx1)S8Ts~zm+|{`cheWJqhyRp5@tLENnLa6iy6jYX_uDDLP~Sh%vv&* zt7+M)yt#i1jUZl2r&c@AZxZb1wC>M3lCBPNMcqhPZyjhdki9>uyefVm9 z^hVMnBEGlc~0A$q@QzWGVxk$7Dfq zxD#KzzH$iCGKl%DEHCQSjBxh8WkOQT9Q*OPl>$rRJ_6$Sh^*Ma#4+E==pQQ9YyBoj zQe2pUB-D&I@JG1}!vwq1S*SMtO24Qc*0jN_6kgm+Z%gSDhx?uNXm!AFrV0W77QU)F z5ATm>BmP0ap(*3)Mk^9QK=Lt-M)i2-GEwlIIa(J4qY9goa1fhHLvm$ zpj&?PO|iPY!iw!ZI?q>`3#t~Wq`UY-W{gLzy|9vF$oG_<$*w9Nd?$=i#N5V6#QpBAk{3Q|Z~oFyyF^K>%g zFe42)_bL;ne3S#P+*PPzQ+luk*RXG?R~_)wG=Q{G83Ec0XD?%)H4GV;%A!&j1m=H8En*QaO3D>B||0dO7#w#a&< zRCWYYV|7|yw{IFmZN>!ZNXkbGKR*VgOKLs95e8}9Yh~E|$H7NigsH(p5P5}$$94W3 zDLPBHCmiE7{Zn*y@sO%?0uHjb;_DR}?j$!Xxos-!gSlk~?v+(5?Zf&j$n`0(pTIq0(nb?g{6y z9X)f+ILKQg(a*H(3W4`~H7_R(mgKfx;yAfqLl&h1il)z4IoljL(`1=w%z3JwqD}h& zrY&!`ji(|Ptif79F5kjS^oN-%S{cNqpfXK!jb+IvoO`v8-PtZX9(*||$PO)RV^D?~ z1X-Od^D@G?1GW*Y_m>i~;BK|zYl$O{5&}58MjE{j!G0TSbSuArgkJYy|0SBn^1qIz zF*E$%{>gv-Ii~;T>Oa~j85tS=GnnT3&p&s)!bZ4FNXY68FBBHOC=O3384k?ckHE~p z%mPu~KM#wPh_tvUP6e^JO&~569!Nlm|M8LQwDb6D<+GdFVw(H8vAXjbYsGKn&IubM zl2Ra!oeqW+fQSknhyvg_I_Y^p002P(2@*ge?%^S`hhet1%Rza|iva~IMuhScR1g^` zz@V0n1@agbE)3ATodbA~2;hJT>YxFN06_oID-amiJ_VdUU z9UK5qY@-0z`mgpO7X8Hf0u5aKa_Uav0=)of{-`}5gZumX{1ec*lrT^r?O%P{ecU2O zWt>@TGk24Jl^ywEswl_n0_bu2mnToBXp@qT03N7yg1!ARM-zhlkjMPm_e9kT0U7^J 
zzEsNinO)tB0Rj2F2qE0fseg=9?+*e1{jPM7ienjjJ(K?Useam}{pOAPiM;E%{On>{ zv~zR+wnBf2y!l-rptm=@>cP0UnPFV?L7LY0y{N#0g>-d&wLQ~;Iko^zLU^6}wZO-& z_czLS5{`ma##4x0z zPp3MYB3XDnW*99uog7MWm4&}}!Em6fVJe;#!F#80r^d4@#8t$W;;nX{BNWoom7%2YcP)vC1J(5P8B|+weC?Z2Tb2Gy?aUx^?5Ze4g zN8JiOjJ;mvv!xDNRxrVLxN9H^a(VBq>|2GZ5tr9T=As5 zpT0>IqNZ76T7ug>W?5^9Cgh4%0xQ*;dOqWu2VoJGcR)oSE8Q6Qb2Z5?2pXJ47`2PLn0@5#ECU@V z-S*B|JFpskBZag4vX6c0MbMP<%&tKmjBKL1MQc(v3!<8tj+%uf8GCyScP-kWhH?7uoNL5-|Kz zZ0AOxBjDkw;L6St9iS0bkCeiR(T1JH<*jN)nI_EV0p6FqBKuwz`mh0>x40|=;iQ9d z1Wz(*{Psy{V@vw=DA{pdx9v$+Ge1U~^bO=eHCcy>8N!UO<;|N29kR-S(rUM~E@B|% zq*o>3PPyM_$2uKy40wuF_9zW?FAcb3Lv|BMXuuIrf@~+l;x7A~O(Ih>WKt)Yn?nsj zERvj>Wrr?Rhac#A)`Y~k0CD#KoC$^Gg}cxgZdk?Cov8M-I4|{5drqSvxmb5lhv?h+ zwP=@r1mm)JVH~ZP>_vnFtX$u>fR4dV>1>IJcIOUK-oOm&RV0I7f6mE-PpH(nc>xA1 zBoN~gK}2laT^poT>?NUGUtGtIJwb&}`s=uX>E0=N0hH?`eBRT7lTf>kgj>~mL)&dl z*Dc=wUoYW2ZgKWq=HbzR;EHt)n4lOuyW@1TuU8r>Ii$_NOZiS zTXSb@j06l%-MmcrtRRsFEGK$dE-GPe$HB3=HBTUIuEgY1MvGJNF_r3*9TZowtwdBm zBhEI_@(}nQo#7kIob3*36Z8hq=K~a~Ly-}w5ZpTkK*z>XfgH(LJTdVqmykzrvGLpo zgeeQ}5qnY15V0yRTb1H$(=qvz=)T@@$w9Ufyb<~B(_~yFnz)FJ4F53fVx#lxny{AW zCW#B~GLHtb#EV(d2%-CCn@r$1%MLCK_%Lt0HTZy~GiInV{DA;To54s>K4^rY|+xBDL6~wqC zDvl99X6+6TiejRpjGQV&yT`(Q3aeDJn^PTcfDPeOTq#ZlEHsxlt&SCcGBxd%N_nzP z58Ud~SRFre;$nqL;wzi&qBw4^WXtmZETJ^z1&Pblj3=NVE*xcTe)4(h0GCmOwm7*K zgzLA>WDRfZ0p_5zcSpV#Lmu+ZwIxzl2Jf^%aMfd|>OSl{$gpomQL5AXO9Gwz9kM^R}&6njgR>o5=am-Rbq`JeU^{Z!opr_l&brM;xlN1o%6y?zuomtjqprGQhXT1*PF6-qkv zT9m}aW29?vcTQSdAbj2ikrM}-Jfj{A&*l&7Dzu=ygeW>azBuY<8LHc1fr}ki>>oy1 zl~Dt^##{PW|kAKc0C8c}4ol%LS zj7E7ng}7wIXv$h0GW)&|N*IXl>ydO|e+`?*CDVJPbO=we@;SI^IklYbx{cNI=|*+P zIT7)WItRBsOFzCY>;9}kX!04kDi7oP_;;ddqz%GRuN074@GX??GZR1x+VwO_%|TuZ z8;Yg*BMCu1vB$_J2s_&Kg?)*uOWdjVO)ximtl_SDW#>61qS1T%pQEz%s#5v;BsOFMQj?lhV zqI3PdH-Jn7tpEchQ?c8@yjDjJ;Y(^FZ+*kpX9yBmp}hE$N^^pR)pfq|XJ;z#%cw+) z5*az1#InD@9V@)5CvC!c$-Yr6T{n+f>PN zC`a@j-cVEwSfMPb)ph1^BXjIZ&CjV~;yrZslrLC^o6aCaA14J~X|j(fHnF zeb5Zmh{P4S29qBq@}s}bS?xiYY)e}5pYt?`@s8~0$wTHGdm4(}g%zaT2Z=)J7ud!^ 
z1unM_+{FZ%CYTf%u%50T9-GGoIlDdK7uG5dLq(Xvr}ZOr>rM{CK(UHW?|_o5>U}K+ zGKav%we~F+vZ#_FPCi@!09=**Q)E1_MCItr8cBDl=0BrFYeuU2pG>jxk`*XNUUL}Q4Isr;3sB6$&Pc)a zYW+I9epGvXRijoMADRQ}89Ov*BNLc}Vk;06<$Q0xD8deqHs7s*50xvzp$K@sAM_u> zt2jz`gr2+HJjymQt;-8SHefZkp2Hm%7h+S%V0oxMVP0Y&p!QQT?*`V~YC#pVf^N?9 zauM?+yj7GS8>X7EPU+8vIgneHf*bKEU?Og%5o;+zFanfCZN7KhjBWZ*xI8mD{WA0A zMV5y|zHARn(?yTZ$rBfvYwoxdD}S6D7--HvyplZB2nD;?x+`H<<@tSzKBW=yBU+9z z?UIZns(U5%V|LUm81}QC;L~dSHYpAfeI5HJ6i$X4q{Q%071eqWtFZaqr#{magqIY6 z?6wL*{Ks7iT|uagD&xtk5F4L!^f*yFbDY)r`I@txp~4uhSxvI!N)A{q{2t^0VK=;` z--Ye=Gu|8CbboA<&`!METTCdH(W!N7ny>0>Vr%|Y=wr&g*Z80=?wYU;{{^wF*pY6= zcY?Y(0;!*yEuP{U8CZ{L9a^cS_btPN+@f*AnRmIUy~MiV;-7~-cMMWiPA`vAavTkr-?ou>XLmSlZrO=EO^En z?y2@P^x$SBc9Nd-GF^5U_{lVjKO{eR@hb4VFb(Un&yBcCW_;+DMjj3nEHQVIGQhV& z*ih0kF#F0*Y4t7FWa73(&ZU|%isH?+|1sashJjYbiQR$J>Pv5Vb*l9tDu0cZHX6K} zRVKBXkJk3ES)*Rb#I&(D()m-SE*#0md=Hg;L4ABZNzhb+v=bZ&&nQS$x_D^0qx%$9 zay%Pk2R9biV-B|+f^pIXs!vF14n@_WqhgT>1?vUch_||$1H*4;#Y;=M7{j|2pDd|Y z&9>H8x51Ye)iGL*#>OH)!aspwT-?#E>qMLTatsZ$r3K}hy#xQ}jtQFQ7!GLVr>;fS zh}CwqJ!RQ=)B*w@GtqgViN1v`{|AWrx3&#=E*Ay78j?h~RnjgPi>idm{OqVnh@wTK zXMD1OLzeO{wr_=V@RH9rZ*+i%$k19vPh@XC`8#!x^K+)9P(qb*$kmn=qM<L~Z_G zopaD1)IMMF>=1nTIk|y>tb|1C-lm~ew@}fq>b+j%2Vn?d%y^wPVNO6Z?~+WAP!scP z`t+;s`p0vn<~#ya#N<~pp=T8?5O3Hj6;o0$`%L9#6Jk$*%10xN_0Q$n(^Oa5I$hb` z#n0ok>Eg^DFXBB*Zr@y>knfZ;#avnurG>$Gn^t;=DE1IR-`2N#E=g-fj zoIeU3!%L`)i46j=`5^XFxoL!AfTWMkuz@0}`e9-kg=U0RMlO`I3CiCeS>QQhg+>v2 z)n}!BDQ##@9mB_k9z5Ge|43v#nWXBvIGsP}~ z*%y>HU^HLY?g0FMC6Oci$Y(x|_uL^EzsCs{{Egh={$8FW0;%M*dJy7-5+yqclE0qD z^3`1Ml_gx3t*rW%Dq8tbw`7oe63=0A+6rE>8MCVOqz;PDi!Ik#4UP={W;@mzy4rqC zygm?!oiG#}Qlw=?ksHUyL@ERhI#9ZG7Jt?NeYQ3@irfM~B$Vl~otD||uU9>CL5)JtJB>$JR+rN`!^RFqR{j#+l+M`d zrrDdJ{C=XA+8$0?OATdHcl~@Da`e7&)G^Wyq30CUvfv^mb0p?3J9Y%H!r={_RBOU==aHje}wG1(66{bd~xU&7X&@a{BfDjBCT+$BS!ontj6|3ah&tAzDN*~&I*e# zS%@9`0}eM}8J}%tStXkX#Mf%fCbiIsNf^fd-H~jmY^fMyIn_(^jb`SqDxgL796|jz zsHGwZSCHH-!gcCJmAiu=(lZ+!8;l1&GhDRu5>QtH|3OIS9Hdc+{I0=HR{dkS6A9X; 
zjo%Rayn5Lq?)4h+=z2ufw)LTPx?)w!Vf)(*y?2yh2jigiypby_>7o1NV`o@sPrjjX z><@Dkj-qiKS2T8akX5fpC!K#;D4d&B;@pM(tCulNH4ekKG&2b+MybG>yB7tuqUu5X z@rSO2=IMC)=p6frnK7W=2u+tOoMMU}NHof-eQAl`orU`DQ$Y&Kp{ohjxfr?Zv10kp zo2bn?J#Geic_=+bW2s|Xa=a`MbK89{Qw`g=3OVyiksWrH$y_E?NJD#Z-Lrs%SA&?0 z@qy_lB6oaMo0>_ws4Za!d;=t(ZZf@n#$iCu#aeEjAQDrT3%_8NUQtIPP-ZR?OE4*@ zJqRBxTHbCPbNFKn)08xp&v>1Y(&N&rH=?+#25;KSpCeFo0hfKr8Y5G_gD@RdHnLru zn$uQujhh0?{1u0X z-~ryUxbSM|I^I@BzoE;MnAPJ1`|?Yc8;nWry|9=QB8#X*lJ)wDs}=isv!K($*g($? zj6_Da+s&@z#%0r30Ag$nHk({N{e>r|$Ezv>tDNy!zmRq@JnFmsbi0uDWg(RokUwh9 zyUzEaCiPBoae#6S@a zr}T%OpDkZ4AL{frjf^3FwBOs%!&yElHV0yYbOe-NtS95OvY{-G@G?aE?;|$0a^(lv zp(RAcX$}K3T|CiRiF-HNLrd$@y4?kdghnsylDK54=?Ey^-5&chpS`lDd98o-iW%W# z&W8+wu17glS`)nrPeugkk;a)Smz;ujs%kzLD-oT*?SMaS4x4&?jnM27&=da1T@~-Z z!CXwa`SQ`hz2H)8#y0sO%pjRnOheD9!_+4?MUCwiSrk)irk=R>EciUHaJy37?toSr z0l`ZOll1M(#8sTQB4Heush6|m(s?srckQN=Y(kHnA{Su9FVt2Fo%t6t33753@LBac zGHSTO{lK{!R{Yry&DVTfox8@OMFrJI|o0%5CSb1P|_X2(&|{Q*a7rnUAZ`m@d58)-3;3dO zc(V1~mZx!3esck#9UTQE2s4QxU~i1p{^6%MkEN}S9(_Q`*Jn{mb)_s8H(kSDxf z@0hQu0wjG<+Bg$KwUp=!wiC+-2&<62=v`VS4i)s~>{){9gj2SsC_?RbFDn`wYm&z` zJ)uA{6pplJ2NHVeau&+x^!XLd2DOF1 z*_iHM_|4b_8$qeal#fk6tM)Log0uwe=(D6zjjVjLNqg;yS#Uo}LqP2HhI+iEL^nlE zX8ec~vH{-}=_vRitLf}xVp`&}C@&DyBNg^?;6SEe>E$X^viyJ7iB_s83rIuW5RLMzBG88r`D&k zgMM}x`~?^p$Oru|V?);e-Pn+To#j8}bT%pjwg(I--KW&uVUpJYhTJ3pP=xD{2^`j1 zsLdPHB&FDpNGO5L-=EKKr0v#T05N%EbIdRLH@@AqVwKjerYL-lFQ&6ICA8BN-yOW5 zZf+V%+OefKxiOS7oYT2^YgU^mR(~5lol|NqD!uBDRCI9YN-))TlbSv1Eae9Hu6&Jr z<021l>h_AZOSqg>cn=8dDsS{9c4wKjjN3#`(7u_jJ1j5A$?!N&TkET*nmGqYIWb-p z*W3}>>Mk0upX+g#Km{8csXBd+dZUVX;c0aJvbmkKmp{qU=hok5WOKV-hAg}xM(J%V zHY$D1UfUM0mOdQ0aAAxY@MzGgS-dtEKKyp@V6pu!?x;Qi?&9mg zauIZa#F~M@V|m_f*knHT4gpAq{rgoh*gBT|ph zu?n-j3(Te?;9(_1UG9(%%+8YHW!=&ulVr2YkoedoKeo!}PeoU*#6hJ{O9!i{}Y$1 zR;p#oy7+Wk67fP3oR8Pbd*c7Z#hSd*`o$FKA1;<3CtIkdi9cH{1KT<)D5pskVS1y< zr8s7vV1Zhxm&)@1$_IQ7Y_s>ys04-=nmp{=-Nu#vl;1)Yn4XT?s!spz<(Gg`x2 z1jJWYNA5E@?v700fuY&)e+*=^Jr_WxO0oI0Bz@2OaD>1QJatgZs4d@K{fK7dtmkgN 
znCA!H>$ez|aIS@}eAaDG)StV7)g`yZtuUOA1}dY({)apCbOEY8? zn-7C}Qas5=UDBzpmJ?*eMMm~@-4y18LDL+X(~!mmkS-VahXu3VMc-j@6ykE&ORqh5 zTwSdQmZ2;@lOQ`p!;eK}3 z51s$v=98yn>y#3ePp;z0L(*)O+o{j{RcdTA*zM`#0`;`E3BJ(FdRr)rytZ=Ynb$T8 zt!itG_vmmJOmxoU4*veJ{J*s&v;Qw);bi!K!y;q1$%W8;sa78ba6Z7wPohW%16*tj z%=rW?b9Yx8JA4EgZX(nQ|Na`+WR+!;AQZi9-@(m|6Y7@d%Qx%Y=NHTTC=#i6-_9?m zlg+x@8g_6~yX|(WgCnf3pTIH#{hLHkj_t;<>sNx7mNyqZtPKK75UncZ>`YN-DP z5Cxzz$QqJA=pKN3AK)G!$W9?(ey*iQ6nH&YJz+oK{1QM)izWuZQa{rkfIKfzsirZ; z5`*?okXVJHc0pS*C0!~i6V>c|xdCCYN;l{*O=#27rubcbzCJaWCBXa;9Y8xP0J!XI z!=JN*`m=deDO%(`K=(Y-r}Qmm_)OA#fqIR<{+dR{C9t7n@SJOU+wp($*bzQumn;)RSg4lG6uty+Gh%njXN9ol&ZHaYi*P9PNp> zAl*l6YR>OEGhFh%`f>ZwIQI>yj+(I!bha~$I%BY7drH42nDiBJyYeaDw=OSz?CZb1 z(?72&FMVz6zdI(spIo2(<5$1=jlV&&Kk#Iq{K@OTgvP&NWS@PE9;v0>GvaO8wyZxL zhm@T?*xMeNi%EjFs68m|#G#Bkxy?gA{yI-$cKgrwBdc*T_x!c$7kK~W%g6Em2bf`F80b0cYXw#+9{uVd%Y#I8vVUEgsyn^dJw-jeLzQjagvm zjeO%zFHRo5+q*~U|DVRL1FDH_U4uXnq$)*FB!KjmO0OclDJ^tJAOWN#gwPRC5v3}< z69kkl2vQUR0s@K%0;1Fdf*@6jv`BmLo_pRp@A&Roch=0BJ+uE^)}Hl$-}ld&JzK!_ znQe?(Yw>N5-Sr6vjvo&IEss@5{DmL3t1>Mc*XK5C)@GmcIk`Mb#S5qByf44+a`HJl zpkI^AI71AxW>Gc*MYBu^yj)kTyX-Xj zPzr{m1X|WJj18y)0F~CM}f^LUfLWu zb1C<7rH4ks48@J%9ZD3Bu1>NU2Kd2$KJidmT`Bvt9MU@#F|mX?UB$Vqr+u4lBRP9E z9i@iX_SN5yMyZhjQjh0J4>fBqGwKR0m7*sEMfyj0U3!@-uI4+l2;({UW3>6YBr*Y; zxBUv5(^u)lDmi;fi{L$@6uhURi*GmsW=UsIsi4mK~;*1!{n-aR-@M+MBQSVaAfb#<=oGx zjL|gdBJ*06?jPUiGuy1MXdl}OR@``==z^=8{QRhu^ik9J-OabJ(+?Urs>Rz1!#R;9W=Zpk2*iMrm4KjI;tJ9 zeb#Z>A?#?BpZ<2z7Cp}h9X?#;R}t24O3-4Isc0Kp4NDkSE_1sit{=J)oi}< zLdaVN3UoFUa520sf4M4~b&^gW$TQkz@0H}&wK?~!?H52RwB6XSulcyoaLa^y zKl%3iI28NXlb=}W$9xl0RK|R%rkmH59IyKEG+T>^;g-%=TSPOOBd*{ z9J-=nZpCB}SZBDni@8w$0J#@!2dG?;gd4S$q0DiQ={dSilUyMHU>so<{dxr;G??)Ff zNp})T2E$r-67varp9gxf_;&|Nt1z z$VDY>Z%u0te9d=N062&Mj3 zvbH3xcr&EUsC%pES#d$cEMf9{5BBk)M3i3ih{E#&zVTn8HVq#xyjRP2Z>ggs4A6X~ z96!2S@{aRpuNlTltKQ0Q>bnPOU5)E1%snxXe^9cNrw}f_p#DUFB2nvGPV}UwLR9!! 
zurxr>Y))p~A5Zq)%cTD>4Q(M)m=IOAXbY(hSARW+9PaUewsxZgUf0@JetsvI z*m?3@yj@^G7BMYDjhYKyc63q)hmDomE zM@?wx+p*Q1Zv2>AE4HH1$NtQq0R+6~IZCz6LW8$&(A3k=dl-z8##Q*7Gc<2YM8rCW zm)%canY~Cm(D7LzxZN$@Hn~ZER^&xJi>`DqtJ2quY(KL~WIVP0yP^W?o`9|DvBa0P zNM!}Xg(U{4sp+xwL4ocV7(m~p(#(<>9MX|}aHTDFzgUyXuKP{(MeKQed3MH`L(UOP z02HCZuNAD6s4W|t@#2~bfTt@nPQDIG8L`VD^gd}W{I%u|*RM;-Hc>Y&cTkfh z7hXN^m4PW0Sx7f9KhMn zx5qOst0NMz)J37xXhRr5=q_!s-hP3T>DB z$dWj}jKe4ot3+7uW)@@a-MN>y2=@+dfQUcU4_4`X3#=)h=LV(KcOYyeTl#Ik7=L11 zueI7&^1O?!wF)BEcLdr<9xgxOyo@fkuRRuX4bRiQTq-5_s8{eTfh)4UVHl^SPhicPjW-W?83LV`SN z`>z#)fM6(vqGfeFya;GK50nS1hI}Myo75#hzMe z4s#&TY9j{u<{$b?ib)SSkx+-ON<8@^e!D!&$JCU6+^^&^e@N4rtit&7rt!BUA8h)u8nDK(!!BAP0|w3q)lNSW>~ z`tr3FQ>(F3@?GVRp27LU5FG-#SQ6JCK?*ur;=ujXeH7C@28TK0ITOOq8c!y6d+7|X zlN8kzBL3eoz6?;v-zrHKJWdE@N_PY zNq``D0oZt#T|dvZ?s)=LndwTR%uDZA2cMkas8XH6?utZ-TDHiw4{?##TMVj9Xy|wM z5<4-4Zv@Xfiz=^)IZ3`13{_q&zy5N_X*?Ml^OP^x+E7|iZOmrYK$Qi{9VK(lVT6~X z*tt71(PPQ>Ve4}-+6Alnc=of8$1omw(u7n!?H&$UQ@tX2(T6)y^kqhQ%hng;lRHFs zQnex@WDOU$M*kST*gQ3i_ij`?hs>WpXTk_8)ixJTy)0zin1C{|CAgxQIq#pz90Uq- z(vLzm&N(}AND7@J=n8Y;fXW@=Bq!H;;<`Z{r+Ps(McCWqdE%}{;ImW zo7oC=`n;@wY?7Ghcu8{pJTMP3HCEFNo-1sRF`pup=^{P{F|$-F^euMmLDj#ihIdP< zeTARgar(jD_Qgf*OJG>9TUh@(dHV1RclVM-vAc3|2j8Rx0*cuZXlzy{$7*@?C^Zzh zBT||ehr{LG9MKo$WT)8yNc@P#opFaPcgLyt=&4Okugws-YV5%9TC;DqoCzlU`cQjo zqk?7Qd_@j9fGjgTSDmy6K8fr*I25X__E}$Es^oRrl66KYyvT|dz+xrC8649qjk@7M z!CaC}!5FKk`<|70&kPI?YM&=@a<}O-%BXvrxzDT>GwBY0F;>l;R5kh*rp9Lpo7sDO zPnMXs=sqNz7ub-OZ*N$A^WerpqgF~Dl-BKp$n?x+{e$j>UHa8hwFan#r~lN;5Uk+` zRrPl4$j+tdZ}g=rLdPuiH#Zb#31(Jp9tT76_-T$~x&ZRtQNWYhHafmt8V=YRYMDW1+T%|xRQ0%G_1O$PC0gs)Lecfk)zSXjcktAiez1zozy5I}n8YMnu+TXw3i85Cr-kNa#;8x`xDi z`vFx{C}V>4^7r=f1HylG1MTYJLZA!~_?HF-$|8VvKrj$WIUInp<{lwvN?KO`D$q_v zP97tNRz$*~7&KS`i9w=3C^Q5F1%Z%AFa+fSS5r|?#JGUaAbFUAtDL-|JQ#{`LAfH~ zU^o4kZ{R;RMn3`)PYA-JF^mwf N9HX$Xwy6%|{{U$S7{&kq literal 0 HcmV?d00001 diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md index 1b2e9dc..1ffb4e7 100644 --- 
a/src/genbench/tasks/nl_codesearch_clf/doc.md +++ b/src/genbench/tasks/nl_codesearch_clf/doc.md @@ -1,17 +1,49 @@ ## Motivation -*Describe the motivation for this Natural Language Codesearch Classification.* +*Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. +*These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low resource programming languages. +*Motivated by the NLP generalisation taxonomy proposed by Hupkes et. al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. ## Examples -*Give examples of the Natural Language Codesearch Classification.* +* Given a natural language query, determine if a given code snippet is relevant or not + +*{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . 
dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} +*{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} ## Data Source -*Describe the data source for this Natural Language Codesearch Classification.* +*CodeSearchNet : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub +*CodeSearchNet Adv : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE +*WebQuery : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE +*StatCodeSearch: R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors +*Dataset Size: +*Finetuning set: +*-CodeSearchNet Adv train set 251k +*Test sets: +*-CodeSearchNet Adv test set 19k +*-WebQuery test set 1k +*-CodeSearchNet Ruby test set 2k +*-CodeSearchNet Go test set 14k +*-CodeSearchNet Java test set 26k +*-CodeSearchNet Javascript test set 6k +*-CodeSearchNet PHP test set 28k +*-StatCodeSearch test set TBD ## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification has, with links and references if 
possible.* +*TBD ## Citation -*Cite the source where this Natural Language Codesearch Classification was introduced.* +*TBD ## Further References -*Add any useful further references.* \ No newline at end of file +*@article{husain2019codesearchnet, +* title={Codesearchnet challenge: Evaluating the state of semantic code search}, +* author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, +* journal={arXiv preprint arXiv:1909.09436}, +* year={2019} +*} +*@article{Lu2021CodeXGLUEAM, +* title={CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation}, +* author={Shuai Lu and Daya Guo and Shuo Ren and Junjie Huang and Alexey Svyatkovskiy and Ambrosio Blanco and Colin Clement and Dawn Drain and Daxin Jiang and Duyu Tang and Ge Li and Lidong Zhou and Linjun Shou and Long Zhou and Michele Tufano and Ming Gong and Ming Zhou and Nan Duan and Neel Sundaresan and Shao Kun Deng and Shengyu Fu and Shujie Liu}, +* journal={ArXiv}, +* year={2021}, +* volume={abs/2102.04664} +*} \ No newline at end of file From 7c8e79e8345c264f074c6c859b0bf25168dcbf58 Mon Sep 17 00:00:00 2001 From: drndr Date: Wed, 26 Jul 2023 17:37:16 +0200 Subject: [PATCH 11/57] Update doc.md --- src/genbench/tasks/nl_codesearch_clf/doc.md | 72 ++++++++++----------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md index 1ffb4e7..cb863eb 100644 --- a/src/genbench/tasks/nl_codesearch_clf/doc.md +++ b/src/genbench/tasks/nl_codesearch_clf/doc.md @@ -1,49 +1,49 @@ ## Motivation -*Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. 
-*These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low resource programming languages. -*Motivated by the NLP generalisation taxonomy proposed by Hupkes et. al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. +Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. +These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low resource programming languages. +Motivated by the NLP generalisation taxonomy proposed by Hupkes et. al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. 
## Examples -* Given a natural language query, determine if a given code snippet is relevant or not +Given a natural language query, determine if a given code snippet is relevant or not -*{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} -*{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} +{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . 
shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} +{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} ## Data Source -*CodeSearchNet : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub -*CodeSearchNet Adv : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE -*WebQuery : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE -*StatCodeSearch: R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors +CodeSearchNet : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub +CodeSearchNet Adv : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE +WebQuery : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: 
https://github.com/microsoft/CodeXGLUE +StatCodeSearch: R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors -*Dataset Size: -*Finetuning set: -*-CodeSearchNet Adv train set 251k -*Test sets: -*-CodeSearchNet Adv test set 19k -*-WebQuery test set 1k -*-CodeSearchNet Ruby test set 2k -*-CodeSearchNet Go test set 14k -*-CodeSearchNet Java test set 26k -*-CodeSearchNet Javascript test set 6k -*-CodeSearchNet PHP test set 28k -*-StatCodeSearch test set TBD +Dataset Size: +Finetuning set: +-CodeSearchNet Adv train set 251k +Test sets: +-CodeSearchNet Adv test set 19k +-WebQuery test set 1k +-CodeSearchNet Ruby test set 2k +-CodeSearchNet Go test set 14k +-CodeSearchNet Java test set 26k +-CodeSearchNet Javascript test set 6k +-CodeSearchNet PHP test set 28k +-StatCodeSearch test set TBD ## Limitations and Bias -*TBD +TBD ## Citation -*TBD +TBD ## Further References -*@article{husain2019codesearchnet, -* title={Codesearchnet challenge: Evaluating the state of semantic code search}, -* author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, -* journal={arXiv preprint arXiv:1909.09436}, -* year={2019} +@article{husain2019codesearchnet, + title={Codesearchnet challenge: Evaluating the state of semantic code search}, + author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, + journal={arXiv preprint arXiv:1909.09436}, + year={2019} +} +@article{Lu2021CodeXGLUEAM, + title={CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation}, + author={Shuai Lu and Daya Guo and Shuo Ren and Junjie Huang and Alexey Svyatkovskiy and Ambrosio Blanco and Colin Clement and Dawn Drain and Daxin Jiang and Duyu Tang and Ge Li and Lidong Zhou and Linjun Shou and Long Zhou and Michele Tufano and Ming Gong and Ming Zhou and Nan Duan and Neel Sundaresan and Shao Kun Deng and Shengyu Fu 
and Shujie Liu}, + journal={ArXiv}, + year={2021}, + volume={abs/2102.04664} *} -*@article{Lu2021CodeXGLUEAM, -* title={CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation}, -* author={Shuai Lu and Daya Guo and Shuo Ren and Junjie Huang and Alexey Svyatkovskiy and Ambrosio Blanco and Colin Clement and Dawn Drain and Daxin Jiang and Duyu Tang and Ge Li and Lidong Zhou and Linjun Shou and Long Zhou and Michele Tufano and Ming Gong and Ming Zhou and Nan Duan and Neel Sundaresan and Shao Kun Deng and Shengyu Fu and Shujie Liu}, -* journal={ArXiv}, -* year={2021}, -* volume={abs/2102.04664} -*} \ No newline at end of file From e00b4dfc68234a498b4d1ea3c400f9560e1aadbc Mon Sep 17 00:00:00 2001 From: drndr Date: Wed, 26 Jul 2023 17:44:09 +0200 Subject: [PATCH 12/57] Update doc.md --- src/genbench/tasks/nl_codesearch_clf/doc.md | 44 ++++++++++----------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md index cb863eb..4a5ef14 100644 --- a/src/genbench/tasks/nl_codesearch_clf/doc.md +++ b/src/genbench/tasks/nl_codesearch_clf/doc.md @@ -1,32 +1,32 @@ ## Motivation -Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. -These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low resource programming languages. -Motivated by the NLP generalisation taxonomy proposed by Hupkes et. 
al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. +Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low resource programming languages. Motivated by the NLP generalisation taxonomy proposed by Hupkes et. al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. ## Examples -Given a natural language query, determine if a given code snippet is relevant or not +Given a natural language query, determine if a given code snippet is relevant or not \ -{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . 
shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} +{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . 
get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} ## Data Source -CodeSearchNet : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub -CodeSearchNet Adv : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE -WebQuery : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE -StatCodeSearch: R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors +CodeSearchNet : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ +CodeSearchNet Adv : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ +WebQuery : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ +StatCodeSearch: R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors \ -Dataset Size: -Finetuning set: --CodeSearchNet Adv train set 251k -Test sets: --CodeSearchNet Adv test set 19k --WebQuery test set 1k --CodeSearchNet Ruby test set 2k --CodeSearchNet Go test set 14k --CodeSearchNet Java test set 26k --CodeSearchNet Javascript test set 6k --CodeSearchNet PHP test set 28k --StatCodeSearch test set TBD +For each comment in each subset we sampled randomly another code snippet from given subset, to 
create a fully balanced binary classification dataset. + +Dataset Size:\ +Finetuning set: \ +-CodeSearchNet Adv train set 251k \ +Test sets: \ +-CodeSearchNet Adv test set 38k \ +-WebQuery test set 2k \ +-CodeSearchNet Ruby test set 4k \ +-CodeSearchNet Go test set 28k \ +-CodeSearchNet Java test set 52k \ +-CodeSearchNet Javascript test set 12k \ +-CodeSearchNet PHP test set 56k \ +-StatCodeSearch test set TBD \ ## Limitations and Bias TBD @@ -39,7 +39,7 @@ TBD author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, journal={arXiv preprint arXiv:1909.09436}, year={2019} -} +} \ @article{Lu2021CodeXGLUEAM, title={CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation}, author={Shuai Lu and Daya Guo and Shuo Ren and Junjie Huang and Alexey Svyatkovskiy and Ambrosio Blanco and Colin Clement and Dawn Drain and Daxin Jiang and Duyu Tang and Ge Li and Lidong Zhou and Linjun Shou and Long Zhou and Michele Tufano and Ming Gong and Ming Zhou and Nan Duan and Neel Sundaresan and Shao Kun Deng and Shengyu Fu and Shujie Liu}, From 0d72e3761f40c4460ec351376b4904eb64e62c3c Mon Sep 17 00:00:00 2001 From: drndr Date: Wed, 26 Jul 2023 17:47:16 +0200 Subject: [PATCH 13/57] Update doc.md --- src/genbench/tasks/nl_codesearch_clf/doc.md | 34 ++++++++++----------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md index 4a5ef14..bb53e7e 100644 --- a/src/genbench/tasks/nl_codesearch_clf/doc.md +++ b/src/genbench/tasks/nl_codesearch_clf/doc.md @@ -2,31 +2,31 @@ Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. 
These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low resource programming languages. Motivated by the NLP generalisation taxonomy proposed by Hupkes et. al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. ## Examples -Given a natural language query, determine if a given code snippet is relevant or not \ +Given a natural language query, determine if a given code snippet is relevant or not {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . 
startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} ## Data Source -CodeSearchNet : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ -CodeSearchNet Adv : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ -WebQuery : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ -StatCodeSearch: R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors \ +**CodeSearchNet** : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ +**CodeSearchNet Adv** : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ +**WebQuery** : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ +**StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors \ For each comment in each subset we sampled randomly another code snippet from given subset, to create a fully balanced binary classification dataset. 
-Dataset Size:\ -Finetuning set: \ --CodeSearchNet Adv train set 251k \ -Test sets: \ --CodeSearchNet Adv test set 38k \ --WebQuery test set 2k \ --CodeSearchNet Ruby test set 4k \ --CodeSearchNet Go test set 28k \ --CodeSearchNet Java test set 52k \ --CodeSearchNet Javascript test set 12k \ --CodeSearchNet PHP test set 56k \ --StatCodeSearch test set TBD \ +**Dataset Size**:\ +-Finetuning set: \ + -CodeSearchNet Adv train set 251k \ +-Test sets: \ + -CodeSearchNet Adv test set 38k \ + -WebQuery test set 2k \ + -CodeSearchNet Ruby test set 4k \ + -CodeSearchNet Go test set 28k \ + -CodeSearchNet Java test set 52k \ + -CodeSearchNet Javascript test set 12k \ + -CodeSearchNet PHP test set 56k \ + -StatCodeSearch test set TBD \ ## Limitations and Bias TBD From c222b66df56ccb1ce7db417d46677769198cbac1 Mon Sep 17 00:00:00 2001 From: drndr Date: Wed, 26 Jul 2023 17:52:50 +0200 Subject: [PATCH 14/57] Update doc.md --- src/genbench/tasks/nl_codesearch_clf/doc.md | 22 ++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md index bb53e7e..a0f66f5 100644 --- a/src/genbench/tasks/nl_codesearch_clf/doc.md +++ b/src/genbench/tasks/nl_codesearch_clf/doc.md @@ -16,17 +16,17 @@ Given a natural language query, determine if a given code snippet is relevant or For each comment in each subset we sampled randomly another code snippet from given subset, to create a fully balanced binary classification dataset. 
**Dataset Size**:\ --Finetuning set: \ - -CodeSearchNet Adv train set 251k \ --Test sets: \ - -CodeSearchNet Adv test set 38k \ - -WebQuery test set 2k \ - -CodeSearchNet Ruby test set 4k \ - -CodeSearchNet Go test set 28k \ - -CodeSearchNet Java test set 52k \ - -CodeSearchNet Javascript test set 12k \ - -CodeSearchNet PHP test set 56k \ - -StatCodeSearch test set TBD \ +*Finetuning set:* \ + -CodeSearchNet Adv train set 251k \ +*Test sets:* \ + -CodeSearchNet Adv test set 38k \ + -WebQuery test set 2k \ + -CodeSearchNet Ruby test set 4k \ + -CodeSearchNet Go test set 28k \ + -CodeSearchNet Java test set 52k \ + -CodeSearchNet Javascript test set 12k \ + -CodeSearchNet PHP test set 56k \ + -StatCodeSearch test set TBD ## Limitations and Bias TBD From 2720c09f8221f6b639df66acfe1f7ff2f3e014f2 Mon Sep 17 00:00:00 2001 From: lucas Date: Thu, 27 Jul 2023 09:52:52 +0200 Subject: [PATCH 15/57] .. --- example_evaluation.py | 4 ++-- src/genbench/tasks/icl_consistency_test/task.py | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/example_evaluation.py b/example_evaluation.py index 69ed445..da53187 100644 --- a/example_evaluation.py +++ b/example_evaluation.py @@ -10,7 +10,7 @@ import torch import os -N_DATAPOINTS = 1 +N_DATAPOINTS = 20 def make_predictions(generator, dataset): @@ -61,5 +61,5 @@ def make_predictions(generator, dataset): results = task.evaluate_predictions(predictions=predictions, gold=ds) print('EVALUATED SUCCESSFULLY!') - print(f'Exact-match accuracies: \n{results["exact_match_accuracy"]}') + print(f'Exact-match accuracies: \n{results["exact_match_accuracy"]["accuracy"]}') print(f'Consistency: \n{results["kappas"]}') diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index e1c1fa1..4410aab 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -123,6 +123,8 @@ def remove_factor(self, data: datasets.Dataset, factor: 
str, keep_present: bool factor: A string with the name of the factor to remove. keep_present: whether to keep data with the factor present or absent. """ + self._set_factors() + #breakpoint() len_setup_ID_preamble = 4 index_factor = self.factors.index(factor) + len_setup_ID_preamble realisation_to_keep = str(int(keep_present)) @@ -132,7 +134,7 @@ def remove_factor(self, data: datasets.Dataset, factor: str, keep_present: bool data = data.map(lambda x: {**x, "setup_ID": x["setup_ID"][:index_factor] + x["setup_ID"][index_factor + 1:]} ) # Remove factor from list of factors. - self._set_factors() + self.factors.pop(self.factors.index(factor)) return data From 31b09af92830bc2e1f3b0ffeae2aec707e548c23 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Fri, 28 Jul 2023 10:46:55 +0200 Subject: [PATCH 16/57] Style and quality checks --- example_evaluation.py | 65 ---------- .../example_evaluation.py | 118 ++++++++++++++++++ .../tasks/icl_consistency_test/task.py | 107 ++++++++-------- 3 files changed, 172 insertions(+), 118 deletions(-) delete mode 100644 example_evaluation.py create mode 100644 src/genbench/tasks/icl_consistency_test/example_evaluation.py diff --git a/example_evaluation.py b/example_evaluation.py deleted file mode 100644 index da53187..0000000 --- a/example_evaluation.py +++ /dev/null @@ -1,65 +0,0 @@ -from genbench import load_task -from genbench.api import PreparationStrategy - -from transformers import AutoModelForCausalLM -from transformers import pipeline - -from tqdm import tqdm - -# delete after testing -import torch -import os - -N_DATAPOINTS = 20 - - -def make_predictions(generator, dataset): - predictions = {} - for datapoint in tqdm(dataset): - prediction = generator(datapoint['input'], - max_new_tokens=1, - num_return_sequences=1, - do_sample=False, - return_full_text=False, - pad_token_id=generator.tokenizer.eos_token_id - ) - current_setup = str(datapoint['setup_ID']) - current_data_ID = str(datapoint['data_ID']) - - if current_setup in 
predictions.keys(): - predictions[current_setup].update({current_data_ID: prediction[0]['generated_text'].strip()}) - else: - predictions[current_setup] = {current_data_ID: prediction[0]['generated_text'].strip()} - - return predictions - - -if __name__ == '__main__': - # Load the task - task = load_task("icl_consistency_test") - - if not os.path.exists(f'cache_{N_DATAPOINTS}.p'): - ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] - - # Selecting a subset of example for illustration purposes - subset = list(set(ds['data_ID']))[:N_DATAPOINTS] - ds = ds.filter(lambda x: x['data_ID'] in subset) - - # Generate predictions for the dataset - generator = pipeline('text-generation', model='gpt2') - predictions = make_predictions(generator, ds) - - # OPTIONAL: The ICL-consistency test provides the option to add factors to the analysis by using the - # `add_factor` method (here exemplified with distillation). - generator_distil = pipeline('text-generation', model='DistilGPT2') - predictions_distil = make_predictions(generator_distil, ds) - torch.save((predictions, predictions_distil, ds), f'cache_{N_DATAPOINTS}.p') - else: - predictions, predictions_distil, ds = torch.load(f'cache_{N_DATAPOINTS}.p') - predictions = task.add_factor(data=(predictions, predictions_distil), factor='distillation') - # Evaluate the predictions - results = task.evaluate_predictions(predictions=predictions, gold=ds) - - print('EVALUATED SUCCESSFULLY!') - print(f'Exact-match accuracies: \n{results["exact_match_accuracy"]["accuracy"]}') - print(f'Consistency: \n{results["kappas"]}') diff --git a/src/genbench/tasks/icl_consistency_test/example_evaluation.py b/src/genbench/tasks/icl_consistency_test/example_evaluation.py new file mode 100644 index 0000000..98b7e4c --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/example_evaluation.py @@ -0,0 +1,118 @@ +""" +EXAMPLE USAGE OF ICL CONSISTENCY TEST + +This script requires additional packages to be 
installed: + +pip install torch +pip install git+https://github.com/huggingface/transformers.git +pip install bitsandbytes +pip install accelerate + +""" +import string +from typing import Dict, List + +import torch +import transformers +from torch import Tensor +from torch.utils.data import DataLoader +from tqdm import tqdm + +from genbench import load_task +from genbench.api import PreparationStrategy + + +N_DATAPOINTS = 50 +MODEL_NAME = "huggyllama/llama-7b" +BATCH_SIZE = 8 + +device = "cuda" if torch.cuda.is_available() else "cpu" + + +class Generator: + def __init__(self, model_name="huggyllama/llama-7b"): + self.max_new_tokens = 4 # some labels consist of up to 4 tokens + self.tokenizer = transformers.AutoTokenizer.from_pretrained( + model_name, + device_map="auto", + padding_side="left", + ) + self.tokenizer.pad_token = self.tokenizer.eos_token + self.model = transformers.AutoModelForCausalLM.from_pretrained( + model_name, + load_in_8bit=True, + device_map="auto", + ).eval() + + self.generation_config = transformers.GenerationConfig( + do_sample=False, + return_dict_in_generate=False, + output_scores=True, + max_new_tokens=self.max_new_tokens, + return_full_text=False, + ) + + def generate(self, prompt) -> List[str]: + inputs = self.tokenizer(prompt, return_tensors="pt", padding=True, truncation=True) + + input_ids = inputs["input_ids"].to(device) + attention_mask = inputs["attention_mask"].to(device) + + generation_output = self.model.generate( + input_ids=input_ids, + attention_mask=attention_mask, + generation_config=self.generation_config, + ) + + outputs = self.tokenizer.batch_decode(generation_output[:, input_ids.shape[1] :]) + + # do some post-processing + outputs = [o.strip().split()[0].translate(str.maketrans("", "", string.punctuation)) for o in outputs] + + return outputs + + def make_predictions(self, dataset, bs=8) -> Dict[str, Dict[str, str]]: + out = {} + dl = DataLoader(dataset=dataset, batch_size=bs, num_workers=0) + + with torch.no_grad(): 
+ for batch in tqdm(dl): + prediction = self.generate(batch["input"]) + + # organize predictions into output dictionary + for i, (data_ID, setup_ID) in enumerate(zip(batch["data_ID"], batch["setup_ID"])): + data_ID = str(data_ID.item() if isinstance(data_ID, Tensor) else data_ID) + if setup_ID in out.keys(): + out[setup_ID].update({data_ID: prediction[i]}) + else: + out[setup_ID] = {data_ID: prediction[i]} + + return out + + +if __name__ == "__main__": + # Load the task + task = load_task("icl_consistency_test") + ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] + + # Selecting a subset of example for illustration purposes + subset = list(set(ds["data_ID"]))[:N_DATAPOINTS] + ds = ds.filter(lambda x: x["data_ID"] in subset) + + # Generate predictions for the dataset + generator = Generator(model_name=MODEL_NAME) + predictions = generator.make_predictions(ds, bs=BATCH_SIZE) + + # OPTIONAL: The ICL-consistency test provides the option to add factors to the analysis by using the + # `add_factor` method. + add_external_factor = False + if add_external_factor: + predictions_external_factor = ... 
# some function generating alternative predictions + predictions = task.add_factor(data=(predictions, predictions_external_factor), factor="") + + # Evaluate the predictions + results = task.evaluate_predictions(predictions=predictions, gold=ds) + + print("EVALUATED SUCCESSFULLY!") + print(f'Exact-match accuracies: \n{results["exact_match_accuracy"]["accuracy"]}') + print(f'Consistency: \n{results["kappas"]}') diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index 4410aab..4be8b31 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -1,27 +1,30 @@ -from typing import Any, Dict, List, Tuple -from genbench import Task +from typing import Any, Dict, Tuple -from sklearn.metrics import cohen_kappa_score +import datasets from pandas import DataFrame +from sklearn.metrics import cohen_kappa_score -import datasets +from genbench import Task -LABELS = [['Correct', 'True', 'Always', 'Yes', 'Guaranteed', 'Duplicates'], # `correct` labels - ['Inconclusive', 'Possible', 'Sometimes', 'Maybe', 'Neither'], # `neutral` labels - ['Impossible', 'Never', 'Incorrect', 'False', 'No', 'Not Duplicates'], ] # `incorrect` labels + +LABELS = [ + ["Correct", "True", "Always", "Yes", "Guaranteed", "Duplicates"], # `correct` labels + ["Inconclusive", "Possible", "Sometimes", "Maybe", "Neither"], # `neutral` labels + ["Impossible", "Never", "Incorrect", "False", "No", "Not Duplicates"], # `incorrect` labels +] LABEL_TO_NUMERIC = {} LABEL_TO_NUMERIC.update(dict([(label, i) for i, label_subset in enumerate(LABELS) for label in label_subset])) LABEL_TO_NUMERIC.update(dict([(label.lower(), i) for i, label_subset in enumerate(LABELS) for label in label_subset])) factors = [ - 'balanced_labels', - 'one_label', - 'cross_task', - 'cross_instructions', - 'n_shots', - 'instructions', - 'instruction_quality', + "balanced_labels", + "one_label", + "cross_task", + 
"cross_instructions", + "n_shots", + "instructions", + "instruction_quality", ] @@ -29,10 +32,11 @@ class IclConsistencyTestTask(Task): """Python implementation of the ICL consistency test task.""" def evaluate_predictions( - self, - *, - predictions: Dict[str, Dict[str, Any]], - gold: datasets.Dataset, + self, + *, + predictions: Dict[str, Dict[str, Any]], + gold: datasets.Dataset, + save_path: str = None, ) -> Dict[str, Any]: """Evaluate the predictions of the model against the gold data. Calculating exact match accuracy plus consistency across all setups (Cohen's kappa). @@ -51,40 +55,37 @@ def evaluate_predictions( self._set_factors() gold_pandas = gold.to_pandas() - gold_pandas['data_ID'] = gold_pandas['data_ID'].astype(str) - gold_labels_numeric = gold_pandas.set_index('data_ID')['target_numeric'].to_dict() + gold_pandas["data_ID"] = gold_pandas["data_ID"].astype(str) + gold_labels_numeric = gold_pandas.set_index("data_ID")["target_numeric"].to_dict() results_df = self._create_df(predictions, gold_labels_numeric) - results_df = results_df.sort_values(by=['setup_ID', 'data_ID']) + results_df = results_df.sort_values(by=["setup_ID", "data_ID"]) self._assert_equal_data_ids(results_df) # Compute the exact match accuracy for each setup. - em = {factor: [] for factor in self.factors + ['accuracy']} - for setup_ID, setup_predictions in results_df.groupby('setup_ID'): + em = {factor: [] for factor in self.factors + ["accuracy"]} + for setup_ID, setup_predictions in results_df.groupby("setup_ID"): temp = self._convert_numeric_id_to_dict(setup_ID, n_repetitions=1) for factor in self.factors: em[factor].extend(temp[factor]) - em['accuracy'].append( - (setup_predictions['predictions_numeric'] == setup_predictions['target_numeric']).mean()) + em["accuracy"].append( + (setup_predictions["predictions_numeric"] == setup_predictions["target_numeric"]).mean() + ) # Compute the Cohen's kappa for consistency. 
kappas = {} for factor in self.factors: - factor_present = results_df.loc[results_df[factor] == '1']['predictions_numeric'] - factor_absent = results_df.loc[results_df[factor] == '0']['predictions_numeric'] + factor_present = results_df.loc[results_df[factor] == "1"]["predictions_numeric"] + factor_absent = results_df.loc[results_df[factor] == "0"]["predictions_numeric"] - # mask out predictions that are out-of-label-distribution + # mask out predictions that are out-of-label-distribution mask = [(f1 != -1 and f2 != -1) for f1, f2 in zip(factor_absent, factor_present)] - try: - factor_present, factor_absent = factor_present[mask], factor_absent[mask] - except: - breakpoint() + factor_present, factor_absent = factor_present[mask], factor_absent[mask] kappas[factor] = cohen_kappa_score(factor_present, factor_absent) # Return the evaluation metrics. - return {"exact_match_accuracy": em, - "kappas": kappas} + return {"exact_match_accuracy": em, "kappas": kappas} def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str, Any]]: """Concatenate the data with the factor present and absent and update the setup_IDs accordingly. Also add the @@ -102,8 +103,8 @@ def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str setup_ids1 = list(data[1].keys()) for setup_id0, setup_id1 in zip(setup_ids0, setup_ids1): - updated_id0 = setup_id0 + '0' - updated_id1 = setup_id1 + '1' + updated_id0 = setup_id0 + "0" + updated_id1 = setup_id1 + "1" data[0][updated_id0] = data[0].pop(setup_id0) data[1][updated_id1] = data[1].pop(setup_id1) @@ -124,17 +125,16 @@ def remove_factor(self, data: datasets.Dataset, factor: str, keep_present: bool keep_present: whether to keep data with the factor present or absent. 
""" self._set_factors() - #breakpoint() + # breakpoint() len_setup_ID_preamble = 4 index_factor = self.factors.index(factor) + len_setup_ID_preamble realisation_to_keep = str(int(keep_present)) # filter out all unwanted datapoints and adapt setup_IDs to exclude factor - data = data.filter(lambda x: x['setup_ID'][index_factor] == realisation_to_keep) - data = data.map(lambda x: {**x, "setup_ID": x["setup_ID"][:index_factor] + x["setup_ID"][index_factor + 1:]} ) + data = data.filter(lambda x: x["setup_ID"][index_factor] == realisation_to_keep) + data = data.map(lambda x: {**x, "setup_ID": x["setup_ID"][:index_factor] + x["setup_ID"][index_factor + 1 :]}) # Remove factor from list of factors. - self.factors.pop(self.factors.index(factor)) return data @@ -152,18 +152,19 @@ def _create_df(self, predictions: Dict[str, Dict[str, Any]], gold_labels: Dict[s Returns: A pandas dataframe containing the predictions and gold data. """ - additional_keys = ['predictions_numeric', 'target_numeric', 'setup_ID', 'data_ID'] + additional_keys = ["predictions_numeric", "target_numeric", "setup_ID", "data_ID"] results_dict = {factor: [] for factor in self.factors + additional_keys} for setup_ID, predictions_setup in predictions.items(): data_ids = list(predictions_setup.keys()) n_datapoints = len(data_ids) - results_dict['data_ID'].extend(data_ids) - results_dict['setup_ID'].extend([setup_ID] * n_datapoints) - results_dict['target_numeric'].extend(gold_labels[data_id] for data_id in data_ids) - results_dict['predictions_numeric'].extend(self._label_to_numeric(predictions_setup[data_id]) for - data_id in data_ids) + results_dict["data_ID"].extend(data_ids) + results_dict["setup_ID"].extend([setup_ID] * n_datapoints) + results_dict["target_numeric"].extend(gold_labels[data_id] for data_id in data_ids) + results_dict["predictions_numeric"].extend( + self._label_to_numeric(predictions_setup[data_id]) for data_id in data_ids + ) temp = self._convert_numeric_id_to_dict(setup_ID, 
n_repetitions=n_datapoints) for factor in self.factors: @@ -172,7 +173,7 @@ def _create_df(self, predictions: Dict[str, Dict[str, Any]], gold_labels: Dict[s return DataFrame(results_dict) def _set_factors(self): - if not hasattr(self, 'factors'): + if not hasattr(self, "factors"): self.factors = factors def _convert_numeric_id_to_dict(self, setup_id: str, n_repetitions: int = 1) -> Dict[str, Any]: @@ -184,7 +185,7 @@ def _convert_numeric_id_to_dict(self, setup_id: str, n_repetitions: int = 1) -> Returns: A dict containing factors as keys and the factor realisation as value. """ - setup_id = setup_id.split('_')[1] + setup_id = setup_id.split("_")[1] setup_dict = {} for factor, value in zip(self.factors, setup_id): @@ -211,8 +212,8 @@ def _assert_equal_data_ids(results_df: DataFrame) -> None: Args: results_df: A pandas dataframe containing the predictions and gold data. """ - used_data_ids = results_df['data_ID'].unique() - for setup_ID in results_df['setup_ID'].unique(): - assert used_data_ids.sort() == results_df.loc[results_df['setup_ID'] == setup_ID][ - 'data_ID'].unique().sort(), \ - "Not all data_IDs are the same for all setups. Check for missing predictions!" + used_data_ids = results_df["data_ID"].unique() + for setup_ID in results_df["setup_ID"].unique(): + assert ( + used_data_ids.sort() == results_df.loc[results_df["setup_ID"] == setup_ID]["data_ID"].unique().sort() + ), "Not all data_IDs are the same for all setups. Check for missing predictions!" 
From 6a07d785be0bac452eb28e5875719e8108c406ed Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 31 Jul 2023 11:11:02 +0200 Subject: [PATCH 17/57] add new clf prompt and mrr task --- .../codesearchnet_adv/config.jsonnet | 12 ++--- .../codesearchnet_go/config.jsonnet | 9 ++-- .../codesearchnet_java/config.jsonnet | 9 ++-- .../codesearchnet_javascript/config.jsonnet | 9 ++-- .../codesearchnet_php/config.jsonnet | 9 ++-- .../codesearchnet_ruby/config.jsonnet | 9 ++-- .../tasks/nl_codesearch_clf/config.jsonnet | 6 +-- .../statcodesearch/config.jsonnet | 9 ++-- .../nl_codesearch_clf/webquery/config.jsonnet | 9 ++-- .../tasks/nl_codesearch_mrr/__init__.py | 5 ++ .../codesearchnet_adv/__init__.py | 0 .../codesearchnet_adv/config.jsonnet | 52 +++++++++++++++++++ .../codesearchnet_adv/doc.md | 19 +++++++ .../codesearchnet_adv/task.py | 5 ++ .../codesearchnet_go/__init__.py | 0 .../codesearchnet_go/config.jsonnet | 52 +++++++++++++++++++ .../nl_codesearch_mrr/codesearchnet_go/doc.md | 19 +++++++ .../codesearchnet_go/task.py | 5 ++ .../codesearchnet_java/__init__.py | 0 .../codesearchnet_java/config.jsonnet | 52 +++++++++++++++++++ .../codesearchnet_java/doc.md | 19 +++++++ .../codesearchnet_java/task.py | 5 ++ .../codesearchnet_javascript/__init__.py | 0 .../codesearchnet_javascript/config.jsonnet | 52 +++++++++++++++++++ .../codesearchnet_javascript/doc.md | 19 +++++++ .../codesearchnet_javascript/task.py | 5 ++ .../codesearchnet_php/__init__.py | 0 .../codesearchnet_php/config.jsonnet | 52 +++++++++++++++++++ .../codesearchnet_php/doc.md | 19 +++++++ .../codesearchnet_php/task.py | 5 ++ .../codesearchnet_ruby/__init__.py | 0 .../codesearchnet_ruby/config.jsonnet | 52 +++++++++++++++++++ .../codesearchnet_ruby/doc.md | 19 +++++++ .../codesearchnet_ruby/task.py | 5 ++ .../tasks/nl_codesearch_mrr/config.jsonnet | 31 +++++++++++ src/genbench/tasks/nl_codesearch_mrr/doc.md | 17 ++++++ .../nl_codesearch_mrr/webquery/__init__.py | 0 .../nl_codesearch_mrr/webquery/config.jsonnet | 52 
+++++++++++++++++++ .../tasks/nl_codesearch_mrr/webquery/doc.md | 19 +++++++ .../tasks/nl_codesearch_mrr/webquery/task.py | 5 ++ 40 files changed, 628 insertions(+), 38 deletions(-) create mode 100644 src/genbench/tasks/nl_codesearch_mrr/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py create mode 100644 
src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/webquery/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/webquery/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/webquery/task.py diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet index 276829a..1d042e4 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet @@ -53,13 +53,11 @@ prompt_builder: { // Currently, we follow BIG-bench options for prompt construction: // https://github.com/google/BIG-bench/blob/main/docs/doc.md#optional-fields - instruction_zero_shot: 'Add two numbers together', - input_prefix: 'Q: ', - output_prefix: 'A: ', - choices_prefix: '\n choice: ', - append_choices_to_input: true, - few_shot_example_separator: '\n', - stop_string: '\n\n', + instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, } }, }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet index aa6154c..48dbee9 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet @@ -50,10 +50,11 @@ // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', + instruction_zero_shot: 'Given a code comment and a Go programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, } }, }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet index 5ca8d57..86a5b4d 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet @@ -50,10 +50,11 @@ // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together.
Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', + instruction_zero_shot: 'Given a code comment and a Java programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, } }, }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet index 851c047..874515a 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet @@ -50,10 +50,11 @@ // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', + instruction_zero_shot: 'Given a code comment and a JavaScript programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, } }, }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet index e5face3..ca2b9fd 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet @@ -50,10 +50,11 @@ // also provide a custom prompt preparation in the task's Python class.
prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', + instruction_zero_shot: 'Given a code comment and a PHP programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, } }, }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet index 23486f8..49ad8eb 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet @@ -50,10 +50,11 @@ // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', + instruction_zero_shot: 'Given a code comment and a Ruby programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not.
The input format is defined as: comment [SEP] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, } }, }, diff --git a/src/genbench/tasks/nl_codesearch_clf/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/config.jsonnet index b657f43..63dd636 100644 --- a/src/genbench/tasks/nl_codesearch_clf/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/config.jsonnet @@ -2,7 +2,7 @@ name: 'Natural Language Codesearch Classification', // @TODO: Add a description of the task - description: 'Natural Language Codesearch Classification aims to measure the generalization capabilites of language models in code understanding. It includes multiple subtasks to measure three different types of generalizations', + description: 'Natural Language Codesearch Classification aims to measure the generalization capabilities of language models in code understanding using binary classification as an evaluation task. It includes multiple subtasks to measure three different types of generalizations', // @TODO: Add a list of keywords that describe the task keywords: [ @@ -19,13 +19,13 @@ ], subtasks_order: [ + 'codesearchnet_adv', + 'webquery', 'codesearchnet_ruby', 'codesearchnet_go', 'codesearchnet_java', 'codesearchnet_javascript', 'codesearchnet_php', - 'codesearchnet_adv', - 'webquery', 'statcodesearch', ], diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet index 43dd2fa..c4f3cf5 100644 --- a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet @@ -51,10 +51,11 @@ // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together.
Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', + instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, } }, }, diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet index e4d2d8a..9f4aef9 100644 --- a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet @@ -51,10 +51,11 @@ // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', + instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, } }, }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/__init__.py new file mode 100644 index 0000000..85a91e5 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/__init__.py @@ -0,0 +1,5 @@ +from genbench import TaskDict + + +class NlCodesearchMrr(TaskDict): + pass diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet new file mode 100644 index 0000000..4c73f01 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet @@ -0,0 +1,52 @@ +{ + name: 'Natural Language Codesearch Ranking (codesearchnet_adv)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking (codesearchnet_adv) aims to measure ...', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'keyword1', + 'keyword2', + ], + + authors: [ + 'Andor Diera', + ' Abdelhalim Dahou', + ' Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
+ // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/doc.md b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/doc.md new file mode 100644 index 0000000..901fc56 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Ranking (codesearchnet_adv) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Ranking (codesearchnet_adv).* + +## Examples +*Give some examples of the Natural Language Codesearch Ranking (codesearchnet_adv).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Ranking (codesearchnet_adv).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Ranking (codesearchnet_adv) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py new file mode 100644 index 0000000..6df3d41 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchMrrCodesearchnetAdv(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet new file mode 100644 index 0000000..06729d7 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet @@ -0,0 +1,52 @@ +{ + name: 'Natural Language Codesearch Ranking (codesearchnet_go)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking (codesearchnet_go) aims to measure ...', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'keyword1', + 'keyword2', + ], + + authors: [ + 'Andor Diera', + ' Abdelhalim Dahou', + ' Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. 
+ prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/doc.md b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/doc.md new file mode 100644 index 0000000..8bbf5c3 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Ranking (codesearchnet_go) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Ranking (codesearchnet_go).* + +## Examples +*Give some examples of the Natural Language Codesearch Ranking (codesearchnet_go).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Ranking (codesearchnet_go).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Ranking (codesearchnet_go) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py new file mode 100644 index 0000000..c9aed7c --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchMrrCodesearchnetGo(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet new file mode 100644 index 0000000..7efaa64 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet @@ -0,0 +1,52 @@ +{ + name: 'Natural Language Codesearch Ranking (codesearchnet_java)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking (codesearchnet_java) aims to measure ...', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'keyword1', + 'keyword2', + ], + + authors: [ + 'Andor Diera', + ' Abdelhalim Dahou', + ' Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. 
+ prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/doc.md b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/doc.md new file mode 100644 index 0000000..a18ffab --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Ranking (codesearchnet_java) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Ranking (codesearchnet_java).* + +## Examples +*Give some examples of the Natural Language Codesearch Ranking (codesearchnet_java).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Ranking (codesearchnet_java).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Ranking (codesearchnet_java) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py new file mode 100644 index 0000000..44b086c --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchMrrCodesearchnetJava(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet new file mode 100644 index 0000000..fe84dde --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet @@ -0,0 +1,52 @@ +{ + name: 'Natural Language Codesearch Ranking (codesearchnet_javascript)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking (codesearchnet_javascript) aims to measure ...', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'keyword1', + 'keyword2', + ], + + authors: [ + 'Andor Diera', + ' Abdelhalim Dahou', + ' Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. 
But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/doc.md b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/doc.md new file mode 100644 index 0000000..6b56758 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Ranking (codesearchnet_javascript) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Ranking (codesearchnet_javascript).* + +## Examples +*Give some examples of the Natural Language Codesearch Ranking (codesearchnet_javascript).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Ranking (codesearchnet_javascript).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Ranking (codesearchnet_javascript) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py new file mode 100644 index 0000000..57b8746 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchMrrCodesearchnetJavascript(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet new file mode 100644 index 0000000..9adc92d --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet @@ -0,0 +1,52 @@ +{ + name: 'Natural Language Codesearch Ranking (codesearchnet_php)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking (codesearchnet_php) aims to measure ...', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'keyword1', + 'keyword2', + ], + + authors: [ + 'Andor Diera', + ' Abdelhalim Dahou', + ' Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. 
But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/doc.md b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/doc.md new file mode 100644 index 0000000..9fd3043 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Ranking (codesearchnet_php) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Ranking (codesearchnet_php).* + +## Examples +*Give some examples of the Natural Language Codesearch Ranking (codesearchnet_php).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Ranking (codesearchnet_php).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Ranking (codesearchnet_php) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py new file mode 100644 index 0000000..a2b13dc --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchMrrCodesearchnetPhp(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet new file mode 100644 index 0000000..7e7c6c4 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet @@ -0,0 +1,52 @@ +{ + name: 'Natural Language Codesearch Ranking (codesearchnet_ruby)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking (codesearchnet_ruby) aims to measure ...', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'keyword1', + 'keyword2', + ], + + authors: [ + 'Andor Diera', + ' Abdelhalim Dahou', + ' Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. 
But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/doc.md b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/doc.md new file mode 100644 index 0000000..a0e0efb --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Ranking (codesearchnet_ruby) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Ranking (codesearchnet_ruby).* + +## Examples +*Give some examples of the Natural Language Codesearch Ranking (codesearchnet_ruby).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Ranking (codesearchnet_ruby).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Ranking (codesearchnet_ruby) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py new file mode 100644 index 0000000..8f52da5 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchMrrCodesearchnetRuby(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet new file mode 100644 index 0000000..362d107 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet @@ -0,0 +1,31 @@ +{ + name: 'Natural Language Codesearch Ranking', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking aims to measure the generalization capabilites of language models in code understanding using mean reciprocal ranking as an evaluation task. It includes multiple subtasks to measure three different types of generalizations', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'mean reciprocal ranking', + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + subtasks_order: [ + 'codesearchnet_adv', + 'webquery', + 'codesearchnet_ruby', + 'codesearchnet_go', + 'codesearchnet_java', + 'codesearchnet_javascript', + 'codesearchnet_php', + + ], +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/doc.md b/src/genbench/tasks/nl_codesearch_mrr/doc.md new file mode 100644 index 0000000..7303e45 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/doc.md @@ -0,0 +1,17 @@ +## Motivation +*Describe the motivation for this Natural Language Codesearch Ranking.* + +## Examples +*Give examples of the Natural Language Codesearch Ranking.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Ranking.* + +## Limitations and Bias +*Note any known limitations or biases 
that the Natural Language Codesearch Ranking has, with links and references if possible.* + +## Citation +*Cite the source where this Natural Language Codesearch Ranking was introduced.* + +## Further References +*Add any useful further references.* \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/webquery/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet new file mode 100644 index 0000000..b019072 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet @@ -0,0 +1,52 @@ +{ + name: 'Natural Language Codesearch Ranking (webquery)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking (webquery) aims to measure ...', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'keyword1', + 'keyword2', + ], + + authors: [ + 'Andor Diera', + ' Abdelhalim Dahou', + ' Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + evaluation_metrics: [ + { + hf_id: 'exact_match', + git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", + best_score: 1.0, + } + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Add two numbers together\n\n', + instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', + input_prefix: 'Q: ', + output_prefix: '\nA: ', + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/doc.md b/src/genbench/tasks/nl_codesearch_mrr/webquery/doc.md new file mode 100644 index 0000000..e31666d --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/webquery/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Ranking (webquery) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Ranking (webquery).* + +## Examples +*Give some examples of the Natural Language Codesearch Ranking (webquery).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Ranking (webquery).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Ranking (webquery) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/task.py b/src/genbench/tasks/nl_codesearch_mrr/webquery/task.py new file mode 100644 index 0000000..d5c7d50 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/webquery/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchMrrWebquery(Task): + pass From 5047d62a207e07f5fe136e291da385b751e342c7 Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 31 Jul 2023 12:53:30 +0200 Subject: [PATCH 18/57] update configs --- .../codesearchnet_adv/config.jsonnet | 35 ++++++------- .../codesearchnet_go/config.jsonnet | 36 ++++++-------- .../codesearchnet_java/config.jsonnet | 34 ++++++------- .../codesearchnet_javascript/config.jsonnet | 34 ++++++------- .../codesearchnet_php/config.jsonnet | 34 ++++++------- .../codesearchnet_ruby/config.jsonnet | 36 ++++++-------- .../tasks/nl_codesearch_mrr/config.jsonnet | 2 +- .../statcodesearch/__init__.py | 0 .../statcodesearch/config.jsonnet | 49 +++++++++++++++++++ .../nl_codesearch_mrr/statcodesearch/doc.md | 19 +++++++ .../nl_codesearch_mrr/statcodesearch/task.py | 5 ++ .../nl_codesearch_mrr/webquery/config.jsonnet | 35 ++++++------- 12 files changed, 183 insertions(+), 136 deletions(-) create mode 100644 src/genbench/tasks/nl_codesearch_mrr/statcodesearch/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_mrr/statcodesearch/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet index 4c73f01..6e7736a 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet @@ -2,37 +2,39 @@ name: 'Natural Language Codesearch Ranking (codesearchnet_adv)', // @TODO: Add a description of the task 
- description: 'Natural Language Codesearch Ranking (codesearchnet_adv) aims to measure ...', + description: 'Natural Language Codesearch Ranking (codesearchnet_adv) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift', // @TODO: Add a list of keywords that describe the task keywords: [ - 'keyword1', - 'keyword2', + 'codesearch', + 'natural language query', + 'mean reciprocal rank', + 'python', + 'robustness', + 'covariate shift', ], authors: [ 'Andor Diera', - ' Abdelhalim Dahou', - ' Florian Sihler', + 'Abdelhalim Dahou', + 'Florian Sihler', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, - has_train_set: false, + has_train_set: true, - task_type: 'free_form', + task_type: '', evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + }, ], preparation_strategies: { @@ -40,13 +42,8 @@ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', - } + finetuning: { + objective: 'maximum_likelihood', }, }, } \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet index 06729d7..595ab27 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet @@ -1,38 +1,39 @@ -{ +{ name: 'Natural Language Codesearch Ranking (codesearchnet_go)', // @TODO: Add a description of the task - description: 'Natural Language Codesearch Ranking (codesearchnet_go) aims to measure ...', + description: 'Natural Language Codesearch Ranking (codesearchnet_go) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', // @TODO: Add a list of keywords that describe the task keywords: [ - 'keyword1', - 'keyword2', + 'codesearch', + 'natural language query', + 'mean reciprocal rank', + 'go', + 'cross-lingual', ], authors: [ 'Andor Diera', - ' Abdelhalim Dahou', - ' Florian Sihler', + 'Abdelhalim Dahou', + 'Florian Sihler', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_go/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, - has_train_set: false, + has_train_set: true, - task_type: 'free_form', + task_type: '', evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + }, ], preparation_strategies: { @@ -40,13 +41,8 @@ // This recipe is suitable for generative LMs 
such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', - } + finetuning: { + objective: 'maximum_likelihood', }, }, } \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet index 7efaa64..c8e9f4a 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet @@ -2,37 +2,38 @@ name: 'Natural Language Codesearch Ranking (codesearchnet_java)', // @TODO: Add a description of the task - description: 'Natural Language Codesearch Ranking (codesearchnet_java) aims to measure ...', + description: 'Natural Language Codesearch Ranking (codesearchnet_java) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', // @TODO: Add a list of keywords that describe the task keywords: [ - 'keyword1', - 'keyword2', + 'codesearch', + 'natural language query', + 'mean reciprocal rank', + 'java', + 'cross-lingual' ], authors: [ 'Andor Diera', - ' Abdelhalim Dahou', - ' Florian Sihler', + 'Abdelhalim Dahou', + 'Florian Sihler', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_java/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, - has_train_set: false, + has_train_set: true, - task_type: 'free_form', + task_type: '', evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + }, ], preparation_strategies: { @@ -40,13 +41,8 @@ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', - } + finetuning: { + objective: 'maximum_likelihood', }, }, } \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet index fe84dde..ecfe265 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet @@ -2,37 +2,38 @@ name: 'Natural Language Codesearch Ranking (codesearchnet_javascript)', // @TODO: Add a description of the task - description: 'Natural Language Codesearch Ranking (codesearchnet_javascript) aims to measure ...', + description: 'Natural Language Codesearch Ranking (codesearchnet_javascript) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', // @TODO: Add a list of keywords that describe the task keywords: [ - 'keyword1', - 'keyword2', + 'codesearch', + 'natural language query', + 'mean reciprocal rank', + 'javascript', + 'cross-lingual', ], authors: [ 'Andor Diera', - ' Abdelhalim Dahou', - ' Florian Sihler', + 'Abdelhalim Dahou', + 'Florian Sihler', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_javascript/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, - has_train_set: false, + has_train_set: true, - task_type: 'free_form', + task_type: '', evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + }, ], preparation_strategies: { @@ 
-40,13 +41,8 @@ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', - } + finetuning: { + objective: 'maximum_likelihood', }, }, } \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet index 9adc92d..d1bb755 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet @@ -2,37 +2,38 @@ name: 'Natural Language Codesearch Ranking (codesearchnet_php)', // @TODO: Add a description of the task - description: 'Natural Language Codesearch Ranking (codesearchnet_php) aims to measure ...', + description: 'Natural Language Codesearch Ranking (codesearchnet_php) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', // @TODO: Add a list of keywords that describe the task keywords: [ - 'keyword1', - 'keyword2', + 'codesearch', + 'natural language query', + 'mean reciprocal rank', + 'php', + 'cross-lingual', ], authors: [ 'Andor Diera', - ' Abdelhalim Dahou', - ' Florian Sihler', + 'Abdelhalim Dahou', + 'Florian Sihler', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_php/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_php/train_sample_cbt.jsonl', }, has_validation_set: false, - has_train_set: false, + has_train_set: true, - task_type: 'free_form', + task_type: '', evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + }, ], preparation_strategies: { @@ -40,13 +41,8 @@ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', - } + finetuning: { + objective: 'maximum_likelihood', }, }, } \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet index 7e7c6c4..2962f56 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet @@ -1,38 +1,39 @@ -{ +{{ name: 'Natural Language Codesearch Ranking (codesearchnet_ruby)', // @TODO: Add a description of the task - description: 'Natural Language Codesearch Ranking (codesearchnet_ruby) aims to measure ...', + description: 'Natural Language Codesearch Ranking (codesearchnet_ruby) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', // @TODO: Add a list of keywords that describe the task keywords: [ - 'keyword1', - 'keyword2', + 'codesearch', + 'natural language query', + 'mean reciprocal rank', + 'ruby', + 'cross-lingual', ], authors: [ 'Andor Diera', - ' Abdelhalim Dahou', - ' Florian Sihler', + 'Abdelhalim Dahou', + 'Florian Sihler', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_ruby/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, - has_train_set: false, + has_train_set: true, - task_type: 'free_form', + task_type: '', evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + }, ], preparation_strategies: { @@ -40,13 +41,8 @@ // This recipe is suitable for 
generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', - } + finetuning: { + objective: 'maximum_likelihood', }, }, } \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet index 362d107..20008df 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet @@ -26,6 +26,6 @@ 'codesearchnet_java', 'codesearchnet_javascript', 'codesearchnet_php', - + 'statcodesearch', ], } \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet new file mode 100644 index 0000000..179d7ae --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet @@ -0,0 +1,49 @@ +{ + name: 'Natural Language Codesearch Ranking (statcodesearch)', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Ranking (statcodesearch) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual and domain generalization', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'mean reciprocal rank', + 'r', + 'cross-lingual', + 'domain-shift' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Florian Sihler', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/statcodesearch/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: '', + + evaluation_metrics: [ + { + }, + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. 
+ finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/doc.md b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/doc.md new file mode 100644 index 0000000..0826a5c --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (statcodesearch) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (statcodesearch).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (statcodesearch).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (statcodesearch).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (statcodesearch) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py new file mode 100644 index 0000000..f7089b5 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py @@ -0,0 +1,5 @@ +from genbench import Task + + +class NlCodesearchClfStatcodesearch(Task): + pass diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet index b019072..15fe6a5 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet @@ -2,37 +2,39 @@ name: 'Natural Language Codesearch Ranking (webquery)', // @TODO: Add a description of the task - description: 'Natural Language Codesearch Ranking (webquery) aims to measure ...', + description: 'Natural Language Codesearch Ranking (codesearchnet_javascript) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures robustness in covariate shift', // @TODO: Add a list of keywords that describe the task keywords: [ - 'keyword1', - 'keyword2', + 'codesearch', + 'natural language query', + 'mean reciprocal rank', + 'python', + 'robustness', + 'covariate shift', ], authors: [ 'Andor Diera', - ' Abdelhalim Dahou', - ' Florian Sihler', + 'Abdelhalim Dahou', + 'Florian Sihler', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/GenBench/genbench_cbt/main/src/genbench/dummy_data/LLM_test.jsonl', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/webquery/test_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, - has_train_set: false, + has_train_set: true, - task_type: 'free_form', + task_type: '', evaluation_metrics: [ { - hf_id: 'exact_match', - git_commit_sha: "758135da6a37ce962b7bc38c6dd5eab672d2b742", - best_score: 1.0, - } + }, ], preparation_strategies: { @@ -40,13 +42,8 @@ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. // We provide a few options for configuring the prompt. But, the task creator can // also provide a custom prompt preparation in the task's Python class. - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Add two numbers together\n\n', - instruction_few_shot: 'Add two numbers together. 
Here are some examples: \n\n', - input_prefix: 'Q: ', - output_prefix: '\nA: ', - } + finetuning: { + objective: 'maximum_likelihood', }, }, } \ No newline at end of file From c4ec13c8d27830228a3194b67d52e1ec54dbde2a Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 31 Jul 2023 15:20:40 +0200 Subject: [PATCH 19/57] update main mrr config --- .../codesearchnet_adv/task.py | 67 ++++++++++++++++++- .../tasks/nl_codesearch_mrr/config.jsonnet | 3 +- 2 files changed, 67 insertions(+), 3 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py index 6df3d41..03eb671 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py @@ -1,5 +1,68 @@ from genbench import Task - +@Task.register("nl_codesearch_mrrcodesearchnet_adv") class NlCodesearchMrrCodesearchnetAdv(Task): - pass + + def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: + """Create the dataset by mixing every three data instance together. + + Args: + None + + Returns: + A dictionary containing key-value pairs for the raw datasets. + The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the raw data for the corresponding split. 
+ """ + # Load the raw datasets + print("kaki") + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + + # Mix every three data instances together per each split + output: Dict[str, datasets.Dataset] = {} + for split, dataset in raw_datasets.items(): + # Combine every three data instances together + dataset = dataset.map(self._magic_combo, batched=True, batch_size=3) + + # Maybe do additional processing/formatting here + dataset = dataset.map(self.format_example) + + output[split] = dataset + + return output + + def _magic_combo(self, examples: Dict[str, List[Any]]) -> Dict[str, List[Any]]: + """Combine every three data instances together. + + Args: + examples: A dictionary containing key-value pairs for the data instances. + The keys are strings representing the name of the data instance + (e.g., "input", "target") and the values are lists containing + the data instance values. + + Returns: + A dictionary containing key-value pairs for the combined data instances. + The keys are strings representing the name of the data instance + (e.g., "input", "target") and the values are lists containing + the combined data instance values. + """ + + single_example: Dict[str, Any] = {} + + # Perform some cool mixing magic here + # ... 
+ + # HuggingFace datasets.Dataset.map() expects + # a dictionary of lists as output + output = {k: [v] for k, v in single_example.items()} + + return output + + +def main(): + task = NlCodesearchMrrCodesearchnetAdv() + task.get_dataset_raw() + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet index 20008df..a4a6d73 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet @@ -2,7 +2,7 @@ name: 'Natural Language Codesearch Ranking', // @TODO: Add a description of the task - description: ''Natural Language Codesearch Ranking aims to measure the generalization capabilites of language models in code understanding using mean reciprocal ranking as an evaluation task. It includes multiple subtasks to measure three different types of generalizations', + description: 'Natural Language Codesearch Ranking aims to measure the generalization capabilites of language models in code understanding using mean reciprocal ranking as an evaluation task. 
It includes multiple subtasks to measure three different types of generalizations', // @TODO: Add a list of keywords that describe the task keywords: [ @@ -27,5 +27,6 @@ 'codesearchnet_javascript', 'codesearchnet_php', 'statcodesearch', + ], } \ No newline at end of file From 83249a66343433658e572a04e3638331bc689c57 Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 31 Jul 2023 16:33:34 +0200 Subject: [PATCH 20/57] fix codesearchnet cfg json2jsonl --- .../nl_codesearch_clf/codesearchnet_go/config.jsonnet | 2 +- .../nl_codesearch_clf/codesearchnet_java/config.jsonnet | 2 +- .../codesearchnet_javascript/config.jsonnet | 2 +- .../nl_codesearch_clf/codesearchnet_php/config.jsonnet | 2 +- .../nl_codesearch_clf/codesearchnet_ruby/config.jsonnet | 2 +- .../tasks/nl_codesearch_clf/statcodesearch/config.jsonnet | 2 +- .../nl_codesearch_mrr/codesearchnet_adv/config.jsonnet | 5 ++++- .../tasks/nl_codesearch_mrr/codesearchnet_adv/task.py | 4 +++- .../nl_codesearch_mrr/codesearchnet_go/config.jsonnet | 7 +++++-- .../nl_codesearch_mrr/codesearchnet_java/config.jsonnet | 5 ++++- .../codesearchnet_javascript/config.jsonnet | 5 ++++- .../nl_codesearch_mrr/codesearchnet_php/config.jsonnet | 5 ++++- .../nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet | 5 ++++- .../tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet | 5 ++++- .../tasks/nl_codesearch_mrr/webquery/config.jsonnet | 5 ++++- 15 files changed, 42 insertions(+), 16 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet index 48dbee9..7683834 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_go/test_sample_cbt.json', + test: 
'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_go/test_sample_cbt.jsonl', train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet index 86a5b4d..e9a56c3 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_java/test_sample_cbt.json', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_java/test_sample_cbt.jsonl', train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet index 874515a..feaa37b 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_javascript/test_sample_cbt.json', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_javascript/test_sample_cbt.jsonl', train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet index 
ca2b9fd..8e28a03 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_php/test_sample_cbt.json', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_php/test_sample_cbt.jsonl', train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet index 49ad8eb..e082b27 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet @@ -22,7 +22,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_ruby/test_sample_cbt.json', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_ruby/test_sample_cbt.jsonl', train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet index c4f3cf5..3e08da2 100644 --- a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet @@ -23,7 +23,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/statcodesearch/test_sample_cbt.json', + test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/statcodesearch/test_sample_cbt.jsonl', 
train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet index 6e7736a..b0ffd0e 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet @@ -30,10 +30,13 @@ has_validation_set: false, has_train_set: true, - task_type: '', + task_type: 'multiple_choice', evaluation_metrics: [ { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py index 03eb671..47f8989 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py @@ -1,6 +1,8 @@ from genbench import Task +from typing import Any, Dict, List +import datasets -@Task.register("nl_codesearch_mrrcodesearchnet_adv") +#@Task.register("nl_codesearch_mrr:codesearchnet_adv") class NlCodesearchMrrCodesearchnetAdv(Task): def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet index 595ab27..b62ffb9 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet @@ -1,4 +1,4 @@ -{{ +{ name: 'Natural Language Codesearch Ranking (codesearchnet_go)', // @TODO: Add a description of the task @@ -29,10 +29,13 @@ has_validation_set: false, has_train_set: true, - task_type: '', + task_type: 'multiple_choice', evaluation_metrics: [ { + hf_id: 'accuracy', + git_commit_sha: 
'34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet index c8e9f4a..764d852 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet @@ -29,10 +29,13 @@ has_validation_set: false, has_train_set: true, - task_type: '', + task_type: 'multiple_choice', evaluation_metrics: [ { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet index ecfe265..5b6d930 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet @@ -29,10 +29,13 @@ has_validation_set: false, has_train_set: true, - task_type: '', + task_type: 'multiple_choice', evaluation_metrics: [ { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet index d1bb755..cf6d3e1 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet @@ -29,10 +29,13 @@ has_validation_set: false, has_train_set: true, - task_type: '', + task_type: 'multiple_choice', evaluation_metrics: [ { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet 
b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet index 2962f56..154f03a 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet @@ -29,10 +29,13 @@ has_validation_set: false, has_train_set: true, - task_type: '', + task_type: 'multiple_choice', evaluation_metrics: [ { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet index 179d7ae..de5beb9 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet @@ -30,10 +30,13 @@ has_validation_set: false, has_train_set: true, - task_type: '', + task_type: 'multiple_choice', evaluation_metrics: [ { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet index 15fe6a5..bb0eec7 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet @@ -30,10 +30,13 @@ has_validation_set: false, has_train_set: true, - task_type: '', + task_type: 'multiple_choice', evaluation_metrics: [ { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], From f43086f653aab80aae9e38faaee2bb4d74caca13 Mon Sep 17 00:00:00 2001 From: drndr Date: Tue, 1 Aug 2023 16:45:50 +0200 Subject: [PATCH 21/57] add mrr task scripts --- .../codesearchnet_adv/task.py | 129 ++++++++++-------- .../codesearchnet_adv/test_mrr_task.py | 36 +++++ .../codesearchnet_go/task.py | 81 ++++++++++- .../codesearchnet_java/task.py | 81 ++++++++++- 
.../codesearchnet_javascript/task.py | 81 ++++++++++- .../codesearchnet_php/config.jsonnet | 2 +- .../codesearchnet_php/task.py | 81 ++++++++++- .../codesearchnet_ruby/config.jsonnet | 2 +- .../codesearchnet_ruby/task.py | 81 ++++++++++- src/genbench/tasks/nl_codesearch_mrr/doc.md | 44 +++++- .../nl_codesearch_mrr/statcodesearch/task.py | 81 ++++++++++- .../tasks/nl_codesearch_mrr/webquery/task.py | 81 ++++++++++- 12 files changed, 708 insertions(+), 72 deletions(-) create mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/test_mrr_task.py diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py index 47f8989..75788df 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py @@ -1,70 +1,85 @@ -from genbench import Task -from typing import Any, Dict, List -import datasets - -#@Task.register("nl_codesearch_mrr:codesearchnet_adv") -class NlCodesearchMrrCodesearchnetAdv(Task): +import random +from typing import Dict, List - def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: - """Create the dataset by mixing every three data instance together. +import datasets +import numpy as np +from more_itertools import chunked - Args: - None +from genbench import Task - Returns: - A dictionary containing key-value pairs for the raw datasets. - The keys are strings representing the name of the dataset split - (e.g., "train", "validation", "test") and the values are - HuggingFace `datasets.Dataset` objects containing the raw data for the corresponding split. 
- """ - # Load the raw datasets - print("kaki") - raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() - # Mix every three data instances together per each split - output: Dict[str, datasets.Dataset] = {} - for split, dataset in raw_datasets.items(): - # Combine every three data instances together - dataset = dataset.map(self._magic_combo, batched=True, batch_size=3) +# @Task.register("nl_codesearch_mrr:codesearchnet_adv") this doesnt seem to work +class NlCodesearchMrrCodesearchnetAdv(Task): + def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: + """Create the dataset adding n distractor pair (original comment, random code snippet) for ranking. - # Maybe do additional processing/formatting here - dataset = dataset.map(self.format_example) + Args: + n_distractors: the number of randomly sampled distractor code for each ranking chunk + Returns: + A dictionary containing key-value pairs for the raw datasets. + The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. 
+ """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + # Create 49 distractors for each item + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select 49 other items + random_items = random.sample(other_items, n_distractors) + # Split input into comment and code + input_parts = item["input"].split("[SEP]") + for random_item in random_items: + # Split random input into comment and code + random_input_parts = random_item["input"].split("[SEP]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add distractor comment-code pair to new dataset + new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: output[split] = dataset + return output - return output - - def _magic_combo(self, examples: Dict[str, List[Any]]) -> Dict[str, List[Any]]: - """Combine every three data instances together. - - Args: - examples: A dictionary containing key-value pairs for the data instances. - The keys are strings representing the name of the data instance - (e.g., "input", "target") and the values are lists containing - the data instance values. + def evaluate_predictions( + self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors + ) -> Dict[str, float]: + """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors + This function assumes that the predictions were made and passed onto this function unshuffled. 
+ The test data is ordered with each true pair followed by n number of distractors + Args: + predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. + The keys are strings and the values are floats (logit scores or similarity values). + gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. + n_distractors: Number of distractor comment-code pair for each true pair. + Must be the same number as in the get_dataset_raw function - Returns: - A dictionary containing key-value pairs for the combined data instances. - The keys are strings representing the name of the data instance - (e.g., "input", "target") and the values are lists containing - the combined data instance values. + Returns: + A dictionary containing key-value pairs for the evaluation metric(s) computed on the predicted + values. The keys are strings representing the name of the evaluation metric and the values are + floating-point numbers. """ - - single_example: Dict[str, Any] = {} + ranks = [] - # Perform some cool mixing magic here - # ... 
+ batched_predictions = chunked(predictions, n_distractors + 1) - # HuggingFace datasets.Dataset.map() expects - # a dictionary of lists as output - output = {k: [v] for k, v in single_example.items()} + for batch_idx, predictions in enumerate(batched_predictions): + correct_score = predictions[0]["score"] + scores = np.array([prediction["score"] for prediction in predictions]) + rank = np.sum(scores >= correct_score) + ranks.append(rank) + mean_mrr = np.mean(1.0 / np.array(ranks)) - return output - - -def main(): - task = NlCodesearchMrrCodesearchnetAdv() - task.get_dataset_raw() - -if __name__ == '__main__': - main() \ No newline at end of file + return {"mean mrr": mean_mrr} diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/test_mrr_task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/test_mrr_task.py new file mode 100644 index 0000000..62f81d8 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/test_mrr_task.py @@ -0,0 +1,36 @@ +import dataclass_factory +from task import NlCodesearchMrrCodesearchnetAdv + +from genbench.task_config import TaskConfig +from genbench.utils.file import load_jsonnet + + +def main(): + high_mrr_test_list = [] + for i in range(1, 11): + score_dict = dict.fromkeys(["score"]) + score_dict["score"] = 1 / i + high_mrr_test_list.append(score_dict) + + low_mrr_test_list = [] + for i in range(1, 11): + score_dict = dict.fromkeys(["score"]) + score_dict["score"] = 1 * i + low_mrr_test_list.append(score_dict) + + cfg_file = load_jsonnet("./config.jsonnet") + factory = dataclass_factory.Factory() + config: TaskConfig = factory.load(cfg_file, TaskConfig) + + task = NlCodesearchMrrCodesearchnetAdv(config, "nl_codesearch_mrr") + output_ds = task.get_dataset_raw(9) + + high_results = task.evaluate_predictions(high_mrr_test_list, output_ds, 9) + print(high_results) + + low_results = task.evaluate_predictions(low_mrr_test_list, output_ds, 9) + print(low_results) + + +if __name__ == "__main__": + 
class NlCodesearchMrrCodesearchnetGo(Task):
    def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]:
        """Create the ranking dataset by adding n_distractors distractor pairs per true pair.

        A distractor pair keeps the original comment but swaps in the code of a
        randomly sampled other item from the same split.

        Args:
            n_distractors: number of randomly sampled distractor code snippets
                per ranking chunk.

        Returns:
            Dict mapping split names (e.g. "train", "validation", "test") to
            HuggingFace `datasets.Dataset` objects. In the "test" split each
            true comment-code pair is immediately followed by its distractors;
            all other splits are passed through unchanged.
        """
        raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source()
        output: Dict[str, datasets.Dataset] = {}
        # Dedicated RNG instead of seeding the global random state; seeding the
        # same MT19937 with 42 keeps distractor sampling reproducible and
        # identical to the previous random.seed(42) behavior.
        rng = random.Random(42)
        for split, dataset in raw_datasets.items():
            if split != "test":
                output[split] = dataset
                continue
            # Accumulate rows in a list and build the Dataset once at the end:
            # Dataset.add_item() returns a fresh copy on every call, so calling
            # it per row made the original implementation quadratic.
            rows = []
            for item in dataset:
                # True comment-code pair first, then its distractors.
                rows.append(item)
                # Candidate pool: every item except (duplicates of) the current one.
                other_items = [other for other in dataset if other != item]
                random_items = rng.sample(other_items, n_distractors)
                # "input" is "<comment>[SEP]<code>"; keep the true comment.
                comment = item["input"].split("[SEP]")[0]
                for random_item in random_items:
                    distractor_code = random_item["input"].split("[SEP]")[1]
                    rows.append(
                        {
                            "input": comment + "[SEP]" + distractor_code,
                            "target": 0,  # distractor pairs are labelled "no_match"
                            "target_options": item["target_options"],
                        }
                    )
            output[split] = datasets.Dataset.from_list(rows)
        return output

    def evaluate_predictions(
        self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors
    ) -> Dict[str, float]:
        """Compute the mean reciprocal rank over chunks of (true pair + distractors).

        Assumes `predictions` is unshuffled and ordered exactly like the test
        split produced by `get_dataset_raw`: each true pair is immediately
        followed by its n_distractors distractors.

        Args:
            predictions: one dict per example with a float "score" value
                (logit score or similarity).
            gold: ground-truth dataset; unused here, kept for the Task interface.
            n_distractors: distractors per true pair; must match the value
                passed to `get_dataset_raw`.

        Returns:
            {"mean mrr": <float>}: mean reciprocal rank over all chunks.
        """
        chunk_size = n_distractors + 1
        ranks = []
        # Plain slicing replaces more_itertools.chunked (one fewer dependency);
        # a short trailing chunk is handled the same way chunked() handled it.
        for start in range(0, len(predictions), chunk_size):
            chunk_scores = np.array(
                [pred["score"] for pred in predictions[start : start + chunk_size]]
            )
            # Rank of the true pair (index 0); ties count against it, matching
            # the original `scores >= correct_score` convention.
            ranks.append(int(np.sum(chunk_scores >= chunk_scores[0])))
        mean_mrr = float(np.mean(1.0 / np.array(ranks)))
        return {"mean mrr": mean_mrr}
+ """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + # Create 49 distractors for each item + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select 49 other items + random_items = random.sample(other_items, n_distractors) + # Split input into comment and code + input_parts = item["input"].split("[SEP]") + for random_item in random_items: + # Split random input into comment and code + random_input_parts = random_item["input"].split("[SEP]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add distractor comment-code pair to new dataset + new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output + + def evaluate_predictions( + self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors + ) -> Dict[str, float]: + """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors + This function assumes that the predictions were made and passed onto this function unshuffled. + The test data is ordered with each true pair followed by n number of distractors + Args: + predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. + The keys are strings and the values are floats (logit scores or similarity values). + gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. 
+ n_distractors: Number of distractor comment-code pair for each true pair. + Must be the same number as in the get_dataset_raw function + + Returns: + A dictionary containing key-value pairs for the evaluation metric(s) computed on the predicted + values. The keys are strings representing the name of the evaluation metric and the values are + floating-point numbers. + """ + ranks = [] + + batched_predictions = chunked(predictions, n_distractors + 1) + + for batch_idx, predictions in enumerate(batched_predictions): + correct_score = predictions[0]["score"] + scores = np.array([prediction["score"] for prediction in predictions]) + rank = np.sum(scores >= correct_score) + ranks.append(rank) + mean_mrr = np.mean(1.0 / np.array(ranks)) + + return {"mean mrr": mean_mrr} diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py index 57b8746..ffdb4ff 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py @@ -1,5 +1,84 @@ +import random +from typing import Dict, List + +import datasets +import numpy as np +from more_itertools import chunked + from genbench import Task class NlCodesearchMrrCodesearchnetJavascript(Task): - pass + def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: + """Create the dataset adding n distractor pair (original comment, random code snippet) for ranking. + + Args: + n_distractors: the number of randomly sampled distractor code for each ranking chunk + + Returns: + A dictionary containing key-value pairs for the raw datasets. + The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. 
+ """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + # Create 49 distractors for each item + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select 49 other items + random_items = random.sample(other_items, n_distractors) + # Split input into comment and code + input_parts = item["input"].split("[SEP]") + for random_item in random_items: + # Split random input into comment and code + random_input_parts = random_item["input"].split("[SEP]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add distractor comment-code pair to new dataset + new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output + + def evaluate_predictions( + self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors + ) -> Dict[str, float]: + """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors + This function assumes that the predictions were made and passed onto this function unshuffled. + The test data is ordered with each true pair followed by n number of distractors + Args: + predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. + The keys are strings and the values are floats (logit scores or similarity values). + gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. 
+ n_distractors: Number of distractor comment-code pair for each true pair. + Must be the same number as in the get_dataset_raw function + + Returns: + A dictionary containing key-value pairs for the evaluation metric(s) computed on the predicted + values. The keys are strings representing the name of the evaluation metric and the values are + floating-point numbers. + """ + ranks = [] + + batched_predictions = chunked(predictions, n_distractors + 1) + + for batch_idx, predictions in enumerate(batched_predictions): + correct_score = predictions[0]["score"] + scores = np.array([prediction["score"] for prediction in predictions]) + rank = np.sum(scores >= correct_score) + ranks.append(rank) + mean_mrr = np.mean(1.0 / np.array(ranks)) + + return {"mean mrr": mean_mrr} diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet index cf6d3e1..068617b 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet @@ -23,7 +23,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_php/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_php/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py index a2b13dc..3790767 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py @@ -1,5 +1,84 @@ +import random +from typing import Dict, List + +import datasets +import numpy as np +from 
class NlCodesearchMrrCodesearchnetPhp(Task):
    def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]:
        """Create the ranking dataset by adding n_distractors distractor pairs per true pair.

        A distractor pair keeps the original comment but swaps in the code of a
        randomly sampled other item from the same split.

        Args:
            n_distractors: number of randomly sampled distractor code snippets
                per ranking chunk.

        Returns:
            Dict mapping split names (e.g. "train", "validation", "test") to
            HuggingFace `datasets.Dataset` objects. In the "test" split each
            true comment-code pair is immediately followed by its distractors;
            all other splits are passed through unchanged.
        """
        raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source()
        output: Dict[str, datasets.Dataset] = {}
        # Dedicated RNG instead of seeding the global random state; seeding the
        # same MT19937 with 42 keeps distractor sampling reproducible and
        # identical to the previous random.seed(42) behavior.
        rng = random.Random(42)
        for split, dataset in raw_datasets.items():
            if split != "test":
                output[split] = dataset
                continue
            # Accumulate rows in a list and build the Dataset once at the end:
            # Dataset.add_item() returns a fresh copy on every call, so calling
            # it per row made the original implementation quadratic.
            rows = []
            for item in dataset:
                # True comment-code pair first, then its distractors.
                rows.append(item)
                # Candidate pool: every item except (duplicates of) the current one.
                other_items = [other for other in dataset if other != item]
                random_items = rng.sample(other_items, n_distractors)
                # "input" is "<comment>[SEP]<code>"; keep the true comment.
                comment = item["input"].split("[SEP]")[0]
                for random_item in random_items:
                    distractor_code = random_item["input"].split("[SEP]")[1]
                    rows.append(
                        {
                            "input": comment + "[SEP]" + distractor_code,
                            "target": 0,  # distractor pairs are labelled "no_match"
                            "target_options": item["target_options"],
                        }
                    )
            output[split] = datasets.Dataset.from_list(rows)
        return output

    def evaluate_predictions(
        self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors
    ) -> Dict[str, float]:
        """Compute the mean reciprocal rank over chunks of (true pair + distractors).

        Assumes `predictions` is unshuffled and ordered exactly like the test
        split produced by `get_dataset_raw`: each true pair is immediately
        followed by its n_distractors distractors.

        Args:
            predictions: one dict per example with a float "score" value
                (logit score or similarity).
            gold: ground-truth dataset; unused here, kept for the Task interface.
            n_distractors: distractors per true pair; must match the value
                passed to `get_dataset_raw`.

        Returns:
            {"mean mrr": <float>}: mean reciprocal rank over all chunks.
        """
        chunk_size = n_distractors + 1
        ranks = []
        # Plain slicing replaces more_itertools.chunked (one fewer dependency);
        # a short trailing chunk is handled the same way chunked() handled it.
        for start in range(0, len(predictions), chunk_size):
            chunk_scores = np.array(
                [pred["score"] for pred in predictions[start : start + chunk_size]]
            )
            # Rank of the true pair (index 0); ties count against it, matching
            # the original `scores >= correct_score` convention.
            ranks.append(int(np.sum(chunk_scores >= chunk_scores[0])))
        mean_mrr = float(np.mean(1.0 / np.array(ranks)))
        return {"mean mrr": mean_mrr}
class NlCodesearchMrrCodesearchnetRuby(Task):
    def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]:
        """Create the ranking dataset by adding n_distractors distractor pairs per true pair.

        A distractor pair keeps the original comment but swaps in the code of a
        randomly sampled other item from the same split.

        Args:
            n_distractors: number of randomly sampled distractor code snippets
                per ranking chunk.

        Returns:
            Dict mapping split names (e.g. "train", "validation", "test") to
            HuggingFace `datasets.Dataset` objects. In the "test" split each
            true comment-code pair is immediately followed by its distractors;
            all other splits are passed through unchanged.
        """
        raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source()
        output: Dict[str, datasets.Dataset] = {}
        # Dedicated RNG instead of seeding the global random state; seeding the
        # same MT19937 with 42 keeps distractor sampling reproducible and
        # identical to the previous random.seed(42) behavior.
        rng = random.Random(42)
        for split, dataset in raw_datasets.items():
            if split != "test":
                output[split] = dataset
                continue
            # Accumulate rows in a list and build the Dataset once at the end:
            # Dataset.add_item() returns a fresh copy on every call, so calling
            # it per row made the original implementation quadratic.
            rows = []
            for item in dataset:
                # True comment-code pair first, then its distractors.
                rows.append(item)
                # Candidate pool: every item except (duplicates of) the current one.
                other_items = [other for other in dataset if other != item]
                random_items = rng.sample(other_items, n_distractors)
                # "input" is "<comment>[SEP]<code>"; keep the true comment.
                comment = item["input"].split("[SEP]")[0]
                for random_item in random_items:
                    distractor_code = random_item["input"].split("[SEP]")[1]
                    rows.append(
                        {
                            "input": comment + "[SEP]" + distractor_code,
                            "target": 0,  # distractor pairs are labelled "no_match"
                            "target_options": item["target_options"],
                        }
                    )
            output[split] = datasets.Dataset.from_list(rows)
        return output

    def evaluate_predictions(
        self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors
    ) -> Dict[str, float]:
        """Compute the mean reciprocal rank over chunks of (true pair + distractors).

        Assumes `predictions` is unshuffled and ordered exactly like the test
        split produced by `get_dataset_raw`: each true pair is immediately
        followed by its n_distractors distractors.

        Args:
            predictions: one dict per example with a float "score" value
                (logit score or similarity).
            gold: ground-truth dataset; unused here, kept for the Task interface.
            n_distractors: distractors per true pair; must match the value
                passed to `get_dataset_raw`.

        Returns:
            {"mean mrr": <float>}: mean reciprocal rank over all chunks.
        """
        chunk_size = n_distractors + 1
        ranks = []
        # Plain slicing replaces more_itertools.chunked (one fewer dependency);
        # a short trailing chunk is handled the same way chunked() handled it.
        for start in range(0, len(predictions), chunk_size):
            chunk_scores = np.array(
                [pred["score"] for pred in predictions[start : start + chunk_size]]
            )
            # Rank of the true pair (index 0); ties count against it, matching
            # the original `scores >= correct_score` convention.
            ranks.append(int(np.sum(chunk_scores >= chunk_scores[0])))
        mean_mrr = float(np.mean(1.0 / np.array(ranks)))
        return {"mean mrr": mean_mrr}
al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. ## Examples -*Give examples of the Natural Language Codesearch Ranking.* +Given n number of code comment pairs (1 true pair and n-1 distractor pair where a comment has been matched with a random code snippet), calculate the MRR score. + +true sample: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ +distractor sample: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . 
get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} ## Data Source -*Describe the data source for this Natural Language Codesearch Ranking.* +**CodeSearchNet** : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ +**CodeSearchNet Adv** : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ +**WebQuery** : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ +**StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors \ + +During evaluation for each true code-comment pair we create n number of distractors where the comment is matched with a random code snippet. 
The distractor samples are sampled consistently by setting the random seed in the get_raw_data function +**Dataset Size**:\ +*Finetuning set:* \ + -CodeSearchNet Adv train set 251k \ +*Test sets:* \ + -CodeSearchNet Adv test set 19k \ + -WebQuery test set 1k \ + -CodeSearchNet Ruby test set 2k \ + -CodeSearchNet Go test set 14k \ + -CodeSearchNet Java test set 26k \ + -CodeSearchNet Javascript test set 6k \ + -CodeSearchNet PHP test set 28k \ + -StatCodeSearch test set TBD ## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Ranking has, with links and references if possible.* +TBD ## Citation -*Cite the source where this Natural Language Codesearch Ranking was introduced.* +TBD ## Further References -*Add any useful further references.* \ No newline at end of file +@article{husain2019codesearchnet, + title={Codesearchnet challenge: Evaluating the state of semantic code search}, + author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, + journal={arXiv preprint arXiv:1909.09436}, + year={2019} +} \ +@article{Lu2021CodeXGLUEAM, + title={CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation}, + author={Shuai Lu and Daya Guo and Shuo Ren and Junjie Huang and Alexey Svyatkovskiy and Ambrosio Blanco and Colin Clement and Dawn Drain and Daxin Jiang and Duyu Tang and Ge Li and Lidong Zhou and Linjun Shou and Long Zhou and Michele Tufano and Ming Gong and Ming Zhou and Nan Duan and Neel Sundaresan and Shao Kun Deng and Shengyu Fu and Shujie Liu}, + journal={ArXiv}, + year={2021}, + volume={abs/2102.04664} +*} diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py index f7089b5..f3ffd6b 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py @@ -1,5 +1,84 @@ +import random +from 
# NOTE(review): class name says "Clf" but this class lives in the nl_codesearch_mrr
# package and implements MRR evaluation — likely a copy-paste misnomer; kept
# unchanged so existing callers/registrations keep working. Confirm upstream.
class NlCodesearchClfStatcodesearch(Task):
    def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]:
        """Create the ranking dataset by adding n_distractors distractor pairs per true pair.

        A distractor pair keeps the original comment but swaps in the code of a
        randomly sampled other item from the same split.

        Args:
            n_distractors: number of randomly sampled distractor code snippets
                per ranking chunk.

        Returns:
            Dict mapping split names (e.g. "train", "validation", "test") to
            HuggingFace `datasets.Dataset` objects. In the "test" split each
            true comment-code pair is immediately followed by its distractors;
            all other splits are passed through unchanged.
        """
        raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source()
        output: Dict[str, datasets.Dataset] = {}
        # Dedicated RNG instead of seeding the global random state; seeding the
        # same MT19937 with 42 keeps distractor sampling reproducible and
        # identical to the previous random.seed(42) behavior.
        rng = random.Random(42)
        for split, dataset in raw_datasets.items():
            if split != "test":
                output[split] = dataset
                continue
            # Accumulate rows in a list and build the Dataset once at the end:
            # Dataset.add_item() returns a fresh copy on every call, so calling
            # it per row made the original implementation quadratic.
            rows = []
            for item in dataset:
                # True comment-code pair first, then its distractors.
                rows.append(item)
                # Candidate pool: every item except (duplicates of) the current one.
                other_items = [other for other in dataset if other != item]
                random_items = rng.sample(other_items, n_distractors)
                # "input" is "<comment>[SEP]<code>"; keep the true comment.
                comment = item["input"].split("[SEP]")[0]
                for random_item in random_items:
                    distractor_code = random_item["input"].split("[SEP]")[1]
                    rows.append(
                        {
                            "input": comment + "[SEP]" + distractor_code,
                            "target": 0,  # distractor pairs are labelled "no_match"
                            "target_options": item["target_options"],
                        }
                    )
            output[split] = datasets.Dataset.from_list(rows)
        return output

    def evaluate_predictions(
        self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors
    ) -> Dict[str, float]:
        """Compute the mean reciprocal rank over chunks of (true pair + distractors).

        Assumes `predictions` is unshuffled and ordered exactly like the test
        split produced by `get_dataset_raw`: each true pair is immediately
        followed by its n_distractors distractors.

        Args:
            predictions: one dict per example with a float "score" value
                (logit score or similarity).
            gold: ground-truth dataset; unused here, kept for the Task interface.
            n_distractors: distractors per true pair; must match the value
                passed to `get_dataset_raw`.

        Returns:
            {"mean mrr": <float>}: mean reciprocal rank over all chunks.
        """
        chunk_size = n_distractors + 1
        ranks = []
        # Plain slicing replaces more_itertools.chunked (one fewer dependency);
        # a short trailing chunk is handled the same way chunked() handled it.
        for start in range(0, len(predictions), chunk_size):
            chunk_scores = np.array(
                [pred["score"] for pred in predictions[start : start + chunk_size]]
            )
            # Rank of the true pair (index 0); ties count against it, matching
            # the original `scores >= correct_score` convention.
            ranks.append(int(np.sum(chunk_scores >= chunk_scores[0])))
        mean_mrr = float(np.mean(1.0 / np.array(ranks)))
        return {"mean mrr": mean_mrr}
+        """
+        # Load the raw datasets
+        raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source()
+        output: Dict[str, datasets.Dataset] = {}
+        # Set random seed for consistency
+        random.seed(42)
+        # Create n_distractors distractors for each item
+        for split, dataset in raw_datasets.items():
+            if split == "test":
+                new_dataset = datasets.Dataset.from_dict({})
+                for item in dataset:
+                    # Add comment-code pair to new dataset
+                    new_dataset = new_dataset.add_item(item)
+                    other_items = [other_item for other_item in dataset if other_item != item]
+                    # Randomly select n_distractors other items
+                    random_items = random.sample(other_items, n_distractors)
+                    # Split input into comment and code
+                    input_parts = item["input"].split("[SEP]")
+                    for random_item in random_items:
+                        # Split random input into comment and code
+                        random_input_parts = random_item["input"].split("[SEP]")
+                        # Combine the "input" fields of the original and random items
+                        new_input = input_parts[0] + "[SEP]" + random_input_parts[1]
+                        new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]}
+                        # Add distractor comment-code pair to new dataset
+                        new_dataset = new_dataset.add_item(new_item)
+                output[split] = new_dataset
+            else:
+                output[split] = dataset
+        return output
+
+    def evaluate_predictions(
+        self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors
+    ) -> Dict[str, float]:
+        """Calculate the MRR score in chunks. One chunk consists of a true comment-code pair and n number of distractors
+        This function assumes that the predictions were made and passed onto this function unshuffled.
+        The test data is ordered with each true pair followed by n number of distractors
+        Args:
+            predictions: A list of dictionaries, where each dictionary contains the predicted values for an example.
+                The keys are strings and the values are floats (logit scores or similarity values).
+            gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task.
+ n_distractors: Number of distractor comment-code pair for each true pair. + Must be the same number as in the get_dataset_raw function + + Returns: + A dictionary containing key-value pairs for the evaluation metric(s) computed on the predicted + values. The keys are strings representing the name of the evaluation metric and the values are + floating-point numbers. + """ + ranks = [] + + batched_predictions = chunked(predictions, n_distractors + 1) + + for batch_idx, predictions in enumerate(batched_predictions): + correct_score = predictions[0]["score"] + scores = np.array([prediction["score"] for prediction in predictions]) + rank = np.sum(scores >= correct_score) + ranks.append(rank) + mean_mrr = np.mean(1.0 / np.array(ranks)) + + return {"mean mrr": mean_mrr} From bfb99626603231d97a19d322e0737e831f393fee Mon Sep 17 00:00:00 2001 From: drndr Date: Tue, 1 Aug 2023 16:50:45 +0200 Subject: [PATCH 22/57] Update doc.md --- src/genbench/tasks/nl_codesearch_mrr/doc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/genbench/tasks/nl_codesearch_mrr/doc.md b/src/genbench/tasks/nl_codesearch_mrr/doc.md index 26e01ef..a9933c9 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/doc.md +++ b/src/genbench/tasks/nl_codesearch_mrr/doc.md @@ -13,7 +13,7 @@ distractor sample: {"input": "Allocate sampled topics to the documents rather th **WebQuery** : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ **StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors \ -During evaluation for each true code-comment pair we create n number of distractors where the comment is matched with a random code snippet. 
The distractor samples are sampled consistently by setting the random seed in the get_raw_data function +During evaluation for each true code-comment pair we create n number of distractors where the comment is matched with a random code snippet. The distractor samples are sampled consistently by setting the random seed in the get_dataset_raw function **Dataset Size**:\ *Finetuning set:* \ From 160a41e9e3bc99ce5023bfc5e9bdae96c79951ce Mon Sep 17 00:00:00 2001 From: Amirhossein Kazemnejad <2122102+kazemnejad@users.noreply.github.com> Date: Tue, 1 Aug 2023 11:58:19 -0400 Subject: [PATCH 23/57] Add underscore as allowed characters --- .github/workflows/task_submission_ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/task_submission_ci.yml b/.github/workflows/task_submission_ci.yml index 8f5404e..b7731a9 100644 --- a/.github/workflows/task_submission_ci.yml +++ b/.github/workflows/task_submission_ci.yml @@ -59,7 +59,7 @@ jobs: - name: Parse the Task ID from PR's title id: pr_task_id run: | - task_id=$(echo '${{ github.event.pull_request.title }}' | sed -n -e 's/^\[Task Submission\][[:alnum:][:space:]()]\+[[:space:]]*(`\([^`]*\)`)[[:space:]]*.*/\1/p') + task_id=$(echo '${{ github.event.pull_request.title }}' | sed -n -e 's/^\[Task Submission\][[:alnum:][:space:]()_]\+[[:space:]]*(`\([^`]*\)`)[[:space:]]*.*/\1/p') echo "Task ID: $task_id" echo "task_id=$task_id" >> $GITHUB_OUTPUT shell: bash @@ -111,4 +111,4 @@ jobs: - name: Test Task run: | - genbench-cli test-task -i ${{ steps.pr_task_id.outputs.task_id }} --tests-dir ./tests \ No newline at end of file + genbench-cli test-task -i ${{ steps.pr_task_id.outputs.task_id }} --tests-dir ./tests From 7958b660575cbc40fe6da5b21b408db45710f6d4 Mon Sep 17 00:00:00 2001 From: drndr Date: Wed, 2 Aug 2023 12:16:15 +0200 Subject: [PATCH 24/57] fix indentation and docmd --- .../codesearchnet_adv/config.jsonnet | 30 +++++++---------- .../codesearchnet_go/config.jsonnet | 27 ++++++--------- 
.../codesearchnet_java/config.jsonnet | 31 +++++++----------- .../codesearchnet_javascript/config.jsonnet | 23 +++++-------- .../codesearchnet_php/config.jsonnet | 29 +++++++--------- .../codesearchnet_ruby/config.jsonnet | 27 ++++++--------- src/genbench/tasks/nl_codesearch_clf/doc.md | 24 ++++---------- .../statcodesearch/config.jsonnet | 31 +++++++----------- .../nl_codesearch_clf/webquery/config.jsonnet | 29 +++++++--------- .../GenBench Evaluation Card.pdf | Bin 0 -> 72032 bytes .../codesearchnet_adv/config.jsonnet | 26 ++++++--------- .../codesearchnet_go/config.jsonnet | 24 +++++--------- .../codesearchnet_java/config.jsonnet | 24 +++++--------- .../codesearchnet_javascript/config.jsonnet | 24 +++++--------- .../codesearchnet_php/config.jsonnet | 24 +++++--------- .../codesearchnet_ruby/config.jsonnet | 24 +++++--------- .../tasks/nl_codesearch_mrr/config.jsonnet | 9 ++--- src/genbench/tasks/nl_codesearch_mrr/doc.md | 22 ++++--------- .../statcodesearch/config.jsonnet | 22 +++++-------- .../nl_codesearch_mrr/webquery/config.jsonnet | 28 +++++++--------- 20 files changed, 174 insertions(+), 304 deletions(-) create mode 100644 src/genbench/tasks/nl_codesearch_mrr/GenBench Evaluation Card.pdf diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet index 1d042e4..0adb0c1 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet @@ -1,17 +1,15 @@ { name: 'Natural Language Codesearch Classification (codesearchnet_adv)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Classification (codesearchnet_adv) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures robustness in covariate shift', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'binary classification', - 'python', - 'robustness', - 'covariate shift', + 'binary classification', + 'python', + 'robustness', + 'covariate shift', ], authors: [ @@ -24,7 +22,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -35,26 +33,20 @@ evaluation_metrics: [ { hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, prompt_based_testing: { prompt_builder: { - // Currently, we follow BIG-bench options for prompt construction: - // https://github.com/google/BIG-bench/blob/main/docs/doc.md#optional-fields - instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', - input_prefix: '', + instruction_zero_shot: 'Given a code comment and a Python programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', output_prefix: '', choices_prefix: '', append_choices_to_input: false, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet index 7683834..4abe068 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Classification (codesearchnet_go)', - - // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_go) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'binary classification', - 'go', - 'cross-lingual' + 'binary classification', + 'go', + 'cross-lingual' ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_go/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -34,24 +32,19 @@ evaluation_metrics: [ { hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, }, ], preparation_strategies: { - - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', + instruction_zero_shot: 'Given a code comment and a Go programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', + input_prefix: '', output_prefix: '', choices_prefix: '', append_choices_to_input: false, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet index e9a56c3..33a70fa 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Classification (codesearchnet_java)', - - // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_java) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', - - // @TODO: Add a list of keywords that describe the task + keywords: [ 'codesearch', 'natural language query', - 'binary classification', - 'java', - 'cross-lingual' + 'binary classification', + 'java', + 'cross-lingual' ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_java/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -34,24 +32,19 @@ evaluation_metrics: [ { hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, - // A recipe for preparing the model to perform the task by configuring its prompt. 
- // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', + instruction_zero_shot: 'Given a code comment and a Java programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', output_prefix: '', choices_prefix: '', append_choices_to_input: false, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet index feaa37b..d1e4f6a 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Classification (codesearchnet_javascript)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Classification (codesearchnet_javascript) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'binary classification', - 'javascript', - 'cross-lingual' + 'binary classification', + 'javascript', + 'cross-lingual' ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_javascript/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -36,22 +34,17 @@ hf_id: 'accuracy', git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', best_score: 1.0, - }, + }, ], preparation_strategies: { - - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', + instruction_zero_shot: 'Given a code comment and an Javascript programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', + input_prefix: '', output_prefix: '', choices_prefix: '', append_choices_to_input: false, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet index 8e28a03..26f9ad4 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Classification (codesearchnet_php)', - - // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification (codesearchnet_php) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'binary classification', - 'php', - 'cross-lingual' + 'binary classification', + 'php', + 'cross-lingual' ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_php/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -34,24 +32,19 @@ evaluation_metrics: [ { hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, - // A recipe for preparing the model to perform the task by configuring its prompt. 
- // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', + instruction_zero_shot: 'Given a code comment and a PHP programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', output_prefix: '', choices_prefix: '', append_choices_to_input: false, diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet index e082b27..69eb6e5 100644 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Classification (codesearchnet_ruby)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Classification (codesearchnet_ruby) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'binary classification', - 'ruby', - 'cross-lingual' + 'binary classification', + 'ruby', + 'cross-lingual' ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_ruby/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -34,24 +32,19 @@ evaluation_metrics: [ { hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', + instruction_zero_shot: 'Given a code comment and a Ruby programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', + input_prefix: '', output_prefix: '', choices_prefix: '', append_choices_to_input: false, diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md index a0f66f5..f5fc40b 100644 --- a/src/genbench/tasks/nl_codesearch_clf/doc.md +++ b/src/genbench/tasks/nl_codesearch_clf/doc.md @@ -4,14 +4,14 @@ Language models can serve as a valuable tool for software developers to increase ## Examples Given a natural language query, determine if a given code snippet is relevant or not -{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ -{"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} +**match**: {"input": "Allocate sampled topics to the documents rather than estimate them . 
Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ +**no_match**: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . 
get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} ## Data Source **CodeSearchNet** : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ **CodeSearchNet Adv** : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ **WebQuery** : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ -**StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors \ +**StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors For each comment in each subset we sampled randomly another code snippet from given subset, to create a fully balanced binary classification dataset. 
@@ -26,7 +26,7 @@ For each comment in each subset we sampled randomly another code snippet from gi -CodeSearchNet Java test set 52k \ -CodeSearchNet Javascript test set 12k \ -CodeSearchNet PHP test set 56k \ - -StatCodeSearch test set TBD + -StatCodeSearch test set TBD ## Limitations and Bias TBD @@ -34,16 +34,6 @@ TBD TBD ## Further References -@article{husain2019codesearchnet, - title={Codesearchnet challenge: Evaluating the state of semantic code search}, - author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, - journal={arXiv preprint arXiv:1909.09436}, - year={2019} -} \ -@article{Lu2021CodeXGLUEAM, - title={CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation}, - author={Shuai Lu and Daya Guo and Shuo Ren and Junjie Huang and Alexey Svyatkovskiy and Ambrosio Blanco and Colin Clement and Dawn Drain and Daxin Jiang and Duyu Tang and Ge Li and Lidong Zhou and Linjun Shou and Long Zhou and Michele Tufano and Ming Gong and Ming Zhou and Nan Duan and Neel Sundaresan and Shao Kun Deng and Shengyu Fu and Shujie Liu}, - journal={ArXiv}, - year={2021}, - volume={abs/2102.04664} -*} +Husain, H., Wu, H. H., Gazit, T., Allamanis, M., & Brockschmidt, M. (2019). Codesearchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436. + +Lu, S., Guo, D., Ren, S., Huang, J., Svyatkovskiy, A., Blanco, A., Shujie, L. I. U. (2021, June). CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1). 
\ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet index 3e08da2..742d5f2 100644 --- a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet @@ -1,17 +1,15 @@ { name: 'Natural Language Codesearch Classification (statcodesearch)', - // @TODO: Add a description of the task - description: 'Natural Language Codesearch Classification (codesearchnet_go) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual and domain generalization', + description: 'Natural Language Codesearch Classification (statcodesearch) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual and domain generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'binary classification', - 'r', - 'cross-lingual', - 'domain-shift' + 'binary classification', + 'r', + 'cross-lingual', + 'domain-shift' ], authors: [ @@ -24,7 +22,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/statcodesearch/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -35,24 +33,19 @@ evaluation_metrics: [ { hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, - 
// A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', + instruction_zero_shot: 'Given a code comment and a R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', output_prefix: '', choices_prefix: '', append_choices_to_input: false, diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet index 9f4aef9..f76432f 100644 --- a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet @@ -1,17 +1,15 @@ { name: 'Natural Language Codesearch Classification (webquery)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Classification (webquery) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures robustness in covariate shift', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'binary classification', - 'python', - 'robustness', - 'covariate shift' + 'binary classification', + 'python', + 'robustness', + 'covariate shift' ], authors: [ @@ -24,7 +22,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/webquery/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -35,24 +33,19 @@ evaluation_metrics: [ { hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. prompt_based_testing: { prompt_builder: { - instruction_zero_shot: 'Given a code comment and an R programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', + instruction_zero_shot: 'Given a code comment and a Python programming language code snippet, determine if the comment accurately represents the function of the code. 
Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', + input_prefix: '', output_prefix: '', choices_prefix: '', append_choices_to_input: false, diff --git a/src/genbench/tasks/nl_codesearch_mrr/GenBench Evaluation Card.pdf b/src/genbench/tasks/nl_codesearch_mrr/GenBench Evaluation Card.pdf new file mode 100644 index 0000000000000000000000000000000000000000..3d4e16e3e1eb452ad3de5bf0c25dca0cae7c8c2d GIT binary patch literal 72032 zcmc$_Q*>qDx-S~rc4lnbPAV1Kww;P?+p5^MZKq<}wr~B<+WWM%+d2>D;kJ9{Yy+KR z{xH7TAHGf|FDg#QM9&68*7KbI1;YYh1lSo`!tn9}7^E$1O$;0@JWPxMOn*xN4rWFc zW&pzvfEIw0jTOMe$^y^WU_&e_@!|nmL;TIN1RV;uhAnd;H zXyXLKZH-Ob{#LL6 zSh)UGHnscP!U#|VFeuvDIscvEf88tnB^1E=m&d=;x&AMrxc-A221!wX7B4%8DZ2@m z0UL{{2@|J*sevJ*p~+tf7#R%=n3xTX*!X!lxlD~1O&I@jZOqQW#lgg4YGi0^z{bSJ z!f3+A%E`vB^N$8j&WK>q?Eg06|7bHM z0Lxzo{+Aiq+5e|Z`v0h;k6@&)ua72c*7dV=QM9~VXlf`(sq1%=A zkodBYUKXVuV*TaTMZ7Q;N`gcv*k~*cACT8(ocDWRXEG%2#tkdUP~V!6fj17u2z-SY zjmjMgNk_KEED^J19wqiSl!z&!sQO4FGn7&KQ^X|30m-Mgl1Xg50&7{l41sq}S9WLm zqZf_#U{x1!DvQ^TzO32-tbl^(6UGcuR*Dq&2_jNp2+SXHQOwdf=80~4qM8fFx+eEW zhUi1$`$JRY8}#>ZPSUpm1K^)*H4(H7bIj~ZxE{D%l8Ke03-xfPJ*{F9PhX@;SPpPm zR41L@d)lrk`DUkS%n+#N*2-wGC39#GKqUpQoC8#Drj_pe(U-T?^J(mp2Ff6d#N6Fi z^SpEkDyq#Wkz^>kVkjpIYMsCt2#l=U9ZDgM3(*^pOpKma5XzyfKj_1DQ?PB;I&Im{ zYlQSNnPJ1h7bS19WGnJ>)LiP4^Lq)qs5BRt4{A@M9zNiABz4|NSY-CQuSHG&`265tQZ&qUQ6VT-Bz%QEOkoum_N zsqpQ`dEY<PHNltWb)^NMT6B;lf0yxN_n=EgwL#zo)yG&@cRilcZ()yl1q&8)tW7fk03{B4{9)YK*+k4f) zjJMG&aoDm!1Ar|@0e0WAVnx@#kuL0UU_E&Zs-#~acduU@2Yub(j#Cq zw(gwcqxNif-7JzwJHoAjH48r;g+8 zAEe$Nri=>Vr}I2IEK(x2NQd?gW2U<}VjlarExe_Vg`_E&vVgqa=@Go<#kge>^B>>o zt9-6f9{U_5c7dKuIKf>ff1gk5YmzNu-!zQkJpZZ1>{F$N+}Zt!AhC(^WO=(%|Dfdg zZKluD^(|jAq*Rc$AM1ls;44p>qRG_Yjo$B1SAHQNl9prepKx>@0s5 z0RM_y%m5}PMwY*eo_`hpSuy>0AZKD`{2My|{VzAV1za9?4TByI?N_Q{UqnYoM`Np$ zA5foUfG5?}m8=5|J)J70VwUjl)(&*4t&w=o>$c}_v#rcY(s!-yrLUzNXwWHK8&?Y} zOsTklNS3JV%s?qHKn1-~?-cmX-rnKgL&!{j7I*+p=sTf+<-{K#UmMvlDnM&Y1OQxU 
zkV6LX;g)xBf+}y|18;N!*#aZm3L(?e10iCj-t@rf0EmIO@*r1m@}`019ULHp=CF-* zw7ZD(wRl#Xa(wZDrBx?^Yz+*Anguq2=VbvtmDLC_Phw+=Q0X9&L- ze5|)r;q@ViBj_MMpcxQ}TJ%xuw~M_iYxn@5oCeUw(G8%a?Ta))oA-?BGk}VS=V*|*l79CC>x*nXT;G5#D0n7Lv)2G`PY6Vt*@=;pA7HF9;*Ed%>7u)o5 z@8aZ>@Lr%MP3g8NE5Duje4FlDi|TIx0>3di#ssRpzi|R?Z3BM+3YdZX{*Jkafcol) z@qMA@S#Jdj`lfz%m;I(*-M2$v{Okfu`hCX~?cPmlBl(>Qh;r=i9oKt&?S1(czvt3_ z`_g`sjDIJed~3xAbQJ98dKCA-eF=Evp{~rm+eMlkwGZU92yPy_V`hCXD-%8}pN+S2 zYJS(qVncXsfm@hqjrhhQ!>Isy`d4oI8K}DY&K)weT@gkeLAgS7Irs;ByJ-SlY-nqL z7kF;b(}W%$gT76U38>^9AHH5E-A${~FZo(y&)ma<5+UVryxo%tkofp9Jl}I`26lcK zGx;SBtb#vXfVkOC6YB4S2z~F1gSX{|--n(skV5S!e{-Phf!kYt<3bCZ`+W=b1IbqS zl3bbDItF|Jy8~shfBSg>$#(b(`T$Y95XwXKp1u&iOzOxU`}Q7>(trES#Or(m=aPtg z{d4*T1_#Qj{o8`D@wf8c;{)=Jm*VaV^=sL$YJ({5*z9Nq-7V*<=O)(<2kHR4URW>9 zsSd^4Z2RVFU#5$-jpF^O^#yLA*$0B9;~L~3@g1N2C$*E#^t#Gulk=H9;njRG??Un) z#50NQfhY2$n;7G%Y0nau1jWZnxT+uBI0tUqCa2vV0@j=l4~Y) zGhoJkZQsqOsoY+>2gKis2y8u4OqHL-&p~t&1$S2HW6mHbo|ETYk3Bbzhj5;0$e07^ zGAVN8zlAU-(X5d!&%>u?9xM8R+7X^oHYLLCRA~r2Dn8<6pSU)>kPRh9*pFPr@F?g+ zqFOs2LO}6u5Id;?m!K`iDcbC)p7@w|lIIXC&_ARuTJc49_ZPv>%N|>3#P3nYHZqP)pQ02MEGhDkqzD}ta`xvR#-PD4@K869mY4tY2 zj?yB<4Y!ya<0e8=fTnRx37*nA@*ARQdCFFYh0?0iuXSXrz%_#F;SbtdpG@SFL%l6y z(iIV~^SsDvhR1uDaUsr&_$e)NejMGClx|V!F27WgmraBS$xbct54++mcomu8MK72| zpJEG8{3qj+-|h0EpU}p8F&Sm9#itk)#i@m~e3)-{JE~YO$U|Piw~PJ(+7-3L*}BK? 
zT2zRkr3UJ_p}fJlaw;tf$edcNa{Hi1Rw|aVzj>I~SpHCLz>u912Ix2?pfswu54Dlj z7Bm{jDWsHLLP_{U*}{Q?(_(*C@m$H7)Qq1UTr) z8k%l$Ha_IvTd3+x4?AFt$nzh9YPT;4L&LW-SZJ=9_Z#Wj_nWjxPy;pip)nNuWjzz3 z9qvVAU#pLo^hO62rP7aT@BuPzmu?n}1&f}ds8A+(gudk-8n^;B`&ud0+*!8c1%!}> zk4*bzjt)%G*4dVr>`#?c;SwY|x$QmudW1)a$MYTzkDv7go(fH8wzQ7}zTS5JuJmBu zE1FyiIpERo`rBPb?JPTebFlZ85@h9qJTi`P*KE$T)6W%?KF2)bkBh52A9 z5lU*MwkRAE2CF>FR0|%cI6;X<^kYpgo1Jg!Shug^LWYQ0?{#*&P?#}sy%qdNv-O)@ zatWQR)@LH~NPcwp$~E{I=L?B%wRASY=quqxKiC2_9>gZc>-Hv2MPZX;n=2J79wQMg z>kdJ}+AIoc{Ru2lgUA*n0|SU8*6kR?`MF5bahPX_gfXh)g0SRxNL81?lJ8)->2}`t z<@q6)pIR9Ag=Tr!HH-Ig?$JY!&ycn-yBJ|cQ#NHJVwVmD2NPmbF13%RB<^w>K)FgD zQ*B03NmUm%ASU>+JfmaW7rCFu+UM97PW^Z|?HT$w-nO}B`qpWQ63Q_ILhfPS9WV*T zeBZCvBVvA0uDbFD8$@LW0qJ@3 z`aNPhVVW|8`H%DDbzgMtD!ti;+UU%mRH|4W%SPTr!>R8XTHW1zp3db<1m*NshBsyp zK&D2eXcn?<;1P*h8V5-aE+}CfTrniY#?3`3}T;2nbMMGd=C>Q@}o2ej%4=WF35=?zz;^N!^VjUMtfd1wn| z5K8L6WG#(vpP=w){oc{|+mL``C^{QO_gIp|=fv#@9N)oO2k3p&G z4@ACuh_46fH)Uz^vPSaJ_yLLm6&+_6ID%(4g!knY0vYiHLshlGDS>yL4 z;3e&153$dZ;xwF(*Lrv_#&)X>O>x$Hz(FHO13=#@{hb)L;M&s*BO=W_k4-CkHLCaB zCR4kagRgr3(8sI^fv_ov8!VHdjIQ^*Z8_t3lnqu~&>x*X_hRZNB z%&rcNwI$qkmNS(j{}nRpm|p;Y{p_+5PlsF8eL0AI%^Us0Nk># zz7g)-C~N%XIzz(M78|)Z(YkAh(t2@CdnK*G9HabMH|fQzIp%A3zEk+A7QNrGWzDCj zkI{2zTs|{}q z$)jR%cKh~{gF7usKg9lkv`jJ^zQk^}0h84i6>(OXBaDkU!Wh_F+LCPB8@mR{`({cE z?xO-2gS0Tq`kUs36XNL*5brl0)`)NG9pI4bt8R$xM+RsyJ5 zxL7eu6lc(IJ%#OveMcY*r5MO0KVG^U#q1*5=FSF;rA1+jChX#*2_quoOL3}4ZGMFE zb#;9vLBV<>_B(@y4dHc2_{*IgC;h-qv)2)FqnI7KzPz{MJW?WjzaB9oLmMu5Unt;B zU>yj(bhyjC(IqC`Ts(q7Vmiio@%<>of3cw$`U+NpP~ykVUa643>g2n+-|es&n-dgx4M-QWgrIHIl4*6)_qC% zKC@}Dbua%82)}!pfP!+l8r_5nkD%i+wVi1nozBl9J)MX^7DRzjdYVaf>%y~;NcyOQ z&`iU~iu{#3?yV3+XBoT{X1`a!@#agpGts5!$#Z7L|31j1trK?u%&WezC5wsv#{|m- z$-AyBgo?z-0mO;&RwD6web@qlIb43e|BYF^BF^33+E0t{s^x&s;f$cRD%?!%aQ3pV zB$Qc65lF5vwx)cWDTl^cHwR^9L<$MgHhp zMY@LsGxQmbDzUKC1Di3!=pmp6$KG+?tK6T5e3z^}fG$I* zzj3>to-0k139hrI79!%mNL;ZPOhh+ekA#AQb5^b0UryElfFg7uJOmAf`2V#7|KDdmQ z5us|~F1;qR+6-dG|3gSn`fw!%rh^t#AtV41woc%P?k6LEtECikqyJ~O_;&xtPLzT# 
zvR1@q0&IEY_K`Gff?y~`cBWo;_MlCA`NP4kR^KPVn$eVnJ29sIr1q~!!wEIj(T?ZV z$`_cJ1r?IQSYV1|N1?_e5?XgiSbiNa{*XT|*Fg7vH_bEKN_>$}v8dCSm!G08@=Tba zCSq^Je9?2JSxM^7V+c#@=M;q3TqAaqJ%kpxK#^u@<-uU6nM8aO?XvU>=@g}~ykz#O zP^Vv$LT#BH{9{(c#)fW$KH0i--Ab)R;xzWBJh)JORio=`O3DMb1aq%MFG|urx6b=z zBXo)ag3VyT&Kct0zohfXMkRk{k} z?FO+9n8+lA_q=|@t3!&1GQhIPR+&f~!-&-&mZ80OoJ9uy?1sadljIk=+m2 zI9MmMM`~~SLp`^}4_-DcHTO<8db(^ZKBMi}J!dY;o+*83va8s&2D5eTtNk`Vej#w5 zuWF*Vnc|v}^DTR_(K*rl6JwlYop!W1BQusIlOmjZi;KTcyEYN0uO+}ev};fXa{G8* zA$e)*r*NUPjehd`(XyAIF%3ta8zK73{bkDPJ8-u|z5(!fI-05H_;QhepQxV?ASFHW z*{D)OXz_u@AYk!fl;+Q7A9YB+EfdpBw4(#$Skf(Kq}-F@qTL52UUfL=YXo{uOn=@N zEktbYv5M5?1T#aWwhSmJbMT-?Av>xEiO+wjOV+dLi{|bl*jAF(;cnr>ru9Gknp0ceB8?#w9L#YC!IQK>+)jUD zGbR~Rw_39EEU2HRihTc=GIQ%slWT3z9V^lN{^_gIgcm($xLjF|_j=Q)a$N&jY*R-J zI^PGT7_-55CRz09N~=q@7bes?3zPg((T!;bW|yFN+O0TbRoAR>TCJvVc2YUOa>%6A z!aG`#p~1k+`Z)XLH6L>%k6$|Jdq{DDam7{f@`F9sJ5{2m7?Z0=VTJAc;DnWKXV@qo z52H1OV4)6-W64`t7?%kAz~oATq1SI2X+sIwx={S1d74hWj<00xV1St6XS)Xvvv`Er zZa+{FG1gLo*N~v}-li0af<<0#b#;sUHG>wDs}$c8+j6sKP5%-4dfeE@#_3Q-8noHB z!mMGU5VAsap3!N8>3!EG-lo5CVZO+F^4E$znTgDx7;9#n!Kyii^^IDRyKIJ6IGA8thpr!K?{~ECta_UZEGE1s=gg+?Zw$uWYeXMY2-4jw$u{x z#d`nl(5%sE6s0QO9wvm7h2Y{#4aUy!GwNa~!$!f%{toz&^%UO~((IzE_SVF5lQ$KN zys52X(JmV&;qymOEO1Hjs}=6{vzi@*HC|G`kdMvdP!N8TG`AjiK`L^L8opUtuU(sg z6SlJAuE#6zPf>mH!trc}>nX#DbjLNlFOk|eL0ms72y)hD!k=|#zJH$Oy<^4M8`bws z^lyR@gjUIv8F5Iek3FF6T=5IEhb%>m(^N9cc?wa}q2s!bVQ96)3i)sXGP-}(Xl*b~ zIka+KE!ZmZJo0NY`PTQmX8eBa61#9ly>V*v#z601)m)THu!)@SIFM;;%h#L~Pntaz z9<8Yg5-Z7w6*2sPx%Xr{oi17gG6lw-ms6;-K6ka$9-M)%z|||n#Cq1C3M%aEVrxR& zl(>;qB8T(JxyyRObnEKY6Aw_aFtp0MnoE>inu~_rLsDEtgh;@QArw)Rcm#Q@>Pp3Y zRk26GDS5(6L1;rLv1M%J;l6B4RatK_OkWR+3n&ff-d5?3lD2WRZqN?RiC-3Y?I(<& zd}r6_1TpX6pYj>7_G1|TQOTxVN9RdIqBqD{(0(UvIH2ws`H7;fpav2abu|ZgK?*nJk3sVj0+r}#*)!{AdVk~_y>tFK{ zU@wWwHsvdnq(*CVt6!*b0l?3zvI=1&z`x$}2w#6#4@uGc$&FKGe!MXseFL z8zXk@!kg>?e=&lW`IKJ8Ro-Ud1gvJRd_-nbbb ze%2#}T|?JU#&%WnkSws31_r!1FO%naBC^likHl`Rd1}?u3{4KQG~4MVX&TN*uB~(d 
zts0$!VOnQu72|#6RQjZT??ykV)IgRbI|3Yn$exlNAAvBu+qVP~n#EVN#L6pfdQ&{Y z2{s~)FDM)|F~)hW)+jUhs##`w$JRRhG|I?NnICA(VhJFkFqg$u&I9c4`dq(Mx0~7- zgNEy0k$yZmgT1;m+Fz;B5YI0vk<6p z1mVSkyAi~0J-Z%wqU;xTArmrKZf-bWhz=$$zDwU<}Wk9pVqP_6sY9fbQ0@QI17bfTwJ+D{9YtaGQ*tP>U-)b1Otoo?bJMZc=v0lRXONG{b)^;zp49u3*h$&CD{!Jh*w#yV7Lh(YuDvU*kN3pt`jo;8SFak?lia{;xk`?>uk5F+G9=ArdSN1Q1* z^Hx#kJg*b^Z(d|%SOz;j5|6u&K~LksLrSe+JGu_3Rl6CwWY%aaC<>a=X$u_ffRqL% z^WC(Utm!(3Fi}i~W-?Y}%){-Fv+Ho;D?RvHN5#X{KL@Yj;YB(EiuHaX+3X74S94~_ zK2M9-@|hD=228)9`woW<37ud{F+c(WoJJvBw6CkAHLVdk9= z;vkV(L{-CL*6E`y1ntuM6NnH9mrNZ6a?N#nS&p12gxLdMD5)V?lo<}or^>JRFIB0N zih6C_q+Ul8l|tIkiZM^FG?uptU~tC;N!J@)kb_dzxOjG9GW(@L*=r|YZhsmlvn@So zvnSUd7qAHmVShYW6L!UYKTOw#wW`8d0yC3 zTmaC^#)Pcr;E0G=7<5~itY*+-e7!gwA z0+~NUy(VZy5Is{mk@{;+JjOT)qiKPD{4N%RFMz+p;Ho6-@njSy2Oxy=I&GurH{M(_ z61qj?n7+_6dYt0wbau07I_)tdylkp~4{&d9E&VbJ@5rC%Y(LaXhfNoNUnY(|tiDN# zY8ywNi_?XjVMg*)Fh#ZSDIPX~M*8*A+A^~t`QqtI@i_)T*<_FONR}ao6i`mTUHz64 zt4rL$fLCMrKy&vyzL%feg*p+m*g{LQY4q814PnuwSVg&0B5Nlqm$E%WF_0oS=W+OVH4Z3p`V7n!HD}s^jcMe>Y~GLne`y#7mTLK3~y){$L~+var)g} zf(o~>w`9WewpirobVC)E5F%t6KyF@**pz>z+%cYc8F(42{f=0V*SF)4ERIh1Ayp+B z(N=&(fcK__jx)8U_ypK@f30*Tb^ETdGw2SF+Mgj~v||YOP6z_%CksB<0g~<=Am~OH zv^trD%l8hMlnh5jVO>?Jn2>YBXPIc4XGH<$$0!CDH-_Z0pd#{yE%mn&Y0-!)W)3MvTji8>^j`#Obf*^Cwz zj3jA|0U`s)*Q!N-ew(*A%Y=v7yatxf8TsT#=PSHo3xIxaE|WcCEeiyNX~q{|t%f5p z@VE+C;?!g=8=)>-QO#Z?yd{HyRgYw(C6xQn>;}%8Xkf6xSi9zRb(fcr7pe;WCNvg2 z$@6Hd=f&G|+Yc(^rpV5IQb%RUQ&v#HDPI4XFF8LtyzhoG9MkZ}YFSnQEa}qu#d}Ra z9yu}987kX`8iD{6vHT8U#4)Ju6u0>l3C`Phf5%skZ580uKqkEF`dRvWUCRE)d8eD1 z0tF8z=uihJE+6}^1V_lQjkN44Y`W&Tt<~$fsg6%x@qGJT-^d15k->9Mkl6E3M&~Fr z!a!fu_hvBCybW&l1WF%u4r_spWf5pcbBV&GuuM5;D_R? 
zhaYkK07_<7`g3p3C)o3nQWTvr?y!;NyIYV1Zlcy4mG>!3LNdDscT%lKaMISFFo(u( zVpv%}@50yK6=ivP@T|EL#dDmjdcs-SQ3k8sd2T5LLJrLDvD-N}A}t~%?SCEfRpxW5 zc8VvSNfQOr8SDb*m~Xv!uHS`d^`WIs?sWxx-ooufMs11Zldp+DSK$@Qj$3s^gB(313&O;-v)` zA~YP!f(-@gMZ68Xjz2NeAea$N)FawSqikJbPu+=jhjGDPkUV*q;~IoZc-*y_F0&|S zOIBP#kw-E@9z;OLnc(EwglsW%GR8gmVR`MnD4*JS(&x%qVo?lZ0;m7Ujc9@1!OdJR z@47P;TIY}LTH5VbBFUQqgOE6tCa|>nMKv9A{)IEJ zSKfuc(tcf4m!>Tawk(<==~tyyVeSl-2S3fIgP*gma0GWc_%gqfU2?2Q$KYF&koPAN zzGa0~c(X!IEwxP~Uw|;lovKfmbqaFXDT5ev+P%bCgY)phfGI-PM#{_HP3#X8kn9$| zGLKfm^Eu*HQE{vkO;4}8o}x$Axla>C!Ifj z8r&B?ZTJy&=X6?xkR5kypd&gDDZ)71CRP3}FB}xzowV<_ z)A_yRHkS+@|J`!)+)5!m7Sp5JvrcyMM7#QcNG;Sl$sM@*mF995FWIpH$0-0gBy7(2 zL4+qTihx$C%SW)*%Z;qG&BJDMFrX%tJ#0!vi~50inWv5$uwWWzcTIPS>N%Ac?V~>v zK8cm{xiNiIX(KeK-rQ&&<-}004}p==JihMP5O!Z{WY|$0-jR?6w(-m?HUdZH6eOhS zb6g-cZK~K=3-cQaDeAh)4p6RfhG)Qc27w{}{Q>SAL$0=*e(c{1B@2AODE1vvptfK| zB-fI}l#*?cG-T*6R*Gv?fMQv3WEB|4d;z_xc&O5^5gr@*Y%bt-kd71~#$5(6XO8N> z)Z=mCAyVndO@bmnm$;-CSMADL;~R0h@?(SZVQTb|Tv-u4fG6N$Ctkc&`%nlqf~@F6 zee1^Fi*HogZjG2B8Ey_L_|8?*34MOm>LGU32TD!zseX+!@M4e+n#uCjy ze0qbAOGhC0dxuSiz8FH&MO>heCJq0!Yi@UW5P&5j#C+Clh6jEwIMHnw87q<2z7#lM zQaohRv{v%25{QSqoYcsNC4(lwa_+MHNt|dp7G&JXBEN$Kqcbtr!VEIn;G9b*f%LrZ&pg6mm*&7+ILu-4b97Fspp zSlWqb1nBEBe_E}<23_w(Aes6@tYOHQ-q9!>t+yg6c=Ib=!i_eL3FxK3oA-g_lzOh>) z8CSg^b=VRN-9S?DXQ&EqmCwvBzjUPwuXD9APxTEK%NlJDB6q2Xm!)du_TbAsTvxo~ z@axH)Sqs{{p$Vmpl|5@ONXlb>xd9i9XPjnYi;=%K9x)L&U#5r}86nMSPf#^l!>G&& zZ6Nrf0B^q-k{kB94z8}m$2z%w9EL<_h6piU9)jjF5Q9v!M$^Ef;v@|RxBZcGi#>0k zWEkr&N1ljsmA@^$_&COP_rJ@Zha?V&O1I)#!-dF*Yi6{3>oT5$0LP4V^j~@TGjjc6 z%0=^#*ePKvmee6in&ILGVG~mO()ULWZU{{YjYl8=t{__>d5?XFD5t+l$g;M(kFFV#i0Kxj9sUsb2g99Ut@5YQEgp zBQ#CPE0LDhKYTQt8k)JQe{Ot^nt~ozys$FLwe(1xa+kWIZ)@L%_U1@$z?*zkE@8$t z2eMrYUnX`o;EgMyS~75t*t7NaE)LibR(Pco!1;q##0AHpuKAnn1%vG2It`w~xwz%> zzNUkYU`VA0tH^mp5IhHS<)oK;s#-No1?Haa6PG$L6Mi6e(TqC>%`T{0dPRNngH7WJ zZV1WeWC1K@aU0Z;Os2B$^6*j%U`$(R0uzF>d6gG%$d-_0&oX}hp(iQ~fv1$PdvKfg z;^-dxQ^XXYxo=f?GDVI^PwBivA)EdK%A)m}jn9xPj0o1j)QP_Q 
z(?KS+&a>fX>GXvPoA1Y!S{f#Mf>Y~Tis)&RpMKUkoz2qn;f(_sgsTe2p5WW>idkIY zY)fB!u7Pd5zga%}ZfjnIjxq@-E@Oiiof{enB;W{>%=eLHCO_8LLTH3hkdi2gtAt|I zQCj9O6(+aS;IY%hX_J{Dx&E+tj&eD0=^h<~v+TZ4(J%`TA$+oXg4;`4WhDKDIfgsT&QXTk8=iJ zlFQ1Dm!aUuy_{JzuLzWx9nFt!@o7o z4XeLg(G(I~FOCc8+DO$3`ZYXiARe}eq7kz+|EY2Wil((>b#?G4tuR$I>3@wH#dPY~ zxTm&Ny?R1?IuqpN^N~*ZEy+U`_M7$(ol*?(2|feJ zlzezIJhDVo{oCG~nAMY>N;xh6pQV1BZHuYPq>z1LAl+hf`A^DI8M{ZI(L4f{Sl6Vu z+4-BtoEeNZIQ+wmFXydcVO8-v_yircEq!-AS^{tR1QIy{Rkp^?Sbhx(-J)J_!mff9 zr(37UeBqg;&pD@dgeU)^*>t)1Be5t)4FL7b8?) z{`c3{Aw+qo%$xIh6Hhu7#hc;g5RyDK}; zw44QVn-`K(XlL`=e5CyLIO;7rT-_YjxBa-Z?Ri1_W(KpOnAnS4w#sALkV12ZJLxHXpXp}lzBP%7u#q~bmuurnL5BK3U_B+ngq_y!YLAkcm3vR=jOuc_kJ4bT ztMliRdDf0w_(BipCq3}&?eEIjQxshVB1#o#%?D(zc-4p^V@#5cu3GhzEn_e>I>D|4 zjd$NG|3vS&dY1I@X6HpmUX7z#Kgm$fX~DngOOSmOSR$v=m6Ocr3)}(J%kC3-wUq^~ zg{#@U@;+2X!arO_@0_Om+f9$%Z zXK)L5;p)F{Ok}bm56KknvTq+`j*VxR_C1<*Q!Z_myVKHeK9XGoju@}eifLy=s!l?x zoL#VVZYA7wK?Gmo3qb-a8Yxe7&QvMowa$pz^lMBpd@XNy1(qE+jvq&#)F6#vssyj z_7>!I+q>dU2?!S2AGF*oZRN_&xAyZud2q@-d_SB*9TMf)?!nMjjuo6(J6v-nE0p~( z$QrUM$v#jjv*YShAk;tf?mx5jnf}?I^naJG&%ycc>G~`zO#gec)&EG>XJuvO`j2$| zto3<%xIVJh^PQ`!D}oJ|+Ft#@j*uPyoj#)04v3bWE2ONQEBn>E?5>&TZGn2!2LJ>$aAHTw~ z%xG}IaBRRe5J)>fQIeC>gL_?FBXE2EV}`_UDGQ-@cw}VYrG!UtlHkPJ$QA~KGCiQm z|H%$}O6C&i4B?1fu=dzDI1jxMA`t&oLw#d&GhOCVCk;QVD(Dmo_=;WMsuwa3c6Jrs z9QdWcz^}fH@1|xt4Y>>mXNjHOcWe=#w(qKVTCg{>cX}8P%F+Ga9$W(`8`y3P_{=c^ z2wehj!eBJi1BV6dI|I=>Jo%Js@9X)EJPPqp(oojc7T8rlHM#&{3|`w<2MVHGh@Rd@ zpfB!6HM)l=qQrw4%HgZTp@hq$)W8uaK*It4BeD!wdK>4XgAdcm3|FESwjnZllaQ|d z1Nq2iToC{Z4e99#fVv2Hk?>AnpBB7tsq?J=aH%I0Lbw84_xKmVkM`filGAGAYp@Ej z|J^9SHP$1uB^vfUTIr_`gh@j~gGdMobOHwCk*3w~fvh{Qfc#RDaEW-U1=+n7gAfBW z(L(Om&7RWRLMUp7sl)*Qy10OOc>HSF>p>>Q#qI4M#)D%7)m-&M`VRjhhtT{A-bUD7 z9R|&tc^l?q2RbLr{pQ8)o}1_gHa>ohea$_d$S>8!(m z%KHpGfT+~2JhIcX{+?0(UO4=Y-SriH?TP>J#U#D7GBF_q z>9d2)JGg0v$osY_hySp$NcO{EuYUU0qJd=h)&gUSOWyPyHs2?<*#~ZvZvxKL`W{N@ zImqy}PS1viDm~jVydP--(f8oV_%8C)qPvf^QwyEICHqze-ey1UQTpjWiBJ2b&g4pO 
z>sQv$P)4!0PSit0@5iyePOk~X_eIL&7mR%XntB>ufLnu`k-z21IM<8}=o< z0fceui-hPWwnsP!sUQD_xMu)bJNgmm224BtE!1hp-QwCCpELJPP7I`hp}aaxB`TpJUqh$l_w@Q2m++BSqM zC8+?-^MaYg+f>9vo~kfdRL;-+*HFVI??EV0X`K8H{ux<*O@G4Q8AQILN+vvQclo79 zhFfjV<8FVRl4)aurY=h_RUK&ne96*GVpihVU>MW#hiN90d+`GS>0!I4Ow+vbX4Q7Z zO!dQhl7~HvfHwj$Cp5Qno7$0$8IP(Ii`ow7J*B<#U1Sj*qbO-b&&X?vvHfXJO!UeJ z1~*n%j5yOSGihA|VWTZGxthjTBxuWvGFM!U3;p$JS+Jm4q$eX=3S~?VLu{VJ3VpR^ z90|jK3NC(-@Mj*{A+>ee#&%kp#PF1tVoN<>mTQ+PM9_@(PQvpn)zFalblZUht*a26 z(3WY_YlFX zGhZt!Jvtfr;z&d(v{(6$nYs@j&BpO@Sx)Xto$d*<+q)P2J z0nf1ru(gXlIXOnouUBv6#Wl1*1A5VuVN9NbGpv1R0GEzBVeHg*`C=Ft$k1w;_*RRGzD1T zKP@YIb7N@7nUyJ^w!8@WPzVgA>{B4DYRTD8KR}DALzNLUpiq6;xZ&T1CxWmdcrD3T zCJpVPgFdi=cw4~&+D9@W*<(6z;)&(0^3C$^?OkPEQy6!d6N=oh#5}gfv$ZqMtt#K! zJUENw4z2P6Gj>+C)R|G{3&ZsS^&y~ft?CfH&MNfJ{XZ>q_CNbunuyLDnl9oV?*f5d z-rmZ~o#TnTP~)X?3Ptcp_v5XxJ_lo@bP-jAWSceiq=;@_vS|lV&OCvy{v46(nSrY{ zgT{=o%0NS5WwK^*YuLL_9W-7?QxJ@8MCNiqC-=24SojG^P0*LXl;EcOwBJ@~OW7`3 z*c_s|epkDb0-tj`oCYU+bSb9LTdn@6%;b3H-dg4g>>%jnIi@Z7&FYBY^;@g71{+7I z9cGdS#;qsxq#et_Bm})vW`OKr(1dv%Su%_-)k1+7&Ai`ZMMb@&DS@W-P??JAk^qrl zJR=Rop5uY%7QxJ~KqUSbDA>&l49WeU3uRTkDe5l%&GFcs66SX97pmmC=RNnTw&4;} z-;CX2Ijf@YukM<6@6!XC?by0}m-sE-;2O>&XE%WnJ74T94HN%~GO>sj!(q=F0-;)* z(5p-64x6M^jByAQnbTJzDQXU<{e1AX3xdM;PFdp!3?+1}AcRn#b!$ypg}^=G{6#gy z5#!2Ropvq+-RGo{XVex@VCAku0rJq=P2Z|dLk?%kQw>OEEP=>Tjc-wx%^Bntf%uT2 z!x}RYf4v{|ehdT;2i@>z+M1w9QaubXeM(>sWqPejm@N3RYAUAVZky=0pTnp)h2h)9 z`0loep7p9erJ46zs(nCtg|$Ld?ddaXMguEg z+S5QpzV#Dap_VffYjb)H-8T~rmSu~mhjIqw^2x+Y=&MKTY19Lp`hzuyCv z^NBeiyH&N)0&8bxZy0Iwxa2sxL4u2q}^AblbU+?ei0Q zA?@`ocT4iCZX2*$F@_@(G#A$8)ag zQ~uSYj#l?bVez*f0>hQ`t>k#;aS)aN9F+D!-Nu1vkB7GN-#O~bfC;wuX!cTn%it0` z#y^>Vpf@#(3&8TEH#1H`|mN@^uG&O;sIc$m!mMHERsg%=I znAoi`w%k3W&cp-bJO>498iQei*@P!b*zr(`?^qnC@sxU<%|S=k7dACioK0vlWKzWH z5?OE3rpac0S>9i~?ItTmL$z2B!6{C*oENFgsc-F+?s!>H%B zrq1$+6{nvE*`o#cs2xNjDaL6bU6oR=!<7>c4vLbMnmr0+5wz-wzCjz0UW30$;t;57 z6#WRx{p+2|IWy~!h{;0PuhCNX``AptM>ZqD|n_g@Ddt{Pcugl2st! 
zYErK?@#KSPC}{`IpMgCvHg6R3jLMn5CanRg(!c5zUhy<@*IlK}lQWOQQg(meCqm=y z&~@&bA%yuz^k|$@8@i&MeMv{aS}vwgr0IK|gQls6D{iAb2ugb^|FR%g$b)9^(8jNQ zc##dz$$ti*p;-`0elQ(!759(U0Y#-X-i{fKzdJUUxqP-l4E)}4L}>Y>6Q+~O)8*`a zfLxgv?n#Mf1Y6=BYuYu#zbG11kw#0P|0{2Ovz09b|58dFZb_1Jv~S+2e4Kg0y%6cT z_X#X!+@K=&@H_4<_L-~uZrL}VH~IA3w<4~fs5W7EWG6PfS&_GNm350Y&A6Vxzk7=D%#|EUc?BkqS=$-mMUDXL>vt3#VRLtMV4Gjt6n$X z&lbDGrUbU7iGE6!4tyF~u3)iD(L!-Iq&~ppEuO`_;6g9vgT2&!Gr+sz1oYXeL=TUL zCbC0!=BwbvD;{ z@bIkY#%-b%7r+!>^#Jc|VVWw_h0WqQYvG*zW45ZK!9SWWOyMztgG1uJNun^E*>jzT zY!QC)GC8~pwfJ&1WhRQhG_%?%(D!2>D}zG)1apuk^Tg^zkoDD_H^{inCWpGFrs?X$ zfn2+;Nb$p%b2FkVr;XIm`9)99?|CKvTu~!9(l$i5W(IemZd$mj$9)#RKS4qwPTb=# zbCA*U^MVyCE8(i)ARvxy8e5=BS4oMMES&_#o?^s&u^br~%a_lXDSyL|;FI~Yc;)_v zpC0C!goQyf2G-3TwH+wohq)H+r2^wO3DsqY?vP~>6z22x)p+Cis}u)UY=MVK|7+dL zW4QQ8&Q1L$!t_u0ohujD0z^ka&PAy2X};R6;^fU0?$8O`1QMcH0@K#G`t2(QaZxWQ zUTqCuHnL<@=4B{|=1!UCAF#1dQ-hE1kOkigB+XR(v_ZjzVWLyGo&&8e@6GN{A}c{2 z-AY)Gkrb8`58Ihy?-8{Fis@N`9jae%wVOczXM}luVGq@7zp9ZiAoP`*^6Oe z=wp&^{xS3MXd#?m{abZ@Q^i=EJvL|kP45C-r1JF-fvxbj!1nU(Sb+DK zq5t198zv?bc}RSM!yOM}Q&(oShE0pH76`rJ}&08$m5 zjKd8`KqTICb0VShLkVpii0pBBg00&1E7W}1%3_3VEy=1*fYAjFSz9Tuk}MWDmdIntZ{B(lI;HGj-Ci-Hien~7cA?6mnvd%*XQjRM;w>$Fn_v_M zW;qisPJXg22doOBFDO11EFnwpJp{`7zY5dq!M~|e?<6faNtyfuC{UskB5lY^rkLOs zaRRkU{cz4*W{0bXxyo9nghr|^1UE^wQ1PSVXNb#=4wQNc)) zqz}mW+P0fU$gsE_F7`vHBs46Ju4}Wu7|X_-&a&()izdHk>=W_$dz^9yN*_`b1TFWF16WpP3v_jP_3MpYdEZSTklkh&#?W zc^1qmLazP|=PtJIM)VC_s)C-WX4Fyslzw1Ci<(&UGDvlt1E#f~7+o^V-+R5Y9^HYA zf{R8^Fy)5KFZF zq%G^2smR)4P|a9yK0oWve(!YOpKDQM2K~8c0p`ly71|BXM)R`|;mENh|D>pDFCKCQ ztz9!edAntK%A-jWnj|>gtQptk}5OVoG*Fi|mHW2y+F;leFzM%oE{%Z6Q? zv!y&rXP*$Yy`dwe@G*FSyA^o~wv~cy*ETFkb9u@!GOv8?A!)2PuB zb#BywzAdfitGAOb^fTT{MO53F3?W7faAv#hbT3bG? 
zuNatI-lZ|4Ii4!mKPpAGCKwZS+y8Rq6V3!;;kq=-!sbtz-OZmitd^a)$Y+}5IXoFu z>p>My`yHFgifj+J0lhan`71NjW#MpL25vZJ3D%qDwx8c3nF?#cNksDnr&^Jx4<-1I zHUi$?AaVDII)1+{VP3Fvv2-mao&!cY7eO% zBYR{8Y7JGf z)_Qmu1VoucDqe!9iE{Tq_aHMToF`NORbf0FCCf!gY$7-_;gGoFI&fyN3Sw&m9 zUVawZ(6pE%9HOB-IOx|*8EN0J#Hpo*Wu18E1=!+O<-E&{UtTLoW3&jw zv^-XN76Quasj~80o5ordwA$}3yl&?!cj)#<>QaintK=OeBH$}-jq-yWh6rtXM)KD7 z_!CHXA3Bn{-)bs)fawsA7i=L}7Lg6~SQ{}Ei*4{=ULbRAqRf!;!yxkFKot-!+gai-|x2K z?yp?{8_kBj58@D=&oxoE7c%y36_GGl6#S zx6Y_$s_BS)4LPRo>@?PtP?^+f^+PoW*^G$ z=$yM3u4-y;vkxbiIk&`f{-m%9%+TTb5}oIlm>@cZVpqmk1{C^!Y z1Tt7^pBV!(LUTY0BB$Q->j{%-0g$w%4ajvtFDIOZ+d*Z6JQoE(Nxwf;ZGv8MRVEan z8j>=x4D|f#p@wg7K~^GGEW3tAZmi_iX-^>;>)hHDl4e~L!cdgdJB3{W8{xAvRWRho z$Zb-BtYp2j{k##>!2TiH;Kbmle4NA1SI4?w<+>I>775Xv%pDG1(E2H1CDcFf_B`t7>*tM3S@^BV!%o9SDWLOhfED_eIVA0gtx*?ptgs`mN3mW-_~ znGZhU@v}!CHN6jzWr!70D*bUue#!S%g#oo! zWh!+bFw}?4b+QA>7W&-At-O04x`(S{!bRmWcNC!V#Zb{LD;{R3tENG{3fV7RZ-#x) zcAb&fftIA$YQ3pAX??rIufz%v^t2_BSgE3SAG5N3u}m16jgs}Ar68BH-2}52yI7|w z3(VW7cdZJAKKrt&-_jc^Qpv ztZl>v@OpG;3fbExV(7k?;FMWSaJqq!-{1U4k=X4KDugTUk1SE^NO?CK5;OEJDf8;%&DaG+-Qpy1&HJ_B}91H$(k-*S>z;A39vfQF4^1JY=)GMj?w zaK*U8;ob0M?_5I3Q6P(RaMyW66U9bkwZ@b7<66K7NtW3c)g*7GmSG?b%49#152)6t zZV*#Ba107IoRs~lrz$=TvGz@qWP%6A!3ZK=XglBs8?JCj0-F^K~*%Q-{-MEU6;ac$Q-F1Gc`=^zUK2U*a9yp=Au=3 z=(tApZ?%==(KHxM?UH0SvAU2QHrk&W66#d@T~i=>z8}eV+ujZs=szW*P(GFTzYuwh zy|j=21opc7f=)LX38)|?hKESU4k3_wgMjTd*;}A?y|uIS2ZHPs+$Gwn8P_2l{C@>8 z$7JtLA1Y-^;{SR9E`vL&IHJA+0EmOWlrNPBt@KM!A0IXiEtT?a-=0TY>lsz}nIUQ6 zlKfVWXRa+b93o(y)0a@##1TJx{)kZ*9fX%hggqr^=3WG!i*so2C5*L%0Ui=lfAOeK zD_0AT3%w#!r^}$QA>=P1SA<8ZDYd(T`e)UWN^KvNF2xYhB$1wXQaHn3yLvN~BOpx| zwH{aMfzGnr7OpO(3Qm_8@#FC#2qFH}k_R|7rzEH~Kq!~Gs~Gfln816#v6l@8{Mc9R zEY7H0YOjIcK=XWvL{E1P+_@gRkIco)r6HtVDAMzfVO~qFt0$CCEynjRGrmffjvbop zFwvS_W?RErHf(*~6omO?Wz{Y<%oE(BKX{pz_LAKIZklN|Iw?F&#}X*K8PAGnh8K?+g+apBaT3Dsl$%s1YVK{N6U% zhuW2)qM8lN2T;e}whkl~6PnO4KZD-ow?CRy>wxaq*LsRU$Nzo42?!xR8U=X+{@qob zoM78k-9v*)+Cm=PdcTh8T5xHye5755?crtTnr;ekMczH>vPT>#CNX!gVp5nAtB0le 
z9;}VmA(JAJj9S%!?$Z2o_HalybJw}l+`A7Odee{-RyXEt7 z;aR6cc<0^x!h2ClLnc)<8l{w*w|z;`Thop_TnLqbBWesKS3~!CZGlkUejb||&dZD; zwm&=5vUa3=ymsR=(ns7}NI z`eJrZJD-DE?qY_d9$As*Y5aLv_1v&%iBj1)Ys($ttzm-wj7fB!vB$I`2e+I%SZbky z2}aFMfThCk42vy-u}e)`6Eckz>cqz+RH%W8ya+#{tXPcR zfn><2;2jBhznMN0Mc}~!`U_!okX~@MNEMC1q_#>u`fT$_O7VAMb$uTiK7V41RCekP zoAXrOf_ck)I=uwm-GniNfBe})4=6Ophs)wS_)Mvk0A`#ki{r_tt@-aWjT8c*d_)r>DokuNA*X+J@Bstv?g+u zcnPEB4c?%fp1TiXTmTMNoR!ZwD^1?D%BWOY5&!4{m|MO3KVopa<31F-$G6D!7Uq$N z|Ik(iMK+me`|s5PQY5rwNTyDIZGBL*dAZ9(6P;(mdfQugh|7B{Rhj0UB$lilEJT93 z>sHuxOqi4)VjJ|T(qeUGh|Gi)qhsQ+1S5hsYh%Sgj}Whb*-iog8aa^4bQF(5NHl0iIoJaAY!+d15jf!oCp7d2 z`$&{$dFiW2T&jNWC&`@*+z=r>q$QYXzd+Y`FJA}?0(NmE!T&h-l|+&CzF+gq_wf+@ zxiGa9hG{Es%h_gewTy|dL=wUt$42S8PBIXt-enE$PG}0MeBFwq)_h)~P+Ff%OmJB? zwS!sQg)f4Ym1u1meZOn8fXV|xA;Rd5t=RYlR6{srC4133Mu}iZLgLR4=O2)g=n;0O zB|V1i<-=FW_mnM;OkwGwy|Ec%?usGxMUTLVv%G5a8}Q@Le>~7z3O_Jz#iequsmgi= zkUOdbj{GnU=%M9Fgu~iDCvo`HEsd?1_AgyN;R*8av!&6CNS)I|&>)`GcINY13gKc`Dbj=w*AhJZ7sK7pRl;J&vd#jR zBPe{9_>OWfCbPTB+`czs`c7dK>jiBAPH!l>$7*qE|sXOX=LbcIxrKW#B=6z*GvD z1Alj-s>ZpodxA>t)Td#j%nr|QU^4iagsz|{QuZ*fK3dR6a)R>vzISyorI z0(?u0Fx5-wkh*(LzXJ8~tgOsXWiXgglnh2nn_&#|l4nI~dhg=fquRb9 zsQ(RSUHRF8?c#xKq>iu8dt=Oe-?6%3-m{{IWCz(N6BD5Y!)?AAd;OD4=6S1^PBtg&{v{izmA4~LaEiY}25L7hBSjsWl+JEXZBg{*WY0_9-^KvE_>8u_1 zlv;*O=+(4 zLRgWDci7h3M_W(#KQdgJR{d@uBt)Kyu(ABz;Q^(?{RvV+aX8liI=FKb3;(O9!w^wP z1_>}r`LIvGA6cYa^MC@+Pw(4v&{`(AnIaVEmQwAK3=Na`rs(9%SI(kv_i;s8bDV7LY;4MyO5J( zQqaP#6Tk?n%%d-{mZtDcvei~0VN>6t30FigvqO?K8zZNTCMa-a-#4jukpu@nH907W z3p6&ZG05=Fc%Z-{)XA=lB>VF5*?viz$si|O$McBkvDVogc5c+JFMhmbM$UE6R&JwIl?lR-C^X67eqiv_3K@^ z^*MnSapd$X9@{e)$K+H=kOMsCi~EV`Alz8Q_l2YKOzJ7hZA;!i0@>;^*N37-1nw%0 z01>=UBib`J>PBABSz4D(*G3kq_#E&hs9;vkL>C-_hB%Hz0ED zXJrfUyrD3?A~85nqyCes`)5NJckyhVMr=_93VgKeVON;B2ZcxY`Kyv%{Sh}olyO=e zQzj}UNwKlp%abFG$GC|!qBg~JCi_;^c;#NuR(RSh(t2A7>Sj{jspzO*j%fu>Y-Y`Wh=gQhMUokv^D45u$$ z_zu0gp^&fag+eA*I1}a#t$N(IoCenhHpb@SuVU(*3=)pssrj<6YdW7(6CGkEi7?)_ z(0Wc;%K`~JN>>R9?{;S8!oG*Fou8FaA*KHOWV;4Fs{z+&N}lCQ7qgEy>M`e#sW|Kt 
zgr(2?=MSpijYcRX-4_MxIK7{V;Q4Bo8^T;ZVQTHI(m?@)lJ@(R=I#sx;mSH+<(^kx zFUQoX-UP+ROtn2*QACYa(JdWyNhHX~JMA#W;OH%>lrS~x$qz_B0$&ptHo2WZa20U^ z!u=8Ilg?2;BZ^^83FqmAatG_?Wk1UB(SyOKJ1R9T>h>?3L6|=W^_)V)yHN1(Cz+A> z!6>(O>2+D6bu* zcL0Z*oSWv03x+MqJ|!sK=yA7TTbtzc#tH-_bR}aHsl486BiwE`al^dF!POz!g;JU}6Pc6%KRA`XQpt>)bjLQh+w_8{G|&AOu3fFlxL&*AgO!d^ zE6zo8>#qn7u0YvSKG&}Cw!aKRbi2My`+hT+9~?8_&_))8RbClS`m_|yVGVqHU2Pi5 z-u+^xs2=g}glG4WLY)MJ62&Y=!Y=p3++(rl0XQ#0y#=afp&9qSvyYE)(;QEBsA=La zGlG^8OcV=>F%VoLrRlo2uC%1mUVGSJC8rM9t#|kE*J(3Pf@Mv*r*Dq0;0(d7=t3$P zvByaonH=Eo;oFN?^llQfy0bhCecB+o(Xq?0h?kru4bY7nIAD(zh932HhpD0?^m8{j z3TC0>HdDueIMNmt7?&CCSBrDhrj^!SjAnDuG*w?o!q}yHVMd(fzc^KA3BHkA_CwYoRNg#`1 zwPVd18?02JK_RJ9XD6hMTJkD+&=FS)d7m9tR(29~EozNG9mONBIVjWdLHsI-X}r_f z*(2q9Fp0f`3q0=bB-(qK)?wwqqpDcHAsRJ8T@J z&DkWOg`&qs=T$z1AeL-Cvh@{c0$Y2e zm=S+)TM8&mz809 z3*)qirgX8*ytY85_$KijMAij*sh|zv1sb8nJ9K2_n{thW=Jiwl=X8lZt$0od>l9*m598C?8xtdiwq;= z)KQYOU(s-Xad(paWB_@_Ihb%R4lwn{_C8bvBXQH_8YNZlZ z=Ah9JLacVrs>Zx9|7hM6Nq-|D{KnA9{ew#pw^HILMy;4AWX$!VVsSzgJYAAA<}!P| zM!@YPDxZz}a|n5Xu6EMe$z}6uD2;AvRhzBF5T9K?P*=Y0BYPq zIG2LniJiY?m2Dx*X?d1W(N6Ar<5d_4#4|9MN)R=TTX2g3VQ7I#aMNfo{%9mcg(!zUwxWI=Hu3b! z24|XNk^`*GA{&Ut@i9b;VXTLM(%=G{8U@ zX^l_nJiXeE9XUAZ;IT3x!_d#oB1s8gtBewQ4~4H{*{PPs%VeKDPMhAKJ#Sep!aKIY zw5~NP{*}F_8OK~IQth;igv*Lr9Drvo8G=-{;B; zz*wl&Xp)GQSttyM1hc+a5s*ehf#{KBd##T@Z){sh~F)DL1fr=eOejob*7? 
zU2Pho(bpmLryV4|R@tcV=W2}Fn%fol!Pd<;RNZl?eWZ`h?A>>liPbL#&{`;)>CaY7 zg_oPErL?_dK}Rwb`+t*BAN!jgd*2vHX`EW21y(i*ch!{LauV^O^vWz~Z+|8$P;Eu>Ffd?OqZBdR3-N?nb%bB|Jq#cC_}b?$*wrZvv!&s;$+1Mzg6sl&ZiKub z;hvF4VxEUSU7;u1wHS0dDC2h4cZlJArh(ItqR&Q_F#=mJGYidfMBBe{HIjA-#v4I+ zw$$2!B%gB-fa~bBJc!$(&dYA|Yqhk=qtbkZTA}jwz^@mNuS9?J6%gH+UPYuQs_oxW3*NqT zYZ=%1q_8zpj#bxL=B`|^&580d_~L`IQsr}FUJj#NS*QdZprK42Omg4)$NQH8@xLEG z>mUjof8Zz?oS$H{Rp8!Al5Lhz2-V!e#eMn~U}Mk>?s#qg6l1=v1E3O=+#I9*l{&JK zLvc-IfzdLBlcg-9*kFiiiG2?QmqJJtxt=kXHO`YNc5oAAUB!5^F*|bUCtzS0i&-kU zw=DMcI|O+DX7v7suAc7RS21h1i78QsmCnnQZZ-#C`&Z(qfWlWW9(=bV)I49`5f*9z zZ9y6-CrhNv-#MY(R>OJ8x@&5krd;INPApDT$kPYDGPk=1@3=BYVDu}nI zM{o|#f`ymgKM?x>4nQCyBO~sz?OqK)Ob&5K zA4a$hcme7R4lEBw0Rj?AC{KvT;8o8Bpn|JkUJE+!0zC8!Wc35O5BR--<4=&c-?#V^ z@tXnx^o0up60l!u%byMpatdMZ&p!a5gG$R=%$^7i5WMjR1XKX=j|%S`GMGQWm7@P{ zH2?tB!v_F?^&sy9fxs;s8)+cSpWkn0{Er&eGAqU4HPT*9jR7X2|7#@=X&4tq@TLp? zFK@^tV4z3e*RKwTfWa$F&uHDN{Q-N3K#sfrQqqrs0(rO3CRU$>UzlH8Sy>nXz!wO> zD=7Qj50&B30r=1Lm#xSY;@&B!3lK-%$RC&hPJbCZ7a!;x5{zJnrytPw@8bOoN=_a@ zA0Chp&{bay0_JXSL&3CuNY85dqf4k8Fcpa66A}o-x99i8IK(iW2M6rsgZ=vr@ky)v z3M1p(@elJuPZkukoxML!O&q76nvMzp0R;sGG&~{#0O&VE3?AZRY0Pi9atLQ1;Gu6* z5R`U}gT8$!I=zZ(@XAO*|#Fpffu z8S3|Vx%~q@R|rASL)g}}AOZndzlKx#t_S)a0YZj)JRHCem!bSZ5Rl)gFeJ=zP?snI z`Q`5$pdyy%xz0%jgzew7^=uaBFlRgetGV% z?jIUA#x*S10qyjFK;pq{-5?kJ)$v8QdtbCcO||P7Nd~-d6u|~B_nXt=;H+wnZ=t@G zo)FKcv|TTeqvdQ~?A5;G_SZw!cKiuU3lK@>V}Iu$qTRv$+Y)=MzNpu~nxo+LrgWFY zA!8pfA}uLtsaNHtjw^@;-GiAly(qg*#4NPKE&w(D3l2slFkB{G-aaq%p9&7-DJsX| zZ$<~8iY}AVu!PQsVc1V4@xc&5Gn^T>JeR59>IAvG-BUO?Ze(7{#p^26nh262!nYOu zNqia1!UFNx6?1|72FmbkYSm(L#--$T(MDRM^~nEV0wvb9I3Cigx#O||Oh*~^d+kOM zARQBV&R&_^Ck+LvucSQX-=N%6yM=6%Q~y zbsxi;pq_Q?5F8A0>o2E-j|FyK4h@BI3LUHu%kxJ_U^!m$g3a=P^MfW8F2CrMWNBG! 
z!O(H{CF7@m*xgnXpm@s0kMjD4>ebe4!frY7zbQcA5ohs44YgO>KUv49&hZZ~BxE`h z1>>GmaQ8S^Geh4)>39JF-cnt$w#wp=?DuaHr@{yZg#yG0aN-)7W@%h)rwE9TeN00^ zrnDfa(6p$P!n<6luvW=3J}5K@?M&{@qT;?ZaqT`|xxTkAohuTqH)3Bx9IKf?TWInv zCR|t8;~^69C)>rB)W8qt@r3nl$ACe(BLeKk$Nn);xCcI@UYlH2T(tBFc3~}kqeais zpj8{)Zlm)?4C(wiZ=Q~|wUON3i<(tumsk&BQ}#kC#$g;?pc+eA6brR)7I8!)YYvFz zOKz(XhdZCMdGA+Tz4=GM#{<#utQXa!*@CUONpdJj07atL4+w{Y7pxn7HU~cGZ}TJq z(m=a7aY+9pnF2v423{8OFw<_>^6FC}lZ9H+sijr*#87ILwxTEAPdVe8m*;ORq27Gx z=tTJxF+T2>U_Red_9axGD`NP&3O9(=qI{6ZG*$n_#1dV&Y zs*C!lvII)Y(#Mk?R;xr<=_s3!I_RPL>(H$`JF?8RHD(vwxGIN#(0 zNPKPjs!QK#?Ux=7o-E5r{lD6>0K-pjNYmQhL}WbJgXnBnxnG16b2^1IXiAgr9AIZ9 z-5SbLY^_jI0U}r*{(;wmpM%RDFbN0jPayCehj9% zTN>?i|NT>uISe+|-;8k^RiRbc7Hr$5YN{JkRV+8+c?FI}EtC+<9c$zYq>7r7A2Hc% zG==JHtSGbBcGxq@4ZpdelmA3S3e(Zj~!Y!U-D^L~~n7}#`X zAH;yH02wmtm1#f{+YhNcMAa$6cQhCv8$ZPEBemiR15NK(~AH zw8zIGIO?l$zi8-IHDP*kJSomX4Zl;g;E7Vd2J=LpUg6H1Uxm?iE-HWVRUMy315%#g z7jWl2?@!95RrGe|_sty=p$vHh8gz^Yx41d1F!A=WK4I)4TK)u-gwWnjT|}Tx5Sx1* zib5C0&~ksn`Ymo|vYN3aEu{VG6AVy(TD}Yn8V^yKh^G-c6&N3?ezt_MW*Wfcc8^P> zmIty-C?=3wQ%;%VmXT36_!cH{7+w|zPDkQfMYB`%(|cOltJmt21$|Naum#`G%78-r zy9LA6)wIk0;J#9m$X7y)bUh!AeAn&pNrvaQ`l_!~lE&k zSkzBwSYd8I;FzPh4ZIUdU9@ZlX`;qI57uuvPh?9Gg{BQAo{T zUB>^e7>=^2;x&?H@ogUFw~IQllFAykS@W7bs0BFzf6^j-y{l0)5SYc~X2QO4b6{`nc7$CADkf8BcfL03Kz>O&Llqx- zJTN5k+K_uOMM*ctF*r3q{6VMVER{K}ds8_rX+;6TVrtdFO8u-?gL&mZGb67KIn{^& zoj<9wd(|G|nm-(kXf61yHMq}c^FoEUcn>KpWr!m&BV9MmxjZ~$n3yTW9;+)S+`ljPbU%!#|TQGS&ru$*fvM7x$Ke-^Z zlctOBA$~OV*HGXw;HFtMN3`8XD|!KUv@fJvcvl59`DHuP(2N$UR#WyURHd-;Zvoa$ z&Eiz+gne;y*O{7C5y6j-sh-D!fcZbPksV<5m*Kvh|216w zxg^-`RP)e#%8}9LWWa?_2fw;b+Loigvt4EM&HI8=p14}j3mR%aF4BZY597P)u{c7< zYO&iA2zt7MeB4J8O^pEkpT~seaX-y_*^ZGLe_&RkPC4|5ECjl?mSDk@c?H^yYjaxb zbnT^^gHQNnL~6u(pClFmoT>X6n|HY5*a1>A0U`8pYFP9%>g9Iz&?0RJG`G8}tJjGX zFBEVDqFR0IeNY7l|(yktbr5!bhf zj){6(l#;szZHb1`p{&vqs5TByIFBJj`Kb@g_sU`)W5C^Kv?MA4_Z1(4HSZs_YzD&H%XdpOho>yMi->@PVT& z-DKIn*A)xLo8WYd*FyRXmZmQ{_3YjxGxg))5Zr9JZL4(FOKIYZDM`&mEIq>Ibsp}G 
zR@K1dJ@r&P34-EB-d3m=Cfp=?Y%!gWvwRco{!cnXq~0qA6P&ImVS1ANL(fgUPc}O2 z7q3z^;WfQFcq`}xT3_wabcFf`(iG^pfUN}?-t@BAvzX>Sr<8bzh(r5w_1QL!@8qHA z%~G?c=V1Zwt#7ImXXhcCA|Me=@u2OMS9NDzQ%`z^USwJs_yXN~r~A?Ds;!9tQIu|l z+FD;);^Wm*b1`2|@pfyOJj4HblNOF?;!ni2JIZsICN!|_DpF|jEve_`u54>y6T$-JwJ=cSAr!Z8qSv&~d73Tszd2C-O z=dus@1I{-*&vZV|ZbwW~C-QdPH+@I%tSkQztvGTzR6pjWIPbtvd&ugJVt@4BiLB*psA@HQfsoO zI0FnQGJdc~!bF=zeP&jfuIXEtc;&yZ3$q*|UHf96UCjy-xnPCWk+*47FwTc=JHz59 znRek2-HaM9MCq6Aw7**ibSsmJHr?Aj*_A>OeDb}oTbfjeDX*2+rA3S8)m#NE_|!Ge zm=0yraHIVRckdumayY_Zo(73)Ysz+pjOb2y2h;9b)}@-`{P+J;$6j;;g-PpLTBWKE z^}p#lNg{Q=&U_~+QHV)>1gQaXjI|dtSA}c_~B|o z)LMpdBhyPU9m$Kz_=2Bi^E!6IF4uVFR{klWbEe#M0UL z%uXZ}J@vbcKkpjsqO@6&zt_D)uQTra__6xwV3EUR}~Y}8C@=$O`o9a3st+f z<#1S2DLbgmb@K5j>vA*;%hGaQS}u-V_33vQdXUP(txL8ivIW_j zrTU`9q)2h`$MnXrS#^=+`Ms@fjt0`1LN7qv=XC3QR{`c%4fCG zAi(O5e&+s8q(scQL3T;={E+MLc(SB|ipk@xQu{FVt{B_?AC#R_j3`mtrQ5b`+wRl0 z&C@nc+qP}nwr$(CZBNf6|0Lh!pJe8$kgA)ysqFo{Ydv|Kc$i~aP)qm4 z`9}O!7v_vDStYsJ^&)x*j~LWWWWod1yFZ-?%(P5-caC~wU>FrTQEEDPHFHIu956i5N>v?V+CDs zErT9ha-l}=3hLv#TwW4ZFCG z7mM`F>C+_^Z~J#K#wx)^0pfCBPJTa)#iSZJK|@%O_wbj3k`I}-Z?KzDv4x~N`yobM zS10~4>`JZXm;Bh{^o6GK3$kwS!l&NhwOK60m_ppxx|s;D3#){Ll+)COC0N}mn4z%Q z^F&*?K+Y6B@mJZZD5zaH(MtJswB@k+*C5f2I#NOQP~F=KDcjTFK1i{TV%27qB^X$~OY#&y=TmMQt;7eFE5uPRZk>4=~$#GxUQ~e2I+=UUE20SKf z0d4^UHCS`F2Q^#R=;6M7RQUox!)337$+CcqkCxYz?~I7eiX2ZOk%N*U1hDLyzs);- z*u&Ojl9Sc?oI90oYe1q$tMOfkh-AT9?!KvswL4P+!w9psQ<4ninvT)BCsJ}=-8F|< z?5u|P+*19aLsE6BbZEp`l9Ysk2>N@e2Cc_x1#qz9xHeI^4&_0;8opyPo_XI@Rtf?> zL{M0P(QO0X>>Qq{G~wFR{1xS3Thh>Zj3ZmEZT;l|-W#lX)?i!n(Vf`bCN>Y*^v!E5 zzk-3>g~Y=m4lmLK&%x)37kw-C+L|+(QnG!IPIhWIO{RPPjz)~Q=T%`a=Sh@dxuBKK z=4qp2(p){5(pi4GJIAtws4=iu zNiY_mUMTx!>&Z&a^>V_aBRb@%zootVvO;Ll9x-CcHNCX{VCJLkb?N3O(--AS)ORDY z`nXu_lwIm%1f7yNfuvK<+$Saw)|7#o6A=!#g0Xr?Z4HGwz731fXVJUC@|zV2!%P%a zkA`oHwyNb!8AaCf(dVG9D)S`qfM_hJoGk`qchrNh(?EH^Ha$ zHE@Id=M2+SaxfRTMrxGxgFXiF2zk3qNqH`x5-53TJgr7`G1r(&RJ&^rwml25%~|2A zp1vpP&YkM%ES@FfTE!$16wot{WfFzpLJBfdl7z}6md$F8WaW4NW#*2*Sc_T`x#`bX 
zZ050U;mn+AzMJH6XTQ33tLa8+rqqanqWy{6;N_TUGfX?UV2)#&@P|aJ)Ui)Pbu+Oc z=rNsOL?Ap#A@N!nQ7%5Aa2GnJYI9x?yT0hP&bT{AoxbTod0!+6;rGTAQ~U$vY=A0H z#&$FKT+m^Us&E6pVCQ-T&6v{8!{&@v@D!crV>}JhuJ^)$aaW=juYvSUIEx{-wDQ*P z{%nlJdSZoEqZ{$hKr8EdGYO$iz|Dp@?ziXaZ|29`J1~c-4pel)m}M4wFgxxW=OjK= z9;dU|Nq4;UQ`-5ThL|M2O)utl-?x4q7c#*XcLbwZ4O@Kb#|lxmd@ApnQ?^r znYN)D69&h&&fE6*G|kd6V0tCYOd|WStseOhiUtn-k|Dwx8|shbH@5{k%^%dOHH9p~ z#KvB?U7T29p^Jzsi2%_~oKCdSak1jF5A~2v1~$(J_b-1%H}DKoTq`0L8KNo~Q1Szk z6WBQb*fdYN#3Hq!w~KEZoc(5{;HDc+(s&g z=8L45hX-V%Fc0qe4ueAT<3)|AlN6g6XV*g!J>qj}s=Bk!w)&MDg9cT|N?(X>QQYb& zHPI_rU&_0}jXDzzbK+t1^_fYnYHEOCixKXbEowQQE50*V;tzI?OxMKZ2m4VE>v4}3 zp1Wbs=(90ZX6v>VbZO(g)aUp<75cM`wfnhk)v=SZm$mlTMcwQ^&(2N*E)n%W2X*DDjvU{rtD< zvi!1tTTXi{?FkK~SLltCpX|6Vj>}BG2z_%deb_%zCY)RZ(CCd={y))A#RMyzv~qKaGTco8^`g$K0t}jzsX<({8Q8t zBM7SUjd7aW&=Xz2bNd^9FV{jUSQr6Y&JpZ?+zH?8FSAwr15T|{)c4;IKc@c=;>YkG z#E*6o%cQ>6esI}TnH zq|&Qv7!*X{aPjeQ@i8c9V3D98<8M&mJYzo#1b7gbQg9%Pf>>J^UnSI|^C;0(Zauq= z&kN*H;3LrR(NS^8Pq^RC>E1aA3J}P}06pAYx>npg1k+qbsBmAyuAex6v2DNzrzBLQ z`^QHR0c{2Xly#jrHGtEwUM_&BT?~$OU^w7!3k+BIeW0HOOo(1c7CU~u@6{*;XLUIU zEVx0;OhkDOq<{!pvHo2%f`1OVb;)${JE+m0z{_922ms%_*trnmhdXEAqCb*>{NHfF z{CO1UY7q0-evRPkguhG8ua0(tN_15K2!VcYK*DJZ6gs?pKmiURD?8tvBq3jyBv?NL z?Es&R0sc7%H5#x$0gfLFg;O=GP09*l{c;L)bPl8lz|V3ySifK)yOC?!N7H&)@kH|Z zmz#b#zreLG3!vj`Dx+}W*0zDB#cx4dvR)snXImP1L3L3@MQtz$7vTP`TsXm=`3%>N zejjcT?=(By*N^smZ9nH$vfXb8ZjNiv+aZ3QU4EK%-0Pc<9^4;oKw)8D1`rhJ#-I)2 zSl=I`k+eN$pXJuQ0z@7DT83y3BK|$ynVz0XyGQ@L*y-&bzn|=y*x>TKuxO^Aoamn` zB?SRDKrhgtKtF#bBqV-BM03d8#6+0cpB!@-g3umgh#zuQX!8)D@y{|e7wMn!)dPA! 
z*YEHE^w$~f7kQ55em|Zc!nUN^Kn9c-;`<+&ryar{_TZoK`yZk=A9Tl+t+lt7jFXn1 zA6p@9gz0S`K#uwLUjgu}(4LX+r%fs6XN=j?E<<3)gpWs4EkY(jV%#$a6LffJK;Y2N zDq?B}1U<(jfL{>TXWk@!#~FPYCvmJTFbde+#lJogNa&vw_!9b3O=UDvk_(2=o| zoR4ziJc?ENtT0J&AVM>iL2QD^f&_|x+ucMQ0{y!>J!vo~K!ghF+7JjU1$?t`L13R) z)Yovx{zS9#rW}tegS9=KJ-A5u{De4&ggw~r#-Ah2@-8e`+nfpc$xpA>#ORu(heJ>n!JzzABq7Yno&v!odn0^}EQO=2xT#zB;hF z0)xRz2_N*cFqb647Rr<2Sm6OI>~H|1S$K7Zh1MVK#%g(Uz4ov{EQ{Xd&f@7Ut!d#+taEe;Q4Uv- zE!q|n5Tzq?6)j3T{#+|gD)8*fw1)nLnzP@KpX!y!4bR(Y@fg zV9&_vhX7p?jS(7O1aw(i7aYlH*%f@%g+d9nMfA25 z!E|TfeQ>YGQ`=L1mY4_X@eM1zWNq&@TggAj(y)WPl!qXlVwF&#QAVzCe5G^L8&QzcBz%&}&9Qk(1M?T4at0gw5 zZ4|cw5!~7n^X_*B_rdlP255z(ngljGhi+~{agAAcR}WnkJLph-2!FQS3_z0~v4WQb<%)DR{Lg8V|;swB{38N;XUVR0Wm~slF)qIlR ztF51K15N^T%V61F2Cgal109m4b}6~lFhx3yB4=NmJb(~t_&2OXKlYdit~jt4|3MJH zD}y?Qra2HM9-P;J*-!AM)IvY`+xD?}7qY0U4Wt9cCFKUa;jripAHTqI1&qu6qI7Rp z3>6V`XBy_)rouyeTXZ!fUS*9Z8w*#Y0M>0tVz<;eu!pPIVPl4{N;+)d115A5_$|%S zN=uzq=90GNtU`7fV3C3;<$G>A;p?g&3;bSOkPN~)u0eXMVS!H@M<4r1@G6lqA$%OK zi|6H5xTu+fJ-C7mhL2xo$FZ2eA#9vTyrRKqWa5&r4i2U7;-&_PHoV}cdIt5)j& z(8#B_JnB&mVohvZ|AzAf#2BdehRtr{tf6JN!te&t7JNO@E!XrlksZr3UPP7SHm+H} z|8794Tob9@|Jc<5kZpjH{{Xf6XHc0%5|MSG z?3*;IdRorhHIIw)!3v2^vZJ}ui;kirQhjnz`Q7-=TB)bYrfc*xfMaGEibitjY_AU7 zoKm@OntPiq^J{rrbVZQ}#Crs^P+(GPNUj9wuMu{gy>>PSt|SCvSh~oJr=&+i`t9JsPE9?PRgt*kB7wxaAYW8M zPQo$%^Os8Rv0Bu)GkFRU+aM~m$Vf>GWL%-6_U6*W+4g|dIJE-kw5pR)yG1k;?2C2+ zD3R3}-S89}mk$@5_Dfq6LXMdW!l0~t)BUP`DCzdzIOZuPl&H{6{B}ym>{~GpMC}1q zqWNK>Y{U}WLMaKnq+cXtp@0=_W2ThDz4*hF+QE786ouC`a@yY=n3kBp#@?)3!iBaO zTTjx&*e%9jExSUhJH^Q7ce!P-^La`UGT@_nFAY7QEO6x_cT!Iramx0KX*7w zxc|*^^cJlL2Q;(K&CXt4aGIC~_=ZDXbaa=9`i?vJBuOnlR442-hux5%sj*xWne4vt z`hmplv~Xoqa}=gm&3M)#NigZx?(hY2!s2|Nd^kp)=8^!px0b#Kvk4xI!j5ZP$;Go( zoXwk2;K^oR>$nT{A`NAz6KrHPT<&X9`j5Uo?jFm@KD2X61LX(lsgkL(mPw}hp!?%( zbw1+!sHMj1{HC69hOKT6)jlRY4AR?01OvYr63Qr28^UTuGX#veNVNMzgG3qSj-hpm z^jy0ar|UqHyj;XZ>V!uv$YWWFxG;qAx9FTpgZlT2lIeqZwktGOT)g_3It^gwtR|!B zq*=@|p3e_7ywy|6z-@mnVo_&xO`S;Dll4xJ8x&(!go_Elfb_#7fncq^hUBq3>M}au5lJpPwuNA 
z2BG~qGM6rVsJPkoRLEZ$){U|N_wo-JNcRyhkDh!WQPIT9_MA(?^vM+-)7vY4E!V3E-9l1jE82kFyr|J?_5@lz&Iy1wDxDg0+1wqHN_{EiG?Hx)qEP2?= zfP;~d!QpN5@2kUAZ54q>M7`nr0+Fr4X3tbwaYHB_RR%!6-%oq#c3|Q%{NC2d4Culf zOXTfjBpxi0&jfCJ;k-`mSd!U<2aC-{Y^ABI=uf>%iG!L-h!dRJztr1Eit>^iUy>AW z4(+It4D<}@6_L7tHf(dW4eJ9W6xA9G>f|xp$7aPv0zfnwQ6m@#JHV zu{P$9@CKrU1pke1se%i{?Er~s=*iO5{zBom8b(-JS54@*1}Mzn(!9J-MLzYgcZm_X z>s68NLVL@P5a4gg8Q8!uB%ldt!Rq*87rnT9#;q(y#nAS;HNDi#k@OI#6^ZkZYR(+Qr5PST<}U;1FOlGqmY zF@H`G82JDsk57a9P4mf+Mq$V7RXpE09f_s3K3Sy;7B11_yi9%RVM{7La+YM4kJ!O^ zWKor~2}@pPhrX=S?wr&EapKk84X7SYFE78_Wx~i=Nyu&_4eJU8d zt$v^uPRg#Y>v_45QCIGA0Okl#hBw3|{RN`z9d*TY-wWd%{?3dyn|e3B8M;*Pn+za& zK57vtz3S z!)qa>Q08-N`WqlG_x|DLfZ)-xSKC<53yuR%dzQHIfHayZIMl6;_PodhmgdvjR6MPy zHw!Fc1=b|9u=X7>wC-&5q?Yo^c~!{n%mzDBV3eTi21OZ%55yInk5Y$%>xj>4{Hf-a zpMguc$92H2$=J#>&+qCRcS-~V)nh9Zn##V56&?fN=tp4gYh&St8>)lY+T@%oqa+>iH&4gA43yo z`8GK>d*E;E%Tl*Yj36hn>R^{AJkj3*qYF+iuuH%)&ndl|k>tfH0x}DZmk!W@wmDQH zU>>q{(L=E!17AZ21Pk2X-zvbHZMl*fP??0hvoHYVd~Mo3A-)(G{OcFXO>H|~pLT~v zQ2l1~O(7ll{#dX25Lh_cvE3rfu~36+bD*l8*YfQn9$#RE8PRB3PEhlN!SV9d_NPW{ z1g3T;WVOc;h$rOX07SvvRLXwck&30=A-2Kk&+`%)zQ%rIB_5kIq{>lx(H3U9^z}>+ z46{Fw5fb!t6wscS4+xExu0XXB;rv57_M$#&#Yf!?mr4T7+lEPBUGr{Jmkn}> zDKbJbY5Ma%c}|=D#c?AmszZ-m*LSZ({&>a%J*f_i2*-;wF(W%75h&J18qYFmCL?#% zV4TaarwPqXXgO1_E1F%otb)@qlAiQ|y~btJp{x7EdJ9m1JW~dnpzrK(D7lD~z^P$1 zrV?BB&N!q{U<|hx%6lhzD47t%lltD}pBEKT_wmg=gr)>r^_-$q3xFf(fZnW{p{&EY zWcP?d(ctyvrBpqsFM zgIwr-u~+W1bQ=q+2YTmyXJ^*6;{FsSIi#1qxmD@_mNXP_zRV6k$tQdgwqg63+M_Y9 z+K{c1H(zNm$OT7QAZO|eRx@eV3VVp>ei$;V+m!99yzwNQU?UF}yKb^2Rf)C4n?|1c zeD*9{HWy3$Iv`U*P$JfNR5>PSC2WZ-A*Lq$EA|vOG@NZQ+$m zFWTA2M_M^n@|=2TdAH3SOVS3<%{UxqW0ag!Vl&x8#8#X3x8ZG(_>b$#2w!)2s0B}t z=7V|o+DBJJFKVYDk=09;O92pps`ts6Rl!`h96y5Os4y3UHEsXxJxcW{ut}5qJ%g9R z`j~JN1?6qYRDqzpnuGOq-^_pkODPssUtKy9CJ zrc~SeXAVylCDv(~X?2HM5+8jT@;LA)HRzsJ^2<~ebp?#gwwEq@XkR@_B?bIj!t9fm zjZ89SEN?n_N8nV7O4hFMg5AGn=BBbkN~kN4s4k`jMw9#ePCg?|B_C^$+zrRGHCCaI zwIA3(6W#~h^MPX7ckYmuM=>DsDCBlBK5^2T^-eXfg`SZHAoz=^TH}#ogw733>aKyS 
zc=SQo5iaDi2sh`Jv*Ho)u%KSY4adyZEo2w+yPkP>D|X5F#LKKxP2DnLap!n9$B?O> zw1hHEDBGfQeo;)K=n)2Y;aEQ0%bO}&i)#7q-D%smtH4+1ZIq)sf{riAXBu9$j1ML0 zupl%traWQ-S)eJy&OSO2);5{Sc6G*&aN|~8eckpi$)@jsQ5NW$SUY(4Fr#RL`7TG_ z4*SB{we)>G2^Uh3sN8_n(`l{at&N5JQZ<_9GfH@Esgsz(RszTx!Yy=ydurFbQK|aP zKSq|zcg5Eo$^TM1hUM0xNo?e^TZ#De?)}?8epTU2*>Yc^4cE#2pc0S4rcCC!eVfPY zDq2U!Tki%2osk!kgfCjF^4+XX^gG$lp_tgI^wKvIshrM}3cl!zFR2TbW7NO|^ER}y zTsebLI`r+7GD52exwJi38+P^$Vl*$${$rdOgER6JLyyV#E*2fw$4`IN7{RvPB!;e< z9TJJ(4yc|x4<{wKgmU#Xs*)5e2UaCHuVZae8Jj(y&m~J}YtZj&vR7tg&-h+n{cNTW zP%Hm=y=@JDZ2LFm)^uSjz>< z84p>*r;bMO_*AWc?UCqsd$~Q2sHddhT1IVki5BNb?=1H`0i4C7sNl}a9!jd0^R$_V z^z;ZV&*L*jz3a#g_f<24cNF{IU}5VjYyEIQNNsVs5TiKQ5szG%K}iM^bc{3PWS8s{ ziIFn~C0gXR`YfJ~U?hc~r)y2xa;*iRwN)t0VsYuVHSP2pqSYg-^WpS{J)0YL*xXma) zoR=iMxtqK|nsV(oyk_B#)id(~yGZP59ib4cV?bz=;C?~_r)-0JRCO79O(CA%>WS^x zge$kO05kmrRq?zrS8Zz*iLJZUHlM82CMettRn0|5?zH38_c>}(>FX54v)4eg#=F-A zEhZICo2TWq67)1*QmM_z0}^hD!m=BL7$9&k-Z2F(f~9vv^3_aqwBJ z{83}c6^>>iRc!K=4b;B7Z z$z%UgMziG)yF{cikGtl*{-LW_s_M#Q zv!mWGa_f#Y*yU{o8z_wGP)T0w1b;PBk$_#{ceogZQjg<2piim=1dR@IzMuWlcgb=8 zIbYj|>0z;}1Z;#*?y!Gd=1vEhnglc&gjyTyBzw)<_6ucgYiJ+Y1)G$r#J)e}Y9&!* zSc^jT>n%V|_mR>z*qv6=hIy$ZVn0)wAMm|QQ;8;>JII#!iaSJLXRTZC8*Z+_GR1$~ zE%N$Y7Y4U|7bS;(n^$3P#V`?Ry6TV+6+kp;7EY0hZ6m+6T2^!1RWLNdQDOe@dqcFw zcFT2CQ)Te&z&B^|a8}^Cjr9THpDblq1q^zrN&XL@Qbv*de}nw~C$IZ|3;A)dviuk1 zV2K9CpV4dP5sdY+n41>PGL_A44cTv%IC!(II)1%I{>J)duX`3ZKwxY z)zEnR9+}+^!za++2V(-KU;;k8?Bh2Dp|bM;mm`lYN}AYU{CR-PWi0@%y}P?(_&$R} zXaeT)o0RSa5J5(#&fVGwGa+jRVD(@|m>qiP5uAn6Y-{DXZK!SS=wQxRY%9$cFGkb7@BiE8c zSV7hTtN{T^Dnd=t;n76`AlJU(2a`2VKihnh{UamLLSFb488Wj2iied0NN+=ZcJP=o zlg9=`FgGxdKF1(wd_z1e8%xx-wRUCUB3$lA-imo65s>GuX>IUoe79?0*LHEQe%RCk z_)XD%Q}k~x`H29-*gJxbOMY74lL@|xnLs)M+Sb+A-`6_;`$hxghNPiR;O{N*;`;hf zt$ic(sO(-`>D>U-x>5t3Le~Rye+k_>v)KUyX=iHtb#MQuezFtR)&Wimn9uP7gup86Fsd-gbJue!E~8BhT)b*M1*RLpRrg-+m=sX=Qwq8@BX- z`##?sd&8t4fvA_$;b-LTBa z*yPOAC%X2r4Jf0)X4NqEj1BLWM&I$4pLG_^1PtdsN%QZ$#mjGj 
z=9b0Y|8@HNwmHWq^v<-?J9>B4#V5U|W&OnOaR-<}NIt%r0uijV6k5M27lGVLb0%Nqj02}5uGwvJCb`>sR`wqR zz43AI{OY7w|2kMX`H+buevV%u!?wmz#U6moX`K1wkb1geKr80M^eScu#EZ^E)n4-% zb8$Mua`2s^wVO^?3E8X>HaWA!#HDvWVS+CNEz6*`>CwpQn`6OaCKiwt_6Z(1I6C6@ zhdD!-x-3PhK+O7@qm(-Hq-3(Y4^lth{Zr+Wm}Tjq*A|Hg)R-jd>Kb}U;FIL`O6b&+ zlEJ_23dJDRv_Kv9f+2?OxP*r!Zh6jAOtxBCjx|wvJG9ezHM?bl?m`KdcOoo2M>D}* zrh?j*F7HTW?j;a>qpYHCr*}*L=havkw)@)NM zl451^Ym2{7HTx(oSF{9Y@kCmna-yROdl?qi-i614@$*gqnE51QwL%9Pb47XQV1<8F z&_MPIn$rQ}yF7=` z+DiLM#Kxe>J*mwDg>w)t3iBYl`Bxt@egtS_3|(p(Z>f;-47!XFPCf5xnW+;FBC zjj4xj(?kHtO5Xj+telhF;P@R2BsS0|s?!472VaCF%DJ>y`mf`#358Wxgj`K@!ERL)5Pi?V_fogDkHB zL&(p$Ms7@UsRQ&IX$(P&iMLbOPzlYKpZ|VEYl_11FJ~oE&Gxl!Vg~4rY(~VE-T*Dr zD$(Zq7K0V^x`(^F?NyTjwO&zGJ7nXHc*O)*6Zk)aJliYIVR&pYt-P6lmNuiFg-F7@u@X|6WTN`{Qu4QJ89ZF*G zFqvNW0bX$@8wqF<%E1mODWEvG&RgoIdtVRUVYkP>k^#e}9g%c!c}_Bqu-5te0)yK{ ze5A~bz9H+<*zt8PlP7Twx~v^nd7K1!tFGK{b501*40aWhbal&_lG8fUkmSR0#ft zv8E#?-V>~nOE9KTj%89+A9c3(;UUfHH+S|2QFrl;8*&J>@Q7$$3C1b2D`N5MlylxBRgmtlHwn4(nZ zMRB1f`-h@R-h?X6#2~H9FpI$&`@##I!uv*Hcu(wJ=x(~E(INPq5NC{VK;-*cg%xv? zJ%GmhQh!@x3Gur3P`}6X%kE%s=G^bDU~9JfT=71bzXK7jXS+Gk#TqPNk(ABNcaxUJ z6C=5m0aFFPdsgQAU;-$9U-ugn(*gviOHXD#A(-OY4hc)o^1M2&Q3A5Ue&Dk#evgCK zEH#sYRwuz__v30CIlvV*U8M{o4o z!;k}Yr;y}lW#<_wR;g^gkQtrPngj+CV<=W4R^7%(Le(*7{E9;ac$S3O5rx$B7jz|_ zSeZ)SE8&DRvvfk|Fq1?%e)K+jKZ3Pt-=w>D&;%sTmn-Dv8{^BrOx@tiHObHt9?P8| z%6EBV|NfOkiBl51l2gh+*T>%!jhlij)F8uttBikgVQZyOspp z8$F^_b#EU77)0F88T89)Zd;50l|@87$5Snr){bc;j)wDKgy-wT9gpG-D=nXn zRA*P?1SKQ5fDizr<^CfVf=Z7c_W-+5_DQLc397TIjpE}SsGkvgxpy)`~N_VN0H||gu;2x@+70y6|YGDWLX+JZE9~Ryjyc7CU|nBSMBOIS31+P?C4wcICN)^}Sq{hEL}hrn zA3yC{ZophRp`XBvL4w@cZWl`6CAKgMs&M%mwVRNNAzp0CGpw0hnwN>~kL#CUmblG; zbTK>rwMha54UPL=QxU#nbLv73P83cU$JgneM)Ocq8>zAA${E&ma{YaW`b(*L0k4!N zS-t#fI9oVE|NXK{=({xC-=+0#VJd_^Iu9`Yox!%XVlc(m;l+!;DNl;V7y<1{S(U0& zVOZf_eH~*RjbL0x7Hi;soPi80RjoDDeNb)C7!MaKx6(aC*#=`Cy85ruCM-pbVs2(! 
zKBv~+Z~aGuS{*E$g^voQ{OJLD%_?gwOT}sak0L19NzzPEVgaBt#5j^Y3CAUmzBJ^I zg6BwbCL~-UT6o}FM&=0c+!nhRDVclqCFR!}#Z0bbcDuU>9nTFukNmy)s_}1xZ0D8i z4Hvz*$l?6?8bW1fUTcVg(2|{HWi|`iL`)-qnh#R_{BpIX>mJh)R8xuiF6?9v*GBtX zqa|ImBsfYGd&cbw#|Nvk-kLnbhfTNYX*%x-%GbPc z37jbFTgJQ>^{mcVhSjY1G-2GPG{ezHd?va+bxMM6y}J0_YV@qC8X_pjD} zwJm{2V-}^$Lbo|_uPHp1wbgqCV`} z@Z9%D|CXR8>cvDgLnko@89KS-ur`>N?O4#Lr7q8={uL}7N0(MOKowc)F71Q-HknA1 zJVrswP}*K3W7xL9pTuzKo~tmKOR|=Ed{7d34l!W!7jqvJBW#h(zP!0%J3lmN|A|YE zam+dDmtRR@fQVi^SVPLqSODhNU>tT#ulsI)Aw^7~9wKQSBfj{P*8KztIZi;5W>kDN zwvij6{y4Wk6)yrOA6Ug@&K+&0J3SDe=tskxd_X}T$_P1b~5Uyo=wGN0^kPxmDeD3zX6wbyHJj2jPR!(WdP z3>pq3o!6j-rtiK7P+qSlL%CAYwmq?^fVV1Z7tz1BLkxkj8ZG%oPy8&+&BRvNxis+j z;=PSSUxS)O9RycK?c`b^NK6$$`k@8!>8s_f85BasBbnPZgWp58vW++eEEYiQ71onS zeMM98kc+LJxn-=aQi96_Qz;rSB;4CJ!m@E#B$0x2_5OOCFB7Fmo5AW1=u!iuFJl)d zy|FDdS;aG^cG!*N^2qU}@lWbp2QN+!PPIUEPOsI4%{bl<2k8(SWXdZdUQOntV;rUT z4yF2)NiWQd21c3Y1}BAT=o!I_sWi*m22b=vtAx-+{+M|8ODS z{;=g*Uxo{O-trHqU7y6r?cgcK!EIxXKf;$LTmk%<%u7I4n!QLgckmo( z_}L_%500@^pJi>rQwVKkPEB9;_U=n5Ld4##x*n^kfUo*Ad`TnvAxPGIaTZU>GV)u0 z*8%J1V~tSO!QVy}nF94aiD$#|kTdN%~X5*TP;-TxTzAtvY7^N@yV`Rs7MGshK~E zmnrYL5QK!Y>(#i(>?ummDt$bcJF5FM{wjY}#9FLK?^^mtS&Tni?CIWj&AnJZB9aKG zClzRjss1_XMN&^=O-MO1!9L8^S$Ql!bGomi=-+D|8$P>ftbi@vcj((-^c!t2%xLmQ z)B08le*ZZyNLt&T?oWdVZ2JmZ+FVENtoG~vV{-tVfhQJKb zHwvu-O3AHZU?^!}b@AGfNq@S#3HCHnC+E!1?BwGXy=pRrooipX@ev2~{D4s^4U&p2 zm;g?nwe13j_un1Yrnk1Ln{>vQIYWzQ#YS|LRWEO{p5_S7^5yo<(L-06mZ-?5IE2dw zcVB32+_=908`Z|kDpUCvc1)ZKa~sqp_nE9p2uZhVT%?W2<(L!d5%dZh2lXsdgA~Ms?s<<$ zgu=7a4fqPZSlr1;>H^l8(zTIfq6t5Z&|B|;NlRE4y(CjuNcORZfcjRGOc@OcHEkG@ z0R-=m2m96DY-l`r%^`oCXPw4plb&x=yw_k_T-ZGFRO0ZHl+_}*VvJ*F2CwOx`kaId z-t74`s@0-}CurU%6DCoO#$#zwox((h8CY5ojG6*rQ+DUGZ2C8wqgPX`VlOt`@jJsk z7L?u?4%BgBv?&Hs*ZJfk#4gK?Xx~%P-ej}kul$}AVUj8Wh9WM^OV966YJy7>3;*;$ zSmg80X0NG^e@C*oqG#a}DQh!WiD=OQ;^ax+-Zm+p*$JCf(mV%^Wwedf-6CDe!+3zgIW{dRL7T^+fKh z_Thk34IcA;onXlY1AhE5(=K$PfD$IS9FEcCe1gkFqY%ZjUY>%Gq|qXr5Dj5u={*SZiZZ`^QJjgS=3aB?}xmi>iDJ_ERkke4%v9&a?{>`uc9zlzK 
zO(c189wk!Wf%){L5GfKG?J&6Uk9JL;ssTfsE9X$=-GM88#50wG6W~=WXRP(i9ZWdf zOlE!lcwIN^+TKB0H-IRWJY8R3>`}ix>N~g;t@0M(4gD6cv_jgRX{lvV&~kiWdfI7a zO(Ly(AkB$nk>cHWyb%v>qU>!$E9AVfcaCxhj>Y=XTJ~F%ZoJ0O>tTJ#Kfa|btO$Em zZqqo*!ZC*UjBp9Hr-79t$oHawy~d?(>Opa4<+ZwogTP<<{av`@ZNzQX7%Uh=j>vc2 z^T6TUxW23k1MVf)=_6&25t_(q2Gf1>hIg#^H(X#EY$kYZ5z~gjiU&??CEmQiDqKGc z!w4*DTGs~D6Ia6S4>MDP|JF5b-*rE`pn2T2s$(}ZV2p&rM^YW(rp3~2kD^>D1*Wnj z%vc?dQ1f$5-&&;BwGe!`fxaaf>V}=%2^kWG`W`)dmAt2F#Zyn>Z+tGXiJZ^WBvO>Q z32^B!!OCdJ97%5eK_FTIp_MBR&D&ezrcEb?!<6UKVDMvezao+c89(GHOz?IZ?5z?W z@-6zj>|<3qn8OIH|F9^BPS6)%xS-aSz_s=G6L{GC@6`ZO>%aQno_HP?IGYld9y^qz z{?M+-g7%vNtDR{#i(UE(9#{IVB=vL{2I()dRNulqYzj?aTi(f@=+zgkK@Ga9cDg6?NCr$_`-p$>m;26<<-u^@6 zI9Io}icS%Xhq%Q5MBJw#@SD08?bHh}t+XQ89IF;A-u zwIJ-CvoRN}ymhce{+O%trDOaK?+tABb0h0!RfL3T=g!O%LQ|$0!{Dy$dB?g%t4BHK z4kmJiVkq+e`Jif>54C?t@Phha3$W+D`$ERHGV%?BeXZk}JU}qNxrlTeI^yb;K~vpB z+UXZtIuW&csppr__RJnn$nEk1Bwqzf9xZ|-Uxpy-&wxwXT9$i3G31P9s~lJ`7DVHl zml`m<^2QhU2QuD_!tXqjXo&bXGTnc3SSw1vx3sDdA+PKz!T-+vEP(N74cPNYrpt87 z)g4iNIz4~zZ(4n?i0sTE_!>nVNSASTIGyB(X@-$LCDVa>ptMu*FRMiQ12t@w(M6!8 z0cmXl;Coy&bAMf7ycG4_NuF7AxY=3M!S1`s#CwqqL4F*#wZ}(#f=3cJ&RDqDu&sw? 
zWHEZJy8#$w&Lk_BbEg`~(EeY_`7?k{eD9&%T3Hwo_X+3k)Wgb{<{P;KVA{J=;#TaU zXt7^8>YsTg2Y@^QZ6GarC~J+4X09y{i8FfaJRRC%BWNeJOd3PCIS*W-82EMpnwJf6*bN5y5tb zLsdC-iNzNy2;B0J62a_2iY=Tf`DA+TQGBd7Jtdq)`Y>=DkbPoc3 zzGg{G@UH9E22pvdD5eJK35kLjah2m8a)`iQ|JKadwEqw8-XTboXkE80+qP{Rt8Cl0 zZQHhO+qP?!ZQHK9PUpVZFZPQ#?bpoAMt0_ioRR ziE;%I#jSrYsYfd)?hLa$p&*>&?_&SFx%6Mmk@X2 z6aS>^Hn}Bdm12EeVI_1~71!&A^EzLn`1UNvZ&8)6CtIE?>2~jMEv3H)5t#o#EdwY5 ze5@j^yCc*OPK5gU#0j`iR%!c;!6o-b43RNaI)itfDq_WrKuDWOifu9aH~ru-cB{eI zr6#u*n*zr8u!W*}V%!j%g%A2}%{bNvq4%0t73G&ng6a6=@ZJ}8Gq@p)zh}{><7P4Z z_Y-;f!LZ(jq*#P{iN7&U&>|vP7Cm~v;m;LKoG3fDNALXZkcJ6EXQN<-c@`0=`P|ku zNYLTtAU*>V5lk)U*pB2B9$+=pz^c|BIU@qo`mvwd_8)Ege1rvE!dkW?K-~1izD{Wy+4)A&$>vj#!b^P9W?$LdR(wy zdzWRp(+j)O=+Y6gX^DmJ>bamHsK9QzN|BUX&6PROOvl;xp5rCv_xo_;D>N#l1j|9T zM&7hu)&XV47K^BLAv1YAf?t4D-Z-V_V&Oe2V7~XY&PGiTQi1lih+BOsXnuEr>Q40t zCTJ3z0V7EegMyO8$}u6}urKQLv!_z{k;Po|C{3ekA2z~wKC{=ZfN|in;Ypr-IE+I6 zDH))o!c^?pxh=&7j|$|RPzBHX`0d`=1&G_C(ZKe$%}Ei8?Jb@9qz!W=Bx z;EPQ&Wz2oXZ})qw)HQg2#1heN@qDPRW+~u}>wazEw6xkycGhG6#z{s`Np9`0F*}exaNb zsk9z@%OH`=lumhZ1tCnrLAMM$Q96Khf7S3T-oLw#A|cfC zv>SKkVi9;4@f;Z=FX^3Fmcj`cXgot6Dq%z*@GB6T_LJluUZ0nU8#^i@hr|qFE1;U& zgqHiJ$EmN?u=rcdG=c(f6Cd?(cvnpj$Li%`wIxBPziEB#Wx-k)Vz1V)Fm1?lpJX=g z?J(7E#8mj-U|v-NSgxNnP1(PB4OD3nA&p>tOc&!wHr9AS4vwUcJl0bJ`D%V{{vzK+|_%^gv{Xo=#=kk1^fG zO}?Z3bgYL-Q)W1mN1BZ??)Pbu(7-<04KG&5KR|sKpj2I<0JDiE$oPkDHSAT=xLm>> z+o(d!2EY!cXcGE(5K27Xrh>+{zYa0;$qQVNhdlh|Z5f8g-wjxT4PO)}tD(#yb0{P* z&{jBDIY;Hsw~G%NFBnqe-S_C9=VDb|Ma1>%2>>n2T`;&%vmh(mY4 zY#nhCmi`jMHhC^pcManJm@eSg^*5G9247RBh0I-4i9D}>zqB~x{7Jc<{r zE`e7LELZdb^m$z?xQey5w7SL*3=thK3#%_Yoqkf}^~C@64{1&Eahs5?lg%}}AmZ9s zuTYs`j=uiq$xHMWmro47I6|kiA9Bt^dzC95&F!)qtxGi~x{RSv65Vrpn}~3b6avo5$84;NEJ9kyK%B62=H)VWu*?wh0MtILUX* z+pD;MO8r?Bl*l0XueLh zMxvFEzt}mqW0D**S%JlA$z+hlj|q&-C*`<8qIt%e1;S-ZA=q<9M@-SJd4Nxm*;vn- zR;NXTELyCqjN+)0FAi!)d*$4A2z`3eyz&RJ`!Asbz#JP)go2#-`JaZnrU`?*sl9@j z?@v?u^RSCzdE}Z8I0}hE7j8ZDWMwXLv*t3&6Z2?Ga{w7`r&wPp3gSHCdH;)*fMG1; 
z)e+u*jRXhj;1M0_f#+5`PnYPOM|`m%comeV_39PZvqjdOcvivIiO87`di9dZDw$xo z_V_t`z&Y)RtgSK(IQQ^$Y`m)jmZS{a1{kYz{@9q`>5!Y{^B z010=XfD@zA=1wv#YuvH+9&{IY5$fV>`2rfuZr08May#W~n9H{$M6%H6lwkdD}E!5ZUM?l}mz`3CA)X`u3L&X61T^I{s7>=yL0xK02`vFvoQjdBbqj z@F7p4Rf|kZ)ksf>);iX#>PBo6ynku19In$EZ(|G06_HR(haB+nX<1>kJU~8l+`^<^ z2i3*Fc0-uLs-9aYys8^xVY-`jl+M=TuDHcVB&+>TX{yeKKi)vRyHBgR1(krhBO zDd$y9c@Gyd_JJLYxYSjPRU9AX?-|xn*OTm({_PErj z_bw!fnq6mcnCEYlMbJ;bVHT z&I|*)@_cx|Y$uc*N{!q(_jjh)s)F6*IDuM`oy&Wduf7Z`!9I)Pg_Ux6+=Qcn28Pv- zzBgt{lI&fC%bVzZI4ql=3PkPB{}yRX`PEi3HLQ3J0+j5zOZPe-QUCtx=Tq*Y7Qln5 zA~;qI9r<-eJYTad$z6=2&Nd-qaGG3w?ccun(;#d}Yi|vgyvv}A5^Wj_0A4H>$ zkfx=EL@HhUemCReq9r1&m8!iLIi}2&o_%PTzE+E$%6P`xI0T-KhQ5MzQPW)(S>Hf(Q;!?(I_UUe$6{FBUJ58{jGCwZXW7R%uT`}rckJ5Y zY|eCJi4+qS`sN<6+YYJr zk8(pfmFyCh*+KCA$av9qv#{d}sg^MkDEDmnk_OW!Qka! z!5ayzYmz4f4!&dzuX|E!5=kBxK$XebfN(vVQMbfJTS*9kI}6ODC@3PJAn6Ih z&r6bmyw3WG;}!|J!1Q_{ORP+EAlipag=YG#u!twfqNvfbGNm1zYQkaU*nR{$_WB*! zWjOUj@RrCnilIV%T?&vVdBKo@AO*Sry_Ph&Mt#g-`1uQ=?-pH3?a~0>tNPuMgVk!= z8XYSYI_>$_sbHi`3qU3@SZ>ogOH(5%$`3l>Pezt)wP%FTq5i}J1eX)=1{{odhbU<=;OR`*0EOXTs+OBYcEr`Qz3g9QQq zd*p;!7&9AE9gKe_fJ)3#vQco1UPKaZmRph)1!w;`1Vqst4t?Ijk^9^ot9Xywe$VP0 zi|^2W(DEgfVd)V?{ZUsrbcmoTDt}U>I#;Ca?`khEh8(g%l|{vhHJuYagcmt04{uzL z#P+V!4iL@^TY6h78CyZjbA!0rs+!Jp)=@U=5W0;WzH;r{KNQFl#^?!ts3 zofCg!h7%Zv4XF)F2V-AOC=uA!9}IgY{k3(aCQpqtiqi&Hqt)0;&WhqO(Kp_DHE7*v z9T*0wZknXwtc;^Etnk?+$DFZ-LN^w#3?Z+zZvY|S%a2P4=uD<}yzlR4IHa*^ed0VY zQboO}8ud0(i0Yn40p7&<1pCTQ!eHk8s7||t6vYBBBz^3~IhKBeGhS6K$qx*>arWNx z^L>nl+)2Vq)QMaiO3r{TQkvz$ij~Kwp|0F%($g=qKVr0V1j~Q{g_ykLo!Cz5IqB4M zj@^FPiC4N?9wTQ}`EjuWW8%McPueGJ!P$a&zmo&o_B$PpT-Atgit%B9zp_-SP z3Na&Ku=17xh1-fj`}Y5}>pEH!fp}~+{S6eM$Tl(Sxb!$bEPJ0i3A!2`KNknhS>0gW zG77#;l+F1LK#xolR*Ej?6xu^DGiaFk zfE~rtzzDAF&D~Vy*DJ~Iq9A?fJt>30iYe8xgz1PXEo>cpq9;?@Ft&C%b>-%gwe~Gw zIziP{-wu`;mUxX}k;57Q3-$U_ZYECmu0od`Y->duUV`LsxD0bgA*~?WM9V6?7XLVG z3@&HsqO-^VgA_5MPvkI3$ zUsR%b`o#-Zg( z-q$SF6TbPgrAN#b)&D%ELh~rJ5*$%y3=|6+hLK(HX@fHK!;= 
zCJfQ@#`?ubkLeyrCS`kSuP|D6StQg^$f%^R6$DHeOf!BhgM`K<@Adb%Ra>brSYc}Q zz93KT+)peO%Mll?XTu4PtDblYv2ttFUxqCoXGKbd>$4%%+jn2!f=Yts&9snDk{gCJ+D3mo^eSalS6 z2`@EW2RZqKge9lLa92ZnhrK@^i=RGz_AQV!tfk=rB}y_qFN=Q!wc4RTg?J1)GTF!2 z0*0tW%UVL*_GB6xpKEf$uK%!M&4#&lel=eYOI2V@;x2#i9q^H;Gh&ZTRuP(=aeS-f z+K*o`yWVukMKmg2ws;lP;b2Vl&WK?u1ycHe2N;<4Y1VG@IYLu)u%pW07wPhP$DK3O z^!`1_d0{JmU@|5MF-OTr0%CM~B`~}Fsa3)ErOR&vW^HifW<{D*U-yD!%gbbHjv(J|Ceh-isQpak@3USRzAZ>k}~_54xSJ6yj9dz z#iyvtd0h9GE?g1tZ1-TJrMYqSM>7^TbaMcYj4dUK#nlot23T?%YTzG1lib5qWNnSn z0`qdRO{JYM;HJxhlp7VdHncG^aFEsu@16QVpbOW@K+dX}>4)d&e9UZfZ{Y0)s7BW- zGYw5?NmX>b&$YnR0Xo7_Mm9wrtp~E z0DCv;Giv(|$(bb=8w}6o#YKgS{0vLJ!!j4CbY+8R5(UEqc74j;T_?{yB_M84c8xJI zhC|$z)W`L#Wy!6Hq$N$Bd(W?|%A9xvl-lFjCslJ@#NPf^v^Vm1C{ZtRn1gBy_#(#S zx=hNbMpv06fx1)S8Ts~zm+|{`cheWJqhyRp5@tLENnLa6iy6jYX_uDDLP~Sh%vv&* zt7+M)yt#i1jUZl2r&c@AZxZb1wC>M3lCBPNMcqhPZyjhdki9>uyefVm9 z^hVMnBEGlc~0A$q@QzWGVxk$7Dfq zxD#KzzH$iCGKl%DEHCQSjBxh8WkOQT9Q*OPl>$rRJ_6$Sh^*Ma#4+E==pQQ9YyBoj zQe2pUB-D&I@JG1}!vwq1S*SMtO24Qc*0jN_6kgm+Z%gSDhx?uNXm!AFrV0W77QU)F z5ATm>BmP0ap(*3)Mk^9QK=Lt-M)i2-GEwlIIa(J4qY9goa1fhHLvm$ zpj&?PO|iPY!iw!ZI?q>`3#t~Wq`UY-W{gLzy|9vF$oG_<$*w9Nd?$=i#N5V6#QpBAk{3Q|Z~oFyyF^K>%g zFe42)_bL;ne3S#P+*PPzQ+luk*RXG?R~_)wG=Q{G83Ec0XD?%)H4GV;%A!&j1m=H8En*QaO3D>B||0dO7#w#a&< zRCWYYV|7|yw{IFmZN>!ZNXkbGKR*VgOKLs95e8}9Yh~E|$H7NigsH(p5P5}$$94W3 zDLPBHCmiE7{Zn*y@sO%?0uHjb;_DR}?j$!Xxos-!gSlk~?v+(5?Zf&j$n`0(pTIq0(nb?g{6y z9X)f+ILKQg(a*H(3W4`~H7_R(mgKfx;yAfqLl&h1il)z4IoljL(`1=w%z3JwqD}h& zrY&!`ji(|Ptif79F5kjS^oN-%S{cNqpfXK!jb+IvoO`v8-PtZX9(*||$PO)RV^D?~ z1X-Od^D@G?1GW*Y_m>i~;BK|zYl$O{5&}58MjE{j!G0TSbSuArgkJYy|0SBn^1qIz zF*E$%{>gv-Ii~;T>Oa~j85tS=GnnT3&p&s)!bZ4FNXY68FBBHOC=O3384k?ckHE~p z%mPu~KM#wPh_tvUP6e^JO&~569!Nlm|M8LQwDb6D<+GdFVw(H8vAXjbYsGKn&IubM zl2Ra!oeqW+fQSknhyvg_I_Y^p002P(2@*ge?%^S`hhet1%Rza|iva~IMuhScR1g^` zz@V0n1@agbE)3ATodbA~2;hJT>YxFN06_oID-amiJ_VdUU z9UK5qY@-0z`mgpO7X8Hf0u5aKa_Uav0=)of{-`}5gZumX{1ec*lrT^r?O%P{ecU2O zWt>@TGk24Jl^ywEswl_n0_bu2mnToBXp@qT03N7yg1!ARM-zhlkjMPm_e9kT0U7^J 
zzEsNinO)tB0Rj2F2qE0fseg=9?+*e1{jPM7ienjjJ(K?Useam}{pOAPiM;E%{On>{ zv~zR+wnBf2y!l-rptm=@>cP0UnPFV?L7LY0y{N#0g>-d&wLQ~;Iko^zLU^6}wZO-& z_czLS5{`ma##4x0z zPp3MYB3XDnW*99uog7MWm4&}}!Em6fVJe;#!F#80r^d4@#8t$W;;nX{BNWoom7%2YcP)vC1J(5P8B|+weC?Z2Tb2Gy?aUx^?5Ze4g zN8JiOjJ;mvv!xDNRxrVLxN9H^a(VBq>|2GZ5tr9T=As5 zpT0>IqNZ76T7ug>W?5^9Cgh4%0xQ*;dOqWu2VoJGcR)oSE8Q6Qb2Z5?2pXJ47`2PLn0@5#ECU@V z-S*B|JFpskBZag4vX6c0MbMP<%&tKmjBKL1MQc(v3!<8tj+%uf8GCyScP-kWhH?7uoNL5-|Kz zZ0AOxBjDkw;L6St9iS0bkCeiR(T1JH<*jN)nI_EV0p6FqBKuwz`mh0>x40|=;iQ9d z1Wz(*{Psy{V@vw=DA{pdx9v$+Ge1U~^bO=eHCcy>8N!UO<;|N29kR-S(rUM~E@B|% zq*o>3PPyM_$2uKy40wuF_9zW?FAcb3Lv|BMXuuIrf@~+l;x7A~O(Ih>WKt)Yn?nsj zERvj>Wrr?Rhac#A)`Y~k0CD#KoC$^Gg}cxgZdk?Cov8M-I4|{5drqSvxmb5lhv?h+ zwP=@r1mm)JVH~ZP>_vnFtX$u>fR4dV>1>IJcIOUK-oOm&RV0I7f6mE-PpH(nc>xA1 zBoN~gK}2laT^poT>?NUGUtGtIJwb&}`s=uX>E0=N0hH?`eBRT7lTf>kgj>~mL)&dl z*Dc=wUoYW2ZgKWq=HbzR;EHt)n4lOuyW@1TuU8r>Ii$_NOZiS zTXSb@j06l%-MmcrtRRsFEGK$dE-GPe$HB3=HBTUIuEgY1MvGJNF_r3*9TZowtwdBm zBhEI_@(}nQo#7kIob3*36Z8hq=K~a~Ly-}w5ZpTkK*z>XfgH(LJTdVqmykzrvGLpo zgeeQ}5qnY15V0yRTb1H$(=qvz=)T@@$w9Ufyb<~B(_~yFnz)FJ4F53fVx#lxny{AW zCW#B~GLHtb#EV(d2%-CCn@r$1%MLCK_%Lt0HTZy~GiInV{DA;To54s>K4^rY|+xBDL6~wqC zDvl99X6+6TiejRpjGQV&yT`(Q3aeDJn^PTcfDPeOTq#ZlEHsxlt&SCcGBxd%N_nzP z58Ud~SRFre;$nqL;wzi&qBw4^WXtmZETJ^z1&Pblj3=NVE*xcTe)4(h0GCmOwm7*K zgzLA>WDRfZ0p_5zcSpV#Lmu+ZwIxzl2Jf^%aMfd|>OSl{$gpomQL5AXO9Gwz9kM^R}&6njgR>o5=am-Rbq`JeU^{Z!opr_l&brM;xlN1o%6y?zuomtjqprGQhXT1*PF6-qkv zT9m}aW29?vcTQSdAbj2ikrM}-Jfj{A&*l&7Dzu=ygeW>azBuY<8LHc1fr}ki>>oy1 zl~Dt^##{PW|kAKc0C8c}4ol%LS zj7E7ng}7wIXv$h0GW)&|N*IXl>ydO|e+`?*CDVJPbO=we@;SI^IklYbx{cNI=|*+P zIT7)WItRBsOFzCY>;9}kX!04kDi7oP_;;ddqz%GRuN074@GX??GZR1x+VwO_%|TuZ z8;Yg*BMCu1vB$_J2s_&Kg?)*uOWdjVO)ximtl_SDW#>61qS1T%pQEz%s#5v;BsOFMQj?lhV zqI3PdH-Jn7tpEchQ?c8@yjDjJ;Y(^FZ+*kpX9yBmp}hE$N^^pR)pfq|XJ;z#%cw+) z5*az1#InD@9V@)5CvC!c$-Yr6T{n+f>PN zC`a@j-cVEwSfMPb)ph1^BXjIZ&CjV~;yrZslrLC^o6aCaA14J~X|j(fHnF zeb5Zmh{P4S29qBq@}s}bS?xiYY)e}5pYt?`@s8~0$wTHGdm4(}g%zaT2Z=)J7ud!^ 
z1unM_+{FZ%CYTf%u%50T9-GGoIlDdK7uG5dLq(Xvr}ZOr>rM{CK(UHW?|_o5>U}K+ zGKav%we~F+vZ#_FPCi@!09=**Q)E1_MCItr8cBDl=0BrFYeuU2pG>jxk`*XNUUL}Q4Isr;3sB6$&Pc)a zYW+I9epGvXRijoMADRQ}89Ov*BNLc}Vk;06<$Q0xD8deqHs7s*50xvzp$K@sAM_u> zt2jz`gr2+HJjymQt;-8SHefZkp2Hm%7h+S%V0oxMVP0Y&p!QQT?*`V~YC#pVf^N?9 zauM?+yj7GS8>X7EPU+8vIgneHf*bKEU?Og%5o;+zFanfCZN7KhjBWZ*xI8mD{WA0A zMV5y|zHARn(?yTZ$rBfvYwoxdD}S6D7--HvyplZB2nD;?x+`H<<@tSzKBW=yBU+9z z?UIZns(U5%V|LUm81}QC;L~dSHYpAfeI5HJ6i$X4q{Q%071eqWtFZaqr#{magqIY6 z?6wL*{Ks7iT|uagD&xtk5F4L!^f*yFbDY)r`I@txp~4uhSxvI!N)A{q{2t^0VK=;` z--Ye=Gu|8CbboA<&`!METTCdH(W!N7ny>0>Vr%|Y=wr&g*Z80=?wYU;{{^wF*pY6= zcY?Y(0;!*yEuP{U8CZ{L9a^cS_btPN+@f*AnRmIUy~MiV;-7~-cMMWiPA`vAavTkr-?ou>XLmSlZrO=EO^En z?y2@P^x$SBc9Nd-GF^5U_{lVjKO{eR@hb4VFb(Un&yBcCW_;+DMjj3nEHQVIGQhV& z*ih0kF#F0*Y4t7FWa73(&ZU|%isH?+|1sashJjYbiQR$J>Pv5Vb*l9tDu0cZHX6K} zRVKBXkJk3ES)*Rb#I&(D()m-SE*#0md=Hg;L4ABZNzhb+v=bZ&&nQS$x_D^0qx%$9 zay%Pk2R9biV-B|+f^pIXs!vF14n@_WqhgT>1?vUch_||$1H*4;#Y;=M7{j|2pDd|Y z&9>H8x51Ye)iGL*#>OH)!aspwT-?#E>qMLTatsZ$r3K}hy#xQ}jtQFQ7!GLVr>;fS zh}CwqJ!RQ=)B*w@GtqgViN1v`{|AWrx3&#=E*Ay78j?h~RnjgPi>idm{OqVnh@wTK zXMD1OLzeO{wr_=V@RH9rZ*+i%$k19vPh@XC`8#!x^K+)9P(qb*$kmn=qM<L~Z_G zopaD1)IMMF>=1nTIk|y>tb|1C-lm~ew@}fq>b+j%2Vn?d%y^wPVNO6Z?~+WAP!scP z`t+;s`p0vn<~#ya#N<~pp=T8?5O3Hj6;o0$`%L9#6Jk$*%10xN_0Q$n(^Oa5I$hb` z#n0ok>Eg^DFXBB*Zr@y>knfZ;#avnurG>$Gn^t;=DE1IR-`2N#E=g-fj zoIeU3!%L`)i46j=`5^XFxoL!AfTWMkuz@0}`e9-kg=U0RMlO`I3CiCeS>QQhg+>v2 z)n}!BDQ##@9mB_k9z5Ge|43v#nWXBvIGsP}~ z*%y>HU^HLY?g0FMC6Oci$Y(x|_uL^EzsCs{{Egh={$8FW0;%M*dJy7-5+yqclE0qD z^3`1Ml_gx3t*rW%Dq8tbw`7oe63=0A+6rE>8MCVOqz;PDi!Ik#4UP={W;@mzy4rqC zygm?!oiG#}Qlw=?ksHUyL@ERhI#9ZG7Jt?NeYQ3@irfM~B$Vl~otD||uU9>CL5)JtJB>$JR+rN`!^RFqR{j#+l+M`d zrrDdJ{C=XA+8$0?OATdHcl~@Da`e7&)G^Wyq30CUvfv^mb0p?3J9Y%H!r={_RBOU==aHje}wG1(66{bd~xU&7X&@a{BfDjBCT+$BS!ontj6|3ah&tAzDN*~&I*e# zS%@9`0}eM}8J}%tStXkX#Mf%fCbiIsNf^fd-H~jmY^fMyIn_(^jb`SqDxgL796|jz zsHGwZSCHH-!gcCJmAiu=(lZ+!8;l1&GhDRu5>QtH|3OIS9Hdc+{I0=HR{dkS6A9X; 
zjo%Rayn5Lq?)4h+=z2ufw)LTPx?)w!Vf)(*y?2yh2jigiypby_>7o1NV`o@sPrjjX z><@Dkj-qiKS2T8akX5fpC!K#;D4d&B;@pM(tCulNH4ekKG&2b+MybG>yB7tuqUu5X z@rSO2=IMC)=p6frnK7W=2u+tOoMMU}NHof-eQAl`orU`DQ$Y&Kp{ohjxfr?Zv10kp zo2bn?J#Geic_=+bW2s|Xa=a`MbK89{Qw`g=3OVyiksWrH$y_E?NJD#Z-Lrs%SA&?0 z@qy_lB6oaMo0>_ws4Za!d;=t(ZZf@n#$iCu#aeEjAQDrT3%_8NUQtIPP-ZR?OE4*@ zJqRBxTHbCPbNFKn)08xp&v>1Y(&N&rH=?+#25;KSpCeFo0hfKr8Y5G_gD@RdHnLru zn$uQujhh0?{1u0X z-~ryUxbSM|I^I@BzoE;MnAPJ1`|?Yc8;nWry|9=QB8#X*lJ)wDs}=isv!K($*g($? zj6_Da+s&@z#%0r30Ag$nHk({N{e>r|$Ezv>tDNy!zmRq@JnFmsbi0uDWg(RokUwh9 zyUzEaCiPBoae#6S@a zr}T%OpDkZ4AL{frjf^3FwBOs%!&yElHV0yYbOe-NtS95OvY{-G@G?aE?;|$0a^(lv zp(RAcX$}K3T|CiRiF-HNLrd$@y4?kdghnsylDK54=?Ey^-5&chpS`lDd98o-iW%W# z&W8+wu17glS`)nrPeugkk;a)Smz;ujs%kzLD-oT*?SMaS4x4&?jnM27&=da1T@~-Z z!CXwa`SQ`hz2H)8#y0sO%pjRnOheD9!_+4?MUCwiSrk)irk=R>EciUHaJy37?toSr z0l`ZOll1M(#8sTQB4Heush6|m(s?srckQN=Y(kHnA{Su9FVt2Fo%t6t33753@LBac zGHSTO{lK{!R{Yry&DVTfox8@OMFrJI|o0%5CSb1P|_X2(&|{Q*a7rnUAZ`m@d58)-3;3dO zc(V1~mZx!3esck#9UTQE2s4QxU~i1p{^6%MkEN}S9(_Q`*Jn{mb)_s8H(kSDxf z@0hQu0wjG<+Bg$KwUp=!wiC+-2&<62=v`VS4i)s~>{){9gj2SsC_?RbFDn`wYm&z` zJ)uA{6pplJ2NHVeau&+x^!XLd2DOF1 z*_iHM_|4b_8$qeal#fk6tM)Log0uwe=(D6zjjVjLNqg;yS#Uo}LqP2HhI+iEL^nlE zX8ec~vH{-}=_vRitLf}xVp`&}C@&DyBNg^?;6SEe>E$X^viyJ7iB_s83rIuW5RLMzBG88r`D&k zgMM}x`~?^p$Oru|V?);e-Pn+To#j8}bT%pjwg(I--KW&uVUpJYhTJ3pP=xD{2^`j1 zsLdPHB&FDpNGO5L-=EKKr0v#T05N%EbIdRLH@@AqVwKjerYL-lFQ&6ICA8BN-yOW5 zZf+V%+OefKxiOS7oYT2^YgU^mR(~5lol|NqD!uBDRCI9YN-))TlbSv1Eae9Hu6&Jr z<021l>h_AZOSqg>cn=8dDsS{9c4wKjjN3#`(7u_jJ1j5A$?!N&TkET*nmGqYIWb-p z*W3}>>Mk0upX+g#Km{8csXBd+dZUVX;c0aJvbmkKmp{qU=hok5WOKV-hAg}xM(J%V zHY$D1UfUM0mOdQ0aAAxY@MzGgS-dtEKKyp@V6pu!?x;Qi?&9mg zauIZa#F~M@V|m_f*knHT4gpAq{rgoh*gBT|ph zu?n-j3(Te?;9(_1UG9(%%+8YHW!=&ulVr2YkoedoKeo!}PeoU*#6hJ{O9!i{}Y$1 zR;p#oy7+Wk67fP3oR8Pbd*c7Z#hSd*`o$FKA1;<3CtIkdi9cH{1KT<)D5pskVS1y< zr8s7vV1Zhxm&)@1$_IQ7Y_s>ys04-=nmp{=-Nu#vl;1)Yn4XT?s!spz<(Gg`x2 z1jJWYNA5E@?v700fuY&)e+*=^Jr_WxO0oI0Bz@2OaD>1QJatgZs4d@K{fK7dtmkgN 
znCA!H>$ez|aIS@}eAaDG)StV7)g`yZtuUOA1}dY({)apCbOEY8? zn-7C}Qas5=UDBzpmJ?*eMMm~@-4y18LDL+X(~!mmkS-VahXu3VMc-j@6ykE&ORqh5 zTwSdQmZ2;@lOQ`p!;eK}3 z51s$v=98yn>y#3ePp;z0L(*)O+o{j{RcdTA*zM`#0`;`E3BJ(FdRr)rytZ=Ynb$T8 zt!itG_vmmJOmxoU4*veJ{J*s&v;Qw);bi!K!y;q1$%W8;sa78ba6Z7wPohW%16*tj z%=rW?b9Yx8JA4EgZX(nQ|Na`+WR+!;AQZi9-@(m|6Y7@d%Qx%Y=NHTTC=#i6-_9?m zlg+x@8g_6~yX|(WgCnf3pTIH#{hLHkj_t;<>sNx7mNyqZtPKK75UncZ>`YN-DP z5Cxzz$QqJA=pKN3AK)G!$W9?(ey*iQ6nH&YJz+oK{1QM)izWuZQa{rkfIKfzsirZ; z5`*?okXVJHc0pS*C0!~i6V>c|xdCCYN;l{*O=#27rubcbzCJaWCBXa;9Y8xP0J!XI z!=JN*`m=deDO%(`K=(Y-r}Qmm_)OA#fqIR<{+dR{C9t7n@SJOU+wp($*bzQumn;)RSg4lG6uty+Gh%njXN9ol&ZHaYi*P9PNp> zAl*l6YR>OEGhFh%`f>ZwIQI>yj+(I!bha~$I%BY7drH42nDiBJyYeaDw=OSz?CZb1 z(?72&FMVz6zdI(spIo2(<5$1=jlV&&Kk#Iq{K@OTgvP&NWS@PE9;v0>GvaO8wyZxL zhm@T?*xMeNi%EjFs68m|#G#Bkxy?gA{yI-$cKgrwBdc*T_x!c$7kK~W%g6Em2bf`F80b0cYXw#+9{uVd%Y#I8vVUEgsyn^dJw-jeLzQjagvm zjeO%zFHRo5+q*~U|DVRL1FDH_U4uXnq$)*FB!KjmO0OclDJ^tJAOWN#gwPRC5v3}< z69kkl2vQUR0s@K%0;1Fdf*@6jv`BmLo_pRp@A&Roch=0BJ+uE^)}Hl$-}ld&JzK!_ znQe?(Yw>N5-Sr6vjvo&IEss@5{DmL3t1>Mc*XK5C)@GmcIk`Mb#S5qByf44+a`HJl zpkI^AI71AxW>Gc*MYBu^yj)kTyX-Xj zPzr{m1X|WJj18y)0F~CM}f^LUfLWu zb1C<7rH4ks48@J%9ZD3Bu1>NU2Kd2$KJidmT`Bvt9MU@#F|mX?UB$Vqr+u4lBRP9E z9i@iX_SN5yMyZhjQjh0J4>fBqGwKR0m7*sEMfyj0U3!@-uI4+l2;({UW3>6YBr*Y; zxBUv5(^u)lDmi;fi{L$@6uhURi*GmsW=UsIsi4mK~;*1!{n-aR-@M+MBQSVaAfb#<=oGx zjL|gdBJ*06?jPUiGuy1MXdl}OR@``==z^=8{QRhu^ik9J-OabJ(+?Urs>Rz1!#R;9W=Zpk2*iMrm4KjI;tJ9 zeb#Z>A?#?BpZ<2z7Cp}h9X?#;R}t24O3-4Isc0Kp4NDkSE_1sit{=J)oi}< zLdaVN3UoFUa520sf4M4~b&^gW$TQkz@0H}&wK?~!?H52RwB6XSulcyoaLa^y zKl%3iI28NXlb=}W$9xl0RK|R%rkmH59IyKEG+T>^;g-%=TSPOOBd*{ z9J-=nZpCB}SZBDni@8w$0J#@!2dG?;gd4S$q0DiQ={dSilUyMHU>so<{dxr;G??)Ff zNp})T2E$r-67varp9gxf_;&|Nt1z z$VDY>Z%u0te9d=N062&Mj3 zvbH3xcr&EUsC%pES#d$cEMf9{5BBk)M3i3ih{E#&zVTn8HVq#xyjRP2Z>ggs4A6X~ z96!2S@{aRpuNlTltKQ0Q>bnPOU5)E1%snxXe^9cNrw}f_p#DUFB2nvGPV}UwLR9!! 
zurxr>Y))p~A5Zq)%cTD>4Q(M)m=IOAXbY(hSARW+9PaUewsxZgUf0@JetsvI z*m?3@yj@^G7BMYDjhYKyc63q)hmDomE zM@?wx+p*Q1Zv2>AE4HH1$NtQq0R+6~IZCz6LW8$&(A3k=dl-z8##Q*7Gc<2YM8rCW zm)%canY~Cm(D7LzxZN$@Hn~ZER^&xJi>`DqtJ2quY(KL~WIVP0yP^W?o`9|DvBa0P zNM!}Xg(U{4sp+xwL4ocV7(m~p(#(<>9MX|}aHTDFzgUyXuKP{(MeKQed3MH`L(UOP z02HCZuNAD6s4W|t@#2~bfTt@nPQDIG8L`VD^gd}W{I%u|*RM;-Hc>Y&cTkfh z7hXN^m4PW0Sx7f9KhMn zx5qOst0NMz)J37xXhRr5=q_!s-hP3T>DB z$dWj}jKe4ot3+7uW)@@a-MN>y2=@+dfQUcU4_4`X3#=)h=LV(KcOYyeTl#Ik7=L11 zueI7&^1O?!wF)BEcLdr<9xgxOyo@fkuRRuX4bRiQTq-5_s8{eTfh)4UVHl^SPhicPjW-W?83LV`SN z`>z#)fM6(vqGfeFya;GK50nS1hI}Myo75#hzMe z4s#&TY9j{u<{$b?ib)SSkx+-ON<8@^e!D!&$JCU6+^^&^e@N4rtit&7rt!BUA8h)u8nDK(!!BAP0|w3q)lNSW>~ z`tr3FQ>(F3@?GVRp27LU5FG-#SQ6JCK?*ur;=ujXeH7C@28TK0ITOOq8c!y6d+7|X zlN8kzBL3eoz6?;v-zrHKJWdE@N_PY zNq``D0oZt#T|dvZ?s)=LndwTR%uDZA2cMkas8XH6?utZ-TDHiw4{?##TMVj9Xy|wM z5<4-4Zv@Xfiz=^)IZ3`13{_q&zy5N_X*?Ml^OP^x+E7|iZOmrYK$Qi{9VK(lVT6~X z*tt71(PPQ>Ve4}-+6Alnc=of8$1omw(u7n!?H&$UQ@tX2(T6)y^kqhQ%hng;lRHFs zQnex@WDOU$M*kST*gQ3i_ij`?hs>WpXTk_8)ixJTy)0zin1C{|CAgxQIq#pz90Uq- z(vLzm&N(}AND7@J=n8Y;fXW@=Bq!H;;<`Z{r+Ps(McCWqdE%}{;ImW zo7oC=`n;@wY?7Ghcu8{pJTMP3HCEFNo-1sRF`pup=^{P{F|$-F^euMmLDj#ihIdP< zeTARgar(jD_Qgf*OJG>9TUh@(dHV1RclVM-vAc3|2j8Rx0*cuZXlzy{$7*@?C^Zzh zBT||ehr{LG9MKo$WT)8yNc@P#opFaPcgLyt=&4Okugws-YV5%9TC;DqoCzlU`cQjo zqk?7Qd_@j9fGjgTSDmy6K8fr*I25X__E}$Es^oRrl66KYyvT|dz+xrC8649qjk@7M z!CaC}!5FKk`<|70&kPI?YM&=@a<}O-%BXvrxzDT>GwBY0F;>l;R5kh*rp9Lpo7sDO zPnMXs=sqNz7ub-OZ*N$A^WerpqgF~Dl-BKp$n?x+{e$j>UHa8hwFan#r~lN;5Uk+` zRrPl4$j+tdZ}g=rLdPuiH#Zb#31(Jp9tT76_-T$~x&ZRtQNWYhHafmt8V=YRYMDW1+T%|xRQ0%G_1O$PC0gs)Lecfk)zSXjcktAiez1zozy5I}n8YMnu+TXw3i85Cr-kNa#;8x`xDi z`vFx{C}V>4^7r=f1HylG1MTYJLZA!~_?HF-$|8VvKrj$WIUInp<{lwvN?KO`D$q_v zP97tNRz$*~7&KS`i9w=3C^Q5F1%Z%AFa+fSS5r|?#JGUaAbFUAtDL-|JQ#{`LAfH~ zU^o4kZ{R;RMn3`)PYA-JF^mwf N9HX$Xwy6%|{{U$S7{&kq literal 0 HcmV?d00001 diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet 
b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet index b0ffd0e..842d1c1 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet @@ -1,17 +1,15 @@ { name: 'Natural Language Codesearch Ranking (codesearchnet_adv)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Ranking (codesearchnet_adv) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal rank', - 'python', - 'robustness', - 'covariate shift', + 'mean reciprocal rank', + 'python', + 'robustness', + 'covariate shift', ], authors: [ @@ -24,7 +22,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -34,18 +32,14 @@ evaluation_metrics: [ { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. 
- finetuning: { + finetuning: { objective: 'maximum_likelihood', }, }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet index b62ffb9..9c98587 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Ranking (codesearchnet_go)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Ranking (codesearchnet_go) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal rank', - 'go', - 'cross-lingual', + 'mean reciprocal rank', + 'go', + 'cross-lingual', ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_go/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -33,18 +31,14 @@ evaluation_metrics: [ { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. 
But, the task creator can - // also provide a custom prompt preparation in the task's Python class. - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet index 764d852..53ee70d 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Ranking (codesearchnet_java)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Ranking (codesearchnet_java) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal rank', - 'java', - 'cross-lingual' + 'mean reciprocal rank', + 'java', + 'cross-lingual' ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_java/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -33,18 +31,14 @@ evaluation_metrics: [ { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. 
- // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet index 5b6d930..ef89c60 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Ranking (codesearchnet_javascript)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Ranking (codesearchnet_javascript) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal rank', - 'javascript', - 'cross-lingual', + 'mean reciprocal rank', + 'javascript', + 'cross-lingual', ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_javascript/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -33,18 +31,14 @@ evaluation_metrics: [ { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A recipe for preparing the model to perform the 
task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet index 068617b..fb59296 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Ranking (codesearchnet_php)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Ranking (codesearchnet_php) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal rank', - 'php', - 'cross-lingual', + 'mean reciprocal rank', + 'php', + 'cross-lingual', ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_php/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -33,18 +31,14 @@ evaluation_metrics: [ { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A 
recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet index 9e5bd78..880a7fb 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet @@ -1,16 +1,14 @@ { name: 'Natural Language Codesearch Ranking (codesearchnet_ruby)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Ranking (codesearchnet_ruby) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal rank', - 'ruby', - 'cross-lingual', + 'mean reciprocal rank', + 'ruby', + 'cross-lingual', ], authors: [ @@ -23,7 +21,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_ruby/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -33,18 +31,14 @@ evaluation_metrics: [ { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet index a4a6d73..80355cb 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet @@ -1,14 +1,10 @@ { name: 'Natural Language Codesearch Ranking', - // @TODO: Add a description of the task - description: 'Natural Language Codesearch Ranking aims to measure the generalization capabilites of language models in code understanding using mean reciprocal ranking as an evaluation task. 
It includes multiple subtasks to measure three different types of generalizations', - - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal ranking', + 'mean reciprocal ranking', ], authors: [ @@ -26,7 +22,6 @@ 'codesearchnet_java', 'codesearchnet_javascript', 'codesearchnet_php', - 'statcodesearch', - + 'statcodesearch', ], } \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/doc.md b/src/genbench/tasks/nl_codesearch_mrr/doc.md index a9933c9..3fc22bd 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/doc.md +++ b/src/genbench/tasks/nl_codesearch_mrr/doc.md @@ -4,14 +4,14 @@ Language models can serve as a valuable tool for software developers to increase ## Examples Given n number of code comment pairs (1 true pair and n-1 distractor pair where a comment has been matched with a random code snippet), calculate the MRR score. -true sample: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ -distractor sample: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . 
startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} +**true sample**: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ +**distractor sample**: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . 
get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} ## Data Source **CodeSearchNet** : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ **CodeSearchNet Adv** : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ **WebQuery** : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ -**StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors \ +**StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors During evaluation for each true code-comment pair we create n number of distractors where the comment is matched with a random code snippet. 
The distractor samples are sampled consistently by setting the random seed in the get_dataset_raw function @@ -34,16 +34,6 @@ TBD TBD ## Further References -@article{husain2019codesearchnet, - title={Codesearchnet challenge: Evaluating the state of semantic code search}, - author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, - journal={arXiv preprint arXiv:1909.09436}, - year={2019} -} \ -@article{Lu2021CodeXGLUEAM, - title={CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation}, - author={Shuai Lu and Daya Guo and Shuo Ren and Junjie Huang and Alexey Svyatkovskiy and Ambrosio Blanco and Colin Clement and Dawn Drain and Daxin Jiang and Duyu Tang and Ge Li and Lidong Zhou and Linjun Shou and Long Zhou and Michele Tufano and Ming Gong and Ming Zhou and Nan Duan and Neel Sundaresan and Shao Kun Deng and Shengyu Fu and Shujie Liu}, - journal={ArXiv}, - year={2021}, - volume={abs/2102.04664} -*} +Husain, H., Wu, H. H., Gazit, T., Allamanis, M., & Brockschmidt, M. (2019). Codesearchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436. + +Lu, S., Guo, D., Ren, S., Huang, J., Svyatkovskiy, A., Blanco, A., Shujie, L. I. U. (2021, June). CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1). 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet index de5beb9..27927bb 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet @@ -1,17 +1,15 @@ { name: 'Natural Language Codesearch Ranking (statcodesearch)', - // @TODO: Add a description of the task description: 'Natural Language Codesearch Ranking (statcodesearch) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual and domain generalization', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal rank', - 'r', - 'cross-lingual', - 'domain-shift' + 'mean reciprocal rank', + 'r', + 'cross-lingual', + 'domain-shift' ], authors: [ @@ -34,17 +32,13 @@ evaluation_metrics: [ { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A recipe for preparing the model to perform the task by configuring its prompt. - // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. 
finetuning: { objective: 'maximum_likelihood', }, diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet index bb0eec7..d099231 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet @@ -1,17 +1,15 @@ { name: 'Natural Language Codesearch Ranking (webquery)', - // @TODO: Add a description of the task - description: 'Natural Language Codesearch Ranking (codesearchnet_javascript) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift', + description: 'Natural Language Codesearch Ranking (webquery) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift', - // @TODO: Add a list of keywords that describe the task keywords: [ 'codesearch', 'natural language query', - 'mean reciprocal rank', - 'python', - 'robustness', - 'covariate shift', + 'mean reciprocal rank', + 'python', + 'robustness', + 'covariate shift', ], authors: [ @@ -24,7 +22,7 @@ data_source: { type: 'manual', test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/webquery/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', }, has_validation_set: false, @@ -34,18 +32,14 @@ evaluation_metrics: [ { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, ], preparation_strategies: { - // A recipe for preparing the model to perform the task by configuring its prompt. 
- // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. - finetuning: { + finetuning: { objective: 'maximum_likelihood', }, }, From caa63fbf37733c76b218db5fa08bb2e67ac07b6c Mon Sep 17 00:00:00 2001 From: Amirhossein Kazemnejad <2122102+kazemnejad@users.noreply.github.com> Date: Wed, 2 Aug 2023 07:21:30 -0400 Subject: [PATCH 25/57] Add dash to legal characters --- .github/workflows/task_submission_ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/task_submission_ci.yml b/.github/workflows/task_submission_ci.yml index b7731a9..0539a7a 100644 --- a/.github/workflows/task_submission_ci.yml +++ b/.github/workflows/task_submission_ci.yml @@ -59,7 +59,7 @@ jobs: - name: Parse the Task ID from PR's title id: pr_task_id run: | - task_id=$(echo '${{ github.event.pull_request.title }}' | sed -n -e 's/^\[Task Submission\][[:alnum:][:space:]()_]\+[[:space:]]*(`\([^`]*\)`)[[:space:]]*.*/\1/p') + task_id=$(echo '${{ github.event.pull_request.title }}' | sed -n -e 's/^\[Task Submission\][[:alnum:][:space:]()_-]\+[[:space:]]*(`\([^`]*\)`)[[:space:]]*.*/\1/p') echo "Task ID: $task_id" echo "task_id=$task_id" >> $GITHUB_OUTPUT shell: bash From 02f2082702d50c5820b39b5d970f7f4ca1ca98f4 Mon Sep 17 00:00:00 2001 From: drndr Date: Wed, 2 Aug 2023 14:05:25 +0200 Subject: [PATCH 26/57] remove clf for mrr branch --- .../GenBench Evaluation Card.pdf | Bin 72032 -> 0 bytes .../tasks/nl_codesearch_clf/__init__.py | 5 -- .../codesearchnet_adv/__init__.py | 0 .../codesearchnet_adv/config.jsonnet | 56 ------------------ .../codesearchnet_adv/doc.md | 19 ------ .../codesearchnet_adv/task.py | 5 -- .../codesearchnet_go/__init__.py | 0 .../codesearchnet_go/config.jsonnet | 54 ----------------- .../nl_codesearch_clf/codesearchnet_go/doc.md | 19 ------ .../codesearchnet_go/task.py | 5 
-- .../codesearchnet_java/__init__.py | 0 .../codesearchnet_java/config.jsonnet | 54 ----------------- .../codesearchnet_java/doc.md | 19 ------ .../codesearchnet_java/task.py | 5 -- .../codesearchnet_javascript/__init__.py | 0 .../codesearchnet_javascript/config.jsonnet | 54 ----------------- .../codesearchnet_javascript/doc.md | 19 ------ .../codesearchnet_javascript/task.py | 5 -- .../codesearchnet_php/__init__.py | 0 .../codesearchnet_php/config.jsonnet | 54 ----------------- .../codesearchnet_php/doc.md | 19 ------ .../codesearchnet_php/task.py | 5 -- .../codesearchnet_ruby/__init__.py | 0 .../codesearchnet_ruby/config.jsonnet | 54 ----------------- .../codesearchnet_ruby/doc.md | 19 ------ .../codesearchnet_ruby/task.py | 5 -- .../tasks/nl_codesearch_clf/config.jsonnet | 32 ---------- src/genbench/tasks/nl_codesearch_clf/doc.md | 39 ------------ .../statcodesearch/__init__.py | 0 .../statcodesearch/config.jsonnet | 55 ----------------- .../nl_codesearch_clf/statcodesearch/doc.md | 19 ------ .../nl_codesearch_clf/statcodesearch/task.py | 5 -- .../nl_codesearch_clf/webquery/__init__.py | 0 .../nl_codesearch_clf/webquery/config.jsonnet | 55 ----------------- .../tasks/nl_codesearch_clf/webquery/doc.md | 19 ------ .../tasks/nl_codesearch_clf/webquery/task.py | 5 -- 36 files changed, 704 deletions(-) delete mode 100644 src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf delete mode 100644 src/genbench/tasks/nl_codesearch_clf/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet delete mode 
100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/config.jsonnet delete mode 100644 src/genbench/tasks/nl_codesearch_clf/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet delete mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py 
delete mode 100644 src/genbench/tasks/nl_codesearch_clf/webquery/__init__.py delete mode 100644 src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet delete mode 100644 src/genbench/tasks/nl_codesearch_clf/webquery/doc.md delete mode 100644 src/genbench/tasks/nl_codesearch_clf/webquery/task.py diff --git a/src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf b/src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf deleted file mode 100644 index 3d4e16e3e1eb452ad3de5bf0c25dca0cae7c8c2d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 72032 zcmc$_Q*>qDx-S~rc4lnbPAV1Kww;P?+p5^MZKq<}wr~B<+WWM%+d2>D;kJ9{Yy+KR z{xH7TAHGf|FDg#QM9&68*7KbI1;YYh1lSo`!tn9}7^E$1O$;0@JWPxMOn*xN4rWFc zW&pzvfEIw0jTOMe$^y^WU_&e_@!|nmL;TIN1RV;uhAnd;H zXyXLKZH-Ob{#LL6 zSh)UGHnscP!U#|VFeuvDIscvEf88tnB^1E=m&d=;x&AMrxc-A221!wX7B4%8DZ2@m z0UL{{2@|J*sevJ*p~+tf7#R%=n3xTX*!X!lxlD~1O&I@jZOqQW#lgg4YGi0^z{bSJ z!f3+A%E`vB^N$8j&WK>q?Eg06|7bHM z0Lxzo{+Aiq+5e|Z`v0h;k6@&)ua72c*7dV=QM9~VXlf`(sq1%=A zkodBYUKXVuV*TaTMZ7Q;N`gcv*k~*cACT8(ocDWRXEG%2#tkdUP~V!6fj17u2z-SY zjmjMgNk_KEED^J19wqiSl!z&!sQO4FGn7&KQ^X|30m-Mgl1Xg50&7{l41sq}S9WLm zqZf_#U{x1!DvQ^TzO32-tbl^(6UGcuR*Dq&2_jNp2+SXHQOwdf=80~4qM8fFx+eEW zhUi1$`$JRY8}#>ZPSUpm1K^)*H4(H7bIj~ZxE{D%l8Ke03-xfPJ*{F9PhX@;SPpPm zR41L@d)lrk`DUkS%n+#N*2-wGC39#GKqUpQoC8#Drj_pe(U-T?^J(mp2Ff6d#N6Fi z^SpEkDyq#Wkz^>kVkjpIYMsCt2#l=U9ZDgM3(*^pOpKma5XzyfKj_1DQ?PB;I&Im{ zYlQSNnPJ1h7bS19WGnJ>)LiP4^Lq)qs5BRt4{A@M9zNiABz4|NSY-CQuSHG&`265tQZ&qUQ6VT-Bz%QEOkoum_N zsqpQ`dEY<PHNltWb)^NMT6B;lf0yxN_n=EgwL#zo)yG&@cRilcZ()yl1q&8)tW7fk03{B4{9)YK*+k4f) zjJMG&aoDm!1Ar|@0e0WAVnx@#kuL0UU_E&Zs-#~acduU@2Yub(j#Cq zw(gwcqxNif-7JzwJHoAjH48r;g+8 zAEe$Nri=>Vr}I2IEK(x2NQd?gW2U<}VjlarExe_Vg`_E&vVgqa=@Go<#kge>^B>>o zt9-6f9{U_5c7dKuIKf>ff1gk5YmzNu-!zQkJpZZ1>{F$N+}Zt!AhC(^WO=(%|Dfdg zZKluD^(|jAq*Rc$AM1ls;44p>qRG_Yjo$B1SAHQNl9prepKx>@0s5 z0RM_y%m5}PMwY*eo_`hpSuy>0AZKD`{2My|{VzAV1za9?4TByI?N_Q{UqnYoM`Np$ 
zA5foUfG5?}m8=5|J)J70VwUjl)(&*4t&w=o>$c}_v#rcY(s!-yrLUzNXwWHK8&?Y} zOsTklNS3JV%s?qHKn1-~?-cmX-rnKgL&!{j7I*+p=sTf+<-{K#UmMvlDnM&Y1OQxU zkV6LX;g)xBf+}y|18;N!*#aZm3L(?e10iCj-t@rf0EmIO@*r1m@}`019ULHp=CF-* zw7ZD(wRl#Xa(wZDrBx?^Yz+*Anguq2=VbvtmDLC_Phw+=Q0X9&L- ze5|)r;q@ViBj_MMpcxQ}TJ%xuw~M_iYxn@5oCeUw(G8%a?Ta))oA-?BGk}VS=V*|*l79CC>x*nXT;G5#D0n7Lv)2G`PY6Vt*@=;pA7HF9;*Ed%>7u)o5 z@8aZ>@Lr%MP3g8NE5Duje4FlDi|TIx0>3di#ssRpzi|R?Z3BM+3YdZX{*Jkafcol) z@qMA@S#Jdj`lfz%m;I(*-M2$v{Okfu`hCX~?cPmlBl(>Qh;r=i9oKt&?S1(czvt3_ z`_g`sjDIJed~3xAbQJ98dKCA-eF=Evp{~rm+eMlkwGZU92yPy_V`hCXD-%8}pN+S2 zYJS(qVncXsfm@hqjrhhQ!>Isy`d4oI8K}DY&K)weT@gkeLAgS7Irs;ByJ-SlY-nqL z7kF;b(}W%$gT76U38>^9AHH5E-A${~FZo(y&)ma<5+UVryxo%tkofp9Jl}I`26lcK zGx;SBtb#vXfVkOC6YB4S2z~F1gSX{|--n(skV5S!e{-Phf!kYt<3bCZ`+W=b1IbqS zl3bbDItF|Jy8~shfBSg>$#(b(`T$Y95XwXKp1u&iOzOxU`}Q7>(trES#Or(m=aPtg z{d4*T1_#Qj{o8`D@wf8c;{)=Jm*VaV^=sL$YJ({5*z9Nq-7V*<=O)(<2kHR4URW>9 zsSd^4Z2RVFU#5$-jpF^O^#yLA*$0B9;~L~3@g1N2C$*E#^t#Gulk=H9;njRG??Un) z#50NQfhY2$n;7G%Y0nau1jWZnxT+uBI0tUqCa2vV0@j=l4~Y) zGhoJkZQsqOsoY+>2gKis2y8u4OqHL-&p~t&1$S2HW6mHbo|ETYk3Bbzhj5;0$e07^ zGAVN8zlAU-(X5d!&%>u?9xM8R+7X^oHYLLCRA~r2Dn8<6pSU)>kPRh9*pFPr@F?g+ zqFOs2LO}6u5Id;?m!K`iDcbC)p7@w|lIIXC&_ARuTJc49_ZPv>%N|>3#P3nYHZqP)pQ02MEGhDkqzD}ta`xvR#-PD4@K869mY4tY2 zj?yB<4Y!ya<0e8=fTnRx37*nA@*ARQdCFFYh0?0iuXSXrz%_#F;SbtdpG@SFL%l6y z(iIV~^SsDvhR1uDaUsr&_$e)NejMGClx|V!F27WgmraBS$xbct54++mcomu8MK72| zpJEG8{3qj+-|h0EpU}p8F&Sm9#itk)#i@m~e3)-{JE~YO$U|Piw~PJ(+7-3L*}BK? 
zT2zRkr3UJ_p}fJlaw;tf$edcNa{Hi1Rw|aVzj>I~SpHCLz>u912Ix2?pfswu54Dlj z7Bm{jDWsHLLP_{U*}{Q?(_(*C@m$H7)Qq1UTr) z8k%l$Ha_IvTd3+x4?AFt$nzh9YPT;4L&LW-SZJ=9_Z#Wj_nWjxPy;pip)nNuWjzz3 z9qvVAU#pLo^hO62rP7aT@BuPzmu?n}1&f}ds8A+(gudk-8n^;B`&ud0+*!8c1%!}> zk4*bzjt)%G*4dVr>`#?c;SwY|x$QmudW1)a$MYTzkDv7go(fH8wzQ7}zTS5JuJmBu zE1FyiIpERo`rBPb?JPTebFlZ85@h9qJTi`P*KE$T)6W%?KF2)bkBh52A9 z5lU*MwkRAE2CF>FR0|%cI6;X<^kYpgo1Jg!Shug^LWYQ0?{#*&P?#}sy%qdNv-O)@ zatWQR)@LH~NPcwp$~E{I=L?B%wRASY=quqxKiC2_9>gZc>-Hv2MPZX;n=2J79wQMg z>kdJ}+AIoc{Ru2lgUA*n0|SU8*6kR?`MF5bahPX_gfXh)g0SRxNL81?lJ8)->2}`t z<@q6)pIR9Ag=Tr!HH-Ig?$JY!&ycn-yBJ|cQ#NHJVwVmD2NPmbF13%RB<^w>K)FgD zQ*B03NmUm%ASU>+JfmaW7rCFu+UM97PW^Z|?HT$w-nO}B`qpWQ63Q_ILhfPS9WV*T zeBZCvBVvA0uDbFD8$@LW0qJ@3 z`aNPhVVW|8`H%DDbzgMtD!ti;+UU%mRH|4W%SPTr!>R8XTHW1zp3db<1m*NshBsyp zK&D2eXcn?<;1P*h8V5-aE+}CfTrniY#?3`3}T;2nbMMGd=C>Q@}o2ej%4=WF35=?zz;^N!^VjUMtfd1wn| z5K8L6WG#(vpP=w){oc{|+mL``C^{QO_gIp|=fv#@9N)oO2k3p&G z4@ACuh_46fH)Uz^vPSaJ_yLLm6&+_6ID%(4g!knY0vYiHLshlGDS>yL4 z;3e&153$dZ;xwF(*Lrv_#&)X>O>x$Hz(FHO13=#@{hb)L;M&s*BO=W_k4-CkHLCaB zCR4kagRgr3(8sI^fv_ov8!VHdjIQ^*Z8_t3lnqu~&>x*X_hRZNB z%&rcNwI$qkmNS(j{}nRpm|p;Y{p_+5PlsF8eL0AI%^Us0Nk># zz7g)-C~N%XIzz(M78|)Z(YkAh(t2@CdnK*G9HabMH|fQzIp%A3zEk+A7QNrGWzDCj zkI{2zTs|{}q z$)jR%cKh~{gF7usKg9lkv`jJ^zQk^}0h84i6>(OXBaDkU!Wh_F+LCPB8@mR{`({cE z?xO-2gS0Tq`kUs36XNL*5brl0)`)NG9pI4bt8R$xM+RsyJ5 zxL7eu6lc(IJ%#OveMcY*r5MO0KVG^U#q1*5=FSF;rA1+jChX#*2_quoOL3}4ZGMFE zb#;9vLBV<>_B(@y4dHc2_{*IgC;h-qv)2)FqnI7KzPz{MJW?WjzaB9oLmMu5Unt;B zU>yj(bhyjC(IqC`Ts(q7Vmiio@%<>of3cw$`U+NpP~ykVUa643>g2n+-|es&n-dgx4M-QWgrIHIl4*6)_qC% zKC@}Dbua%82)}!pfP!+l8r_5nkD%i+wVi1nozBl9J)MX^7DRzjdYVaf>%y~;NcyOQ z&`iU~iu{#3?yV3+XBoT{X1`a!@#agpGts5!$#Z7L|31j1trK?u%&WezC5wsv#{|m- z$-AyBgo?z-0mO;&RwD6web@qlIb43e|BYF^BF^33+E0t{s^x&s;f$cRD%?!%aQ3pV zB$Qc65lF5vwx)cWDTl^cHwR^9L<$MgHhp zMY@LsGxQmbDzUKC1Di3!=pmp6$KG+?tK6T5e3z^}fG$I* zzj3>to-0k139hrI79!%mNL;ZPOhh+ekA#AQb5^b0UryElfFg7uJOmAf`2V#7|KDdmQ z5us|~F1;qR+6-dG|3gSn`fw!%rh^t#AtV41woc%P?k6LEtECikqyJ~O_;&xtPLzT# 
zvR1@q0&IEY_K`Gff?y~`cBWo;_MlCA`NP4kR^KPVn$eVnJ29sIr1q~!!wEIj(T?ZV z$`_cJ1r?IQSYV1|N1?_e5?XgiSbiNa{*XT|*Fg7vH_bEKN_>$}v8dCSm!G08@=Tba zCSq^Je9?2JSxM^7V+c#@=M;q3TqAaqJ%kpxK#^u@<-uU6nM8aO?XvU>=@g}~ykz#O zP^Vv$LT#BH{9{(c#)fW$KH0i--Ab)R;xzWBJh)JORio=`O3DMb1aq%MFG|urx6b=z zBXo)ag3VyT&Kct0zohfXMkRk{k} z?FO+9n8+lA_q=|@t3!&1GQhIPR+&f~!-&-&mZ80OoJ9uy?1sadljIk=+m2 zI9MmMM`~~SLp`^}4_-DcHTO<8db(^ZKBMi}J!dY;o+*83va8s&2D5eTtNk`Vej#w5 zuWF*Vnc|v}^DTR_(K*rl6JwlYop!W1BQusIlOmjZi;KTcyEYN0uO+}ev};fXa{G8* zA$e)*r*NUPjehd`(XyAIF%3ta8zK73{bkDPJ8-u|z5(!fI-05H_;QhepQxV?ASFHW z*{D)OXz_u@AYk!fl;+Q7A9YB+EfdpBw4(#$Skf(Kq}-F@qTL52UUfL=YXo{uOn=@N zEktbYv5M5?1T#aWwhSmJbMT-?Av>xEiO+wjOV+dLi{|bl*jAF(;cnr>ru9Gknp0ceB8?#w9L#YC!IQK>+)jUD zGbR~Rw_39EEU2HRihTc=GIQ%slWT3z9V^lN{^_gIgcm($xLjF|_j=Q)a$N&jY*R-J zI^PGT7_-55CRz09N~=q@7bes?3zPg((T!;bW|yFN+O0TbRoAR>TCJvVc2YUOa>%6A z!aG`#p~1k+`Z)XLH6L>%k6$|Jdq{DDam7{f@`F9sJ5{2m7?Z0=VTJAc;DnWKXV@qo z52H1OV4)6-W64`t7?%kAz~oATq1SI2X+sIwx={S1d74hWj<00xV1St6XS)Xvvv`Er zZa+{FG1gLo*N~v}-li0af<<0#b#;sUHG>wDs}$c8+j6sKP5%-4dfeE@#_3Q-8noHB z!mMGU5VAsap3!N8>3!EG-lo5CVZO+F^4E$znTgDx7;9#n!Kyii^^IDRyKIJ6IGA8thpr!K?{~ECta_UZEGE1s=gg+?Zw$uWYeXMY2-4jw$u{x z#d`nl(5%sE6s0QO9wvm7h2Y{#4aUy!GwNa~!$!f%{toz&^%UO~((IzE_SVF5lQ$KN zys52X(JmV&;qymOEO1Hjs}=6{vzi@*HC|G`kdMvdP!N8TG`AjiK`L^L8opUtuU(sg z6SlJAuE#6zPf>mH!trc}>nX#DbjLNlFOk|eL0ms72y)hD!k=|#zJH$Oy<^4M8`bws z^lyR@gjUIv8F5Iek3FF6T=5IEhb%>m(^N9cc?wa}q2s!bVQ96)3i)sXGP-}(Xl*b~ zIka+KE!ZmZJo0NY`PTQmX8eBa61#9ly>V*v#z601)m)THu!)@SIFM;;%h#L~Pntaz z9<8Yg5-Z7w6*2sPx%Xr{oi17gG6lw-ms6;-K6ka$9-M)%z|||n#Cq1C3M%aEVrxR& zl(>;qB8T(JxyyRObnEKY6Aw_aFtp0MnoE>inu~_rLsDEtgh;@QArw)Rcm#Q@>Pp3Y zRk26GDS5(6L1;rLv1M%J;l6B4RatK_OkWR+3n&ff-d5?3lD2WRZqN?RiC-3Y?I(<& zd}r6_1TpX6pYj>7_G1|TQOTxVN9RdIqBqD{(0(UvIH2ws`H7;fpav2abu|ZgK?*nJk3sVj0+r}#*)!{AdVk~_y>tFK{ zU@wWwHsvdnq(*CVt6!*b0l?3zvI=1&z`x$}2w#6#4@uGc$&FKGe!MXseFL z8zXk@!kg>?e=&lW`IKJ8Ro-Ud1gvJRd_-nbbb ze%2#}T|?JU#&%WnkSws31_r!1FO%naBC^likHl`Rd1}?u3{4KQG~4MVX&TN*uB~(d 
zts0$!VOnQu72|#6RQjZT??ykV)IgRbI|3Yn$exlNAAvBu+qVP~n#EVN#L6pfdQ&{Y z2{s~)FDM)|F~)hW)+jUhs##`w$JRRhG|I?NnICA(VhJFkFqg$u&I9c4`dq(Mx0~7- zgNEy0k$yZmgT1;m+Fz;B5YI0vk<6p z1mVSkyAi~0J-Z%wqU;xTArmrKZf-bWhz=$$zDwU<}Wk9pVqP_6sY9fbQ0@QI17bfTwJ+D{9YtaGQ*tP>U-)b1Otoo?bJMZc=v0lRXONG{b)^;zp49u3*h$&CD{!Jh*w#yV7Lh(YuDvU*kN3pt`jo;8SFak?lia{;xk`?>uk5F+G9=ArdSN1Q1* z^Hx#kJg*b^Z(d|%SOz;j5|6u&K~LksLrSe+JGu_3Rl6CwWY%aaC<>a=X$u_ffRqL% z^WC(Utm!(3Fi}i~W-?Y}%){-Fv+Ho;D?RvHN5#X{KL@Yj;YB(EiuHaX+3X74S94~_ zK2M9-@|hD=228)9`woW<37ud{F+c(WoJJvBw6CkAHLVdk9= z;vkV(L{-CL*6E`y1ntuM6NnH9mrNZ6a?N#nS&p12gxLdMD5)V?lo<}or^>JRFIB0N zih6C_q+Ul8l|tIkiZM^FG?uptU~tC;N!J@)kb_dzxOjG9GW(@L*=r|YZhsmlvn@So zvnSUd7qAHmVShYW6L!UYKTOw#wW`8d0yC3 zTmaC^#)Pcr;E0G=7<5~itY*+-e7!gwA z0+~NUy(VZy5Is{mk@{;+JjOT)qiKPD{4N%RFMz+p;Ho6-@njSy2Oxy=I&GurH{M(_ z61qj?n7+_6dYt0wbau07I_)tdylkp~4{&d9E&VbJ@5rC%Y(LaXhfNoNUnY(|tiDN# zY8ywNi_?XjVMg*)Fh#ZSDIPX~M*8*A+A^~t`QqtI@i_)T*<_FONR}ao6i`mTUHz64 zt4rL$fLCMrKy&vyzL%feg*p+m*g{LQY4q814PnuwSVg&0B5Nlqm$E%WF_0oS=W+OVH4Z3p`V7n!HD}s^jcMe>Y~GLne`y#7mTLK3~y){$L~+var)g} zf(o~>w`9WewpirobVC)E5F%t6KyF@**pz>z+%cYc8F(42{f=0V*SF)4ERIh1Ayp+B z(N=&(fcK__jx)8U_ypK@f30*Tb^ETdGw2SF+Mgj~v||YOP6z_%CksB<0g~<=Am~OH zv^trD%l8hMlnh5jVO>?Jn2>YBXPIc4XGH<$$0!CDH-_Z0pd#{yE%mn&Y0-!)W)3MvTji8>^j`#Obf*^Cwz zj3jA|0U`s)*Q!N-ew(*A%Y=v7yatxf8TsT#=PSHo3xIxaE|WcCEeiyNX~q{|t%f5p z@VE+C;?!g=8=)>-QO#Z?yd{HyRgYw(C6xQn>;}%8Xkf6xSi9zRb(fcr7pe;WCNvg2 z$@6Hd=f&G|+Yc(^rpV5IQb%RUQ&v#HDPI4XFF8LtyzhoG9MkZ}YFSnQEa}qu#d}Ra z9yu}987kX`8iD{6vHT8U#4)Ju6u0>l3C`Phf5%skZ580uKqkEF`dRvWUCRE)d8eD1 z0tF8z=uihJE+6}^1V_lQjkN44Y`W&Tt<~$fsg6%x@qGJT-^d15k->9Mkl6E3M&~Fr z!a!fu_hvBCybW&l1WF%u4r_spWf5pcbBV&GuuM5;D_R? 
zhaYkK07_<7`g3p3C)o3nQWTvr?y!;NyIYV1Zlcy4mG>!3LNdDscT%lKaMISFFo(u( zVpv%}@50yK6=ivP@T|EL#dDmjdcs-SQ3k8sd2T5LLJrLDvD-N}A}t~%?SCEfRpxW5 zc8VvSNfQOr8SDb*m~Xv!uHS`d^`WIs?sWxx-ooufMs11Zldp+DSK$@Qj$3s^gB(313&O;-v)` zA~YP!f(-@gMZ68Xjz2NeAea$N)FawSqikJbPu+=jhjGDPkUV*q;~IoZc-*y_F0&|S zOIBP#kw-E@9z;OLnc(EwglsW%GR8gmVR`MnD4*JS(&x%qVo?lZ0;m7Ujc9@1!OdJR z@47P;TIY}LTH5VbBFUQqgOE6tCa|>nMKv9A{)IEJ zSKfuc(tcf4m!>Tawk(<==~tyyVeSl-2S3fIgP*gma0GWc_%gqfU2?2Q$KYF&koPAN zzGa0~c(X!IEwxP~Uw|;lovKfmbqaFXDT5ev+P%bCgY)phfGI-PM#{_HP3#X8kn9$| zGLKfm^Eu*HQE{vkO;4}8o}x$Axla>C!Ifj z8r&B?ZTJy&=X6?xkR5kypd&gDDZ)71CRP3}FB}xzowV<_ z)A_yRHkS+@|J`!)+)5!m7Sp5JvrcyMM7#QcNG;Sl$sM@*mF995FWIpH$0-0gBy7(2 zL4+qTihx$C%SW)*%Z;qG&BJDMFrX%tJ#0!vi~50inWv5$uwWWzcTIPS>N%Ac?V~>v zK8cm{xiNiIX(KeK-rQ&&<-}004}p==JihMP5O!Z{WY|$0-jR?6w(-m?HUdZH6eOhS zb6g-cZK~K=3-cQaDeAh)4p6RfhG)Qc27w{}{Q>SAL$0=*e(c{1B@2AODE1vvptfK| zB-fI}l#*?cG-T*6R*Gv?fMQv3WEB|4d;z_xc&O5^5gr@*Y%bt-kd71~#$5(6XO8N> z)Z=mCAyVndO@bmnm$;-CSMADL;~R0h@?(SZVQTb|Tv-u4fG6N$Ctkc&`%nlqf~@F6 zee1^Fi*HogZjG2B8Ey_L_|8?*34MOm>LGU32TD!zseX+!@M4e+n#uCjy ze0qbAOGhC0dxuSiz8FH&MO>heCJq0!Yi@UW5P&5j#C+Clh6jEwIMHnw87q<2z7#lM zQaohRv{v%25{QSqoYcsNC4(lwa_+MHNt|dp7G&JXBEN$Kqcbtr!VEIn;G9b*f%LrZ&pg6mm*&7+ILu-4b97Fspp zSlWqb1nBEBe_E}<23_w(Aes6@tYOHQ-q9!>t+yg6c=Ib=!i_eL3FxK3oA-g_lzOh>) z8CSg^b=VRN-9S?DXQ&EqmCwvBzjUPwuXD9APxTEK%NlJDB6q2Xm!)du_TbAsTvxo~ z@axH)Sqs{{p$Vmpl|5@ONXlb>xd9i9XPjnYi;=%K9x)L&U#5r}86nMSPf#^l!>G&& zZ6Nrf0B^q-k{kB94z8}m$2z%w9EL<_h6piU9)jjF5Q9v!M$^Ef;v@|RxBZcGi#>0k zWEkr&N1ljsmA@^$_&COP_rJ@Zha?V&O1I)#!-dF*Yi6{3>oT5$0LP4V^j~@TGjjc6 z%0=^#*ePKvmee6in&ILGVG~mO()ULWZU{{YjYl8=t{__>d5?XFD5t+l$g;M(kFFV#i0Kxj9sUsb2g99Ut@5YQEgp zBQ#CPE0LDhKYTQt8k)JQe{Ot^nt~ozys$FLwe(1xa+kWIZ)@L%_U1@$z?*zkE@8$t z2eMrYUnX`o;EgMyS~75t*t7NaE)LibR(Pco!1;q##0AHpuKAnn1%vG2It`w~xwz%> zzNUkYU`VA0tH^mp5IhHS<)oK;s#-No1?Haa6PG$L6Mi6e(TqC>%`T{0dPRNngH7WJ zZV1WeWC1K@aU0Z;Os2B$^6*j%U`$(R0uzF>d6gG%$d-_0&oX}hp(iQ~fv1$PdvKfg z;^-dxQ^XXYxo=f?GDVI^PwBivA)EdK%A)m}jn9xPj0o1j)QP_Q 
z(?KS+&a>fX>GXvPoA1Y!S{f#Mf>Y~Tis)&RpMKUkoz2qn;f(_sgsTe2p5WW>idkIY zY)fB!u7Pd5zga%}ZfjnIjxq@-E@Oiiof{enB;W{>%=eLHCO_8LLTH3hkdi2gtAt|I zQCj9O6(+aS;IY%hX_J{Dx&E+tj&eD0=^h<~v+TZ4(J%`TA$+oXg4;`4WhDKDIfgsT&QXTk8=iJ zlFQ1Dm!aUuy_{JzuLzWx9nFt!@o7o z4XeLg(G(I~FOCc8+DO$3`ZYXiARe}eq7kz+|EY2Wil((>b#?G4tuR$I>3@wH#dPY~ zxTm&Ny?R1?IuqpN^N~*ZEy+U`_M7$(ol*?(2|feJ zlzezIJhDVo{oCG~nAMY>N;xh6pQV1BZHuYPq>z1LAl+hf`A^DI8M{ZI(L4f{Sl6Vu z+4-BtoEeNZIQ+wmFXydcVO8-v_yircEq!-AS^{tR1QIy{Rkp^?Sbhx(-J)J_!mff9 zr(37UeBqg;&pD@dgeU)^*>t)1Be5t)4FL7b8?) z{`c3{Aw+qo%$xIh6Hhu7#hc;g5RyDK}; zw44QVn-`K(XlL`=e5CyLIO;7rT-_YjxBa-Z?Ri1_W(KpOnAnS4w#sALkV12ZJLxHXpXp}lzBP%7u#q~bmuurnL5BK3U_B+ngq_y!YLAkcm3vR=jOuc_kJ4bT ztMliRdDf0w_(BipCq3}&?eEIjQxshVB1#o#%?D(zc-4p^V@#5cu3GhzEn_e>I>D|4 zjd$NG|3vS&dY1I@X6HpmUX7z#Kgm$fX~DngOOSmOSR$v=m6Ocr3)}(J%kC3-wUq^~ zg{#@U@;+2X!arO_@0_Om+f9$%Z zXK)L5;p)F{Ok}bm56KknvTq+`j*VxR_C1<*Q!Z_myVKHeK9XGoju@}eifLy=s!l?x zoL#VVZYA7wK?Gmo3qb-a8Yxe7&QvMowa$pz^lMBpd@XNy1(qE+jvq&#)F6#vssyj z_7>!I+q>dU2?!S2AGF*oZRN_&xAyZud2q@-d_SB*9TMf)?!nMjjuo6(J6v-nE0p~( z$QrUM$v#jjv*YShAk;tf?mx5jnf}?I^naJG&%ycc>G~`zO#gec)&EG>XJuvO`j2$| zto3<%xIVJh^PQ`!D}oJ|+Ft#@j*uPyoj#)04v3bWE2ONQEBn>E?5>&TZGn2!2LJ>$aAHTw~ z%xG}IaBRRe5J)>fQIeC>gL_?FBXE2EV}`_UDGQ-@cw}VYrG!UtlHkPJ$QA~KGCiQm z|H%$}O6C&i4B?1fu=dzDI1jxMA`t&oLw#d&GhOCVCk;QVD(Dmo_=;WMsuwa3c6Jrs z9QdWcz^}fH@1|xt4Y>>mXNjHOcWe=#w(qKVTCg{>cX}8P%F+Ga9$W(`8`y3P_{=c^ z2wehj!eBJi1BV6dI|I=>Jo%Js@9X)EJPPqp(oojc7T8rlHM#&{3|`w<2MVHGh@Rd@ zpfB!6HM)l=qQrw4%HgZTp@hq$)W8uaK*It4BeD!wdK>4XgAdcm3|FESwjnZllaQ|d z1Nq2iToC{Z4e99#fVv2Hk?>AnpBB7tsq?J=aH%I0Lbw84_xKmVkM`filGAGAYp@Ej z|J^9SHP$1uB^vfUTIr_`gh@j~gGdMobOHwCk*3w~fvh{Qfc#RDaEW-U1=+n7gAfBW z(L(Om&7RWRLMUp7sl)*Qy10OOc>HSF>p>>Q#qI4M#)D%7)m-&M`VRjhhtT{A-bUD7 z9R|&tc^l?q2RbLr{pQ8)o}1_gHa>ohea$_d$S>8!(m z%KHpGfT+~2JhIcX{+?0(UO4=Y-SriH?TP>J#U#D7GBF_q z>9d2)JGg0v$osY_hySp$NcO{EuYUU0qJd=h)&gUSOWyPyHs2?<*#~ZvZvxKL`W{N@ zImqy}PS1viDm~jVydP--(f8oV_%8C)qPvf^QwyEICHqze-ey1UQTpjWiBJ2b&g4pO 
z>sQv$P)4!0PSit0@5iyePOk~X_eIL&7mR%XntB>ufLnu`k-z21IM<8}=o< z0fceui-hPWwnsP!sUQD_xMu)bJNgmm224BtE!1hp-QwCCpELJPP7I`hp}aaxB`TpJUqh$l_w@Q2m++BSqM zC8+?-^MaYg+f>9vo~kfdRL;-+*HFVI??EV0X`K8H{ux<*O@G4Q8AQILN+vvQclo79 zhFfjV<8FVRl4)aurY=h_RUK&ne96*GVpihVU>MW#hiN90d+`GS>0!I4Ow+vbX4Q7Z zO!dQhl7~HvfHwj$Cp5Qno7$0$8IP(Ii`ow7J*B<#U1Sj*qbO-b&&X?vvHfXJO!UeJ z1~*n%j5yOSGihA|VWTZGxthjTBxuWvGFM!U3;p$JS+Jm4q$eX=3S~?VLu{VJ3VpR^ z90|jK3NC(-@Mj*{A+>ee#&%kp#PF1tVoN<>mTQ+PM9_@(PQvpn)zFalblZUht*a26 z(3WY_YlFX zGhZt!Jvtfr;z&d(v{(6$nYs@j&BpO@Sx)Xto$d*<+q)P2J z0nf1ru(gXlIXOnouUBv6#Wl1*1A5VuVN9NbGpv1R0GEzBVeHg*`C=Ft$k1w;_*RRGzD1T zKP@YIb7N@7nUyJ^w!8@WPzVgA>{B4DYRTD8KR}DALzNLUpiq6;xZ&T1CxWmdcrD3T zCJpVPgFdi=cw4~&+D9@W*<(6z;)&(0^3C$^?OkPEQy6!d6N=oh#5}gfv$ZqMtt#K! zJUENw4z2P6Gj>+C)R|G{3&ZsS^&y~ft?CfH&MNfJ{XZ>q_CNbunuyLDnl9oV?*f5d z-rmZ~o#TnTP~)X?3Ptcp_v5XxJ_lo@bP-jAWSceiq=;@_vS|lV&OCvy{v46(nSrY{ zgT{=o%0NS5WwK^*YuLL_9W-7?QxJ@8MCNiqC-=24SojG^P0*LXl;EcOwBJ@~OW7`3 z*c_s|epkDb0-tj`oCYU+bSb9LTdn@6%;b3H-dg4g>>%jnIi@Z7&FYBY^;@g71{+7I z9cGdS#;qsxq#et_Bm})vW`OKr(1dv%Su%_-)k1+7&Ai`ZMMb@&DS@W-P??JAk^qrl zJR=Rop5uY%7QxJ~KqUSbDA>&l49WeU3uRTkDe5l%&GFcs66SX97pmmC=RNnTw&4;} z-;CX2Ijf@YukM<6@6!XC?by0}m-sE-;2O>&XE%WnJ74T94HN%~GO>sj!(q=F0-;)* z(5p-64x6M^jByAQnbTJzDQXU<{e1AX3xdM;PFdp!3?+1}AcRn#b!$ypg}^=G{6#gy z5#!2Ropvq+-RGo{XVex@VCAku0rJq=P2Z|dLk?%kQw>OEEP=>Tjc-wx%^Bntf%uT2 z!x}RYf4v{|ehdT;2i@>z+M1w9QaubXeM(>sWqPejm@N3RYAUAVZky=0pTnp)h2h)9 z`0loep7p9erJ46zs(nCtg|$Ld?ddaXMguEg z+S5QpzV#Dap_VffYjb)H-8T~rmSu~mhjIqw^2x+Y=&MKTY19Lp`hzuyCv z^NBeiyH&N)0&8bxZy0Iwxa2sxL4u2q}^AblbU+?ei0Q zA?@`ocT4iCZX2*$F@_@(G#A$8)ag zQ~uSYj#l?bVez*f0>hQ`t>k#;aS)aN9F+D!-Nu1vkB7GN-#O~bfC;wuX!cTn%it0` z#y^>Vpf@#(3&8TEH#1H`|mN@^uG&O;sIc$m!mMHERsg%=I znAoi`w%k3W&cp-bJO>498iQei*@P!b*zr(`?^qnC@sxU<%|S=k7dACioK0vlWKzWH z5?OE3rpac0S>9i~?ItTmL$z2B!6{C*oENFgsc-F+?s!>H%B zrq1$+6{nvE*`o#cs2xNjDaL6bU6oR=!<7>c4vLbMnmr0+5wz-wzCjz0UW30$;t;57 z6#WRx{p+2|IWy~!h{;0PuhCNX``AptM>ZqD|n_g@Ddt{Pcugl2st! 
zYErK?@#KSPC}{`IpMgCvHg6R3jLMn5CanRg(!c5zUhy<@*IlK}lQWOQQg(meCqm=y z&~@&bA%yuz^k|$@8@i&MeMv{aS}vwgr0IK|gQls6D{iAb2ugb^|FR%g$b)9^(8jNQ zc##dz$$ti*p;-`0elQ(!759(U0Y#-X-i{fKzdJUUxqP-l4E)}4L}>Y>6Q+~O)8*`a zfLxgv?n#Mf1Y6=BYuYu#zbG11kw#0P|0{2Ovz09b|58dFZb_1Jv~S+2e4Kg0y%6cT z_X#X!+@K=&@H_4<_L-~uZrL}VH~IA3w<4~fs5W7EWG6PfS&_GNm350Y&A6Vxzk7=D%#|EUc?BkqS=$-mMUDXL>vt3#VRLtMV4Gjt6n$X z&lbDGrUbU7iGE6!4tyF~u3)iD(L!-Iq&~ppEuO`_;6g9vgT2&!Gr+sz1oYXeL=TUL zCbC0!=BwbvD;{ z@bIkY#%-b%7r+!>^#Jc|VVWw_h0WqQYvG*zW45ZK!9SWWOyMztgG1uJNun^E*>jzT zY!QC)GC8~pwfJ&1WhRQhG_%?%(D!2>D}zG)1apuk^Tg^zkoDD_H^{inCWpGFrs?X$ zfn2+;Nb$p%b2FkVr;XIm`9)99?|CKvTu~!9(l$i5W(IemZd$mj$9)#RKS4qwPTb=# zbCA*U^MVyCE8(i)ARvxy8e5=BS4oMMES&_#o?^s&u^br~%a_lXDSyL|;FI~Yc;)_v zpC0C!goQyf2G-3TwH+wohq)H+r2^wO3DsqY?vP~>6z22x)p+Cis}u)UY=MVK|7+dL zW4QQ8&Q1L$!t_u0ohujD0z^ka&PAy2X};R6;^fU0?$8O`1QMcH0@K#G`t2(QaZxWQ zUTqCuHnL<@=4B{|=1!UCAF#1dQ-hE1kOkigB+XR(v_ZjzVWLyGo&&8e@6GN{A}c{2 z-AY)Gkrb8`58Ihy?-8{Fis@N`9jae%wVOczXM}luVGq@7zp9ZiAoP`*^6Oe z=wp&^{xS3MXd#?m{abZ@Q^i=EJvL|kP45C-r1JF-fvxbj!1nU(Sb+DK zq5t198zv?bc}RSM!yOM}Q&(oShE0pH76`rJ}&08$m5 zjKd8`KqTICb0VShLkVpii0pBBg00&1E7W}1%3_3VEy=1*fYAjFSz9Tuk}MWDmdIntZ{B(lI;HGj-Ci-Hien~7cA?6mnvd%*XQjRM;w>$Fn_v_M zW;qisPJXg22doOBFDO11EFnwpJp{`7zY5dq!M~|e?<6faNtyfuC{UskB5lY^rkLOs zaRRkU{cz4*W{0bXxyo9nghr|^1UE^wQ1PSVXNb#=4wQNc)) zqz}mW+P0fU$gsE_F7`vHBs46Ju4}Wu7|X_-&a&()izdHk>=W_$dz^9yN*_`b1TFWF16WpP3v_jP_3MpYdEZSTklkh&#?W zc^1qmLazP|=PtJIM)VC_s)C-WX4Fyslzw1Ci<(&UGDvlt1E#f~7+o^V-+R5Y9^HYA zf{R8^Fy)5KFZF zq%G^2smR)4P|a9yK0oWve(!YOpKDQM2K~8c0p`ly71|BXM)R`|;mENh|D>pDFCKCQ ztz9!edAntK%A-jWnj|>gtQptk}5OVoG*Fi|mHW2y+F;leFzM%oE{%Z6Q? zv!y&rXP*$Yy`dwe@G*FSyA^o~wv~cy*ETFkb9u@!GOv8?A!)2PuB zb#BywzAdfitGAOb^fTT{MO53F3?W7faAv#hbT3bG? 
zuNatI-lZ|4Ii4!mKPpAGCKwZS+y8Rq6V3!;;kq=-!sbtz-OZmitd^a)$Y+}5IXoFu z>p>My`yHFgifj+J0lhan`71NjW#MpL25vZJ3D%qDwx8c3nF?#cNksDnr&^Jx4<-1I zHUi$?AaVDII)1+{VP3Fvv2-mao&!cY7eO% zBYR{8Y7JGf z)_Qmu1VoucDqe!9iE{Tq_aHMToF`NORbf0FCCf!gY$7-_;gGoFI&fyN3Sw&m9 zUVawZ(6pE%9HOB-IOx|*8EN0J#Hpo*Wu18E1=!+O<-E&{UtTLoW3&jw zv^-XN76Quasj~80o5ordwA$}3yl&?!cj)#<>QaintK=OeBH$}-jq-yWh6rtXM)KD7 z_!CHXA3Bn{-)bs)fawsA7i=L}7Lg6~SQ{}Ei*4{=ULbRAqRf!;!yxkFKot-!+gai-|x2K z?yp?{8_kBj58@D=&oxoE7c%y36_GGl6#S zx6Y_$s_BS)4LPRo>@?PtP?^+f^+PoW*^G$ z=$yM3u4-y;vkxbiIk&`f{-m%9%+TTb5}oIlm>@cZVpqmk1{C^!Y z1Tt7^pBV!(LUTY0BB$Q->j{%-0g$w%4ajvtFDIOZ+d*Z6JQoE(Nxwf;ZGv8MRVEan z8j>=x4D|f#p@wg7K~^GGEW3tAZmi_iX-^>;>)hHDl4e~L!cdgdJB3{W8{xAvRWRho z$Zb-BtYp2j{k##>!2TiH;Kbmle4NA1SI4?w<+>I>775Xv%pDG1(E2H1CDcFf_B`t7>*tM3S@^BV!%o9SDWLOhfED_eIVA0gtx*?ptgs`mN3mW-_~ znGZhU@v}!CHN6jzWr!70D*bUue#!S%g#oo! zWh!+bFw}?4b+QA>7W&-At-O04x`(S{!bRmWcNC!V#Zb{LD;{R3tENG{3fV7RZ-#x) zcAb&fftIA$YQ3pAX??rIufz%v^t2_BSgE3SAG5N3u}m16jgs}Ar68BH-2}52yI7|w z3(VW7cdZJAKKrt&-_jc^Qpv ztZl>v@OpG;3fbExV(7k?;FMWSaJqq!-{1U4k=X4KDugTUk1SE^NO?CK5;OEJDf8;%&DaG+-Qpy1&HJ_B}91H$(k-*S>z;A39vfQF4^1JY=)GMj?w zaK*U8;ob0M?_5I3Q6P(RaMyW66U9bkwZ@b7<66K7NtW3c)g*7GmSG?b%49#152)6t zZV*#Ba107IoRs~lrz$=TvGz@qWP%6A!3ZK=XglBs8?JCj0-F^K~*%Q-{-MEU6;ac$Q-F1Gc`=^zUK2U*a9yp=Au=3 z=(tApZ?%==(KHxM?UH0SvAU2QHrk&W66#d@T~i=>z8}eV+ujZs=szW*P(GFTzYuwh zy|j=21opc7f=)LX38)|?hKESU4k3_wgMjTd*;}A?y|uIS2ZHPs+$Gwn8P_2l{C@>8 z$7JtLA1Y-^;{SR9E`vL&IHJA+0EmOWlrNPBt@KM!A0IXiEtT?a-=0TY>lsz}nIUQ6 zlKfVWXRa+b93o(y)0a@##1TJx{)kZ*9fX%hggqr^=3WG!i*so2C5*L%0Ui=lfAOeK zD_0AT3%w#!r^}$QA>=P1SA<8ZDYd(T`e)UWN^KvNF2xYhB$1wXQaHn3yLvN~BOpx| zwH{aMfzGnr7OpO(3Qm_8@#FC#2qFH}k_R|7rzEH~Kq!~Gs~Gfln816#v6l@8{Mc9R zEY7H0YOjIcK=XWvL{E1P+_@gRkIco)r6HtVDAMzfVO~qFt0$CCEynjRGrmffjvbop zFwvS_W?RErHf(*~6omO?Wz{Y<%oE(BKX{pz_LAKIZklN|Iw?F&#}X*K8PAGnh8K?+g+apBaT3Dsl$%s1YVK{N6U% zhuW2)qM8lN2T;e}whkl~6PnO4KZD-ow?CRy>wxaq*LsRU$Nzo42?!xR8U=X+{@qob zoM78k-9v*)+Cm=PdcTh8T5xHye5755?crtTnr;ekMczH>vPT>#CNX!gVp5nAtB0le 
z9;}VmA(JAJj9S%!?$Z2o_HalybJw}l+`A7Odee{-RyXEt7 z;aR6cc<0^x!h2ClLnc)<8l{w*w|z;`Thop_TnLqbBWesKS3~!CZGlkUejb||&dZD; zwm&=5vUa3=ymsR=(ns7}NI z`eJrZJD-DE?qY_d9$As*Y5aLv_1v&%iBj1)Ys($ttzm-wj7fB!vB$I`2e+I%SZbky z2}aFMfThCk42vy-u}e)`6Eckz>cqz+RH%W8ya+#{tXPcR zfn><2;2jBhznMN0Mc}~!`U_!okX~@MNEMC1q_#>u`fT$_O7VAMb$uTiK7V41RCekP zoAXrOf_ck)I=uwm-GniNfBe})4=6Ophs)wS_)Mvk0A`#ki{r_tt@-aWjT8c*d_)r>DokuNA*X+J@Bstv?g+u zcnPEB4c?%fp1TiXTmTMNoR!ZwD^1?D%BWOY5&!4{m|MO3KVopa<31F-$G6D!7Uq$N z|Ik(iMK+me`|s5PQY5rwNTyDIZGBL*dAZ9(6P;(mdfQugh|7B{Rhj0UB$lilEJT93 z>sHuxOqi4)VjJ|T(qeUGh|Gi)qhsQ+1S5hsYh%Sgj}Whb*-iog8aa^4bQF(5NHl0iIoJaAY!+d15jf!oCp7d2 z`$&{$dFiW2T&jNWC&`@*+z=r>q$QYXzd+Y`FJA}?0(NmE!T&h-l|+&CzF+gq_wf+@ zxiGa9hG{Es%h_gewTy|dL=wUt$42S8PBIXt-enE$PG}0MeBFwq)_h)~P+Ff%OmJB? zwS!sQg)f4Ym1u1meZOn8fXV|xA;Rd5t=RYlR6{srC4133Mu}iZLgLR4=O2)g=n;0O zB|V1i<-=FW_mnM;OkwGwy|Ec%?usGxMUTLVv%G5a8}Q@Le>~7z3O_Jz#iequsmgi= zkUOdbj{GnU=%M9Fgu~iDCvo`HEsd?1_AgyN;R*8av!&6CNS)I|&>)`GcINY13gKc`Dbj=w*AhJZ7sK7pRl;J&vd#jR zBPe{9_>OWfCbPTB+`czs`c7dK>jiBAPH!l>$7*qE|sXOX=LbcIxrKW#B=6z*GvD z1Alj-s>ZpodxA>t)Td#j%nr|QU^4iagsz|{QuZ*fK3dR6a)R>vzISyorI z0(?u0Fx5-wkh*(LzXJ8~tgOsXWiXgglnh2nn_&#|l4nI~dhg=fquRb9 zsQ(RSUHRF8?c#xKq>iu8dt=Oe-?6%3-m{{IWCz(N6BD5Y!)?AAd;OD4=6S1^PBtg&{v{izmA4~LaEiY}25L7hBSjsWl+JEXZBg{*WY0_9-^KvE_>8u_1 zlv;*O=+(4 zLRgWDci7h3M_W(#KQdgJR{d@uBt)Kyu(ABz;Q^(?{RvV+aX8liI=FKb3;(O9!w^wP z1_>}r`LIvGA6cYa^MC@+Pw(4v&{`(AnIaVEmQwAK3=Na`rs(9%SI(kv_i;s8bDV7LY;4MyO5J( zQqaP#6Tk?n%%d-{mZtDcvei~0VN>6t30FigvqO?K8zZNTCMa-a-#4jukpu@nH907W z3p6&ZG05=Fc%Z-{)XA=lB>VF5*?viz$si|O$McBkvDVogc5c+JFMhmbM$UE6R&JwIl?lR-C^X67eqiv_3K@^ z^*MnSapd$X9@{e)$K+H=kOMsCi~EV`Alz8Q_l2YKOzJ7hZA;!i0@>;^*N37-1nw%0 z01>=UBib`J>PBABSz4D(*G3kq_#E&hs9;vkL>C-_hB%Hz0ED zXJrfUyrD3?A~85nqyCes`)5NJckyhVMr=_93VgKeVON;B2ZcxY`Kyv%{Sh}olyO=e zQzj}UNwKlp%abFG$GC|!qBg~JCi_;^c;#NuR(RSh(t2A7>Sj{jspzO*j%fu>Y-Y`Wh=gQhMUokv^D45u$$ z_zu0gp^&fag+eA*I1}a#t$N(IoCenhHpb@SuVU(*3=)pssrj<6YdW7(6CGkEi7?)_ z(0Wc;%K`~JN>>R9?{;S8!oG*Fou8FaA*KHOWV;4Fs{z+&N}lCQ7qgEy>M`e#sW|Kt 
zgr(2?=MSpijYcRX-4_MxIK7{V;Q4Bo8^T;ZVQTHI(m?@)lJ@(R=I#sx;mSH+<(^kx zFUQoX-UP+ROtn2*QACYa(JdWyNhHX~JMA#W;OH%>lrS~x$qz_B0$&ptHo2WZa20U^ z!u=8Ilg?2;BZ^^83FqmAatG_?Wk1UB(SyOKJ1R9T>h>?3L6|=W^_)V)yHN1(Cz+A> z!6>(O>2+D6bu* zcL0Z*oSWv03x+MqJ|!sK=yA7TTbtzc#tH-_bR}aHsl486BiwE`al^dF!POz!g;JU}6Pc6%KRA`XQpt>)bjLQh+w_8{G|&AOu3fFlxL&*AgO!d^ zE6zo8>#qn7u0YvSKG&}Cw!aKRbi2My`+hT+9~?8_&_))8RbClS`m_|yVGVqHU2Pi5 z-u+^xs2=g}glG4WLY)MJ62&Y=!Y=p3++(rl0XQ#0y#=afp&9qSvyYE)(;QEBsA=La zGlG^8OcV=>F%VoLrRlo2uC%1mUVGSJC8rM9t#|kE*J(3Pf@Mv*r*Dq0;0(d7=t3$P zvByaonH=Eo;oFN?^llQfy0bhCecB+o(Xq?0h?kru4bY7nIAD(zh932HhpD0?^m8{j z3TC0>HdDueIMNmt7?&CCSBrDhrj^!SjAnDuG*w?o!q}yHVMd(fzc^KA3BHkA_CwYoRNg#`1 zwPVd18?02JK_RJ9XD6hMTJkD+&=FS)d7m9tR(29~EozNG9mONBIVjWdLHsI-X}r_f z*(2q9Fp0f`3q0=bB-(qK)?wwqqpDcHAsRJ8T@J z&DkWOg`&qs=T$z1AeL-Cvh@{c0$Y2e zm=S+)TM8&mz809 z3*)qirgX8*ytY85_$KijMAij*sh|zv1sb8nJ9K2_n{thW=Jiwl=X8lZt$0od>l9*m598C?8xtdiwq;= z)KQYOU(s-Xad(paWB_@_Ihb%R4lwn{_C8bvBXQH_8YNZlZ z=Ah9JLacVrs>Zx9|7hM6Nq-|D{KnA9{ew#pw^HILMy;4AWX$!VVsSzgJYAAA<}!P| zM!@YPDxZz}a|n5Xu6EMe$z}6uD2;AvRhzBF5T9K?P*=Y0BYPq zIG2LniJiY?m2Dx*X?d1W(N6Ar<5d_4#4|9MN)R=TTX2g3VQ7I#aMNfo{%9mcg(!zUwxWI=Hu3b! z24|XNk^`*GA{&Ut@i9b;VXTLM(%=G{8U@ zX^l_nJiXeE9XUAZ;IT3x!_d#oB1s8gtBewQ4~4H{*{PPs%VeKDPMhAKJ#Sep!aKIY zw5~NP{*}F_8OK~IQth;igv*Lr9Drvo8G=-{;B; zz*wl&Xp)GQSttyM1hc+a5s*ehf#{KBd##T@Z){sh~F)DL1fr=eOejob*7? 
zU2Pho(bpmLryV4|R@tcV=W2}Fn%fol!Pd<;RNZl?eWZ`h?A>>liPbL#&{`;)>CaY7 zg_oPErL?_dK}Rwb`+t*BAN!jgd*2vHX`EW21y(i*ch!{LauV^O^vWz~Z+|8$P;Eu>Ffd?OqZBdR3-N?nb%bB|Jq#cC_}b?$*wrZvv!&s;$+1Mzg6sl&ZiKub z;hvF4VxEUSU7;u1wHS0dDC2h4cZlJArh(ItqR&Q_F#=mJGYidfMBBe{HIjA-#v4I+ zw$$2!B%gB-fa~bBJc!$(&dYA|Yqhk=qtbkZTA}jwz^@mNuS9?J6%gH+UPYuQs_oxW3*NqT zYZ=%1q_8zpj#bxL=B`|^&580d_~L`IQsr}FUJj#NS*QdZprK42Omg4)$NQH8@xLEG z>mUjof8Zz?oS$H{Rp8!Al5Lhz2-V!e#eMn~U}Mk>?s#qg6l1=v1E3O=+#I9*l{&JK zLvc-IfzdLBlcg-9*kFiiiG2?QmqJJtxt=kXHO`YNc5oAAUB!5^F*|bUCtzS0i&-kU zw=DMcI|O+DX7v7suAc7RS21h1i78QsmCnnQZZ-#C`&Z(qfWlWW9(=bV)I49`5f*9z zZ9y6-CrhNv-#MY(R>OJ8x@&5krd;INPApDT$kPYDGPk=1@3=BYVDu}nI zM{o|#f`ymgKM?x>4nQCyBO~sz?OqK)Ob&5K zA4a$hcme7R4lEBw0Rj?AC{KvT;8o8Bpn|JkUJE+!0zC8!Wc35O5BR--<4=&c-?#V^ z@tXnx^o0up60l!u%byMpatdMZ&p!a5gG$R=%$^7i5WMjR1XKX=j|%S`GMGQWm7@P{ zH2?tB!v_F?^&sy9fxs;s8)+cSpWkn0{Er&eGAqU4HPT*9jR7X2|7#@=X&4tq@TLp? zFK@^tV4z3e*RKwTfWa$F&uHDN{Q-N3K#sfrQqqrs0(rO3CRU$>UzlH8Sy>nXz!wO> zD=7Qj50&B30r=1Lm#xSY;@&B!3lK-%$RC&hPJbCZ7a!;x5{zJnrytPw@8bOoN=_a@ zA0Chp&{bay0_JXSL&3CuNY85dqf4k8Fcpa66A}o-x99i8IK(iW2M6rsgZ=vr@ky)v z3M1p(@elJuPZkukoxML!O&q76nvMzp0R;sGG&~{#0O&VE3?AZRY0Pi9atLQ1;Gu6* z5R`U}gT8$!I=zZ(@XAO*|#Fpffu z8S3|Vx%~q@R|rASL)g}}AOZndzlKx#t_S)a0YZj)JRHCem!bSZ5Rl)gFeJ=zP?snI z`Q`5$pdyy%xz0%jgzew7^=uaBFlRgetGV% z?jIUA#x*S10qyjFK;pq{-5?kJ)$v8QdtbCcO||P7Nd~-d6u|~B_nXt=;H+wnZ=t@G zo)FKcv|TTeqvdQ~?A5;G_SZw!cKiuU3lK@>V}Iu$qTRv$+Y)=MzNpu~nxo+LrgWFY zA!8pfA}uLtsaNHtjw^@;-GiAly(qg*#4NPKE&w(D3l2slFkB{G-aaq%p9&7-DJsX| zZ$<~8iY}AVu!PQsVc1V4@xc&5Gn^T>JeR59>IAvG-BUO?Ze(7{#p^26nh262!nYOu zNqia1!UFNx6?1|72FmbkYSm(L#--$T(MDRM^~nEV0wvb9I3Cigx#O||Oh*~^d+kOM zARQBV&R&_^Ck+LvucSQX-=N%6yM=6%Q~y zbsxi;pq_Q?5F8A0>o2E-j|FyK4h@BI3LUHu%kxJ_U^!m$g3a=P^MfW8F2CrMWNBG! 
z!O(H{CF7@m*xgnXpm@s0kMjD4>ebe4!frY7zbQcA5ohs44YgO>KUv49&hZZ~BxE`h z1>>GmaQ8S^Geh4)>39JF-cnt$w#wp=?DuaHr@{yZg#yG0aN-)7W@%h)rwE9TeN00^ zrnDfa(6p$P!n<6luvW=3J}5K@?M&{@qT;?ZaqT`|xxTkAohuTqH)3Bx9IKf?TWInv zCR|t8;~^69C)>rB)W8qt@r3nl$ACe(BLeKk$Nn);xCcI@UYlH2T(tBFc3~}kqeais zpj8{)Zlm)?4C(wiZ=Q~|wUON3i<(tumsk&BQ}#kC#$g;?pc+eA6brR)7I8!)YYvFz zOKz(XhdZCMdGA+Tz4=GM#{<#utQXa!*@CUONpdJj07atL4+w{Y7pxn7HU~cGZ}TJq z(m=a7aY+9pnF2v423{8OFw<_>^6FC}lZ9H+sijr*#87ILwxTEAPdVe8m*;ORq27Gx z=tTJxF+T2>U_Red_9axGD`NP&3O9(=qI{6ZG*$n_#1dV&Y zs*C!lvII)Y(#Mk?R;xr<=_s3!I_RPL>(H$`JF?8RHD(vwxGIN#(0 zNPKPjs!QK#?Ux=7o-E5r{lD6>0K-pjNYmQhL}WbJgXnBnxnG16b2^1IXiAgr9AIZ9 z-5SbLY^_jI0U}r*{(;wmpM%RDFbN0jPayCehj9% zTN>?i|NT>uISe+|-;8k^RiRbc7Hr$5YN{JkRV+8+c?FI}EtC+<9c$zYq>7r7A2Hc% zG==JHtSGbBcGxq@4ZpdelmA3S3e(Zj~!Y!U-D^L~~n7}#`X zAH;yH02wmtm1#f{+YhNcMAa$6cQhCv8$ZPEBemiR15NK(~AH zw8zIGIO?l$zi8-IHDP*kJSomX4Zl;g;E7Vd2J=LpUg6H1Uxm?iE-HWVRUMy315%#g z7jWl2?@!95RrGe|_sty=p$vHh8gz^Yx41d1F!A=WK4I)4TK)u-gwWnjT|}Tx5Sx1* zib5C0&~ksn`Ymo|vYN3aEu{VG6AVy(TD}Yn8V^yKh^G-c6&N3?ezt_MW*Wfcc8^P> zmIty-C?=3wQ%;%VmXT36_!cH{7+w|zPDkQfMYB`%(|cOltJmt21$|Naum#`G%78-r zy9LA6)wIk0;J#9m$X7y)bUh!AeAn&pNrvaQ`l_!~lE&k zSkzBwSYd8I;FzPh4ZIUdU9@ZlX`;qI57uuvPh?9Gg{BQAo{T zUB>^e7>=^2;x&?H@ogUFw~IQllFAykS@W7bs0BFzf6^j-y{l0)5SYc~X2QO4b6{`nc7$CADkf8BcfL03Kz>O&Llqx- zJTN5k+K_uOMM*ctF*r3q{6VMVER{K}ds8_rX+;6TVrtdFO8u-?gL&mZGb67KIn{^& zoj<9wd(|G|nm-(kXf61yHMq}c^FoEUcn>KpWr!m&BV9MmxjZ~$n3yTW9;+)S+`ljPbU%!#|TQGS&ru$*fvM7x$Ke-^Z zlctOBA$~OV*HGXw;HFtMN3`8XD|!KUv@fJvcvl59`DHuP(2N$UR#WyURHd-;Zvoa$ z&Eiz+gne;y*O{7C5y6j-sh-D!fcZbPksV<5m*Kvh|216w zxg^-`RP)e#%8}9LWWa?_2fw;b+Loigvt4EM&HI8=p14}j3mR%aF4BZY597P)u{c7< zYO&iA2zt7MeB4J8O^pEkpT~seaX-y_*^ZGLe_&RkPC4|5ECjl?mSDk@c?H^yYjaxb zbnT^^gHQNnL~6u(pClFmoT>X6n|HY5*a1>A0U`8pYFP9%>g9Iz&?0RJG`G8}tJjGX zFBEVDqFR0IeNY7l|(yktbr5!bhf zj){6(l#;szZHb1`p{&vqs5TByIFBJj`Kb@g_sU`)W5C^Kv?MA4_Z1(4HSZs_YzD&H%XdpOho>yMi->@PVT& z-DKIn*A)xLo8WYd*FyRXmZmQ{_3YjxGxg))5Zr9JZL4(FOKIYZDM`&mEIq>Ibsp}G 
zR@K1dJ@r&P34-EB-d3m=Cfp=?Y%!gWvwRco{!cnXq~0qA6P&ImVS1ANL(fgUPc}O2 z7q3z^;WfQFcq`}xT3_wabcFf`(iG^pfUN}?-t@BAvzX>Sr<8bzh(r5w_1QL!@8qHA z%~G?c=V1Zwt#7ImXXhcCA|Me=@u2OMS9NDzQ%`z^USwJs_yXN~r~A?Ds;!9tQIu|l z+FD;);^Wm*b1`2|@pfyOJj4HblNOF?;!ni2JIZsICN!|_DpF|jEve_`u54>y6T$-JwJ=cSAr!Z8qSv&~d73Tszd2C-O z=dus@1I{-*&vZV|ZbwW~C-QdPH+@I%tSkQztvGTzR6pjWIPbtvd&ugJVt@4BiLB*psA@HQfsoO zI0FnQGJdc~!bF=zeP&jfuIXEtc;&yZ3$q*|UHf96UCjy-xnPCWk+*47FwTc=JHz59 znRek2-HaM9MCq6Aw7**ibSsmJHr?Aj*_A>OeDb}oTbfjeDX*2+rA3S8)m#NE_|!Ge zm=0yraHIVRckdumayY_Zo(73)Ysz+pjOb2y2h;9b)}@-`{P+J;$6j;;g-PpLTBWKE z^}p#lNg{Q=&U_~+QHV)>1gQaXjI|dtSA}c_~B|o z)LMpdBhyPU9m$Kz_=2Bi^E!6IF4uVFR{klWbEe#M0UL z%uXZ}J@vbcKkpjsqO@6&zt_D)uQTra__6xwV3EUR}~Y}8C@=$O`o9a3st+f z<#1S2DLbgmb@K5j>vA*;%hGaQS}u-V_33vQdXUP(txL8ivIW_j zrTU`9q)2h`$MnXrS#^=+`Ms@fjt0`1LN7qv=XC3QR{`c%4fCG zAi(O5e&+s8q(scQL3T;={E+MLc(SB|ipk@xQu{FVt{B_?AC#R_j3`mtrQ5b`+wRl0 z&C@nc+qP}nwr$(CZBNf6|0Lh!pJe8$kgA)ysqFo{Ydv|Kc$i~aP)qm4 z`9}O!7v_vDStYsJ^&)x*j~LWWWWod1yFZ-?%(P5-caC~wU>FrTQEEDPHFHIu956i5N>v?V+CDs zErT9ha-l}=3hLv#TwW4ZFCG z7mM`F>C+_^Z~J#K#wx)^0pfCBPJTa)#iSZJK|@%O_wbj3k`I}-Z?KzDv4x~N`yobM zS10~4>`JZXm;Bh{^o6GK3$kwS!l&NhwOK60m_ppxx|s;D3#){Ll+)COC0N}mn4z%Q z^F&*?K+Y6B@mJZZD5zaH(MtJswB@k+*C5f2I#NOQP~F=KDcjTFK1i{TV%27qB^X$~OY#&y=TmMQt;7eFE5uPRZk>4=~$#GxUQ~e2I+=UUE20SKf z0d4^UHCS`F2Q^#R=;6M7RQUox!)337$+CcqkCxYz?~I7eiX2ZOk%N*U1hDLyzs);- z*u&Ojl9Sc?oI90oYe1q$tMOfkh-AT9?!KvswL4P+!w9psQ<4ninvT)BCsJ}=-8F|< z?5u|P+*19aLsE6BbZEp`l9Ysk2>N@e2Cc_x1#qz9xHeI^4&_0;8opyPo_XI@Rtf?> zL{M0P(QO0X>>Qq{G~wFR{1xS3Thh>Zj3ZmEZT;l|-W#lX)?i!n(Vf`bCN>Y*^v!E5 zzk-3>g~Y=m4lmLK&%x)37kw-C+L|+(QnG!IPIhWIO{RPPjz)~Q=T%`a=Sh@dxuBKK z=4qp2(p){5(pi4GJIAtws4=iu zNiY_mUMTx!>&Z&a^>V_aBRb@%zootVvO;Ll9x-CcHNCX{VCJLkb?N3O(--AS)ORDY z`nXu_lwIm%1f7yNfuvK<+$Saw)|7#o6A=!#g0Xr?Z4HGwz731fXVJUC@|zV2!%P%a zkA`oHwyNb!8AaCf(dVG9D)S`qfM_hJoGk`qchrNh(?EH^Ha$ zHE@Id=M2+SaxfRTMrxGxgFXiF2zk3qNqH`x5-53TJgr7`G1r(&RJ&^rwml25%~|2A zp1vpP&YkM%ES@FfTE!$16wot{WfFzpLJBfdl7z}6md$F8WaW4NW#*2*Sc_T`x#`bX 
zZ050U;mn+AzMJH6XTQ33tLa8+rqqanqWy{6;N_TUGfX?UV2)#&@P|aJ)Ui)Pbu+Oc z=rNsOL?Ap#A@N!nQ7%5Aa2GnJYI9x?yT0hP&bT{AoxbTod0!+6;rGTAQ~U$vY=A0H z#&$FKT+m^Us&E6pVCQ-T&6v{8!{&@v@D!crV>}JhuJ^)$aaW=juYvSUIEx{-wDQ*P z{%nlJdSZoEqZ{$hKr8EdGYO$iz|Dp@?ziXaZ|29`J1~c-4pel)m}M4wFgxxW=OjK= z9;dU|Nq4;UQ`-5ThL|M2O)utl-?x4q7c#*XcLbwZ4O@Kb#|lxmd@ApnQ?^r znYN)D69&h&&fE6*G|kd6V0tCYOd|WStseOhiUtn-k|Dwx8|shbH@5{k%^%dOHH9p~ z#KvB?U7T29p^Jzsi2%_~oKCdSak1jF5A~2v1~$(J_b-1%H}DKoTq`0L8KNo~Q1Szk z6WBQb*fdYN#3Hq!w~KEZoc(5{;HDc+(s&g z=8L45hX-V%Fc0qe4ueAT<3)|AlN6g6XV*g!J>qj}s=Bk!w)&MDg9cT|N?(X>QQYb& zHPI_rU&_0}jXDzzbK+t1^_fYnYHEOCixKXbEowQQE50*V;tzI?OxMKZ2m4VE>v4}3 zp1Wbs=(90ZX6v>VbZO(g)aUp<75cM`wfnhk)v=SZm$mlTMcwQ^&(2N*E)n%W2X*DDjvU{rtD< zvi!1tTTXi{?FkK~SLltCpX|6Vj>}BG2z_%deb_%zCY)RZ(CCd={y))A#RMyzv~qKaGTco8^`g$K0t}jzsX<({8Q8t zBM7SUjd7aW&=Xz2bNd^9FV{jUSQr6Y&JpZ?+zH?8FSAwr15T|{)c4;IKc@c=;>YkG z#E*6o%cQ>6esI}TnH zq|&Qv7!*X{aPjeQ@i8c9V3D98<8M&mJYzo#1b7gbQg9%Pf>>J^UnSI|^C;0(Zauq= z&kN*H;3LrR(NS^8Pq^RC>E1aA3J}P}06pAYx>npg1k+qbsBmAyuAex6v2DNzrzBLQ z`^QHR0c{2Xly#jrHGtEwUM_&BT?~$OU^w7!3k+BIeW0HOOo(1c7CU~u@6{*;XLUIU zEVx0;OhkDOq<{!pvHo2%f`1OVb;)${JE+m0z{_922ms%_*trnmhdXEAqCb*>{NHfF z{CO1UY7q0-evRPkguhG8ua0(tN_15K2!VcYK*DJZ6gs?pKmiURD?8tvBq3jyBv?NL z?Es&R0sc7%H5#x$0gfLFg;O=GP09*l{c;L)bPl8lz|V3ySifK)yOC?!N7H&)@kH|Z zmz#b#zreLG3!vj`Dx+}W*0zDB#cx4dvR)snXImP1L3L3@MQtz$7vTP`TsXm=`3%>N zejjcT?=(By*N^smZ9nH$vfXb8ZjNiv+aZ3QU4EK%-0Pc<9^4;oKw)8D1`rhJ#-I)2 zSl=I`k+eN$pXJuQ0z@7DT83y3BK|$ynVz0XyGQ@L*y-&bzn|=y*x>TKuxO^Aoamn` zB?SRDKrhgtKtF#bBqV-BM03d8#6+0cpB!@-g3umgh#zuQX!8)D@y{|e7wMn!)dPA! 
z*YEHE^w$~f7kQ55em|Zc!nUN^Kn9c-;`<+&ryar{_TZoK`yZk=A9Tl+t+lt7jFXn1 zA6p@9gz0S`K#uwLUjgu}(4LX+r%fs6XN=j?E<<3)gpWs4EkY(jV%#$a6LffJK;Y2N zDq?B}1U<(jfL{>TXWk@!#~FPYCvmJTFbde+#lJogNa&vw_!9b3O=UDvk_(2=o| zoR4ziJc?ENtT0J&AVM>iL2QD^f&_|x+ucMQ0{y!>J!vo~K!ghF+7JjU1$?t`L13R) z)Yovx{zS9#rW}tegS9=KJ-A5u{De4&ggw~r#-Ah2@-8e`+nfpc$xpA>#ORu(heJ>n!JzzABq7Yno&v!odn0^}EQO=2xT#zB;hF z0)xRz2_N*cFqb647Rr<2Sm6OI>~H|1S$K7Zh1MVK#%g(Uz4ov{EQ{Xd&f@7Ut!d#+taEe;Q4Uv- zE!q|n5Tzq?6)j3T{#+|gD)8*fw1)nLnzP@KpX!y!4bR(Y@fg zV9&_vhX7p?jS(7O1aw(i7aYlH*%f@%g+d9nMfA25 z!E|TfeQ>YGQ`=L1mY4_X@eM1zWNq&@TggAj(y)WPl!qXlVwF&#QAVzCe5G^L8&QzcBz%&}&9Qk(1M?T4at0gw5 zZ4|cw5!~7n^X_*B_rdlP255z(ngljGhi+~{agAAcR}WnkJLph-2!FQS3_z0~v4WQb<%)DR{Lg8V|;swB{38N;XUVR0Wm~slF)qIlR ztF51K15N^T%V61F2Cgal109m4b}6~lFhx3yB4=NmJb(~t_&2OXKlYdit~jt4|3MJH zD}y?Qra2HM9-P;J*-!AM)IvY`+xD?}7qY0U4Wt9cCFKUa;jripAHTqI1&qu6qI7Rp z3>6V`XBy_)rouyeTXZ!fUS*9Z8w*#Y0M>0tVz<;eu!pPIVPl4{N;+)d115A5_$|%S zN=uzq=90GNtU`7fV3C3;<$G>A;p?g&3;bSOkPN~)u0eXMVS!H@M<4r1@G6lqA$%OK zi|6H5xTu+fJ-C7mhL2xo$FZ2eA#9vTyrRKqWa5&r4i2U7;-&_PHoV}cdIt5)j& z(8#B_JnB&mVohvZ|AzAf#2BdehRtr{tf6JN!te&t7JNO@E!XrlksZr3UPP7SHm+H} z|8794Tob9@|Jc<5kZpjH{{Xf6XHc0%5|MSG z?3*;IdRorhHIIw)!3v2^vZJ}ui;kirQhjnz`Q7-=TB)bYrfc*xfMaGEibitjY_AU7 zoKm@OntPiq^J{rrbVZQ}#Crs^P+(GPNUj9wuMu{gy>>PSt|SCvSh~oJr=&+i`t9JsPE9?PRgt*kB7wxaAYW8M zPQo$%^Os8Rv0Bu)GkFRU+aM~m$Vf>GWL%-6_U6*W+4g|dIJE-kw5pR)yG1k;?2C2+ zD3R3}-S89}mk$@5_Dfq6LXMdW!l0~t)BUP`DCzdzIOZuPl&H{6{B}ym>{~GpMC}1q zqWNK>Y{U}WLMaKnq+cXtp@0=_W2ThDz4*hF+QE786ouC`a@yY=n3kBp#@?)3!iBaO zTTjx&*e%9jExSUhJH^Q7ce!P-^La`UGT@_nFAY7QEO6x_cT!Iramx0KX*7w zxc|*^^cJlL2Q;(K&CXt4aGIC~_=ZDXbaa=9`i?vJBuOnlR442-hux5%sj*xWne4vt z`hmplv~Xoqa}=gm&3M)#NigZx?(hY2!s2|Nd^kp)=8^!px0b#Kvk4xI!j5ZP$;Go( zoXwk2;K^oR>$nT{A`NAz6KrHPT<&X9`j5Uo?jFm@KD2X61LX(lsgkL(mPw}hp!?%( zbw1+!sHMj1{HC69hOKT6)jlRY4AR?01OvYr63Qr28^UTuGX#veNVNMzgG3qSj-hpm z^jy0ar|UqHyj;XZ>V!uv$YWWFxG;qAx9FTpgZlT2lIeqZwktGOT)g_3It^gwtR|!B zq*=@|p3e_7ywy|6z-@mnVo_&xO`S;Dll4xJ8x&(!go_Elfb_#7fncq^hUBq3>M}au5lJpPwuNA 
z2BG~qGM6rVsJPkoRLEZ$){U|N_wo-JNcRyhkDh!WQPIT9_MA(?^vM+-)7vY4E!V3E-9l1jE82kFyr|J?_5@lz&Iy1wDxDg0+1wqHN_{EiG?Hx)qEP2?= zfP;~d!QpN5@2kUAZ54q>M7`nr0+Fr4X3tbwaYHB_RR%!6-%oq#c3|Q%{NC2d4Culf zOXTfjBpxi0&jfCJ;k-`mSd!U<2aC-{Y^ABI=uf>%iG!L-h!dRJztr1Eit>^iUy>AW z4(+It4D<}@6_L7tHf(dW4eJ9W6xA9G>f|xp$7aPv0zfnwQ6m@#JHV zu{P$9@CKrU1pke1se%i{?Er~s=*iO5{zBom8b(-JS54@*1}Mzn(!9J-MLzYgcZm_X z>s68NLVL@P5a4gg8Q8!uB%ldt!Rq*87rnT9#;q(y#nAS;HNDi#k@OI#6^ZkZYR(+Qr5PST<}U;1FOlGqmY zF@H`G82JDsk57a9P4mf+Mq$V7RXpE09f_s3K3Sy;7B11_yi9%RVM{7La+YM4kJ!O^ zWKor~2}@pPhrX=S?wr&EapKk84X7SYFE78_Wx~i=Nyu&_4eJU8d zt$v^uPRg#Y>v_45QCIGA0Okl#hBw3|{RN`z9d*TY-wWd%{?3dyn|e3B8M;*Pn+za& zK57vtz3S z!)qa>Q08-N`WqlG_x|DLfZ)-xSKC<53yuR%dzQHIfHayZIMl6;_PodhmgdvjR6MPy zHw!Fc1=b|9u=X7>wC-&5q?Yo^c~!{n%mzDBV3eTi21OZ%55yInk5Y$%>xj>4{Hf-a zpMguc$92H2$=J#>&+qCRcS-~V)nh9Zn##V56&?fN=tp4gYh&St8>)lY+T@%oqa+>iH&4gA43yo z`8GK>d*E;E%Tl*Yj36hn>R^{AJkj3*qYF+iuuH%)&ndl|k>tfH0x}DZmk!W@wmDQH zU>>q{(L=E!17AZ21Pk2X-zvbHZMl*fP??0hvoHYVd~Mo3A-)(G{OcFXO>H|~pLT~v zQ2l1~O(7ll{#dX25Lh_cvE3rfu~36+bD*l8*YfQn9$#RE8PRB3PEhlN!SV9d_NPW{ z1g3T;WVOc;h$rOX07SvvRLXwck&30=A-2Kk&+`%)zQ%rIB_5kIq{>lx(H3U9^z}>+ z46{Fw5fb!t6wscS4+xExu0XXB;rv57_M$#&#Yf!?mr4T7+lEPBUGr{Jmkn}> zDKbJbY5Ma%c}|=D#c?AmszZ-m*LSZ({&>a%J*f_i2*-;wF(W%75h&J18qYFmCL?#% zV4TaarwPqXXgO1_E1F%otb)@qlAiQ|y~btJp{x7EdJ9m1JW~dnpzrK(D7lD~z^P$1 zrV?BB&N!q{U<|hx%6lhzD47t%lltD}pBEKT_wmg=gr)>r^_-$q3xFf(fZnW{p{&EY zWcP?d(ctyvrBpqsFM zgIwr-u~+W1bQ=q+2YTmyXJ^*6;{FsSIi#1qxmD@_mNXP_zRV6k$tQdgwqg63+M_Y9 z+K{c1H(zNm$OT7QAZO|eRx@eV3VVp>ei$;V+m!99yzwNQU?UF}yKb^2Rf)C4n?|1c zeD*9{HWy3$Iv`U*P$JfNR5>PSC2WZ-A*Lq$EA|vOG@NZQ+$m zFWTA2M_M^n@|=2TdAH3SOVS3<%{UxqW0ag!Vl&x8#8#X3x8ZG(_>b$#2w!)2s0B}t z=7V|o+DBJJFKVYDk=09;O92pps`ts6Rl!`h96y5Os4y3UHEsXxJxcW{ut}5qJ%g9R z`j~JN1?6qYRDqzpnuGOq-^_pkODPssUtKy9CJ zrc~SeXAVylCDv(~X?2HM5+8jT@;LA)HRzsJ^2<~ebp?#gwwEq@XkR@_B?bIj!t9fm zjZ89SEN?n_N8nV7O4hFMg5AGn=BBbkN~kN4s4k`jMw9#ePCg?|B_C^$+zrRGHCCaI zwIA3(6W#~h^MPX7ckYmuM=>DsDCBlBK5^2T^-eXfg`SZHAoz=^TH}#ogw733>aKyS 
zc=SQo5iaDi2sh`Jv*Ho)u%KSY4adyZEo2w+yPkP>D|X5F#LKKxP2DnLap!n9$B?O> zw1hHEDBGfQeo;)K=n)2Y;aEQ0%bO}&i)#7q-D%smtH4+1ZIq)sf{riAXBu9$j1ML0 zupl%traWQ-S)eJy&OSO2);5{Sc6G*&aN|~8eckpi$)@jsQ5NW$SUY(4Fr#RL`7TG_ z4*SB{we)>G2^Uh3sN8_n(`l{at&N5JQZ<_9GfH@Esgsz(RszTx!Yy=ydurFbQK|aP zKSq|zcg5Eo$^TM1hUM0xNo?e^TZ#De?)}?8epTU2*>Yc^4cE#2pc0S4rcCC!eVfPY zDq2U!Tki%2osk!kgfCjF^4+XX^gG$lp_tgI^wKvIshrM}3cl!zFR2TbW7NO|^ER}y zTsebLI`r+7GD52exwJi38+P^$Vl*$${$rdOgER6JLyyV#E*2fw$4`IN7{RvPB!;e< z9TJJ(4yc|x4<{wKgmU#Xs*)5e2UaCHuVZae8Jj(y&m~J}YtZj&vR7tg&-h+n{cNTW zP%Hm=y=@JDZ2LFm)^uSjz>< z84p>*r;bMO_*AWc?UCqsd$~Q2sHddhT1IVki5BNb?=1H`0i4C7sNl}a9!jd0^R$_V z^z;ZV&*L*jz3a#g_f<24cNF{IU}5VjYyEIQNNsVs5TiKQ5szG%K}iM^bc{3PWS8s{ ziIFn~C0gXR`YfJ~U?hc~r)y2xa;*iRwN)t0VsYuVHSP2pqSYg-^WpS{J)0YL*xXma) zoR=iMxtqK|nsV(oyk_B#)id(~yGZP59ib4cV?bz=;C?~_r)-0JRCO79O(CA%>WS^x zge$kO05kmrRq?zrS8Zz*iLJZUHlM82CMettRn0|5?zH38_c>}(>FX54v)4eg#=F-A zEhZICo2TWq67)1*QmM_z0}^hD!m=BL7$9&k-Z2F(f~9vv^3_aqwBJ z{83}c6^>>iRc!K=4b;B7Z z$z%UgMziG)yF{cikGtl*{-LW_s_M#Q zv!mWGa_f#Y*yU{o8z_wGP)T0w1b;PBk$_#{ceogZQjg<2piim=1dR@IzMuWlcgb=8 zIbYj|>0z;}1Z;#*?y!Gd=1vEhnglc&gjyTyBzw)<_6ucgYiJ+Y1)G$r#J)e}Y9&!* zSc^jT>n%V|_mR>z*qv6=hIy$ZVn0)wAMm|QQ;8;>JII#!iaSJLXRTZC8*Z+_GR1$~ zE%N$Y7Y4U|7bS;(n^$3P#V`?Ry6TV+6+kp;7EY0hZ6m+6T2^!1RWLNdQDOe@dqcFw zcFT2CQ)Te&z&B^|a8}^Cjr9THpDblq1q^zrN&XL@Qbv*de}nw~C$IZ|3;A)dviuk1 zV2K9CpV4dP5sdY+n41>PGL_A44cTv%IC!(II)1%I{>J)duX`3ZKwxY z)zEnR9+}+^!za++2V(-KU;;k8?Bh2Dp|bM;mm`lYN}AYU{CR-PWi0@%y}P?(_&$R} zXaeT)o0RSa5J5(#&fVGwGa+jRVD(@|m>qiP5uAn6Y-{DXZK!SS=wQxRY%9$cFGkb7@BiE8c zSV7hTtN{T^Dnd=t;n76`AlJU(2a`2VKihnh{UamLLSFb488Wj2iied0NN+=ZcJP=o zlg9=`FgGxdKF1(wd_z1e8%xx-wRUCUB3$lA-imo65s>GuX>IUoe79?0*LHEQe%RCk z_)XD%Q}k~x`H29-*gJxbOMY74lL@|xnLs)M+Sb+A-`6_;`$hxghNPiR;O{N*;`;hf zt$ic(sO(-`>D>U-x>5t3Le~Rye+k_>v)KUyX=iHtb#MQuezFtR)&Wimn9uP7gup86Fsd-gbJue!E~8BhT)b*M1*RLpRrg-+m=sX=Qwq8@BX- z`##?sd&8t4fvA_$;b-LTBa z*yPOAC%X2r4Jf0)X4NqEj1BLWM&I$4pLG_^1PtdsN%QZ$#mjGj 
z=9b0Y|8@HNwmHWq^v<-?J9>B4#V5U|W&OnOaR-<}NIt%r0uijV6k5M27lGVLb0%Nqj02}5uGwvJCb`>sR`wqR zz43AI{OY7w|2kMX`H+buevV%u!?wmz#U6moX`K1wkb1geKr80M^eScu#EZ^E)n4-% zb8$Mua`2s^wVO^?3E8X>HaWA!#HDvWVS+CNEz6*`>CwpQn`6OaCKiwt_6Z(1I6C6@ zhdD!-x-3PhK+O7@qm(-Hq-3(Y4^lth{Zr+Wm}Tjq*A|Hg)R-jd>Kb}U;FIL`O6b&+ zlEJ_23dJDRv_Kv9f+2?OxP*r!Zh6jAOtxBCjx|wvJG9ezHM?bl?m`KdcOoo2M>D}* zrh?j*F7HTW?j;a>qpYHCr*}*L=havkw)@)NM zl451^Ym2{7HTx(oSF{9Y@kCmna-yROdl?qi-i614@$*gqnE51QwL%9Pb47XQV1<8F z&_MPIn$rQ}yF7=` z+DiLM#Kxe>J*mwDg>w)t3iBYl`Bxt@egtS_3|(p(Z>f;-47!XFPCf5xnW+;FBC zjj4xj(?kHtO5Xj+telhF;P@R2BsS0|s?!472VaCF%DJ>y`mf`#358Wxgj`K@!ERL)5Pi?V_fogDkHB zL&(p$Ms7@UsRQ&IX$(P&iMLbOPzlYKpZ|VEYl_11FJ~oE&Gxl!Vg~4rY(~VE-T*Dr zD$(Zq7K0V^x`(^F?NyTjwO&zGJ7nXHc*O)*6Zk)aJliYIVR&pYt-P6lmNuiFg-F7@u@X|6WTN`{Qu4QJ89ZF*G zFqvNW0bX$@8wqF<%E1mODWEvG&RgoIdtVRUVYkP>k^#e}9g%c!c}_Bqu-5te0)yK{ ze5A~bz9H+<*zt8PlP7Twx~v^nd7K1!tFGK{b501*40aWhbal&_lG8fUkmSR0#ft zv8E#?-V>~nOE9KTj%89+A9c3(;UUfHH+S|2QFrl;8*&J>@Q7$$3C1b2D`N5MlylxBRgmtlHwn4(nZ zMRB1f`-h@R-h?X6#2~H9FpI$&`@##I!uv*Hcu(wJ=x(~E(INPq5NC{VK;-*cg%xv? zJ%GmhQh!@x3Gur3P`}6X%kE%s=G^bDU~9JfT=71bzXK7jXS+Gk#TqPNk(ABNcaxUJ z6C=5m0aFFPdsgQAU;-$9U-ugn(*gviOHXD#A(-OY4hc)o^1M2&Q3A5Ue&Dk#evgCK zEH#sYRwuz__v30CIlvV*U8M{o4o z!;k}Yr;y}lW#<_wR;g^gkQtrPngj+CV<=W4R^7%(Le(*7{E9;ac$S3O5rx$B7jz|_ zSeZ)SE8&DRvvfk|Fq1?%e)K+jKZ3Pt-=w>D&;%sTmn-Dv8{^BrOx@tiHObHt9?P8| z%6EBV|NfOkiBl51l2gh+*T>%!jhlij)F8uttBikgVQZyOspp z8$F^_b#EU77)0F88T89)Zd;50l|@87$5Snr){bc;j)wDKgy-wT9gpG-D=nXn zRA*P?1SKQ5fDizr<^CfVf=Z7c_W-+5_DQLc397TIjpE}SsGkvgxpy)`~N_VN0H||gu;2x@+70y6|YGDWLX+JZE9~Ryjyc7CU|nBSMBOIS31+P?C4wcICN)^}Sq{hEL}hrn zA3yC{ZophRp`XBvL4w@cZWl`6CAKgMs&M%mwVRNNAzp0CGpw0hnwN>~kL#CUmblG; zbTK>rwMha54UPL=QxU#nbLv73P83cU$JgneM)Ocq8>zAA${E&ma{YaW`b(*L0k4!N zS-t#fI9oVE|NXK{=({xC-=+0#VJd_^Iu9`Yox!%XVlc(m;l+!;DNl;V7y<1{S(U0& zVOZf_eH~*RjbL0x7Hi;soPi80RjoDDeNb)C7!MaKx6(aC*#=`Cy85ruCM-pbVs2(! 
zKBv~+Z~aGuS{*E$g^voQ{OJLD%_?gwOT}sak0L19NzzPEVgaBt#5j^Y3CAUmzBJ^I zg6BwbCL~-UT6o}FM&=0c+!nhRDVclqCFR!}#Z0bbcDuU>9nTFukNmy)s_}1xZ0D8i z4Hvz*$l?6?8bW1fUTcVg(2|{HWi|`iL`)-qnh#R_{BpIX>mJh)R8xuiF6?9v*GBtX zqa|ImBsfYGd&cbw#|Nvk-kLnbhfTNYX*%x-%GbPc z37jbFTgJQ>^{mcVhSjY1G-2GPG{ezHd?va+bxMM6y}J0_YV@qC8X_pjD} zwJm{2V-}^$Lbo|_uPHp1wbgqCV`} z@Z9%D|CXR8>cvDgLnko@89KS-ur`>N?O4#Lr7q8={uL}7N0(MOKowc)F71Q-HknA1 zJVrswP}*K3W7xL9pTuzKo~tmKOR|=Ed{7d34l!W!7jqvJBW#h(zP!0%J3lmN|A|YE zam+dDmtRR@fQVi^SVPLqSODhNU>tT#ulsI)Aw^7~9wKQSBfj{P*8KztIZi;5W>kDN zwvij6{y4Wk6)yrOA6Ug@&K+&0J3SDe=tskxd_X}T$_P1b~5Uyo=wGN0^kPxmDeD3zX6wbyHJj2jPR!(WdP z3>pq3o!6j-rtiK7P+qSlL%CAYwmq?^fVV1Z7tz1BLkxkj8ZG%oPy8&+&BRvNxis+j z;=PSSUxS)O9RycK?c`b^NK6$$`k@8!>8s_f85BasBbnPZgWp58vW++eEEYiQ71onS zeMM98kc+LJxn-=aQi96_Qz;rSB;4CJ!m@E#B$0x2_5OOCFB7Fmo5AW1=u!iuFJl)d zy|FDdS;aG^cG!*N^2qU}@lWbp2QN+!PPIUEPOsI4%{bl<2k8(SWXdZdUQOntV;rUT z4yF2)NiWQd21c3Y1}BAT=o!I_sWi*m22b=vtAx-+{+M|8ODS z{;=g*Uxo{O-trHqU7y6r?cgcK!EIxXKf;$LTmk%<%u7I4n!QLgckmo( z_}L_%500@^pJi>rQwVKkPEB9;_U=n5Ld4##x*n^kfUo*Ad`TnvAxPGIaTZU>GV)u0 z*8%J1V~tSO!QVy}nF94aiD$#|kTdN%~X5*TP;-TxTzAtvY7^N@yV`Rs7MGshK~E zmnrYL5QK!Y>(#i(>?ummDt$bcJF5FM{wjY}#9FLK?^^mtS&Tni?CIWj&AnJZB9aKG zClzRjss1_XMN&^=O-MO1!9L8^S$Ql!bGomi=-+D|8$P>ftbi@vcj((-^c!t2%xLmQ z)B08le*ZZyNLt&T?oWdVZ2JmZ+FVENtoG~vV{-tVfhQJKb zHwvu-O3AHZU?^!}b@AGfNq@S#3HCHnC+E!1?BwGXy=pRrooipX@ev2~{D4s^4U&p2 zm;g?nwe13j_un1Yrnk1Ln{>vQIYWzQ#YS|LRWEO{p5_S7^5yo<(L-06mZ-?5IE2dw zcVB32+_=908`Z|kDpUCvc1)ZKa~sqp_nE9p2uZhVT%?W2<(L!d5%dZh2lXsdgA~Ms?s<<$ zgu=7a4fqPZSlr1;>H^l8(zTIfq6t5Z&|B|;NlRE4y(CjuNcORZfcjRGOc@OcHEkG@ z0R-=m2m96DY-l`r%^`oCXPw4plb&x=yw_k_T-ZGFRO0ZHl+_}*VvJ*F2CwOx`kaId z-t74`s@0-}CurU%6DCoO#$#zwox((h8CY5ojG6*rQ+DUGZ2C8wqgPX`VlOt`@jJsk z7L?u?4%BgBv?&Hs*ZJfk#4gK?Xx~%P-ej}kul$}AVUj8Wh9WM^OV966YJy7>3;*;$ zSmg80X0NG^e@C*oqG#a}DQh!WiD=OQ;^ax+-Zm+p*$JCf(mV%^Wwedf-6CDe!+3zgIW{dRL7T^+fKh z_Thk34IcA;onXlY1AhE5(=K$PfD$IS9FEcCe1gkFqY%ZjUY>%Gq|qXr5Dj5u={*SZiZZ`^QJjgS=3aB?}xmi>iDJ_ERkke4%v9&a?{>`uc9zlzK 
zO(c189wk!Wf%){L5GfKG?J&6Uk9JL;ssTfsE9X$=-GM88#50wG6W~=WXRP(i9ZWdf zOlE!lcwIN^+TKB0H-IRWJY8R3>`}ix>N~g;t@0M(4gD6cv_jgRX{lvV&~kiWdfI7a zO(Ly(AkB$nk>cHWyb%v>qU>!$E9AVfcaCxhj>Y=XTJ~F%ZoJ0O>tTJ#Kfa|btO$Em zZqqo*!ZC*UjBp9Hr-79t$oHawy~d?(>Opa4<+ZwogTP<<{av`@ZNzQX7%Uh=j>vc2 z^T6TUxW23k1MVf)=_6&25t_(q2Gf1>hIg#^H(X#EY$kYZ5z~gjiU&??CEmQiDqKGc z!w4*DTGs~D6Ia6S4>MDP|JF5b-*rE`pn2T2s$(}ZV2p&rM^YW(rp3~2kD^>D1*Wnj z%vc?dQ1f$5-&&;BwGe!`fxaaf>V}=%2^kWG`W`)dmAt2F#Zyn>Z+tGXiJZ^WBvO>Q z32^B!!OCdJ97%5eK_FTIp_MBR&D&ezrcEb?!<6UKVDMvezao+c89(GHOz?IZ?5z?W z@-6zj>|<3qn8OIH|F9^BPS6)%xS-aSz_s=G6L{GC@6`ZO>%aQno_HP?IGYld9y^qz z{?M+-g7%vNtDR{#i(UE(9#{IVB=vL{2I()dRNulqYzj?aTi(f@=+zgkK@Ga9cDg6?NCr$_`-p$>m;26<<-u^@6 zI9Io}icS%Xhq%Q5MBJw#@SD08?bHh}t+XQ89IF;A-u zwIJ-CvoRN}ymhce{+O%trDOaK?+tABb0h0!RfL3T=g!O%LQ|$0!{Dy$dB?g%t4BHK z4kmJiVkq+e`Jif>54C?t@Phha3$W+D`$ERHGV%?BeXZk}JU}qNxrlTeI^yb;K~vpB z+UXZtIuW&csppr__RJnn$nEk1Bwqzf9xZ|-Uxpy-&wxwXT9$i3G31P9s~lJ`7DVHl zml`m<^2QhU2QuD_!tXqjXo&bXGTnc3SSw1vx3sDdA+PKz!T-+vEP(N74cPNYrpt87 z)g4iNIz4~zZ(4n?i0sTE_!>nVNSASTIGyB(X@-$LCDVa>ptMu*FRMiQ12t@w(M6!8 z0cmXl;Coy&bAMf7ycG4_NuF7AxY=3M!S1`s#CwqqL4F*#wZ}(#f=3cJ&RDqDu&sw? 
zWHEZJy8#$w&Lk_BbEg`~(EeY_`7?k{eD9&%T3Hwo_X+3k)Wgb{<{P;KVA{J=;#TaU zXt7^8>YsTg2Y@^QZ6GarC~J+4X09y{i8FfaJRRC%BWNeJOd3PCIS*W-82EMpnwJf6*bN5y5tb zLsdC-iNzNy2;B0J62a_2iY=Tf`DA+TQGBd7Jtdq)`Y>=DkbPoc3 zzGg{G@UH9E22pvdD5eJK35kLjah2m8a)`iQ|JKadwEqw8-XTboXkE80+qP{Rt8Cl0 zZQHhO+qP?!ZQHK9PUpVZFZPQ#?bpoAMt0_ioRR ziE;%I#jSrYsYfd)?hLa$p&*>&?_&SFx%6Mmk@X2 z6aS>^Hn}Bdm12EeVI_1~71!&A^EzLn`1UNvZ&8)6CtIE?>2~jMEv3H)5t#o#EdwY5 ze5@j^yCc*OPK5gU#0j`iR%!c;!6o-b43RNaI)itfDq_WrKuDWOifu9aH~ru-cB{eI zr6#u*n*zr8u!W*}V%!j%g%A2}%{bNvq4%0t73G&ng6a6=@ZJ}8Gq@p)zh}{><7P4Z z_Y-;f!LZ(jq*#P{iN7&U&>|vP7Cm~v;m;LKoG3fDNALXZkcJ6EXQN<-c@`0=`P|ku zNYLTtAU*>V5lk)U*pB2B9$+=pz^c|BIU@qo`mvwd_8)Ege1rvE!dkW?K-~1izD{Wy+4)A&$>vj#!b^P9W?$LdR(wy zdzWRp(+j)O=+Y6gX^DmJ>bamHsK9QzN|BUX&6PROOvl;xp5rCv_xo_;D>N#l1j|9T zM&7hu)&XV47K^BLAv1YAf?t4D-Z-V_V&Oe2V7~XY&PGiTQi1lih+BOsXnuEr>Q40t zCTJ3z0V7EegMyO8$}u6}urKQLv!_z{k;Po|C{3ekA2z~wKC{=ZfN|in;Ypr-IE+I6 zDH))o!c^?pxh=&7j|$|RPzBHX`0d`=1&G_C(ZKe$%}Ei8?Jb@9qz!W=Bx z;EPQ&Wz2oXZ})qw)HQg2#1heN@qDPRW+~u}>wazEw6xkycGhG6#z{s`Np9`0F*}exaNb zsk9z@%OH`=lumhZ1tCnrLAMM$Q96Khf7S3T-oLw#A|cfC zv>SKkVi9;4@f;Z=FX^3Fmcj`cXgot6Dq%z*@GB6T_LJluUZ0nU8#^i@hr|qFE1;U& zgqHiJ$EmN?u=rcdG=c(f6Cd?(cvnpj$Li%`wIxBPziEB#Wx-k)Vz1V)Fm1?lpJX=g z?J(7E#8mj-U|v-NSgxNnP1(PB4OD3nA&p>tOc&!wHr9AS4vwUcJl0bJ`D%V{{vzK+|_%^gv{Xo=#=kk1^fG zO}?Z3bgYL-Q)W1mN1BZ??)Pbu(7-<04KG&5KR|sKpj2I<0JDiE$oPkDHSAT=xLm>> z+o(d!2EY!cXcGE(5K27Xrh>+{zYa0;$qQVNhdlh|Z5f8g-wjxT4PO)}tD(#yb0{P* z&{jBDIY;Hsw~G%NFBnqe-S_C9=VDb|Ma1>%2>>n2T`;&%vmh(mY4 zY#nhCmi`jMHhC^pcManJm@eSg^*5G9247RBh0I-4i9D}>zqB~x{7Jc<{r zE`e7LELZdb^m$z?xQey5w7SL*3=thK3#%_Yoqkf}^~C@64{1&Eahs5?lg%}}AmZ9s zuTYs`j=uiq$xHMWmro47I6|kiA9Bt^dzC95&F!)qtxGi~x{RSv65Vrpn}~3b6avo5$84;NEJ9kyK%B62=H)VWu*?wh0MtILUX* z+pD;MO8r?Bl*l0XueLh zMxvFEzt}mqW0D**S%JlA$z+hlj|q&-C*`<8qIt%e1;S-ZA=q<9M@-SJd4Nxm*;vn- zR;NXTELyCqjN+)0FAi!)d*$4A2z`3eyz&RJ`!Asbz#JP)go2#-`JaZnrU`?*sl9@j z?@v?u^RSCzdE}Z8I0}hE7j8ZDWMwXLv*t3&6Z2?Ga{w7`r&wPp3gSHCdH;)*fMG1; 
z)e+u*jRXhj;1M0_f#+5`PnYPOM|`m%comeV_39PZvqjdOcvivIiO87`di9dZDw$xo z_V_t`z&Y)RtgSK(IQQ^$Y`m)jmZS{a1{kYz{@9q`>5!Y{^B z010=XfD@zA=1wv#YuvH+9&{IY5$fV>`2rfuZr08May#W~n9H{$M6%H6lwkdD}E!5ZUM?l}mz`3CA)X`u3L&X61T^I{s7>=yL0xK02`vFvoQjdBbqj z@F7p4Rf|kZ)ksf>);iX#>PBo6ynku19In$EZ(|G06_HR(haB+nX<1>kJU~8l+`^<^ z2i3*Fc0-uLs-9aYys8^xVY-`jl+M=TuDHcVB&+>TX{yeKKi)vRyHBgR1(krhBO zDd$y9c@Gyd_JJLYxYSjPRU9AX?-|xn*OTm({_PErj z_bw!fnq6mcnCEYlMbJ;bVHT z&I|*)@_cx|Y$uc*N{!q(_jjh)s)F6*IDuM`oy&Wduf7Z`!9I)Pg_Ux6+=Qcn28Pv- zzBgt{lI&fC%bVzZI4ql=3PkPB{}yRX`PEi3HLQ3J0+j5zOZPe-QUCtx=Tq*Y7Qln5 zA~;qI9r<-eJYTad$z6=2&Nd-qaGG3w?ccun(;#d}Yi|vgyvv}A5^Wj_0A4H>$ zkfx=EL@HhUemCReq9r1&m8!iLIi}2&o_%PTzE+E$%6P`xI0T-KhQ5MzQPW)(S>Hf(Q;!?(I_UUe$6{FBUJ58{jGCwZXW7R%uT`}rckJ5Y zY|eCJi4+qS`sN<6+YYJr zk8(pfmFyCh*+KCA$av9qv#{d}sg^MkDEDmnk_OW!Qka! z!5ayzYmz4f4!&dzuX|E!5=kBxK$XebfN(vVQMbfJTS*9kI}6ODC@3PJAn6Ih z&r6bmyw3WG;}!|J!1Q_{ORP+EAlipag=YG#u!twfqNvfbGNm1zYQkaU*nR{$_WB*! zWjOUj@RrCnilIV%T?&vVdBKo@AO*Sry_Ph&Mt#g-`1uQ=?-pH3?a~0>tNPuMgVk!= z8XYSYI_>$_sbHi`3qU3@SZ>ogOH(5%$`3l>Pezt)wP%FTq5i}J1eX)=1{{odhbU<=;OR`*0EOXTs+OBYcEr`Qz3g9QQq zd*p;!7&9AE9gKe_fJ)3#vQco1UPKaZmRph)1!w;`1Vqst4t?Ijk^9^ot9Xywe$VP0 zi|^2W(DEgfVd)V?{ZUsrbcmoTDt}U>I#;Ca?`khEh8(g%l|{vhHJuYagcmt04{uzL z#P+V!4iL@^TY6h78CyZjbA!0rs+!Jp)=@U=5W0;WzH;r{KNQFl#^?!ts3 zofCg!h7%Zv4XF)F2V-AOC=uA!9}IgY{k3(aCQpqtiqi&Hqt)0;&WhqO(Kp_DHE7*v z9T*0wZknXwtc;^Etnk?+$DFZ-LN^w#3?Z+zZvY|S%a2P4=uD<}yzlR4IHa*^ed0VY zQboO}8ud0(i0Yn40p7&<1pCTQ!eHk8s7||t6vYBBBz^3~IhKBeGhS6K$qx*>arWNx z^L>nl+)2Vq)QMaiO3r{TQkvz$ij~Kwp|0F%($g=qKVr0V1j~Q{g_ykLo!Cz5IqB4M zj@^FPiC4N?9wTQ}`EjuWW8%McPueGJ!P$a&zmo&o_B$PpT-Atgit%B9zp_-SP z3Na&Ku=17xh1-fj`}Y5}>pEH!fp}~+{S6eM$Tl(Sxb!$bEPJ0i3A!2`KNknhS>0gW zG77#;l+F1LK#xolR*Ej?6xu^DGiaFk zfE~rtzzDAF&D~Vy*DJ~Iq9A?fJt>30iYe8xgz1PXEo>cpq9;?@Ft&C%b>-%gwe~Gw zIziP{-wu`;mUxX}k;57Q3-$U_ZYECmu0od`Y->duUV`LsxD0bgA*~?WM9V6?7XLVG z3@&HsqO-^VgA_5MPvkI3$ zUsR%b`o#-Zg( z-q$SF6TbPgrAN#b)&D%ELh~rJ5*$%y3=|6+hLK(HX@fHK!;= 
zCJfQ@#`?ubkLeyrCS`kSuP|D6StQg^$f%^R6$DHeOf!BhgM`K<@Adb%Ra>brSYc}Q zz93KT+)peO%Mll?XTu4PtDblYv2ttFUxqCoXGKbd>$4%%+jn2!f=Yts&9snDk{gCJ+D3mo^eSalS6 z2`@EW2RZqKge9lLa92ZnhrK@^i=RGz_AQV!tfk=rB}y_qFN=Q!wc4RTg?J1)GTF!2 z0*0tW%UVL*_GB6xpKEf$uK%!M&4#&lel=eYOI2V@;x2#i9q^H;Gh&ZTRuP(=aeS-f z+K*o`yWVukMKmg2ws;lP;b2Vl&WK?u1ycHe2N;<4Y1VG@IYLu)u%pW07wPhP$DK3O z^!`1_d0{JmU@|5MF-OTr0%CM~B`~}Fsa3)ErOR&vW^HifW<{D*U-yD!%gbbHjv(J|Ceh-isQpak@3USRzAZ>k}~_54xSJ6yj9dz z#iyvtd0h9GE?g1tZ1-TJrMYqSM>7^TbaMcYj4dUK#nlot23T?%YTzG1lib5qWNnSn z0`qdRO{JYM;HJxhlp7VdHncG^aFEsu@16QVpbOW@K+dX}>4)d&e9UZfZ{Y0)s7BW- zGYw5?NmX>b&$YnR0Xo7_Mm9wrtp~E z0DCv;Giv(|$(bb=8w}6o#YKgS{0vLJ!!j4CbY+8R5(UEqc74j;T_?{yB_M84c8xJI zhC|$z)W`L#Wy!6Hq$N$Bd(W?|%A9xvl-lFjCslJ@#NPf^v^Vm1C{ZtRn1gBy_#(#S zx=hNbMpv06fx1)S8Ts~zm+|{`cheWJqhyRp5@tLENnLa6iy6jYX_uDDLP~Sh%vv&* zt7+M)yt#i1jUZl2r&c@AZxZb1wC>M3lCBPNMcqhPZyjhdki9>uyefVm9 z^hVMnBEGlc~0A$q@QzWGVxk$7Dfq zxD#KzzH$iCGKl%DEHCQSjBxh8WkOQT9Q*OPl>$rRJ_6$Sh^*Ma#4+E==pQQ9YyBoj zQe2pUB-D&I@JG1}!vwq1S*SMtO24Qc*0jN_6kgm+Z%gSDhx?uNXm!AFrV0W77QU)F z5ATm>BmP0ap(*3)Mk^9QK=Lt-M)i2-GEwlIIa(J4qY9goa1fhHLvm$ zpj&?PO|iPY!iw!ZI?q>`3#t~Wq`UY-W{gLzy|9vF$oG_<$*w9Nd?$=i#N5V6#QpBAk{3Q|Z~oFyyF^K>%g zFe42)_bL;ne3S#P+*PPzQ+luk*RXG?R~_)wG=Q{G83Ec0XD?%)H4GV;%A!&j1m=H8En*QaO3D>B||0dO7#w#a&< zRCWYYV|7|yw{IFmZN>!ZNXkbGKR*VgOKLs95e8}9Yh~E|$H7NigsH(p5P5}$$94W3 zDLPBHCmiE7{Zn*y@sO%?0uHjb;_DR}?j$!Xxos-!gSlk~?v+(5?Zf&j$n`0(pTIq0(nb?g{6y z9X)f+ILKQg(a*H(3W4`~H7_R(mgKfx;yAfqLl&h1il)z4IoljL(`1=w%z3JwqD}h& zrY&!`ji(|Ptif79F5kjS^oN-%S{cNqpfXK!jb+IvoO`v8-PtZX9(*||$PO)RV^D?~ z1X-Od^D@G?1GW*Y_m>i~;BK|zYl$O{5&}58MjE{j!G0TSbSuArgkJYy|0SBn^1qIz zF*E$%{>gv-Ii~;T>Oa~j85tS=GnnT3&p&s)!bZ4FNXY68FBBHOC=O3384k?ckHE~p z%mPu~KM#wPh_tvUP6e^JO&~569!Nlm|M8LQwDb6D<+GdFVw(H8vAXjbYsGKn&IubM zl2Ra!oeqW+fQSknhyvg_I_Y^p002P(2@*ge?%^S`hhet1%Rza|iva~IMuhScR1g^` zz@V0n1@agbE)3ATodbA~2;hJT>YxFN06_oID-amiJ_VdUU z9UK5qY@-0z`mgpO7X8Hf0u5aKa_Uav0=)of{-`}5gZumX{1ec*lrT^r?O%P{ecU2O zWt>@TGk24Jl^ywEswl_n0_bu2mnToBXp@qT03N7yg1!ARM-zhlkjMPm_e9kT0U7^J 
zzEsNinO)tB0Rj2F2qE0fseg=9?+*e1{jPM7ienjjJ(K?Useam}{pOAPiM;E%{On>{ zv~zR+wnBf2y!l-rptm=@>cP0UnPFV?L7LY0y{N#0g>-d&wLQ~;Iko^zLU^6}wZO-& z_czLS5{`ma##4x0z zPp3MYB3XDnW*99uog7MWm4&}}!Em6fVJe;#!F#80r^d4@#8t$W;;nX{BNWoom7%2YcP)vC1J(5P8B|+weC?Z2Tb2Gy?aUx^?5Ze4g zN8JiOjJ;mvv!xDNRxrVLxN9H^a(VBq>|2GZ5tr9T=As5 zpT0>IqNZ76T7ug>W?5^9Cgh4%0xQ*;dOqWu2VoJGcR)oSE8Q6Qb2Z5?2pXJ47`2PLn0@5#ECU@V z-S*B|JFpskBZag4vX6c0MbMP<%&tKmjBKL1MQc(v3!<8tj+%uf8GCyScP-kWhH?7uoNL5-|Kz zZ0AOxBjDkw;L6St9iS0bkCeiR(T1JH<*jN)nI_EV0p6FqBKuwz`mh0>x40|=;iQ9d z1Wz(*{Psy{V@vw=DA{pdx9v$+Ge1U~^bO=eHCcy>8N!UO<;|N29kR-S(rUM~E@B|% zq*o>3PPyM_$2uKy40wuF_9zW?FAcb3Lv|BMXuuIrf@~+l;x7A~O(Ih>WKt)Yn?nsj zERvj>Wrr?Rhac#A)`Y~k0CD#KoC$^Gg}cxgZdk?Cov8M-I4|{5drqSvxmb5lhv?h+ zwP=@r1mm)JVH~ZP>_vnFtX$u>fR4dV>1>IJcIOUK-oOm&RV0I7f6mE-PpH(nc>xA1 zBoN~gK}2laT^poT>?NUGUtGtIJwb&}`s=uX>E0=N0hH?`eBRT7lTf>kgj>~mL)&dl z*Dc=wUoYW2ZgKWq=HbzR;EHt)n4lOuyW@1TuU8r>Ii$_NOZiS zTXSb@j06l%-MmcrtRRsFEGK$dE-GPe$HB3=HBTUIuEgY1MvGJNF_r3*9TZowtwdBm zBhEI_@(}nQo#7kIob3*36Z8hq=K~a~Ly-}w5ZpTkK*z>XfgH(LJTdVqmykzrvGLpo zgeeQ}5qnY15V0yRTb1H$(=qvz=)T@@$w9Ufyb<~B(_~yFnz)FJ4F53fVx#lxny{AW zCW#B~GLHtb#EV(d2%-CCn@r$1%MLCK_%Lt0HTZy~GiInV{DA;To54s>K4^rY|+xBDL6~wqC zDvl99X6+6TiejRpjGQV&yT`(Q3aeDJn^PTcfDPeOTq#ZlEHsxlt&SCcGBxd%N_nzP z58Ud~SRFre;$nqL;wzi&qBw4^WXtmZETJ^z1&Pblj3=NVE*xcTe)4(h0GCmOwm7*K zgzLA>WDRfZ0p_5zcSpV#Lmu+ZwIxzl2Jf^%aMfd|>OSl{$gpomQL5AXO9Gwz9kM^R}&6njgR>o5=am-Rbq`JeU^{Z!opr_l&brM;xlN1o%6y?zuomtjqprGQhXT1*PF6-qkv zT9m}aW29?vcTQSdAbj2ikrM}-Jfj{A&*l&7Dzu=ygeW>azBuY<8LHc1fr}ki>>oy1 zl~Dt^##{PW|kAKc0C8c}4ol%LS zj7E7ng}7wIXv$h0GW)&|N*IXl>ydO|e+`?*CDVJPbO=we@;SI^IklYbx{cNI=|*+P zIT7)WItRBsOFzCY>;9}kX!04kDi7oP_;;ddqz%GRuN074@GX??GZR1x+VwO_%|TuZ z8;Yg*BMCu1vB$_J2s_&Kg?)*uOWdjVO)ximtl_SDW#>61qS1T%pQEz%s#5v;BsOFMQj?lhV zqI3PdH-Jn7tpEchQ?c8@yjDjJ;Y(^FZ+*kpX9yBmp}hE$N^^pR)pfq|XJ;z#%cw+) z5*az1#InD@9V@)5CvC!c$-Yr6T{n+f>PN zC`a@j-cVEwSfMPb)ph1^BXjIZ&CjV~;yrZslrLC^o6aCaA14J~X|j(fHnF zeb5Zmh{P4S29qBq@}s}bS?xiYY)e}5pYt?`@s8~0$wTHGdm4(}g%zaT2Z=)J7ud!^ 
z1unM_+{FZ%CYTf%u%50T9-GGoIlDdK7uG5dLq(Xvr}ZOr>rM{CK(UHW?|_o5>U}K+ zGKav%we~F+vZ#_FPCi@!09=**Q)E1_MCItr8cBDl=0BrFYeuU2pG>jxk`*XNUUL}Q4Isr;3sB6$&Pc)a zYW+I9epGvXRijoMADRQ}89Ov*BNLc}Vk;06<$Q0xD8deqHs7s*50xvzp$K@sAM_u> zt2jz`gr2+HJjymQt;-8SHefZkp2Hm%7h+S%V0oxMVP0Y&p!QQT?*`V~YC#pVf^N?9 zauM?+yj7GS8>X7EPU+8vIgneHf*bKEU?Og%5o;+zFanfCZN7KhjBWZ*xI8mD{WA0A zMV5y|zHARn(?yTZ$rBfvYwoxdD}S6D7--HvyplZB2nD;?x+`H<<@tSzKBW=yBU+9z z?UIZns(U5%V|LUm81}QC;L~dSHYpAfeI5HJ6i$X4q{Q%071eqWtFZaqr#{magqIY6 z?6wL*{Ks7iT|uagD&xtk5F4L!^f*yFbDY)r`I@txp~4uhSxvI!N)A{q{2t^0VK=;` z--Ye=Gu|8CbboA<&`!METTCdH(W!N7ny>0>Vr%|Y=wr&g*Z80=?wYU;{{^wF*pY6= zcY?Y(0;!*yEuP{U8CZ{L9a^cS_btPN+@f*AnRmIUy~MiV;-7~-cMMWiPA`vAavTkr-?ou>XLmSlZrO=EO^En z?y2@P^x$SBc9Nd-GF^5U_{lVjKO{eR@hb4VFb(Un&yBcCW_;+DMjj3nEHQVIGQhV& z*ih0kF#F0*Y4t7FWa73(&ZU|%isH?+|1sashJjYbiQR$J>Pv5Vb*l9tDu0cZHX6K} zRVKBXkJk3ES)*Rb#I&(D()m-SE*#0md=Hg;L4ABZNzhb+v=bZ&&nQS$x_D^0qx%$9 zay%Pk2R9biV-B|+f^pIXs!vF14n@_WqhgT>1?vUch_||$1H*4;#Y;=M7{j|2pDd|Y z&9>H8x51Ye)iGL*#>OH)!aspwT-?#E>qMLTatsZ$r3K}hy#xQ}jtQFQ7!GLVr>;fS zh}CwqJ!RQ=)B*w@GtqgViN1v`{|AWrx3&#=E*Ay78j?h~RnjgPi>idm{OqVnh@wTK zXMD1OLzeO{wr_=V@RH9rZ*+i%$k19vPh@XC`8#!x^K+)9P(qb*$kmn=qM<L~Z_G zopaD1)IMMF>=1nTIk|y>tb|1C-lm~ew@}fq>b+j%2Vn?d%y^wPVNO6Z?~+WAP!scP z`t+;s`p0vn<~#ya#N<~pp=T8?5O3Hj6;o0$`%L9#6Jk$*%10xN_0Q$n(^Oa5I$hb` z#n0ok>Eg^DFXBB*Zr@y>knfZ;#avnurG>$Gn^t;=DE1IR-`2N#E=g-fj zoIeU3!%L`)i46j=`5^XFxoL!AfTWMkuz@0}`e9-kg=U0RMlO`I3CiCeS>QQhg+>v2 z)n}!BDQ##@9mB_k9z5Ge|43v#nWXBvIGsP}~ z*%y>HU^HLY?g0FMC6Oci$Y(x|_uL^EzsCs{{Egh={$8FW0;%M*dJy7-5+yqclE0qD z^3`1Ml_gx3t*rW%Dq8tbw`7oe63=0A+6rE>8MCVOqz;PDi!Ik#4UP={W;@mzy4rqC zygm?!oiG#}Qlw=?ksHUyL@ERhI#9ZG7Jt?NeYQ3@irfM~B$Vl~otD||uU9>CL5)JtJB>$JR+rN`!^RFqR{j#+l+M`d zrrDdJ{C=XA+8$0?OATdHcl~@Da`e7&)G^Wyq30CUvfv^mb0p?3J9Y%H!r={_RBOU==aHje}wG1(66{bd~xU&7X&@a{BfDjBCT+$BS!ontj6|3ah&tAzDN*~&I*e# zS%@9`0}eM}8J}%tStXkX#Mf%fCbiIsNf^fd-H~jmY^fMyIn_(^jb`SqDxgL796|jz zsHGwZSCHH-!gcCJmAiu=(lZ+!8;l1&GhDRu5>QtH|3OIS9Hdc+{I0=HR{dkS6A9X; 
zjo%Rayn5Lq?)4h+=z2ufw)LTPx?)w!Vf)(*y?2yh2jigiypby_>7o1NV`o@sPrjjX z><@Dkj-qiKS2T8akX5fpC!K#;D4d&B;@pM(tCulNH4ekKG&2b+MybG>yB7tuqUu5X z@rSO2=IMC)=p6frnK7W=2u+tOoMMU}NHof-eQAl`orU`DQ$Y&Kp{ohjxfr?Zv10kp zo2bn?J#Geic_=+bW2s|Xa=a`MbK89{Qw`g=3OVyiksWrH$y_E?NJD#Z-Lrs%SA&?0 z@qy_lB6oaMo0>_ws4Za!d;=t(ZZf@n#$iCu#aeEjAQDrT3%_8NUQtIPP-ZR?OE4*@ zJqRBxTHbCPbNFKn)08xp&v>1Y(&N&rH=?+#25;KSpCeFo0hfKr8Y5G_gD@RdHnLru zn$uQujhh0?{1u0X z-~ryUxbSM|I^I@BzoE;MnAPJ1`|?Yc8;nWry|9=QB8#X*lJ)wDs}=isv!K($*g($? zj6_Da+s&@z#%0r30Ag$nHk({N{e>r|$Ezv>tDNy!zmRq@JnFmsbi0uDWg(RokUwh9 zyUzEaCiPBoae#6S@a zr}T%OpDkZ4AL{frjf^3FwBOs%!&yElHV0yYbOe-NtS95OvY{-G@G?aE?;|$0a^(lv zp(RAcX$}K3T|CiRiF-HNLrd$@y4?kdghnsylDK54=?Ey^-5&chpS`lDd98o-iW%W# z&W8+wu17glS`)nrPeugkk;a)Smz;ujs%kzLD-oT*?SMaS4x4&?jnM27&=da1T@~-Z z!CXwa`SQ`hz2H)8#y0sO%pjRnOheD9!_+4?MUCwiSrk)irk=R>EciUHaJy37?toSr z0l`ZOll1M(#8sTQB4Heush6|m(s?srckQN=Y(kHnA{Su9FVt2Fo%t6t33753@LBac zGHSTO{lK{!R{Yry&DVTfox8@OMFrJI|o0%5CSb1P|_X2(&|{Q*a7rnUAZ`m@d58)-3;3dO zc(V1~mZx!3esck#9UTQE2s4QxU~i1p{^6%MkEN}S9(_Q`*Jn{mb)_s8H(kSDxf z@0hQu0wjG<+Bg$KwUp=!wiC+-2&<62=v`VS4i)s~>{){9gj2SsC_?RbFDn`wYm&z` zJ)uA{6pplJ2NHVeau&+x^!XLd2DOF1 z*_iHM_|4b_8$qeal#fk6tM)Log0uwe=(D6zjjVjLNqg;yS#Uo}LqP2HhI+iEL^nlE zX8ec~vH{-}=_vRitLf}xVp`&}C@&DyBNg^?;6SEe>E$X^viyJ7iB_s83rIuW5RLMzBG88r`D&k zgMM}x`~?^p$Oru|V?);e-Pn+To#j8}bT%pjwg(I--KW&uVUpJYhTJ3pP=xD{2^`j1 zsLdPHB&FDpNGO5L-=EKKr0v#T05N%EbIdRLH@@AqVwKjerYL-lFQ&6ICA8BN-yOW5 zZf+V%+OefKxiOS7oYT2^YgU^mR(~5lol|NqD!uBDRCI9YN-))TlbSv1Eae9Hu6&Jr z<021l>h_AZOSqg>cn=8dDsS{9c4wKjjN3#`(7u_jJ1j5A$?!N&TkET*nmGqYIWb-p z*W3}>>Mk0upX+g#Km{8csXBd+dZUVX;c0aJvbmkKmp{qU=hok5WOKV-hAg}xM(J%V zHY$D1UfUM0mOdQ0aAAxY@MzGgS-dtEKKyp@V6pu!?x;Qi?&9mg zauIZa#F~M@V|m_f*knHT4gpAq{rgoh*gBT|ph zu?n-j3(Te?;9(_1UG9(%%+8YHW!=&ulVr2YkoedoKeo!}PeoU*#6hJ{O9!i{}Y$1 zR;p#oy7+Wk67fP3oR8Pbd*c7Z#hSd*`o$FKA1;<3CtIkdi9cH{1KT<)D5pskVS1y< zr8s7vV1Zhxm&)@1$_IQ7Y_s>ys04-=nmp{=-Nu#vl;1)Yn4XT?s!spz<(Gg`x2 z1jJWYNA5E@?v700fuY&)e+*=^Jr_WxO0oI0Bz@2OaD>1QJatgZs4d@K{fK7dtmkgN 
znCA!H>$ez|aIS@}eAaDG)StV7)g`yZtuUOA1}dY({)apCbOEY8? zn-7C}Qas5=UDBzpmJ?*eMMm~@-4y18LDL+X(~!mmkS-VahXu3VMc-j@6ykE&ORqh5 zTwSdQmZ2;@lOQ`p!;eK}3 z51s$v=98yn>y#3ePp;z0L(*)O+o{j{RcdTA*zM`#0`;`E3BJ(FdRr)rytZ=Ynb$T8 zt!itG_vmmJOmxoU4*veJ{J*s&v;Qw);bi!K!y;q1$%W8;sa78ba6Z7wPohW%16*tj z%=rW?b9Yx8JA4EgZX(nQ|Na`+WR+!;AQZi9-@(m|6Y7@d%Qx%Y=NHTTC=#i6-_9?m zlg+x@8g_6~yX|(WgCnf3pTIH#{hLHkj_t;<>sNx7mNyqZtPKK75UncZ>`YN-DP z5Cxzz$QqJA=pKN3AK)G!$W9?(ey*iQ6nH&YJz+oK{1QM)izWuZQa{rkfIKfzsirZ; z5`*?okXVJHc0pS*C0!~i6V>c|xdCCYN;l{*O=#27rubcbzCJaWCBXa;9Y8xP0J!XI z!=JN*`m=deDO%(`K=(Y-r}Qmm_)OA#fqIR<{+dR{C9t7n@SJOU+wp($*bzQumn;)RSg4lG6uty+Gh%njXN9ol&ZHaYi*P9PNp> zAl*l6YR>OEGhFh%`f>ZwIQI>yj+(I!bha~$I%BY7drH42nDiBJyYeaDw=OSz?CZb1 z(?72&FMVz6zdI(spIo2(<5$1=jlV&&Kk#Iq{K@OTgvP&NWS@PE9;v0>GvaO8wyZxL zhm@T?*xMeNi%EjFs68m|#G#Bkxy?gA{yI-$cKgrwBdc*T_x!c$7kK~W%g6Em2bf`F80b0cYXw#+9{uVd%Y#I8vVUEgsyn^dJw-jeLzQjagvm zjeO%zFHRo5+q*~U|DVRL1FDH_U4uXnq$)*FB!KjmO0OclDJ^tJAOWN#gwPRC5v3}< z69kkl2vQUR0s@K%0;1Fdf*@6jv`BmLo_pRp@A&Roch=0BJ+uE^)}Hl$-}ld&JzK!_ znQe?(Yw>N5-Sr6vjvo&IEss@5{DmL3t1>Mc*XK5C)@GmcIk`Mb#S5qByf44+a`HJl zpkI^AI71AxW>Gc*MYBu^yj)kTyX-Xj zPzr{m1X|WJj18y)0F~CM}f^LUfLWu zb1C<7rH4ks48@J%9ZD3Bu1>NU2Kd2$KJidmT`Bvt9MU@#F|mX?UB$Vqr+u4lBRP9E z9i@iX_SN5yMyZhjQjh0J4>fBqGwKR0m7*sEMfyj0U3!@-uI4+l2;({UW3>6YBr*Y; zxBUv5(^u)lDmi;fi{L$@6uhURi*GmsW=UsIsi4mK~;*1!{n-aR-@M+MBQSVaAfb#<=oGx zjL|gdBJ*06?jPUiGuy1MXdl}OR@``==z^=8{QRhu^ik9J-OabJ(+?Urs>Rz1!#R;9W=Zpk2*iMrm4KjI;tJ9 zeb#Z>A?#?BpZ<2z7Cp}h9X?#;R}t24O3-4Isc0Kp4NDkSE_1sit{=J)oi}< zLdaVN3UoFUa520sf4M4~b&^gW$TQkz@0H}&wK?~!?H52RwB6XSulcyoaLa^y zKl%3iI28NXlb=}W$9xl0RK|R%rkmH59IyKEG+T>^;g-%=TSPOOBd*{ z9J-=nZpCB}SZBDni@8w$0J#@!2dG?;gd4S$q0DiQ={dSilUyMHU>so<{dxr;G??)Ff zNp})T2E$r-67varp9gxf_;&|Nt1z z$VDY>Z%u0te9d=N062&Mj3 zvbH3xcr&EUsC%pES#d$cEMf9{5BBk)M3i3ih{E#&zVTn8HVq#xyjRP2Z>ggs4A6X~ z96!2S@{aRpuNlTltKQ0Q>bnPOU5)E1%snxXe^9cNrw}f_p#DUFB2nvGPV}UwLR9!! 
zurxr>Y))p~A5Zq)%cTD>4Q(M)m=IOAXbY(hSARW+9PaUewsxZgUf0@JetsvI z*m?3@yj@^G7BMYDjhYKyc63q)hmDomE zM@?wx+p*Q1Zv2>AE4HH1$NtQq0R+6~IZCz6LW8$&(A3k=dl-z8##Q*7Gc<2YM8rCW zm)%canY~Cm(D7LzxZN$@Hn~ZER^&xJi>`DqtJ2quY(KL~WIVP0yP^W?o`9|DvBa0P zNM!}Xg(U{4sp+xwL4ocV7(m~p(#(<>9MX|}aHTDFzgUyXuKP{(MeKQed3MH`L(UOP z02HCZuNAD6s4W|t@#2~bfTt@nPQDIG8L`VD^gd}W{I%u|*RM;-Hc>Y&cTkfh z7hXN^m4PW0Sx7f9KhMn zx5qOst0NMz)J37xXhRr5=q_!s-hP3T>DB z$dWj}jKe4ot3+7uW)@@a-MN>y2=@+dfQUcU4_4`X3#=)h=LV(KcOYyeTl#Ik7=L11 zueI7&^1O?!wF)BEcLdr<9xgxOyo@fkuRRuX4bRiQTq-5_s8{eTfh)4UVHl^SPhicPjW-W?83LV`SN z`>z#)fM6(vqGfeFya;GK50nS1hI}Myo75#hzMe z4s#&TY9j{u<{$b?ib)SSkx+-ON<8@^e!D!&$JCU6+^^&^e@N4rtit&7rt!BUA8h)u8nDK(!!BAP0|w3q)lNSW>~ z`tr3FQ>(F3@?GVRp27LU5FG-#SQ6JCK?*ur;=ujXeH7C@28TK0ITOOq8c!y6d+7|X zlN8kzBL3eoz6?;v-zrHKJWdE@N_PY zNq``D0oZt#T|dvZ?s)=LndwTR%uDZA2cMkas8XH6?utZ-TDHiw4{?##TMVj9Xy|wM z5<4-4Zv@Xfiz=^)IZ3`13{_q&zy5N_X*?Ml^OP^x+E7|iZOmrYK$Qi{9VK(lVT6~X z*tt71(PPQ>Ve4}-+6Alnc=of8$1omw(u7n!?H&$UQ@tX2(T6)y^kqhQ%hng;lRHFs zQnex@WDOU$M*kST*gQ3i_ij`?hs>WpXTk_8)ixJTy)0zin1C{|CAgxQIq#pz90Uq- z(vLzm&N(}AND7@J=n8Y;fXW@=Bq!H;;<`Z{r+Ps(McCWqdE%}{;ImW zo7oC=`n;@wY?7Ghcu8{pJTMP3HCEFNo-1sRF`pup=^{P{F|$-F^euMmLDj#ihIdP< zeTARgar(jD_Qgf*OJG>9TUh@(dHV1RclVM-vAc3|2j8Rx0*cuZXlzy{$7*@?C^Zzh zBT||ehr{LG9MKo$WT)8yNc@P#opFaPcgLyt=&4Okugws-YV5%9TC;DqoCzlU`cQjo zqk?7Qd_@j9fGjgTSDmy6K8fr*I25X__E}$Es^oRrl66KYyvT|dz+xrC8649qjk@7M z!CaC}!5FKk`<|70&kPI?YM&=@a<}O-%BXvrxzDT>GwBY0F;>l;R5kh*rp9Lpo7sDO zPnMXs=sqNz7ub-OZ*N$A^WerpqgF~Dl-BKp$n?x+{e$j>UHa8hwFan#r~lN;5Uk+` zRrPl4$j+tdZ}g=rLdPuiH#Zb#31(Jp9tT76_-T$~x&ZRtQNWYhHafmt8V=YRYMDW1+T%|xRQ0%G_1O$PC0gs)Lecfk)zSXjcktAiez1zozy5I}n8YMnu+TXw3i85Cr-kNa#;8x`xDi z`vFx{C}V>4^7r=f1HylG1MTYJLZA!~_?HF-$|8VvKrj$WIUInp<{lwvN?KO`D$q_v zP97tNRz$*~7&KS`i9w=3C^Q5F1%Z%AFa+fSS5r|?#JGUaAbFUAtDL-|JQ#{`LAfH~ zU^o4kZ{R;RMn3`)PYA-JF^mwf N9HX$Xwy6%|{{U$S7{&kq diff --git a/src/genbench/tasks/nl_codesearch_clf/__init__.py b/src/genbench/tasks/nl_codesearch_clf/__init__.py deleted file mode 100644 index b8d3157..0000000 
--- a/src/genbench/tasks/nl_codesearch_clf/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import TaskDict - - -class NlCodesearchClf(TaskDict): - pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet deleted file mode 100644 index 0adb0c1..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet +++ /dev/null @@ -1,56 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification (codesearchnet_adv)', - - description: 'Natural Language Codesearch Classification (codesearchnet_adv) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift', - - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - 'python', - 'robustness', - 'covariate shift', - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', - }, - - has_validation_set: false, - has_train_set: true, - - task_type: 'multiple_choice', - - evaluation_metrics: [ - { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, - ], - - preparation_strategies: { - finetuning: { - objective: 'maximum_likelihood', - }, - - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Given a code comment and a Python programming language code snippet, determine if the comment accurately represents the 
function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', - output_prefix: '', - choices_prefix: '', - append_choices_to_input: false, - } - }, - }, -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md deleted file mode 100644 index 8193db4..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md +++ /dev/null @@ -1,19 +0,0 @@ -# Natural Language Codesearch Classification (codesearchnet_adv) - -## Abstract -*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_adv).* - -## Examples -*Give some examples of the Natural Language Codesearch Classification (codesearchnet_adv).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_adv).* - -## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_adv) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py deleted file mode 100644 index d9d9062..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import Task - - -class NlCodesearchClfCodesearchnetAdv(Task): - pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet deleted file mode 100644 index 4abe068..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet +++ /dev/null @@ -1,54 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification (codesearchnet_go)', - - description: 'Natural Language Codesearch Classification (codesearchnet_go) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - 'go', - 'cross-lingual' - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_go/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', - }, - - has_validation_set: false, - has_train_set: true, - - task_type: 'multiple_choice', - - evaluation_metrics: [ - { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, - ], - - preparation_strategies: { - finetuning: { - objective: 'maximum_likelihood', - }, - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Given a code comment and a Go programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', - input_prefix: '', - output_prefix: '', - choices_prefix: '', - append_choices_to_input: false, - } - }, - }, -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md deleted file mode 100644 index aa3720e..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md +++ /dev/null @@ -1,19 +0,0 @@ -# Natural Language Codesearch Classification (codesearchnet_go) - -## Abstract -*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_go).* - -## Examples -*Give some examples of the Natural Language Codesearch Classification (codesearchnet_go).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_go).* - -## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_go) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py deleted file mode 100644 index 12e66f7..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import Task - - -class NlCodesearchClfCodesearchnetGo(Task): - pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet deleted file mode 100644 index 33a70fa..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet +++ /dev/null @@ -1,54 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification (codesearchnet_java)', - - description: 'Natural Language Codesearch Classification (codesearchnet_java) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - 'java', - 'cross-lingual' - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_java/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', - }, - - has_validation_set: false, - has_train_set: true, - - task_type: 'multiple_choice', - - evaluation_metrics: [ - { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, - ], - - preparation_strategies: { - finetuning: { - objective: 'maximum_likelihood', - }, - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Given a code comment and a Java programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', - input_prefix: '', - output_prefix: '', - choices_prefix: '', - append_choices_to_input: false, - } - }, - }, -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md deleted file mode 100644 index 16abaa2..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md +++ /dev/null @@ -1,19 +0,0 @@ -# Natural Language Codesearch Classification (codesearchnet_java) - -## Abstract -*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_java).* - -## Examples -*Give some examples of the Natural Language Codesearch Classification (codesearchnet_java).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_java).* - -## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_java) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py deleted file mode 100644 index 6855c0e..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import Task - - -class NlCodesearchClfCodesearchnetJava(Task): - pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet deleted file mode 100644 index d1e4f6a..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet +++ /dev/null @@ -1,54 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification (codesearchnet_javascript)', - - description: 'Natural Language Codesearch Classification (codesearchnet_javascript) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - 'javascript', - 'cross-lingual' - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_javascript/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', - }, - - has_validation_set: false, - has_train_set: true, - - task_type: 'multiple_choice', - - evaluation_metrics: [ - { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, - ], - - preparation_strategies: { - finetuning: { - objective: 'maximum_likelihood', - }, - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Given a code comment and an Javascript programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', - input_prefix: '', - output_prefix: '', - choices_prefix: '', - append_choices_to_input: false, - } - }, - }, -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md deleted file mode 100644 index 86806bc..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md +++ /dev/null @@ -1,19 +0,0 @@ -# Natural Language Codesearch Classification (codesearchnet_javascript) - -## Abstract -*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_javascript).* - -## Examples -*Give some examples of the Natural Language Codesearch Classification (codesearchnet_javascript).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_javascript).* - -## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_javascript) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py deleted file mode 100644 index 86cbe4d..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import Task - - -class NlCodesearchClfCodesearchnetJavascript(Task): - pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet deleted file mode 100644 index 26f9ad4..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet +++ /dev/null @@ -1,54 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification (codesearchnet_php)', - - description: 'Natural Language Codesearch Classification (codesearchnet_php) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - 'php', - 'cross-lingual' - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_php/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', - }, - - has_validation_set: false, - has_train_set: true, - - task_type: 'multiple_choice', - - evaluation_metrics: [ - { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, - ], - - preparation_strategies: { - finetuning: { - objective: 'maximum_likelihood', - }, - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Given a code comment and a PHP programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', - input_prefix: '', - output_prefix: '', - choices_prefix: '', - append_choices_to_input: false, - } - }, - }, -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md deleted file mode 100644 index 024058f..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md +++ /dev/null @@ -1,19 +0,0 @@ -# Natural Language Codesearch Classification (codesearchnet_php) - -## Abstract -*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_php).* - -## Examples -*Give some examples of the Natural Language Codesearch Classification (codesearchnet_php).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_php).* - -## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_php) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py deleted file mode 100644 index 53da09e..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import Task - - -class NlCodesearchClfCodesearchnetPhp(Task): - pass diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet deleted file mode 100644 index 69eb6e5..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet +++ /dev/null @@ -1,54 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification (codesearchnet_ruby)', - - description: 'Natural Language Codesearch Classification (codesearchnet_ruby) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures cross-lingual generalization', - - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - 'ruby', - 'cross-lingual' - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_ruby/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', - }, - - has_validation_set: false, - has_train_set: true, - - task_type: 'multiple_choice', - - evaluation_metrics: [ - { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, - ], - - preparation_strategies: { - finetuning: { - objective: 'maximum_likelihood', - }, - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Given a code comment and a Ruby programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', - input_prefix: '', - output_prefix: '', - choices_prefix: '', - append_choices_to_input: false, - } - }, - }, -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md deleted file mode 100644 index 012e885..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md +++ /dev/null @@ -1,19 +0,0 @@ -# Natural Language Codesearch Classification (codesearchnet_ruby) - -## Abstract -*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_ruby).* - -## Examples -*Give some examples of the Natural Language Codesearch Classification (codesearchnet_ruby).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_ruby).* - -## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_ruby) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py deleted file mode 100644 index a53da4a..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import Task - - -class NlCodesearchClfCodesearchnetRuby(Task): - pass diff --git a/src/genbench/tasks/nl_codesearch_clf/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/config.jsonnet deleted file mode 100644 index 63dd636..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/config.jsonnet +++ /dev/null @@ -1,32 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification', - - // @TODO: Add a description of the task - description: 'Natural Language Codesearch Classification aims to measure the generalization capabilites of language models in code understanding using binary classification as an evaluation task. It includes multiple subtasks to measure three different types of generalizations', - - // @TODO: Add a list of keywords that describe the task - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - subtasks_order: [ - 'codesearchnet_adv', - 'webquery', - 'codesearchnet_ruby', - 'codesearchnet_go', - 'codesearchnet_java', - 'codesearchnet_javascript', - 'codesearchnet_php', - 'statcodesearch', - - ], -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md deleted file mode 100644 index f5fc40b..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/doc.md +++ /dev/null @@ -1,39 +0,0 @@ -## Motivation -Language models can serve as a valuable tool for software developers to increase productivity. 
Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low resource programming languages. Motivated by the NLP generalisation taxonomy proposed by Hupkes et. al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. - -## Examples -Given a natural language query, determine if a given code snippet is relevant or not - -**match**: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ -**no_match**: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . 
startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} - -## Data Source -**CodeSearchNet** : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ -**CodeSearchNet Adv** : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ -**WebQuery** : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ -**StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors - -For each comment in each subset we sampled randomly another code snippet from given subset, to create a fully balanced binary classification dataset. - -**Dataset Size**:\ -*Finetuning set:* \ - -CodeSearchNet Adv train set 251k \ -*Test sets:* \ - -CodeSearchNet Adv test set 38k \ - -WebQuery test set 2k \ - -CodeSearchNet Ruby test set 4k \ - -CodeSearchNet Go test set 28k \ - -CodeSearchNet Java test set 52k \ - -CodeSearchNet Javascript test set 12k \ - -CodeSearchNet PHP test set 56k \ - -StatCodeSearch test set TBD -## Limitations and Bias -TBD - -## Citation -TBD - -## Further References -Husain, H., Wu, H. H., Gazit, T., Allamanis, M., & Brockschmidt, M. (2019). Codesearchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436. - -Lu, S., Guo, D., Ren, S., Huang, J., Svyatkovskiy, A., Blanco, A., Shujie, L. I. U. (2021, June). 
CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1). \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet deleted file mode 100644 index 742d5f2..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet +++ /dev/null @@ -1,55 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification (statcodesearch)', - - description: 'Natural Language Codesearch Classification (statcodesearch) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual and domain generalization', - - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - 'r', - 'cross-lingual', - 'domain-shift' - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/statcodesearch/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', - }, - - has_validation_set: false, - has_train_set: true, - - task_type: 'multiple_choice', - - evaluation_metrics: [ - { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, - ], - - preparation_strategies: { - finetuning: { - objective: 'maximum_likelihood', - }, - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Given a code comment and a R programming language code snippet, determine if the 
comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. The input format is defined as: comment [SEP] code', - input_prefix: '', - output_prefix: '', - choices_prefix: '', - append_choices_to_input: false, - } - }, - }, -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md deleted file mode 100644 index 0826a5c..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md +++ /dev/null @@ -1,19 +0,0 @@ -# Natural Language Codesearch Classification (statcodesearch) - -## Abstract -*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (statcodesearch).* - -## Examples -*Give some examples of the Natural Language Codesearch Classification (statcodesearch).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Natural Language Codesearch Classification (statcodesearch).* - -## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification (statcodesearch) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py deleted file mode 100644 index f7089b5..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import Task - - -class NlCodesearchClfStatcodesearch(Task): - pass diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/__init__.py b/src/genbench/tasks/nl_codesearch_clf/webquery/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet deleted file mode 100644 index f76432f..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/webquery/config.jsonnet +++ /dev/null @@ -1,55 +0,0 @@ -{ - name: 'Natural Language Codesearch Classification (webquery)', - - description: 'Natural Language Codesearch Classification (webquery) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures robustness in covariate shift', - - keywords: [ - 'codesearch', - 'natural language query', - 'binary classification', - 'python', - 'robustness', - 'covariate shift' - ], - - authors: [ - 'Andor Diera', - 'Abdelhalim Dahou', - 'Florian Sihler', - - ], - - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/webquery/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', - }, - - has_validation_set: false, - has_train_set: true, - - task_type: 'multiple_choice', - - evaluation_metrics: [ - { - hf_id: 'accuracy', - git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', - best_score: 1.0, - }, - ], - - preparation_strategies: { - finetuning: { - objective: 'maximum_likelihood', - }, - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: 'Given a code comment and a Python programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as: comment [SEP] code', - input_prefix: '', - output_prefix: '', - choices_prefix: '', - append_choices_to_input: false, - } - }, - }, -} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/doc.md b/src/genbench/tasks/nl_codesearch_clf/webquery/doc.md deleted file mode 100644 index 8973fdb..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/webquery/doc.md +++ /dev/null @@ -1,19 +0,0 @@ -# Natural Language Codesearch Classification (webquery) - -## Abstract -*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (webquery).* - -## Examples -*Give some examples of the Natural Language Codesearch Classification (webquery).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Natural Language Codesearch Classification (webquery).* - -## Limitations and Bias -*Note any known limitations or biases that the Natural Language Codesearch Classification (webquery) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/webquery/task.py b/src/genbench/tasks/nl_codesearch_clf/webquery/task.py deleted file mode 100644 index a5e5f21..0000000 --- a/src/genbench/tasks/nl_codesearch_clf/webquery/task.py +++ /dev/null @@ -1,5 +0,0 @@ -from genbench import Task - - -class NlCodesearchClfWebquery(Task): - pass From dc620afc8d71aff5115640c857b836d8dd2f4b82 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Tue, 22 Aug 2023 18:23:42 +0200 Subject: [PATCH 27/57] Add size information, fix typo --- src/genbench/tasks/icl_consistency_test/doc.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/genbench/tasks/icl_consistency_test/doc.md b/src/genbench/tasks/icl_consistency_test/doc.md index 28f4852..3c03e6a 100644 --- a/src/genbench/tasks/icl_consistency_test/doc.md +++ b/src/genbench/tasks/icl_consistency_test/doc.md @@ -8,9 +8,9 @@ to the way a model is evaluated (e.g. whether a model is calibrated) or the type number of parameters or instructions tuning). These external factors can be added into analysis by using the task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 -strongly changes model predictions. Currently, this test evaluats the ANLI-dataset (Nie et al., 2019). +strongly changes model predictions. Currently, this test evaluates the ANLI-dataset (Nie et al., 2019). -*Size*: for 600 data_IDs. The user can choose to reduce the number of evaluated data_IDs. +*Size*: 57600 for 600 data_IDs. The user can choose to reduce the number of evaluated data_IDs. ## Abstract Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. 
From ad6f5dd39a45f15c1ba99feefc78e8d2b26bcc46 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Wed, 30 Aug 2023 11:52:11 +0200 Subject: [PATCH 28/57] Add doc-string --- src/genbench/tasks/icl_consistency_test/example_evaluation.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/genbench/tasks/icl_consistency_test/example_evaluation.py b/src/genbench/tasks/icl_consistency_test/example_evaluation.py index 98b7e4c..a1ba045 100644 --- a/src/genbench/tasks/icl_consistency_test/example_evaluation.py +++ b/src/genbench/tasks/icl_consistency_test/example_evaluation.py @@ -30,6 +30,9 @@ class Generator: + """ + A simple wrapper to evaluate a given hf-model + """ def __init__(self, model_name="huggyllama/llama-7b"): self.max_new_tokens = 4 # some labels consist of up to 4 tokens self.tokenizer = transformers.AutoTokenizer.from_pretrained( From 06a66c249919db1672a6693049e40801cb1d7b81 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Wed, 30 Aug 2023 11:52:44 +0200 Subject: [PATCH 29/57] Rename factor "Instruction quality" --- src/genbench/tasks/icl_consistency_test/task.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index 4be8b31..4577c31 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -24,7 +24,7 @@ "cross_instructions", "n_shots", "instructions", - "instruction_quality", + "hp_instructions", ] @@ -84,6 +84,8 @@ def evaluate_predictions( kappas[factor] = cohen_kappa_score(factor_present, factor_absent) + # TODO: Calculate average kappa + breakpoint() # Return the evaluation metrics. 
return {"exact_match_accuracy": em, "kappas": kappas} From 5e7354eb8dc72cee0246b1fb4b30f6897f0122bf Mon Sep 17 00:00:00 2001 From: LucWeber Date: Wed, 30 Aug 2023 12:06:01 +0200 Subject: [PATCH 30/57] Add kappa_avg as metric output --- src/genbench/tasks/icl_consistency_test/task.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index 4577c31..32e1a81 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -1,5 +1,6 @@ from typing import Any, Dict, Tuple +import numpy as np import datasets from pandas import DataFrame from sklearn.metrics import cohen_kappa_score @@ -84,10 +85,13 @@ def evaluate_predictions( kappas[factor] = cohen_kappa_score(factor_present, factor_absent) - # TODO: Calculate average kappa - breakpoint() + # Calculate average kappa + kappa_avg = np.mean(list(kappas.values())) + # Return the evaluation metrics. - return {"exact_match_accuracy": em, "kappas": kappas} + return {"exact_match_accuracy": em, + "kappas": kappas, + "kappa_avg": kappa_avg} def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str, Any]]: """Concatenate the data with the factor present and absent and update the setup_IDs accordingly. 
Also add the From b1982940ce3760fa4212bd485eaa5f0e30862218 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Wed, 30 Aug 2023 15:08:09 +0200 Subject: [PATCH 31/57] Add main effects of factors as metric --- .../tasks/icl_consistency_test/task.py | 47 +++++++++++++++---- 1 file changed, 39 insertions(+), 8 deletions(-) diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index 32e1a81..a5be05c 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -1,9 +1,11 @@ from typing import Any, Dict, Tuple +from numpy import ndarray import numpy as np import datasets from pandas import DataFrame from sklearn.metrics import cohen_kappa_score +import statsmodels.api as sm from genbench import Task @@ -37,7 +39,6 @@ def evaluate_predictions( *, predictions: Dict[str, Dict[str, Any]], gold: datasets.Dataset, - save_path: str = None, ) -> Dict[str, Any]: """Evaluate the predictions of the model against the gold data. Calculating exact match accuracy plus consistency across all setups (Cohen's kappa). @@ -63,17 +64,31 @@ def evaluate_predictions( results_df = results_df.sort_values(by=["setup_ID", "data_ID"]) self._assert_equal_data_ids(results_df) - # Compute the exact match accuracy for each setup. - em = {factor: [] for factor in self.factors + ["accuracy"]} + # Compute the exact match accuracy for each setup + accuracy_metrics = {factor: [] for factor in self.factors + ["accuracy"]} for setup_ID, setup_predictions in results_df.groupby("setup_ID"): temp = self._convert_numeric_id_to_dict(setup_ID, n_repetitions=1) for factor in self.factors: - em[factor].extend(temp[factor]) - em["accuracy"].append( + accuracy_metrics[factor].extend(temp[factor]) + accuracy_metrics["accuracy"].append( (setup_predictions["predictions_numeric"] == setup_predictions["target_numeric"]).mean() ) + breakpoint() - # Compute the Cohen's kappa for consistency. 
+ # Compute main effects for each factor + for factor in self.factors: + X = np.array(accuracy_metrics[factor], dtype=int) + y = np.array(accuracy_metrics["accuracy"], dtype=float) + + # create mask to ignore setups that are irrelevant to factor + mask = X != 2 + + # fit GLM + betas, p_values = self._calculate_main_effects(X[mask], y[mask]) + accuracy_metrics[factor] = {"beta": betas[1], + "p_value": p_values[1]} + + # Compute the Cohen's kappa for consistency kappas = {} for factor in self.factors: factor_present = results_df.loc[results_df[factor] == "1"]["predictions_numeric"] @@ -89,7 +104,8 @@ def evaluate_predictions( kappa_avg = np.mean(list(kappas.values())) # Return the evaluation metrics. - return {"exact_match_accuracy": em, + return {"exact_match_accuracy": accuracy_metrics["accuracy"], + "main_effects": accuracy_metrics, "kappas": kappas, "kappa_avg": kappa_avg} @@ -131,7 +147,7 @@ def remove_factor(self, data: datasets.Dataset, factor: str, keep_present: bool keep_present: whether to keep data with the factor present or absent. """ self._set_factors() - # breakpoint() + len_setup_ID_preamble = 4 index_factor = self.factors.index(factor) + len_setup_ID_preamble realisation_to_keep = str(int(keep_present)) @@ -199,6 +215,21 @@ def _convert_numeric_id_to_dict(self, setup_id: str, n_repetitions: int = 1) -> return setup_dict + @staticmethod + def _calculate_main_effects(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray]: + """ + + :return: + """ + # Add a constant column to X for the intercept + X = sm.add_constant(X) + + # Fit GLM + model = sm.GLM(y, X) + results = model.fit() + + return results.params, results.pvalues + @staticmethod def _label_to_numeric(label: str) -> int: """Convert a label to a numeric value. 
From c99eba6bea5a1a183e11621ba0ea2842da75c2bd Mon Sep 17 00:00:00 2001 From: LucWeber Date: Wed, 30 Aug 2023 17:51:10 +0200 Subject: [PATCH 32/57] Refactor --- .../tasks/icl_consistency_test/task.py | 62 +++++++++++-------- 1 file changed, 37 insertions(+), 25 deletions(-) diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/task.py index a5be05c..76d90d4 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/task.py @@ -3,6 +3,7 @@ import numpy as np import datasets +import pandas as pd from pandas import DataFrame from sklearn.metrics import cohen_kappa_score import statsmodels.api as sm @@ -64,32 +65,41 @@ def evaluate_predictions( results_df = results_df.sort_values(by=["setup_ID", "data_ID"]) self._assert_equal_data_ids(results_df) - # Compute the exact match accuracy for each setup - accuracy_metrics = {factor: [] for factor in self.factors + ["accuracy"]} + # Compute the accuracy for each setup + accuracies, setup_IDs, setups_by_factor = [], [], [] for setup_ID, setup_predictions in results_df.groupby("setup_ID"): - temp = self._convert_numeric_id_to_dict(setup_ID, n_repetitions=1) - for factor in self.factors: - accuracy_metrics[factor].extend(temp[factor]) - accuracy_metrics["accuracy"].append( - (setup_predictions["predictions_numeric"] == setup_predictions["target_numeric"]).mean() - ) - breakpoint() + accuracy = (setup_predictions["predictions_numeric"] == setup_predictions["target_numeric"]).mean() + + accuracies.append(accuracy) + setup_IDs.append(setup_ID) + setups_by_factor.append(setup_predictions[self.factors].head(1)) + + accuracies_df = DataFrame({"setup_ID": setup_IDs, "accuracy": accuracies}) + setups_by_factor_df = pd.concat(setups_by_factor, ignore_index=True) # Compute main effects for each factor + betas, p_values = [], [] + # TODO: delete this extra list when error with one_label is fixed + factors = [] for factor in self.factors: - X = 
np.array(accuracy_metrics[factor], dtype=int) - y = np.array(accuracy_metrics["accuracy"], dtype=float) - - # create mask to ignore setups that are irrelevant to factor - mask = X != 2 + X = setups_by_factor_df[factor].to_numpy(dtype=int) # X is binary and states if a factor is present or not + y = accuracies_df["accuracy"].to_numpy(dtype=float) # y are the acc. scores of the respective setups + mask = X != 2 # create mask to ignore setups that are irrelevant to factor (coded as X == 2) # fit GLM - betas, p_values = self._calculate_main_effects(X[mask], y[mask]) - accuracy_metrics[factor] = {"beta": betas[1], - "p_value": p_values[1]} + try: + beta, p_value = self._calculate_main_effects(X[mask], y[mask]) + except: + continue + + betas.append(beta) + p_values.append(p_value) + factors.append(factor) - # Compute the Cohen's kappa for consistency - kappas = {} + main_effects_df = DataFrame({'factor': factors, 'beta': betas, 'p_value': p_values}) + + # Compute Cohen's kappa for consistency + kappas = [] for factor in self.factors: factor_present = results_df.loc[results_df[factor] == "1"]["predictions_numeric"] factor_absent = results_df.loc[results_df[factor] == "0"]["predictions_numeric"] @@ -98,15 +108,17 @@ def evaluate_predictions( mask = [(f1 != -1 and f2 != -1) for f1, f2 in zip(factor_absent, factor_present)] factor_present, factor_absent = factor_present[mask], factor_absent[mask] - kappas[factor] = cohen_kappa_score(factor_present, factor_absent) + kappas.append(cohen_kappa_score(factor_present, factor_absent)) + + kappas_df = DataFrame({'factor': self.factors, 'kappa': kappas}) # Calculate average kappa - kappa_avg = np.mean(list(kappas.values())) + kappa_avg = kappas_df["kappa"].mean() # Return the evaluation metrics. 
- return {"exact_match_accuracy": accuracy_metrics["accuracy"], - "main_effects": accuracy_metrics, - "kappas": kappas, + return {"accuracy": accuracies_df, + "main_effects": main_effects_df, + "kappas": kappas_df, "kappa_avg": kappa_avg} def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str, Any]]: @@ -228,7 +240,7 @@ def _calculate_main_effects(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray]: model = sm.GLM(y, X) results = model.fit() - return results.params, results.pvalues + return results.params[1], results.pvalues[1] @staticmethod def _label_to_numeric(label: str) -> int: From 810b12904eb6546c5c2e2783782c83608e0c95c4 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Wed, 30 Aug 2023 19:40:18 +0200 Subject: [PATCH 33/57] Split datasets into subtasks --- .../tasks/icl_consistency_test/__init__.py | 5 + .../icl_consistency_test/anli/__init__.py | 0 .../icl_consistency_test/anli/config.jsonnet | 48 ++++ .../tasks/icl_consistency_test/anli/doc.md | 136 +++++++++ .../icl_consistency_test/{ => anli}/task.py | 14 +- .../tasks/icl_consistency_test/config.jsonnet | 36 +-- .../tasks/icl_consistency_test/doc.md | 16 +- .../example_evaluation.py | 11 +- .../icl_consistency_test/mnli/__init__.py | 0 .../icl_consistency_test/mnli/config.jsonnet | 48 ++++ .../tasks/icl_consistency_test/mnli/doc.md | 136 +++++++++ .../tasks/icl_consistency_test/mnli/task.py | 262 ++++++++++++++++++ 12 files changed, 665 insertions(+), 47 deletions(-) create mode 100644 src/genbench/tasks/icl_consistency_test/anli/__init__.py create mode 100644 src/genbench/tasks/icl_consistency_test/anli/config.jsonnet create mode 100644 src/genbench/tasks/icl_consistency_test/anli/doc.md rename src/genbench/tasks/icl_consistency_test/{ => anli}/task.py (96%) create mode 100644 src/genbench/tasks/icl_consistency_test/mnli/__init__.py create mode 100644 src/genbench/tasks/icl_consistency_test/mnli/config.jsonnet create mode 100644 src/genbench/tasks/icl_consistency_test/mnli/doc.md 
create mode 100644 src/genbench/tasks/icl_consistency_test/mnli/task.py diff --git a/src/genbench/tasks/icl_consistency_test/__init__.py b/src/genbench/tasks/icl_consistency_test/__init__.py index e69de29..91b47b8 100644 --- a/src/genbench/tasks/icl_consistency_test/__init__.py +++ b/src/genbench/tasks/icl_consistency_test/__init__.py @@ -0,0 +1,5 @@ +from genbench import TaskDict + + +class IclConsistencyTest(TaskDict): + pass diff --git a/src/genbench/tasks/icl_consistency_test/anli/__init__.py b/src/genbench/tasks/icl_consistency_test/anli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/icl_consistency_test/anli/config.jsonnet b/src/genbench/tasks/icl_consistency_test/anli/config.jsonnet new file mode 100644 index 0000000..2e047a6 --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/anli/config.jsonnet @@ -0,0 +1,48 @@ +{ + name: 'ICL consistency test (anli)', + + description: 'The ICL consistency test measures the consistency of LLM predictions on the same datapoints across many different setups. Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the number of parameters or instructions tuning). These external factors can be added into analysis by using the task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 strongly changes model predictions. 
This test evaluates the ANLI-dataset (Nie et al., 2019).', + + keywords: [ + 'consistency', + 'LLM', + 'robustness', + 'in-context learning', + 'prompt-based learning', + 'icl', + 'anli', + 'mnli' + ], + + authors: [ + 'Lucas Weber', + 'Elia Bruni', + 'Dieuwke Hupkes', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: '', + instruction_few_shot: '', + input_prefix: '', + output_prefix: '', + } + }, + }, +} diff --git a/src/genbench/tasks/icl_consistency_test/anli/doc.md b/src/genbench/tasks/icl_consistency_test/anli/doc.md new file mode 100644 index 0000000..8fe23b8 --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/anli/doc.md @@ -0,0 +1,136 @@ +# ICL consistency test + +The ICL consistency test measures the consistency of LLM predictions on the same datapoints across many different setups. +Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. +the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a +specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related +to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the +number of parameters or instructions tuning). 
These external factors can be added into analysis by using the +task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. +A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 +strongly changes model predictions. The ICL consistency test has two subtasks, one evaluating the ANLI-dataset (Nie et al., 2019); +the other the MNLI-dataset (Wang et al., 2017). + +*Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. + +## Abstract +Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. +Just like the previous generation of task-tuned models (TT), models that are adapted to tasks via in-context-learning (ICL) or instruction tuning (IT) are robust in some setups, but not in others. +Here, we present a detailed analysis of which design choices cause instabilities and inconsistencies in LLM predictions. +First, we show how spurious correlations between input distributions and labels -- a known issue in TT models -- form only a minor problem for prompted models. +Then we engage in a systematic, holistic evaluation of different factors that have been found to influence predictions in a prompting setup. +We test all possible combinations of a range of factors on both vanilla and instruction-tuned LLMs of different scale, and statistically analyse the results to show which factors are the most influential, the most interactive or the most stable. +From our results, we deduce which factors can be used without precautions, should be avoided or handled with care in most settings. + +## Examples +The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. 
Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor). + +Example with data\_ID - 1120; setup\_ID - id0_0200020: +``` +The city's name derives from the Greek words "άργυρος" ("árgyros" meaning +"silver") and "πόλη" ("poli" meaning "city"). The name's older form was +"Argyroupolis". The first name of the settlement was "New Argyroupolis", +given by the refugees from Gümüşhane. Using only the above description +and what you know about the world, "The city's name derives from Greek words." +is definitely correct, incorrect, or inconclusive? + +ANSWER: Correct. + +Undead is a 2003 Australian zombie science fiction horror comedy film +written and directed by Michael and Peter Spierig and starring Felicity +Mason, Mungo McKay and Rob Jenkins. It was then-relatively-unknown "Good Game" +presenter Steven O'Donnell's first film role. Using only the above description +and what you know about the world, "Steven O'Donnell was not a popular actor before +the 2003 Zombie movie." is definitely correct, incorrect, or inconclusive? + +ANSWER: Correct. + +Let the Music Do the Talking is the first of four albums by The Joe Perry +Project. It was their the most successful, selling approximately 250,000 +copies in the United States. The title track was re-recorded by Joe Perry's +more successful band Aerosmith on their album "Done With Mirrors", albeit +with a slightly different melody and Steven Tyler penned lyrics. Using only +the above description and what you know about the world, ""Done With Mirrors" +was an album by The Joe Perry Project." is definitely correct, incorrect, or +inconclusive? + +ANSWER: +``` + +## Usage +For an example script copy `example_evaluation.py` into your genbench root directory (`/genbench_cbt`) and run it. 
+#### Dataloading +The task can loaded through the default GenBench interface as a zero-shot task: +```python +from genbench import load_task +from genbench.api import PreparationStrategy + +task = load_task("icl_consistency_test") +ds = task.get_prepared_datasets( + PreparationStrategy.PROMPT_BASED_TESTING, + shot_list=[0] + )[0] +``` +#### Evaluation +Provide the evaluation function with the model outputs as strings, accompanied by the corresponding setup-ids and data-ids +from the original dataset. +For the predictions, please follow the following format: + +`predictions: Dict[setup_ID, Dict[data_ID, model_output]]` + +For the gold labels, please provide the original dataset ds: + +`gold: datasets.Dataset` + +With this input, run the task evaluation like so: +```python +results = task.evaluate_predictions(predictions=predictions, + gold=ds) +``` + +#### Adding factors +External factors can be added via the `task.add_factor()` method. +```python +predictions = (predictions_factor_absent, predictions_factor_present) +predictions = task.add_factor(data=predictions, + factor='') +``` +where `predictions_factor_absent` and `predictions_factor_present` are dictionaries of the same format as the original +predictions dictionary. + +#### Removing factors +Factors can be removed from the dataset and the evaluation by using the `task.remove_factor()` method. +```python +predictions = task.remove_factor(data=ds, + factor='') +``` +where `ds` is the original dataset as obtained by the `task.get_prepared_datasets()` method. Note that removing factors +will influence the results on all other factors. + +## Data Source +The original data stems from the ANLI dataset (Nie et al., 2019). +Prompting templates are taken from promptsource (Bach et al., 2022). + +## Limitations and Bias +We identify the following limitations of the consistency test: +1. The number of factors in limited and does not cover all possible factors that might influence the predictions. 
We limited ourselves to factors we deem relevant, to ensure fast evaluation.
+
+2. Currently, the test is only implemented for the ANLI and MNLI datasets.
+
+3. External factors such as _Instruction tuning_ or _calibration_ have to be manually added by the user using the `task.add_factor()` method.
+
+
+## GenBench Eval card
+This test can be used to test generalisation in LLMs (pretrain - test locus).
+It is designed to better understand how LLMs generalise (intrinsic motivation) and to give practical hints on relevant prompt-design decisions (practical motivation). It can be used to assess robustness.
+
+![GenBench Eval Card](GenBench_eval_card.png)
+
+
+## References
+
+Bach, S. H., Sanh, V., Yong, Z. X., Webson, A., Raffel, C., Nayak, N. V., ... & Rush, A. M. (2022). Promptsource: An integrated development environment and repository for natural language prompts. arXiv preprint arXiv:2202.01279.
+
+Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., & Kiela, D. (2019). Adversarial NLI: A new benchmark for natural language understanding. arXiv preprint arXiv:1910.14599.
+
+Wang, Z., Hamza, W., & Florian, R. (2017, August). Bilateral multi-perspective matching for natural language sentences. In Proceedings of the 26th International Joint Conference on Artificial Intelligence (pp. 4144-4150).
diff --git a/src/genbench/tasks/icl_consistency_test/task.py b/src/genbench/tasks/icl_consistency_test/anli/task.py similarity index 96% rename from src/genbench/tasks/icl_consistency_test/task.py rename to src/genbench/tasks/icl_consistency_test/anli/task.py index 76d90d4..6333d2e 100644 --- a/src/genbench/tasks/icl_consistency_test/task.py +++ b/src/genbench/tasks/icl_consistency_test/anli/task.py @@ -10,7 +10,6 @@ from genbench import Task - LABELS = [ ["Correct", "True", "Always", "Yes", "Guaranteed", "Duplicates"], # `correct` labels ["Inconclusive", "Possible", "Sometimes", "Maybe", "Neither"], # `neutral` labels @@ -32,7 +31,7 @@ ] -class IclConsistencyTestTask(Task): +class IclConsistencyTestAnli(Task): """Python implementation of the ICL consistency test task.""" def evaluate_predictions( @@ -79,24 +78,18 @@ def evaluate_predictions( # Compute main effects for each factor betas, p_values = [], [] - # TODO: delete this extra list when error with one_label is fixed - factors = [] for factor in self.factors: X = setups_by_factor_df[factor].to_numpy(dtype=int) # X is binary and states if a factor is present or not y = accuracies_df["accuracy"].to_numpy(dtype=float) # y are the acc. 
scores of the respective setups mask = X != 2 # create mask to ignore setups that are irrelevant to factor (coded as X == 2) # fit GLM - try: - beta, p_value = self._calculate_main_effects(X[mask], y[mask]) - except: - continue + beta, p_value = self._calculate_main_effects(X[mask], y[mask]) betas.append(beta) p_values.append(p_value) - factors.append(factor) - main_effects_df = DataFrame({'factor': factors, 'beta': betas, 'p_value': p_values}) + main_effects_df = DataFrame({'factor': self.factors, 'beta': betas, 'p_value': p_values}) # Compute Cohen's kappa for consistency kappas = [] @@ -266,3 +259,4 @@ def _assert_equal_data_ids(results_df: DataFrame) -> None: assert ( used_data_ids.sort() == results_df.loc[results_df["setup_ID"] == setup_ID]["data_ID"].unique().sort() ), "Not all data_IDs are the same for all setups. Check for missing predictions!" + diff --git a/src/genbench/tasks/icl_consistency_test/config.jsonnet b/src/genbench/tasks/icl_consistency_test/config.jsonnet index f868746..1859622 100644 --- a/src/genbench/tasks/icl_consistency_test/config.jsonnet +++ b/src/genbench/tasks/icl_consistency_test/config.jsonnet @@ -1,15 +1,18 @@ { name: 'ICL consistency test', - description: 'The ICL consistency test measures the consistency of LLM predictions on the same datapoints across many different setups. Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the number of parameters or instructions tuning). These external factors can be added into analysis by using the task.add_factor() method. 
The output-metric is Cohen\'s kappa for each factor across all different conditions. A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 strongly changes model predictions. Currently, this test evaluats the ANLI-dataset (Nie et al., 2019).', + // @TODO: Add a description of the task + description: 'The ICL consistency test measures the consistency of LLM predictions on the same datapoints across many different setups. Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the number of parameters or instructions tuning). These external factors can be added into analysis by using the task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 strongly changes model predictions. The ICL consistency test has two subtasks, one evaluating the ANLI-dataset (Nie et al., 2019); the other the MNLI-dataset (Wang et al., 2017).', keywords: [ 'consistency', 'LLM', 'robustness', 'in-context learning', + 'prompt-based learning', 'icl', - 'anli', + 'anli', + 'mnli' ], authors: [ @@ -19,28 +22,9 @@ ], - data_source: { - type: 'manual', - test: 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all.jsonl', - }, - - has_validation_set: false, - has_train_set: false, - - task_type: 'free_form', - - preparation_strategies: { - // A recipe for preparing the model to perform the task by configuring its prompt. 
- // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. - // We provide a few options for configuring the prompt. But, the task creator can - // also provide a custom prompt preparation in the task's Python class. - prompt_based_testing: { - prompt_builder: { - instruction_zero_shot: '', - instruction_few_shot: '', - input_prefix: '', - output_prefix: '', - } - }, - }, + subtasks_order: [ + 'anli', + 'mnli', + + ], } diff --git a/src/genbench/tasks/icl_consistency_test/doc.md b/src/genbench/tasks/icl_consistency_test/doc.md index 3c03e6a..8fe23b8 100644 --- a/src/genbench/tasks/icl_consistency_test/doc.md +++ b/src/genbench/tasks/icl_consistency_test/doc.md @@ -8,9 +8,10 @@ to the way a model is evaluated (e.g. whether a model is calibrated) or the type number of parameters or instructions tuning). These external factors can be added into analysis by using the task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 -strongly changes model predictions. Currently, this test evaluates the ANLI-dataset (Nie et al., 2019). +strongly changes model predictions. The ICL consistency test has two subtasks, one evaluating the ANLI-dataset (Nie et al., 2019); +the other the MNLI-dataset (Wang et al., 2017). -*Size*: 57600 for 600 data_IDs. The user can choose to reduce the number of evaluated data_IDs. +*Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. ## Abstract Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. @@ -22,9 +23,9 @@ We test all possible combinations of a range of factors on both vanilla and inst From our results, we deduce which factors can be used without precautions, should be avoided or handled with care in most settings. 
## Examples -The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data_ID (specifying the original datapoint) and a setup_ID (with each digit specifying the presence or absence of a factor). +The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor). -Example with data_ID - 1120; setup_ID - id0_0200020: +Example with data\_ID - 1120; setup\_ID - id0_0200020: ``` The city's name derives from the Greek words "άργυρος" ("árgyros" meaning "silver") and "πόλη" ("poli" meaning "city"). The name's older form was @@ -55,7 +56,7 @@ inconclusive? ANSWER: ``` -_Added line breaks for readability_ + ## Usage For an example script copy `example_evaluation.py` into your genbench root directory (`/genbench_cbt`) and run it. #### Dataloading @@ -128,7 +129,8 @@ It is designed to better understand how LLMs generalise (intrinsic motivation) a ## References -Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., & Kiela, D. (2019). Adversarial NLI: A new benchmark for natural language understanding. arXiv preprint arXiv:1910.14599. - Bach, S. H., Sanh, V., Yong, Z. X., Webson, A., Raffel, C., Nayak, N. V., ... & Rush, A. M. (2022). Promptsource: An integrated development environment and repository for natural language prompts. arXiv preprint arXiv:2202.01279. +Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., & Kiela, D. (2019). Adversarial NLI: A new benchmark for natural language understanding. arXiv preprint arXiv:1910.14599. + +Wang, Z., Hamza, W., & Florian, R. (2017, August). Bilateral multi-perspective matching for natural language sentences. In Proceedings of the 26th International Joint Conference on Artificial Intelligence (pp. 4144-4150). 
diff --git a/src/genbench/tasks/icl_consistency_test/example_evaluation.py b/src/genbench/tasks/icl_consistency_test/example_evaluation.py index a1ba045..edc9def 100644 --- a/src/genbench/tasks/icl_consistency_test/example_evaluation.py +++ b/src/genbench/tasks/icl_consistency_test/example_evaluation.py @@ -21,8 +21,9 @@ from genbench import load_task from genbench.api import PreparationStrategy - -N_DATAPOINTS = 50 + +DATASET = 'mnli' # choose between 'anli' and 'mnli' +N_DATAPOINTS = 2 #50 MODEL_NAME = "huggyllama/llama-7b" BATCH_SIZE = 8 @@ -94,8 +95,9 @@ def make_predictions(self, dataset, bs=8) -> Dict[str, Dict[str, str]]: if __name__ == "__main__": + # Load the task - task = load_task("icl_consistency_test") + task = load_task("icl_consistency_test")[DATASET] ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] # Selecting a subset of example for illustration purposes @@ -117,5 +119,6 @@ def make_predictions(self, dataset, bs=8) -> Dict[str, Dict[str, str]]: results = task.evaluate_predictions(predictions=predictions, gold=ds) print("EVALUATED SUCCESSFULLY!") - print(f'Exact-match accuracies: \n{results["exact_match_accuracy"]["accuracy"]}') + print(f'Accuracies: \nMean: {results["accuracy"]["accuracy"].mean()}; std: {results["accuracy"]["accuracy"].std()}') + print(f'Main effects: \n{results["main_effects"]}') print(f'Consistency: \n{results["kappas"]}') diff --git a/src/genbench/tasks/icl_consistency_test/mnli/__init__.py b/src/genbench/tasks/icl_consistency_test/mnli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/icl_consistency_test/mnli/config.jsonnet b/src/genbench/tasks/icl_consistency_test/mnli/config.jsonnet new file mode 100644 index 0000000..7896931 --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/mnli/config.jsonnet @@ -0,0 +1,48 @@ +{ + name: 'ICL consistency test (mnli)', + + description: 'The ICL consistency test measures the consistency of LLM 
predictions on the same datapoints across many different setups. Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the number of parameters or instructions tuning). These external factors can be added into analysis by using the task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 strongly changes model predictions. This test evaluates the MNLI-dataset (Wang et al., 2017).', + + keywords: [ + 'consistency', + 'LLM', + 'robustness', + 'in-context learning', + 'prompt-based learning', + 'icl', + 'anli', + 'mnli' + ], + + authors: [ + 'Lucas Weber', + 'Elia Bruni', + 'Dieuwke Hupkes', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all_glue+mnli.jsonl', + }, + + has_validation_set: false, + has_train_set: false, + + task_type: 'free_form', + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. 
+ prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: '', + instruction_few_shot: '', + input_prefix: '', + output_prefix: '', + } + }, + }, +} diff --git a/src/genbench/tasks/icl_consistency_test/mnli/doc.md b/src/genbench/tasks/icl_consistency_test/mnli/doc.md new file mode 100644 index 0000000..8fe23b8 --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/mnli/doc.md @@ -0,0 +1,136 @@ +# ICL consistency test + +The ICL consistency test measures the consistency of LLM predictions on the same datapoints across many different setups. +Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. +the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a +specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related +to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the +number of parameters or instructions tuning). These external factors can be added into analysis by using the +task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. +A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 +strongly changes model predictions. The ICL consistency test has two subtasks, one evaluating the ANLI-dataset (Nie et al., 2019); +the other the MNLI-dataset (Wang et al., 2017). + +*Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. + +## Abstract +Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. 
+Just like the previous generation of task-tuned models (TT), models that are adapted to tasks via in-context-learning (ICL) or instruction tuning (IT) are robust in some setups, but not in others. +Here, we present a detailed analysis of which design choices cause instabilities and inconsistencies in LLM predictions. +First, we show how spurious correlations between input distributions and labels -- a known issue in TT models -- form only a minor problem for prompted models. +Then we engage in a systematic, holistic evaluation of different factors that have been found to influence predictions in a prompting setup. +We test all possible combinations of a range of factors on both vanilla and instruction-tuned LLMs of different scale, and statistically analyse the results to show which factors are the most influential, the most interactive or the most stable. +From our results, we deduce which factors can be used without precautions, should be avoided or handled with care in most settings. + +## Examples +The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor). + +Example with data\_ID - 1120; setup\_ID - id0_0200020: +``` +The city's name derives from the Greek words "άργυρος" ("árgyros" meaning +"silver") and "πόλη" ("poli" meaning "city"). The name's older form was +"Argyroupolis". The first name of the settlement was "New Argyroupolis", +given by the refugees from Gümüşhane. Using only the above description +and what you know about the world, "The city's name derives from Greek words." +is definitely correct, incorrect, or inconclusive? + +ANSWER: Correct. + +Undead is a 2003 Australian zombie science fiction horror comedy film +written and directed by Michael and Peter Spierig and starring Felicity +Mason, Mungo McKay and Rob Jenkins. 
It was then-relatively-unknown "Good Game" +presenter Steven O'Donnell's first film role. Using only the above description +and what you know about the world, "Steven O'Donnell was not a popular actor before +the 2003 Zombie movie." is definitely correct, incorrect, or inconclusive? + +ANSWER: Correct. + +Let the Music Do the Talking is the first of four albums by The Joe Perry +Project. It was their the most successful, selling approximately 250,000 +copies in the United States. The title track was re-recorded by Joe Perry's +more successful band Aerosmith on their album "Done With Mirrors", albeit +with a slightly different melody and Steven Tyler penned lyrics. Using only +the above description and what you know about the world, ""Done With Mirrors" +was an album by The Joe Perry Project." is definitely correct, incorrect, or +inconclusive? + +ANSWER: +``` + +## Usage +For an example script copy `example_evaluation.py` into your genbench root directory (`/genbench_cbt`) and run it. +#### Dataloading +The task can loaded through the default GenBench interface as a zero-shot task: +```python +from genbench import load_task +from genbench.api import PreparationStrategy + +task = load_task("icl_consistency_test") +ds = task.get_prepared_datasets( + PreparationStrategy.PROMPT_BASED_TESTING, + shot_list=[0] + )[0] +``` +#### Evaluation +Provide the evaluation function with the model outputs as strings, accompanied by the corresponding setup-ids and data-ids +from the original dataset. +For the predictions, please follow the following format: + +`predictions: Dict[setup_ID, Dict[data_ID, model_output]]` + +For the gold labels, please provide the original dataset ds: + +`gold: datasets.Dataset` + +With this input, run the task evaluation like so: +```python +results = task.evaluate_predictions(predictions=predictions, + gold=ds) +``` + +#### Adding factors +External factors can be added via the `task.add_factor()` method. 
+```python
+predictions = (predictions_factor_absent, predictions_factor_present)
+predictions = task.add_factor(data=predictions,
+                              factor='')
+```
+where `predictions_factor_absent` and `predictions_factor_present` are dictionaries of the same format as the original
+predictions dictionary.
+
+#### Removing factors
+Factors can be removed from the dataset and the evaluation by using the `task.remove_factor()` method.
+```python
+predictions = task.remove_factor(data=ds,
+                                 factor='')
+```
+where `ds` is the original dataset as obtained by the `task.get_prepared_datasets()` method. Note that removing factors
+will influence the results on all other factors.
+
+## Data Source
+The original data stems from the MNLI dataset (Wang et al., 2017).
+Prompting templates are taken from promptsource (Bach et al., 2022).
+
+## Limitations and Bias
+We identify the following limitations of the consistency test:
+1. The number of factors is limited and does not cover all possible factors that might influence the predictions. We limited ourselves to factors we deem relevant, to ensure fast evaluation.
+
+2. Currently, the test is only implemented for the ANLI and MNLI datasets.
+
+3. External factors such as _Instruction tuning_ or _calibration_ have to be manually added by the user using the `task.add_factor()` method.
+
+
+## GenBench Eval card
+This test can be used to test generalisation in LLMs (pretrain - test locus).
+It is designed to better understand how LLMs generalise (intrinsic motivation) and to give practical hints on relevant prompt-design decisions (practical motivation). It can be used to assess robustness.
+
+![GenBench Eval Card](GenBench_eval_card.png)
+
+
+## References
+
+Bach, S. H., Sanh, V., Yong, Z. X., Webson, A., Raffel, C., Nayak, N. V., ... & Rush, A. M. (2022). Promptsource: An integrated development environment and repository for natural language prompts. arXiv preprint arXiv:2202.01279.
+ +Nie, Y., Williams, A., Dinan, E., Bansal, M., Weston, J., & Kiela, D. (2019). Adversarial NLI: A new benchmark for natural language understanding. arXiv preprint arXiv:1910.14599. + +Wang, Z., Hamza, W., & Florian, R. (2017, August). Bilateral multi-perspective matching for natural language sentences. In Proceedings of the 26th International Joint Conference on Artificial Intelligence (pp. 4144-4150). diff --git a/src/genbench/tasks/icl_consistency_test/mnli/task.py b/src/genbench/tasks/icl_consistency_test/mnli/task.py new file mode 100644 index 0000000..7d5c49f --- /dev/null +++ b/src/genbench/tasks/icl_consistency_test/mnli/task.py @@ -0,0 +1,262 @@ +from typing import Any, Dict, Tuple +from numpy import ndarray + +import numpy as np +import datasets +import pandas as pd +from pandas import DataFrame +from sklearn.metrics import cohen_kappa_score +import statsmodels.api as sm + +from genbench import Task + +LABELS = [ + ["Correct", "True", "Always", "Yes", "Guaranteed", "Duplicates"], # `correct` labels + ["Inconclusive", "Possible", "Sometimes", "Maybe", "Neither"], # `neutral` labels + ["Impossible", "Never", "Incorrect", "False", "No", "Not Duplicates"], # `incorrect` labels +] + +LABEL_TO_NUMERIC = {} +LABEL_TO_NUMERIC.update(dict([(label, i) for i, label_subset in enumerate(LABELS) for label in label_subset])) +LABEL_TO_NUMERIC.update(dict([(label.lower(), i) for i, label_subset in enumerate(LABELS) for label in label_subset])) + +factors = [ + "balanced_labels", + "one_label", + "cross_task", + "cross_instructions", + "n_shots", + "instructions", + "hp_instructions", +] + + +class IclConsistencyTestWSubtasksMnli(Task): + """Python implementation of the ICL consistency test task.""" + + def evaluate_predictions( + self, + *, + predictions: Dict[str, Dict[str, Any]], + gold: datasets.Dataset, + ) -> Dict[str, Any]: + """Evaluate the predictions of the model against the gold data. 
+ Calculating exact match accuracy plus consistency across all setups (Cohen's kappa). + + Args: + predictions: A dictionary of dictionary, where the keys of the outer dictionary contains + the setup_IDs and the inner dictionary the data_IDs. The values of the inner dictionary + are the predictions for the example. The keys are strings and the values can be any type. + gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. + + Returns: + A dictionary containing key-value pairs for the evaluation metric(s) computed on the predicted + values. The keys are strings representing the name of the evaluation metric and the values are + floating-point numbers. + """ + self._set_factors() + + gold_pandas = gold.to_pandas() + gold_pandas["data_ID"] = gold_pandas["data_ID"].astype(str) + gold_labels_numeric = gold_pandas.set_index("data_ID")["target_numeric"].to_dict() + + results_df = self._create_df(predictions, gold_labels_numeric) + results_df = results_df.sort_values(by=["setup_ID", "data_ID"]) + self._assert_equal_data_ids(results_df) + + # Compute the accuracy for each setup + accuracies, setup_IDs, setups_by_factor = [], [], [] + for setup_ID, setup_predictions in results_df.groupby("setup_ID"): + accuracy = (setup_predictions["predictions_numeric"] == setup_predictions["target_numeric"]).mean() + + accuracies.append(accuracy) + setup_IDs.append(setup_ID) + setups_by_factor.append(setup_predictions[self.factors].head(1)) + + accuracies_df = DataFrame({"setup_ID": setup_IDs, "accuracy": accuracies}) + setups_by_factor_df = pd.concat(setups_by_factor, ignore_index=True) + + # Compute main effects for each factor + betas, p_values = [], [] + for factor in self.factors: + X = setups_by_factor_df[factor].to_numpy(dtype=int) # X is binary and states if a factor is present or not + y = accuracies_df["accuracy"].to_numpy(dtype=float) # y are the acc. 
scores of the respective setups + mask = X != 2 # create mask to ignore setups that are irrelevant to factor (coded as X == 2) + + # fit GLM + beta, p_value = self._calculate_main_effects(X[mask], y[mask]) + + betas.append(beta) + p_values.append(p_value) + + main_effects_df = DataFrame({'factor': self.factors, 'beta': betas, 'p_value': p_values}) + + # Compute Cohen's kappa for consistency + kappas = [] + for factor in self.factors: + factor_present = results_df.loc[results_df[factor] == "1"]["predictions_numeric"] + factor_absent = results_df.loc[results_df[factor] == "0"]["predictions_numeric"] + + # mask out predictions that are out-of-label-distribution + mask = [(f1 != -1 and f2 != -1) for f1, f2 in zip(factor_absent, factor_present)] + factor_present, factor_absent = factor_present[mask], factor_absent[mask] + + kappas.append(cohen_kappa_score(factor_present, factor_absent)) + + kappas_df = DataFrame({'factor': self.factors, 'kappa': kappas}) + + # Calculate average kappa + kappa_avg = kappas_df["kappa"].mean() + + # Return the evaluation metrics. + return {"accuracy": accuracies_df, + "main_effects": main_effects_df, + "kappas": kappas_df, + "kappa_avg": kappa_avg} + + def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str, Any]]: + """Concatenate the data with the factor present and absent and update the setup_IDs accordingly. Also add the + respective factor to the list of factors. + + Args: + data: A tuple containing predictions, where the first element are predictions with factor absent and the + second element are predictions with factor present. + factor: A string giving the name of the added factor. + + """ + + # Update the setup_IDs of the data by appending a 0 when the factor is absent or 1 when the factor is present. 
+ setup_ids0 = list(data[0].keys()) + setup_ids1 = list(data[1].keys()) + + for setup_id0, setup_id1 in zip(setup_ids0, setup_ids1): + updated_id0 = setup_id0 + "0" + updated_id1 = setup_id1 + "1" + data[0][updated_id0] = data[0].pop(setup_id0) + data[1][updated_id1] = data[1].pop(setup_id1) + + # Add factor to list of factors. + self._set_factors() + self.factors.append(factor) + + return {**data[0], **data[1]} + + def remove_factor(self, data: datasets.Dataset, factor: str, keep_present: bool = False) -> datasets.Dataset: + """Remove data of factor and update the setup_IDs accordingly. Also remove the + respective factor from the list of factors. Keep_present determines whether to keep data with the factor + present or absent. + + Args: + data: The dataset as obtained by the get_prepared_datasets() method. + factor: A string with the name of the factor to remove. + keep_present: whether to keep data with the factor present or absent. + """ + self._set_factors() + + len_setup_ID_preamble = 4 + index_factor = self.factors.index(factor) + len_setup_ID_preamble + realisation_to_keep = str(int(keep_present)) + + # filter out all unwanted datapoints and adapt setup_IDs to exclude factor + data = data.filter(lambda x: x["setup_ID"][index_factor] == realisation_to_keep) + data = data.map(lambda x: {**x, "setup_ID": x["setup_ID"][:index_factor] + x["setup_ID"][index_factor + 1 :]}) + + # Remove factor from list of factors. + self.factors.pop(self.factors.index(factor)) + + return data + + def _create_df(self, predictions: Dict[str, Dict[str, Any]], gold_labels: Dict[str, int]) -> DataFrame: + """Create a dataframe containing all predictions, gold labels and labels. + + Args: + predictions: A dictionary of dictionary, where the keys of the outer dictionary contains + the setup_IDs and the inner dictionary the data_IDs. The values of the inner dictionary + are the predictions for the example. The keys are strings and the values can be any type. 
+ gold: A dictionary, where the keys are the data_IDs and the values are the gold labels for the example. + The keys are strings and the values can be any type. + + Returns: + A pandas dataframe containing the predictions and gold data. + """ + additional_keys = ["predictions_numeric", "target_numeric", "setup_ID", "data_ID"] + results_dict = {factor: [] for factor in self.factors + additional_keys} + + for setup_ID, predictions_setup in predictions.items(): + data_ids = list(predictions_setup.keys()) + n_datapoints = len(data_ids) + + results_dict["data_ID"].extend(data_ids) + results_dict["setup_ID"].extend([setup_ID] * n_datapoints) + results_dict["target_numeric"].extend(gold_labels[data_id] for data_id in data_ids) + results_dict["predictions_numeric"].extend( + self._label_to_numeric(predictions_setup[data_id]) for data_id in data_ids + ) + + temp = self._convert_numeric_id_to_dict(setup_ID, n_repetitions=n_datapoints) + for factor in self.factors: + results_dict[factor].extend(temp[factor]) + + return DataFrame(results_dict) + + def _set_factors(self): + if not hasattr(self, "factors"): + self.factors = factors + + def _convert_numeric_id_to_dict(self, setup_id: str, n_repetitions: int = 1) -> Dict[str, Any]: + """Convert a numeric setup_ID to a interpretable dict. + + Args: + id: A numeric ID of the form `id_1010101' where each digit represents a factor. + + Returns: + A dict containing factors as keys and the factor realisation as value. 
+ """ + setup_id = setup_id.split("_")[1] + + setup_dict = {} + for factor, value in zip(self.factors, setup_id): + setup_dict[factor] = [value] * n_repetitions + + return setup_dict + + @staticmethod + def _calculate_main_effects(X: ndarray, y: ndarray) -> Tuple[ndarray, ndarray]: + """ + + :return: + """ + # Add a constant column to X for the intercept + X = sm.add_constant(X) + + # Fit GLM + model = sm.GLM(y, X) + results = model.fit() + + return results.params[1], results.pvalues[1] + + @staticmethod + def _label_to_numeric(label: str) -> int: + """Convert a label to a numeric value. + + Args: + label: A label. + + Returns: + A numeric label. + """ + return LABEL_TO_NUMERIC[label] if label in LABEL_TO_NUMERIC else -1 + + @staticmethod + def _assert_equal_data_ids(results_df: DataFrame) -> None: + """Assert that all data_IDs are the same for all setups. + + Args: + results_df: A pandas dataframe containing the predictions and gold data. + """ + used_data_ids = results_df["data_ID"].unique() + for setup_ID in results_df["setup_ID"].unique(): + assert ( + used_data_ids.sort() == results_df.loc[results_df["setup_ID"] == setup_ID]["data_ID"].unique().sort() + ), "Not all data_IDs are the same for all setups. Check for missing predictions!" 
+ From 7d738e6b907da0fd4a849bef80009bf1a97fa488 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Fri, 1 Sep 2023 10:37:07 +0200 Subject: [PATCH 34/57] Final changes --- .../tasks/icl_consistency_test/anli/config.jsonnet | 2 +- src/genbench/tasks/icl_consistency_test/anli/doc.md | 13 ++++++------- .../tasks/icl_consistency_test/anli/task.py | 2 ++ src/genbench/tasks/icl_consistency_test/doc.md | 13 ++++++------- .../icl_consistency_test/example_evaluation.py | 8 ++++++-- src/genbench/tasks/icl_consistency_test/mnli/doc.md | 13 ++++++------- 6 files changed, 27 insertions(+), 24 deletions(-) diff --git a/src/genbench/tasks/icl_consistency_test/anli/config.jsonnet b/src/genbench/tasks/icl_consistency_test/anli/config.jsonnet index 2e047a6..0dded00 100644 --- a/src/genbench/tasks/icl_consistency_test/anli/config.jsonnet +++ b/src/genbench/tasks/icl_consistency_test/anli/config.jsonnet @@ -23,7 +23,7 @@ data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all.jsonl', + test: 'https://raw.githubusercontent.com/LucWeber/icl_consistency_data/main/data/genbench_all_anli.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/icl_consistency_test/anli/doc.md b/src/genbench/tasks/icl_consistency_test/anli/doc.md index 8fe23b8..b95adef 100644 --- a/src/genbench/tasks/icl_consistency_test/anli/doc.md +++ b/src/genbench/tasks/icl_consistency_test/anli/doc.md @@ -14,13 +14,12 @@ the other the MNLI-dataset (Wang et al., 2017). *Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. ## Abstract -Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. -Just like the previous generation of task-tuned models (TT), models that are adapted to tasks via in-context-learning (ICL) or instruction tuning (IT) are robust in some setups, but not in others. 
-Here, we present a detailed analysis of which design choices cause instabilities and inconsistencies in LLM predictions. -First, we show how spurious correlations between input distributions and labels -- a known issue in TT models -- form only a minor problem for prompted models. -Then we engage in a systematic, holistic evaluation of different factors that have been found to influence predictions in a prompting setup. -We test all possible combinations of a range of factors on both vanilla and instruction-tuned LLMs of different scale, and statistically analyse the results to show which factors are the most influential, the most interactive or the most stable. -From our results, we deduce which factors can be used without precautions, should be avoided or handled with care in most settings. +Just like the previous generation of \textit{task-tuned} models (TT), LLMs that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. +This lack of consistency in model predictions is a problem in prompt-based learning and hints at a lack of robust generalisation. +We here introduce the ICL consistency test -- a contribution to the GenBench CBT -- which evaluates how consistent a model does predictions across many different setups while using the same data. +The test is based on different established natural language inference tasks. +It introduces a consistency metric to reliably estimate model consistency and provides insight into which properties of an evaluation setup render ICL predictions unstable. +We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how LLMs lack robust generalisation. ## Examples The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. 
Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor). diff --git a/src/genbench/tasks/icl_consistency_test/anli/task.py b/src/genbench/tasks/icl_consistency_test/anli/task.py index 6333d2e..9dac7bd 100644 --- a/src/genbench/tasks/icl_consistency_test/anli/task.py +++ b/src/genbench/tasks/icl_consistency_test/anli/task.py @@ -97,6 +97,8 @@ def evaluate_predictions( factor_present = results_df.loc[results_df[factor] == "1"]["predictions_numeric"] factor_absent = results_df.loc[results_df[factor] == "0"]["predictions_numeric"] + breakpoint() + # mask out predictions that are out-of-label-distribution mask = [(f1 != -1 and f2 != -1) for f1, f2 in zip(factor_absent, factor_present)] factor_present, factor_absent = factor_present[mask], factor_absent[mask] diff --git a/src/genbench/tasks/icl_consistency_test/doc.md b/src/genbench/tasks/icl_consistency_test/doc.md index 8fe23b8..b95adef 100644 --- a/src/genbench/tasks/icl_consistency_test/doc.md +++ b/src/genbench/tasks/icl_consistency_test/doc.md @@ -14,13 +14,12 @@ the other the MNLI-dataset (Wang et al., 2017). *Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. ## Abstract -Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. -Just like the previous generation of task-tuned models (TT), models that are adapted to tasks via in-context-learning (ICL) or instruction tuning (IT) are robust in some setups, but not in others. -Here, we present a detailed analysis of which design choices cause instabilities and inconsistencies in LLM predictions. -First, we show how spurious correlations between input distributions and labels -- a known issue in TT models -- form only a minor problem for prompted models. 
-Then we engage in a systematic, holistic evaluation of different factors that have been found to influence predictions in a prompting setup. -We test all possible combinations of a range of factors on both vanilla and instruction-tuned LLMs of different scale, and statistically analyse the results to show which factors are the most influential, the most interactive or the most stable. -From our results, we deduce which factors can be used without precautions, should be avoided or handled with care in most settings. +Just like the previous generation of \textit{task-tuned} models (TT), LLMs that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. +This lack of consistency in model predictions is a problem in prompt-based learning and hints at a lack of robust generalisation. +We here introduce the ICL consistency test -- a contribution to the GenBench CBT -- which evaluates how consistent a model does predictions across many different setups while using the same data. +The test is based on different established natural language inference tasks. +It introduces a consistency metric to reliably estimate model consistency and provides insight into which properties of an evaluation setup render ICL predictions unstable. +We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how LLMs lack robust generalisation. ## Examples The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor). 
diff --git a/src/genbench/tasks/icl_consistency_test/example_evaluation.py b/src/genbench/tasks/icl_consistency_test/example_evaluation.py index edc9def..314aa0c 100644 --- a/src/genbench/tasks/icl_consistency_test/example_evaluation.py +++ b/src/genbench/tasks/icl_consistency_test/example_evaluation.py @@ -22,8 +22,8 @@ from genbench.api import PreparationStrategy -DATASET = 'mnli' # choose between 'anli' and 'mnli' -N_DATAPOINTS = 2 #50 +DATASET = 'anli' # options: {'anli', 'mnli'} +N_DATAPOINTS = 200 MODEL_NAME = "huggyllama/llama-7b" BATCH_SIZE = 8 @@ -99,6 +99,7 @@ def make_predictions(self, dataset, bs=8) -> Dict[str, Dict[str, str]]: # Load the task task = load_task("icl_consistency_test")[DATASET] ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] + breakpoint() # Selecting a subset of example for illustration purposes subset = list(set(ds["data_ID"]))[:N_DATAPOINTS] @@ -122,3 +123,6 @@ def make_predictions(self, dataset, bs=8) -> Dict[str, Dict[str, str]]: print(f'Accuracies: \nMean: {results["accuracy"]["accuracy"].mean()}; std: {results["accuracy"]["accuracy"].std()}') print(f'Main effects: \n{results["main_effects"]}') print(f'Consistency: \n{results["kappas"]}') + print(f'#' * 90) + print(f'Overall consistency: {results["kappa_avg"]}') + print(f'#' * 90) diff --git a/src/genbench/tasks/icl_consistency_test/mnli/doc.md b/src/genbench/tasks/icl_consistency_test/mnli/doc.md index 8fe23b8..b95adef 100644 --- a/src/genbench/tasks/icl_consistency_test/mnli/doc.md +++ b/src/genbench/tasks/icl_consistency_test/mnli/doc.md @@ -14,13 +14,12 @@ the other the MNLI-dataset (Wang et al., 2017). *Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. ## Abstract -Finding the best way of adapting pre-trained language models to a task is a big challenge in current NLP. 
-Just like the previous generation of task-tuned models (TT), models that are adapted to tasks via in-context-learning (ICL) or instruction tuning (IT) are robust in some setups, but not in others. -Here, we present a detailed analysis of which design choices cause instabilities and inconsistencies in LLM predictions. -First, we show how spurious correlations between input distributions and labels -- a known issue in TT models -- form only a minor problem for prompted models. -Then we engage in a systematic, holistic evaluation of different factors that have been found to influence predictions in a prompting setup. -We test all possible combinations of a range of factors on both vanilla and instruction-tuned LLMs of different scale, and statistically analyse the results to show which factors are the most influential, the most interactive or the most stable. -From our results, we deduce which factors can be used without precautions, should be avoided or handled with care in most settings. +Just like the previous generation of \textit{task-tuned} models (TT), LLMs that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. +This lack of consistency in model predictions is a problem in prompt-based learning and hints at a lack of robust generalisation. +We here introduce the ICL consistency test -- a contribution to the GenBench CBT -- which evaluates how consistent a model does predictions across many different setups while using the same data. +The test is based on different established natural language inference tasks. +It introduces a consistency metric to reliably estimate model consistency and provides insight into which properties of an evaluation setup render ICL predictions unstable. +We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how LLMs lack robust generalisation. 
## Examples The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor). From a543041e8391bd690c2a1e4e3057a3ed4a2468e6 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Fri, 1 Sep 2023 15:10:44 +0200 Subject: [PATCH 35/57] Style and quality check --- .../tasks/icl_consistency_test/anli/task.py | 23 ++++++----- .../example_evaluation.py | 38 +++++++++++++------ .../tasks/icl_consistency_test/mnli/task.py | 21 +++++----- 3 files changed, 49 insertions(+), 33 deletions(-) diff --git a/src/genbench/tasks/icl_consistency_test/anli/task.py b/src/genbench/tasks/icl_consistency_test/anli/task.py index 9dac7bd..16d04b9 100644 --- a/src/genbench/tasks/icl_consistency_test/anli/task.py +++ b/src/genbench/tasks/icl_consistency_test/anli/task.py @@ -1,15 +1,15 @@ from typing import Any, Dict, Tuple -from numpy import ndarray -import numpy as np import datasets import pandas as pd +import statsmodels.api as sm +from numpy import ndarray from pandas import DataFrame from sklearn.metrics import cohen_kappa_score -import statsmodels.api as sm from genbench import Task + LABELS = [ ["Correct", "True", "Always", "Yes", "Guaranteed", "Duplicates"], # `correct` labels ["Inconclusive", "Possible", "Sometimes", "Maybe", "Neither"], # `neutral` labels @@ -89,7 +89,7 @@ def evaluate_predictions( betas.append(beta) p_values.append(p_value) - main_effects_df = DataFrame({'factor': self.factors, 'beta': betas, 'p_value': p_values}) + main_effects_df = DataFrame({"factor": self.factors, "beta": betas, "p_value": p_values}) # Compute Cohen's kappa for consistency kappas = [] @@ -97,24 +97,24 @@ def evaluate_predictions( factor_present = results_df.loc[results_df[factor] == "1"]["predictions_numeric"] factor_absent = results_df.loc[results_df[factor] == "0"]["predictions_numeric"] - 
breakpoint() - # mask out predictions that are out-of-label-distribution mask = [(f1 != -1 and f2 != -1) for f1, f2 in zip(factor_absent, factor_present)] factor_present, factor_absent = factor_present[mask], factor_absent[mask] kappas.append(cohen_kappa_score(factor_present, factor_absent)) - kappas_df = DataFrame({'factor': self.factors, 'kappa': kappas}) + kappas_df = DataFrame({"factor": self.factors, "kappa": kappas}) # Calculate average kappa kappa_avg = kappas_df["kappa"].mean() # Return the evaluation metrics. - return {"accuracy": accuracies_df, - "main_effects": main_effects_df, - "kappas": kappas_df, - "kappa_avg": kappa_avg} + return { + "accuracy": accuracies_df, + "main_effects": main_effects_df, + "kappas": kappas_df, + "kappa_avg": kappa_avg, + } def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str, Any]]: """Concatenate the data with the factor present and absent and update the setup_IDs accordingly. Also add the @@ -261,4 +261,3 @@ def _assert_equal_data_ids(results_df: DataFrame) -> None: assert ( used_data_ids.sort() == results_df.loc[results_df["setup_ID"] == setup_ID]["data_ID"].unique().sort() ), "Not all data_IDs are the same for all setups. Check for missing predictions!" 
- diff --git a/src/genbench/tasks/icl_consistency_test/example_evaluation.py b/src/genbench/tasks/icl_consistency_test/example_evaluation.py index 314aa0c..9e49e8d 100644 --- a/src/genbench/tasks/icl_consistency_test/example_evaluation.py +++ b/src/genbench/tasks/icl_consistency_test/example_evaluation.py @@ -21,8 +21,8 @@ from genbench import load_task from genbench.api import PreparationStrategy - -DATASET = 'anli' # options: {'anli', 'mnli'} + +DATASET = "anli" # options: {'anli', 'mnli'} N_DATAPOINTS = 200 MODEL_NAME = "huggyllama/llama-7b" BATCH_SIZE = 8 @@ -34,6 +34,7 @@ class Generator: """ A simple wrapper to evaluate a given hf-model """ + def __init__(self, model_name="huggyllama/llama-7b"): self.max_new_tokens = 4 # some labels consist of up to 4 tokens self.tokenizer = transformers.AutoTokenizer.from_pretrained( @@ -95,11 +96,9 @@ def make_predictions(self, dataset, bs=8) -> Dict[str, Dict[str, str]]: if __name__ == "__main__": - # Load the task task = load_task("icl_consistency_test")[DATASET] ds = task.get_prepared_datasets(PreparationStrategy.PROMPT_BASED_TESTING, shot_list=[0])[0] - breakpoint() # Selecting a subset of example for illustration purposes subset = list(set(ds["data_ID"]))[:N_DATAPOINTS] @@ -118,11 +117,28 @@ def make_predictions(self, dataset, bs=8) -> Dict[str, Dict[str, str]]: # Evaluate the predictions results = task.evaluate_predictions(predictions=predictions, gold=ds) + + print_out = f""" + {"#" * 90} + EVALUATED SUCCESSFULLY! 
+ {"#" * 90} + + {"-" * 90} + Accuracies: + Mean: {results["accuracy"]["accuracy"].mean()}; std: {results["accuracy"]["accuracy"].std()} + {"-" * 90} + Main effects: + {results["main_effects"]} + {"-" * 90} + Consistency: + {results["kappas"]} + {"-" * 90} + + {"#" * 90} + Overall consistency: {results["kappa_avg"]} + {"#" * 90} + """ + + print(print_out) - print("EVALUATED SUCCESSFULLY!") - print(f'Accuracies: \nMean: {results["accuracy"]["accuracy"].mean()}; std: {results["accuracy"]["accuracy"].std()}') - print(f'Main effects: \n{results["main_effects"]}') - print(f'Consistency: \n{results["kappas"]}') - print(f'#' * 90) - print(f'Overall consistency: {results["kappa_avg"]}') - print(f'#' * 90) + diff --git a/src/genbench/tasks/icl_consistency_test/mnli/task.py b/src/genbench/tasks/icl_consistency_test/mnli/task.py index 7d5c49f..70755be 100644 --- a/src/genbench/tasks/icl_consistency_test/mnli/task.py +++ b/src/genbench/tasks/icl_consistency_test/mnli/task.py @@ -1,15 +1,15 @@ from typing import Any, Dict, Tuple -from numpy import ndarray -import numpy as np import datasets import pandas as pd +import statsmodels.api as sm +from numpy import ndarray from pandas import DataFrame from sklearn.metrics import cohen_kappa_score -import statsmodels.api as sm from genbench import Task + LABELS = [ ["Correct", "True", "Always", "Yes", "Guaranteed", "Duplicates"], # `correct` labels ["Inconclusive", "Possible", "Sometimes", "Maybe", "Neither"], # `neutral` labels @@ -89,7 +89,7 @@ def evaluate_predictions( betas.append(beta) p_values.append(p_value) - main_effects_df = DataFrame({'factor': self.factors, 'beta': betas, 'p_value': p_values}) + main_effects_df = DataFrame({"factor": self.factors, "beta": betas, "p_value": p_values}) # Compute Cohen's kappa for consistency kappas = [] @@ -103,16 +103,18 @@ def evaluate_predictions( kappas.append(cohen_kappa_score(factor_present, factor_absent)) - kappas_df = DataFrame({'factor': self.factors, 'kappa': kappas}) + kappas_df = 
DataFrame({"factor": self.factors, "kappa": kappas}) # Calculate average kappa kappa_avg = kappas_df["kappa"].mean() # Return the evaluation metrics. - return {"accuracy": accuracies_df, - "main_effects": main_effects_df, - "kappas": kappas_df, - "kappa_avg": kappa_avg} + return { + "accuracy": accuracies_df, + "main_effects": main_effects_df, + "kappas": kappas_df, + "kappa_avg": kappa_avg, + } def add_factor(self, data: Tuple[Dict, Dict], factor: str) -> Dict[str, Dict[str, Any]]: """Concatenate the data with the factor present and absent and update the setup_IDs accordingly. Also add the @@ -259,4 +261,3 @@ def _assert_equal_data_ids(results_df: DataFrame) -> None: assert ( used_data_ids.sort() == results_df.loc[results_df["setup_ID"] == setup_ID]["data_ID"].unique().sort() ), "Not all data_IDs are the same for all setups. Check for missing predictions!" - From 65fb8715eee19ef70e7b7dacd548ea01608261d7 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Fri, 1 Sep 2023 15:23:29 +0200 Subject: [PATCH 36/57] Style and quality check --- .../icl_consistency_test/example_evaluation.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/src/genbench/tasks/icl_consistency_test/example_evaluation.py b/src/genbench/tasks/icl_consistency_test/example_evaluation.py index 9e49e8d..b52bf9e 100644 --- a/src/genbench/tasks/icl_consistency_test/example_evaluation.py +++ b/src/genbench/tasks/icl_consistency_test/example_evaluation.py @@ -117,28 +117,26 @@ def make_predictions(self, dataset, bs=8) -> Dict[str, Dict[str, str]]: # Evaluate the predictions results = task.evaluate_predictions(predictions=predictions, gold=ds) - + print_out = f""" {"#" * 90} EVALUATED SUCCESSFULLY! 
{"#" * 90} - + {"-" * 90} Accuracies: Mean: {results["accuracy"]["accuracy"].mean()}; std: {results["accuracy"]["accuracy"].std()} {"-" * 90} - Main effects: - {results["main_effects"]} - {"-" * 90} + Main effects: + {results["main_effects"]} + {"-" * 90} Consistency: {results["kappas"]} - {"-" * 90} - + {"-" * 90} + {"#" * 90} Overall consistency: {results["kappa_avg"]} {"#" * 90} """ - - print(print_out) - + print(print_out) From 5eb610cb8218e80529444bd643e5db8d711034b7 Mon Sep 17 00:00:00 2001 From: LucWeber Date: Fri, 1 Sep 2023 17:28:04 +0200 Subject: [PATCH 37/57] Update abstract and eval card; Cosmetics --- .../GenBench_eval_card.pdf | Bin 43069 -> 43069 bytes .../GenBench_eval_card.png | Bin 91665 -> 77484 bytes .../tasks/icl_consistency_test/anli/doc.md | 7 +------ .../tasks/icl_consistency_test/config.jsonnet | 1 - .../tasks/icl_consistency_test/doc.md | 7 +------ .../tasks/icl_consistency_test/mnli/doc.md | 7 +------ 6 files changed, 3 insertions(+), 19 deletions(-) diff --git a/src/genbench/tasks/icl_consistency_test/GenBench_eval_card.pdf b/src/genbench/tasks/icl_consistency_test/GenBench_eval_card.pdf index 037573426f0d8c61be742604deb230985c2aade9..3a1d066564ab44b74d95522dc8c30ae0086610c3 100644 GIT binary patch delta 1024 zcmV+b1poWJ&;q^C0+3UGFc7`xSLi4oylC~UBNXVs00T5P;9ycWcxW8(2TX@Q&&qaM z(>P(AHV0d_S5JCRPrHg>YbMzFLfpImTp-;ALx$ReL&>bQ5JF(9&EnG+!7l0JYbHPi z&wh8I8>X}dBRsXQ*~Q|+J&+JVi~_5Vf>5{uG&o=gaB)g>{dZD-0T6MF7e6SiXs)1t-&}@0hKJmBPrudau&9kQ1mdTgp>&1)Z zXlPA+gv1l5Ovs@mc%Mc*(Q+dt}bgd(d7p4Nl1c`!+lVqz<%jEDfOTuruFu z=;gZ2)}4IJNDu+pDC8c722Wy)F`jQXS<&rk(i^n&;~9MHI)82J=79H8lop)P)G&Zr zjoptP`_ee4Z0mM{(OjQ=aEimaie=@hELi(lh4C%=2!Cr-&*dZL$fb(9qN)4Faq zc?`Rp@RL*uqzDtq^sq{^8BVAaldzDF=b<~i(>#QKG+h#0(dN=_&qMXLtyag3Gn>wY zB#1!q4=TiTiijPIG+=Pp3h%N}R*b&5>62kshr8OmYN`6JWAzl8zt5_s%dCTCy4Z4z zA)OQaJZ`8wK~6;`G4ZG}j;{ylY{0&lw=q19uW!d+=UrEG*L9-OBOQx`ndQ#8GVZU0 z4xYY$KVE_x;hknqI@6LrgaqgI^V{{7?vfUZ?lI3>D&{yC{20AP$4KPate 
zuCPO>a&{9T22RhHi+^4z#exVHEnD6&3`$UMYn(7av*k7W#NTb3;#Xod&zfReCSR6s z7B82hp*8i92xm~4kVB#H=uTsxq!=79Iu6=0CxVj@Ie;?)y<%2dp)B+yK2(V@JXMOjm1YF)7WRpJ%TimJ@(I?|`{a6BH-4#-lF zLc=iJXDL;ZqIBN$?c=J=W;~%91Yw(#g7rnz@H^1CDj{#U2ZXEvdZ zF_ecMn?h;ldc%w*OanWs|H`T&YbZ@R1!{2L*Lcmz;okPC5_wBc^pdWQ(g1%}*X<^c zVb@a*qGTN=lIL-aX7iiSNTWdebP02Wcbk4mIj8`p{+jT6LQu7a4)pV(Ja7-6ljxo?V z$uHx6$`jyJRT2}=Cgb>e(9Q^WPqn#!}NIBH- zt`7HBD9ZnuypF^IP&O;dylKgaone~25DY}VKUe0XZ@X5KY9x%qfY^D$YBV!NGn!$w z`GOq&iHR6A^qfC2e;CD&e4=Q#068aCcx!qM2GC7^*M(^O4`|i%kh9qXNS+BdGBzmTsjayyx1QNV7cTC4Vcn(8 z($ezs=RB`{yy5NTD9!P8RAE!k>197|x1RUB@^L@ww=Lf6BHrvLDy3KL+k9c|i-`WY zh1P)CH+=`|b}SzlHCNZTuG<;vaGkrj&nd^EY~@ZZ4_w-xpN0124>Oki{Trob#px}7 z|I$jk*8T6FDLJR4SN{FW)T&#IfB&4zv6<#SZ+E`=eC^+V7@}H<)%o+|s`(Gg^1pwX zTmHZF75i$R+k7W!`e*_Yf$*J~2gXQsQtsE*WR*qGwJ8eUmAPBz)_*^Ox5 z)G731i^`{WD1J5LRI>Axenq5yX)teLNr^(dYRcG15eM7BgOO=LicE9yIrv!DoIg9E z_R4X3#NE@gx;e`s(3GoX)Z;m z>vG>ia-u#o3=E0o5fc3E9BOIOyu7^ZHL2IWuGi7kjWw)EkR1K|`Lm#5w3`R#Zeiz4 z$uJ@HE9n^nA0!S)_$NyslEQA?Hci-WH@~#Vl)m9d^tfV{Q>RY3hO-5Q+F${xb{u|F zBqbqH@$7_QfSx3VBO%nz@^zE(Jwi3Oc!6HNk$o=h@M#w{{#> zem>e&7OPX_FMi)e?R9_$(V`FXdH zW13{BfC|ke%c3}yq`s5eI4-6bRE<^2No~LS(B1#vkJ#^DMwV4gKFi+fgmHc4KBvin zCO)&)Q!nz%g1Du|dux=8jErQCmmN8Fz4&`qn2SbZNt(%@FroyviIF^!yOA8biT(z< z@~)DgC|V8MeJyQ>zgJeh)%vw>t_ta&H!(@5j=vn0Vp!8N+2T@@ZqY@vV@Kp`h1l2X z8P)~}?Q98ir{S-(&v-2&&wePmp5);3(xmAFt5QPnd!xEv$)Q|2h0*7|=zYSIbju8; zM>_7l&v2TZaQE=2WN?{#v2o8yp7vCOD)FjVrBsQc;^Ip8?QFhJpGN!o(nN%ZN6W{^ z;-&KE&fWR->lY0zt?m&kYr&Zy1}WiyPt7^mPP42>k4oozG1O$*jV3!xnnXlJ;lzk; z+%+r*E&W&#*~!p6O=B!4&7@F6?E;_rBweo^92$p% zN+Q2Lq-EpdQSv-Ku}ODqCQ6El_<3p4yRsYnt4xke2_^- z|4FfOtXhiwxPjX{?5*Y64Qj^YJyp8}?UPGG1=+;x=Z5pghdytL6@9eZpzdA7t=qR# zj2lksXlv_r6#5~fDV97t5#M(9?Ah%&(Df;X{6;+VJ9kD8x8=v`mWB4zB}L%?F2$ck zS1F%Gr^n~5S8=`+TC$xEi0#|A4ccmB2wd}~<49@QZg%(G9-e(I-Ne|cF5f7BtlNaaMN zxflO32Q69=o)%gk^tBDvgwM`E>Q1DyWLQ82EC_eo?*ha51E7Ur{gm ze(8gpoH7|UgVyOyByfZom%u{Wq2tHp>G-WJFGz`rnSYSV!wDsUBy#?|gQKLDhtSOL z{TWd!fCWqh?H)59K3qH?fP0zw{mLttUBAO~>jCjJd3ILT!tSkC*%ag6P${<8z0-Nv 
zQMihTVUTw$LEK35;X)Ke83pwx9a$2sfs#=+Qt z2Om`B<$araKf7(>KTN0KJCHT0;g&o$X6feoBStPN*=g3g$*wDex}(VVP@A{EzkjE9 z=V9BQacVN@8etMy_TziYP0~AdxFu&Yo8pTpJm;72*w85>qqyBCjedJ);}p;d+E>~O z+6<_Ph>EH@Jg_yuS)kUxdiAPg5ub!+hIPDBm(6*+p(0BG$)IoO!^J?Zp6@SipRHTH zad)k;sK$FEl_ski^=#9)0QXS~^BtRSymut`>AHAh!RRFU!I zxPy3&EY(edW8M``CmSC>efrzGvrpfBt{lCc&B}TQ^;-~(IY;*= z4WAtz9)7L-_NAQ@z4|r&THTABpkMV`K{fqG#6(}+ ztMA{Xb#!#L>kF^Ham<;W+<%VN^kMwYJ^S(FM@iEj&1^@NhdYjx-&#c#*XqiB!@?rO zL^iagg1OWrTUSTNF01YXKT;IZ0k_*2z{0nkukKROz7~6OP?DP}ry3W2!n#j+BfWrf znB&NUQk*0uO(z9S$Kkt875i%%N~Y5BWt0WaCFs+7H&oDpm8W)iSzmWWR4-z#rzs<~ zrfV%Vwe^QFS!eDF3kwUna)Jn5T`7qTzdY#$?RkQa9XKE&5x`yrT&375$LNC$A7Ai< z*>Gy;C%MlXH*Qo$UfSb?vuuE}QZYGi|3O8Vpp4wGElb!wQ~Jf3Raij&aNJT160aop z-pO-7czer(1tYvQ?JX^nrRslr{{F3Z!nC<@-~1CPA(`|W9l_bAcKGzMnchUhg}G@( z?-u}_1f7WEz1qgsHGgl<!{CsuW$c`hb(MYC3rmwh7s>=v+{(B~AwRLs7f@+?hUI7|VAA!Qe zvTxtV{?Yc-FDhl#j~cQuh@_W|{l@9lrJ=pXMMtyW~pr~z>%LmfD4oPhgXIwqKXXZyo}53Kr#xR=Qc~5Mn3I5%tzs9t zcK!NtI-AC;0oBR+a#2N+9dGN5Pz)?C2%ELKHuVZROt9?V|GC3jt%!Dhe7x&de`K#w zJvHu~=Fdt>ySBJ2s`~&2JvGfeO{@NoW=RUcXdXj#4&YA z2&d0zi+Irs((IgSk=UdwVW55lNJ>UVhPoTTwi;9{S+|VdX|_L=r1{Sk^gfn7m4{GL z3fYI`edotJx_nxAgHyTyoT&9z&3h6o`iGnF_yN^uy38vuI8VI@XOnWD>o;|&@IP|d za-fmdZG=GX(EF$#4-6`!e@f@=dkd)hI=UL+@skl3CZn1z*)ZN&un%_TO0mlhtP5pYf ze(l=tIHW0hKN#KKb(MwHe7Z|T<;1s-k?~AwlXVcleMBk}NA8Hv-ca^}(eWBh!}f2F zXC@j<$Hyw9dhy$a<(O5Eyx?qK65v>vpCWF(s>=s=SeK;B-#=L#c&y){O@ z515Z%`?A*UfFDygPRkDzIdzls0KVW9o!-vVBRWot^OMi`Z4%hkKZK8T6p>0VE-o%> zZ~vjt|46L!{7iSV!=OTe&wFXUQT!Nc%nkA|GiBN3HZETd~Nleb=%e(>2{Td zjQ@Jbu|xSfxVAlD>{#xKO?uy-pI+tfm#}iH(883~5sl2+eTi6A!t$_JFIKFf*2C&p zJ7i=Exj6bF(X6`o*p6xe5#9EGPO@nm{zNO$er3`f-&*XpQs`am0<9P8P}Q%-OS9e7yhp_t^6Ca_yu#0in>2 zr2qWK|EacPCWh#u-peE1Sn~G`UUtbp_*&z^;q>!8>83jaSQW&P_7qTdw{O{EPIp-` z(;3tXv_=Zq+#GDa?bj9`DL4}|Ri#$d4yga|q) zl0X2JWIDvkDn36uIr(9uM{Mq+yU*9UivZ8kI}V+`i`1!^QPIM&_I>^n^`1gk|Qbaz}j(&P{ zGL>>lM!zxb?QU+9xDb9D15gZ~a72dzRFbcaTs4h)ZR3xs=_x~^;P8-GBBrKnGoF9T zSs3ToxR>Yqi_DVi72r$bwOU>=IPyaJE81AiwDz&wx@Lu<-P=V?$5$(OM)|e;p{$8| 
zL)9$%6sS+pDoNM7E2M<0zP~v8@Kub63CJ1WjrRKknWLq93~s1pJEk)o5)#q?wfx$+ zo3e1_=cl_Lz@@Af=IjnIG3fzjvT9~$R=gvT3h)=97_S3RLAlaE2>gWH3g?yZ-WX?6r}G`y19qy^cI>} zP3P&aGrWTj9vgy|N>b6X`&xFao-f%;GyT(zAsLkJ(e4w4`w}mwnt+a8=1hKlMR;*8 ziOIioxtSAc!FlXGA#`Bru}HBp;1sLqE8!ow$N}Iw@>{_NL`=&<1$Q6U z^9ItChXON?+JATTxsfmV`LTARog$5&_7s)yBqB6$V?ofx4vD?i%*jsK5SPfguspX! zAJJJ|21s={*%1Pmis;^{C%?qD&;-R_PF1Bna>Afe`t!YwA^sAQlJA|_UsDCWoM^IX z(MP4@$^)Q$krveUE;H&JGNsc*%@#_l2Z9Kf84V05=Cjb{vT**=v*YH|Q_vxmXxY_R zUUxQ`nN-^${gl5Hc^DU_o?h3nE>Dw^#tY7VMEygheShC33*(bPID`r>&#ekV*#`@N-E(A5y}XAu(&>*{9E+`#sAF3J#fRuo!6^0&{y|#l*~Ph)gompV~qg zIMUxG2t4l$^MN*xgP1j8*LqrVTy73Z85q3Io}ai(zD2h*SSD66{tmc-VU*xo!YcC{@f)wuKt)Gpk!Y|g?=EYU`x@DO0A1d$yOMQ4@G zdPA>cp&CJy-0}D3ZK9-l_>gS>!K#bj_*~UE zEi~8@{7c`i^#dn;G$4~5;F=&W%^Os-f(u+e#mHnK{(*uH?-huse2|$rzYrj_toR{E zwjS2v5!1Hq+jTrRJ8`-rk?pF@=V!)~Vl9gNk3<5|MglNh2Ru8-##ZDwUZr#$Y*A4i z(N>(=Bs@R%BnDzgdRM4J@yvMdB_M@5$oG%6kyLg3`0Yl*(+9I>i;)_1#eG?>G6jOA zDeFf`g`M1g-ZR;O0Z>w@)uFQ|J+VV9459wO!{fM5@frq4=_?tO+&w3QN-9wec2hz2 z8c6zJ(^q?y2r#P2dRLJcb#O3BfXOeRFkZ5?O$+2U)yU4juC4tPy^CikLeG}1SpD?N zmvc`71NB3lCRFwpG$P(RA25d=MfQBmnfDYF{24^+Ah6_E4hZu{^L8cy@Kh$Cn;fww*imvKJQ|EI@XW%zmDcOTVGBzrd*W%|}*;tb|LH=RBFf5YnUyg8JoV(E2E{WDNs(}(5AFkrY+f>2^j+; zQ?|DlS$6E&wX1rS2wHW#`b{_n#!RX9@G2&PX^^7XC7H+Q6GG;d&MZw%ds2n z7xou;(D7HZn+`OlSBF3G_0Y9iwBjRvRQ?^q^ zh4nI5jyYiSYTNws8t+S{4^XS64y%N; zuK;N;%8E|NQQ3!dpC0Yyl_i?pYU;hwI4*j)!Rm4~f(4(SzD=<6r9dtbU|#F-_No@d zeN@-3lSWSuN(M(`ISjwt-(qdIY}qoOgg!f2WaEs?OebCVlp4x;-elK z+7KkvmBP=(RUU6OzbscB9h~>h)WFWO6a85UqitTo)uUy?1e=5FRa~VmXkhL}NT{Zn zy?`L89k2*&Vt~kDm3eum*=5mb)#}x%b^ub#LIs;LxlF1d9(m zrmL!z%qd##cgZ#6kP*rA<`JIW-kVYM#E4>vw7z#lOG^v1(6l=uKtmQeq<;jdAPOtS z3Y`o^GPyyI$FzI5OT!rbso@NJ{bs{}(!#d>slLJ~wsdDF#_d8H4h@|so8mqIgxUel zWrCB@5XD|22dmr5AdjCi7lW*PZ{r>&F{p&3T%Ldg03op(th@T1Y7Ki`Xnv3bTY^Wd zkAsCSW7mM)XheBHlZfTj?+#v_~5ltRzR+RCnMw(ktojk_?7J2V!WZr?TvEd4CC z#kt`kj>KV*?7rr#hFYWd%X0Gxj2l!vqha`=&%~ewG z$(6Em@bfo-bnFtl4m{rZ<2*ykp4-4~hJphNIJo@Am#>bVxjO33X0=nR7}R0C8odqz 
zt#de^Ayq;`N{Soeu-S*cT*x68rKP=N)Y9Maf?Z`BuW)_oJsFT(*mX!()1xfGBPT&4 zto@K)=7dO)@ND0)UZ-?x$vJF7lYtj!vpVgvQ|8lH!ML|?K&7as(ZJ@$JyhSguQWP= zBiB;Y6v&?$vL_Subs+eciCr$;;jp_wIYg#z#F7K^<0)jblLVJEnyjcNi4 ztf-IO*pKreoeaXxjVo8L-YrI5WEmpt(tJ)tWS3Z{smp9ogK->0z{lEPjjhLpO&@)8 z7)a0GR~BU9P|+0}yw}iY9%QMBkAsrNHYkzKWC5CEr=SFysh|n)gATfNziMx=R-T9X zx3UbDUA#BK&uTQ^eU!Av6#VOCxnC9!`Q$gpt01Xi?`UXz2itz3h~?1;C8o)`appNL zOm~U;px9b|%W#~2hvez`*_jdH8~j*XJb>NUlR?-d0=?g|bE2Zp&^uo~*|8jJi=_Ri z=*N#Al0{S39IbWctj1@Uq)xY`aMT9}2OB{;GGq!wU4Ecn(6K!L za9P~;VTAt4CTeO$G(Y6yRZqBaA$?Q>ZX6Ip6U9cg48TPvsZKsoLr@_`_AC1!QVGFS zO!}a^CJ^wloglbAW$N;hiHwZw!GU?wKF298Rn~7D#%cj4AsN*T7pFC!&GSj7mXetFq6 ztBTzP9Kt6Yc#ZU@?SI~-?HdU=timV;O~7I%&Gv6xx1-?+CXF=H-Q8_%7v?+@gB~z_ zMuZf}KuBrle2$}(qQPYd>~7!pHor2*#YGTpzWTRXZWWW;DDJY&vJA2d61sN$LK$2O z;tA@A^rd1`?q@fSL+Q^%N&vz0Mkj;L_YZe|SW9u2yDD*b0mlePHs|bz)Ja9Pd5r;p zgp$VA{z*HVP8zAg6*Yd$k7+__ZgLaqpt@z|=884;3Q9n^9+o;u6R*yD@g1qic0v(L zHmWPv&n~%s?1*aeQJ-S4Ub+#$LL;U?)f9vDrfjq}c8MKC+fCoOxl`7s7$4+F@&`GA zd&DAoPW& z0s3DpJIMre)?ZQK9QGL#vqDY#;kryV-H{QdB?d-;^)lFwyt6P`#-dr#lE) z`YG~yt)EMWpHkcisNQjqjpH^gPN}=NP1Le2j#AV%*%f7rbG0~*KC0kq_9RIpFU{J? 
zC+f{`eZ$GFf|8OG=s8uVk7W3Byf)lMVUa{+SJ&>?zP-aTZ~}d;C!wJO-UIzv+m&(x zGLG5U9^;<+HJcf0;U3koOzExT4ii2l&16T|9O!GF`1o9le=_xDn-tFxi*YRv)2wm- z3N9rnSv1SdzHvYc0FEB}{nL8Ss7a{c$hZBHqv#D4Eax~Zz?;f3(-gag`1Y`|b$GB6Ozf04a#tpGUA5$bKo3QXn=e zu(46iZ6qcp<{%3TON;s9mj~Mvg=^w3cZ~(#Eup$w!j@3^nEA+&5`!`%-MBG=t6|#U z^)-da@>m<4;bLqHRnsIDCBdz@_@be{XKwDPsNRpWfAU1fOuNCVM>W}cqg9XquK*zE zG6kZgMxbqKy_SbfjR-|rjX=*w0nUu=~54r%LIJA)sbW- zhuT(-EE8X|O3+^8=~62VjqI%lOXD@^jnHEWyhs zylc~%*SfcU_%QP_!n`eyy7$P3{ifGqk+*8kpK_WCEB^fqm!hbY5RlS7=%*%(j~vKmex<8r15h|-j}z@BEcO{Pv2z= zH6pPT8M!7-MXSlX4^1&9ec%-25`}DI3dM8-gt^@hv5T=@!V9$#Z8*%~LQXR*P%eGq z7A2+!FOgmfs<7q(ao_I%kK1?e?n!QRV{nc}=6UV1xIm00AFTTg`aeDn%N%zC`l~{_ z(`_#E;Y&0^SkSbptE-zqdW@g@)Oq()=XRx@G{t7V&&bhmiun6Hc`^=50iW-&JZ0KT zcC^N&+S=O0EHmA?!ZlIdDTyt|_3jeoAFMJy^c$rSte@Msw4mUJ1V>i8S#i~@Yx{V9 z)r75u`bx^z4kFlQd4PGQE*!0C00?wmDvH-lhQ^kPBD2M2>AhqNu51`F%p^u6(bTM$$Nx$^X4nYtT}Kkh6vb|zqklh2sGx~HYQ3! zEU!hUvM_(fX(l^iQXn@rq|p0UDf`GO9uED-9R$s0DE)TW)QrEB))ITDg^>7bGN( z;ZELHb33Av3nUnV!72(u=rPRhfS9EfpQ;RL`xqP<$6z$DfN3*`0CxafXo$v4f#Cb2 z0DVE5gr;&6Jn>1YUBpvU5+M=b@kbGWj#8QoQ(5;t6+!#4qdphG(TWN2K4w&VG-EDv zV^gOGdhA2auvPitE)Hcyn+~}niHVp(jJrwHG*mZbO=dF zXm03ihB+rtd2yIy^Bk^MH0ll4ZD85NmTrcf7FPPQPf8t z(Is~2(vcxS@HNk>{Ocvp0*N9+YBgxpGg?i@A7j6Nd=BO@f0@fi$qj%F@(QUn! 
z7akmWgk&ovN{!GQCp--KweA8Jnn@{_Ih@f#67@-kL`hfu_}@BBtVntB5H&JV-0l(R zvr7s1-jOrRZsc#q8(O(1zAf^obkeCt3kHkopWiku+Zs~ z^vg4M*B?B1FmL6mRe_KU18th^^0jqzIxgo!+tB{>=@YigxEMB8qCH*029|&;_RD%t zqUf9ieC9$|n;T{6I6#bbzkX%(W1JI@CQdtALBw3?q=d5u<)u;GyX$ZrQcwRa{pRrm zIf00$?VW?q(JOxAqtOM!`s5f$C#10I9M`sDHBpR$6@jhD@J)U3cCzB*p_n1!|#A}0`dn<51UvF>Q3cgjD((eMOm zIE^dJ4=yewX1Fc?`U^edddNwv^3l%`Wp#q9?aAker6imE_*G+V!RNQ0r+h z!;6rSNlOqd#YmijSO75gM-sFg8kJxZmSewVkWj<`nWce0s07iiX|4c&NrBhIu-{Uw zKF1}8Kv<{w@fx_8z>TYL3Vtevy?ggAS-;|7Rib7~w`i{H$G@P#5VCC#%4v4L`N94B z&m!%}p-V@)Gmy2?91l|(@2}qzrcUS5)KO~d6o0EeQ>SPLd@!ts4_}0QO)R@;g+u)q zZL%l|nXtWKyqv<#W;zgWd#u#qlV9(~f~>*Ofzm^vrKeppa6imJCD&7ZU-K_b1YCF~ zF>eJr65HnoHw(K7qdHi}LQ`ZE~e=piVw^aA|f|ISH>|IQ!q z@oN*MRn$K)Fa}Ar2S8aUhJ%IW0)kf&%}%KvVjVGS`(+PvLTlbx>6I+GScma%`@Nze ztO~^p;-u_`ysKgRb5&R+b_}ojI*y5t+(&ND*GjvBD6CquhV9Uy0_O1)lO|POUS6RX z;`Gmb^r&|o?rYDcKQ>hSC(8eShDH9nCHj9okFr^2EQfS_WJb4fM6$ z<{0^i#ye@D_?0&NChGtA$jtxkDy*Goe+~gd0asj(L+;eHba&+PD8z#t4wUNUHOa#A zPAi{UjKqCDGnsW-`Z3EN%vnr8yT#s7ph)*WBS8P#=Y-PwVY%Wn|8<8Qa~W57wLBoy z+lHvF(kv`2lr=CgFz0@CVfrisxqw3{(+8nl9V z9KmI_qtthhgWuvj8xLryxVL1&A;TJOX5q!Y5~+lV>5JPD*4^ku*#il{Wu z2l}wqC)E?SGb+K8SY9H}0bIqtG@uvQOheOtqwfDYj??E>{OCW^I(l_MDfsNJe?NPn zv1JAk)9Q~lVN}q|7SMP0nk_Um;Yu2IBBUz~=WI_==TE+9AY9_|Sw*$uHz*aEIM}*% zt9ypZT>=(P8rQeW{`a4}bk(QI{c;$?xOChh!l~P12|*+~71ha}5YIFL(*c*hSjj@w z6dbRV``WZ208Z^9lQv7GMkA*&f963lTr;HmWkdzPy_w?GGZbN(vAmE&{A`;yw^}pt(fR|ZfE zMVRnkSdP_*GZT7V6mD)dQSavgNU3@YB zzRpNB)Ap@mf||b51iWfWh|DTDX98xr3UZ{d{!`pb&cyFTVzTIxjIx87jlSkG)M^4z zR{29ytOB$<4sHnJ2U$=I%8(ojWcLb5>5}l=&~2!t5%6y)=kL<8L`s{-JYQjHsdaQJ znl!|_O&W4>afjIip((?-H(< zE@p0SC3Hi~?d39&DJ55|#+Ll3eXC_PHFTBu!EkvW&g&TI@iOXs`-VjD zG5)MN4)y7#r+ikg$7ezv24tQbyd==`iizN>|Ho5Pe8~|kE>cv#1{@$7JV&lKAl)r5 zz)}DDWPPoyQY17o8O`}$TZKN&J0 ziKN8L5%;E?yRZ%olG43!gv>kHer~#rQI=O&4pt_n@U_p7`CNOUI2xUgVm^h22%{o89h)pJ4-{@v^>lG0v#DmA)4|HkYpHf3u4LY(00Q?{3CmOax3xu>83ocHaRybW(VIKGiYhvga~$#ed}OgW+i;sWC4+S|2_Te8&=tu`~^-rS2qs1{}x4gcfZ 
z)v1Dlg6Yof9Son)+!M$5C24RYjNmq??uEKzI2{D<2OMApFFa#SPiY#%$VyCEus4Nc z4vbel`_P}+Cw3oe1*|qE4lFM$!lq+6{7Xb|_{%n=Q2(>e&6-tV{5Fzukr&^RxtgUZ zt9Q6Kr7Nv2IptlLphF-R78ICA*C@b?%I-IdN)IMxE~XUv?i*EmJK7aL$b13PX>G*p zYfY3QMHsSzg-&nTy!kmeqw=Eqo=S@pFO+>U;cC^4yzkcfg-jtdCBrrK12GoNen^2P zC+hBSyWqc`gW&2^nEa!I+rvIjzc48k}2esWnFR$ztHwuvHl` ztPvvFx3K*&MUC5y|<;}CFV+_uu**B9IDCR_F)VglZ(;N?x~j@ zJ+wG!|NHw3>&1H**sDy`6n48ox=c-j4>=Z{0CaMNmjI+I(5<&=bCt{6L5YX7>w8(+ z|9Sy9tZmO6fP>-%rd&GuH25#y`W6UviZ`5?)T#BEC4>)X8O;p{7^yQHhz;n)x9y1EQ43H!E3+feM{q zn313^8Uu+77VI$MD9+Fcg`x9Gs=f`THy}&eq+5FS{;c0Ki5RE=G8l(g-ynEG7P1_H zPu0ET;uEsBgpnaYeHGpA(up5ter!2WBJt3uttDhTFf8z(sTYTf1&tfzyOEjT$B!RN zPA-r;8n-a_AVY3b7Ul~ZxbcTMada_8OHk@%v+d9VCDRrI)#(6!q&?dYf6%_cmc@nC zv#y$cu5#zA=BEbS4>U^@ zH5l2LSY^gpb-53dzJwRxrgJRbaz+V!rF-Dt%QyPQyF zDSznB#c{fw#k@(n*IR_f`WtA`%T;W4|M6!0hCWR&{l&8_pKZ=IPzinG@IpKPJG7TM zDAKA8u-$q=Zih8u8OT15b>B`=kDV`UI4v#UH@nnTl5$H}0i0F>H z3RBZz*RLOh8$~Nct7p#@!gqukWapXC;d(jp;5H+wCL0D~%n@+#}rtt_-v8-&9v$ON>+PC|G3dbbnDlkKWVM>u{er|4BJ3BkRVSMUs-7u%>TRzHpilbKZ zGgkQiRW5VGYvftCXJ%#|1@MEgPY=K$9+QyrLIz|2?wFsSHwjl}_^8NMilBBx{Yj1|f zGbCgDq09Im2^#uF%5z7z_o~7Vut(93Kc6k#*h*$*W)QL7jcfuUvNG!v17673F6%%R z{#Z~T#;l=ps>A9ZXizc@8!!MH#_BwtR9nk zAZPo+BsY24Q=e;W)3P!0Pla8C$Vh~MN1{s7LGbzOLk{dz&|E))Fu%dTwg4z8AC=mQ zX`LU3)ig8;BTU74olL!U)@uNf$9BaWw(P5wiIR-2GQcK${PAN8aL@am;q{5k%PAUo zz3%0hFcJ>(&gRm0$i-18h*A5N9qA5}>@YB2m#LqLq$5K^rSBioau~!jNbfK@pt*KZj+z=6B83_H*P#J z{WPM$cQD#zalwAg$$0fVaJ}}o{GV$=-jqCFoE~7a;lu$wTAC$IA3JvQEv;Ud~c;T@Wc1S*fqDPZ_D1 zMWAF?Q#?6!#wI2(doGy#9;igZwDD8J>{$1!WApwgWpZ*)N9nfXF7P-KK0V{nv9WVk zt~{@^!*e7m>#RrG#X^kXFRgs07$4rtOu|!enup~K2@X{N--l?C>1S~=GEO0l`ng5l z!<{^{j02`o>8hCmz*J5%+Xox-T~M8WSXtLYqRIykZ2k4?791J+U%AJqY@FnFcL!bT z!i>xTAC9y687_<2&2u{t3Y(;)q(b^3;C}>P*s^WgZ3NcU=1e>LHH4kQsTIB>idIpCgoNz9a&R=zXIswTIJB(N z-~=p8*Kh37bm%`}YirBm((=KI3XS>-cZ<=^OY!<&khX@rT;@JUk8SMuT02^)-067! 
z?sg3`^_HOHk;*9se$}e~C?%+UBokS`D&th-2G0yuzy5P>IC&2%zqxWVh99GOSK-hy zv$kfxd6+DpjLPW9$QfWV^Zxo19s3qw5HSDsW!==wjDa9a^?gszJYhOj_+snTMyJrii|nBF>0B%TOg-=#x#C%_0bDrVyA6wc?O!Z zVxL%>n6RV$>8I&-UPOeRh^hZ#b@r99jU{*Ypf|GPqY{VHK zA+ZGmfa}hpV84MM9AiDZfW}Y5@?g2+nWww^Cx|0IUMsL+>_iK+h0oLyO2imvV&8LveMGO6( zwd>ZM2BS9v*$R({_}J7$$Ii}v;lhPe*48H*o0^7UVt<9?rg%i@XG}1?fUPS$2E7-< z+6M={z~oCL>Zx@~;f~m5D*m@5bs`05WAUtE_U}dOX=Pn*z zUj6>dySrjIJ?1^#UlwReOK&ER!g%HBFgsyPFo^CfZo?ec6Qx>kaGP9U$#R zq~7g;@6-RSuBqt8qt>tEx9&S?=<=u3K9->uw7&z?{1g2ExEthl1SF`Yu!7vI-n41c z)YMcikPp-$&fcA9h^9GCbD>Ws1G7$AMh1E44;1#-CmzZsj70EStE%osKlyfR>y;4A zGUkH^e?bHPR$P21Ab=LeOy!rRR>>cK@KDhY%Rsa7r&oNu5VVz_va@R`m;7GwAPlKGme@ zlVM`kJxnY-w;5~}?bp(iZf`Kn5dmre6TJ8B+lBD(@Z74Z?MM{26B84~blz22-zIfk zeDxnJ3#9Y~iWC)+r*f)E$j6TfpWOG*AZai%a`{EvX6$I|x=5lLkl>d|OZFq|@#zTJ z7bt|P6$T)sJBiQG{?rmy__pI;&~X1g`F2bU?%ThA1%(_uJRE2@0tP5$HQY#dh}OiK z&!zQgIr$>S*f$)#8^5;{(R<4Pwc^s*94sBc9H^wr=~pTcEo60v~0wB#OEqtiHD<- zWI?nK|BKtUZ0YOkBS06!-J8jqz&!{nfCmt#Ezq|bx$iBwBzyteVNkbkgOhy^S`B$< z`N%|wZ#Sg5NvNbY%FT4qy^DNj8DJ>VBLXRs1$mQ2zYL z+|B(1ik4QAk@5BBf2%aPMEq=D{&%C7-W`3W_4(Sr@5OJ0wg352@&PZ`-~7V|{=E5| z{63y%NpeX4W{V3>9(%Y;>!+7uJIm&6>h5-GBs5TdHB&-mv7`(gx;K6C=Ip3kr!@;*1a_1L3a%33V+AnxN*m~n+x74yfKJjKy zrxp~z9l10pD~BWo=sI8bcBi7vra*F2*IuFkWnnvYXafWjZ36fgoWczQY(uM`L@TgCbsfSwVbQ@vvblqe z9yh353t}k>smR;U?=I@uHRyV(8;3ysi;m_?C7@kI=6v;P-|WLPwyv0(&ODT_zVLa+ z^vujJJR>3>$@MLs*F!m_r>D28%l(@rpzH zQPF3(8;kg222;&IE!;D_L@Ki;-kVdu4bNIC@=Xf3JpThD`sHUTtfP%Z#h zX+xK{Z-d!I9PDbPYi(U!0U)IWZ`AhBkZV@CQoQfnxx*##*&m$>Dhin+0aWw1vc7-O z=ka6mP{SRR5y+>#cQk>d0RY)3+Hi;Hc%S83({O-m)vQJynIBo{I@VJq0Zx1vdVqK> zX?0~w4mM|{OyHSb-pJ)^DZHq1F`af8V_kkK&x=9xb^eh<0!k&LfCOw^x>v6JlSma# z+Z$lFzB&;C_I>e)^7T@9FHn*=63a?g4|+jW0FXkKRGnTi7j|hGxRxZQZE%?46qPZ3 z4>jr(-kyht=Q;Ff)ZRlKjfxw{c0;cm!jn^S8yYf-%irjr|G1I@C=_u{ zlj;ELI>dY)q7IMw(J98$0+}=M?x!aVcTjx4aX3V4BO__O9n{Ih=3fJMuZ1cBL*h-S z3;K8UH|#um%1{{Rk5FyYV6-q;Qsz24I(|Z-Upb_m$(d(S<%qhiAe4ir%Vn~AQ&)40 zHl~wE=Zowu^EFl!$M3W@d&F 
z)~K$lDq?uLXZX7`dplDq+k|IQ=X^hMQg(7_U)a@o&uB|;!1TG5#BjDyU9(`gVb948 z;36NpecN?# z6}A{FLhoF@TIG-xN&*3Et|N=NH(4W*Dk-@L0dIIGxIMkRHbJI(HJLXl7PI(YRs;UN zn?MtrLH+rz0wu^)8=~G+HhALX{`^X-d3e}y2M)IoZdw6?}hUSS_U(&yqA7 zY!;+|CQ#<)<_dYvYfC8S9F}|IY-elx4-Ze;w<#nkc_A0L>e4VF{-DzeDYu2oZpZ$hT<<^2Vl;d#xbtYO!l{+yD22Mp@I0U7I+}`le&$ELBldE#Cqc-R=9&apyWAA+k}^I?)dwQbAtP%uCFC;(d4ZBLqb9%_cAar0CoE+ zFO2b;wNhM11bGTAs%~JsMMj8roXz^REpAxpjq@&pMXHg>$@4hQ<{gFGF-v>}GnTfH z_{I5x&K-!nLPs>WW*$&1pc0V73#3ar?x;6&jSP|4*H@2)H`$awf#?~b2Suci%@?Q? z&vd*eHp`9S<60h!IOl>JKlb)^g=-R1KwIJ0t(mS0zTXPE!Gq@~;Him^aN@2Y-%(sa z>Cg=zePIGElASx2QCt?LnXBVeR)F)OK0*jyEv^h?gsgNA9YEGa)9Scg%AWx^ysj*uT|RR%PFE+R;$} zrgt-RA#rH0^wqwlBrj%VW@Qb#{5Rs>1gz)${rCUaXY5Rpou)=bB0|VAVwABYN=3<* zN)d`uYAnf4S`-PXq>`kPv|xl%Q7UPrg(OjuiaL*1W`4hOopYV*f41v?T;K1^i2A&j z*XwoP_w#-}pLbiM^Hy71wOygprseZ7SbANC=q%2OX`y^ z(hqy!b?lKeGTZ+2{UvqI3DU4j8V*Oz2T$?Le2~*dGSgAg|Ce8EX!MDI&71k!nAFyb zG#cZwve1acdr$`^KpRI*D668<2hG~-9Xlq;{`xDMZ9m6tRrxxQSt=?`@{q&td9N1V zh+8kBEeE2R4Y(P>3(^nzd1BDe+(+JuC3EL&_P%o2L$f5Uu>Ou*%f!l*DJyiU54~My zT{JmxC@Ds&$2i|Pr=StMLI*Uh%I@vc4vC40LDKQdKdy@YwyFqBu`d5ieRf~nOe4!L z*%2m5$Ek5fQc@82KQ=Tx06%;Nwx94RJU@7RY@+Yk#(QLL*Ws_8)7ULdE-{;>Fy89r zB*6o|^8Lu#g$+H%Cmhc={H|*E^cjUE>0XDEvyNHW?33N_eeB#3y6+B5$dnGtH(pw$ z^rD;UB!#;4DJ2U2^t@JXZrNaAB$6+7V#lu~?K!r16C8j%z5x0TXC|4u=S~XBy`?8p za7|`=i9!E{WDiR><(nbve|=)$mq6Bd18zf<{>}(D@I(rdpCuhevO1zHnpx~8K@zpe zE-|e7)!o~BbY6ssH#P=tp`oGQpkZhil7eax?;qT~JLhpGW?u6&%W0x#-L|z90{H%* z(duODA{;qdFAJC+q?aIl1fvN;f3e>1jO9W|*ICc!h1e$p2kD8HG_qo=4lN6Ay0 zCZ9YY>kU$2`>!7_QW_b}m@%WX%efi$%MMN7{bh3%zMk&>^w})~Z8-+*zvP=$ z5@dgtkhCAWQA22Y5M*i|NAy?b<9-1Rs*vF&WHmQ#v$4^+hZ*PEk1trQo_Hlz(SAc1 zriI!cO_ebq`fs2h9cK$WjwpY_a4*jM?vf@#%aO4oPK9H!;o;&U3A{#c<`Gb3uaqf4 ztw-D_XblC+?fEFM%6|RGe*QwTKS#ZofGDS^sD-{j871%2kjyS*2wUs6X}3IHac1^hfMbb;x0s3sJa&ynUb~3(2St3(Jw-OzO!l~dXQU}SdG4T7xV?jAah`6~H0Rbkb+xbGyb1p0JVvc;t&K^ZQ{k_iq9w4e z%4c_i*o2b4#H=PNGkGiu@y`Tu{G+h2(4(yJ?X$Vdl|0>6zA-f29KZI7Q*dGj>9YM|9kmr(eJg)Q>?gj)A&D5&mgm}bFn+|;E9e13~&lE3g 
z%w}4|9cQ!lG{g1MNRQ>vUV)6uft+gpI$-$laTd{rq_-+y2JSrW$DAHrTnJik)3X7B zeV-&n&3C6nPP7bDg7$8{& z_a>o5`p)e0=&;u`xouvJP)hG$BvOF>0|u-ppZOri`4}s=heW(*&A?@k5lhLzkcX

xouTXKc!%QW4}sq4 zgyy73NB#M66^2g}g!z@DGdu=0d*^;q(HgV=asaBjjRzrg3(dn5B<7ch2aMTr?LhU`&$Xmvr540_K zzHL);ln~h5SX94tGfMdRUe`8%YF}KFw#ZAySI^AJs?T-b(+sm=oa8cesBL8_y~3|w zzAP8Gs(DZTn6>%Q1zFpS+#BiVefs#ZFXdJ~V7cB4@)9LUIc%Ps-K%sxY`^!9d_VIk z3rVNO*|fE_HFb~J+>398&rG|1*;uB2RkHh+y{Yq|z}D5i#8%`J-H_t_l99L=oFWrn zxN+mgQ&lS+609KSNlrrH)E5{xJ=JS^yYAY7JVY9rdaWAsrpB|kNS=6E_LKyA}gJe`7{v**Wq z=7Neex)tS(=bgb8*VO`EEYWE2DE|05>-x=AbXxHW=+$8)FZiTbw8HIae{fKcQj%*v zEVJB17Oo)u60-7M29HcvxRcl;!tIgn5)Jv;tq~X8&UdE0;7t}W#`YS`K*N2~T?Xj4 z&8wU4o{^E!aW-mXqs^<$F(}WvsQjJ+VtHLO*@WNL^oSLJGoB%9yB|4mA}_9LO3+ZR z@`ZnYt^KQ}zo)DF+ues`cAod1=bZBVYmjJBySH*{4JncHMdl|kp5~~e?_bWkxt*Ko zWO#dL?KrM8NILS#kK<@k(vxm~I`4kkzG7}aYWr{&UEif&^gb#dgvS0h{cK#GwdHgD z?e*v3Slv_9R$RooR3+JB@J|S)0j-?(Py* z^YI}}AlRIDr=z8r*=VQ>VkKJE1ltd5V9JI!Lk#9rHze&fmv)@D=+`l07RCv39af6M zyI;Q7BD24`|Z@7oWHebFt(gCF?4*^uKuK`l|SwYgeFK7!n?tFz`>^P@o)7i%3x$=f-P*m)JzK`>e5}{!?0K5@*JqM``i)QTq&K^5%?bH` zUVkgh3iQxqxd6r2g&NGB`*nv{`of1WLtuiE-A)VlJ%l^PORTqxgGm zQGD>8MHS)DeC+eIx(wAOYh$Ld02%bEEbdH-MN<5*N_|BZ)k?Z{NQDduS>L z4yDPLrCteD_@tj-FNq8wT(P4t+h{5b(H$M?p+zj|guhrn$w)P|0f?Os7>4Jb)zjAQ zPF>tf@)qxn;;EuJPC_G*h~Tc?%hJP=3PR#S;#*pJcc19K>V3uGQX&E4)8qp^YADB6 zWci^rC@!0?sX04bPr&h4Op+28f29TfQQ#7Q$XD)u`^9t0uk(*4bD&@QHf;Ft<1lbG zJy5*XYKe5DTRR#|u(?O4XiXt#=-hC<`;c5nlXAKPcKI(e)6meceYqFlPa3YM#AU*| z6e)ep)2XIfiIa{t4JY?DkF+aU7;vlp&e-OOm48n;5HzA=dF-g;$B$353Db$~kewQd zE2CsKu)_q)+_85)XDtLsPyGuRMZPYv6p#+--O3~-*JpJ-RV)1 z$`}*_zW}gzkldlTa;S}el3-^Pn&^AF5z=odC42`7J(k{}0{2BxYslE8ntUHtvTu<1dL<978nbnrLRZqKIW8im1^Ox5KecD1$jh@~5}7%!uGZKH9kC?~Dp z`<{YBu#jT7S4<=Tp0wn3ZLLZ9gzkU|M3(z!+9Qj!Wn;pm!_za##pM+pza}uedyEhNM>U|@YYYM%VzshrgChVc!or9ra zcr0m3$JtO7u1>jA`t64BhFg((Ig=)nN7L`UibgIuKO^o%x1+v`6E619)G;S;(3nY; zn$=vlbgT7akeo3EJrZBWlkg@yRDNMofHht5REF>Lo89($mu8GLbi@9G1{thB2Sqt> zxRiAsIj5{Fz)b47VDuGAla~OM^_8dsV29Vh_W4WH!dlgwe>wTFoW~tyjAgR=t`qxR z^Jq)^e!sEt$9@lMp6KV^Up~@n#T1Qw>QKz{I-?0diTXu(dddFKYG*`K3j*n zfg0nMfL1$S+lWO93%H;l4M^Sv=N60acfO2smx_ANhor?$azQquh@-o<1&*Gh_ 
z25%68&y7vU%pM=~Zw3+07H^DrJZ-9zp8ACKkNQ>!mzYna`vHv!5V3MCw{k!}N>A@{ zvMu%eC|XRv17gn5(3r5Lc*cg^ck|jXSqO{ruUrvL1I#bag-I!YftX5~O%9|zdX!8G z+Cf6EF6*qYM(-d&mV8_Y!jv|2tTWl5EatwPnZ9$JWTb}1FwXGjT1)ERglw!_-@2Unnxo<;K*gA}r@j6~m%UzuBTjH+CrfX};lCpXyVQrHDhgYb}jG+r+L zBFJQ4k^FVodU&vszraU_3^7`NPT-*a%#!iQOnL4+GAUiEpQK4wQ%ateA=>rM64B$z z-vcB{Mx6{VR4a6=Us^`Sd5r?_+E(Flp#X;5d8G5;Xp%jt z_)M1mC)!XW)z$M~*XI`QaWJK3bcv0fA_|3@)^5z#3wwC;#6IC`by)0wKZWx49c}S8qpfb~{hELn^)9gLL z3nm_{6oicZBhMh5b&#n$hZ;p%2zfKl=oNaO%p{ zJ`ejz*vdP;oMn8MYUX;{uO9XFDwvw=y_M^cK_!Bjl4i*NMJ_kHo=h3uQ1 zT`uhCPlF@6a{4%Olugsp1f3cP^wh(w=YCwQqSboV7)PtnTnJcO;=(C)x1eAM_ki@Q zg9K9cAnK3K7c^$snTo@*mw#l%bvctD$36hvlMJG219b}RjbDBrw7q#&kkZtH(LoWq zXDE)(ZQEq+yBquO&!b_0cLx)N&1YaaT!?-)EX|kxv?}M=mauz$!j4Wb%NQz}tks6* zAJlM>b(llj?*UyA(JB;Ndhm{q_4Q+7hE^KKhubJmo>D7&X-0C^jQ!c8*}e#Y#@_gr zqI9IW%Sye6nJT!wn9ejD4t1%gXBNMl;zEV%Wrqj?U3&Dhv_fB{ke3e zW$rSiM}sr>PjZT6(NqoFeB8}Vv|(PAmBCi}DST?B@t7y(?Zxrs_Tt!cvghVL-R5Z@ zii(=37q;@^@>1cZAw-o>D4z24>_YudFM0XsRoNH5d?*?s`Izb4svFm@OKYABWl(ogoV=G) zl$CpN(X&}`g4__k_^AAbjxLzt^bqZRW(~gl0QZMRc}JHUKY7ik1uaR=$mlKF(U4iN zu(>o==BIBFcYFWzli1&Q$xaDBc&4HiFhM&~d?v(;k|B zVDUOO3%O?Wwu#UJ|By5#y??f&QlfC=IYQU|pt-l4+hz(gmmMfvw-~!^)`FU6U3lv+ zg70gXMh<15lI!5>NOlxNy?*EXY8R_m>5I3L@4WZqrw6&>*E|N%2aH5gD|oF!&`FWc*~V zlhTK`SoAV-@-j6~7+;aUSH7l;=Xv5vMU(WJTYa7*7lBmR;n5cF9(_g{IV7ZKWawpR zEow1XN~;Rtv|B`1Z6tyK~z6K2_3@GQr25Ztiv z(bPm7t0GwsW5IBH_u&kbkUXMKxubIM^Rb6lV04>LsYJW|PqXuz(;cs%jj5wXtR9=Y z@z23t9%Cj?)@y1MRt1Z9(L+&fD-#Zt%qmI!b_MNo>Z)V(Jx&TkI7njaDK_iK%@4Uf zVobyjGKui65$%=eti1hw;7Cy0j1;aafDP}`$D9fm>a=$!Vq;^6a=T+@fXy46iKRd^ zJo4<2lu6q>O_KNEq4ZN)6(%ncDt5`z%9qe`y;$fm6?d zmC(lb6^h+*r6!uAf&8>-J*E%V6+mMkf0`Ds1T5Q{{$r zJ8;mYpz+@Kd}vA=jK2zS?VS4d^>9QsI(G{^^yaGhn|E|~Z)Rk&7Slk4xRLqSOp?Y? 
z2A?7mPlfI=7GTwzEaa!wwwJ$>SV{W}jG>32&{nHU+@NN{LwZ_z?~81ui8x|ob&}?jh;=b5lc)km6EbDplX4$XLCMq6 za~C52bAEnsY?SJF+|^?>tw8(DQ?S~*ztr3NE+Ol;UToMV z+#j+6{^%3lHHNO^>xv3_-U=oZzd|2~q<1CdBV2P81sIOH#sFb=7%4KQW(uNba<{7u zcKjV@vrfaN+@bB*adxr&n_&sBeCIuyruf}8hfC3|)agR5Cb5AZR|zkYp5spqZeVUEar|qXIi0{S_u30S^gV(654y{#$0QZodVsa zqGS`DkZFAu9n-q+zRi4^K0J-CLsq9J8F2FNkbd4BKj&#XXScpyLlo2uwFe!KL(+Lpa$f}=KtYaR8VNI(5&F=w}s!0AOBb|xGZ-V*e!dp80Ex>%# z{4ke9Ps|t@Lqd@HTf0w}Y3UYs{=6A1H>Z z3ZbT2B2yJ^EkBw_P5xYv`w3oBepu~=FmTagVB`%Py{UU|1hwYzZsnC1(w%9kl5Of^ z+_lf}vGAolAXsZbmbqCsSVl$xIO^%y7{IcH5f8j7ouHo^_Sn_wB#l=N{zZpoZc?fN zATu5omD5~NS~?Pu`e}sOZvOuM%)K%wcBod)M}GW`184q-`-Sc6)orC-`+_~jf_(@w zwH{xi)IMV;wZW1QZMcTsFOB22%^P!#_no1>@k4{2W;qSk0U(qB@!rC5R;;8dW{ry2 z$gy;|Ft5<^fO5e!OhRDUu~uIGWh&RmEoMaRPPbPMgQ*vfBxs7CFC@Q|E@> zC7e#RdEFU5kE>5Y4bN2Crm_tKG8f7k^&K?m5Vtty%@o$xf zJ9SmN*}zA5`3Bds?USp@9#xESKpVei!VF9V48#WO{O* zYWF3jbR~qINSs4%KjHT(FD7d(DsgXb0?sea&d#=hhd_Q4KtEv7dmh!9@4C~aJs~=3DqfF#J)?&7< z`Y*-2lZUf{^fZARUBEbfKRlCyDjn)G!(Nr`dq6m;*GmgoTEBkJ+Tad#peH+*Y>!+j zcuEG1e&MLlNnm*p0JPN)x_Z9318@7;>HAr8`nBG151SwL)e2-gZR%*jWY*M-siN1h z-_LIt+MQl0KB^nKGli?PvQqD``WR#TjOl#PDOmfDlu+ny<-#SiNgUFqPWk7%A5%2Q z?&WYAe3TMrFYu8mCr{v+sMkL}xSu(M>#(&Db5aGC?`^vH2lIM!9>=zzp%cctv7@HF zqOv-}UE(HsSP*IF^zJn#(|$DDCTr{1>YhuRzPx?*>(E(yhKey0H>|krT{!-}s;=%a zeR{m=lVR|MB0^^7pPYFmq}D=%c6E%2YW>4qhl#sZX7{hVxChxva=y_yZE% zzrJftgyU*jfhr1hd=wOUE9^?Jc%YTSRRMDx$bNQ)%gcw)OL8#sr z5b!HphPg|NSL8Y3z5cR!|4*O6|3I3Mkr@B-UoJrVV)~zuDg2+kjQ{pR z#5!-+qn!Gr*_j+NM%xg{iJsO$@xoF^d>IpR>Gcm8g^v0wNaCbe7hy)~X=NhnCS=BH z8;WcZ`s~F_CL47|8<$l|KLVFPjU{9qTT}{2F%PE@`iVS=f6V5IidGQH95h21bPnC z@?`CU$M*SgcqwoHcL8V!N(QsGW{reWj#)rncTU*hLWfQcdJ$dfEvJ^rV!GxXkb9x9 z>@Ui*7sBZBvnA37@W``%v-;g7Dv-JKF0ZJ#TT(KD;;lC@e9sTR3>eeu^T<|vwq^_Gz{32_VNfGtsO!6wX(r zr7mDyYp6tDBp2o)|GJg{J=j2+Wsg0>QeU!;kDIBjbjxR}WaL^C@&ME(`hG+fCB6#RGTEpGiMr z9cJRI6dt9FtcOiEt$+qlBtzOa%!<^ z$6(q6_DLiHmp;53m-O%?GK^33J4Q~J@N97YxKBHT#emzDwg2qKQwMlK9+_Y;4|?wc zD0mefTdXvBHA&r(dc7C$p=^WVu*2y*bXc@mZaVyYbfq_AgjH(ODtHp%DEHqf`p@eu 
z2sRnMN?uAK|9rsf(jO)Un$ll+^6kRmE$P3PKY+qE9Ta>&ZZ2NSb3fO?N=2>t7ky5l zxD&}49t1mHQzrIN@=RSb4e`-5F>4ffd!&NGPht{d{}{U(LmVOuHEQKO$F;x06+{1h za>=ADH(M8LY2D@B^&L8N)%!%=j7kF*d>=o3x(je}Lbgog=<{56+uM&4ZmTUzW_`aL zeKJ1UWBKj&&A&Wy>pwfb2f~wgo1s}vAa`B^@4R{PPH89$YWDsBw69r*(GJGi zN@%?Jb#>R*pmv$ssV;CUcEsSHVG7%q_VO&@-HFTY!rDX~jV>c0(w{F|^JD3mgbjLlU^qu5BU z^Lv`j1G>%ob5Sf2%?-&nLY>ba$`JBWEmYLrxih0MVfR2%iQo0~q!If)y|#L~xJ%i+ z6w|Mv5hFl(rWJ}p6wzXK)66Z@On~ISfX1dkP6~}0>2!Bp#OioyzBm!RAQ$3_1Is9S zF+d*4v~MG6wxPBW+1-OZ`t6Hp{m_Gu>Bo+@6;^gaY6nwD{-nHEDwpc+6LjG}Ih0I= z@!x-(N#ao;kO5dO3SrvSvM;Sg`9)Jt=vic@ZXJn{kA$NZ zj_?J_X+HO2#_w;THGjq9%EO6-KDw*r;k6dG5%<2$2^~n0c1oBr6_56J+{>m&#(qXp zT2*z3O+6CnJfuGv5?>)^rk3o&&Axpe@(b0aAb$JZgr{p$`DD)I|>Q8;CG-$5d|c-)U}&_`_ZZ7 z%Ykx_YoSoxYtXX&pVXwsu|X5NqLY}H@;D3_J`JtgC?XZq!Sg2HofgZ;4ZaX}blr=lf z`Khzq&69mcTB)MxNZdZ<{-3`Oo>$r6IHPXYlXG^Ne&sg5HSYPKK00v0LnhcC72~6S z{^d6@7kMze6kLw0mRbWaEN+}0!dgwN(Z^bh!BCKR|7WQ5VyhXn;jd>I;q z-IcP+%8mLDV+!@=NCmED>5Cbniwlk%J7(*9{@l5-Kn7^`oO;_M{oGHR>hqlkG?eh# z2&Md;ewoqgsiV@z_Y{V0oG9C-?URql6l1vUS{`F#v58`@cLklKb+I>nAFO>wHCK}= zggzebF&4HzN1nqn+Znf>;l}0NxR4^jbSLf37_EtY~LGpKz|G-o|$8+*F_Z(=9u{ z0l?t-_Uzi9=wpYz^Heb@{_vfexqHqm@l1aB$x5^rfK$)>{#jQ8rAwEWo$ihA>GbW> z!`@Tg9l4i()Rog^ZETBvj+o50&^O#T;pGSDC)ERFWW?Z1sg^T@P@Cr6Ck`E&U^4ye zu=35WsW~kf<8Nla4VB-E5})&Ocv4SjP=<240?p z$;QR4q-0c;D#|ObCy#T2EiPs>5Pm$cn>5Bc?KP>FXTCkn<9VuQ&TVy_qI~ds8#&&)!X<^_QA@qlGY{!!(vA-KQJ!#m_H4 z;UmkxW=|027c|}Un0A)!NaW!&{^V(8LyQ^#bbNpO`_z?VrD?;8d9hP2rH%U)YM5Yq zv1Z?A2`_juT64a`0{4OY!<8TC+}>e+aFWp)F&neWeIv<49$fr(y{=B*cG z7(y*IV1RA9p}pU4og)XU@F1-;EH&}b|yJBib7QWYuVOPI<`(J;95Fn(Lb z;kmw6=3#Jh)xQ^+FbYo-J{VQsmUjPYm$g_P+Bi%s^a0|vL@m<0D#O|<;H%gT!oxAp zAY5rXcICbg^M<-hoIq_lgKX#w}uL{CwS4wx|#vt0vOH1oA ztpD!TrHki!?~l%$sPyzy4t{3g3orYo4-Ly!>V@W16_2MGysi1A7)M}TBJNa@*Qecp z_(ErP)*mFMU6hRQbd7K1zA-?%+weAbky*51P4U1T^A~Bw6!2r?;-?!o)4vYTY+k#a zJe1LR`GIJoU)D^?Mg?=w0YJ_W%hK~T=aGnrxk$GmF^QEss{NWhT%bIOX|Y|Dc4Lbg z-`HoyN0+!kJFWG%JUZ{c*GqYETfeWo zdzb3{_{(3=vsFe!mO`Q|1vKgDzDZsU92$D3}U>VTVz%xr1) 
zr_bI$5yXG1?@TZXGSfwH?_$FALdD?d83nhDYm5_KIiM>!*Y#3&3*_Z?yZ;(GZN~zt zgP1X8Z{EDw5zzQ>`0gjKUJ%d0kItdx5wdu5kTWs|FA{+Ava$f;=#58CUAWYbv(8V6 z7iU~@O}QXCke;Pfq>ns!()i^|?vO2aUhT+q=W&a3AESzh_Z!PGPBEt=YKZrrPcB9T z1nZQl65+P)a&P;@?h{A1wF&Qb8lKy3JrWOQRXVjq1mLxh`cB*ZMRZdFNlAhIgzM^6 z&$gy8PM5_;p|*mqyKsg5-7lN+)D~|PJ}i_g7IWUAxXw)$ zN5Je)`($Tq>&$;i-AAqSzBrzjL-%3c`F=L>BE2Lu?OGT;gyG^uqY&^w`P>m4`pl6C zSZO8XHY6~L7l(_RfAr(vlEfY(yR!hbzp-*G8HM-itrlp$#iAzeS+DIQ{%+x)KgI_X zN&|b~|DYgTg_mW_ujg>|JaOU#(0&7}r<$3_V@|CbU^-we-z15ZG@dOI-!4)3nbdXT zY)y9r{W~Z(IHsMLoFkfN9H`xMf^sZKw0wOH!xF`*d`ta|qWE`d%u?8y2X9iF>yVv8 z`${6fFMm5(W!0_HNg9(UdpXVKuel_RYzHXZ2kGa z8tbs|<5=xL{Dut1N_&!*TyFV~pIhod9}O^aMS1hQ?uUgyhehW=Ms>t-P5aN}A1?O* z_ivD9EGusw>o1PX_FtNYvv1u>Q^g@CW(4pKZ{@GHQ76(prF*)A;2?&|kRW4Yr$2jk za{7nCYh)Gn4#}{HwjkIxGdWF+GUvPt;V|K5;#vPv%tT-+gA%T^VNv8Cpd`WMO+y7YobWO}oNJxOyH=X2gF&qKx z(sh|1iuM0hP*1I8nEnAZu6+ORQ@HA-Vsed$#OQ+kHnqRKEr^Z4$4ZP(R^R~?7Z)=p z?R)wM=`~DDvAzPszlxFLPsu7)pgU7GzOxc8>`sN9ICBav0mlL5sV*%{>)*F*Nd%$N zaz8#){gR-~k$NRP)_^+|gI^;qC4yNZ1SN7IL@UFjJqg5)?L{YQ-@F-(Tbh>olptWa zYb_I~u*0d-MQp8X`S|f;HLLK}YQ|U1gDI>-{=0hVdcJk=v>jLG+J`cGKF!OV5xW*V zj6(5n1?dw4gA^#E7XxBNz}q_L);(RJ8v zQmo3~_OVy)m_GEp%vxKpwn|E(pa?H}L*nU^51=@#2CAaLf@{NMDlTeb8f^_Fq}}3p zp_B^d8BX6^cN2+<6BGEvdYgnl+l&)tGOoln@c&s=)HlL;Ul`}W9N9$T>QH{_4^bG$ zPa`;Us_NiR4}~m3*fI(=r0MXS!r- z^`XOuRZW-|T{O=+_*b7XrE%aN>Sh9M#~jy*CK00(S9lPyI36<`JB4Jx9ko;@&{OuhO<2jOWnY zIQrPjU~}n^$@5Uh#NYX6vHxW%)BJE&>qX~pWg}RNkv$GW;FEf5T0kMjtan{TkRK6L ze7HPU$Uy24_YMFn1#o7%%FT5L@7K9h17bOZ1aATi(+!ZlAJc-G%&u5F$?D~EoG#Re1lBUH zrqpAPXy0FYhS6@63MjALrdrnR1|oW>w1l^X=dD}`Q+@ShCeFoJcX))Pp0dtq&1 zT$E|#%DNeLrVlrufd_?saB{sPnbf>#Cb;vF0Q!`Dp-EPI(wWknoPIE%|BboDfaOP- z<>N0}Tj`UL2?0@Z+-_{;uGf0&|2W7)v5@kj54MRm#4`%!mBc#rJ*_ocoZ~k-9eJ>J zN6pI|sH9WKTFt~?5f=-nX|X_6Fw8F3Js4JtY+*BGrgXyJ_#UoyZ)G$R1R-6F@T4@uJ_*+M9ZNknm zw#9%rYNS#mgdMqHVxvgMO$He0&CEk9^AP4<z8znqi3|tpm8$S{S$VrgQ;>w zvLR$|0bI4K+&Y5x(Z6Mn|H}o~n9mOJ@h@QPLB-94*Pq`|b=e!Qsjw4ofY;}ml$6v| zVIkuhiGGMx06J}jwBh@@?y4~pTRUy-0!vPn!eXPCJNhT)CSmHW2V7l^ 
zqmGeDutG_L;y;rq@}Ky2uJu&O*tJE*Rj6}#T(ooUhuynJ~KWauMv+Rbr$FL%h_Ou1tK%7*`Ub_kl1dN+F)R zmOI&2fZ(M8#=pe6V{SaOr))4dHFjovh*$fJCV@u^xRGa8c0KG+cpnmrCsCs9?7l5m zmu=q`A_k(s`pIFUh8P&*lRYBvocy)T*NhK`$~CVuGGepWiNR*ssh3Bk6gMF~K*B-evnM`=-3 zXEVAd2l%jpxSCOGp%_3KKYun_60`so7)WRo+xoZ20mpe~(de_}*i= zHbpNI<>W$GZ1*l*DgCSJ_{}+!wSUbT4AZR10(+-djJeYhhEgOZ4S+N+!#l(=@^}#jzXBNp!FzYpNpfq&0JB-AULQ@0 zNeI7u?uZ+`5}ywJ&T`#nsvCj=1M@2en5~KlA6){#4~#Y79eeh;$m=K_7pB^mDv|*q zxO_$f1eU4#B9se+E|C(~S($MtxtCvGoFFHPm*6G-Ugh7Mn|}m&>z(4epyh=@BK_O+ zT$ILp-}C3KTTh%HXp}4rK%2oAp6tN?!s+APZV$$poclCWUjD(IBs*Y1F$^XLVDb_> zUfZ3v*=Q}%nIfEpgKqjZrjM0Aa{s*RWL_1t%>8t%FLQ!!0w$wp>Ardo@dP(>d5RE` z++RD3_To$gBDTXt=SczpJAOU#;1cm%z3BSKcV)&JQ?aL9&}`zN5zF3IHn^cFIPj~R z15V7m%3$1Tk>I>2T$9Jj;9t!|s_|wK zJBehv2&Z##4Vg9fI$I|&2`41W0ay<%7Q(iA(AOWL}-jLFLQtLlbwwtFPa+1)1yA{ zw6{33X!ZW?#Q@7qj zg$W~B{A`VHAMYPNe%x+aLw||&h6g_K%L+eD*}1el(A#*Mp)aV6YGGF#ck)d`#+T~-zO-}Rq7Meb zgYAL!7J4G?+b*SN$Tm!l3A{+bA?C)S)!u^3;|?QaiszcbP}r3@F#BOuhc2pYgbJu1v{DE4@dZy3Wk}QL! zW1@_$yd&jR zRR&?7rf{57iMn5g^z_X9J8{TTxRG7vJrsQSIpXzELb-qN{tg$`m!G$NLE#!)7MYaP zrdAMHBp2_X=qX49*bh4}+)7@(%|D?X5^^cXXIll*39P2Lc^vg8aUne~&v53#)Hvb! z>V+q72%v;Hv|@)!AG5l1HA|n?Js4CA&CR-4e>ar%VTmTJXGGF8ycFW^pdS;X4Bdx% zXDFpMJOFq!{opyB*-F7162E^jT77Hloc?qA6*iwy+*=()FQ&4Z3i`|tL{gJW#2zyD7kJ72@p4)xG`_4n?-U#96tenzmu3CGM;eA#F1ky%g3`d#BTCT; zq2f<&l3U7nWE)T}dC3WUr|DX~^=W2Y@`P5PomI~LYT3h%ida1}y(`}s&aHxiX$ksX z+8EAvf@Ih2?Z->N)TrK8$QmqXMxUF0G7;cGv`^B~((dfJ`>|eXXGbEJ&{TQ$ zFplnx&dqf8M8I(eRJF`F`bfF4zmfZLI;GofPk>p9IjIknh_%Ky+A{3X5)Q4~Ui-)H z2Gtr|E+#gPxzpUbMCY2y<_^-2o1Eehr8AgL%e`#d>f~@Y5lBf(eA8CYb`is2RhA}D z6}=-atblU=rqo=*RO}etb2d!^xErzHy7EH+k8xO?R}D7nE!p}TN5{~)MTLcj@B9@O zevF{)Q=Nw6pCxT+-|?n<n(2)V{!$R9q;=w=o7u`-cC3bw=u4Zsqiqx@(KM?;tT-xN=PkZ)5fL zVGoIwA=I|bsMnaUXc0v2@$9IBNy41li6gJ}i*jG5m)&LtBH z(QIBL`Rs0j~=@+7i9D@$V)1Rz_|%qdTcs6SS@vx zUhF?7Sq}ZZhML%tcStYQYHP}a5tWlhZOItHj9d1rFmoRE?uC~gqW0a?)#R? 
zZ?tNMrFM+%E)dk_#@Yz9nZJ`}YinySBhpQ>uUpTE-NLc!B3Iso&ntLVB4H5TUJT1o zzp1w8+b*pvn2VmLPMHP`alI&p9~<7@5#lv&wL`l6)y-340lz09N2X<8VmO+yhpV!k zU^R$-z4X|X7lch6Mpitwo0M1b74I7-AYiBGlwzIGBpMYY?CzH-h1v{9csFg(;rIfj zStwqZ(4E;kICw2^h9fdktBz(n3l{rd)q|O18lPuETr{HJkOh8qlTDsL7Rr0^NW{w{ z9&5Q$J?Q}hHXJ|UJ_&Hv>c#arFbAKrgCH6N6OBb(?7+$vJw%egg~T%=W-<5 z-5Bd~WBIwMgZ9Z+v6o_pWTNH|qYQZqJ#E5>T}s;aM*dc|J*F4qx|8mx@FG9aH75V9 zP}T$R53RFm8HSC+CM+5ef~hukR7`D{_rai+10t6t7|par!rKmQPV=9H&Ry`S4mR?V%_by$(qc|@PMtXZ=NM`ruZ0v%g!YN;cVoHZ| zB>*GD6eg6f5z|KI<0W;BGCrH3U3>n@2vjJmV~uX{N`MD)6x4XYE8Wf#X58j)qHTJ= zv3*!6q*xg!uLmq@agBAe!4dE4aC_LWK$UL(Q3~Cc6$9Ganp@+WYMt9Gg&FbC z0t&3nGZ-eh_+c!8w2U_XMADk7@z&tC1uHp2O~IQW>WB-R4~TPY_lLNL!m#Vp-EJdf zI36bmAmdkNjb1Cd=QZW%>tl^vp_<$v_?3%iou4yqNFd5%$t@r62vtzwO(@ zQArAU0|}|Fz$r!QP9#f!y{>dc@Q>oj0}&r1Ku)vjq@V~IFa?T>etEOCtIpSlM!dAk z*olZmb|Ad_!1kf{j09mJFC{x-e#AxUwX0-ii}MV2RzkuNJtr}BzGFvt;$ybghwT;P z&>)%UU07r%uSjOk6W1kU%waf0{D$S6UxG(K-#K|zK;$im)>q(wfTo{4d(V5hGsB{J zyT5hm(nZi?n!fx>tfn2c0z9z9oWaKwo7A>o5Key#hfGp}#h%F(ftoM}M+>3vUE>&GSmj zuUsI=Tf!2j;{*z2d)wQ}G_63&?2gKDiuW2i?3^jO95H@gY`+I=jcAUjrmtTWCQW*7 zJ=u{*fkAB$Xy~+}BmES#m=|>tn;j#i`0@YQ=66Zj|GMkEUF!vrq5aqY1}$M^J8P}| zyZ=Af1poIxV#XG`cCsx$xzvsU7Z+g?jnU9Z(0p0{@#9h+-U>EjfAceAoC&rcZ*EMvhv7wjgvI`v?!;;9&k z{=@UKKW^oJn!54-@)`Z-+m`%~2`IaY-}nCKo|0=)pc_Lr(tN4CXp!8M87X+`zMQkOs%M>SP}L@^VD8h3~&Y3Mw`ZPT0|?Us~b@H+MjPH;TC@Q zXW6?L&7-IQISe3+d`InXXV2Kqe`z)xf=sSkCaf|9K)}Ja(0xza_nqXSDdq))(Wjsr zM+&FhA3w>dd51C@9cUs~b%T!!rw&1{Gg85b8u^7_d61lq(h|`dP@A?#021v`E_2=o z!MT=v^K4%tE@@FK#|m=`ft&{h2Zw@Q4aWA418p2&DI{b|&aA_S4`&G%4|WzfZQA&r z@peS~D|BNw!RW$x{bm}@g9>^}%YjF7ss5idirkf+)n(#arGXi}PXXb9qREBuCl0R< zjo!gHXHiIsuM53^U}~kMg+6i;Z`UQMyH_TBGncfpP@sf;JMQR303w3e4=rabO?8oA zQ!hRPN4x;DQDCDqrbdmFt$6aAJz*;+$+@k6Z5geg%9GNhcZAX43dAE3kd9J%x-H21 z^~4NWeh!|Qxz`GbMm6t-(^Da`BsEcEcd7w~g}D@bXjULa=tdz=*}OqJEi$;p;oXP`rAxYt9a+$iP#eMre(MkqywdfgIe%?yoU z`hTZCc_K#7uQtTL;7K0uAS$g#_wMc_igOOQMPW~-k$@d=V!Kj?5SY0VVsHN~Ul<<04(TIAoTP^+d#-{#tNo?Hl-p+j?3 
zaD>4JN{6OOM0b|tdv@lm*Xsq3eh2{m_(62@wgv4vc$3#EAmhBS0_d{5;3|h5p)Y_| zy074?NqL}1yPc-Bp9Zmg+e!S%8yLu#GfEl$6Bs!D+6kYLgoe^uUtE2gc7ys!i~)mz z2(a#+%kIVeIQlsg5{lscuvMbQ3>h&Z0{;ATt6)J*1yM+E6df#D;>SnK`mi*2ZRCse z&cdGpSX&MA^?0Whn_1PXo5+I%x$D5pJhB9Ssy6iiY^*Z^2m)%S@k9R*jC!H(^qtqB z-S{~cytr)}Du8_fWC8MCU3zh^aOp-_BxbcQZq$2R8x_j@LdHl$7%Udlh_wSHEx6*; zud+!061Q>v`nN2s=a2|IkSKv`n4^9Y+8IL?U^lowG*1sQNJb8=3`hPVQj!(Cm58{! zMPwRu8AB;*aExSNvM#+UU7D58Ar8cAY!I(h{|8rc99Lig+`Z$4Gv-f5f+9$M_8bVm z$hqx1ycpXn&bE?(k&ICYhYvFcPX3<@u~R0I5oAqz2cpRoT`kOU{EBb(`RS+YJq`vve8roL$q<+#I*7A^zRU%|Kzs6P7bmtaey-s1 zzrlE8PhhxeD#+aw&sRkoL9o55Ba#?%X;IOnE>`!od+T5$uIpiB&VK3QU?_OUw>*&r zk0HqLEEd;;31B*ZK`d;R7>M>YM8*;MAE<9c!W>T-pNhywhjx}8u;KFH#l!s6x6gMq zJ+pyG5e)Jcf3}2sT*jbE!3K_B^vcSLja%Y4g2l^U8G~SBlp9V#f!O*j$~;ot%F;wMg{VSHHuQ1`&G)jJE<5bJ)gl z-+^^=0Yc#niW-1JOAYyo*lkoU!jF{pgcdg{`rtJkRe5>ICeJv0E9h_2VYs!RA)mTY zL;y~ux=)`T$NmWx1%L7K=<~aNr~9o7Fmv$)Mu~%j4>w+FPdVh(80N+}pR9|8Fys9) z^seYf6sw)kiCjDH!2bP=J+?ul@gbH{aHF5>GbF!T z=gw#TlAML5ECea`UH=PpZvj>H-foQ|iXGd)77G!jR79mQP!}a43P>0zh=fW>NZ4CU zmUOEqASEFk>Q)dbC4~ioNG}j2rSE)}`>k`&80U`hjqkhnI>vj>`)=K`*8l(eJu#m- z=W~>x8^{K&7T!m#2oaEp)-#L(n$1?=u7d6#doM$kL94HTyK2n5@BQlL`fBh4zbOzW5ltj&ADAMs1~zI%)9B$+$B}b+Z$;Lrylm ztmTb!PEr46NvEMiLPGnx>W(2=C(qacagbkvE1`$ttJW|uPP91_i@CXNm$|r7Xn++i z*E4-*7=Lt<9-ykzKLHSCAZ|uT7&SjNUYrX5RTEy7;Fc{KByCb3g!%xgLjxETnqI#D z6^fb{7jM9U8E2|qy}$iw(3l&heMVW(n3Ddiv&8Is*02RYRC0q($iisq!EQ4lz)= z+72S)UO4lu{nZDO176%k2iN?@-3qELnhK7R2LWBAQDj)#13#q=7vGQ@`$b*^`EA{Y zc$&;?cY#mp&PqQFXUwYa;xZVWgCs~P27Kf9Gc9Qq9W>HOk!<15jly;wT0;nm6oebB zv)@~GnXG9NHrWP8-SkA7K$lrh7SQf~rU{|~@uv+-*Lehe&a>x3&IXTTN z7ks#S+uuUYU-HBq_#fqjV-mDU9wZoB6F}xzk98ETWH|T{m`v{|2y*Z6{t};O+BMg> z4X$~L9X3CCPp$1C!clk-3|oWz_wj*5wb~q`&e`I-!PY)IFxFnR4xhs{^2DGpeV&cj zTjhZ9$5sF~vzBam`_*K)2JmG_Sy|b#&*|AWvPV15scm25ZE5r-3Z(;Wa|bBb**){G zk6^bfZ*P2AcQavh7YK^SuJP58C#uc6V;Iu+F)g_BTqx66jCnzWuJjy8qw_Zb^I~ zHE(S;==C5=Xhx0Ekx7>RyF`2U_O0N?jqQF-7zg)KZ3){)fRdK)vsY}}o6QIMQXfFF zv(WHw#H1mpiAoD%p=%V)Gb-|WnRFctb+@3}4+u`s5kqJdI=>9af#uM} zRTG) 
z3ADUmwaDt#uiGZ~*qfAvyYD`Vq7zgt%_x}7)6?UKPtMyT4IFfEaJc*dwXHiqJA~Eq zve>wQw3I%;6R2t!Nm?UhAIU}psdi8bN-%i7(v2*^2(U88n99fv#_Zr zjeumWG2}HhR;m>0i|@1Z&!MB0F0)Fyi7I#GYwzMgCtI!jf`SYqUwkZx9^S4V1&{O- zZc1PUG(>j@cJH}34@6w#&K(~9pxTu_O@R+#AAYPLjpn7t0u|tGyHZAl@lCRb%>iV7=L*;-mzixHH=cdZ; z3;bksqAO&KcQmd8T1hpnpEr1{lg|MsEk|RKK9UW`!RM&r2kFa6A(80&qL8c-`NX?? zc@$!8^;d1&T+55e*}alju?ge9wWtf}+2SoYV3ZTkFA;k>I>4!jr*_jQF+|c96=Hxx zyFrBH*)^*kRepTU;Bj24ISTV zRZR0l02*akrd20ysz|5BWiaqKt2wtE#HXp>|=I!1yrj4?S6{ zHxSAL+C{CAUQpbPOiFwv=vBaj>4T+?ucJHJ$`gZ>GVjM=lvhFkl5xS~mFv*5H5UiR z!s6l#ph{T9Tq9fHn?vaKs=<}P;aCk?GU`JJBNf$LF570KGN%1&6-K^E|$Lf##K+kN7_tg&SF2?+@&aef) zMiE+2D5D|>@IgqRq8B4EO{gviGRMuUX)~(&r;^m@e^io^Qipg)h7Sb zWTqqDiD9_gojcar4iTsf!5>gZZ5~?{ql%vegGv&&Mxarz#v?|!Sn$Umt8hxuEC_;R zk0C5c)_E5$^g5@D7hH;*=HcZf0D%vcPyoHau-!N?UX|YUEByJ$__wZWe{e6}HEs3k zM~e=+ttwHR8~Wt<&KGM=PGBw4x6#*251+bbjhClD`_Yewz1Ju{Xy52o;wkWM>K^V1 zGe!D(hOgG?i5IAB$y1rP%b+-S?fu-wFIGje7mdD48W**Ax$#P%EYZi>27}m)_c}#5 zXk<=spLJWpqDC9>UJKl?vTPR@`fLab@bS<2gT-%;mVRD27W9d9Yylvm)6%GH=>a+b-I0S58n`s zfh7qW(AAbiL@M}gHEHOx7tHFw%!LT<*lisJm6RYxHxN@=&v2&Wzi!39)R5{MCt-R0 zPB#8A=U0g4zk&ElJpWQ}{=fViQ-$_ZifTm^M+rX7mII-2K5+)BcrC=p83EY}JKZr` zjs*AsBDMMD?KA)WQd|#K63H8{ZAL&a%b1=4{c_d#Hxa5J@4qGpoZnC)2U3!Z&Dw#| z$oL8%E>me{+nxJ~HUkop%8nyYObL#<+AO?8&@(v|&Es$+?RM-49%eG(e z(0~Q#}DDK0ZDTF-Ifbe4g-BAJ8h;6Hif(wE`I=(KclYOg17h`T!RB zbl2|Po=_kmsOe$ueFWPFFK8`G75p%E8kvi@EA*);qoDioLk9=7rB7G<$dZX9m*sH$ zFnK3a4ifbD0-tTYC;ffusqL;BY&_a>eEk^#F7O_qvh`Kp@vJF|*XxcOZx(XJVDvKq zI85F!Y%@&)kKsMe{As2N6I#f3ge-~|1IOruM^rLKq<;J}AW;IsSqEEq?Q0V9JqXJs zFo`oMmg%|E^F{P%qF|kgx=ave;}1{alq5$*MK#3CRyPBxU`(ns>IzN5_;nD6I>7n{ zs*^_U$SO<;2!_V`VqL1x{oYQmwx6D}*()SQZJ`YEei`Tt~1ZqH#DEE?j5?aPcL)@L=ZL7ll0gPT%}jMEH<5 zW}eCnMqFe}+6R6eH25oEzf{-sOMEi+MTEaJDb1mp4-}*%1f-UM9_Z}2Yk5O7NrndR zmr9HD&TYwc0uVsi!dm_)I0#W_d~Ea9d26F`6W+3=gaK*dvP6eXbypw1J@-QV5KeTb zn^274dFPk~8;p>W338x%2I#I9g*)rk2SgsKwPl$b^iYg=>rwG{eL!L1tQDb=S24oP$UtXrYZ8l^|+$=SQ1m zif*IG*Bvt}QV?_YhLS26h$2Y>uipqty_h%i_k%~-MgS`_6n`s{g5^8CCnY!ICMx-( 
z+LRscNFZJaomsHcH8_-hZ?P_~?-=oy;hb!>`*FMwJt5QjuUXp{?DIUxrC6Q~-!LH= z89kf|Z+^V;4^Szv+8j3>P z*k<8i3}!4ce(t^BOOPd2iy~nyEtB^D`*A1C-{~&C2lYjbvvvj!iQLsSqDaC3oIXvQ zBcuMgT<8;6!fV_#30Nz{qP=K{NB%o z_f7EZ{6GSLGHbjQxvzpIUQeDoPvbnVY2u}LV(S>FT|}CzJuL35P+tlswO!k5&R@;? z)iC35xcW{+heId{FFiX4dmK2QgrnMO$DXFtXYYKUh7T{QOd|GQ$cX*j3Fsrja1q!z zH0LWI=s1iL`5*{uAMmgI_%QJ!j$}Y3TC_?C1Ue=3afXR4MQTdkZMei=7BOnhKL ziG~q=F!|=tc&e?;R}4gb{TO>v`~eGpj;wuMBMg3iDgdJO9(m|{;CF->s6BNROw66Fbt&5d=8*C`E_;i3dzYK8w@sjh2GD_FauHK7y#I&J2l)J?(<>xUe1D_ zktzAx%0ksPo!hCtpy*k#2d>9e0NZB~w3(qSi*(iG8q3>t_OwBTo-`_>>ze5aS{Gh= zDpqBK^G8EcDmTGvzhW?kF{s4F?|O_9#JhI!+Nv_K35w=l-&`+ZBZM{ig7^A;=Nu35 zxJU@5KGq6r+VHP$i6rXD>^=Cfl-X1-8=%vdM51(zFKLUp4f z4Py2e1LwkSyR9c`l1mAf!UggmBO3&UMxd09HReC|s@@(RjTyGJ(#hdGPv%Ja4?Q|= z#gi+V+Bom}b- zs%#{586Du<-fk#3B0%$%Fe!Q4(xX(Aa53gN7oi`(UGcZWSY$W zYSSUunf4}UkE&t{?fhzZEdvC-m!XG@rw*P?$#|MqP*6sLT<1{-BrXH0OyJxQj@ml4 zNgoO<;s@(n6-uD*2`(;PBZj4^AMqE(pl+p}7Gv;I7`%`pj7$&qiJ78=p~{wwXb6jh zCq7KW^7;%?$C`yc6w8QrNbqvJ)j+q#X`s&Q`t>yti?EH3&^VNXoAsQB1|FRkSCFS(9Tifo#GL| z7cw1I2>b9*RbZs?z$4r7#T3Wj?jC)qWw{HPF5hN^gkn}&0)0~Zi2TX7Q&E|^bmd~9&*`1aLUy6+-dS33xZM#yQodSVb8`|8`f(Cy3zlh}1?zj0cmm>%4r*^zCOkM;Ks9B%36!(Q8Z-(% zjb%7FM-$JSK;fK>;_4`RR`ltP+dx~r4b3kj3B0rQqa++affTLp*!i+3Nf&L6u=M$Q z*Uq}^I&|kJB5~#!TNvA@S2T~+O)k{ z1Gxpl*t&!M+=AHwy~EETxk04_%%NboPTJDqh)MvuGTj)0jtb;~AY}%=nwF2wJ^X4@i3`WX23b~R9iZPw-5D3Q(2Lt`W`+0#f zhy@cmGu%=yFE1m^fsz^V==-a1=qw5D_d zy5NKDi22&9Rri|0^hKZdzpKLn}91k=q#HSECL)Neh2bcGUrhIl=5e zg1x95?BmmF#s-?VMLYqxFB=PMSuOB_)w*xqv~BWY*kaLpE z9cR5NC6Uf@W1OYf+#%d|ZEy!VQ1kGyxmOIQF0jrD3vYJd4zv=yC$~kOCGugj9h5WT z5%>3eZN~;z{R{7Sd5R9GH_qIIr=Q;X_;!sKvh8QdR;q@QZSb?i8T~-h9Yngo7=Hm& zsR~D~W3D=J7#Q*B`W*^~@fBLCfBN{MVXNZTLC7CPnC4?+E91_Gr$7bua(Dk4=L|ev zAP;c5SuSu?qB z3|<+2VC__(?(aDG9sDU(CPE1dY;f6bL{t_XHKrNVtp+_x@jxtj)*UFb4zWpbB zA{?B$EK-{m8atw}Mxx(l3OfSvuUJB|`(ehE8)%5B_}|1Jv%Gu|vnl|nv8wt}x~Zy9 zGQI}-kMR!@!rbp~KsAWYY6lhhjCd5gX{yJ0O4j={b^YoKo0G?0JZJ9`Q66*Si6(}6 zz(r5Mp)7_zD8^&~O)>Rk>5$P(@^iBEssOH^ECn=Q2N6h>iMHTZO^n#<&tX@by^Eg3 
zUH5e{oD#yn40S`#3i;9PN-q|K-kq7aLg-Aoo@VE5dGph&h7W3w=A#2KW<~)Sw#b(s zY@Gk_6l!UU{5trvgpFf~RTAReQM}DgdW%TzWIdc znR0!h$(ZB~cq{1Wf+|dhRc@$tts};N!ctOFm7AaH)Ej0WFBn)*p?4s}YWBNN`xaVL zm>dY}lq((Dzd#fv&t)X?&$M|f`82Y^`Pu!j{z!?&=Nu3QT@|UM^lT}=dJv%k6iX?MQa}E2oTFSDV9Pit}5UNOL zXI7R^OoAgpnvQTe=|EGkT|5C5eiaG@GjOMORt1weJAWj`1fZWeM&Fh<0b{KF(n8uB znmYvIg=1iNK_O5y6)9fTacHSzD$==+F*^_!`m|l}iT(JOffSybhB1K;Kl~%_L38!{ zbfHAB30e`z&K<&`=!7Y91#T@1=vG43&a0CJ^7!oPC*04VCGw1TfR5G3l{FO<(5sS? z6ArsJ{HPlrs~j}I)(dR_!r+DNq+4zZ0ys$}uI3_Olthb6Pdumd<{GXH1puIo^p$ltGRvOuI@K@Expi0V9quuh z_(K#bOi0PUP1=fZVTAfD0l+&n7JgQj)cm)JR9YfZ@fc=>ZEAKX;P&T4st275n-l*Ou-hNm(1=A&&aBXRlOi232)n00T3rJ8*l?f z?Aq$Ye>)~gV)idRaZFCywlCZmGZHqCidrUa-w^x+9-ac66^DkansC3UeEUfMn|u!M z+5L1-s$0=D4Mru%kIv)jvb>j{QQK?**(FdRua?eM?j^&%Jm<33`&h|}^8C(WI2nD5 zWs-3+I{!Wys1OnW>)HkQo-^vl#7BBc9j;+u6UyU6<6x(XwN8k)mUo{cy;lOkQ)L!A zPFxQT5x+mCRh4~+lWI#)4FAw}K{p_6({CG_iG~+14nsGf$%QnaiDgZ*c0oxOsP+~0 zN55ZP@K#Z>&YrKn!|8`Cg!7Pg9f=v#hk_vBMjEP4xl(Fii-vdL_lyStC5qtj@mI}1 zAoI0y+r&KMBZz-r}(rw|u^rs&lb1gH%o z0rzXS47snnpu8YJQqP-DIUe8cSD@Hpr(MqX>oSJ^NXWSVQz3t9V&JUAyU=<#sz})! zE=H>w*o1j+9H1Fp+`G&qPf&?A=MAju#C&ioV<8{k_r8S_IniKLG*HBFM#9Kly!!X$ zd?{nyR{0OYf*d~NErQ?)Bcg%*hdyrG^1wDKgtb+ux_@14QnyOSu>v*OIiZ-k2;`CA z1a4*N#fg}LT4t?{&FuHfy;cuUGqCT=(8Pp{+_@M&3FOh5+u*(*#!(YqrNdIhe#P!| zr0h??RDw*GT)sDYMY3*wRBBMbbg35g zQ#i1wd;(NNpP|^xcs4C(tTCWt2q69KugQ? 
zd2oB2rj`~k=df+VM!A2ShwJlbxR+ywEA756@@$uN!So8y z8R=#FmnDD?)?&*YlOKsP(m1`ZLN^+)pDpN)y6A29WdQ(sW4iKTDQ5NvHXslVq#nqc z6o26I?`lG4jKaM~&uTUk>u#N#TM7s3_0P@OB64P#$&nwn!Vk>-c=d;wTDtbn`o%;g zhs(eqNU4>VUtj4+#+|WqhF1WRWW-~r$wE?M zFF6S zzJ}9uw--vf8vZIO#3jb5V_fP;0OxD)kJR9r5|1Jq}F^)t3`1zr`m7JciMt zMR(+03%lgT$~)~h$hF(_fi7?8U3*B?1JiI>^9jmHhy1V;s62F3*-S(X-XQwEd3?v03+Ct%zbm;j)mDeE(of_ zHG4Pphv9&sP6Wgs%=)=rRW0V`m_F8g7#UHs2ps*Vy+BzTP{{ysKI#_4YtbYGT9X8b zB~|IU8~s|ZssH6>aUs9rnkE2M?|JDO%I^lw;txZ*B08Gf7{B%ai zUO%3Sd%V{EMvAPg#_}UUw?^~ZXtpAasjLc9aqnJ-N$ZfDL||6Mk9G#lJhc-r2UTGb zC(|I=y>dqbxxzSaA8t61J?WKnWw{=+i?wQS?Sr^rogXcZ+=1+$0@<8GesX;6W4@~` z6}H~j@D|s84A~#9ZbpI=U<*h(1;&1OJp^SkLy;X#AUdEqvaTPJ-UeWUOipFKMGZ^a z$6d!gCQ1CH~ASzM;E+ep_0bLZ2Fuz-o6Ybnxtm;ao7P_ zR@8yMVvs!d&L|OE#<0tIFzRNuiCQH@mW&e2<<>EX%Zifr(MR?1*+iH+ed-i`MoK+* za2wf&1JfoV2V{w12{KS@lgSeQ;aG#(So6uu0~^zdYfgh^!3+hJ+d)5@3}{cEW-y5* z9?M=Afmu78MDG@NlB+hl{4|X1NJvP8a`K_wduQ?2ftG0tlE7{c)AzcRTo^WEZ}6($ zU7)+Ae9qN_dqXpb09>fI2>Az&CL*a;k;n(uTIO%|`&}-rve&vi9k#>bNTw zhc>45zYFGMW#W}4j}s>zB`AYQgjfWTwCHgZ64vlgW0cS-`2cxB7g^piG>{51EyQE% zrUM1a-l<^txX*OBT$nw|`k{ zd~)S2=H;sFlT5s3v`DRpclqL-ZL@TXR&HeW0v?c##fBT%?r|939fA`ZKLZR-bv?a+ z&_g>HdgD});cUsVz#R1SwI6iiPQn=$`U%4O_}c(xkS*m_R8k_xvh+Aa9-?&sASKfN z!@Baz6=+CW6eJc8_kb;R0Gt^Qew)tf!%T9ylld(FGG8=7Y$X)FxHnAb zSXM?)8jiC;DO}jjfFn<~QWS>&C^M;7wIbjBqB(8~-3-M6U*!_?aREgI zeJjTCU=y)bdVL|rbL#?!YOt-U<=_qWO$}hq@FaMSGdPN)YopwQp>I2wWM#++Pmm_` zP{$ksnr|7sOno_oY4p??OxoFrHfhKcwcQnu?0+lltBWR=~trFKAHHi=?L;&YWDpoHZ zy&d_NJPrBtB$eB(iENAJrWc)uqwJH{eNGsZdxFK=9w!HH5fHL^w6#87QtZQ-K^eX- zaOrWVq!h_*4Ai=stge`D%yy+>Pt#V9l4HgJHNauj!rHe0Ut*M?H;aS_2jqj(skT81 z7oeQJ*HdlN&y>INY5o2|Y3I?l-Q7}u2bxvpDxiv4P5v}c=MswzXMe37N078ZiIkQZ zOk8XO=&Dkm27}{+oAm?*^8O)qK?D?b4xxSP9ym?!O}E~5S0wZb^#Ekc=g<+C%OB{s z)efkDm1=NM(0AK%*W}G+J6Zc9>N~Ypu?kXa-&h>$J}XrGcC3jw69{yH3D5PhaUmKl z8A5T!k6q}IZ>542Y40SDS>VRD*nVd~bQZ>kP&$5wdkCV_y1^`sx53WSTrZ+8vF4ry ze)&Na6_v{$P-JK=?vcG*)E6N7cX~2SSKK_A>c;uQ`#o0iq_!+9UNEfyQ`l%Aqo}H} z^fI5vbKiz+Ouw(z#)#1$Lue-`$ZVTDwW{G1SlAJJvl?&X-S;%MX!4Z( 
zzP^1ORi&xp?IhU8L(I;#wMh9o`C((tl%H2-h`u08T{Z&nnUaVpYf&ROZ3C^D=qv01 z$DdAslvBT(h75l@3IoamEbKNWnmXncHA!C&EIp?;$ECaPkFE?AxK8ow;P}<@b@&Gc zn!~st9{a}q@n(xhfQ4Iq$z5iV@)`;Bgi+Y{Ptw3E((!FF<*Z+X47#;*s?QQ6{(GR0 zY0)=HlXbm-e^1RGkV^M^dP_B>-RE3}{V|UzV2dv%i!G0DLxcj!j`o&X1W!{BfJUrQ z=Engh>DHWEX8k9G;KQ2{IBV}RaZFyr?#vKPc?HJeIVAj-7+h2XKRko#351;f)FxWs zY!zIKQ^rE{Ge<7n1}?NCqICzjNfvci$slzb#2_n!_naTt@Ug3;YHQN_RJm+|G-=5V z=E$EVbEqglb?d(`4clH@c6gn_tSkXZHTLwv-&p7SZG~=0PCvt#zZUsq&V5;jZ+Pk+XkCB@w?5Qb5!gE<%gEl3QS?x zlEkDoFU73DB4!ZtHEP8A5eyyl<&mqx9=V@_E+ZL6B2sLyL2VzN!pBR10>-{E8wW0c z<{>XJ8o0j|rfjw)G*%-0LvkNX>Cg9*`IH4)FK`!F+{flbPCw8N|6K4QNBYt{=@T?a`N6AkWVH7MZj=;2T z8K9-yiL)jaYIy_?V%}13{TVW=m6BXcDqp&NIICR?>WV!+m z$C%13(@p<@Tqay0n5qMRj^;wX9L^IUXKheaJDdH_B_S5+o;nqZhF^#N5wt~lR!re+ z^HZBpEhXhEmS_DcJu*3d<@Hb5&>xi}I2$8pRis}$=MNhUS|bUmn+vC%MoCTUNZ_k7 zr)j$o_a;C&dSa@ij>Ie+HV3@==U?+!ct=1OvB2wAYv7bQCMoux75H!nq=x$nnPL{rVMC8zkN;o!q&YlMD|A7_u;%P}eE zkrWIZqcFwBv>+TG2~>z5(k^ffBJC;2H)J}F&3SY+P_gnyqSKuA&B1AO3oQNF8`UYay50R2)ChQT2q_pU^`Qz0=fx{`~ov zU2dcR&(5@)__bJ$7a1<;?+ll)6ZBM~0MdLOOlqL$L?}KCX(RsYekfaY#UUZw7vkwWB%Lv232A+h)e|XY+j#Lf%#1M zb1#Fp;!(2v!SFewxwzPIR>+UPE9v6}eUl7}9_<)<|K{`eHn)>m-70N(`;%ub4gto2 z5E!X2IL(dA*#QZhn5`5`-|6!4 z@J|xG;DsGVO+>p~K|6Pjyij6n zgf_yyK4UvEk+J98m_ra<_a4enqLRTO$=`LV>!oQGoWCV+xB8?X4G6L44Z7e^LWV$K zd0hIEHq$r>Wtp!pwZ7u2@ZBO_2s*8WCkp8={Dlr)pEfrT+-(fvbyg%0>ekCs_ylwO zCJ0WOZW?n_a!N=eHe>}flWq(38Nzi8GQBChc}VtuthnQquLD^F}%GbQymAO7D zMTjfF1e{7y1_t)-V7c`{sSlmJ(mOq&NslpCqLm~xjt3V~6ZPF8{0CkMkI=J3SxH-!HF;Wwv5PYlb0>H>0Z zta5vbG`--W%^#;|_yPK61kxj%Nr?V_7G@o9-uyY21U1HKNXTcvo*|MFfJ#k}>KbC^ zxy}p0=-0=J%WZGSoTD5vH+>i`E~RS<*g3?JjB@_B$MkoEUt;x0ssy1Nq3+apfI3K_bE6ByeS4*jr|6gdyqTo z&N>15fF-v$ZY2-qkWK^E1if&IVB^M;2{S<^*Z@f2Be>fPdhZ~=6WfN1nIcNk`2Ykw;01= zDX2mhs7unjez1u1(OiQG-yr~0%0@SWroYD3Xqt%4s0G%Cm54YVRk)1@{l)iBqyCj+ z6B#u572MHJFIcfcgfWCkPHo8;{%;W*OcY9D5cN;r`|>cj`5_*Z2Dq|*hq@68wNkN$ zhK4;e?cTyK5Z^Ulg8rr}8p#1PtG}aT{s%6fhHY1f-vS{iqSt0WTHDlkS|H*#Y_zRWl~D|LAW+7#zilHUE#8M^EB!e-r$&Q1rLtA74SEaof7p;V5!qC 
zVha=OPkIW7(@rFeG*?v#VLjvcE+FU$;JBURGl9mjHfB;A_K;w_PM$EyEzeU<85^fR zJqKnTut2XaCy_r<+R&9CNh6_{ZP4&go+qsfzC9Vf7z96U$j7Vf#f17S>Ov(3gShCh z-HQO$j%;gSQS6@AIT~~D$qhorjHMW9I7XS13#lK#sE+I`$+Z?$3OpRj0DVE8uR%sk zkl^)!X*!rm>Uaf{-|F}uyHTS;QP=!)iJZ3$q##8Y|m2#g>lm##hvv2Qlqa_CV-1mi)k#ohgjZHwoE3Ttc2H~?Bm zx~fbS_DJ=8>2L?W}4dw8ZU4hm=KlXQ#>Qr7Btt@Acc@$3K3zUZbi>=pH8AF znQ2iaO=Mcg>fm-^KqxTXOIjjFmY?)3(P~AYiD0T_*2$0dJ|v#}K?6L@z>xjeg9@(U z6a2+?nGLP@;R93MI-t2k*HsEMm{w!}<%+T=>lQnpE0QN%~1F)se}s#by@=G-cSI5rRVoDH4)PEZL3a`eGO~(SN+pu!!jt3*1%iO@)o$~ zvR4FQxI)Sj$_Ds5Xx-#T*LZDmsY${=Ykf2R)K{Bv1>vtVWhMT z3Smr4-8qP3S@)|A@Vq8=D8-mi#fs=`3U3`^9b#7!v#YbCE}NE81yWIp;WryEv;=^M zY)PT7crM#>3gLZL%LpCD*F6klkoz_$!l+d5Sp;o02+w5fg5f`3+wNVvYJlR^i_HY^ zMGI5{BW*!Wv4JWo{b?J%k(G@aVx@qJPrXFQOY8!KJmM(Ij)&G8?W{XrR!Xt4%y$9! z8zTVxVjz=E^E?Q8cXoScamyNxxB!7usIAO%65NcmPH#vVkqc7E#u_F9kAn5=he6#8Yhq4&;&9={aE3cMIXP*g*>KBF<|;=hOvKuemvoHunb(fg&aWxJci5uqh3)guqp_tQr4G(XQwj_!Q@pzH(Ai z2p4XfHDYbsvGF1An2c^3a)?&;qUm3bxq6?{2=x2-)@{Eo1mrA3!X6#VU@rRV5DRjCCld~ z8YsmY?`=8p$^|?Icn9`s^DS*3HNFex1FG)=6_l@#n)GM~i=WZZBbz~3%F^4BE5*$| z)A06|o)ftxHO}Vs#6T zB`DdlNU}u`^)zZUgV;@ypbFRX&borpI2o1E^Y{RPop>z<2q7?`_ziWqGO^W#o*@Jp z+h}3A^HXvX;V4vUbVDYM!T9Z2?v`^}_kSJbTePSpIk^i0hy>bQ zhXoZC?C2^C`mHjraJGm4?wVbL!8s%&o$OLaN5P9l4Ex65Qt`)qjj?L=W?itCI%?s{$-<{q*?UQMLcd%2eQp zQr2)L%A(-Qo~7CQ0ATj`kzcCodQN4kAm@ZW0%m*`F(#;=dNFj69N!K^NK5a+^?g$3 zlI^-}pI-O1S9L^V$nD)GFSeV#$ed)BF@f;to7VSIVX?kb|!982h=VU@UNi7-9fDQPhRE1$8 zNo`L}OcCvm-nF~d#1hLrW=~?ikN@ndL8(yMX9&N^jtBuL{jOqnr`4*dTur(U`k~D$3A~bmB zcB5BnMLH@)rQC;=aSnQOA1)IwrOFW^)cZDIA<-IiRy_Py+Nq0zO+Z2iWK355BIvwn zRFDvZvSul)Na;o%bjAnm!?Jce{q0y}L$kZIW2V`^y5r-3y~%b>9UaSzCu;Oum$cCSSiHD$C?RM;1;+Lz7$bun@)#*o_p(}1riLK6zNN&pf) z!abm2lww{NK8{a}E3OnYtPlM{CTJg2cPDhjIba4? 
za4POeW&L|3Ay}+HnfUy+-`-L9uRsH@{~v;K{})l=6Fhm4%S*42bp;lAOweU6KmLamHrJ19A6I^wt@#x(rX19YU0=pw^guV{ z&F8fjNkv1&D)EQ>G`)i6TOECA zAMu~^K)XH&D|mH9#p|??dt~Gi#yAXSkX^&A@I{Jb^Dm3sNn|VJndo137SFiVhtXL- zMvqB`OQ^A|zsO^DUIt4V>R{lQO=CfHcb>t)u}tPXh6Vr{5;`*8?g&^mF2|KZ3otyr zyxI}vG?8?AoS`nZsXFZ)5fY*R2g!}X!kU04(O`sv+<6Gz3NJsiKmYx`_`3HL&c*j? zxU*|d6vk&p&%mJqgH#Q&Lvr__DLE~hP)i4$3Dz00mB7+4CKhdKX)(oeGiE5NhrO9~ z94R~b2uq<%E0bTzb;Sg^1+2YC@*`j8IClXsb*ZH429ZMxwIl@(_{eGM=|z#*;aSN4 zkyQix@ttPScfyXh$C#tisui0rFjmPT3qA-jNODX{WojD;!Xy_ge4;S8ULWnSF;nu<5H2A+^hHswlPjtTis6@^%6)eKK2M_Eds_eb2_Z5WVjmk{s{Thr zc=$)O-T}cbgZalMcd&4e$%9a5T+WZ^4>$~MD1Pn^fsBsN3YWs5=baXBRphzEAo@NK zPzg*7OUPbJvXV_o_abguIp1qq+*0l{Px;%uCYO~OW`&1^`*^}tX=O%_14es0S$gPv zir9FidovES%u{X=-CUWe9W2Dj=p9as6yPA9lU^{Z#PM-Z31SMS*F0-W08FWmkmoWZfvM4J2QW6e}l=8fsEwMQZ zV(&>Wh})_aSGFi*o3AfnE+aCRe*FmPEE-XJf!g@bfygZZ>EAbNalN@L^D|q>t9hf> z(3Q07kEgx?sA=#& zT72YyO8n3JN_9m!scb$go>hA9=a2jU{#U{sG17V5u2(!?l7&A>-Z_L#63{?=GAPPv z5a$3>ltk!xsgXnDJHRR~@W6Bz+5I{0D2S>h07D~a4H*19BY@e*QUhXg?8uR4$i`=I zp9$u|a5o0Eo*?M;T|R?@gC7bD538x&BS0Sri83uJjQ}1%f_cpBiO2b7Ncla6&8_2O zUE_(cEn{fF87&|?X)y$LIA#l995{H;oy=Nq0sk`FT;kS6iz1toF^(4B^b(teDzcwU2w!Ud#^1idOwWI< zkL*9D=pjHwvU6SnLpXi|aVZ@UVC)Am1 zYBrFknp@&?WPT{T>PJ^xt++^^=Ge|%`=oPHZ|$}?skC$FW!lCF66qfwW5w~7C0S6T zE_T0gcze>3oWhjdg)T;4i!a-3825Fbc=?R9xNVen+?q4~7&l3q!&BuPerQpW`nN!q zRiF<$h_z!;;h4&;?$7@Lt?aSpb zL;E;Mh62#-h6 zQxe0!AF$~cM0FjLky}UZL|QDjoPUT`2rxYT3%l%74klsGl@FTyrN~ zYOq*1UUE}KPw*1SKmJS-AODQ1B#%6bvODSB%Z`uqczJl7VhY3hl9uCznB6@+qhGQSs!6E;-@S~Zqg;P1kstYSm4Il523Q9aeh{@l&Q`+mq4QqlQp2`Z z!Drs4s8jTaN7Q6!pu;Xh2a`?f7l-iu)-gW3fB!>QW9{;9(uW2y8s?ELDKp@BS6gZ89$B!Z!MC~NWG0`>gBc@eb zAV^n0-j2Q1$GHosTf4S!U7zqB+Q$Y&hg%}zB5B&cIvGS=oAq~((6V=kF$Jxx^v~nHb;GX z&M=EsI{eMnf4pE#@I*sN&V{e;_OBbsk#qqZew_4-#zFQuLC!-aaC1#W!m0+dDC+U! 
zhJf4)%eu~5#>}^>wVL1ZSn^oh*os*SP4DM_DVUE4?58a^!{!aD0QRJ0^w^U#!(~a5 zufl|T=9?Qcmz;!qzWGO6j2E{vY_a8UQmr$_DtLI?8J=J6wdv021~p}RniHB|fk1<- zCe*fxq{%%u%n{!UW7=2mm34l;5fgPKzA-S2rN;YR1VYC-*J-$CTMg5!l;muFO!{6Vk z!)80@SN%dUDQ|qekLSkbrJS9o8>vxFM=s%g%PZ|77o$dnkfdK;Gu%%hX{=tEYn~CA zm#S7^Q>+?ryZO?)p^IConz9o=aNhjhnUjOUWz&pB18oW=DkteTG@9Y>&k8M7erJtd zg&H;minL_?!-SJ_V@R$J=Diy?ZY+b=op6BTj>y?^<0DoK&pNM8_Bah(5T4Frw(ZP< zFV;Fn8Z>YLx*@qYVI4hLjWgE5uprc)l2_0aH`UySTV!m5?t0`QRQUg!saRII2`gQ38MRkj~Vh39I5S z{ZwZH^jr916w$mC_7%&m>-N9f5v=}=E;FW8hcz`(z#K#=F}?Mn*jQL-D7AwpmT9RP zvQtx*B>Ru7S6n~khSbfk?=K%Nm@a9Uw3j=U(oJE+{r=nW6-#Uj^FDi_rG3QlKzc7DHU>r% zuC^`h$TC&1W3+nh&15zt3y`Oq(^fdigbeRnlM}e1Y)7NOGn(l#LqXBbmbiiN(Fa~a zBq{~qSz4aQjz5= z@xbPi|N79>1e3jATw3`2Nk&Eu$ar6Cqz3W&G*gR#lm*!Q0S?`l5c8sp&L6kd2jOvI z99Yzng?zY^RfC>RAP*DG#nyB-0nqsHFOzq^^r|057*fGMiBKq+)Sd>aw9se={GT44 zq9hC=on|VZ@oCH>gvi&15s$3Jf@U*=G;X zolgvEAbSt(8uK$Z;`q1Mjodp5=$|I?j}Z0pv!Jg{oQ ziGE;3k>TO{ndAVDTNU%c37yG0bo$0iA`*ZeOY`;~cUkQOAy?*)H#3Tgwe)QcfBg1q zW7})5P=V<4e}HVtxulR+|r`Vq**XZ=Jc@{cQ8#H z8m+xoB)lPj zo-IkWop}Sw*(eD2BN#mu%lpN&-UJ+vTJL~hSi#T^pec!@0qv?Of1`|YO zCI*7fojxtXQ#ngd<k^`Xd&FR3{(L3o=66m|@liq8HV?R6@n6Dr+Mkc7uCYKN zRm4tM>7l#i*o*8YFWrsNqQR+I8C~9na_BWQXoV37FUnG*4>mApZW_S9mI2<9m-UeM zU9{#yhyZ;!*RSEk>sR{?F|%im4SN@WG9!P2|vMR9KW1vXf{!fYU41*1OclVi=E&Xal4>k{Zn? 
z5{#DV@4SXM+~BzX1W`wboCT5&;nIXmA(E?*RbJ?LOs+K8AlN+}k@Ic3jf%xTqe~82 zvp!$+WObwy&d5TOVrnhC{b!88X5ZhvK=hznA9{g}`~DG(H3Us9KoU*y^!06vG7-!} zt*qN5XK!z>HjLrck?QZDVOUojf~&{XXzjQ+h);gL#wgi*81Z03 zSCeoCIFB=ns*f-i($;`YL$+Vk;(9o;#GWXjqYOFq(C)XM=g`r;<>-NqjX#{NvE9yr&^yBx( zJ`vep^;bcod-iSmX%!2Et7L>L)U5w#3P25i4VGhI&v!UV&l%J%zjg6Z&M9hf8}zoe za9Z4{_hi0y%DjFl`8fJn@dCmkeK1V~zwA})sx^7&=#7$VZT(4NgFC}#U5D9|FF)4m zy!7<;4(_o6BHzZ($EbiiAF=5YMiFf!7}R$V-yJ6_(d6O79io}N8pB93qe;U|4NFVQ zr`dNpKxbscGW?2_*I_WZj~P59ZG@;nzK_rl9ItADqwzIfRB zc3-bMw;V--ft5gSU|7_5c4chZPf-uf4gXhRcKQAM#%ba)g>&^KMBLu`1{%gCD*-Cm zg`(jjFrw$_LVws&YDXA^WY~&CYh6k2F!!Mf4U^7jYms~iGeF$jy>!R<5L{ZV&SH5S zLK1RdD%J)u+f@7IuLEG#4)|0H6L6TJA2CS>l#AXwA~bXlDIl>hOv8i)7>2wFNs9y=!#zEvL2ULQ) ztAqaquZPb+d-UHuy8oYH@{`=w`~$cDKm9N4kLskcG5vZq&P0()zN1o@+-l$R26+z} zEj4Nf@_%xPJ^ANE;RIf@04?(&DtG=*WWO-zGTI?r{jYye6HpfBQ$QP}g>zxQM?kjr zJs!Iizqy*6pBtobkQfh~Uv}JzL~G>ys-L!iM*W;$uItq~rL87( zaRNNJYR|Uaj0zl(wO?R=qs-&bKFzco2`B^2HiJ%(2F@e2?Y0NI0z^21W^sUpCU50` z3**hN-dzE1Hv?Uiq*$*3Q{$iktcg|vOFiu+wa!{ypv-FpTI@_y2&h&Kkb6Mw<&;$S$a2X8% literal 91665 zcmd432UJztwk?Q9J!UutL=f>P2oeMV$;m7rNpeO+Kyn7j92F5I2#Vw+IU_lvq97TG zl2rthoOAen;k~c^`&F-6)oay$E84s7=yvbTT64}ZM(=&}u{>p@ZXDQ4wU>m1m-;O~^y*Oaa0%=E17v@LZ>^i9o7 zbkAAoSnBGUTHQ0Vp4?t8OhWQEiP+Uk^7q5WdhMO$+txRy7w>BQ{g>z;+x&KaI_(%^ zyf-Q9;or}$-P(5WLvP;R+;e%VDIZUCD5~B%LLq5-br!lwKu=*Yi8JVd1P@d67*A1(*{iMI&L?TnJd-v|o>_%ZZ1A`>G_DBEz z^wM8q_x|-3x5nrH$IJWw+b{2ET_C=Y^}-Dw>d!;gui{n-op^L2@pG=Ga#^lz7=nUtYs^2J(u6}`pXyPjIq@iUt!s+S07ngS%v3$Q*vJSrsB$?$v5roVqQ)4IdGgi zF(*fU&z?P5oyIh|k`fX*{z_Vz1lvyI_Hn@)0V_QlVKi90B}^7{^Q4co-iRC6uQ zuJ&(kB$Sj0+D!Io6x)tx)p)b7jAb`k4~N?+I+pi4FNTv+^G1c57Ku%kE=II0XjeQ= z(ye{f*5{l5V|aKpSUX#8?GCw}A7Q6BmoI3icl z%T^4zOnbgW8@DAZux9u#$ltm1`u+R&7p#9a87?XuZfac^u1>th(NA(-xe+&!z!a^Hq(d3g0SI<#mkx@=518KYUwSR-?$K zx=xjkf{ctzz>0)~#LMz>*HUr%bd~IQ)$s~kqTPf#R!bb`!mzg}_s;`Vv2Jd&t49j> zXKkY!DDk~c&$7ixNZk12u|8D1=1~=GjeLB3s>QY@0k4{QZAMP}utgkXNG1dI4 zw6gj9MyDzV7jL+Dn3_7(kM@Rx_SN}^JXo~Qtwp1$wZONwUec-bvM7pH$4Ysz(=6s% 
z|B?+Ca#X~%&bLoFC1ca~9c4*uexvy*h=+K;0$nujZX^naW`4Z8BYN!T=NRJP-B>?z z;%@rABW6HKQoBmygA=v)o3s~RG%yK(!t#x>wHn8(vlkAWYhZPjZVPJ zmp^0{su>~^l`=E~CUgdC0{ER*aw@%#508HnThR@Ej@>d?oap7ZpNX}!w8Vi(z{zB! zr@xMWQ0Pe4U@oEj9?YqqV>hXJPP0V0*lyCctL$#|q2*;hEfMxRA9e?hlAHAu+`?I9 zI(t^e#ieZC75}f&i!#P`yo;|@tI#rzj`8omFaGn-KQoJqfpoYe;;8F4MkrWVyy+g4 zQA|~ij*9ww=gytg{ry^J&YWRlVUgF?j>aC{O&fIn{$Y3hkq6=eSg*#eP!4DSZB8E=y^x0KEZHKyOQ~^v$zz5GuPv+0midun^IM>smsop ze|@`cyuVxvXP~CK`j))>%R6`O965P11Ia|ma13iUbzm!)ZRBwZ+urEh81F6aU|QW+ zoy>LJbjdaAFMGcb7Z*oF*AWVe1R5cS`!>b~1_mo%ACl#m_VVM_ME!~HJbpZ-xmk9+ zGy6s;k6F8Zq;vf9=YM(Qybe_PDj^BTX;D&AhH#sx7OsYhxR%w<{Ih*mkwp8(sMe;e zscCvGHxhxAE)IsdT0bit-3OCHE4dw-*+bvd*H^KG&P(4s#3dx;Z{6~{eEBl*+dh2~ z-MMQQGZ&WvgH%{%`d!m6nbDPqB%IMA$?J}eSeI-o`&qtIVugi;Ap&;k-Rt#Xd~)2O zk{+Zdn$^ro`I}k{u=D#Goafk&Z6*{QwroZPAa&?uiw| zm-AT-@xDAmPd^gvL6e+mg7x+}DU!a6=Kk9hot2qdw)y6pY$`mwV_)9HOcV@|dNAvH zvv-v_7h|~-a&j&>&r~z0mb;YFIQ%?iGtyceT|PBs-fLcQpnYI|w1Zhls5iRd(VoL$ z?Vn!7zI}T!P^V;NBqiX*ix{)=bwvj0h}5Yd;*`CAe|d0d$Z&aPkhGhM%P;$@YrhdqyNYLYoX$#HdgdAY05Dq&`3CTwJ^EBB_a$VLIS zeeP$BZ}QDR*1e>pikg}cdW{ijS3F3Q3JR2&6;owy-;UC&3;Ooif=G!1zf+>N^Xh_v zv2iLAXKGe#EL|K@Z^4I)$QAfWWwt2A%^YwT3f51p3g@?J4&$>*9b_Ie<_`TnFfc#Y zB0c)gE*cpOSu}8``fdGTEIkjqqw82n(auuxV33;k4@B$m=$3!Hp$mly9ga2{-o? 
zT9H#z8=GumXHSeUoH>&e74`O8O;14quh8^|!$nxNv`3>6LXLA8uIo3+gqNaPgiD3Y zA{v6j*&+@%vnf67x9fNIkIgme7+D^UwEGg_@Bg}r#!;fegRG-9N$$jgez|{%acJDl zK*Oo8@8<8ASmV6S9{62*Dsa=-?Ex7*Exj1Q1zmybzBC$1x$exgIP+{}ORukV{-}Nt z?(eOO0;PPIULuo*f}H%9Cqo30l9Vdt{gCq4+8aLBUBdULZhKTYugqDjgm)Dd$=aM}CS*RI+rfXPO?Y$7D)1)1)5@=enc3ME#Byog=6H9$z?t2>8r7rX_wFTD42+E# zd)NeyhM*4cX^5?sZ>~2Ewhec7cB+;-v{jV|5z08bjG;InKK`gSk9hJY#wjapzZ9_@6Izx;bPU=vv+SZt~%HJ z+vSwhsHlsZ>yxfF1KvFRtr}T(JsTo~Qr73ipIs)Ag?@3fRjdV)U|fxvv`%r*SB zn)>>*H}hF3DS45H4<0-i&_{ubGo;wsIfpw)sH(Y!0Tpdwo}Qjmg7-6XStpv~ZX##@ zu;#27scC4iWn!KjvKqFuvB^jg9`!#+dmX7sSoYGTe-suFdq(&M#Xq=2M6_yOoqJKQ z*HU0HP&3YkzmIlgbd0p6pwtPEF8%}<$TR6q26Qm&%!+&Z^yysW=315AuAwuz>tTsC z4*j^Q#1p>(izjmIFQkSJz@)US)>e4}*D0x6xc>%w9P>XvUoq**nI0)yUkg*RU}j`Y zvUXIe6)PCYW;JMv`seY1#a<+_4REv0n9cy3F7_)Ho##@MCmR1`TAMU0!nPG z{p=85>bTSNM~YE|Gp??by8Xt+uY^b^^9y};r-T`BSu$Ky@B8OC z%p0OmDBizjs&MuCb^ZWya&m>D(v+g~mATk*w(_;+Q>P~P;lzfwtkh#+na-Uf0E$ia zd!XXt@quS-Wlc!H)Q6golUwk&#FUJ(E+Zb_)cs}+Y8sAG_^nYB-^GdOa)>qRNH^@x zOG8Py7ckyes)pcfNr-ewMz#c4X&GG|M^%(lF*P-f>!Q+8?`vvQhzyvYZ4?>BeJG+N zq!)aD%Csowhhw(B(ox=?tdNqL1%74%tmW?RUQ<&e8_25B3h)y+E+;29-({4+_Y!4P z3`y25jfLS`y?u&N;~rd8tA_fX16Yy9f}-Z-q=X1rRn?GM=D5;m$Hj@gd-hnIy(uMS zTNwMkP*zSZ+%%5@$Oe>2!Cp|evQLt$mYy9JaVp?pnTF=wZTmLyzyP`(& zBE`|8iaI%ivqL3qxx*j{BqOX_5Nf_IKuI&{=g)f{S#@<1Tqa%e+yC4@RV{qw^5w8Y zJ=d;X6PJ=o&&0|&hS$(bh5gKPL28uiEcBFIJupTfqi-82DGDiyIE;Z;I-{?6ybK8; z7Vl$KRk4ZV&!$reSx*n8-SDMJYvwg_=rf$*!*08+c>m75vcb#>>+9<_$N+o+zBEEz z2J3gssCZ1nS`LtjY_82(b|2Hp0TTDfa#>$V(kgc`?andoY))bU==e4bEKj%Y|9ASH ziF8}mNhTDBlSf4@mBk0uk$+XooC??0*H6?i&z2;5+ovS`zH6SXyUtBaOcZ5gWNcS0 z+C@f#qL#p<-hBJh^HZceIr|PAXhVW^=5GfUTt{xP5w%TM%a?j~^qef%wA}jZ*QbSW zZKO(-wxjKmWP(!{3s(z=LX3d>f5_zB`M7sffAzpN6YkJINvW0FDyBsVkXmS^?~xT0 zbd0FmgL%g}?ml_&@ZpJ?qvup(Q7;mTkm(#ok`jOwMEEqc*Lv;xe}NVZ(#II-^cjx) zUbK3>ny_kC^#Yj}4ASXSBRjE+@)`sgZU|@AktiJ9{PWL0nLuyBu}W&k0TknNS*L^8 zbuO56T?m}-b6(VNMxrLw1%j8FCw_f9(?oxH`9%4qOM(Ml4B;`00MODoL#1=)PHVB9 z*|i%tQetDT{`&PxTwFZMRG8w(kyzB(k@!$k|46Nm9hZ!OP+T`x`_t~0)a%J1z*_;v 
zo#)$>y7J8;iA>=*QCKy&L@yaaCq~36fN;eEI0TX@BR{|D=Gw3bGxf7xB(J&r^7VQL zCvaiIAD{fl^KjPN5oH-KUmi(e`CS5K0D+JE^;Z6eG!2^<|BZd8Afv6Sq8^YdH zD!ht}j0{LT$*JdMTDGj0u>J?K#L7`N&D8uphnrvlPknq|O_pU4ff|U_kXKbr{`&Q{ zyN5?@W1}DK004NJwv#`0=>`Z_t8fueI(SXdJYxENWmusq|=*XPvo&YnJfnNzQ>De+cJPo9Y5Lh2vex04?_ z^cn;~S*!ujiMUGq^c30*q6#R1QUd6)<{GqIf4GY*UA~yxr0d>9Poe6YJMMXA{pnC$ zQcxF>XBESNw+K`XqI~NGy(FC&vGenQvOlX%y0-pmNl;K%XTdpCs#enPOXk!syD#;{ zw@o-6A6s5st<}p;ijDoSqQ=O?WLYrM6_HB1f4@8cLM`{8P&tyqSLdzt=VpinRkF3s zg8(y#wR-Yo_cc_3b7Z&3K}FjTs@5Z|jF5Id4D#1<#et!MNy*@Pl1nyhQ3s^L`Qtzi z2=4y*^XI1kf=e=Qj~hDnIW31_?=JRM1`s;XR1r7=*eMe?cl)UGE(8v!G&?)H=47`V zKvNsdjf)rEUdO~ZM;u8Xx1Sxf!g(N>-?s&F@h6Ba|9#Eu{}bWuzxa|JFP;CQqDp@B zXa_k3MGC50E*JgQ_2Slr+Ol+So}r`rq?awm&#)&vGBN`Mju6#Q0!`BP|Mjmo%D56l zZGB~)qf+z^-@i3QH?l2Cx%Hm^CpW*P47Tpq@iOLCGa=i4ac6T&O8}?@A{uou38=Sd zuK8y82*LEDqoWNWRh{!XM0~wl4ecK1ZRab0_Bl=}T)%!jZo~sJ0>XZzDf%IiRq(%+ z`;VsF0Wf}*F80ZRS#fZ1SkL?rBczXkO7E2M=xw}Js77-Q0+Tjf~ zUDjW|dKHgY14PsHkDcf(Hv0a-(-4SCAs+!&<$JQj|IE!Cn^8p+-Cu03%g-i?Cxz2r zpQGjElyhCGI0;UY2*orb&trf<6c920=j7kMFTpu@cZGohj z3Vq06q%}zuKo;rPtPO$J3@DUrlwNp!cDS(vDdK|V4^dq`J*HEqqL1FtvT~xd8oeU8 zOD6k$MFrRGw^i`ep8YN&dNl9d&o*n zEAylCfE}X#tYu4g2=x@Gm~?*MzI_3~!AVFkhS(gODu0xki5GMirgtaqkgM!AD>tDF z2A`w~i`8&Qy*Vf7C=rig{EPIbSIm%LMF17 z$>j^_d3yS1@-UThgt@m(=^)}CQ27IX{+zvy?ac@>s#d`iXiXfJmUUHzgS5(%TWm4 zs<LQ;V+!^F=1Mt0s~Kjmv2i!E{B8imG>fzJxQ>vj>(pTBJz zyLe(65ZY*I>RU}+U2CXWS<+&^tCnQQ1;+gPPkyvq)<0!NlYMatJGjm_7rlECu7t2#rDJqh!~^8*fa&@ax93$)Vp10s`VG;PSI>5 z*WvToS65^+N+$-cs|dN*eLkb$``*&&%HycvZ8!{VrH*zdeKUv1p>zUd`^N&EDI`m7 zZmj2`ZfTL$8jY3D?qk&6*{TaQr)@i#;NJ)fp1%86uv|uQ-NH*>9}>qhcHgL3Q5O56 zlkYHzVZ^#%GT!~v(=+L_v(lM(1qEks_j#KhlH@9yP`2W+Eb*!#BzLhHjYA>#cgTA& zN3;FyL3UD6T>NQl4Fg*Z>&oVUugJ;>|m= zZCPXjQ70w9R$kK8M+kMt?8u=5>8V%=E;rC0w!-nBT^R39n47Z}%bk69lKlWl7?GlI zr2GX1PLl4~<42b+`={YSrOR3u$0Co42Z;Ff<(Y)e&n`R26(bPER|c~r z|8km^whgbU(i||?CP2*x&%+AHA~#tJuV(8vE)@8pz!9(;R9gWM)9HN8%D9sc)}>KACrr-7Y-<%ve^lm{s#UJp%r<&y5eNAF3T~Dlnp^TMPmlRS 
zX&kMF;X$Ucs#njS|1Bc2(eqE&1*;*6(e5*BrE{_Y(r$qF4Yl_N5B$F08hU=FqQIwY zi_#L?-8SWdzX&=m7&C8bSLKVlbyWp4QT!#{@z7;+eG$BcQLGs%ZCF{JKgtK4_0b5s~$|7s4w)X;1K)U+gWiX+i!; zo+C$(tUW`EC`6+m;@JJqZriqpnV!zJpVdPXOVppx&gVO{Zu&!65=K4{lO<$|*Bf8o zJygxS{ZCeQHkV=RSzl8rxz0C*vf<0$>i5a;V14?El3E7?wbG3;O7m)*2L}fYpjHjG zvsV&FRyE%=+~hB$TJYiE-2WBv)Z5T(x!O?^Jj5tTV3 z5zmykkFK1q3?$aT=;KpzaY;!9+(28tnHKVECI2g{wst-~x1x76p#rvJiO_5XOd4xy znn5JeQqqfRqSq>qo3(oL^!=1C=by|^IdntFyQ9qKo$P#rw2=3=KEr1#o;PXP{sJ2! z^o`h>Gjt$g$sRNgqW%{g=I#kCe9BSS4KWU*;EcRIWc0p>vF&*tF-^g7U>x^MeFSv~ML_1f9dQ7@T zG1y|k`)QYr&EwudOg#)x`E~9o9Xadzh z`;3g$Oq;_Zo4=Yiv5QG4@=V|ggy_X@oe23@>sx`~iBO*5;kW;6C%O`J{!JH696^H8 zA`>KnWz)(QCVErAoC|w5u%}A6%8mm(JyAgG1kfdJ3gQ{ihTg}Wc2beTX#x`kSFmj@ zXv+pXe;(D-qYj=silQoe`SOD+V|A472iB0^M9;@~TU5vPJ8Q z`{T98SQIALa+uhAj+@+S4Cd%_Sv8@pX3;2W{kk^Ou4YC_lXnJu2!RD!X&EF=kLKmSFqd$MX2?Xi5 zju#*LT+3}4F1l-LYjwxXO1?EM>i7!jUA}U~o9^(TL*FeoZ9$obfRVWU>reqDrKEf+ z7Op)zN+~u(EN|xq0ipcvqK*yOo`h;1kM;=UlH7)4a2EXb0&w9{0A=*(f|r)==bI!8 zaGTXNLY_3Et%mRv;_vToS+)1A9YlKxGzG|aadL7xXGP+0SfN>~!p`hrn=9gBKFQ*Fm3gR z4<8DsuPErG=MO9ctx8y1=cqswWv>hZ&S((jIKgjip6u9dlCP5S1`yAtE5grD8=~fI zx?oC9y`V_fjZW?2b*`1EZxw?yv~D3fBCY{4Cbkb}vr$_lt5Y_S$I`t{#KbGXI6f3tW8c#T$Dg7t^9&^rIgSTpSNZM~~cfb#Zbs zd;QD6nu3zX;6gprlooqPn^<ST5q~pXhZ@g#orzuSEq55jC`>@(GkMjy^le}@5?djXxRE6;%djvohscMQywA( z;}ysMWBbU7?NbdnHsPI@=qh?yD~jMjIPll51*@4Be*D${l2NX%E|{}^G^5;9R#vuV z99s8BQI39>)ij(-lriR}btD|iIc*|S1qjRvaCy6{m9)t810gxH2v~Bc56%1rmF?` zZD}dz8rq6%jIe2_T$MpHT4XNho)wG@AYUqmTVq`5LHrC&g!u87Em{&f&~dKPZ=E_U}l zjUoSNq*eKKBqGv*s-m>*=sX#S<8z_=VjSR`8Mdxk&hr73NZs>G$Zp*J5@4m(H8o86 zCQwQ|eSB;k>{C`J)QLYFo5UqySG1BKuGY&V*=Htyjm_L5>^>`=4LZ`-zQ z2>8d&(TT*c9a?nSeY-}XMc)4uAt-hy64rCsA?TEVs?b9s)?O`C8f=>*YMXw4nX{sC z8S<4z`*M}YhBl62_^aMGvz4nRW*ZDr8iNx}a8h8|nFAv4buyOW$^RFk6k&;1A+e)T>HlYWb$)G8<4SRh`GW z^BZ-$0koS$%bjeRdIOplIv%1aRea)MsZ{3~p`Ij(I<88)>7IrS@>-m+VxfgShv|d5 zO^fTJ$B;KOhiuUL9Qu6YB$q4Vv>u65(#ffW2ubv@c(f2oaa9N2E&@5MsBeMJ3(^fA 
z8wbT$=7ub~GGy{LvpbFBA>gCC9bpkFIAh(@cfTARl(WS&bKgHc!*W7!P@4c^btu6F72DaSW&m4}5sS~%R`h%RlmiWcuJrQ-)4|eXO6GNE#MMjQ_M2>jX ztIg_&uw~)_Km$r`%L)>Iqw1SX45^mRL!L*5f`x!Bybyl9CmoH!5gH^`g z#Zy|Bkiz|A3H%S#XV=+BqXg|D;p^A0mZL8zlz?JRiBX|+bo1K=coqiqnklTliX}L= z|6FMQTIJxI<2pG8E%e^he9h5L@HF_xLds8sLW%C1Y>Hyq1vqX>mxn_6VMuy)PEGM! zZ}+y=)U4qKv{w?Xoy|T9OY4L%41XVQwvccvkYE8!3k}O35FVTDYi&AJgnz@^wfz zejLIa-6)N9WpCa*XF4ev930%P#Mhp#p=R0(Qajp`4h^*^&8|~7Fkp2)2;r9!iOe4- zKEPVsuBn@Dj?e~NtL&PDiKIoOwdGhdNV<0n5KBe~xnEA}F_e4IG_;?83*-h46U zo<(p9ymUNdg{Go@+qu=YVl;hIoSI|BOuOwSdOF8D7Q8H+gv{**KL9$XIjq37E%_a&Jh^Jgq>Gjsx5+&wl|hK%v-83 zK!y`pn|erBCB1}9#xObzYnY12A{0Di1Ax(Z}l9ravck%J{wd>3?9I?=Tu zr(m|482dl1Hc0sea4^Ey064(^7$i*I=--)Ol_tN0`Jdz5#)+unf!!tZF2$+IA3i64 zFf#ji+bl9t@yuh#m~$g+{!PP$Sz0R7*A8{{x$2fBAA+>EZcNrROgI7PU;z8H_cDxF zif~7!9-e`-F2x1qKbdO+dno~Np0u0j%@6{o)mhm4njI%g8N>)tQegur{Z@XzOYwXG zzs_v?+85t6(7JXdvVPZ1XAsoJtrbSY5F({$^XB>i$2-;jXI8#HW1FkvY~(dD`uy4* z`oes+01QinzQ!m<7-ib>Ow^3jI*;A2J`-4Q;>tCKGF>gQu`0D!?AHO%)9GL^6;oW^ z$5s+`E9N?1YnpN0xneD|Dyw3s0dVaVzR1ixpD<4JEnZ)HW(@&F;N`9zJJ29!^`}E~ zO(1u>43E9T#8~|orM&W*N+R>t&RjU^Fu?ol*)tn-aZ~U`U2k+e>o-sjh1S zCrJmfC$D}8G5{Dg1F~3Gov`D5>58^K(cCsS&m=IE_u~3ue+OEYX%1VpP9N;TKNcB8 zZ5^6?fvgeLA5&%6(fT0vEB2_<1zjs1Ifid78xOb zT@VzNa6v*q7(GHpOG}$})uX(;T&^3*Wi&Pu?O;}Er?{sbNvoiNsXA=Mq&<+5)1Vpf z5?c}AMA10z#qR&(c>&ufV6_ZtFX2|;H$f!|jnx{;6r9jZA!!vf%h)w8Hugf$*FZr- ztF=Y>r}uz5y2rvBOFBfNB0OfbE@SK+y$vWuwgBRFP$bN~z2n%VyO`9fOw zb6S4ih>RV|wSO!m%7Wg0baGQs&F`Vz+5YT-27DnEE_6wmnKI?8y_42pp?oiIX8Ft# zl>&`Hy7fa%4<9_R1_5!@HSPpd93@m#OXo8)g(W9e=^F2ZBn*GowLi44)H#gM zlUfQ(dx_RCq%O7TZ2p1tiPfDXHV@e=pVp(4D}f58wEFVQ9+Yj{O8DLE1x3RNZJyuS z44%zi?c5nt$gaJP{A&mkq^Zl=^bK$dY9ot9NKqj`Wg)1Ma!a$K_{RdQWAAuI6lUrocUJW`t!AT1l|xqAgQXZkO|$T3x-z{{R4tnQi`~gPzpKZ zFv9ocmR=fMDbiC{#j@83fIFj2h;Fc^V zC6#At%>j2?Hax+B^JU9J$2p;twv^agWrOAJT|LLbaslpPE}&~t_JqX5Pl8xp`oI5v zI|n(Oj)9?goR*EP5w^E?1Um$T3c-QOHtytr+3p7Cy{5lMbMGRiSi8G*?&A2VSIX#k z5%PuKT?oF0UhMR!6zZ1MXNDe<37dCo>E20a8z^M2guoMO+FL{!TCYdA4FVV#7>K_I z4@WpQ=n530dsa`-#?9d72f?8x-Cu5XZvO>VSNm 
zNcM=2ImuzqBD?cA-ZZII1;2Qq1C7^&@UvnAzBUO%tm(0=Jx9;NQkSZlTf_YO*yO_C z+Im+{F9fzgfe%9jW`L)mcMTjH-J;q7m(l>=^YMljIs8Bl8C(i{B56|l7q73~Sht?= z-XnH7QXjja2Z=`l3n7W4Cz-8R4_mvJL03)^SRlb%Ot|F>RT#xUUfZw8a1uBCc=rO&ZV2?V;DM8bR z&=NhDznSclRx~RoFMk(et_ds?_tHxjR1Y6Mtb^{_cUxByLn&}x`>+4(l0csxRyhs` z&zuOC3ys37slgC|L6Ci64PYn8J|FbqxYhdVYIeIm`-DCF_g^6R1^f)>k;^VX$vThT zgTIZxMg-dPR;Y!cXtRXEdBo|oWS$MDdiK4~r@`tUKQ0oQ&Ml{TX_sK3;}wkR4wE%I z;rH_N_U3~BE34Ka;sc~pE5r#DYl0PnRB{6wZ~aE(x;M)J{k10g{r$AQf_I=9N>PQqcNz^RQr%yRBH)FTdQDC7%gmr||j98#a>4{aZX_k0}dG(%Sjd!AXd}pB? z4?&ycf<#4_QhA`qa!gN8)1N;5p4h?hT{#Mr`Rr{147+3;VHYt*m`g(MA*8l|?k7Gz z+~|Q2hYo5?Fx+;&s|$+4ubC`zn5MfZaG->U1C?P)94OO6U3zt!TKBIt6O%zulFn%q z$-_cAH^bGz+kXTuSrm+0lYOOWxdBXsFNYWzfjHTUZXBd)KI4ASQZ(7yGPFcsG7s!l zLcJ*4+;GGJNP_w~(o>iX+{Z-yko`JZ1%w~D$aY*cDfAWLjV3HEzP?JhJ*HHxwm0@V zE@fzPq63p?H`#|a^(FV!9Wqu@5rRqN%!)su9W>*RlEZ2M@R%rKAZ4Vf7dGpL!$O6B6a%nMp3*x_z-k|AzSr>SC!*L4 zwxf8bd7@%KGa|Z4uxz)csqy$~u0pKN)u|#47j{;?b?a7uix7#7-7?dH0{52AnU2t> zsIp&MvN$1lUq(em1)d$n1gY@UjBH1v9$kg`FzUC<)uGEzYAQb`gYcKOiz74=s0G?rB7=;D70|^Jc#4Uq@!`B|V3tL_gR%e`KfLkC#P~wIbYwbms$*Oul&yO}|C+yu z!MjAsFt+U^ZbeV%E{w#1P$=Q>E{bar28npd(2{1t(JMn?UWSpbkwgE}B;Ef%?LYs8 zE9k#?nXu@%B`ag{2a{)C1Ilm+h5}r0!)`?nW_5AjfzKaZF9pXrOqlgi*=7vOg&gLl z3|;Y_izZ*4Fpt%GjV}6D2KpsLXERrbn0$+p`u_W0QVBon*3T5>mhW=w&ylD8_rJX7 z?CRg>42BEX5k5ai@$zaqgrkf2X_Y&5>J*V2K>%Z|{F~5CU%@Dmf~@Roq zA&n#gcplR2fZ-%@Y3U9S?i7rkC==aUC_ovIC2#tp88iow23bP^Rf&tssr3Pb2E66d z_yQVMg*^)h$d%qnmjqmkm@Inbd8WO|=mlMaon?ANd6apG$fGutgo|DVYQq`TlKXEC z9X@P>c=s?t(N;nYBrGUk&=`)Dg_#Fd`u43En6ee|<2RA7UtM&B+5dV1Arg<^mbXY`;$ICP)#c&LtY8-IzyXxMMJ zoI+8r`-K}^(aGh+z7%YYrpja3;|hU;p#2WW^d&1WgZxMW8L)Pv0isS^CS>%jZa9j{ zXJV-^50yv%*qi?}rtE-=o)fl{D}*R3Wax^b6*!26X`@loAemNx` zL*4dhWQ`af{z|n~bq0x6J%Rf9&qQSuPwOd zype3F@C(BSt+-8(EW#p+E=?bmVz@ab6qxc$Wrh;z<%bt>fg~MUWuibH(F4y`ok8yq z@%EV?)d~|l=g-TyuCzqX75A@8A#1mtpl2n~*48HG+%T082Y8fg+N=8T&jYd--!9uQ z6{BDKzkE3_GhENC=d{Yy(fgMgM10OK?fq$mgJ$sl(Vi7dLCdANn19<%O4^R5u#w|2 zVXLt(>==@eIN42$R{aeW^j1D>&xKUCG1ZxG=mW}|pNi}Rcbx~6iGye0m=f6L= 
z<)r+27p*VW+HMRZwXLHOC`WiUnu~HFfw#bqk^_b__~XY}Ha2Ns`KM2w$Y36!En8n2 zz3xI2B7V^}#s)FNT7gzNsYqi8SM8~9F8_A}lY>(!Hv5U`Z1hct!3BX6kRcu*Z=4}! z3<(E{|BDy8Ubi5M*wk}%5Kg{9pK;ifh!H^i6EU4fe)Q-F;LqLz2NbY7){uTu;NK)B zT%|is^x-QoVwbAJyy{4WW)aM7!+9pK;u5Knn5`uAPPD0zUWvIs3`%H#4G~k{z&jWW zj~CgPh8p}a)XEYOj!{k}h?MPc!iDFtsbqZ^%;F*DpGJ!Lt*q0b3qj(%Nq{9VA*4f? zOhn-S5rsN9(v_P6K%1PInQ6J2w9i#5;G1`U1x8fPh=GOe-M=5R#^teE?QLvuv7v>d z@J8}t>A$@I>guUzz!yzO^$QMzU9^&=8>!pF~C%RqXU2Y{f zlvqt;V+S_Q8V0j)L3>#oM8noZKNuk52{elw)E1I4QmH`#pXh0#>``MN)d;)InyMa3Th_sW(VC;<;VCJ$NQ;wvI*~GjA zBM(o?Y6{y4W;Un=1e=ePL&2$Pb;6bzwvA+8`DHWLTlY1z_)8>gGq_g%M@KOTPo zq7vZP1Ndd9f||IsGee&<@u^NJS7t?tyl1jrzjojtQ!J z;JSz$^G+?34QGW{`7UI{e6GNzEvmffi>8dyg%Gac;SmfzJb3V+X5G;j4I3;nF^SQZ zqBM$QZd&!3sG?!Kw@{UI3kQS(^S`qq@0)KKph-cwQg<{J_M8bz#=(#vNR0LRhYwc) z)-U2h{awTodtm=iY*z5c+#HI5ph;)eHFO7CA$vL!_D?{i7gq?=Gtps!O_6X-2G;r# zH0=Myi0Un!swNEH6Ve?aS|e45x4-FygilaeoIFXp9t#iRX%P@sq%hZ+Bn&-OwZJ@D z?vB~U+Ole%aqxTrtVcMkudalHtdF`=Xf~mS2LuKtiefp+w6CRV?*XLo=1k00{CXBfI7sN4QRT32<*H13s3NK#?cP{ga_1#t!;PftG( z_*p z_}YF94Sh#@={j~ru^SlW2=c+>tEb;VBsqBWD7}|d0PX28UW-2fN5~)8jtu>AbT#P9 zmoK;BwR_)>*s6dl;7Id+_?-q@d1RC14T4##60=~qxQ#~LSth1GNS2nCZlfgarA_qQ zO~I=E7H#qAsc%g%_yWt8gc>kq6g&+fJ1Vq+tN5MBZNF@;VGEep$j zq_2m$xm++8nPXyp4Uz(h8wwu;K@zBs@amOBx0S5jiLiq)lD+-DSfVVM9%PqeFrqby zX+vC89TaK`Fd8bpen8aiiy9 zlXTC4mGK>1ByAHSBt2!$#|i(}`z*pU{}WldavUBrH$pV(8b{k~3i`?^9v%wB@&{t+ z+552)cOaFKxM5ur_s>wR1mS#83^zt9Dd%BYC7|Wvz1QRH$zw~?1IjqiQ~4PgY_Ota z$oCK8;Rxq-D)+;OO+te0cys&RANL&{Q`Wt@47FAc4LIIk9nZHIZ9nI(i(&6X+ow-c zlrp{}m0yGMF@^scP|ph?WWyjaXa~Ey?*wi1K`$_|X9z=KOXyENy2wES{f_*k%Tjcu z{l)?NK3!s0D(EL~#3m$ER9AayNqmBa%YF?I$FRe5owp;Sp*#B&e90pkIM2|F@*l8_{+4fgi%7^wdvD` zH4gJ5XLfvR-8tYjA0!_)&hC#IN(1pSKGPV--f(-2Vd01@Ba z-bmpVd26ZbW?9$FG0T(5uci^AKbzzB_4M>S(gpT_h`CI?I&_OT=!_Dj zv;pGe@yap@hNU>kAFx@5@~m&V1K<3F_n?`25d%gSL3_GPU@m(I`Q}|$7ZuvmZ7z?K zb>i!583^Nkjxq4l{h1%0HFJ-JCpmD-sTSY(qKP1mzu0oG=KJ^WswWRPZ*DlN7g}D^ zRzc0mb=aecW{^3cKgmTG7h$A?oQRklw3W@Rt>ud~+j0)fFz!uGe&)OzlTu5?sEo>Q 
z?(76v)7IDbK>Q~4m}2Xu=H@=2aDB#X5B4E>eLu2KoNiH_7)yLj%!S&TnoJ@D92~yG zZdC>R=S!3WMz>Vc70SXD7YX_5hKB0^gzvM6*L6$NYx??VxVgFazN;mo5II$K*ZYsg zT9sE%`~6K%|7UF=8^tSB2wdig`{+|3bB2T_}!=cP9kBUFwV74Yt^Cn98Lz>PNwcoa;O<)b?USzH>=@4v0By)8+8 zFGgiW=?(=21)be_^T}?ww}FXosHjjPT{J=8)TY{_5F?t)xSb^Kpl7~We>?xN*M=|6 zZCXpeq_ZwWgBOU`N=Qlu#PcPJpZ1G_g!vQjJ~uL&Nk$o5dffIgF^6~LZy1a&VSwQL zyxYTv+tFuy22AoA?`|tI&n`Q8SV6wDE{oo3QB_5Sk(zky0=gnGD6Og{Y8n&w|v79@{4!^V$dRc}K zUqKVa6LOBggY1sG(EgICeGIPccDAS#v#P5BC_{0&Gu#OW6j~NyTPTdQe_Y&Jm0jbHdsm?c8o#CMISrKX<8yj9#F*-2sfrXxwV}WHGvgn8j%c1X6iwDVlxP^S`Ph7`rUhAi`}+cMU|#|+z3uNmxmAE&mS=SD>g#vZ=nPmU z4<)9v$164GCa#v>j-=*9JMzbX{N`^B+ zh^N!6tfZg|@3YXjJjKCrh$DDER&X0hn#-5N&ypFo%R8IR4~BgtPTg zlH}z171ho#mm1Rn;wt;}^(_!XO$51;`P z3lAsf6&SkyI&|m({0mNKuA{rghNqC6jz#LM1lD|1?)@uB(0S#EmeaKRJvnEr*4c9{ zwiBACWhv|A1N5)Ry_kI0-hLD-;o;tB>U@P873UFt2G?A+edI5a3*QP@|BT8> z6%NZLQlnz|zbYq_y@LauSo2xD7PCGczz;uPCKOS8Gys8*nd3;q1k`slVpNCyA|iay zH$Q#uTv}x$+vAl?GGezv?r38>sD>xf1mCwP6L7G6eIZoQs$*qk>^Tv;iLiEi3>Ezej`YEOABExC@|{GoRrA#alQ`YBo#d1K4S1a9#0^SAT5D&Y7843%RF>=M9WZg=wD zyUC#~rNZnV7V&^7PDorgw1WkMMh6BSf-{XOeqoZYpI2`~C4MD9Nk5fLG7Fj~R) zj6g2)`*!`V;-8^!v!jFMt_wQ=2H42VfrF?@>&C!?`2(UM1QBrIeHN6zn5d|zE)y~u z!6#^zzJ2#@8!Bo0ui1~c$5v%*T4_lL-eBT@+=&yxTo<|jiO3x#rK^uJ%2txQX8!V< z8hDbQpa1^n*Dii~8X6k$S|;gzf2^4?JpxC)SC>MMC)2#XK>jG1!b9=;)%WU^mMCpX z$=&mQ(PbWZ#6UpI@JH&MrPwC#`P9V3-FQ}rl5#sld~}y@jdNGJ{O1iaxNB(m6k>`F zgv`u0Zw|h~^LVuJD4ZZ}6Dsuf9;K$TNH;vFoCFsHc1`-2n{Csjhepx(q;lh!lN008Ug(l#IQ_B>0j+wb) zIMUSzyMER?zSL=%1K9QUk#|2`H5mgV_WVp*1Q!PfvdD2Z#y%03{dkJqb- z)}+IDzSAky;1PeU7oaXJEMeN67}a2|*+)N!M&tv+5#^-)gJ5lV$_G=c8h(hg+tAQp zID28i>L`&EM=16vj2tlglDQ4lFwJ(yj;+#8Jm8NYbzh3@GHNJZK*c$P195{O7ciRr zjY^K}{{ejLmZm1@@#DwqLb&$9+Vvjr*6;aq5)$}RPvEIKvxXv(PXMJo@Zo({&Ej;2 zN~YfLo0^$X8Ms5c23U2 zLzXa{#Gf}CfqpxM%uho8&r%I63XlF)#FN!zW#sCdzZ6s^KtSX!^!d?Xh$$NP#L&rZ~lY>WZq>P7!g-xMlu$=@GSj;67GN=9A zp4>Jw^99v<+ueN}W&Z5BbJq+FnULrFUcTG`5b~k2@-z!eYP*kTT3XusBGlEdFijoI z!%4)GK;3+O$F@_kV0j=-J$)I96OLoh=F%=XemHM-*I)kt{WxK8(LeBl$Di15Wfx&NAt6H 
zZcpEe14qZoG9pa^v2Xv4v_IYD!2i7r2#AK{sV$!<7(^c+uC1#ZXBnD&N&Cv!6@2C{ z2PGIa1p96HOf<{YM>8{)(Q92&!Odt3u&8A1Ks>%h{x%=1A%|Vz6dpw)ssnBcga`nM zHqADYDLgLXHkd4c3GTe)a0r#GbtPszfFXHI~e`tAY0Nq7ZmMWjc+fmBH<& zNlA?00{h^(#dpWRp=dapnz9hb=3KT*vZS>1+wialw5NBSos*@Ab^8x(h(mYA9Fjq z8%S6R4W3Mo5mzt{on}LE~w3 zNeMM3pOiwfc|=4`VqO_i*8_Pb_v~y=yu&EW)r9&5RvM|C79S@iC1o0p=gtAo|MBAo zk(gji%FNDwNN5q()o7yH0F+O3jF&kriUyQvztkIzFH;qdmHsL2|{_Z$A>C=91 zKYzY8`it9IJ9U_ah#*R~--HTTImpMfzs#F}$1t=YExd(96cGPEhcjb@Quwb@d!;%!K7R!U?#15q zN3?O&-e$;XO?S7+oti&BP5#MaqyIIFDG82r`b2 zTiXfhQr*w}@I^&LIYw1=`>|uaVq#*Jty!~^la6`7$c4wm%>9@ta7g)a-T!lQilnRF zXbQf8xqc5ofhyN>JG1J+f}Y=oPb2q?%eqm9r7_x{79uVG&D$r z2W);3_(!I5*F7!Yy?b|Z+XV5-qw?QUcl@*FzgFhAoxd_m@l)|6(*C#nLt0FAKfV}b zjys=<>Vajzo_{{S`AbYS>#*mySXf@A6VQKrm#IViw(s82^53gye$~;$jQjV!Cyl%L z#Ho49{QC*x`v2EjHNQ@Noc#a#A1+q-{qrHF#b3%E55gT_pyWtGt)C~3#1I8!7hQ2` zK>}(GF00`0tJ=6;lij^$Sl=E0Zc{mN96~(5wt&_Mw;~+Ps=ERb< zXiTB(HYuXkI% z`qvP|JT||usvUAtEs4xYrB)PPaWZq9f^Nd=roP8fJ*reEt4#hUui=ZsDb#6+08Sc% zi|O6!ofHZ>c)Q8{gvFYeU#mAqW9UCO8uJ*ISQP*|S@ZA9um{ zYb+QwPTwMv(~+yD*sd8K11Y;CV0xT{bd$%lT*!}Up`@)bLuQ=We|xPz*wxM}S0+h1 z$is{wQwgr2IynpFnH?!8uzY55Us2uq^!4jq3UW`uuR@?3r>Y$cs0ExJ%pPlqgiHWw~sO@uD#vru3M(8 z(WscN+2&4UrMR#MZl7m2r+;FZYB6GMo3dnj*eL_OSk4iL&F{Dw~cpv(HECGec7`90D1c zi7NH3U*~dVOc)9)N|A>8(rlFg1Gs?J?(ycA4{B&?MxH&pd-v|GTer5AsJR`*WTE_{ zH~Xq>u1eT%REc$UmHg7nXf)_AjR$HG(#7|h2#Q;J0pH9X;%77BK3z2TQd7M_AEM7` zj2wA@(`dudJ?U_F|31}xmw}%vCg&3qj?;oUK2-8Qabmu%u142p1f60~nMk{d_$4nI zD(NJjd+C}GqPd1vH<5-UQ8$y7P%_+2uWm=^+C8!%w1h}rxQ_NUwKsT~@KKuqoQO_O z;!)ez)8N#C?C~y9-rZ-E^Q#faPC`58q_$*cv?!Lk@8brozZO4eTx{&t36=# zN?yozu0RXqTw-DxSDEujkF30fByixI!-#iy%19A66{Sg&Cf$7WsFUFST0d7UUKvx@ zw{v`)3#R1!_wD~WgYz?S*HdmcwUCw7z5f+IB(^i5ri_GvY9op2709Z7xu%B3nB})y zPEuE=U%v%S4>Ai?r{#1UIUnZMFI|A5BFm~$q~TjgKYunjCfL9_l`(87<;R-uf`z`k zh~0PqESPO-d8z8?XVtXLopf^k`;sWspPd)FVy|QgRMin0YS*71@?!gR>Zdj0hX+O+ZJzb@4@yoOw}ZBzc*6NSo)PrstSz~Jgq#p9Liss#7+Gi{J3 z9}L!$bJ{O`)$Q>e1dFL)13$b=iOV~MlIZBAcT#@Vjm9dsAdC{*(?>PWMW^t;-(+xx 
zPm>Vv^`C`vnSXrJc7e57G=2NLk~&8#ow{2|^bBA~TS(k%jA|7?!=i-?C5fgrqo7B3 z?zAKuO9fIASnL22arq*5vWEvD+E&f?m$7W4vWa$2zwghdP zO@FStyMNdDT$h{ce;J2f6aR^eV+!dFaZ_nuzmF!g9T40y=BUV26)_IX-&+WZ* z_+W%;_i^A?z!tjV*_dn)NM>98HM6BfPut(W8XYygQe;a+wG$&N-K5-aJ6>P3?#ho_ z^BX)jFZACd=b8AqJ5wWjU0&W!y7%y*Q4qC5cLGWs>RO*^7*mdYnX*kAk9UxgQ^rXcue{EmTm#i6tZ}vUDi8pk=%jdh#c2?6{Ohu^-J;w_ z2nA&G=aC~l1;$Fy*uGqV;8dS&>fHLQ?3ga9@f50P_-X+o?p{X5ZIazFtL`029@V?s zRBl(u0L7S|mn9Md@TdFXZ*syclF%57q^@vRZemNu(2hW2bMryWA`2B$xvitlS{9l_ zdI-=QAZSMV{oA)?$=h}ss>lu2wsg80n}n)ucYvuhFvibRq+fFwavfZ-h4B1aLr9rL zT2Qcoc3QQ-p*9y-mYVh?H4S93yF!R*|LE#v9ZeI$6q?ApRr#iq=RjnuSOL!moFcH4stu!tJE-Z z&MfE$<^D9b_X(=AJ68VYg_E>KR#kLE!*ywkmn7>wa!yeyqGL90)11{;8xeM>Og5c4 zT3!7Z-#<$lrlhnZ&MGBtY!N3^T%m=frO-mneJ2jFtPUWekis)s0)s3P=#9GgOe}oQ z!*n)g!G5^+;KBaMn?B5?dBC=YogBet$mR**zGpADBqTkcs3~rx9MP1M>+V*xSs7KT z`#s+^u5@lNf%2Xnqbz1q-MyWVm6_>-jFS$U4X5x^*_;@mHW)r&`p!cT1D-v7nuP?y z3=-|uBl}pLS+a7R$M+4gI&4{8-JE5AW`VT#NxE{Sv$Po@a%I|wSAK`#Al?dm)J?Jl zq8b_CpPSRB-`4)e&%tSpR;>a(88&%xAiTsun_qM+LQJ@=q&{iVLfE~QJ$IFtbAHI* zXQ8TBTU)+3p?t`K6C*BdXQ86!nLJ6%T{u3i@tw=|*Xyk!KHU3r2qxh)42p$l2N?v? 
zl{0HnKjTh^3==V5W7X6K6XXz0>>QPt_qHnU@#DXq#~!9^XiWqU^e_X1A^4|ai+OMZ zOfQ&2iT&;S_j3tyBUo{}fKteEZ6($-dFJYLRvKq>h#d&6q&Jv~yPMlWGFBT%H-_XW z_KIkbixv>@v2c*8Y|`6%FJFedfBzn!Lf>98$geVOTsF7v`0OU(XKXGIz`veMEygpU zoc$h&20=?Nw#XURqWoFYljl!NYZ!F;`phmnetvoGn9frN)U+m;FmvqLtC#ZeQ!vCj zn&_N90i_|B79>g2e6w`&n|)#u)X3>Rgp?4yFG+~v`q zWWvTlai{I0!>so=JS>&{`_&EoBHExequP&Y9}WG*dfkj%~e{GLEoWFjOJ zCdXzTNzCm$K2PktZ&%_CE(7eH1AY5JM}8`>di2zxecLc@u zKRH`bj*Jc#+`9?n$Na&`^OM1Yy#RI~9?&mvC2rs1c#1V0_-Tuq_F)6)f#@hz-I%jS zSC+0tdh((@1}tA`Z+^fjrQV{y)`#ev>qOIkKX~vPA{C2*(3lhmy%hEPTdmEtZMC$t zR`ggjI^g-p=8}@yOlsn{V7tgzzqC?OR_^jMw-WvbuUN zkt(^ntK%J-bInpHAZ>5}lO|8TT~t&gMXX9Fhe3lZZGF9BkDT=}m-nXTTuxv|&j={5IpJAd!ji9$z$&D%l#UKNg= z0eKIov>oYqnd}tNiRlVQb^>Qadj!N67qkqcDCa|VD4o6HNiDH=l=W@bWPE`2%Ny5k zX`PucUK~zw@tX;{Z^s(}XzyYv=GU8=ndLQDg`2kW>~6HFdSyN`(?^evKl(&B+&?&Y z5i>z-2%9sVY%%ay55$l{3*7o45Y2&C`bw176t(EQAduifh!Cl|?o^f~I; z`_Ak#oC%n9Cn*abc;J>_g@C&-8o8$j4eYD!*%$khiiGK_k`iwq`d2jC8eE-+{}U8} z5zBEG{eyn@1gE+JtCi34m}!}&7n0Uv2B z`~6;e`cz%rmZZc1+02A!j}MYmWNi>0XthxF`b`*3l0@-J-{r84-{qrsr!Lm)KajM$ ztq5Mo1t}yPgvl31xMFss-C4IE)&zaC_4TJ=k2)jw6RsZnZAJF_D@jSWh}d~LZdAb^ z(^dejC~BS~k^dzHb;|RjyT9kB0izJoNTQcQ%>EId;|(%ML<4Ln(C^1oe$Vh|Jp}RAjqqS(pQld&49cDvqTl& zTbQ!V$|eZx6-=%xx(J-Y7E8BOsfl`z?hYzlEnBvH1%=I;#<3-AXC2gAb`tJiL<$Z1DcE7+QhB%DGSl~HgrL3H4Q zMDNp6p_qJk_L0W;7|JNLtmv?GFtEf+I7lnW5HQfXIHqaeq1X0@7EE*|@Ka!<^YoTR zY=wm2)2c&{WkBrTX!`etK!gvXFw^#a%Fpk1!PPw~Cs{|e;VFBkq-cwIBBo203{X;P zNua(RRwoHZoY65lm{bw57YrZhx;i0R9s&v!EKHL)i(9PgZo%6Enr;ptWY~m6 zxNh4C!~-3mE~awaJ|k$+UtN7kw1#a#VpLwu{~B_7ip5qW!x%GhBF1a5z%n6eo*iM% zt`?w`U<$_MJ98t$L1Vp@1qtpoXNhM07WAj`JW<>{D)z1H_mJPeS@a(@{fp6^v!ivY ztX`%TB%U7MDs+3`!!{jtl%T44B&>$Y89+8-i6g+BE(O2=J!2{xwZO z##gTo_~2)oU7Gdv#=`gCc2WN%D0o#~z6T`eR(5t*1iz;X#P&CNFW?}%!ln5?jqSgf z_vst8iXXM~f*&xwZpNB5LkKuzbqpf?*(|?(SwfQV!mYTYtPTcB=yrJ5!A4h-P}BS%keTX4%D=PHCks{df z1LjdAlj%>{7eIZnieqkOZC%}9X*|Pps=s_W4VvB7=KlTrLUvUIs}Qt$cr*>~17nMj zI}0Z)clb<1#9!gzgXlb)ZF>0l@wvFMT9S=!T^@6o^(10XIs|le;hGJQ3560DIWOGq 
z#m=*Ba8*MYu2FDq`JdCilLyJ_U^;u>*VU!)HiO{;vJ|ees7b}2`@pgy6EIv>hfqjN z1eg$lE>36Z!sW~P-!w0LFD4TbL8WYHZ9JsNTA;LS4t_$$oS?*rxD&){{}YP%`iG^d z>1=Lg2*Jf&U!1w|md+Y3kge zwbIE+b;it@*|;J!`gafm-+TT%NXo)?*j*Ca?&$djI-tEPzoD>dcp6Y~o0wBGkh*ns zZSARMw%*p%veD@c%OzaV9%}Y-d7ysBr7S9Or!U2Zeel~*EZt2qBiFk6}g;n0@vp6O9>AVoFU}+)Gze|_ghW@pL{N6JG%GPCN_SW{) zkO}1$qOLo^wwWRm%Caht`1m-yjnMPD{gOCvS`x-2cI;@#6$7MC6cEnNEoNUyWyH1m zl~JB+s7-md4nBT7#{VZc;}B{5>WuCIArYBB@1ye^B4r7&luHiSW3mM{_VQ;WpX9xU z-^@J++7RAk{3t=(G@v?4KmS@squ&h`E)r2KZ^V!09M&7+w+QdiRuVclHXa-n@PaN< z1v2AljeXQ?{X1V<*6}Vtwl$d8Djwcgd3zfhJv8_uC-k!~IGnuMfxmJCMze(?Y8PoU zMB3eFPg>r9A8>oK`}(K%@6Rb&J408uvm|=0(wu!1rW}@-rr4R9sdCH|*qwSxU1fDZ zA|AV^q>i|%b$4M*4e9!wm1Q^fYSnq4b%^`8!rrd!zo|>Wqn?4gbHCK8D^_pp)2-Wd zT{)RD&@M5*$<)Z^0=DJO<`F$q3tTpWqta z+x6k|g>}o|&#KfUxmF%NH1o-5H&FMHGW~J=5mr*kTWwPqLniiGy1G=WdjIJZCYAq(7uTTu^-4L>GoL*D?gXL@~iVp<-6|L^ea$$X7c#* zjhs{wD5V-NT-d)h7AIwQDc00}K}hic$IEm*y%ymlKDbUVlSy_EjE{*Do=t<)D7GIMG8_uTEKFYHGN}al}yLZRr#=q%3aADDEY?ji08wt=^fu zAUh#5?4w86jn6&^pmvo+&d(_cu=&Y=Ek9DWj3q60Z+N+BWyR&ZHXTQ`RgjZg{poRE z;#L7~NM(FW&e4sWk=vskoLO}-m+|#Sa-|M3{aicF`6(|ef$<#A7?1z&l5ObmTWG~v z9N=FP@QU}RZ}vvjv*utjbc>oY-o89S6-mf60A~;Kt_#?93sI@@4DE!BdE&$lMWWoZQoBM?^-I0;BjJI~I9icTmR&%DQ5x`6x6NifoiZm+D(>`n3&l`ut|bf%<#>{$^%uM8Wv#Wsagz`!qVc zS~C5utgI4jkgAyG_5JQ>m3;>e43-35m|(M%sT;EXrN4D`X(}1iwWLX@YdhBug~x#5 z&W@jDX9RXSM0yKJ-f6ymeW5NoGFeBh=G9Cv)PxVhM~b&6*r;S$x)Tuw*bqt^R{3<+ zZMs^9s=j}zm%T0lL?pD%mR`*zk0%lKHZC_7&(fMPpEJF~{f>%x@&;uYx$sW8VC@I< zs*DvZznJg8a6k90{9adahOXF##}72|`@UjC{w9w^xuCe3sr!Dd`wgdrbLT=aUU}a? 
ziyC=_K}mgRiObOFLH+bBEVo(2{LH3SG*2hB_H2c0faCSL!=IK^%>2aJj1RL5nI6(G z&7-&<`lg*Trhrz{20iL})6a(Sv{EvGbttEsxHp`@+IoafT!X{h#=P&Lw_1hKzPn>| z6G-6XQc(vH`ga+w$f-n52%T-Hs=4z={`h$=S{vU#T~s`~*T9q5%Wk2bo@)l4BnRjs z1wzeEbMDgU*e^%ZVW4074y&(GhbMBbn&8RRHLvi*MsYp|4-N@kp*Y6%ZB=4&PiO0s_dWM5+L#pB zcl!B9ZMKf;Y+_$)^w!qEYJbW1xXi>APh7RERpV2W6@PcXT31sOj(zqE4pzXuKBhi0 zm*~;#V+S6Bu~qsReFAoqDQbrcp^Of+u&Sx4$vxdIIz?$)`77*t?z=AX^0RePx7-@v zrE=-?{C}Mi{BLimfh5TW_N%aSS>!n7-2Z8kme`E&=IA6r%X}~jmE`emg?QC)^y>{;w7^%_1X+KYTahUDq-rfI_?8gR3 z((KHUlx1PSw<`IS*@-IoH9#rTD=&05y0&hb=fUSG+fuT>6MH*v*r24-xKGXg)BS1slp3_DudLaXQQ`&k}xNr5>%S0>%qmk=wzA^3N*_jczk=K1WpRupHE*n~U3SDy3v{w;s z*=o;vu%%;CV6A%X_0c&)e@@k%7Bjc#TDDq*UDL(5hS<6p#02-PWW(cRY(?Hv^-z>* zynpnhb62%reRvDFJ1D%K8hc=?Vqds@0Y39OD?e%zOo#c{hXP4I>*hb*^Y#e%!1b*c z*()~rYp)nc=a=kxmR$ubu8ZgA#S`VcG(JMyg%%f4zv-<@x5&vGVcMyb4$mIqI6b!o z(LRI6jE~LyX&p~H`MDVAlCNj94QNsJa%;_jjID4xy~n3C{k(eaT7`z#mD2=KwezCX z_#~s!K5NtBuU&)sdfXcD)y(lCXv2Y?^DGMvUpry)wJb@68c&x;dQ9eAWVVeqi!hDq zSuaaX9Kw;& zR>r17`>7hLee-}TeB$yUlE*(6xoednx2LnR013shs;UEYYP$KY_;Q4}s-48-TK$qQ zfHqyU?7&TbJr1L&N)fl8b^H;|sI9i&jK3;WPRswf*0R>Ta$nR`>uxzNg~GX{kH#m zfi>}eEU?;m0IM)$Vyz??f!-y^o+v`~Id#a`bL(@?%ZB%2l!bt-bCsPJccI<;7Z-qM zF667jenPbubxdj5gQKdqtEo2vAx-;ct`g9`DgMWGqaltx@${e}EhPcKp-$Zw5q}R&xgJ$@ar*SalKtdE-Up{Rb6EI<<+egZtm^^Au{_o z6f3&sfvvNGw$q!II8Pbr7eQbJ`gyn z1?v~r)TNv0>3Yf`6=lgE)1ErU##eZBE_nE`BXE=CMT-0iriPj07UBg_TNP9>Bwr0r zpHE2eK67TM)F1P0zf|ycrcRYX#_s?ZFxP&vdvPUEXoAi6FICab9zYeeJV_GGpmK-f ztx^;Ti;ROpwrbNgw7dlgr9d@>e!=o!`Gyp#NC^8WAhv&pCRIa+1-g(+Y0^}r7w`Y$VT#@eFz1u<}Z}irGGp!?w zeqVF^t+qxZiI7=@HtmCgge-J$N`7CW^YD3(Pbr(1`Yk(nT*E6a?xL9XX?Q}k%e}s4 zvv%#!4F>22sCh1B(~FH8u@Q;V|H*xZgf*x*t3{4>_3+5Q?6|CLb@5sVnJpa>2F!f^ zKr_&C%6k%D+!iXtOA_P#t`&rM2giHLH;b5yMO$qepLKZ&n|qP0#IFXAibJ zA-S`Y)8k3`#lW-Pv)XW6;>OapxreFZAV`L%)i;zK8WXYW$D*ASY7b$EENMm>U|-_i zUQ<&upqdi676f}g0)>K)mM3*O_U(G^6OiA)0G*@CQBgvsYdn z9CdV}8f9Z{Cg4$+T9Kpb{kZ$8-HW&0q-Z&2gVl^9-wlosyf(C6K|gTUcP4$uEYmq~ zIsbB1`4$b{)J}o~IM>Fq8M%mMw3KNcno`MaY~CmI*}| 
z#V}|i3KX{!>S4=uKFr?sJ9FmD?JXDX>}_!k+2xjAK$xe`{eT-G_z1F>w)ETgU;C>2 zpr4-wj2BuzBeUYe#|=fBR#sll8`{xfzID>ux2s-zkEQ~uzrAG=VbfxYB1y@~Dr%mt z|6vQ9&a%cd3(BlW@X5$e^EUF9(p&V^?)8T0ffNuL2IlUOkQnq{SL-)0dN zrWX_h3XH^gOTCjAO}=M0QV4b;v#B1^fbam|=C+?(*&yi)=g$2NI4(-#om%?U36(eo4fY~a@Vpt;%{BL*uS`T z?09gnkRBfpGI-8mhzJPe?LNEWAdNPTzUixaf8EuqAB!*e9WpIi9|{i-FGvL08Nh)X zK)xmve2<=qv!8RMK6KA+O7Q7LMZxRVt?QoAqi4@q6lHJs*(3C|2M#p5=B8Rt*JExz zPy$mLTL2M9%+I9kTgDifP~vDrSPx~W*V!e%Tyfa)$jV`9^}=bbC+u7o4V6q2_www)5p_+7 z@3xhgh8Re^?YqI*`6Q%*-U=5BJ^|1QHBABEJ!M|c6tOpj%-1DorpcS@Y%P2Goj0qY zgz{Ltim{H?-RKX!L|kTuD&8kYx`t~RZ?wqTdYC}HRhAai(YTrX!h7>zU`!f*W>M0M zsXRGmBz(aqSg3OD_4cc=?)@tH0|d^vFGX#@!@FnuFbvW3224M|VUxC7_sX?_@H~im zE3kOy@3}txv`_Pp1&}k6`AgA_d0&@Kf2nme?yL8TRPEinX)T}8bOQ`9&RK2o9&Lx^k@Nb!@AUJ!t% zuYLsWcrz(Y4yA=gY;Yl}RrTC(xH%kAnZTFFM>Um1B`=yJCZY>+1>w$F0xBWhZN560 zoELQM`Q_1wWBS45LRc+}g-K6K+eu~e(q{AI8x3@;>!KDlaN@*?h=UGeTG>vz1%cNN z@{*hvySp(j7HQ9Wya&;(xxi$XoQh`#*pRL%PnvX;&I4-zwp!>C2J6<1fT~;;d|CeP z_SsiA>*$X6{NZ-t(xp=X7=A%PK?Rq~x^`jU+)=ULDdSR(n*rQE9-b#)_DPy)daFAt zVXGH$v@i(SLOiM(`fk|0d$TVjBt#&e@fUqEqEoJ*ad}!vb5%kVc2$NC4+j_U#jn8H zc_Iiu_`;La`i?H$Tt1jX5l*??rmxIM_&0;&OLMQ!F!Q79^ANQwZ1jF~dS?P#oxz}G zQ&r(GdLg1edN8F;*X`w#J%%p4oxy<^9r*w~THxN6@?{R=$z zE!k-46Lv%7VDg84kid9;J%7;As;iAu_oBc)j<7H2UN`lmhhAZ{6CC zU-90Ckq}-IAJ==^gAmn~t!4U~n3!PqdSWzv;1->Bz7!|#Bn>=CmUM{Nk6cSkEWZaG zi6TM7m(|`CWyF^Y`pwO6n5J6XkK!{U`ub7Vl4H;1l>MBa?-76B)6TPF6xAcPFI%wp`j-p$F-A=OqIAztg&=h&^-aQ?v;0c=t97}Y+-z!=1p5^t;azI!!C2tl|pifjrllIT~ z3v^^hk8t&-LsWp_s%V6SeiV&+deL4Urh481D3-p3an4gUfg|QmzFKN zmd`8CHpg{L^V*^0SJUw4%R0$2eQ~mZk1~U)2_PSjyjt>K2mH0;{m2*^QM>MI8?T?u zPhH6q_?4cYKQZ$>33dYfwbZmUvxy&R6uGhHH}7MM3%hJt6c;w-U5j_Jna*F=2AsU{ zZf{NF^;G#*P?}q+D8kzlRtljv{`#>)uPNc&Ne5qm70XXpI74 zszRr@KS3qz2yZ4c!j2n_69WKINqdm6ZISk9JrmDaXe#+oZ;ZgEN1}V^y|+jqWvJYj zai&wZjjq-GIX&5+=VrE5^7_|*sF=hZ|M>FbsoNo2Y`zz6*4(c!#aJM6qQZvuvca(( zX>t(h^NRGh5mIP4?iA9Qg>6AQPGkBh6ItUvxu}KjEAnEa@l|Ijn8ML~l&=M?%$O95 zvQY%WP>Xo5U)KAhGn_oRo#nR@n=7wlmt36bOBpwmb?WW{J6_Ux!9fb;gZ0D&;=L2) 
zU$qwMH45s^x(2tFcB4>XjA*b@IsG2Bx~O+{Z>v{k@ZiJ_dfpCTruL~LsVwz(O!-m0 zuWp>4b*Odx9s);wUopF!Q#EmBD@o_pUpq!cz__o&DeL*t`DXs$G4GP{50862Q(660 z(^BD~sV1vr-T1OO0V*CR?aJL>KpsI=|d;!et>0S1)p$Z1M~#pu9gwy!^V#F z)lNnA^=8NWC)V#nBJO_2cKF>m_z7h=FIyzLVNt{*QfT3l0@ME?EOcgGG>`K^#DD;2 zcCz#I(=>0}v2c@_uqD8eK$C`XjNA zcumJR58|9*K@Sg?c1(0udGDHRHZ?OnfVz*E=nV5mC~*_c0g#6TZCrk!QSC9`WcT9G zCtD!-yiACc`R^U~KgN#M;pNLHzC5n_P|ZJ_Fhk^dHLYidMIaK`pW3Fk6ja}aC*rf5_$hQNXDWTtG$nXB(wO2ev5 z+gWCS5Quuas5{>HQ9bA3v2nA6Tp|7}oJ_38CO00F*PIVA&5#>S2WBV+JfWNzVisDdgifS4iap+ee)`;`e71b>x_AQm=8LQyz1 zoWC*+%YjU+i6P^zS=DwfQRhG% z9-if%ymG$e6|^(`HR-@a-v{`TX1QgH(QNzke=$q~4y4@X<4A@+frL<8Rb`Ku*@tsg zLX|d`A>O5?fZ(~sO0l(7R!i{Tq+ ztW>UWRmo6K>KzV;VWzM9n>|Ax0I2m%%tXy}_E>=Ihc(Cb`!u-E84DL<^rxRJ@wqPu z#kf4X%K%A`vfg^9A?6LiG%w%6%*?-5iCGWmi~gNhxO6xlszwSZW*oSdr4R-TN;f>a z@D_?UFKOovf(LyUKV$B}Z5&W4uB^0s6rvqcS}#|^G=Wdc7e^WlzDP?KAQ(`ns_$HO zRPe##bafd?|01}*yq=gGF2C|0{qIx-JsbF3)huF+Xj2NEZINIXMGJ6Ez+~UrYVjsf|Fm)2Uxk3G)rVT!CVzdX4_! zykY<=ZWxa-%I66&%b;|m4K?yj3!Q@ZI?q>+ICbhMb7}vcJ$Cxe-<)R8E+BztvN={H zDb1iax}F)c$6_(xk>Io{n8-*Dj&U!&A(JLOEpdJ5z8`D#_WR(e^fuft-L^D7fXMb~ zi78l)n(tipt2_05i!WkYobLn7jf-Z=Nv&P8_RBA)jm$`^SZcEiE&#XJ|Ka=H?Y_Z(`Y z=%zzQ#i=J+bW@)91PFelmMGGX0(u27&gB;_>5`A3MLVRtNq_X6)KnY1xDdUL^+e+H zp4v}9Z8V`vlZ)cc=0^@I-H<>tk_~ivHy3GY5CzLLNvo?h3p40vG}^ogw=mM_Offk^wFf&yl0?|$7FqEzV6 z6K=O{j^isL&YYR!>yXm85o9Da(g*ouj?Lhb#?2}jE>C|Zi;=*&iKwgUIVabL4=rse zU3s>C$B|#qmifsp_z6sa>2TFF$fAcYtH8n$(uep6iu=xb& zeBd*!%%Ym_>%2hmYVl;blH<+=)b244qx8PlgfzYPx7NhH31ks6jwONZUp%&cB-@}%S_ z;Gn-!g5%4jV2-02c&alXxA=+#vb2{uc2HQ5RB`a5Gl8o_=1#{g)$OH82N%VfsWDXJ zWr@>2M@}FJdA3E6kSpNE7}F#w=3;#lt6Z=#aTMU2OyD*BGvtpNnc8*6Hp1ie#GN_ zbZjLbZ6|yAZ074|D-$6$Mfypg>?+3BN5&7uQD5oRtWhg1jA`$;z5$ZfCAZHj#2iB3 zpN=JKL8W7>WlSy(JxyE{ee>qct#<=V=6;d$|6&vnw-sSb-#Dc_@nlQSlcHP1sT@8< zd)@5h3m0;Y`Zd&-$i-ic^PBRh90%#YVX%!SgTQ9{(h!@2tdbz63F%{s?9Q4;vLAR> zkz(b7o6A-;i1x9;gJ=Aj`&E(>dtzt@@-n@CaoF*kb zT5KM;TqB*gyt00(!3VwEV1ubBFw8Ja5t#8cRY@BYAtt zgVLP-+_@VjrC=ywTV+=i$5p;P=)!A=9gZmJk)>=B!lk< 
zTvrbX`65@ARFAmPV9O<8TCkVdl<@&n#}e$H=dBLm1eZZO#ikhI-NgJA=nIPxdsDKk zcP>FC%eddBl)S3wP$6{2eA0QooU#w=U2a8ZUObEcD2(|vF=5z*Ri+A_4fTy%V>RPF!+Hj12pv&cx2NPJU!NRDjG6KB1r>iVMXiS;>`C=bIEsgzN=2> zoSE9T>C>lM9(1~>GWWm+J%OEx)*UfEfW$88aOPR#*g$$QVnST}xKR8AnL&%2g$~`V zeJ-=#k`DWxk6(YID)D*W6WZz?5OIA^ktdiX_zTiANK0Ww@uU3B7!IR1Ysg8B8yG*o zsWN_qMey21!Z|GY=NPYZ9#Uu#`{`l2d^oX38HCK<2sY+=ijm;MwP1oA|H63I?|^c{ z%QFt7QLj39|5&__R9M55;=#7_PVN~x$@i!E!U*cX>vv}yU7eg;! zA-1lAH>L0FsaeF}mAsXkZscuavZiQgWC)o`@bZh#YDvs(UftZ@N-Oinqju8zi_IeH zQm834Dmh<*8v{2tw>S?3B%CVw4j5 zH84fVX-S(2s;ZBp8f`~+Sf4>KmcGHFA(ub3vl%ZkRU zZLG>K#v$eioMi1wa%j!!dxONoB&F%Dpy2Uvw2RV`h;^u=y?1Uv+9jIb=0;J5k2zA7 zL$*fzxNeNKC!QLlCr0Al*AmLXsOpnf<12>indmrI2XUgx8vT9lx&+a=Yc`bpwd7XDGDH4op-W(R zTq7SWBAAN&c*n-_AFxyG1|N8TdhW4-6r~B!n*1KF@!K$XJG13yk6dy=x9Qtm?U)IA zf!KLOZ7p8Bb;Rk7O*_k=T)G|)+^;!zWXQQAEfJZxLgEolb@zet@~@(gXZhbdH{%us z%0fg{iKEG3imy$;j6=>=gKb~pGt@+VikRYJ0sWQcfXZoR?#B)v9-{Z1BXou+Wy;{j z!e;9&U>%VmvxU^xUFBgA@kM~g3jB%yK_wv}{D3O|ZWGp9y8O8i-`?EYXH7BZNdjMO-W6aVA;a-6{qtQSaww=dj%_7T^KU zmvSs;4D&)XE``k^@Y2k8s;)76Mo>!|ky5{YAr^jFsIj7{dxTJVFsf68^w#(XLDhPa za`ozozrQ$DMSn;C@eEXb8@QtLqrVnu4wY`sOMm-`@^K%%HH@5YjSrCu?fG#URMXii zIooJmc!!9Bllm7F!`6uz=A>;85uunHp&#&osKb`WGZ&flUg*$^jF7k_M8x_e52NlL zgIM<0DnP1$(xm;Q5iMJ{4&LO@LKN5Ou1)!V2p6!{M~c?2Qk*$DH*5t)s1{zQj5&27t; z(grm|2G;Rx)(8tA*foA9PAolJe3}$Vo1yh-nVFd+C3k;Q)OczbzqDxlx~Ey!nEwHX zzcO&;+tP#!yk$^2Vl`7T&oi=bwa5;Egd82N*BRFox@49V;p7&&vIu5rf_`jdbamIPHf=Sw_Lbb07P)IahaB zi;eq#!dP`j0w{pl*cK%xWE>tDRQj0@5}d%ZB==DU5&GMUJt`v42u!>@?&^nZdMo0d zX-}DA4kOf%t&B}8T{y_ZSIpSq(J;tCpF}yfXE8 zadNTxHB@NfBuA4&Mj<#7miZ)3xOe{^*wtK-R-wo3y>sW@U!#(``{5NtFSgk2L*^eh zEqr}thKaqMCQg8AivbcsuY;7(1_L~QqT$uQ0-CSs z#F2(7Uy-^<-ig$QJ($Wl+3i#4Qm%=1jwPVOCIm&bqB8*+NUFk$?f<;*-+K|0s<4Uc zoOZo7j`&Q3=vBJtKeN|Vf8|Bu^A-Px*=zr&{<+w^u=(Ej`t?5Ss3?FK3dK&9S1@}| zs5=%;9^#<*?=^ZEAlN&f_J+rRqjHLh9}cUEq7h-()zu>kkN?+*tO)$=%I>{-l^;G2 zB77R)?(%-`pl)i-Dd7KKxB9<*b#J69hMoY%wuGVQlg1chi3Dbx9gFnCcA%FzHRF7VCB9HE(O)=XLi#bKczk 
z!KeOD{_}pvM{U4bDQMFoav2cI#6tDoy`n#C%ZX?7u4EhOl|g+Ia4?7~saR^){#>K@ z75Y{>rLrILIYY-I6!>Xx2CQ8rieduw=gj96|R{i zRD=dx)7Z4K!{!c)*wAuax`Yu2C@U%5DJUg{5#zyx{4N>#uFp_!&w?2n)3uPd4H<~! zt@Rh6CydAVGh6X_RjCsxoIuGb9~y$)sKKu`iZqDOS&4AsFwie?E+{T<`0{=U(M~kh zoYQmzm6#|@ZnEY^ae4L4yD+pRuJnQkV6du7qCNO!6gI8Zvvm_h*9>GSI3xSTghN7# zvh#Np7c*83V|tdVipWk^ciqfxaaYVQ0#a%ozRWfIo}&6MDjAyrhK$ayNVWSwO#$K1 zI*?OklrVh_BlP7&p9I_~F#%fSv5ffuI@bfrcWzJ@*LP(?^JKrlx_qPH;!}k;^)&>P z(ERzWm;~QH{$qfTE1y8q#U~Ivc5|~{1nIf@h26W%D#SU=MnAA^Hp^;dzNr<() zYD`^7jP&Ch_Ii$;v3`)K`$%rG!UcGBy}9OMIZ)EYu30n(X7N}{K7Ra&+(+gDiZ0oL z!Hu-_LvRORh`-hDj^^qJWw3Lrfqve}i$c8fwE z&;7#@CNKfbPPt_1@#GGL%Z%Prl$3M@|I>9T#i&s$-hOcTXD!>8h*_u(>nT5q@lJJ< z`u7jjY1rcKZbPO)Eywrj^x?i^S7(&rJ@dpMb1EQF1n|t{yBhO7FG-#LoyE^{qc77J zVeIkk^{&b2)e4mHkf{|q6IhjBC{xy_8xAP$kp3wGJn{6?j=t$svU4vJ=cG^Ln|JxdEBXN{>IojyG6LzN zgw$NE`AA_?;Q)em?_ImDj=R5Ci&4iFd7DyLLDlHlR9$lt=E%Q}G+Z zT|zyww12PNP+wopXugZ5h7Ye5?w`yIE31dJ zn2ObnYf*-gCm~CLtC`J67P~ADB5VR>GtwY2MUqZMf>L8Kcn-jEQqdtuztY4Z|2z9EW+;zekfe-^IlKI=SGy*`D1NCWfWV>^D^7|W0K8Gp z#5PzPJ0XtM_Jg=uugZNQX4PqjT2^QKr{@Obw>)WSVR0O*e+k%7&%`$y8H|OXqIeJQ zfvM+UmtIE~5TNdfn3;)f9|q}#ye^O_L}Dg0kdHv-*$A5{(>W_dvoQrP5WjagK4|bx zZ|`z?zxz}?r7D6EIVX@p@o|Z2Pmc5Z>RIlx&;dbu6()Ey#muf3jc9T0L z4?YYSRR)|gDt;>RGt`m&)>dx??Gf-SjsK%~E|*zg73?p}vB6@fnGqa7A|G|SU~QFwa1DvSDCMFjjoFY2PiaZ;tR2HA!FDSOW!G<*5_Y|o|ITcJ3$R%P@9YCH%&1BIT z4pr-tgVqw3U2q0Kw=R80oS+OuB#(qZRF*Z7q1jv>ain{fE=~_Wv&aiIn!{FqNjQr^ zJ{l*nFFmbOP~H?B)`Y})lpf-Fch8D%V(#M5kgX><34(iK+^emgpmd-laQ5P}i6Hx_c8c1}mV>^2?$q$;lkxL!iheF!`G$hb8$KH%cHGO| z^AfNmX~g!|4%jx=IU_`7T~`C(DHxoUpIMn)AV-Zkh8h4(1yD|elmd%g383O+cK!t= z;RNf)(|l_)hR0jO)XhsIE~BtwgM@Jf8TsFys7w*dEhKWO;~AUoc?%3Ex(6}t1}CDc z$q>uV$xeYZ2_P7?c+WBT1fozbY`+lHgJq_N49;UP1F_RfCTnqS_x|2Iz;xIEu>@56 zdq!QGiK7(B5!{F5g?H%)Pexmw3Fs-D+31;zn*wFg5xXh>A{ppXp2- zyYpQgTTHjawRhq4g>%AS(Yruu8w z3)rN=UzmJ;^S~})TW@y!KqbAIi!x}Cz}dHc?1>AGX@Nz2M=* zIMNxx1;bM(6bRuP0V50RbZU5n3i5}eAar!>JLyyM9l7q^-{!ot^-hdVq^@WS5>Y?- zVB@>J_>g2k#11`_xcj&c|9CxWjg{=h#aMFe*;Cn%+Qh~%z-F@?7|Go 
zrlekUFU!w4?m?o9QR`DHQuoeB?7&8G4`_^(`sXiC<;18glAoToFX2q>x|?sOwocCl zRyTF@8HCn0PP+_3O$iInfR(6=u?q94k$xk{Nv+SCR`_Q zn@RaPR!Km%>&@5Y`@)VB0eu2>fV{|9z@GUNRrfPxnd*Lx$2E{F$UO7lD=51mz$<;4 z^y{SF`=eC{F+fB1<5vLRCE72Y?~jC86g=f`l`a+m{nIltoZZ$f$TK1Ixsz&;ayn(bs4vBw*L`^R8oUaTI4pLFcC^?{Z{o1wh@X*eD`NLng{;l7FS|V$h z+xGP@2xg`H-+$fd+g}#i7ju8&qX&%MSfVm>AF`vVM<(tbUPTh~AqN6~>8GblTfYIG z9{JaKF|QQqKWE!br7C4C!jMd(0-oV_1d|jBL=uBF9+B@M8ev4{n+PMNf>V&czZ5Jj z8&^mAI0MdAePAOFa(!cS%06s4#F}_9-&#O>9Ooer-t1mN{H=7{))17Ckx1;DEDWy6 z*@^zB)DSkl45JTebzUZ6=67`LMTkbz+>v{x7^2sgG8jG(2u5!7SoV<^QM0);cvDQD zHSac1pP6?;V+rkL#^y=RCBhgnA#6mxs#r2eHljpoJ4atXTX<&pK}m%}F=iniUye~2 z^B{&5SJY=>A*kP#sqV-6-j`pKc_LN}lO=jc?IkvnvnfnZQS#6eJVE@mD2)pNXy5Mg z@@L6AMEWk`RA6re`bGS@D_emI>=TSZa7F7m29OngGMQ$MuSie2?gxg=^CZ{0Y}-Jf zWq`PpztWkYA)B{sG2fF%U?j-z#@buzTbbOY8~g}}%Czaz>t65bXIAwk_gb1f2l?6Z zey~B{C7R}o={=+3OIT&W(0{I*j6v=oS4;rcY$?KG2`Pl-COkex=pqnnESRWvi*Sy(+|+~eQ(Vf=y7>B0$}9H6 zAlx?|U1t>D^pc$|Ve^TI1|?5jQEaEoBj9JI+0*J-jL_DfQ#4Kh8Q&`Lw5gFWH0+QkR9zoCraW)HqBhu^S)8wI1 zrIW#9FGaivD}l{3IOrjp?leIxc5E2cgo4ZyI$vsPL^sNP53;aeW4pC$)|lN*E;(%e zw#uXA_gZqgRTrOO;a(eVd3wVKzfA5lh&mTY!+rTdEBEvCEER{JVHT6n`p)9Ci7I9V zk(HRGjoRZ`gcvj#wF^SLO|hx>hvG<6hpSZ+=;*N}{1i2D+-qU)uQ=>T42XzA#f0MT z)r?3I?rbsbn(dFvf2pp{X6SSdE&if4gZ0iMxJzKbh=_eFN}2z~L>O1XLJ;W<^{MSy zPKUlRzs$CXy~Nxn3T46ilV+P3_n=G2fb$_zMhZSOA`1KnoMR#BRchzD1EVq9T;3DW z9m!z^pg$aFDj+26;XE<5J%=94X+FiagKI@V3zbbjxw>Qq*cn}zVq$iCITcaho3Xdi zmeWgzRW>2$>Wk01qx&q9bV|Chf*j`6a8BTup*!ddn|s1_dsWFK>WbSG!d2?wj?k2H zcx~((U-|poteChE$!7{CW}lPy(P2Bnj`aWN+{gQMW#yK?e@x-?Lf8L7kq6&Fe4tj$ zYa*JURW1)j$>aKq9t2HX;Jbl-+$$F9zq3RZj?LJ#SJyY>1NnsRgY^j6joEwfxt7`U z%u>YE>Q}E`4GehA>!rf{WYG+{CU~8}XDEJRd=tU?SLHJ>V1rY*xJ8R$imy z@Xy8Q07ke@*SSixH#^xk^e5?WI7vt(#d-I<&ca^DP0Vl-A3OLC#Sb-Fa`X&17MR45 zq5(*7q(u`PzboRmHAVwuwfESM($ZA+${{6~Cbw)FcC_pEwzznZUZCpAnPEO$q*qY0 zyJK<7o}LfiuPOo_F##HPsP8Md==>oHIkXvCX%IdH?kILOi9Va48gX{&$*?FfU$DZB z-WGFpuii~X#2_qjgURlVw)*ey{3GfIf#QLwKY9 z#Aa8h(+=KgEJ1BFWgdrqQ{vxV6pxW~uE=v8FY}*Yc(gPB7r$Qpzpr5w2i0unW0lgM 
zKQ|Xx#$m7KPvk#?^EsJ=OYTi9EwtY6^iBKUe{bdFdg7RK-+%w=(~L1S|Lv1}^OZOM z`TyeX&7-mG_qX9wBaKLfq9{TrQ&B{cLS-%)%TOURnTJYcE;5VA6f%ZFhB6g0Whjy& zWk@1Yg!lN?eeb<~Ydz~->wW)vp2u2y-+SMgx|U)sB$- zEN)|Hb7RroeyK!z4m$VS;a|JqIYbO|asl^X z5M@^TEsjHe5-yaCD(Y7$lmlcy0g{LR1ilosb0+7`{ShQDTh)AMt(3i<;SA)3V&@w- zW5py71~(uiC+Z#?QjPSRtEr_#aearsFP{qXWjIU@ma3 zGgB`x`9XRGv~5>15?5OqWmbK))*#Fa^Ku0#0EXp!?LY;G*31d;3RBYYxT zbRwpZ?ePj~fDYI_ex4<<3~5W#*!ZJ^e;SB#B4i^PcuY1ZhSZlg6zSK(S9;z-Vh_dS zEhPWwt*t*9CP6|YmKq@%56>N6&AeL%Q0khwT?SARi1OcuL&)=lD~_cb*^!B|s$iE@7B5>xka-w_3;0e4w0dv1 z=ly{iWCUrv1IRxBqI6-GQ_P@4)%2D@3qO!&W`h(hb|Fhtrb^-0UReW&nw#G?g-l= zupvY_YPk>)-Rm6sYlBxR*`Y;H%A}nJuQ5RPsrdJSHBI0|!+<#_qo^0q%8@}NuFCc;6*^!i_;F*vgdAN<_ov@g1sBF5e>|cXQ8}#<{+UDJT^=Q<0-|0wM$fr ze?wLil7u`>cps;$EeqAJ@Y06+_l!AHii>9NIG{D&iAvRA7Hi3lPUtf3EC0^(U4U|g z)7=wNZXaQHt}=sLl0c2m`x8&jhn@)S=P@CTB+*-jBq5$};Gq#vKvDh31M0!RAzZ0; z6#QhE0B>xAqd27)iT*}wOS6^tTH_fE!yAw=|y>5{oga9_s)G=AL&VQWMti^^(v8#dQQxKN6Wvv=C>t8N2Nvfl#ySX(Hs zdK;XkZv42x?s9ak)TBz;fg9ay_zWNu0K_(&I@We|nY9$wJ`R(K6(Ax;TswNgyx##Q z1N)Fd_#`YDA$>I|-3I917ThiQ)OSZB5J(e|fP%WnEzH&}Qp1cw8;V0FcJ?f2z4p&> z0d)@`sR23~9u(csNP3n@yLPYVYP!`uMvuo#-)*&?BGP;HjKuk~^Utth?JkmJ=*W-% z*NGq8UfzgVFfRmnM$>i*`Isa%JQ@3@Hc7eU6HO6(;3b=+Qx%kyUSY&76KUQQJv>2N z2~J7gI+`&eHX+ld=v;2hq2mw(o=+MD62#UiOD4E}Ejm)eOx(f{YRHx%a)wZo50k{A zt}JCTT25HfHJR_;y(4@LL=~4Z>$L}+NH8zO+GiI4fK!kc90v)5&c78t9%coeVBw0` zJHd_uSvW~>Uowq=T`Gh$!0B7>|Te zgDr;;--vs|6!3>nBgN!QbdJ%Z3OFfs;hl&g$8EB-)^d9k3TW2Q&2V@ks zPq)=VB6aS|>shBw8${FF0_c|2}JxCgpLt=PsZs+2PLX*jicSROU+^|@f=`1Irx?p2yK98R}ms0ry!ejEF z#$a*~^GcAz3WqchbtX!EOYk2TrsgmL@L(i5Jp3H2CoZC_G{yll2@oPCsPZAE=v(4C zvv|foiDGtS+Y9WJ;&1AIgP;(DG>?HK-yLzgk%?_zAHa9Ez1S?}B5Kf2!}I3A{7)lu zkJ!Cd`oT4}wNK97HR2-ZKbElf=eWbRK|X{~i1l;+!nH*HBp(8j*v6%4k=LfvCJp8;dQGBkk!rqH&%DqH$%vt@t~p-zL13B+>#*ss_lQC2jMDd5QC}&>!`*ek+w3F0UQS#V_4xO`c=J}(1%uMrLP){IQn3cX4?xmY)9tj=S%ii z1X@9(Bp>2?^XA?fW|I4`AUS#}As>wZ`~F5Wn*rNjfpF1Xw*te_@*%%)JTD;S9KE7-XB(&%u0DO#ET zPLBD8pwSnfWJ4SYHYQTs*dut9GDm;BAahQF**Et0y(H!b@*ynyFI|kA3lPFS_3F1a 
zwQTq|5O`K@3g|&J!W18o8>4!1!@yYXyImj$S4ahwd@4XW-W=@-5BkCUIi%8u?;NvE zwn>Ng76>xM@Tbs#<{04O8&g*l>6*Z56&m;U5KIf-doYeWU_NKsFVZAQ(3l(Z z#HoiU*if&w@4Jcna=IHl8IJh`W&l8D2RcTLk8^J&HhvC!#k~8BSDaHHUFtZ*1_-@^3dQj0 zVXG_ZLidTCfF1ihEd3@}o@^~Y^es+RaiBrvA!D&ID}HM#vlEh{o&z3u%;?sv*%Q)W zFh3C?NifgWqr#7RF2%GLVqo`1m^GTtz33D0=wQS|FGvLD9Y7hxQhz|iw2qb4YQ zm6m}rCUYMUQk1gB+iIgM|yPD32t z_FBE~(Q{3UxT4Tq6o0w>9XKPBUcp9(p#5lk#J=JQ{%+GjskqrT_QDd$=FMk_&c!fk zMxMX~sqCf?=;vZziKAD06f^}@`$yJ)s@ua;3$L*+{eISO&-Y^qSFbP~{IZ(Y&+wG! z6|Ntz6KmoOcXLPgh=m&m`o;vBE3{<`AmQn)6AEWm`m)q#wd>QZepl?1cpb#crGLKE zyB>8p>g=**?&Wc{M?Sr?Ym{>Q)cE<`uZAeQ{{H^MNDoz@?Nf3C47*gQKD=k`N14x) zp##w&dol+it_;in8dh(*m8+$Gq7s{e`$c0C5)#y00ehX)`GlWWiwaQ>;0gWAs@vH_ zADwb8IX(H~(ZN=$?+Ti-wz3K$gGa-lt@C}cU0Gz(&GLHU(k z)*!0}kYTu_-eGkutaaaGH5kCQYZDvoWO~ePGr!dy=%!`M8yNQx2w=dzX!}n)&v6xY=8IUx{tn;AM>IM(s@`Ezygs>72?RQK-=9K zy6Tg)FJB%XIMK5LnI~)>{(Qxgj?PAJ@ryJRyk>rL(K19yTJ$+$pAS;Fu$UY2l3iN!vV8VpxL2ry0`hcQ+bv1wCXQtRwXU zwnIB!k+F6yOO&FN7fR=6U0vEHrSywHl4{{Ws@qpRxe>K!aLPQ70>fiiF|UGKZQvYL zH~qZ=z35^J$USzpPl|Qjb_Ts39SR&Qg$aMN zhM{zy-wvUC-)$6W?Z|>Xprv8qEPPq(ZPE6E@R~KT}qiBs4 zC#Slqq{s3lOYRFm!IicrShW>-fQBM_S6tb$e0sZ1RI8rcZ1RNb-UL~&VQsw|5U=j= z`2mc&7y)soLYEJPnV@DRE7R@WcY0TVJhhpdae$HWQgnt_GBPrr4)ZeeZCKnjAe#Sz z3(Xrd)eNyoph=zG>a6LvMF}| z^7t=Rcv6J1&o@)^4S}O<;2cd=VBu{>VF1T$2JAm!97>}i`uWnhbE?na6fp#~E!vDf zRtaK?uC;#DWt?!L$`821Vq%Nt$7uvcmQ-5PCZ0|m%reHh`}-o}-6 zzQ5i$pweEQooGN)eMFAu%9SikZ~EgwO#}d{SbtZ_m3uDpXNuBoJm`yfX1Fh+Ff*&y zeQZqwrwAB0vR+ljEk=>7>G6zl)25wsnO~Qkq;7?VZl7~2ahe^DivTZvkT#)HS&At! 
zz87kbp-;ZBp~VK{-HM)=7EIzjp%~-^mNvw4D@EZ&oqkPlQABZ&I>&kM)GJ63lUCYV z>>4I^TTtQZDvwow`n2xz0o$sos=>D#7=m-x z-#ne{ZVfuG)2ZM38sG{z{ZeoRH#?#M|McY`%oMx%`1v(_Z+6wF*648Z-2`I9DJ8{* zl7R2wgT5MXnHKxa#VZiBK*2I#4+xZCywWOeD^?$B7t!EfHXrc{E67_SUp=-RSfT&K ziAKim-af1o#OJWKvpa-}g@%c%64eb~7;lr< z>t3^BV&sZ`{yNc1nCOvXeOF=Q78^Y^Gc!JT=VJm1C(8%*PP)KG1_qk=3ubF!vKBUl z8~YkQi)7|L%%R$|BjM=p@TAwCFeu}ty>ac@)5^+2rg3KPJ-3PEM-?VM#)9La&mx%n zipvM%EPaYq=)&{)4O?jXKYdDRt3i?U4HSry%SHGezWkukrR;^%oq*E~lm%l0%ZOeF z4UC+&<)HW~nY6h+;1kOzlq?kBj$Hz~a0?_iB1E?`5gQFzG&J^>{yxuNPewHvtgF7^ zO03B{KU9AnKJ{C}1rywSbc-$edHMwyhIS$Y0a2+0WM44zKwpwhujA+-ZY?n>#J;gA z+!>7y7(j0UDML{j=-%H>DGF=w3+i`30y&z`Ua|1$lNFj7MZtq-0lZRPkj9WjqB1R8 zE71@tk?>&|&_u5xCPX*4;=W$V9*l5O6sWt-p_qI4hY?V(*M%0Fh}1O?%{)7@_#&0b zR~L?BH6e)~qA=$ns%X)0^<#$Fyb7g2a2LGeyDba-H7_)@Fyjh zbH1)ke*_vLwdrIWTMz*|r^L41OIMEhCAZ$29xV|oV0*HAGuh#KfVtmF6^HK!4Y2=~ ztn-0q7ZHHxl}QQ-Zr{E{rbTqCpF3(o!SBu-oaw8ItH$zpya!CH8TCj41j*R1`1Mn0_xu3oQ#>o0VNj-M zr^$k}SyZ-ufOz3b!IPe(IU~TDp>{ z36Zksf_}RVt$Ye?Pnv)oe04$pKEoQX=(s6laPdoDn1T8wYMksfw?5=?J)Z9b(?i@k z8X4>^JF3#YZvA>LJQsqL4}RYlg;IuqBdpkjuwxQim;{l5bk3E)@9-Yp9EKld??61H z9TyR?14DR-u>5jJjfXWfG@>jZZuZCRq>DPnjYOW@HjV$|3xFqFdjZlTJg~ZxaNPmR zP0Qi!G~RbD5)#J?P#gV(V4s)r0Z~K8;f;v*2my8-VZTSeV8Df<@95~jlAMwTkBp32 z;0^fg$Btuh_km)IV;Z7mWVu6uW!@x zf1@5YL@2t4kNOO;MbddfE_H4P=c`j%T0EHItU^wzBEn~imNJBdmJ>8B<`4bR1&x1s zBVsfCRnG=U+c+^9dHneCQ4{F%G7BvZRTqHfI%{RM3-IRlkG4hhvOZ|HS{_*$z|vQc zFe_@)s=dsQz_iiG{2sOPL_*JnXs!j3C2cJZiGuoX>{2Phvi6NAKNyg{Tnb`CLO6lG zYy%+44Ifp0fnMOJ7?%!r0^PBc8YdJZEe9zGypabL-P-m93BO|uE(Fkd7|?ShV0fnK z1CUKKY}sO9R*K6%j*sRGP?#3igChS$S^xkSN#^Z+An^|4d8-DVOjeP#7Nh$Rg6Ib; z5+OntKYPoCi-m8)&Iv{!0L_u>XzAz}C>WL(9fi%64mXULP*;C|R%BEO5xN>XvD(5r zr&-^s+~v4$76pul+KPhg8olgpx;a0vmh^xX=nD%ApVieVPC{dcP3h`x5FK)2do?Mt zu_ezJ&9wO{+2N$rRDa^oi}KbF<%>V$W4^$z$XbfMILggPr3!q8j zN!S9(`Z#R8E@&MS3?%<;CTry5ck zjV>+BPEcC`XJ$K;uZP&A8cd)Zl*(1$i{+5PtB}tFx_TkTX@SeopI;B!%nOcjD>9E9 zDBrZX-{`-uWoBj)4FS0qg8-0o9zEKK=DZJu+rF{=44SKfS>G0{n)-F#!a@MEFp0VD zhpNOlyvaF0ief}3VF6SzBUXA!hJUQh6 
zX;$NzCC?FdLzP(D+cV)ITYinzIxLuQrOD-z@j4xoi zaBU*NCZPhU{5Uqm)4{v+!|*WQW;V8K&^EyUKC#OM&@subj7a_)h-FwLM($zS6-!=| z%$$qIEBE5aacMZr84UQTeCUJjfv z?9=NOD0?=bKL`XIOzNTjq=_ycKR;Pxch6m{B z;XqD*EL*6=)wqRabHHjr&Thc9Xtvzp>m#~bPIs~2*B`JR(L2Nl|cj)(gk z(?p}qe!$HKmCNJe;%AUbMBBb%p7Psi#ot(8e+ZN!<}L1z%ln`Q0tDv;|4!USrBP)K z4JLL;CohbgB!VF@`C&k^i~IHX^s|^i&?COG`&v9}lo@I>w_ho#106y-@_-Tw0x%pgQ}{J$?LU%6fM-)}K6u&ln_ z{NL{xiyo6*_n&VDx8Rcg_nZIgU(^`F=7yamEC2kQ@%9a2Jv4Yv_d2m~j1Q^D8u6BR z52N9ir3J;p1|ZJs=FPQO`|?3mTLKv}c^EeDkKL zVMdmgXl5|&LqVQPLuRa=VY~!GlQ$tmU5%{u7zi0+H$<5GnBBJgInDR`{@yU-?f8>F zQN1v%U%!+>nJswW_qRj9OrJixHz>@HGk_MQ60MO9Tx-di79iW#k8vLY{#H|%p_y@= zZXNelTtcJ^8PF9i)8~N(s4dbaw$128Hv!btI5!6{WDP*Q2=<$s_O5~Y^GZ|{OBuXq zOpE9jK?U*@Z!T>3?taHL$nvzSR(T)e-r3vu=X9xF`}=%9r~POv0S6%-u6UGD{@}1k zF`0G;J7zKZ zby6+16L(#@{pW5RTlM#^$Xma!pcxuNT{Ud~y$nhvWPhEJiS=T9P7r61+NU)SSM&`0 zUXwfESW^i>M`(E16VpEY_2fcqklGE`+~he}r<|5>Iq~Ot3G@B?pt5M8{J^LfpA;(b zj=R-s-T)&j`_p0w+=v_-iIuVZO{j?c@+daseSVygUi@HjmF|uRFp5CR~V-%Eg-2F zffWAs?VAeU-H?zgw{Fo>GSIy#x}vGP78@Jeng?xPMNQ2zl<5`V+Zi@*4!}po`gVes z3`r8_%vYg1ig|c=D!`~&W5J}P(>D??fZpPuFTcm215B`FGn3sb!dV}r_a}%jZ}fRXgEt)g z@vxAU%dphG|91DE=VjH^zZK!)hYuef0x_Aq{h|$~lwOxO?t&m_C50mC28{6~rKCQL zj;>Z#Rt}4q2s?uJL;^8{4Cr3l(lu8C3w>T+znId}(t3~GbyXnqDZJVQ{WC=ATXVKpw= zyX4%L&_15QXGVt_5#A1(5UE8Laq{cCQh(4B6i>82@$Q7YIH=|a31mkd|H*n!0WyDs z=tq%G{y2PA0ZQ!RYDdG}0%vI0Ni7GoJTQF7$w?d`ofc0`!(HhunC{6bER@G)DZ*Xg zxkvz9N*)iyc(PTgus!j}T70g4Jo(x^{4H3Hg`~QS!b+Mx%mXNnU?;sMVOERWAB@QG?KNxTDe!esQmPl8P0oO-?+g?YcxobrO;!NyUTbG(mnBax4Lhlnb30taLvBV5x>MMF@!0Pl&AZP0h{y z@f4GE2!Q(t1RN)4JAqJ;H;4oPu7u~I8)}4RTm;||*rlXvBE5~1GY8;MzFGSU40j-$ ztj&P~5_LGSU?b3K3?8J-J?qCvq!w(_L-WI57?W;+=to7h7jRpDLUKh>z~`S492H`Q z`^RU-x*x4YjTUYh^aeiC$KXZ_J*y0&KQi78Om6fclEZFp{xY8I4EH$FvK^KF=f)jG z|1Sh(6QQ?LT~KxqBptdapIv1?7eQ&!Y(9%|@@IGjp)(Z}aRO8HRd;oDUA1Y?Dxe9s z!=eAC#UYjl&fQ+P7oae8*mo3Ab8*8cfW@<*eJDTqx$tAMjw2nbiS$k9lYaPXqX@a z9(Yh_C{#=WWItpkC_9oSkP&YDEjCsVkz4xR+qbV0*fijJkdIFVZa|p4v~uje^$6Lg 
zhAP2YhqBk=Xu!W-(bt5cg|e5mU=&-`7BI%fm-s?YPY=$P2E=;nvBSs){W3RSsLIbk z)(f;~*LMj-KF)i2<2BwSG06+VQ{@bh#4|aRYY#IJG)PY&&+O3eTDa>_b^fE}`_oFJ-OMr)emtuvE{LBH9~5pQ=cA zf$%?c7i8w_bZpcz7uY zC{jUbcwkBpsVPCN6m0i435nCDr!jd{Gx}rb)sZ0^Tw2XCjW?L&Yt`TYc*fUcq<|Xo z8dA;VPh5RPUmNm)TDPS3llD(o7zzU1+l8lS(5*3qt6T2D2D+ou>CodobS=413?sN1 zq*@^NFk-h0YiCA`v;>AAQ!Snh``wHdKtc!nLmChCAIl)LBzlKB`_GqC^8ESH#?_=L zGp7DV10(N&a{a`f-a{k8ol&@_OtElmmW||L)|AmXt1@(Q_`@ z+fc5BeH_EY=J?NXBU61tltIXf2`D*G<+q9VTrH0IIV9Yh1LRaqnlwG@*IxrQI-I8@ zpZb+q3_A^3(jL%n+LR%@qU+hSdky`MfEMC?Thq-QM`!-1yz4~ z&S09q4W$MhcD?|k^+M5o^wmn>aeah9+All_W^x}_f*98!1y`_5krH;?WC%WxZU)oQ zfEhXX`BzELO|Fv<+UyG+D&wuV(7t`z$ytW)j4gpm%Z6?FTiY)S2iAvr{aI7fT4deU;yCQsCWt*i zQ$lnqjwJLlstV23IC42oyX?@it6Z3KG{mk8 z<~-uLKxO3TZiyo zoF)eu2#QGxMIhU0zY%qJ<%GgX1sAY~tGeh<8CAU$S; zib+%3^6c5mXu;w!CICs%Gh(gIe>!goDx;7(NzeJsNSUohpcTV@6tMLOdaLYT?^C>I zO7nNhjrA{Hy}~w#x_(2%1&26~6mjx+m_QcSULr+e3ZL{}Of+CS`W-autr?atetDp- zGxZTH4XNAjewCY)X~ZTYTIn?~Y`EE%ir(EFFX{3I@;XyCO78ICwAgcrNyuwq!B23? z)D&GJPML6-VjF53dpFm*g=7gVa)+9Dn-nBL*CEnTi^8$pzGH`Z&pAUwPR!!^fZfU* zAr~k3+&@J@3+5m2XeQg0qO4H##7Ad?Y2EcNC&LmfSwlj6bq+s!2g929?vo8~+!p5D zGeh!G;1RZO^6}3>2h`Nb68A3O+g`Azwg!n(GaMeY@;$%Pa(dk1Ku*(9k$*1OAWGxNy;kfn z;7D@!wZLO~S-RLubw0PCo>RH~=VuxOl zXzyC{pgV|&5^V&@i{_o8P^7DAcb2+3hJC&OCI#n^@1Zm*=}N%8;CTZuIAn3@GWiU3 z6@QDKM89~GlFppky)S=O3nX@)5rJf8FT%S zE$-e35WE7g!xF-neS5gFqT;hV5_SX@L%m8ycNIrWgjo)N#wSg&c6fBOAE_cSQf=1V zfJzQmDhIlK6c}f5FP?DlkTZArxm+#>8);fV<_1G`Dt4D4ZG#=>mK-$Ql2P2b5xjiU`ALu+< z1F$6|qzOxmi4YY09!Eqyx~iKPAsD*ujkk$#%uKdw3D=#D#k(R66|#GHIIN9mFpJ_< zS}Fz3``fn4Wel^DuT-d8GWIXHE!0$EAgIE3=YE>nC6h93Y$ppmLX^Ey&Su;Ji57y=L_A!bFa z9UQI=?_m8CAU7VKAdozH)Pd+U2Sf!e*^le8q6}#LtKW-uZxBp44n7X ztBcXPYNQzLUQ%t(3X6e*xUU3`L9tGfD4)6=k>8R&A4#tL1czE)Px;)a6*Dq1fXo1d zm2Id#Ot$)9n^#p zs_TpT`rBoc%9D3ECg`xQ1Maq8#jCGgZIe1;&yMH-q(Bz1E!h#<5Ag=D#)Qdir}7eq zk?UB}77qZ;q!43^){~>%8_Cvjf<8mKPsBO6=V5hGxFSeL=;jVVBiH6Jjujkoax?u$ z|L{$VT_FVo+|~^8JS=Uy*S+8zK#V^A{)f=&uUN4Hz5x`P5dxQVbQChW=!xNK_oFv; 
zn*S|vCpH$uqFKHLcGwVshDp{CP(vWOTX*bOj@t(H$kp4o)r$(lVq*iLVt@j(0yk&P znl;w8wrZ}w2e;wpP?M1bf%^LF=wmLUmgNcD8sz5}!r0)q{UJ!|$?}wznwn6wb^xij z0m33LY;W&PUU)`LMa2u1AzoNUFrxD5QySC~D6dG@eBVv{FY5Um@Tp?#T`eIt)b2-? z{upkLKRQ!fRK()01D`1~t4kNGtk$A?xoTODC=-;pBKL5X2 zWWo$ix-iJfaNlNm!jA}4VOKlio$b(0s8@(Z<&GSwY;0Tut;Yo-O)LU1M)D0b&)MGk zA-ub*ryWgx3<14)?EIY@XO@AUo?5xmBOvwE;_r(m_r1@ZmDZg3!WB_Z7z5z2n%dmA zU+g9b3gkk`9dkq+429b3uC8Yd4=%_!Avh6i0OU$Ms?M4(Gv-Ekx?UIA?LZK}6&dM+ z6S-vhawD(;1AIzyVM?@@@suLjt7c5#ctL3CjTN&%X_n*EZJWNOzY+LvEwIXDyCdk4 zJ${cLd4fsEupTkt-TFH;juT7==rIZXcycSMtCy0V7cmF@-3|Pj5fBQe{=mIS6IM^g zmjOq(3^Gl_b}a2)q#^&_ut+j)cM+5`a78(2CxDn*_f^qBQIRA&dr;FKSpoCv*MORq zb_021-XF6y4^AOlDFwNM8khJXhQFBOAtWu6y;xl`HOh}qitPseHh|U@kX)iZ;D8nv z`&^#FI>`@#Uu^_VP3(9`3EzX#;VN9swsLbXLp62*)X36N>_T5%kJXmjB33enH?-8Dwb?=s@%cL;Zw^wg= z*f#BuFjC|O@P()?2z5rXBY4gVB)c3;AK6Y_)p!5Pej>`#u3TA(axE9t6`2V@3_>{5 zn87-Gf9PKUvn?j98~F)9#bEX7;^z!Ky!4l+IGqq-xfcn}W(C!UPjoN+bh$?(P#_r@R4^0LB*8I-Ooj79I}Gp|N~b z_jOHJB`R{B(r}1cc?-T^aK#&Om)eTXJ7&f3*%O7{~$gtdI>H}a$d^BOt2}+f-w6pm_AKxm_T>Mh+VUb zyk6&-_^ZxT{iaxMr&Y<}Olks~Nui@S@qj^g7{*SgN2XX9n3)N9O;zAQD@cz(e-H4& z5avfUU*e3R@WKrA7Ccp_$mIbmLQ!Cn!X-8a37Vu055G!X3-ca={bIm2JFn!>K)h3v zwzf7WOo7|-P#@Hw^`R&LK(*Mm67a5kqE*at#P;Gy>z-bb$;`{eUXrG6?aS*GgAq-kULFwhXbTZUWB+SXsFZb;& zp(7D?zwkEE246+2CH5ku+$XmWrmV|}9!u@Cy7Sy~QFQ)_p5?e@CgIalQ`HzaUPj5s z#N7QwBh0Pt>xHK6JxP&w*ka zy4OC;T0oH{V!L)KRrzhlkJlXqcpMd{i(~gY9Uq^0(xLeRPAG^}tB_TIXs0!}<}*!W zwblm|Th!04lcMT=J5qkm?=ik>FA=dt+G8`7^sDvd%a`MDF3?H7!&~ZM9L-c~4H0Sb zL01#B_B3anY#05$*qA?sX`SvZ<~*AN+J_qFjIfZEMi%n z^?8XAG?z8NgJ_-{_#D(Z_49+nl_6|(*iFWOC};(;PS9PTE?^vZa<4(Rp9$Z16_b(k zc%*uZf;TQI4FyG)mYKw#+XpI<0_jZIyBfTC{Tf3m9`3dOJ>pSUPI1z?qcME_<_!wj zHAr+U;5lh1u;t4M*OdDXH9C>Fx|dwMh>5~w6p;zEwB*Vbnyz4)9sT&x8+;<`1AYe0 z1J#d8ynbwH#Es{ivl!lcVfVSW9(ql5HP-N6OJu5`X7VDh^|Lk`1t{@7G>BY@t9}1_Hbs_Z%Q2F0Gzc>^egOd*3X*Qh3*{tQVI8AVFhlX+ z0q|Gv-|{HSIyAn1mW`QN>r?)Y;!v)8b&lZFsl(q7f)|ZAeE<@3=m03f9iD*zQhwCH zndP4wrXm>ubVUAFjT@;-ov zg^Y^~5o3215RS+zMgTWpgRxe$V-rB$j&A3Pk94GE1EfnBCcqMWQ=fSOw6yMhjefXQ 
zIUNZQh&imf?CjJP-U2{6i}!H!`0;8;NEAn5yAO#o={z(#p!n_wr}7knQPPGSyCcr9 zVZ#QsQo>{cX}k_u6sG5{0gYq5*y}Iy(M`Gdz+U_j#O1d1yT$aff99O;m4sb;cJ@iU7rQo|&3oVQ4oqH($CW`5eIwh8dQPIXA=VEL&U%KvY1M^wZpuTO zlGtWeBfweK0<)ldE{<1B43%&KhAZWn!^)91Q)>X7S!otaHf&S_@Xd4^$MkQ6#B zp3)qY60MTCyhvqZMTZ`FwR(>On4?prnjM{I|FbEWq~2^UbERL z&;>tdX71rN`ML!`i-S(i!GsI`9W>*#NRHR(7a?oP2Bs4wAtiH;VhE70}b%ib`UQ^AckA@&cfNa8H#_@yt!?o;OJOLu}QxO#rk2w zM8XpuwfsGF$KuWt+X+#L@2K6iyN2=$9w|%N+1Vd^%#r;~c`LfF;nGLMH?yXNcB2|aua_=tISJ!9|TdH{n z6y#A)zWw0YTKS>N?p@4dauOa=;T9n3$HJeIHy8T8wGZE4Ct1@MSbYk zDYx8h-nYU9^q(-pLV;&`_@t&} zYS?GiTuC9--tVVRyYKlVky*E{`XbE8D0)&1YF5Mldn>we$nxV_Cs1tD$WZhs9W>Km zio%bWaoEMhC2cen;{br|Rsi$MMJVuULbkM;)$mdK%sczPS2C^TXjy56!?LFJ^d1kkJ*K z#?U%|( zO8Ed!Oz-Ig+DjTpr70zLSHNl;s4CQQtJOL~gHmBK`=PIo6Jd(Wo4yYy&LS_Ous_*- z8pFqAkOV}2jM^{Yg)}nIo22!m_Cd6L7@Ykzpr(kMH(HAUP3j||U6|5OS%5~upXt0Z zR@IulL66~wI+;vd2%!xVzS}=hwdGKiG)KOFcNb`du=c{T(eS+|SZ1Y|v2mO7!7`3< zw6n=me*HrLj1}wa_icEPnYolYYhjT&!8%+S*+i4g%l9?<3LmjD&&kgZ)&Gt(o@k|h zSIf{W_oc==b=eCWmFr4Kz--Qaf`?_s7jMBAb;q9}UwTlfhS-8;Ho|Kd1V9UY-{qfx zc@LjGn~;BC$BrZD8p&)D=pD`B5m&*DvOY*TE2%ytgPjF;va+|7>O&5(LoWlH2y{$i zbp{g-pD!4CDAZ)C`4TE9mmPom6uh;OZKiNU~0HyUs)vC0Br;M9iXwo}kaQ@mc z?sa2Nz6XKCQ;ouznGcNCMr0btkG$IuVy9~^hZ=-xryz{vGoI*p34}olw zWKz6&B~)3gkFr-9NiBbpa{smYigacSK!gS@?0x4{IoSPbZ+N8kMUk-SQ2MUw+Y?`{ zOsZ_J0hqv)7p#f-MiBe;hRb#uuA%;nc3H4M%z!~H0I7Tr`qnAU#KC!P za0;_a%tKmaj}2HEa-|8IM8v1sdtHBAESOsDERgUm{p0ew*YgI*SrBf6C<$@ns#z$Q$+QCo@6TG#|a28kk+A<~%Q2oC&G zip;qq2B8gX{r(Mlll%E(3nnqz6?LgrnI)z(fxem+_2Hk3exXmSdz-Zf<9l2bE#Pei z(CxWz0R``(AU%`k5io!+5w(>JreVXe#_%<7Z(8sls@O6RHZ{|Rl&H`Wfe4{!!nBbF z*g(R`ZWamKq`FH8Lny%1SR8s)_;~co2@l`f9Nq>=5{---*1@oLbC~$4?gMtiglu=} z*Ivkuhyo-RO>Dt7Z&6<5-$HVjhJW-}uOnAa?X<;6x21D`s8H`!vbKoY3cxUPB01+7 zsz;)r1j2hOe8`sn{wx*0)9Ao|js|gwv6sZi*()549e<8S@^t8)@uVlS8Rnj=8S4Gj z-6TpN=?Zf|o>&j$m=h+<7J7#p#((_SfY4%Ak71b=6emWlOpD4s+j4lLSbboB3{a0L zf9eR5)|SJ<%@FEhoc`d9BVY=Ag6j*l@7!yTXeKW#UN{z_aMjye!_SNUsFyeA%7mlT z!6l)5_v}y8I~<-aF{~}*Gdyk6S-1h@0L?)^(e<=aix#QMGMHYLVZAK+e0$W#Uvp2b 
zvP7an(xRfiIhX5~8GpS4)14jD2c{cD92>*P7$ya&*VUHd#?4BD8CJh&T%`@VHYtyE zi@iMEDZDC&NE~{_L-q90Eu) zCbJ|bM}FD~Ocb_ku_h8Q zMy+rTf&u1sF&NwqOd$y^^+JSELMiy>(y+LBFVaauC^4m~w$SDqIohjb*J)w_7G^sf z<*4UIH>J+yewmx@-9;s#0I17D;|v$9G>19Vjfwj=j_P&Z4%(roW8W+IX<&d?x~F|h zWK4IEcIKX#3UCnxRHx@BweM?e2>*pAWgRs!oRJ+Zkl4L|klt3TJo(vcu@{=mO4F%* zte%^t^c^nxb{Bwa?66QS4XWYoxuj>aCM7j>XL-kNhcy7r>VSV#W1QywA^*%;Su?Zr z>bb(}88co+e#LC+Dm^S>W}y>~r_0=mG)Kt2CmTF~U<82tQ6+my(finUC^ZegiZ1wHKfU{0Ki-aHgBDP9N`?<2GcrYjEF#%0FxByMLz9 zl3-~o8B+|4R@)Z-o*S%5l$II-C44O08D?}ZPpA$zj|YQ_vfn}(`1rWf`XLn@W&Zn)o!LVt^X)?jzlr+ankv+$AWhpXl+nxaH%ho{VKjrH8WO2(Oy<+ju<^Z}=T>x>i{ zSr)U+35TaNlX1S*8|4f0B`<^#?l!)kQNG^be*5@!`_7eA9$J{x_K}9yI}I1c&6M93 z{&-zjU}-+=AbKLI?}6PnR70n__5gi$h7a%njM8tz9_=WAyd-2!Vbtw!itJLKx-=fx z@a4mU1Meg6S=KUXI6|QtR*uB)nyHO~;sAy@IH5kRD=uEp;0psd^Bj{w0UZzbhPQqE z`ud!uj)VIN-`5+IpNNHzFc#0$YN74;2*1C=pQlwe+9Be;thobXf2Yk0UBe@_T|!r$ z-*vR=#qNg<^>6H?;y2xxcWV|KYwP@l%V>G&z+m(pq44{=e$gC}HrO{Z!_n8%DSF;u z%Y#7Qpf4FE*{1LApJVJ=tIrR)HrB&l#`)Iygyjmvw`3*t6U;U4=N9r%K86K52uHI^ zrPvThQSiD7gm~k&3DEI1)Zm+kJJ4Ue1SZzKhmt{m!~gYW8I+i;)NZ6S;?mYrDCRI+ zXf>)X5sUjdik+_&wY9bBd$iR%y2@ApAXK|ND8N(zC4;*Q->~x!2h!8izrnkG5k(B^ z`4^y+2r~)q*jpX!Fje&3DcuxW0m=*JjoQC;bnM;%bEBh(Hs_%UTuoO~k8#RcfF1aE z4`U$W#XN^m@qxt`{VfV?&V5<>MOuHUoJesp*nZp0xWQPg4dl)Z6yaS-qDrDF#jQgP?&%0c60YM)2XK~W4( zA?4W(yKt)WCNcPDX-jK*r=@ng#3-idN86M{a=C?hqq|k$0CMp&NDIO)`OxAa35Mkh z^rL4n@{W;EVQuAdtA^@3n8lZeD{BST$w)eKkj!zIh)m!l%iof0}G*+V0VXUu}uBuCKcCLvCZx6?Tjoerk4&} za4&xG0{&~;3BQWQ+yDE)&mTYX^jaQ^+4J&Z5e9i^&p5|zoO3osMcA#~&1dT#V&5b8 z33ZUDOL=jzcGS75wyN(l_UbpOjya}^sM)NTt7h1$QefHHl~*|V5(W+CJ(d=J{{CU} zWHi4z59dj}ZrG#aZ2e5o#coDNc4aZLOY2Q496z-R*z9@>NoaJ4ntc&5e^rlPYAaEA zEc4u(u7?Y>boZQ$w5x>3yw7XTCtd3vWlG^Lc1OFg1*)frk8EaQAp4G~PoL97symE6 zpd!gHwltT>SSy}#e^zK|do1h#cs={U55;sY0DH}NumBWPhu>V8KkY?N?n^b-;h8&m zVsTF9{GD&0`;^3jsa7bflbdM1uie~MeFw8s6(47@&!8^Uxqz*tBZxsx4vTvqM89f_ z8=>jZvahv9dTI#g6%^bvyVcyikyWYf#&0i?0|xv0|9JpCzdq!Gd31K1{qgVIVq@Mv 
zx^xuaYe6)f8D(dqAQPnG@872-22l9d{8^3wh6Z|3!tXxK4MGz_^`&Qi91wI86ZqA0@D!y$``&&z9# z?}f%a)Dv>-m&Q^vwGTytqBb4GR5UzFS637P5PrS#e|b;a%nBY~8fB%n9ysvMuzT{2 z3;*Dj8!16cUB$_5eVmn5!=&+Oa^W1h%RSlUSa3YhORVeO*#4*qiGIu0-B6I4)x=v- z=+yTzS9b>&*HYk)m4F5@Hj_3vjg}#>Zn$={o#P2864_z62X3wgGXJoFl*Yy8wX~Eq zI*5TW!=V!wT(g$j_cu#l7wB0JE0smkh7{JJ(hqsy5EQJ#TT$-q=8D zDEiaOCwu5h5f{OsCPIK#5RM9gAJTprYFF7$PT)1x@Z*gZ$o3p5wTxGJh0=$j0P5D! zp?$v&!Q8<6&VqFmmach$9KN~_-U>csYm2c9*b$HBBs*TVmio zwVpbIso}Z_=oEXGZCNteRD{3!#<-*d7Iq`mF{_3ao-{=%TsU8&KX<9i% z%(U*Ve;?gROuXaH&{TYEihXJGBlpbO^HVgnOr7oRkynO0;1izwyWuB_zI^97aMF|v z7)`i5HiZA1Jkz8coKFl{bw@k(S>)SqIBcnpZOM7zu_}(6GJTZ&BUJM!7*P38N zr|XfGmF*^?^|GG(yntw{(P0kVP+fg}m~CMIgO_w_ABa=2{g;-0DPeC8tbAkSa6W)* zSjnTJr75DZ*n@iKe%!A8b6J<+y=}9@ba$197iS-rg`Xu0bn1m>`q7ZynUocwJJy6-&sVE3=lBmtAm(M{)HPBSqw zUO}l&^VnlYsRYon!=|RZXmYPYmaKQ09U|`~MIj-K{@tx+1(C3j$Sd16GHmyGJZX#! z-9Zq5aaL<=n_v9cL((1X6EoZ`UyMf1ewxQqQTNCf(O5Iw6IBqjL!lGeJe&zZf~>_PLa-W_$2Aw7*IMhnm^@KAZFQZd$C*XQ2{FA)MGXs60{V|1r}M) zxjmGV4w9JF%DM6IKlQF(V4!AD3HC@aK?HQ8eIz9%g|%nsoUc@F61QXR9=p-B5xW&R zQgKe|{JoEDwt?>Og5TyAirVFwGan1+$_j%T2RJlb|%$r~`&tGdKtSsvrD1mF3yLOn1w3 zH(brfP(iG&GZSUKYD=l;>jy2&mM>H$-aKHIqkH7hu>~FbSd>;g(wknzO0otngWO|p z*CpHw;?Sri{n(+S46H9pD6fD30UqPsSmK*VH5@21)_6zMl#z;Ls{-fBipdB;JQc*F zFVRaB)hbni;`60tKZaQVeQqYZepacl>7A;Lt-XDqrx<15L5Q*_-EwMXcGd_J8{ChK zzNQux&x7d`gJHz7_cN7eG3_e^li{m*#U&?*b#cybV6n{mFFmlmW5>5 z9QG9v7x}TzmOR{l^Kv+OxM%LTuYupsCaU>wew$bzj$co#%PIZJhEOnKmlfpM#^OT7{3q&t>cpfA}muUKaND-1uC9h|4tF0jvZf5Mi!S zc09rNms6lHI42q4WH60SP6nXIa91s11N}X;58e{hEE2*A%*J5NkNy+Gp43*L6Kg~A*dGUYnAfyG`;BPpIsSqC+aB@`xQ?xQ=@sckf zz%7y>2EX+5#I6~g6lq-FM?_&CxSnDFK8iF&ZSr3F)Es7#Y@j9}HasIKNgUZs5RR0# zm~olLUAA8LZt(%TDy5#Dp8Ig&gXAR*+#Cb@DtzZ*IK$zeZReK2WdPv2E`oEOvbAw zqy!T%u%OyfMH0T3>IRWU&8#e3<8c>zYLcO2iv=n*CdGo?flM1V$e?WLL9MjjOGl|P&Y-b@~g zcG3A5YVZ6^fTWhAvre5!7sVb{o8GS`l`cvMk{9y>>BW|VQ0geO?1TlaCR%3$03Njf zJ9zbKW>)yX49v`2SXW-C8Nr3~Ey5r%f-clt%;W`FxnkwY`=}4K(L9CebVls*$$v0?V@W~?w1W4Ht=G40&tmo`J;I!@wp9#N;=09yyTQ1T>ClICLhuzX1}z3!eLs?64Dy 
zRW$k9FEwHj>@o5)8vR}d*q))67y#b&TgZQwx?yE3UYHZE{%a^FfMe_ew-krhf8$ob zt2k|$E*fb!`Oc^8EK5kTXl~MPuFo@zQ-3{v_(|7Yt7j;%bR7nElSM~sNQ1d7)MDSK z;tkF(c#gMvvcGXgS!IB>JBEDi0 zL=o8zTc%1G=&SCU&5T%-3GP0xCo&$R;WE)JlZ)x!(I}93!Q=>nB|ju^@*vzBfRjxQ znconZ#Y0J>Fy2@HE8^Ge3Bc?I=sA2q>k4H{jEcJYmVopgEkgR=Z66R6WMc7iN|!Fy zMGL?#g2oD{dm$JxDAh{oq#SleO8iZc!2YnR8v-Ya?*wCeuwh zxuo0bwN6gI)~=H~njks<>xM>-m`@$-#7NrIV&500)+%$RT~$@}Gm;Z%)+I1HgdUac z*gU`{Ul5IO{cU;o>xrV|6}UBl?+fGN^1<8hRMpkHkPjV)0RvJa{R7L^P(tC7Ro9_0||^)czOs-E5ofY(P3OG13~ZeJU|@op8P@!(39%X-hy%DL z3EwemBr!ntl3<+LoIs*(PneUj(n7nu^yjBt%S!&$Kz5@JVB^NOV@Xc zsVD2aL`LC8$Q8h#V`F0_z>B#D<|p_Soq^is7Is^vUNb0qn1x?aXxu^q-eL(fXVL$= ziH1(kk?VOTJaY8(!>##DxKa})gB|B*?a5;TSxvGDmNrf9!G-i19V61>^b?;vSf}PV z?;<&6X}yo8wrEyDKFj5K#-8v+lS9G1i9^>#d1~6`Z5ssMZxNm8k0Kls(s)PL5pp}- zC}htF9&F3Jv4}8YUJ97#3Z|4oJ(B}3}{!}`hLfRUHC(mj=qzu#{~SUb*IlB%^UX*415g61e&Ns{Oi@`3QN34br*@3 z8(U;K2YPRWs6q3hf8Yh-ol7ReZJju!^h`qhMnjQtnfbwz-2x&!;xDQmNM^z*sQ_p$ z2a>qRx{HsL<5ssd7rBa)K$ig~Ka5GQoL*J_ai4b9Y6M)uoRE8tAU~8EG4eTAm}Y)Jh769unDOjs$tR@BHU6S z&F%ol&b{&7LHO{v$MZvEwnbwfQf|%W`D*}V5mE_iLl1Ory3iRSo;XC@8+Cg*QH0Uw zDg#G^S;KY2_2np7U!kw~H>$tTDg)NE`@$28XPsoa4~sqYEs)aG+(fKoF10^rM&=Ce z97%M+#cgTjpOC<<_5(x6EfC%Se>^HE*a~7ADGOoDr!;i>-2&TN=ZNGp41EUPO^b+3 zz8zfIj!{E;`+&Sky0Ndj?~ULx_5DwGFw5Xz8DoXA1FL1cPb-yC;(&pxe;H*q#FM5*!m zV|c;(*a4*2WrWKOofd?#L;gZAroDlCP3Q?XIZ{0T_%Nwdy$cptg=XVb*)s?tf{0rq z{(0Cw{iUx3|I?<;;J--o5B|$eOXt6=*DN>vmfMj{2)C5~z27))WxM##L*x=qW+I3tq0>%@(|-cYcY5S94L6#l(fEI z5QM+@U$na7bHt$%cjqnGWs!+nwweFdS-2OdU}7O!C5Vtzt@Ke-{>|hz!KDr_TZ@y6 z|NWJ395Gyz1~}SHcj}LE&!0_w^q+L-Pt(rhLDW#>g z*=250y}P@cfVQmHkm(VVMdSA8qKH9g<0mIt;X)*X_y}tZ+zm68KEzr=nMXd*Qy7z0!I<*Li1Qao_c}o#0w-Y!lp#+B7vz`@-{HUJxkD`=gz|L{IvjLj30p& zAiQAmMF7adn#>(dGPQ80$PJus{be&phOU#qK5EWJ)F` z1tYG8Q>vK>I1hP>;VM)YC^*4#Mnf+^7(uxr4}>F4GTordCT9Ur%>;$FKP> zzandlDGEKVQhfb5N})*1SIJD?&#LjrCz8!)k)_KSm#>U5Anx+)S>|>%QMjp5vAHAFrzB(*nViv63|RYL8fTjlEVXy;_z^<;I8f4DNGN1DnCTZAg-_DJaaq6=GYHggW2G& z=Q3Rq^T%<_^or15WO?XLsi_%`<)O>~2A!H44iHi}K}I(rFK;u*lQ-OR^4S+2a>?)b 
zFLG2;@F}MQ+bxSQFGcO#;AN-~X23C0~yaBjbsqmMA>tJHq9=b_RkNdp%?-Zq* z2438MWomih4q3*FjBwR7r^* z9+r>|c!7;n7u?^LY088%gX50RA2VBoxCLzIC?@Y7nMOT8#NHG(;sup<^cL1`-FoTS zB@7({Hu@Stj|fN;HBuKrCn+_APha0{y$u^a-Q=xbo*H&9CNp88=%yT}X$&Z1Uo1)#5_*Z$3%>_&0HKy6xKk9AItSp94r(SdE=;zeA81l~ zfd1n51$>z*V@=enMCYDvHUr9uXW%3#uSjtpVjNb<+%!rDd2W`&S0YcaJ%T{3MN2?(RcmbEo5v9|3*@W7QRAjT$G^B-YWci6l z1RV<`-@+7yF60ZKW%TRTMfqb?S1FJn)2mwm`y9xf1sRyUWBKO|7{1~$qJTPp$mnoP zc0jX@yj)sNP7-kwN?cNlgV9`bmRY+`eL5#D9H>9-NxNV)VktB6USHZtla${dIXG*j<1;SG<@M z{@ax85uV5c7kjXddiJ&}&MWT)%k<+u4G#4#{@B0AnI;l?3@Gcvl^&};Q6_37JM=)u zz^R3e4-1*-GC$on4m%ejk}=FCVsU-ER$5zeaX@%*28vZ`#3=Sn1t2e#;)&L>u+VkP z@QkD4T|^Q4NAEQ!x7l^jP!eb^lZz~&zQ&lJxDZG#LBHcAT8Rc}p|Vei!5q2$!R*mi zJw#%CZeX9!Ym#l6Z^vOSMddRbtQ&>24mGq0e)34$JjsZFE8={%?j2u2LBM#>j*IoM zMdytYp)d>EDX;CEoU6#pkJW+l7*X2Yeq;}9et@W))>TLH2KRuyoSb}`^S~E~%yl(G zHJk_7)>Tt-+iv%Izek3jKEhuXXhR_Gp30FX7cPo|v8t?gnCO``O;+6S zIKS)DeM}R$-hc(yNH_E@YrLI1Kyow^ z51{O6>>a`IHCT|n8vYs6d#VyG6no(d{h;xEN;mR(nTGyKLT}<POSF!Lg?$l zjCBFo)hT*KZN|P{W zY)t$L?Q^{iA9=rT{<%~&;KIcNpqotWV7-PT)rDk=_%?fQ6TLXa-3@Yl2T^JOcDdA~ zOHvr~oh&oG#A|i)r2hLG@w;*J(8~0a2^M0`n}I(m1)@ZG+0Sh(E?%Vds~6nq6Uf+; z%smpYT#b7hU7+qFBA%f9JY>l54LkwAJY2*^zB%M3Vg3I3w;`!6`tpxZNI;l#V4LGG zj}r_iejvbG03{0ER|JhpNcPL3z#UvoCo)C>-M(0;?*yA*NRw@tGZ!!_xzC~13p+0oSpanf$a8+ZdYlRz{MQ2A z@;AW5cm)$}XB8!-zkyS+VnKHH3S(y-(uxsgOFS2U>AjG=Sroug)=nGS7l(|ishv`u z#0nFDodq<6$kb`^@?Rmu!b0ERlnZ1-WD9lVMrd?#FK)E=aeVXR5qjz7lE%>r5@fMJpWkRl&4 zRG8NJVntXFc_ZC_unqf%xOExvE3)hhVR|S(ZHtVr1}#IQgB7L^gKtF}@~S_02J7=! 
zzk_vb2M(-7=sl>RrlfSP&f*I4#V7-iwhw96*G?d@O=!x2UlVEkBCQaZejewN!qw;s z05~CQ&8*;wDQG~FWOXsd>Jc9It?4|k8#(RHe~QQhKrM^3v}~izQxkX$>*Yfpg*Sd;B~=3OvqrgeEmi~L(d~0)=MPO#jmH267ECM z>V^J7$UeZIju=|>`m82QaHqbsU$ziN0=h7Gic~chf?(_wb$bW8|LHcmdDbf+is#YN zZCGQo*@ z%-bQ((8nwfn>rOAD@;ax+1}oMwh@`PzpH7c76CLl0%U`ZeOiZ1uM=!Ie(A@Tkvm}V zj@#H|Dox+LyAq4p-eEofJpVIp!t9(da_Z4grL7>w%!mbL7p75OiOhZbNbJ4Mi{@Ym zRa1N+7uLd2`Cs14PZB*h+Q5&{Ix+@>_@yJd)B?d;F)_$=$c<;%_nwiS5bo6-p6JNcI+QKNk)juDKq)j1bt6qqxU?M1Aqf%!Bzkd9c%&!H zTqGrzpeqH<*9~9_v-3Z{(8|0apfaYp?46yJ6L%Ft8sYl0xt*dAT1gf7w&rl9*x2h7Q zlK0TV1$#}sQbHZLp{g_bFe)~5SVyiOUFS*F-8?DEan*2*C21j;$)&`5363@$_fBl% z(Y{@n*lJ5XM2o(r^%M35ku(w_#;yn?qW2Kt(A)q%umkBXY8It!(*0k)_@FPU4>gXI z?amNZ)*q<2Nj86d3d7)OiF2;CH9xBSD}c30+X@-}Mlq+uQ2fxK$DP$SF=1H%t9D@T zrvh!DnnYzMfm99-NEWb}CXNn};Sm4#-pVjj%O-lzxRC5f$!y2{x(yN$9=9P^MfxK! znu0Zy5Bgd}sbF7C%w-9Z4mtKYD2kW!Zv5K5lO^N{nRbhK4aUUWC|9u5(br4>FLoU4lUt#}~ zN_iOR*4aECoHNgDqMI?hHudmvDYVvK>a^vh;WjHpB{q($_B{G-xDqp!#1EnqiO*Dw z`%6xtYT?b;c-SsXL9lm2uD=7L){vnI1j{bUak0IG{207XC1vGBNFO2CBc_yMV%w0? 
zm>Xx>#v)+@>+?#_+YRVQZp7M;n%{>o>n*_mQIf)ncsoD;dNgv`d^R|m@NWj7vJ}PW zsf0RGi^DvxL^vWTEv0l3I)!4%G!5MVngyIojt|{`njrn}ZBmq0b=-_#3FbFhM8U^7Qk$T}|NPSio}(E|6ITFxaN8_njF*pL z>i|nZR>*zw_dj2TE4&bg;$B$T8hV=l_CLD$f8ENNZE2+}-vNd54G$mn&jZzP3?1I1 zNovAR#8~5t2f@Z8PoqNkcz--=gi;OgX-p)S5Qtn?H(a&n`n7As8vuP@eKZ-t z60xvYb;sRY&a+BV<-x-nQ|A$9ablG2OQ7pbfCt2~+KU2`O8_qs0ECK!D4+Brw34I@ z40e-PiW7%yPb$s|KM_?s4>RA_duVGxVDi_G|5-sfw-O{;O&|GQ~M9-)^SZ8vR_iqgb%!gAiWt&8o%1S>;kszZEKnJ*7szI>502s64P*!$jrgu@kh28AV}InDNz zf(cpX%<6+})^fbMbxAYT?biC|oUTU9tleJ_S5z36)Mkz*!6FGTAOLe(9d%4aI>d_T z0(78+xDP8tjbpObVv3ZWgkm~Ld;}3h@uIFrL>RSf6^Hl(71kzV8wId)bhP(V59up| z6a5U{L;rdla>LGxLdC09v4@TAJjgSYTZnmH`GrlFy=g=uw)V+8_rEWT_dYb4eJC?P zt{HGAC6T1I6T`yKH$tXJk_5^PMeHUpHWDhDjW{n0v298jRCDgXoR@k)EML*tovE$1 zy{)!>rj(D@`$9UVbrWAjV$2Q9ZGlRr?rZhVWSV%>z%Uhs%H{^;O3Bve&|AHgoPz`qyWE_wfuLnrZY~ zx9XTqWF`S_mi(NX^1A4v+9z4B7$8Z%U4d-w0ZjgH>T)X30iuSU1L z+3qSU_;CRyT}+XpcG3aYCDoDH{w<{VYq`a6abA!7+z7txBl0m1G?J)HA9xp2k7 z#Xvo&2Z>~={Z~VZOg$S#wecXUN=<}qi1BC08=H+0<@A3xk2_Am4iNW9XA!B8VZ|q2 z7PQBtWMozV-GqPA4cz1eJtGqXCL&(}vp)|iAMrNGusg4AX|x=o49d; za{UTCsic|mIRIzG(;ntV=i%)Jl;K&qB|t~gt^@jco75GA&p&@>xeGkpkczM%OdTU| z$+=fc&@o7^9}#9nI)Q|ba)Y>ppgmNiavXt`DG6(9>m(*?FRANb$dWjSuOi~ATYoX% zh4P}l=@vfwIs7ovVL)O^>|78*H;G>QplP;y(R_jX9vCkkJXqg92aKQePIK_THG%C2 zYdL6vtPZqSUj1xJC*!-55Nw7q+rb?Lf|UXL5nCm0Jm!@6^%+3X?q)#a#s1MR}OyNl3FxS+Iwew)0L5j zq2sSy!ViQmN!)VUQe@zqWEIXnJ9PnBQ6U?k9-9D| zdk5B_gpLT(Y7bDC2gvg6;mO|v%-jzZ^vMesqR9!Pwb9AHDnCD!U34cRB0}!rfg@xD zDvFs{4CQ+ZdW-?U>-!HM?!_ULYHo*r0ycIz4176!M5i87zOcm5sH;R|l15$&-jyVg z?<3*&CFOV;Mm*$V=}Ity4`zDjT#dBO;dOsVH&Psc{s*b26y+}aL^*JBxbZX)iVIhr zlhinYgAt-0;lCDuM@T5T3j{~Jq6ug}9-dK;sv%U!Xym{XkO>m1qXD{}_!rYaeNqVN zti_q_{xFTl4mf<8Z(BcMBzD609f=_0(1icSvqQPYRnyo;+yIDtVcDk&14b^Y8-)O0 zV>obVX*lH*)UHH9acGreXoIcj&t97={Zp;(R8qMlE-rkH9p^9Gr zTC*wjMYw*@_}lcZ4dPX68_Pa+rn?W7b$c>-TlCo}zs};>UFJ6mjB6jwc5F!ikh`o! 
zC-DRvO!u)P>$K(}Ux`4=T^5REcXw2C%PFEU2g6SeBD*;MIiS=_-EgIWqPz}7o(KCY z5E)qE2RMQ1dSmSzSeMeLPWe*4NIIjoJE2=m9uP3`UH3Pbhr&ul{5?Rqb^-e;W3gk=|X!@Dy^kHfR&T*wy&p02Cfwp;ncoGO0c?35BDzRiwOY1I<81l zAfMd{C(%P1uz2T0={$i!wuh6FBxl5?yVvcm1jW3zqY#NUkq)CnbrSB2`|7*cd0y05 z)dYVXK>a`ntAv$Je-CCXtEf9ZfLi834x*<%;I4}LE|R-{SXG-v$RUa0^Q0_Gin_`J z>#}83V}X5oNg8;w*q9qvVDiJ$aEG`3I@bJ*hj*6p=-TTRboOmx{b{G{#26RCBiMEG zYwaY~f1a$rjgL(K7ReOd!X@MlyQ7<$=>-3WZgBQa>q46^xgr>yG=aUsuv4Si4!Rbr zrD$U>TXgMm1PWqQPi0q&}?7)3j!&VG{|8cbO zV+#QB#8JKr`lo3e;8hf1)7ChKw=hk`C($wIlAWx&j;tk+nwYI!00i#9+g)<{GGGP7 zqlh=_6h(Fb8-1)C*NypLemD>h6D#g%bR=#pv+J=RzZ+rXKHSAVt{FXAip^bl`azcr zeOamhR>gWzwnvvKP$Xrmf#+)^^}u*$sT(mGXJrMSJY`Apf+oTVgIJxA`n^5!4e8nn z$^@QuHQ5?MKrRwO$9Egd!z$2)QOAJqrs3g=G?!N^w z2qV#EoSpCcID2N@q zU_H_@zpuwIkQWnGR&)}#CFBPi3oZ|a9CvJzRq=xW95oWJW-nx|g!T_=Jg>G-zP_8g zRpoKh$~~>m2SJHBi7J@#MI781AP$p+4`oDWrF15|=|46o3AKKs*ZHX+9{pR#Yy=rr zg>LBDm?E-YxAhIS-`39yOIFr>=wiXx)OfV}oW890p4KQR%Ex%RRrdFXUs^?hZl03q zWNFC@Yqd)mE;#emITrdJL&S{}-bc%*7F_6T6@y!T4X%7h0MhNY<_XhMm4Zx3f)N)-hfr&P7eOYh_$fBmmsH|(6W&ZL zx8leu$GZC4|D+(SwjaW@-E8zvpWJ$9Y2{rDPxWx76!Y#XK1uH}{~xPsM;^{fc`>z# zg)bRqAxX!khc}XMTklavrevPm!Ok=uX|wAqMZz7ATOoHhry_iPumc=7UhQWF2uTLmmVHzOyHs z2rE$^mIx6aHMG1rT$j<4v2zfrli}j9z}f&P=&$U6Shl%c*0`R^c|nTd57NnuMrXPs z)T$=@>i|$GS_+UNEwCM_FGJ^_3fMR(>wT)I7t83;S~})npcrCpSIBx|K#wnU&NgE= z<6%y6ZI~Ct=T>@++249YAI=a#^WCfba8@%)zwd;WTj$QgdSJ<&AighntH_5z(KE@arLA`(OHOj3tI6Z3cdPB8c&tO2?+>)k#dQb2mn$C2=pk@y1s9x(+71dM*e=1o9K zuQnsNET)L0W18I&bqr`p#ui1x0gxG^2|$(F-h$$|6B*oFQ{X|!?`bH^Q)6zsiY__0 zQ@4?$Z0F-UJ>5L;tG28yi=oc?Po4K{R^AdDb9OfI*Gn(q^KN{V#~mx5V&fc5r~|GO zQs6g>q!?U3YRrfQtF@Dpvm!Qe&*aU*OC!g$lU1n{b%Ak*<2J~sR-sEo+wnuO&@>G@ zVo1ZVV4}tDNiohv{W#7=&SkkuyK~~U-g|7CGp@>&k4N{NWylLpwGlJ68q)syk$x^h zw*Q);e4gMPI2{H=`u>(s?_N$x05zTX_zhJydqsqO?Rc9iperQ4{b_OQw0&C%m)F0yY@l9WnuSSKlcfuV5uz8iaA%0=_8 z78qc;7bqRLn*Kxed|U_rnPBesy~%wyt48;P@!7TuH=9-UeKqeJ*7${6v4c{pW$-~%htbj`9UfzOPg+a~p*u*8MFy_Y#d-M^z`i9B;Lt;%n{z=~_EY%q` 
z7CpTMN|~=&&9$}bF*_pl?8|HTt$`oF7a=o@JkjOu4K0iQ#r>nbUhCAz2`$s$Lgw=P#j#%lzr>g zt(MJEwyiem`91`h%5N|O7OQ;MJ;So+t=doIMsY29B1mWLWCy+pJw=DhSqly{n~L)B z6fm(s){<)mwuTJe{s7n~m(nAUBOEp=(siAiTu(m`Y?@qd_7myUX=KjI$Bq>*EzF+E z8_8-`mW)Q9kSluxO*V8b&4v2%B*DtJzw6F_?$wQh&6sv12lkmFs$&z}k?c?W;g=G| z$|?$qWm2WQ2Z~c+{M{2YVXsQ-Jvt5pE?}~<^!HG@H=P@~YOvb8wpXzR~Eu65Y<<{Y;>+=_K#U@)HgH6W1fvo4SQ2mX0CP5g;0bSS+9HQ(8sPp;= z=ed)SKFq?5Ha0e?=p}uEmeBm?bRveXof$fKT(aQXhm$p^xikvw$S7eBR)zM@->Z+& zl^}KR1bwgE%){Ld+}Wr{k9J_J;d8zw6HTVro*H*_)?R}SwFR-#bTUWgq&VQ5EeP|% zMn#x}$Uy(gQO*!-4!BDe5cW>xbdtddf=ujW?F9Pd(h4_*(?!IS1cF~@H$#FiDUFM)QCyStMG0`2fM{G>hp{i7GGz9#400opHswzMtnggiu<$%u^3t{+7NEz1QDXJoF+@vw!`o#bRjD~&ZQ%J(2 z@miztmHnY*Xwh-DqLXX_S`~3=HbE9<0*kOX02ttSkFb1Ec=t4A_Lo+!rNJPgd4doh zMl_RELhKIU&L2+8fU#?VUQ~jq#Pf#pveSBsAj{f7#;XyJdKNjqEDCr~tK0M$$q!3={>RTH!D$v85_5!-jyAi`5Xb<%t1rEu9t}-LJ3;-(O z<>O>viqN*n$w{`t1BVaQ&=}&fFUL6F+-GA$g510B4j0YMy%9!Afy$o;+G3O_z(k)4zP|CLOP+{wU}wrC zba{;HbPpY^I=4e#^~QMo&GEY)XC7rn`jXIgnS{1H-A9@GA6xZy4<7$4qq9OnB#&hleXMf;tZL^Gg zvX1k0pFVBGJlQ+odRQChCDUwh7~)DOuX5tq^erC!>k_f z2nG_u(Oi3Jk3uyw(nusn!IGTAc_-9r)+MkuRQ@rXg5gm5X6<5ih$sAXZOg?({+W!F*$U_VO diff --git a/src/genbench/tasks/icl_consistency_test/anli/doc.md b/src/genbench/tasks/icl_consistency_test/anli/doc.md index b95adef..ab3ff15 100644 --- a/src/genbench/tasks/icl_consistency_test/anli/doc.md +++ b/src/genbench/tasks/icl_consistency_test/anli/doc.md @@ -14,12 +14,7 @@ the other the MNLI-dataset (Wang et al., 2017). *Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. ## Abstract -Just like the previous generation of \textit{task-tuned} models (TT), LLMs that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. -This lack of consistency in model predictions is a problem in prompt-based learning and hints at a lack of robust generalisation. 
-We here introduce the ICL consistency test -- a contribution to the GenBench CBT -- which evaluates how consistent a model does predictions across many different setups while using the same data. -The test is based on different established natural language inference tasks. -It introduces a consistency metric to reliably estimate model consistency and provides insight into which properties of an evaluation setup render ICL predictions unstable. -We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how LLMs lack robust generalisation. +Just like the previous generation of _task-tuned models_ (TT), _large language models_ (LLMs) that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. This lack of consistency in prompt-based learning hints at a lack of robust generalisation. We here introduce the ICL consistency test – a contribution to the GenBench CBT – which evaluates how consistent a model does predictions across many different setups while using the same data. The test is based on different established natural language inference tasks. We provide preprocessed data that constitutes 96 different ‘setups’ and a metric that estimates model consistency across these setups. The metric is provided on a fine-grained level, to understand what properties of a setup render predictions unstable and on an aggregated level to compare overall model consistency. We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how all tested LLMs lack robust generalisation. ## Examples The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor).
diff --git a/src/genbench/tasks/icl_consistency_test/config.jsonnet b/src/genbench/tasks/icl_consistency_test/config.jsonnet index 1859622..43a4af8 100644 --- a/src/genbench/tasks/icl_consistency_test/config.jsonnet +++ b/src/genbench/tasks/icl_consistency_test/config.jsonnet @@ -1,7 +1,6 @@ { name: 'ICL consistency test', - // @TODO: Add a description of the task description: 'The ICL consistency test measures the consistency of LLM predictions on the same datapoints across many different setups. Different setups are defined by "factors". On the one hand, factors can be specific attributes of the used prompt (e.g. the number of examples the model is presented with ["n_shots"] or the type of instructions that were used to wrap a specific datapoint ["Instructions"]). On the otherhand, the analysis can also be augmented by factors that are related to the way a model is evaluated (e.g. whether a model is calibrated) or the type of model that is evaluated (e.g. the number of parameters or instructions tuning). These external factors can be added into analysis by using the task.add_factor() method. The output-metric is Cohen\'s kappa for each factor across all different conditions. A kappa-value close to 1 indicates that the factors does not change the model prediction, while a factor close to 0 strongly changes model predictions. The ICL consistency test has two subtasks, one evaluating the ANLI-dataset (Nie et al., 2019); the other the MNLI-dataset (Wang et al., 2017).', keywords: [ diff --git a/src/genbench/tasks/icl_consistency_test/doc.md b/src/genbench/tasks/icl_consistency_test/doc.md index b95adef..ab3ff15 100644 --- a/src/genbench/tasks/icl_consistency_test/doc.md +++ b/src/genbench/tasks/icl_consistency_test/doc.md @@ -14,12 +14,7 @@ the other the MNLI-dataset (Wang et al., 2017). *Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. 
## Abstract -Just like the previous generation of \textit{task-tuned} models (TT), LLMs that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. -This lack of consistency in model predictions is a problem in prompt-based learning and hints at a lack of robust generalisation. -We here introduce the ICL consistency test -- a contribution to the GenBench CBT -- which evaluates how consistent a model does predictions across many different setups while using the same data. -The test is based on different established natural language inference tasks. -It introduces a consistency metric to reliably estimate model consistency and provides insight into which properties of an evaluation setup render ICL predictions unstable. -We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how LLMs lack robust generalisation. +Just like the previous generation of _task-tuned models_ (TT), _large language models_ (LLMs) that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. This lack of consistency in prompt-based learning hints at a lack of robust generalisation. We here introduce the ICL consistency test – a contribution to the GenBench CBT – which evaluates how consistent a model does predictions across many different setups while using the same data. The test is based on different established natural language inference tasks. We provide preprocessed data that constitutes 96 different ‘setups’ and a metric that estimates model consistency across these setups. The metric is provided on a fine-grained level, to understand what properties of a setup render predictions unstable and on an aggregated level to compare overall model consistency.
We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how all tested LLMs lack robust generalisation. ## Examples The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor). diff --git a/src/genbench/tasks/icl_consistency_test/mnli/doc.md b/src/genbench/tasks/icl_consistency_test/mnli/doc.md index b95adef..ab3ff15 100644 --- a/src/genbench/tasks/icl_consistency_test/mnli/doc.md +++ b/src/genbench/tasks/icl_consistency_test/mnli/doc.md @@ -14,12 +14,7 @@ the other the MNLI-dataset (Wang et al., 2017). *Size*: Each subtask contains 57600 when using the full 600 data\_IDs. The user can choose to reduce the number of evaluated data\_IDs. ## Abstract -Just like the previous generation of \textit{task-tuned} models (TT), LLMs that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. -This lack of consistency in model predictions is a problem in prompt-based learning and hints at a lack of robust generalisation. -We here introduce the ICL consistency test -- a contribution to the GenBench CBT -- which evaluates how consistent a model does predictions across many different setups while using the same data. -The test is based on different established natural language inference tasks. -It introduces a consistency metric to reliably estimate model consistency and provides insight into which properties of an evaluation setup render ICL predictions unstable. -We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how LLMs lack robust generalisation. 
+Just like the previous generation of _task-tuned models_ (TT), _large language models_ (LLMs) that are adapted to tasks via prompt-based methods like _in-context-learning_ (ICL) or _instruction tuning_ (IT) perform well in some setups, but not in others. This lack of consistency in prompt-based learning hints at a lack of robust generalisation. We here introduce the ICL consistency test – a contribution to the GenBench CBT – which evaluates how consistent a model does predictions across many different setups while using the same data. The test is based on different established natural language inference tasks. We provide preprocessed data that constitutes 96 different ‘setups’ and a metric that estimates model consistency across these setups. The metric is provided on a fine-grained level, to understand what properties of a setup render predictions unstable and on an aggregated level to compare overall model consistency. We conduct an empirical analysis of eight state-of-the-art models and our consistency metric reveals how all tested LLMs lack robust generalisation. ## Examples The test evaluates the same datapoints across many different setups to determine the consistency of a model's predictions. Every datapoint has a data\_ID (specifying the original datapoint) and a setup\_ID (with each digit specifying the presence or absence of a factor).
From 1c2f04ac751698d5db076d7dd572b585d81608a5 Mon Sep 17 00:00:00 2001 From: Anssi Moisio Date: Sat, 2 Sep 2023 10:47:25 +0300 Subject: [PATCH 38/57] Add europarl_dbca_splits task with subtasks --- .../tasks/europarl_dbca_splits/__init__.py | 5 + .../comdiv0_de/__init__.py | 0 .../comdiv0_de/config.jsonnet | 43 +++++++ .../europarl_dbca_splits/comdiv0_de/doc.md | 3 + .../europarl_dbca_splits/comdiv0_de/task.py | 116 ++++++++++++++++++ .../comdiv0_el/__init__.py | 0 .../comdiv0_el/config.jsonnet | 43 +++++++ .../europarl_dbca_splits/comdiv0_el/doc.md | 3 + .../europarl_dbca_splits/comdiv0_el/task.py | 116 ++++++++++++++++++ .../comdiv0_fi/__init__.py | 0 .../comdiv0_fi/config.jsonnet | 43 +++++++ .../europarl_dbca_splits/comdiv0_fi/doc.md | 3 + .../europarl_dbca_splits/comdiv0_fi/task.py | 116 ++++++++++++++++++ .../comdiv0_fr/__init__.py | 0 .../comdiv0_fr/config.jsonnet | 43 +++++++ .../europarl_dbca_splits/comdiv0_fr/doc.md | 3 + .../europarl_dbca_splits/comdiv0_fr/task.py | 116 ++++++++++++++++++ .../comdiv1_de/__init__.py | 0 .../comdiv1_de/config.jsonnet | 44 +++++++ .../europarl_dbca_splits/comdiv1_de/doc.md | 3 + .../europarl_dbca_splits/comdiv1_de/task.py | 116 ++++++++++++++++++ .../comdiv1_el/__init__.py | 0 .../comdiv1_el/config.jsonnet | 44 +++++++ .../europarl_dbca_splits/comdiv1_el/doc.md | 3 + .../europarl_dbca_splits/comdiv1_el/task.py | 116 ++++++++++++++++++ .../comdiv1_fi/__init__.py | 0 .../comdiv1_fi/config.jsonnet | 43 +++++++ .../europarl_dbca_splits/comdiv1_fi/doc.md | 3 + .../europarl_dbca_splits/comdiv1_fi/task.py | 116 ++++++++++++++++++ .../comdiv1_fr/__init__.py | 0 .../comdiv1_fr/config.jsonnet | 43 +++++++ .../europarl_dbca_splits/comdiv1_fr/doc.md | 19 +++ .../europarl_dbca_splits/comdiv1_fr/task.py | 116 ++++++++++++++++++ .../tasks/europarl_dbca_splits/config.jsonnet | 28 +++++ .../tasks/europarl_dbca_splits/doc.md | 62 ++++++++++ .../tasks/europarl_dbca_splits/eval_card.png | Bin 0 -> 163572 bytes 36 files changed, 1409 
insertions(+) create mode 100644 src/genbench/tasks/europarl_dbca_splits/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_de/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_de/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_de/doc.md create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_de/task.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_el/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_el/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_el/doc.md create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_el/task.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/doc.md create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/task.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/doc.md create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/task.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_de/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_de/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_de/doc.md create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_de/task.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_el/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_el/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_el/doc.md create mode 100644 
src/genbench/tasks/europarl_dbca_splits/comdiv1_el/task.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/doc.md create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/task.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/__init__.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/doc.md create mode 100644 src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/task.py create mode 100644 src/genbench/tasks/europarl_dbca_splits/config.jsonnet create mode 100644 src/genbench/tasks/europarl_dbca_splits/doc.md create mode 100644 src/genbench/tasks/europarl_dbca_splits/eval_card.png diff --git a/src/genbench/tasks/europarl_dbca_splits/__init__.py b/src/genbench/tasks/europarl_dbca_splits/__init__.py new file mode 100644 index 0000000..eecdf60 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/__init__.py @@ -0,0 +1,5 @@ +from genbench import TaskDict + + +class EuroparlDbcaSplits(TaskDict): + pass diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/__init__.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/config.jsonnet new file mode 100644 index 0000000..4c9d9bd --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/config.jsonnet @@ -0,0 +1,43 @@ +{ + name: 'Europarl DBCA splits (comdiv0_de)', + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. 
In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + 'dependency relations', + ], + + authors: [ + 'Anssi Moisio', + ], + + task_type: 'free_form', + + data_source: { + type: 'hf', + hf_id: ['Anssi/europarl_dbca_splits', 'comdiv0.0_en_de'], + git_commit_sha: '0dcb7abe8e18aa520cbfcbe9141b916c684912fc' + }, + + evaluation_metrics: [ + { + hf_id: 'chrf', + git_commit_sha: '4b119256e85de9130aa84d87247381c5acb29bc1', + best_score: 100.0, + } + ], + + has_validation_set: false, + has_train_set: true, + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/doc.md new file mode 100644 index 0000000..c6e1e28 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/doc.md @@ -0,0 +1,3 @@ +# Europarl DBCA splits (comdiv0_de) + +see ../doc.md diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/task.py new file mode 100644 index 0000000..ed51ebb --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class EuroparlDbcaSplitsComdiv0De(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. 
In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). + """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." 
+ ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. + """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
+ """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/__init__.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/config.jsonnet new file mode 100644 index 0000000..c8b975b --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/config.jsonnet @@ -0,0 +1,43 @@ +{ + name: 'Europarl DBCA splits (comdiv0_el)', + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + 'dependency relations', + ], + + authors: [ + 'Anssi Moisio', + ], + + task_type: 'free_form', + + data_source: { + type: 'hf', + hf_id: ['Anssi/europarl_dbca_splits', 'comdiv0.0_en_el'], + git_commit_sha: '0dcb7abe8e18aa520cbfcbe9141b916c684912fc' + }, + + evaluation_metrics: [ + { + hf_id: 'chrf', + git_commit_sha: '4b119256e85de9130aa84d87247381c5acb29bc1', + best_score: 100.0, + } + ], + + has_validation_set: false, + has_train_set: true, + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/doc.md new file mode 100644 index 0000000..f880163 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/doc.md @@ -0,0 +1,3 @@ +# Europarl DBCA splits (comdiv0_el) + +see ../doc.md diff --git 
a/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/task.py new file mode 100644 index 0000000..1197055 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class EuroparlDbcaSplitsComdiv0El(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). + """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. 
Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." + ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. 
+ """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. + """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/__init__.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/config.jsonnet new file mode 100644 index 0000000..e97f2bd --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/config.jsonnet @@ -0,0 +1,43 @@ +{ + name: 'Europarl DBCA splits (comdiv0_fi)', + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. 
In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + 'dependency relations', + ], + + authors: [ + 'Anssi Moisio', + ], + + task_type: 'free_form', + + data_source: { + type: 'hf', + hf_id: ['Anssi/europarl_dbca_splits', 'comdiv0.0_en_fi'], + git_commit_sha: '0dcb7abe8e18aa520cbfcbe9141b916c684912fc' + }, + + evaluation_metrics: [ + { + hf_id: 'chrf', + git_commit_sha: '4b119256e85de9130aa84d87247381c5acb29bc1', + best_score: 100.0, + } + ], + + has_validation_set: false, + has_train_set: true, + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/doc.md new file mode 100644 index 0000000..31a0e0d --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/doc.md @@ -0,0 +1,3 @@ +# Europarl DBCA splits (comdiv0_fi) + +see ../doc.md diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/task.py new file mode 100644 index 0000000..7e82a88 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class EuroparlDbcaSplitsComdiv0Fi(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. 
In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). + """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." 
+ ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. + """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
+ """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/__init__.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/config.jsonnet new file mode 100644 index 0000000..0cf8db9 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/config.jsonnet @@ -0,0 +1,43 @@ +{ + name: 'Europarl DBCA splits (comdiv0_fr)', + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + 'dependency relations', + ], + + authors: [ + 'Anssi Moisio', + ], + + task_type: 'free_form', + + data_source: { + type: 'hf', + hf_id: ['Anssi/europarl_dbca_splits', 'comdiv0.0_en_fr'], + git_commit_sha: '0dcb7abe8e18aa520cbfcbe9141b916c684912fc' + }, + + evaluation_metrics: [ + { + hf_id: 'chrf', + git_commit_sha: '4b119256e85de9130aa84d87247381c5acb29bc1', + best_score: 100.0, + } + ], + + has_validation_set: false, + has_train_set: true, + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/doc.md new file mode 100644 index 0000000..79e7f71 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/doc.md @@ -0,0 +1,3 @@ +# Europarl DBCA splits (comdiv0_fr) + +see ../doc.md diff --git 
a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/task.py new file mode 100644 index 0000000..bfff4f1 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class EuroparlDbcaSplitsComdiv0Fr(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). + """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. 
Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." + ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. 
+ """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. + """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/__init__.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/config.jsonnet new file mode 100644 index 0000000..837e681 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/config.jsonnet @@ -0,0 +1,44 @@ +{ + name: 'Europarl DBCA splits (comdiv1_de)', + + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. 
In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + 'dependency relations', + ], + + authors: [ + 'Anssi Moisio', + ], + + task_type: 'free_form', + + data_source: { + type: 'hf', + hf_id: ['Anssi/europarl_dbca_splits', 'comdiv1.0_en_de'], + git_commit_sha: '0dcb7abe8e18aa520cbfcbe9141b916c684912fc' + }, + + evaluation_metrics: [ + { + hf_id: 'chrf', + git_commit_sha: '4b119256e85de9130aa84d87247381c5acb29bc1', + best_score: 100.0, + } + ], + + has_validation_set: false, + has_train_set: true, + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/doc.md new file mode 100644 index 0000000..58415ce --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/doc.md @@ -0,0 +1,3 @@ +# Europarl DBCA splits (comdiv1_de) + +see ../doc.md diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/task.py new file mode 100644 index 0000000..b89d8aa --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class EuroparlDbcaSplitsComdiv1De(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. 
In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). + """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." 
+ ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. + """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
+ """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/__init__.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/config.jsonnet new file mode 100644 index 0000000..f6be560 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/config.jsonnet @@ -0,0 +1,44 @@ +{ + name: 'Europarl DBCA splits (comdiv1_el)', + + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + 'dependency relations', + ], + + authors: [ + 'Anssi Moisio', + ], + + task_type: 'free_form', + + data_source: { + type: 'hf', + hf_id: ['Anssi/europarl_dbca_splits', 'comdiv1.0_en_el'], + git_commit_sha: '0dcb7abe8e18aa520cbfcbe9141b916c684912fc' + }, + + evaluation_metrics: [ + { + hf_id: 'chrf', + git_commit_sha: '4b119256e85de9130aa84d87247381c5acb29bc1', + best_score: 100.0, + } + ], + + has_validation_set: false, + has_train_set: true, + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/doc.md new file mode 100644 index 0000000..90b6a6b --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/doc.md @@ -0,0 +1,3 @@ +# Europarl DBCA splits (comdiv1_el) + +see ../doc.md diff --git 
a/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/task.py new file mode 100644 index 0000000..1db49a0 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class EuroparlDbcaSplitsComdiv1El(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). + """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. 
Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." + ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. 
+ """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. + """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/__init__.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/config.jsonnet new file mode 100644 index 0000000..76976df --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/config.jsonnet @@ -0,0 +1,43 @@ +{ + name: 'Europarl DBCA splits (comdiv1_fi)', + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. 
In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + 'dependency relations', + ], + + authors: [ + 'Anssi Moisio', + ], + + task_type: 'free_form', + + data_source: { + type: 'hf', + hf_id: ['Anssi/europarl_dbca_splits', 'comdiv1.0_en_fi'], + git_commit_sha: '0dcb7abe8e18aa520cbfcbe9141b916c684912fc' + }, + + evaluation_metrics: [ + { + hf_id: 'chrf', + git_commit_sha: '4b119256e85de9130aa84d87247381c5acb29bc1', + best_score: 100.0, + } + ], + + has_validation_set: false, + has_train_set: true, + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/doc.md new file mode 100644 index 0000000..0c5f258 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/doc.md @@ -0,0 +1,3 @@ +# Europarl DBCA splits (comdiv1_fi) + +see ../doc.md diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/task.py new file mode 100644 index 0000000..3e7b7f0 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class EuroparlDbcaSplitsComdiv1Fi(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. 
In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). + """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." 
+ ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. + """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
+ """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/__init__.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/config.jsonnet new file mode 100644 index 0000000..6d095f4 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/config.jsonnet @@ -0,0 +1,43 @@ +{ + name: 'Europarl DBCA splits (comdiv1_fr)', + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + 'dependency relations', + ], + + authors: [ + 'Anssi Moisio', + ], + + task_type: 'free_form', + + data_source: { + type: 'hf', + hf_id: ['Anssi/europarl_dbca_splits', 'comdiv1.0_en_fr'], + git_commit_sha: '0dcb7abe8e18aa520cbfcbe9141b916c684912fc' + }, + + evaluation_metrics: [ + { + hf_id: 'chrf', + git_commit_sha: '4b119256e85de9130aa84d87247381c5acb29bc1', + best_score: 100.0, + } + ], + + has_validation_set: false, + has_train_set: true, + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/doc.md new file mode 100644 index 0000000..50a2694 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/doc.md @@ -0,0 +1,19 @@ +# Europarl DBCA splits (comdiv1_fr) + +## Abstract +*Copy the abstract of your accompanying 
paper for this task here Europarl DBCA splits (comdiv1_fr).* + +## Examples +*Give some examples of the Europarl DBCA splits (comdiv1_fr).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Europarl DBCA splits (comdiv1_fr).* + +## Limitations and Bias +*Note any known limitations or biases that the Europarl DBCA splits (comdiv1_fr) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/task.py new file mode 100644 index 0000000..9358e5f --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class EuroparlDbcaSplitsComdiv1Fr(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). 
+ """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." 
+ ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. + """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
+ """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/config.jsonnet b/src/genbench/tasks/europarl_dbca_splits/config.jsonnet new file mode 100644 index 0000000..9b01c57 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/config.jsonnet @@ -0,0 +1,28 @@ +{ + name: 'Divergent DepRel Distributions', + + description: 'This task aims to measure how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms).', + + keywords: [ + 'translation', + ], + + authors: [ + 'Anssi Moisio', + ], + + subtasks_order: [ + 'comdiv0_de', + 'comdiv1_de', + 'comdiv0_fr', + 'comdiv1_fr', + 'comdiv0_el', + 'comdiv1_el', + 'comdiv0_fi', + 'comdiv1_fi', + ], +} diff --git a/src/genbench/tasks/europarl_dbca_splits/doc.md b/src/genbench/tasks/europarl_dbca_splits/doc.md new file mode 100644 index 0000000..879242a --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/doc.md @@ -0,0 +1,62 @@ +# Train-test data splits of the Europarl NMT corpus with divergent distributions of dependency relations +## Abstract +Compositional generalisation (CG), in NLP and in machine learning generally, has been assessed mostly using artificial datasets. It is important to develop benchmarks to assess CG also in real-world natural language tasks in order to understand the abilities and limitations of systems that are deployed in the wild. 
In our GenBench Collaborative Benchmarking Task submission, we utilise the distribution-based compositionality assessment (DBCA) framework to split the Europarl translation corpus into a training and test set in a way that translating the test set requires compositional generalisation capacity. Specifically, the training and test sets have divergent distributions of dependency relations, testing the NMT system's capacity to translate dependencies that they have not been trained on. + + +## Examples +The task is simply sentence-level translation, e.g.: +``` +"input": "If the House agrees, I shall do as Mr Evans has suggested.", "target": "Jos parlamentin jäsenet kannattavat sitä, teen niin kuin jäsen Evans ehdotti." +``` + + +## Usage +To use the provided maximum-compound-divergence train-test split for a target language (German=de, French=fr, Greek=el, Finnish=fi), load the data, train a model on the training subset, and evaluate the model's predictions on the test subset +``` +from genbench import load_task +from genbench.api import PreparationStrategy + +# Load the task +task = load_task("europarl_dbca_splits") +ds = task.comdiv1_de.get_prepared_datasets(PreparationStrategy.FINETUNING) + +# Evaluate predictions +preds = ... +print(task.comdiv1_de.evaluate_predictions( + predictions=preds, + gold=ds['test'], + ) + ) +``` +To compare a model's capacity to generalise, we assess how much the translation accuracy decreases when the compound divergence between train and test sets increases. We keep atom distributions the same between train and test sets to make generalisation possible in principle. This means we should evaluate each model on both low- and high-compound-divergence data splits. To compute the generalisation score as described in the accompanying paper, train two systems on the splits with compound divergence values 0 and 1 (e.g. subtasks "comdiv0_de" and "comdiv1_de"), and take the ratio of the chrF2++ scores. 
+ +#### Using your other data sets: +To compute the atom and compound divergences for any pair of training (pre-training, training and/or fine-tuning) and test data sets, use method `EuroparlDbcaSplitsComdiv0De.divergence`. To create the atom and compound distributions of the training and test sets, the frequencies of each atom and compound in each set need to be first counted. The vectors that represent the atom and compound distributions of the train/test sets are inputted to the method to calculate the divergences: +``` +import numpy as np +# alpha is 0.5 for atom divergence and 0.1 for compound divergence +train_set_atom_distribution = np.array([[2,4,10]]) +test_set_atom_distribution = np.array([[1,2,5]]) +atom_divergence = task.comdiv1_de.divergence(train_set_atom_distribution, + test_set_atom_distribution, + 0.5) +# atom_divergence = 0.0 + +train_set_compound_distribution = np.array([[2,0,6]]) +test_set_compound_distribution = np.array([[0,5,5]]) +compound_divergence = task.comdiv1_de.divergence(train_set_compound_distribution, + test_set_compound_distribution, + 0.1) +# compound_divergence = 0.4793101280037947 +``` +Each element in the distribution vectors represents the frequency of one type of atom/compound. + + +## Data Source +The original data source is `https://opus.nlpl.eu/Europarl.php` + +## Limitations and Bias +Our goal was to create a benchmark that tests generalisation to novel dependency relations in a comprehensive way, not selecting some specific types of dependency relations and leaving out other types. However, memory requirements of the data splitting algorithm did not permit us to use all of the atoms and compounds in the distribution divergence calculations, so we opted to leave out the most frequent and the most infrequent lemmas, and the dependency relations that include them, which probably affects the results. 
+ +## GenBench Eval card +![GenBench Eval Card](eval_card.png) diff --git a/src/genbench/tasks/europarl_dbca_splits/eval_card.png b/src/genbench/tasks/europarl_dbca_splits/eval_card.png new file mode 100644 index 0000000000000000000000000000000000000000..6f7cd9588f976583c740e9d07ef98d1a9f3590c5 GIT binary patch literal 163572 zcmafaV|-*?vu|cH6HIK|M#r4k&cwEDPOOgYiEVpg+n(4q`(~c!ocG-O;ojRHdjEFU z+I!V1tg3(2>M(g(F}QEo-@w4Y;3UL_6~Vy1Xo7)#mV|=%_~qtV687Wr#Zgc~8S3NX z1!WZaagXIBqUNMzYvSap?_dmOYGZ3{Oy_9mU~FvTXlCnl0p7(221W=bAuOQmmT|i7 z>ZUcH`gnE0BBO+Nw_zpGxa)6iWu-+qFo)d(d_)UVa zJ7FrKrW@{|l)mhNEzU$#>7wJ{A;gFTXB5+}d-%m2pWlb)Va;(e^Q2|3eQ-9>_5+K^N!jy#mB__ z95+dC1fmh5eycH`CCB$TBsz8BCXYV?vi{E;+ggZ`q9x_!y8}43Mgu^s*`&J9W@)0n zzWf0J0kqn!w}I1G|HF^3@XNzRQAtVI^z<~!$H>0^etoudENpCp;W#q0G#629s{b%+ zU%6Cc2nin&FgG`ck`(`?<$q`Vi{^jE5y=ukH~xor!3;rUE2RGnB03mZE%V>@Rh{PTc72W z)Ll^==&I*EiHrDm@fi~T^0ZN)cXGn>ayn|oi~9Tb_lSWhmdvoJh_7M`pBLIe*eVS4 zq|?_fmW!B{>m!bG{DT84`aREmMJATGZ0Tqb(6r_^&D&{Z>UA6lt5oZCR0)2SIFw^* z<@-%g$C!bT*h{y!8Vj8cKLwPq6KSe;l1}IiWnI=drz=#jxLPZJ8+2RvM1+RTTsq_F zt*`K3H_@F1?nH20)Z#Sn1;eBwff^B-{~FFaym+<9hd|wu%O#d~C(|bVcNq`!>VGUw zFKv;1rqmw1v@tMLp?42b&&W3P)$EW{ZjSfneyFk3rPz>9u|xeWtk_K}IXXJz62Ms# zbIP)4em0-m(|hZ5%VZ%@RLvOCha&3yxgKw6ctQ1HhTzL#(f(b9)dXGc0b>3ME^1C zzP8<=QiJ})ALU@7Y**X1-mqV+)XK5-ZTSOY3C~#PSy)tBDApT<1{alul(M6^ zt5W>8M6q9-ev4iM$+R49?r6uktlOLu)NEOC2t1Qk@*^Jqh*jkr2X-eh=BR~Ur9hKI z@|CEih_i_AO0(RrD}Lg#WWT&bkuj=l$xKyE%4qhY6{Ka6JsiShK+%w}n`!zh z&c{JV`rBQ!4*^f`o-0zDCMzZF^uZp!Mb5F~=2|OrA5kk?{cH_p8^B#oP(|J0w3Iu8 zc%KI2v;9LlN)aEgDebO~4J`}lV#;$wMuv)k7UQL{>K@+b|HX?0Hbl0zohEZEm%8*i z&W`hxLu+^!15H68i^vcC!~V7j5^n$T30cUx$c&LxH7oEr=eezUG4s;O9DN%y0K?GW zdy=6Ptp6G7oU@2T22%vH7%!e_?)qR}^fdpJ)TAC_xSUB42XmQ!cSuYtTT^-8h%KRzR1~UWjbWK4?xCfHzUH=2cd6vM<|TKJbn3s# zdZ7K+U5moVN`NEbI`Ftw`eLqucB*s@k&$C%BgUdVnojh8nd1yxOqlRb1q>Zl@B<=! 
z9EG3|SmZb5#cqZq1~kCRp^a3v`yzi}WDYcDdPpKn&CUpqcf;q7aUI=_MJb14=tx}k z(8w8{d`t>uUm2qp@%OsYOj(*C^3(}Fci;GD5wfJJq)7i~!Mic<6}v;Bkw72me%;1SFEv3w_Z3+3C`}=+7?+%u}pz zXP0J_@z8b4z-5*K`~pT;a3L&8A(_`mcZ2k#-aXV2dCK1`&f2IJ%nxD9R($)8L5k$? z7^?AKW%hgPNT?DjQDzOy()^gzoB9p-QfSr6*~%$^B~-B5LJ{U>t$sa$%O9@ZpwBxEl#|R!@Uwqx zV}*qsOQBQQhPRccg5?#J=GLl0E1U1plqvtmf0Ts&^&fMii3}ML5v(A{)e`)LtEPGR z#kt>W;mX&Uvpiuq0xP9BNAvM1KiC4+I5wsciG?B%zdc?(1!Ena7}^a@x=9Z*YT8j0 z#b+kU1XF|seqo>BIKe@9pcbY4W7^!58lb7W-w)Sjjy3Ct`M!mpW?80i6>n^Pm2Yek z8;{PP3MEUy!6xi$OOUsNL>vnhYxZS4Nz&&?dZd;@ML8C%H{cESYyI%|(65Y)u$gLO zfh0_+z%eJSkoYW2qJiKLOw1^QMvdwe*!q;<;Y?`{69^kLQbw+x%S%LXaB$cce_^x&*^7n61bJl!)F{9SLDxQ(_kBwSs4zkE4NyQpH*vIo|`tS%gVZ$qm9O8tl+-V0OZS zKLg!6VBziojL3w!>UN$p!($_)bYx+|4=qe@jekv-KK@@5)*xf5b3osFL71tpNEdHT zURe>v-dBlLiuGFiQo3S?(lAbvGJ_$nkr9mbW^JRC6XbQvPVnbWRabwuQ)M8Of){`% z=N~Dq7r2$9?}dbu75BAVqa&Q`M}PhxH#z?4oxpJ;&mfB2#jjZmLR6miH^g<;sw=k> z7po9(urRdp*8R6NH}$X3V2|eOH_Xxe*Pk2mhy=GVA@pd}M(zm2`H`){;1=rcN(S8w zJTYSBW3H;%rbqjcLwXa3+aoHsS~aVTTzYn0{CVd(p1=5d8DMyKLj})A%_a&D$FXWT z_7)hqR34-bW|>;nA!u6gk%_eAbC7@L_g?3Pg@Dz1x@oq%!=~r$^3O0Dj3pGUZcB=k zdrK`sfM>#oMPef7WB1Bb?s{glUWMPJPnD?B>P~Fh80dG8ClCqds6Ugy_i&?G&q2&N z?DKImVL0$^Qk9QGb)Ac4x`o;au%5eE8yc-T|K&18NX43Yduwzm8Z+I^7yQbUKok8$ zqf+Ow=OsCVVvifvEPUB`wtuww>KDnYFWAkP_Sw;bQ)<%{$M_2V*u`c$Q3mqpTbdn* zjBv2WA^tagDH_9NGQc2;2K>_a(!98Y?&jy% zCJHPD1Z$)4r1E>57UyS>92A<1fw_S@i|)i8OH!i?rtYs{$xF+IF`TAo1kLXnDk{c1 zji1dPTOY2Dm7;If1~A-kc#|XNtYgz1Bz3j-XfVUn6CzvMuIf(3|Xy1dY+t6WX5=rt5fw z-#L>U+GCAr+EEcaFa1d1{N$a{dA#U$T@_tZ=>d>)b)8;!L?LKB5|rGg5Y)a08tm)d zDbyw-%zEh2oMdfCL_?77Gr#nVHoGWH7?F%Eq0A0Rrq5J1qkcQk?tx4Q;pJwl^RcDa zGLMRw=>@u+?k9G=B^I%{AWG5g_*>d;7t)}|VAy^_t-12SZsHv@jP4i;?rK!jZ3|Dg z>Bxc^ovlv)WJPMM>M$)sK-OTxU)5C`M&9+g0Ohu^dZ)pJjdiKSW7G6;u7@~oJg@%L zBWm+~o8k1L%JfDmT3=RnPbnEa6MQ&4=x;ii{7-TkIy;^*Twbc4fLC1Td_kyDow?FV zj(speo;Tz?7X#nBDJY|akX96q1*i*6Z=5m5B=UHuI5;xZG0+S_@Z9=F-}-7npPC){ zCQd{X))LWtI%5CXPzT?)BWO|ABZNDy3GaLqt2Y}ALs!;G@_|9`FJD{M&MiWS@3cQ{tpc~~e?zOS8U3K!>Uqac 
zOV7^j-W=%8tg5MLPU!j3q6U_}DzqVu;0}v&U}>EGb?Ud39C6;SxF8s*LQp3iy`qC@ z4F^dW$1EY_gUgKjE4YwbwIALj{{+g3CYGxSz;)Wf#trneix=jRQr%LQ(qRQRJq5*! z&?&m;x9HYmFnBktsUC+C0u3{fU^S9#5}%qRo`2<>W?_hkmz(E;;Q7te;fTW4Kz(2f z#3_+)Jds~IHSA*>Go{C0;fXNgZjVI9oFx9&<*av^Y5peWpRPpeQhsm+ScO1`PV1^l zDH-B)=6QIV!LrWJvh9-=)dOb|ntW3iI^a$>IwCqM_+YK_G0@^P698mW*AS*UOz!<; z`Rczht@S`zi_4B?r`8A>X-US{lhFN2@*3S4EkPXP-<@Li2R!Z<-)0E}^;aHcFo?u~yF=3+V{n8p;S?JyVtikKE!?9W~ zYc&f6MEZu)$DlSI2P`4tLQzQvZ(3!&lB1LEJLt#pV(0!VhtMYC?td~x>FNVOTPg6m z=L_x6bL(wAZ}7+fByx&PfgrBpN^6SKiK6myu!pc?Zu)?nBohtzhAjY?7HRVuX>(!A z+K2r(P{iz}s4cF}jO7%Xh|XO_WYgQHk|m6%Nx9Dm$7$)V5@A35m__l)BckT|ZYjhI z*K3}jGM>lt6nTfY{G+_}O80SXJ$rjL``wh!ZC=r#0;TRsnB-%Bg-SE&F-Qek3Fis- z5DiTJ;jKHe4tN zviCQQp`$D`_hzO#JCR5#bGvx|gUlrVMP?IF0CVFX36mKDrSo6D9F>>s$O~Z*fqx%# zEe;lX3!jTqG%;$AkL~nQ6G^Z@99H%eGCcf*X=OF&J_M59eTX2k6g` z7Eu41cvDM(d=czvZeD8a>!??o9NF!*NQ8a7GIc|p!d(P5IF0s4dN{{wgP1x(krDFw z$ckTBXdK_gW^-g;24}gkDhhsctC9#JqxGGe!q{t0OxKiQx|fbkvs$Bwdw_f_iKG0! zmsfcJ>*ir*!#fvB0@uZpcz726B2Cb~&{g*@-UuP#{EL!90c4$lBO?y?s*8X*bkYS) zF=uTO$I|d(N!(wJm2laB=mKLh(vG}EBK?5?{ekF@AZo~#%9LG94=5%XB>{#IfARl$ zl4%9XbTpp)WpMAFaDHB}QN=X3AbjShfu2l9)Vo_yP!iSZLjhPAGj@5N=FY!`66i|K ziZZcqIXAK$C-N8&q%4c*OMtDtuC)*8H5*I~dKzO49mxG6C?-gkBCRq;f$3soa12Eo zU`8uVi1{Ths>}Z)lQ!Yp5-0yhO`QLwCQKa8IvPrbRVimv-n?hb5Q}5;6ipEu4c^F1 zB*hU)s!``o=O43->8Z$C3WpGfgZs~aBKMf^a>YD2Qy9~wDc~n?8(L*qP)W~5a^~P9Nz@)8&qr7d_o6Sgn*_KISw{g3cY0s`1 z(n*^VQ%dX*3S0C8T^}Y4x@<9f6=rws$4&ustOUc|AS>_rHs4ZyI7G{tMG!nTryX)R`huk6fg`HMv5w(J^6D8Rw(n-%J;i0yweKH{LWEP{eCTK$HJ&EW~hfB zbyr7Smy5|3m{1xLbm}87d>)WZo3-KUoG5-t&=%K|+!)v#eT^6k*SCcbQGo6l=#=WV z3{sD^6!O&y9Hx461YpmUvq4SoW(ism&8O%CSXXxBzatp5!TrqJCNtWbVI@t+_S;iY&e%m)&6l3pvsRp9 zrV|uP_Y)nRB$p98DkA4_Iap#(7h;EjoG@yQgfA%>^xuSLnR0D2i~Ee2?X5r=y)QU- z?nk9%1tpH5m#b?HZqXsHaR$f~ZN0+1`mctDxFF34q^L)%8yQ$`=H)o?amqrL2Wz=6 z^nYEf?%%pv-;tfaENbKF@)&j1FAnqVVQxkiK~koWo?0xUt{Ws_Fw*2CfTt{3iwzAq zG3>*`)F!+1smvKuD4fgm;_SE`tn`B0b!lk|Z(BkMr%Htiw>{g-E?j`)#`e;+5oa`sy+D;mYk@Hf)x96B2~Z 
zPy5Fv9Sv9ooQwkf_Ep+#`4};$6@npF%#I+X(X~>85+*f&16`kdg+8_!MAN8$^Bq?S za|OX2A1}i^pxYo+V6NFTqhF0eI|&Ie@(EhRK#BaPguclh?Z{|tb^3238AFpFUjb6H z8{&n9AJB>LQ#L~Oq!=M8xe0D6DK-f<(QdZdKFpV}K5|P}^jf{)Ag%i56x5=~@0M^c zuIKrklX8r?IVW5mv^~vLApvdo4!;WHL&t+m2NYGbn}J{5@AK~I=!btmdlOSILTTL(82cK zT0g^lC>cbU>)mj=(ik?b>00A>;^H#jz{GLf&%Gq!D1{;uxgK~LmY-iF4U6~L3K~8) zpu?%*7I~O2JAy&IdpjnG8bwIkDd19^A8)*2 z&gGGjTWRRFneihXB@1cSLV&jh#%>9;uCAD$;gMK;=H(dC^Tgw3@R;<$lG)J}jCn+p-%JFiSZX@?5GnXJ0pBabJYY&t*GQqDL)R3A?LmaBTew*pk&95wvNA;9i8cK#{25&Y7s9dVmRFGCD&3qO3sPFFwd-x`5mhmsBkX> zOl7?)Q9b1FobBOfOc@H8y$$d*1<`L7yh3|gB9=OwQQGdC6kd9Cmu)dArBll7o8uhS zo$dtD89IL6Nb(HAs?UTyLYh_a4&^YRkjp^>cthArrZ?pSD!D1ua>a2TX4adN)96vy zT`W6_PwqrLqxKo=|i8Kt6X7VcmEB$P@N&isa ze~0f?A3TE2ug{G=%KS4cg#uR1CC7(xD`lhy3J;bbNJ+zE;1D1n0n}^Sw(YrhNMHY& zUc1=a^I{h{KYzC8MMv`X_R%K3#N@v6|e$SFt&8KUdC#T3eyIK ze_!!;$_rL|dwa}azseH|TGOwg{U3#4Q4bF;u$GpVqOPF-8FvuV-p=F2N8r;13fwDa zL`tpEsBr7h*`LUi^6`1cGSC07#eHOAEj^5|8L||^!^0G`6#u}2f7kZ^X`UIa>7S zEg`g4Pfnby-@4aIg|a|CIBFPGPSS=7FmgRSIZjMIR-L#ii7!anbAB3Brz z!dC7bw$ciIYJ1YH=E2|9_V8INsVnb7^>t^8l~R^`3dxUGeN0K6E57*qu-;y zVBI{2wI~jWJ7n}jvIZf+FmbU+_8k$?JXtl0gw~0{&DS|=l7N%+0)c~!36YPxUvL*n z7~;?*7efOFun-0rt@_Jb_mEB8*DhZ9L`0xr;Q=P$6ukX189(SBw@+jAM%DHEts3*5 zTx##ryNsf<`mdvXW94XN$mwNslvAdL7(*;{j7|_9rqH_L4u_utF%%EFVM{_~r$dPpdk`Y;l7) zn0R*!y6h`jg|Prepoi27f#-^7z2hS?ey+k>kfu>ul_Ks9I7Qstgk@2)4dy( z+mmK+`=S5Tqe=G#;Vii9JFXAfY+t*lsuj|!DtHA104?AZd4@?jXa`IB^opN3eO}rY zL+KvDnl_rlvBFG?-vl!N(kWvm(xL=#Cj@({zPw=lH!at_1lzSwR2-QZxxf4bSU3iz zei0}+uq{gz{h=<81gPD7?s~n70G2nW+xi&r-Mi|ZaOZ_!^4>D@gPt|GlD(4%Hd-!E z8$Prk4im-1wItK~ym=X4bb{5uuuI~qHM$1N-bg|bPdSamK-aM9<~&Oe50^c;i1C3A zYhN)&16 zfyrI(+-ZsoyYV1rc7idE!eV@*$KMx^yR4vzA4c*Gg5j ztCuq#yLbB$O7OfVH2l?BASdq-w3t#R{FSkmj}let{N|qsIiqT;J6dTI_XE-iIt{&_ z!FOG|IGgSX8mq{BE+DzbDUrQ=?xHnW(%srWdAfwt9{iy9v+NtehJjKBwD=?>-+SzL zS=fczIu8O!hzu>ZO8wso-RNlL2g3wj5e<0Ab+Bt=>~w4gZj-=x2gCMH9>PIfmfoJWv*^*xYNo!ZE&AvhfvIz?ssXiMv{BhSsVCC_6r1fM-<4Y0A$B(&&n_0mh=)&WRF zn{`%Y(yYb5V(2TC3uz}NktMc70H3z4QN2V^OzK-E6 
zOSsN}jhifC5(a~xxi0j)shHK76;!RtwG%;7U!KiYrIkqCC8(N>Kr{%)+{p2PKRqI6 zssM(6MZ>ifzT_@~0s`fm+VIj%zdG)lWafI%EotLln|jD9cwYo#rSk^)UnkV9BN9z? z4%x8%c!9D2s5euB588)3%T}w=rZ_N*H>js}&JEke0_t|P=L!2t2>m+idI)i^Ydw_! z@TSxeDieG@w<;pgFD`WrtfCo!)F+s48)`;o!nsVqlHra-Sz2Ur-1!vxgQgs@u)sVs zTN0|rEjG((s4a0=?^fr;sD-XDI6U+?46UBm9lj~5dhx)e`b1n)I66sET%u={6Ut08 zQt4e%7y8-*W0x1{gi0;hSP6f+Z7qF7E6-R0uM;F4YpMMaZrh!0P7%`fy;L5*j}~mp z&A$AhMCLwSnc$*&aO{(Fg^)(WR8sH#`lQ+P;-dm6vssLNJRHhYnghg!XxRf zFdz7adH^E*uUgWOwcVXLgTp=pCYE)M_Z9^#vzo)JraV~K-V~n~rIV@LJ_8D~GQ@+= zhF>n^Ss~N#+;{+~sy@;VB*wo6b5XiZN(g9nA~)}#eHR1nR9PH#Pv2NTMcAZkY-!;4MH-e(Epbz+3ewnd|y0g<_fQT!Ak zYRm#JYaW~gUo)pHnySkfgh_8cCt?@HtC~<=NWNp#fhUF7y|-Je;BYURIWD2d2tJx z3ee$VFE?JbW9Fo(Jh(5rCbx0$7piZlpXEmH-J|+tB#IuTN1zQZ^cd^~Oy8D}kt3t2 z0*mhbLJ16w?#|+S78ApKpgK)mHaf^RKma1i43W}oJ!ak|;CJ)SiGf11nXR`3GXc5=Ggif)k@ry*tNjWk~yt#*y+>}C>FK$+Nea)5D= zkaE&bZ>7-)jm`Pw;rWaektQ)hm&-UBuxbL~ow4PoFZxDg(7kTOw_`%FDjUb_AmJ)N%OYv336r@&nG~oc8D1x@%=hdO zjERLu_G>UUCQDSs1z!EiBzK48*6EJ%5bhW93>766(Wo`&njZ1H0G9?g7N#nx5r%w1 zvLDU9VYO`xQ8vbf8fqI&WcjO?LmhI|%eB-IlfR+&Ej*BkZ@(7<5FZmOp`n4!iGR>1 zl*1evM83+rE96+DpvDz&`nc1Pt%|9Exhc;NPU180`iXc+LPC~^lO!a=s4auLZxdSN z;=H3ixVKq_wrFO{YX@H^vgXN`f7npUXH2y+T|}iF%kv5HNl=FhYZ!%-I`R_AyQ3F2 z5{?O5rCF(#a!67XA+VaPDE_n-uO$lwhH+|9N_UdbC&MUdEdTco`MKREH2wUxpoL2P z*ZdewMPZ-Zfy3WWy+Q#%AdVUeviSH!5g{GERCD%y68!U<-ovhHDYf(v$-b00ebV55V>f<-UCvX0%8@^C&G6}8q70B}mfpRi5|Oy;qgk4dNF1!Q%w!(>+#1Srv980Yx=hK4A6J1=pJ9cRUzmh)BnJ%4`DocN3ehg9yH z3GM?qAtIIPYSoEujEF;!Z+yXp|GF%D>bQ7040*m&iyDN4Rbai&&wo)N_% z0Bv}WS-isdZt%&zpCW5zp13>JZ42=~(L89U{6s%2&>uB?z3)yocTuf1Dh!AMhf7f@ zCv{x)XkYXVAy~6JEh`AKcAm4@Y7u+yY4wQAcsfvs8?5^SAxZ3H%q3GEPMk)KB}RNrTh zY8_w<^Tu%kFjRDZ>swv?&J2Bknec?9UFYB~oINS|pz;739|H$z_e`UBc zvhN=GR=g>#J#eRG?itsR2kIg4ZH9<89|@C&liszIXbzl*IZfG(63rmV9iOam@rJ7) z!P0WP3raxE&)St(>(4mhmr{3tpvt03*5M6oDodi9uPA8kq0jQ`Xe=jltizqW|CO*xm`T#YnT4H+C;N(Et zNcZq?v&!1h3eJG+c6X+U!n^kNCJZ{V%r6%{Y!`4$54{f#4y zQVsk=zuyCZiR*p54{H;9dlD&cGUcF~Y&R1rln!2J+;Urr-LRUiX#%CU^30x5{%&h_ 
zt%3GFB>2v2#ORI{0k`~sH#}K%-&33OR%a@Ac$7l{j|bjfg6uWX#YZS>$To@2%k1WZ zN=**_RR|9XhPPKA{Kqcvv8Ofmr^z-Oz56W%Nm0~IJD9db{=qti^IuYOMS7y-_Chka z*uz`72tFB_3}uAsm9^WrDA(|EA0oc8)8EsIa_UpXen01Jf4sDHt6d28E|0yR!+F~7 zq9u*25ZlWt)t?3 z06}j(vwa6YYWF8VyCeXidMnR3NYDjZFv>4?5ToP8Bko1r`EHo%ihTkq;zgD54jV(= z#WVe8Cs$Z-@_UbM7JulzO8v!Y;VDele%m6NzDUlKXR-S}`|F=DdgQq0<@tnRicS{> zsR6d@F@Wb`gP=u0MjsUiH4fwK_Z)5BX>_J*Rvt#=gjlwfZ?u3O~60^jhy z3c%g5bf_U(zRr?hVC3cTT)Rv>*)im??T_K)K6VdLe?|dX^Qj7~Oz%|Bqo?ln))_pt zjcgy&6M1ivCq9OBCDx6z(!(*y>vpszrewOv51qh9a2*p`x}v)~Z0h$X-xhCoK78)) zs2ANBk52&%J2FvuZ_9VWHjn81ueTN^g|c55NZwYTdKce?+eU@QBUY=m9|+bQePZY) zFZ39Rl6&S98sE--45;|{5v)5hB~|c$981M+?pSp%ab$D#I?CqQ)at#N?2zrEipx+|y`Nzk{OKZ-5ty~|v&S$F9DvnmZWIT_{Ui5{FDnD_t7eiLINP2g; zNW;J%!mqc*?amP@p@-<7)d3y1l(z;#j?D>t?LPEcw^{9-HL9F7oDjetXroxDigytMaSo9LrCbf=>I^|?VM=9`I(46?vh@7xsiB2Lj#4U& ztqT9Da+TLbAs0z`@JaR$UC@w4!)q<~oSd$Wa&PRg@f*$@Xro=3uKl=cens2+_>w^u z;jWC=_le#4?0WAw3FIz)b^%BIQPyRB)me8|PnBdeEw;pW(>TKlFHYO^((&FuK`3#S z!8;CO&@Dnw{83tfplUNITkp8G_;~Zt&>586 zS%JT@R5)<|F1hagq}6bmrCZ{h*{V1qztxyL_t5!BcDK%NtDwD6aZKT=le@3=YJ2Qj zbtkQ|S)ucMPce6W3fpj8hfoT)Zaar}xRQ(i`U+=pv`sU|%dcCMHdr4fl4Eh#HVLY0 z-Qo8wW5b)rCz|-GFyHc~aJT%`C6Vvpn=`1jhC~V|AF}e^*EE;e1QI_xD^)n@vQHe5 znpBmtY1Q<;*PJX19?ms{)0~^M$*7>Fe|EjQ{Zn&?W0CKZ_q@UCPVKu@aeUyclze1S zZ(jEN*5#|Veq)q=ybNl7KK)!2&Y+cF`8LNs_neiYxW)J5t?6ZMyXRZOt1iBBdefWw zTUYdYna}2~lJ*O}f@_xQuYKRs=u$bV0-*hT#|8A=Jc6(ETYZi$tMyZ%Mq)NNP&;(sOUt$HIHj_Q>Ns+x1`Qx-Quo{7>l%0pQ1fNOa{@ zvs~*us%5AOzA8`pH5_*$cxk+q68(Zm&r$$+=me+P_u48?D%>4WqaP}G68L`a@D)gH z-ThF=Itfa4NbvF8&Vx?w2sK|WK9&bYhs$ih$Em}iqTfu&x z$9c6k(&Cy;_^31UxQ*Z~(Bn@K`8@wdxsS7B%Lv!S<{S(hxW#rXX}$}eUBvUBhsCnO zZrQubvmey_P2YdezWsW>DjOfcVM;jmkzQ}fl{RMRkmG$?nVc)CY*T$R_e{t3e*4r= zNGvw4VIS1GeScBbb%rM&Kd#WQ*(NYf)hzG5Rf0W_!D&Kaz&GEyC3f71b~E2|0@AEl zUo83@-}J5KZBwa9%PZu0Zvldp`R>Hx!z(=dv7J}t7D|F+!;(%&Jm=;38?`z>Wpbsw z9kEo#1ghC>&)=L^5p@SPX6T}zUZl0N?2 z()@&FiU{wq-0+0RL;_!NW8Mj>fCHl}ARpj5;i{}B-d}rz>8v#&!V1w1Gb8#Od7?gP z{8~Q-Y^;meD2{9nglT@EX<{CQ6YUEJc(c(|mG6#Nh99jzNS(j*TwrP6$-LjD-bj36 
z97Ia)1ddXlmT!KX-Txt0bnwF?Fef1TI@|S?J>8MNKfBWrL5c^AYx)oU!h1vFa|iA_4`=w$Z+f4X$f~P9 z@31zYW9_we2lA|5Z%;syoxU-I-0M9R8*AnMYfCDs8p-X&Bk$(Z#%w+M+HjL?b4qD> zSbb^~7EJNbFcB+L{Bpm!QEQsp`FEhdh&!sO$kHj?Ho+;G`FNK@?+;706u`1WI+|ui z>Qr&`A2&>J#=Ofjw1_H~RTP4tV5uXzhSy)mSk@_bBB;$U(iyk6X zHTy~?QOvBd#kksEz-6WDhCs;Y!&3f0mknNJ#O;2+Pr-TiV?$cMDK0wwYlk;OmLXJ{ zMDtfi<-x?$6}!GMv&~35YXcL)Icd{8Ui2R^ajmw$TcRiXb_*4|$D9iQsFu;R>>qfw zGOU9_(M$!2B2;u`wnnTzS5*}h9LshpBNpE6S=`^P+Eq#SekzVh&PnPxZK(K@9t08l ztn&;GVeh79KiO5&YIiN#Z`FfqDjE%edGye$aP%_|2)}T1n)eb$`^e#lLNTEjTf>yb z)-hl<;lQl*>j}bq^OBS`(eA=|ecQ(QGVJMjhzFPz_JFo$Az@@GXlw6;&9kPQL4z;P7RwDchzp|^ z1!H?$T)bdN&kMfkpvM&&%*D5aNnIt8R?lt!1SlB z3|A(0Rc%p^bpkBP7XRhcl9c<0oru6g?D74Z?9SMTgM0v!qJq8@qds#x?6aXTtDB*i z)8{+6%io|EJ_hsTn~E3yVKhg8efoEE0ns;su`75jF|yBWW0D=8?0|rT$1v|G zmP*5yUjnZ^q-$)Wv9)xQ`~&+gG^&$Bgsk@4H{!D%F0f|JrE#Iro~~5E{Ut_}}4N@s9E=dyH!J(@iP?1z3Hey$RZ`bjGnzGTgdFt9x7i(Hs3~ z!+{Sbo}bCsy&^(v7@^?ium50lj-pqj$gRI2Nk-3E>h(b!;;1YsCfQ9Z)InhNy=L;< zG=R3ka#;8t^In>7#SMI6?(@;sZbq{&yVnxSQ{tWlj0Ov#)uV>QDhxxD`hz_dvQ&BO&X-XVB9g?gSHNMTF(*9n>KpMU`8eQ*>W1j!>MMZ z96f&`@$W8|x}P@r8d^q#r1RA!jHFVDHCB0n!+sqIL(h_Pmqjn}W6KSKDW;_%rkNYc zp=Wp>8*RDq9!*uf6%7@?IQg~79lUk*C)`zLp$NdCS7;^s)tJou%l(_*v5>nAm>Txdv6@k6 z9BqIgyd~7vy3Co-3Gu@Oq-FYL(kbuVIh~$KCxFCkyw}F(bZu_G(BQseogR@C%aKsL zTq9Tm&Blf=e`Au2MDWxBBH++m#tMxry50fQf-Pgi@=!q1l#Kf7qkEA<@3-6YWyuh> z%r+bCi6C1PvS8o*MAfbbhSR9AZMn`@q~R}$Sf35fWoGPSFXcqbx0VbtJg zmMJ{g*!Fvg_p{CO93C3?XJS);ukTIATvfW1!s)9EN0dqJAVuCbNbcH zPULr3=Xq-M6=~A4PX@5}mg;rpNXzD;8Jdmq7f=dseGb><`z4IMoYHn1x|K`ZBdQECTl(@C!ochHTfi|ks#P! 
zIkB?%?<|S0PIv>Ug9=wfF7J;Nr#APDFT*%gvY#jQ2n=JP1WaM7-mX~&=A#wkvwQpN zG8rptn27V}PJPZ~O5?Yc_m-;BHwW3@FYuONuB!_yOzNGHvABEB={!r=XvT&cVpW{j zeV^7>YxWPcp!E76rFxPcG)cvO?NLVVh)S0wSLY~Ba%TqpNK6pjqIeG@$VJRL+_`<9 z?UQy?_fgebj9V;g2#pquN z?J?QFXw#se$(yGGV=+UWbbVDt_qAVY&<|er0dq)Od9*MHFknN<1B}kx6V66jJ%cH3 zw<69<8SEF|kqC~rrcLLLmcnN<0eGHeUsg0A;#lYATyady!7fy2`V9X`p`una8Q)u| z0-;b61>U<6}W!h7zvFfF8d#nj*_AdA;UrgyY^516xVl#g9 z?1SD2tW=1BuJu>X_bRtf2#yb}z=*PN5BtW>9)|np8hWkCfHG6r%tF!Z1c|#enF}+1 z=&NZ)iMJebK>;;+Pxazl;VH67V{I9>;biHr6^xxHrtK@+f~g>;gz~vc83m7k{`nQ=h9`SCLjI{886C z(*!M#(ZUT*Qa}Y4Oaz1*BbG9~Zjoip*2eT$Nm!ZC?Dn^}P@O=Wlz78#o}F^8Eso!1 zgOHe!CXAl>QJGcsKV5MTE}`hjGE3ofw@Oyg70p;Dpwi+fwL3H7&gf2`uT)zO#8j@! z&})u5L)>?>(y62+)0n=Ihs70)Qc~F%3{Jjhy!fPv`s@+gv|kY6cOIZ$Gnmuxi6b|* zPa152_1=YGGLkFQ6era->F!Ee65(2HO=0m%aF(gPB$=ZSA;AM$eGv{A<*RQ+(Z|~> zDA2oStD{}|f3K|s8I5bvV|Tasa?3#DrHV#@sfuaDuS6s5-?W|S#$=;2RO-}ZBF4OsJTNV~u`Py7lj03t;aSC>Xpali-uKAP! z{L;_i-b^B!M~*l|rs9^ABs^4x1F1RoKnp|bxt~w#wL>GxF7o^Pk{(nb6_RL5=>1T?`H>$9P zs7czUL9z!nshllY&jcl-qjXv!q!T78_|+)T%im|WLR!NzH*^Dc64vZTaow-q6LvA` zlI6?uQX~&|4@KM!gl#y2ew!bXjCW)n3oEJcior5m`h?!FACpJ|Fx zwgAIMqs#si`yI8d0y0(RHmk8xcPsX^^p?nH@@#u8M4EX4JB4l2Gr@7T+;0aaW4(7m zm#g;tm0f}+iAC1ena3jCBuc+nb)=>ZGpfH$5(a<&XJO=R+F~{HPL1JobRt_)(EDU3 zeda5$ZJNES$;ltN^Nki(^w|3SSgoVsRL>yKpA6G%r8_Yb1&Y@RzSL-!;>tT}n#j#$ zHn~{6Ei!d?6etnQ#~9-Zmwts5o>1LHio7!>OJZbjKQKrR%m_S+1%7-fbYl5)+h6pn zG1&ZOZ0d*j;FZX)uh6B+C~*d!C(QzRC$L%-PTe(hRV`h-`Y0m-Xw3LL0kgn~=YXeV zsl^WK{P6UB)yAm4HABT!nz1qzR^Y^y#?vcA7Nog0a)g6j!9%Eym>gx-Oj48x`N%$&0F*y^W&XeF5EN zgbhogv6FSpYKYq@`=r+iL=e||3jW8q-&|teh`A8{w;K2j6@@$XNSwo;hRjS>{cR79 zdrWAgb57}ka76d{W+`p~7a`8n(lQ!!kQtHJs7guPGSovirzD`16AS?Mib)uZR6JX`JDllsL6>7HJ3FF8^~6&_;O6fnlv2b zk3lPk55!*}7B6x<<@?K&%*f^Wb*eHvt+p)=TF|%sl4OL7Q6UYYuL5Hx>$~TPT*s;ltR}t9hV`Ma2|$p>u#gE+Ult(seTAv7|!ht;Md42 zrplyRi17RP`xWfdio8C}p%ljP+z65JiYW|!$*m?Vfnpj;j|u7{!(z8@NSY4cqo$xC zRAKSy-Re6Flk~#Ny~SIfES`WIF-?+J%M_kF(ivj`&`&6~CpKt_kl7W}j;B{FBK5NE zcX2QP$Qjmow28p6Q;!&__4;3z<4N5xFl`vOU}gAZe0^#&ZtwdkGBu>T!!?3~IoM}U 
z$;LA!)Q)a9dwIW2|A+}E9HH8$udEBsd39%$Cow_9x0$_DpRmWOUZE<=%&_e@Vt8QB zHI`65fI~{{#Tt z(^8nZiNXBF|&0~SaFIC*K8M|V5WZ{gS3YS5THeDV_@(zU{R>jgAc3s z;kQEHsn?*_w)6~_JK0TsaWsewjm>Ud3WW3=K#^CHvj*|Q~x~dhzCzG*9o>Hu; zA^cuz6zkK$;2yJdd*PD2Z&5jQ$XK->scvflcnosIyb27F#z#iD*nU(4f6ogjBNr#w ziw$G9+T-$whg0$H37F3-97|>iMv$t+FBr5ix|4#g$|u{xJvTOB6e9jgU==M(EiJP5 ztz^VlL0wTuPK$J*6qF_;I&JHnSL789jr*F$^XfV6bmNN4^SXSprlUvbBdwL=0E6TU zagQYq$Q!#mnkk-ZD9~!XH#39ode+J-5;nmI?7kf^I|cFX)K=JsAL~zZK}I4uwJNs0 z3=z1?h(_1pK5^hgiU_&G>FJH9#CT(w>^1*WD8xs^Oi%eKeAOYIoZ#WNBR;N(mF55} zs;R)v;A0e7$G8?<);GaetPF9fz&r8p;3!Bj05Gl=3}~QNCPtit8wWOra4? zHOqVW$6fuwVdgOl!zM$fM{!)<%!{*qb)-;LM&ZY4*lUTCDG#MSBY=`#qqfPm*bm}- zL%D_Q(*X&JQ_&zk{+Da3+*ohSVkA?=h~6D+f01I`>Cta=CN$@4pc$rsg9JbW7DA}eo zdLm1(bY7=EnE``ViVC@#*U^$osC9O%#f^=rk{g{z{niy1DA# zh9Q$TYGloP#ux{kVy8U)%#ksLEvhCRhjxM!chKb(A!1#Bw@h8c1%Su=5$W~OZMEd# z8jta_E>2K%V(O$)@9fofq?|VL{hIHwauIHNAEd23{ltmhG))k2|JlNYs^Zu@pMl)9 z<&hmziXYstq8#gtV#n9{W2MwWn;i-GZ-*`oxK5Hh@C4~Z`jXEI6eEpshV73l0z72w zXN@r&rJvt!YyPk7?^crvOqJLcSXgPh6V?*wZV0N}x zqC{87{!YRQ@ai>tJ>=rstul7y*#eNdPFjy7i7dS4$7`FxIQ9%KS@UqgJq>4gAmf)t z?wqps&2Bz}H?SA9?%mKk9XU%ShH71zCNn9UVV*(_U|~ zZC)oxwVn&yzZbrFi~I4`d#6f0Tp4@4v6o7!-#%j+n~Hy#JcQI*-63p$Wj#+V zr01|s*Y>)xaG(`o=#cCIcShsz;#4afGcVt?cA9dcvJ!2%`?W0{V1E{M?1YBJc zHmPXS20{rZ2m*ZI+TXV~d*4KtXSc}|Yf_t8nK%c{UdI(CZA^y*QGU1w(yUHyNh}X# zgfUiOf-T>lb?zq(f?Eq0E^f06Yc}HNn|`1FP4SC&_9dUNZjOaadmL)q^BgBLd;94`ue@e zoqYjc8F(sl4hNZ5I0Qy#X)Ds z)%W+SKg7=^E{ep(vi;k?Ot5Yk6+fsy?$zPNXOJvavqZ0O_vd#4%Cw3^^FyZ==AcVV zm1$>kB~K5UpWT&JF!LP49>s=Qw)oH0TZ8Ks%FX#4L#1W>ANPF1kcf6Mvvbc$JUiw` zeOImY=F?v#DsqgJlw{Kl1#Mg)z1Ji`1Fq{7cYNLIKUuLX-vFst0f0U*%-)1Eyqe}LD4e~1)F`- zw;^awIl|cX42&}0yL4tuY-7BR`zlZekf@k1Z>FFr^Nbvfo zq`|?CXCl3li}^vnm3Lwez4Lj*ND10#&MbxhYRlHaxA3x%Qa4 zWO40h_bvj@_#JcqK`k>n#3SUr15>6!fr<3@V)5`USteNU`{@)XqS1m2v{0!QOtZ4V z-_p6JYI`JtH=Bg)K81Jm5#9DimCIr-bHhJZ8IF6QRWrHnT3+KUQzIR68{(_8j708| z{G$LWJm9M^s^}YiP^D4R-C%H;)z-b=n2;x2wO;QKTha-7(0LD_uEY=?ra-T5DOoMj 
z<(TMBdlwsku2BJ@u#K{)PCF(%U0Z*UOmFy;;y)oW`37c4;ar1s93ApV3pq{RbHK6TO%)dG#bq8-P%~;u@;KObsBg{}a-p?$ocI1& z96L#!GzCGe*vPGRH;hrb?#y`ZJGYH?Adx2TjPk~qfE!fE1ltmax`0DNNc@o>@@8>v zos2)ZIs>?y>2>`J%Ei7=97tt#;GS@C-UQC~*$!ZDyCrq0l|jXL=uAzI1EUWe!uCp*p8(IE$=#8R@3HmbQc{y`CkDZ|p{*zT+31-}u#fiFSLI-r zAj}&~3a$D?jugjO$m!eXQXKAS`+x?xfi{s)4PpF z2BGytkK1Dh1H+XNC#RXXRDkY1SB4Vk4-xh?k#$VAVVC-{c5$6tR$>MFliaoU+eQ;U z>*(_lO8aa`SDDvD{I=8FBIW=~KP)R5^au@^xQD4Yg*$zF5&4Bz%&1Dv;mHP`!R%H1 zT*CQaN~|CBM|xGdWS#EkP8c*(Jn z$UVTf#2*kE?yHtZOqP8EKhyFd?5e^$GJZq}!0i_mlxKnWx|6|l^orViyG~kyr93Yy zCuU>8^n=^p`ojBwa5m^zb0?IZ`ocSSMkP_L&H5^!;eg#}#V)>jj+}0uEb7yYqQLE6 z>Hy96^7PfW_QSvPdBmfvqE72EhJ4)_obAl)AR@7rIde=j4L4i=;pX*q&##U{sw~~f zr9F|IM=Ap*A}vKzQ%HIszfkhnfTyL*U0zXcVA68u&c}Y=Jaw?m_b3;$9!^spNe$B| zM;9DiyO>B(vA6AeZJ`mactfnE6bVnkpNGh4w|yMCa~u-4?m^YoCTGrH^0Ftlhvmz8 zCi;>M9o^@a$o*94HCd1DuLe0qrg*h>lUs*}S@M@(yh5$+5oy*kryV|bRFJB%aCjIF`vSren1VXF##N;A33*7Z8p+wA&{uGhxm zMPKmK*aZLUJj-X*FeIk#aGTvXG3?_oVcJ*oc9 zwV^8yheuQVgMweU?|-Nh=V(bz_BAMO;h3=G;vilyPt0~BR8p(gU(nr6e_!XH%wp1O z#10S5Ou5{nyl7wKY%_ff;oxS~$5ijzaWi+um2-KDu17p5=**>icw(i($PEyk7h{vhg z{=$mgp)P{9G}OKxxUtT+)w%kEg#uE_H2x|^VeSmJ=D3?9Pb0Qqp>SHCnIuNV#HeQ( zFl!eCcw0J9OEw1Lbt4ttsUq)mk2xt;22|IkA715_r~&HFW?X5=Nyi3=8Eavh23wuq zp7d2&y#j@WIg*urPH>)g+I=U}F3CL~BV>#X1;~j)g10rTL|*pXSp8AI2ApfCN+!w$ z71qUbU@zLaHGjA08WyAHXJ#Ir+A8GuQ@Yh5vnZcv1*W%~5Kr&4V`^DZl>^O*2`N>N z%XYzQDj#Ii9OvSVxlk5c8R_Rf_V13@A(HQRTuvdXY|^}VC2;?KxvN!_9R zh^lN#)_Z;uJ1e*N%ljFQb^P_w$4%o}2H()IzVV?`le;PY`kV>IYu|g?pF}Pi#?cSs#aiVSx6=Z6r3SwetwuuCjl!uR%=JQ;ntMx~v zBbo0Q8BYxin*3@VxLoe?*?uq5>~BnRj}@f;lCQL<&X&&AlSBROQ+vAZ721#qH-Wd@ zGintX-F#&Cx`%$D=OVX~{~(uUBv)I6A@u>cKU*H%=#_GYI*3I+&GjPAsDD`R9uR)Y z`z1yTVxQ?8r*O_~*=EO7O37q2hGx*0iKbgt3G{h~!@^oe*}P=1RMQYF(0%SrPUVlC z9G2CFZ#RybEzTSrh_WzJ>bH+bnft{!7Z+~tFuUlGsDHPfoVYoW(O8~$+hZT(divXD zt=I4N)_%s?D12;>&E{mla2K1-^H3^4c$f71A-7FzL8Rjfc9gXqx!+#n`oI85vsJUw zP&7~4H4Fx~XK>52ad0T{uTI=YFr4v{I4ql^oLtp*TKohbFbG# zwkTGHl1Wk~rKrXmcF{EO_{;y<$#!T3-)m}70yPUCl}V;3~d>lO=q&ZAgti{ 
z^>O2K4eO)$^Q2kKX=0760~o1w9`=?NNV;bj?Ur2UW4lsN8?K3(i!SyjC*E$dBxB98 zq^z0T#Z<8?X1fyoFN3n!?Wt)l*An#xzw3z8PEiiX*zdNjmNt1l?F*{H`;P8=L7GDq z&zMdZz5ZL9S>|#)DRIu!Op%G(d#06@zP?wo#~U|^!sncs==j+2*Zl8IC)!*2Zy7LA z54NTstXf`oQY-ag&bO1D6Z#TNhG>XaF)W^!FRyR#UZ0D3i824ZxCB<@OJGQ zCrLJQ(x5a>pOOPte`8||*qG9d-?ut%ckiKSCv&by%%)XNV`vWeMVT8|HejyTM}E9S zRo%(*1vt5Hw9b2B$Z-E;!t!L7BolYEkDF?vOpPu7-wjPB`%?~gc zes6V|84i5tl|@+iWP0RnB8J3U=f}sd$Qs!N|6EOn;`M71XWwvW>)jqid5Q5%~h=>}~C8nq48vvWVMYoNjky zGK5|36OfgJA(_@Tg7^&4_Bd3+l2K4D=q-45gkXyE4kic>O1@fvYRkY2E>TdX+CU2$ zq&FI;1Spd_US&*r`r$j!9%y_#A8@x-9L8ROARKPcJ#=OVptB6BO?+u+%i)SCsh)O3~_|c;tB#|BIje>11N9^ z@bHZ7$HF*=Bga#H&nZ_)&+(1z4Sa(+KBa23A~s-180E_b%Luo%|-cl)|(ysSJ2l8qJ&nRdh zZQN-iwHFCgOmx)rSM-oNZm3y0swZ679j8{=8QZE~TQ@1e{+BSY+tY<{NoLKbTtWnh zcRP+S^vuQ4=2B!_Y&%%}7N~kLUx++%g=n5oG<6eMzL4PcUz`>WViGRLN;@i(4qvYm z^yTMRf`JdnsX-4(&gW*lOToe1P2mGb=_Vrqh0W&ilJV6WU%69ZplT#BXeAVKG6_WV z(HNC1;ct(9`Uvm`(``SmII=BKq@0 z?Gg$jr8qHcn=VOVmKd+aWSF$-A3e=&4O+_a%tUB0e*@%;K1ZnCI2oj&*7Lt&=C222K?3ht3>;b$vk|f?Mt<39QlnM0{*5=2iSI@u*)uh&6WcDXD z06)wLt6}Je@h6i&1sylrW>w}X>7uhM=ANNIyn{4Y^8RZ7Wbo)}ulj1>^R$N8C;$4N zXfC#*NuQXAQajwOfI{GYA(<`11Z}|-+8~DGbnQhFx55GG#RT2W(m();#IoDJAO+g! 
zzl?>xc{NkjdRJIYPB#6B7w77OAINVOw?SppuCEs^IyZzyYWa_UX*Hj5nEox$ad^J_ zE8T~mMmuu2j5I>HDWJwsj|F?v22tzZgx?Ee{z-Xtra*OAy!m;~@n3*KaM*6ke*+{q z5bOTs1OFSR@BdLK?=6e#U;29AJh2|)w@gUV-m8a(ZwXlC(~_z4;qT9mQlF1(qJ;bx zXWkd2i1VQe{^I!t?HFMSa9tFw_(TT~@6m3(Pi_j=pTkd!er9mHocKS4b9{M3Qqfe7 zIt#wu&!Y=zPnJpWTUe&q?~Rr64wIUS2d@Or#gb^M>I{CG;cvPnc|R-upUdF?hg|9qOzdvIyof1n@kVn@|U6$it{k=MK6>!^PPr5XJ-;lFgnA7L@(KjQu8tN+VT ztJW0HfB6FZNBjOyDWd&|5&M$0c=9>gfPV9VuT%J6S;1#Dtxq*!7N|N2-YixObXKDN8N%d+}g*HkD>y}Y2XFvyvE;_&eBWd`t% zkp8{|M}?lBTS7rWRV|w@nv=2oH}ZZ3(SH>8uRsa@_n|ZJ^{=1FdG(=u7Y;#=q9QMn zHDfNCr&Lri#gg$B4-ogr7wvx$CCK)MLMxO6gpzW6F`n0bMsDR&&Gv&XN zZ@q*-40jM;Hy}ka;{3O<6R4h`{{+XzUEq* zIwu&$j{-M1}$IqozE1#TVoWbGKxvk zC7S<2kmT3xqt&t^U47RnZChi6SgXgAUdAU?I2+0X7Sad)7Wxh@hh2T3Qf1wY?r_49 zx{CW)xxzl-yh;&?&n-FVi~+&q+!9F?ARHfWC}-sS(eh?VPJrIq{>HB7C{77gt;sZW z!?ffW+S@8ae+q6ry=)!+=CU`IS&wk?GP?&aP3Z4bmNc<}khpw9QU8h3Yr4kmN$Uv9XW+7fCrOY4<_g?IXLAQkDAYK23PLWCfMec*pDAvrkO z>YZ3|B&!YxK3SnR2!-?sg65)Y5r}gOP-78ad)nXop=D-udQx?+@OJl1#w)1nKWDl9 zC9Jy}{5x9l$mMtjO1DoibuLeYNTbL2qbY0&Sh_v(veMCZmvlb!Kwv4*LR)!!NmH9- z>RbhQR0aiyn>>B_JypVTSm6Q;M|0C<*t4TDxxf7rVM=eHET^U}pnLr?Kj}o}<25tb zw?w?O33x$av=dL^qKon&MJEsr%5RFVS?*tYpVE9o2oCaf&d7D1YWL9ptHHnG{^`0l z{4hE;dd*!N=nA3!2Xgp;G+FaJG z(*omfbB%r|1sU|B*Y^@3PB1R0DLhm$Me&QQZDTfz(*#Vqgm6gk$KAt+MxPA5(N;mn0Od^p~SZB+8CZ;F19SRlkPFcG_4yiYNImata z?hTb75~f@zS~;83JRl*DB@Va~UJu_US(xj2tEsB2tK5tXMRkD$y9hSkJc| z0y^^K;p=+LQNM`SkxRVYm!q3A&3@(o%@kHW1201>OxdA+f|5K0^vZd=$IaGM4i~K5 z(KSuWy^;)xQgP+zVb5W?P}1TcIw;%j4rj$Cy5UPQYJiaWaV@>@XOS{JKcqUXP`@XL z2ORg9Daghbi?f;DjXf0m=8!8~SR{lN?<;r-^03^s#)=^TH#QFVfJJqO5w6U$tE^Zd zl4t1iw{C}_z~V-?Vg|x3`f1hlrW}Ft>8#IXy2H)4mQT$NZY26Up$m4R_^G16=J(mx z@}c%d3+jqM{l3U|3?j|QsUZyI(#uPMIjnGyF6|2eCADcKvyOz@!AR>mfkhVOHxnl! zVhSo7qniFq3AtMX{>AGclFWkUDMw6f1 zRH4r32q_m?&{!QxY~=z2i8S$H|4wEX2kY2S5p)!rle*S*CzKE#B0jP@c`_U+P#!Z#L?rF##Q{?-yU)MJ^)%m2_Ya$`! 
z{FUH~OAxvG(jtTFkG+W7p;&dUV`yBOKV@<0v#F;rI%k3VPz$tCOM183NW~ z)$%g@NdsFC@oo^|0)qW>{BvwzD4OHxs7Z&J zCj1Bcm#X3bUcvqoLZLiyWs=yxi+`)q6f{0fe0mu^+C)V$o#PEM&gmWcc18 zV{Ij`h9qJ5;*QV~d={6(emdO3rCxz76?t&H(P`*W08oQgbj@b2Z>lRz}-pG}B(# z)jz-7HIbyPquq+$$tTAAl{l&TY6F}+9kYSBM-nb`=dwZ*#~h1PcbJrpEsVJ~vZ*#I z(dR4JGdQNrOEpbf+>5P|KaDs{Wp0Y!sfJULZh@CI3g7Q| z8m+;C2s1U=OhAb)XHTfil-)C|mg^Ng4<6n74N)b;&S}L##DV&<*Sq=3ud&z&`~#m~ zqxSDeI!$*FbRWm4Sel>wReGDBX|>+t54&wF=5pKqAkxpNCme0YbfOwKLXs&4#)dj< z#Skv(?4s+Efl7G^w0dtXchs3oVK8A^0)Nmj&UOz=`409JXkR_?vx~kxuHyo2Y3Ktr!dVxOPU zH+H@OWPv7Y?`AEq2b&b6*CnRSm3iK_9~ijcUcFJ9honfi+xLNhFO>CgeiU)nPo>Q_ zcD1Umo|&hI#JN&dm!rV5{K*}n5=K37DWXFAuWUT;g=Na{cBZ2;&d*I@-cGSU6~D%c zO{R^y%8+aXf^iOd5G$(JiHHB}3R*oQGNR2gng?U8)D|)1=QLVJ-Ix!-AecnRh!9<> z|2!q{TI#XI)rlAS=GVbto0y|FA+!e{*vud(pc_OPKCSNPmhP^3oN zS!=e#;c<Zh&M{vm`%{FC-|-rh$~UE zHA2QI{)to|vTD0D%iMJJp8znSnq6#F%kT|lux)l7&zq?=SNX^PL9j5EmR($u>5CWU z_XMMGWhuHNR%0X|lObU^(ld9D4gN;q(yuDmp3bu_Ox&5sh2H{f<#R-4qaGnt?u!(JW7RT{h&kNG*@AMlq)~h|9dxnbEdjIkjJ*5Q8$$1k{6u%{41+}zTQ%*2@I4ZX_VqxEo z`Exx?{)T*{QP+KxhaE9JLn@tTHk1e=wN7m;g$vmHodR9mBm%HxTT^Y4<17-G0zs8k zE5~R|%QEkhjt_#EX0p?@fc|(U$0m%8zk4OKKKg73oy_Ni#{!*Ht#`z28{rAY1rD)1 zQe07-{pcQv=wR)-$?ExVP4N5%HT*rS`RaV|@mBLhwZj&b3SA{S+je$wML{4C+eHum;uN7B>XES+r;naDsJw#y#{ z;Qllhn_R0ci`hb%u8RZQbBLuhkbU5aX}u}l{u+v@S~ALd`VcnnQzb2!kewbF4(4*~};vGEKUGx_IV1;#FZvj;+z(?V)k zLe4Ao8ZJ`2Z*vwqn$06v8;c<{wrc+vnn)C{ThXpRCnmJ)~c){k(Wc z^z5j>i5sd9^1CJsoC~15Y_H-4f9u`t;!goARWXIM=m~B?VQJjNf?fJ=d@_#8cwd;C ze4=0R>{}st$BiGU_#dX=QnausH&)v|WW0l9R@#Fl38kv_dS7c{cIxI9XRTXOI&{EL z?XIhTQ8fLXTNh+-yC8-2_mn7=eqv2kEXaudt{1d%eS$)r<5Tv`SzKW`BaY#fAAX!) zY+W{}EV~pXOnax6?O`_=&Qk90CDZwH4TOs?Y&PHxE@4#Sn{9P%^rE2e>S;$u@Mj2E zE_2(C)&xi1&aAU|(?Mi!fJHu+s=4iGnybo1I=GHtO1V<>yEGdR*E1c#Pc5zt>vYDY zuC+n)rCucxLJYn~$WYY%5(v*TZ&VX@5UrUWz{nw>qJbah!=8CB(lV-2?b!>qpg73BT$Yu^AHb+YCkUW-01$gG zUi71jNm>!X<9!hGcEJ=$Estq5r@n9Sx_%ykxCL&DX)*|t1eDP*ww-wBNRH-G(?rz? 
z1y4uT+Pi#HF+tUDYmI0qFnTS>H!$4OYd6n}<>Uw}5tdM0m>>7aN|g|g68h%<76+R1 z1&SXSH4I6|Ff*M!phKfSTr&^jR6ni37FxT5G6;vJpykSCM?)!#EjCU~%u*YjH7~Q7 za$^Vao1t(|mcjTd^h^^J?*6Sn$zony87#f?F*A`$3V}pO%MnL?5)IwO)>^P{dre?{ z<9hc>4Q8%3v&f7%dhLA!Lfi!Kx|Ioc?#%VOW=+7LWk!TB^ojxYlM>Z%b=q_|o9yB` zxVU%Bgk3s-(zsjGt7lwk;pmIJU@A}0b;&P#8eE}QX&fG@x@P5=j3+H#7t`B)d#I9G zipz&w8@$0UpkFO&Qcjm|)MWKrdW^(LX`EQ0ZNr8nRA#*z0?yV6kYW%BfbdGLw7;2< z73zHqBuHISL=*?OOW4+X+Z$2rt}J2%l>4C=8EXMIIL6uA5+^t1Tx>kB zr~+;lZCgoFl3!lhR&cuJJ;IZKEV1{-KwXE9ErEkL$B)0NMQ znu`5<%KV|^vSQ^E=mV_Nb$dC8{akxrVzZLFWxXa`^yiH~uN%7yil5f>MPw*+BX)Wr zxhaxYRS`+x%^0F|`dJsEm^Wp{2nWgTK|$M|sAQ6cl#3j13IO4dS5U%=E;lBHW^U~D z$PU((9#ut_qj6VYim_iTR8Hpl)evLfhn+V7KN19o>8;DlA{kH~8J{W0#5k=m)!+IO$}b1v@4W79vERt+k5 zv=TL`G^@5TZfvXr_5v5g2(bvf0#JGA0s^ievC;mZ-?G7RAlfn3+!9Kuhm|kX0i|cH zvmJcI*yzKF)v%6Dbnx)-D=Az~PNJ#J#uM#9J#vPUDU7K-uQ8ID>I2C+WA7APR|W^D zOty-6m`9kqk(Aj#*uowKHKfJJIB44W{vhVJ=LamH!IQ;xn$ly?V}P}KL_>o6fxa>H z;5sOtt&H8L8m+XtUViTt+Up4LqX}6?Nt8B}mt!pss<_z^BdOIv(T8FJ$V}##7!U;X zD!@~lUdr&V#_ya7eyW%JS^ zWy(U667uQ<3dXRc(h+69*|VM--6f*s1ikslLb|9gwd$h3$4`%?JDOzeA;f|X_{pK- z*c_oqk#lfcgZxkv6Qk0(Z@cOPcw^<1>VjXiX#+01Y#cDPUp$rD+Y<}ObsUkBG)Lxo zh@z-#n;KOlN2etdiQ`yNtF_NQj4N(|u~f33*YKe;4} zQ58jdVDpRwAv6cKNNYxWcvmM_y!=Buw*D%x1oPl(0+qui<^?|3>YQ{SD+j{wcpqykv5wiGZs6B!jz(c)C+x@=vhul zN#Zl9wlzEH+cdR}}^HGXH%NNPKMA|gU%`&XwE>VNu7!9S<154$hlV^~6Qd30FtBB20Lvx0PLVZg_WR-9gK2{Za0qhy@yZAnlj-Q{Rx&Kg@ZsWC~Z${Z^crJR$(8KO7>?Wi8+nw<)C!er?Yo|Fh{(}$dP6$xeH_qf}{{M z)6lzgB3WK9q+*l9#2h5%`T>4uL1`@E;e{|^-X9w~?j6LcR$Tx? 
zL^7F89rM7vBxVu|gU6_tb);DyZXiEANqiTMWlqBS(%@icFPCoSmwS{qv4Mg3MhD3w_J^WpiO|c|l zWku&B>;H+VwQyi+2Okyr{q~VbOBx##l~resi{;LO8lNbDKEMHvh-lAMZcC7h0r!zv8!R(S6e6Ip)M2G+(;!e3}2~kDmoU zUAnIPK0o^K^84yH&FoAVKI}hj%Jb+(@Zxr4YSAZ1fC+N0)2HXK6UhfH@)8@d8!k{V zNvk&-=5{&UZqI6NJl^mWKQ`VKF49yQa2&CEMI5Q~e)^49yt^hh%51}9M6!ExI?M>n{yWKW?@q22ZRU^9I!gyd zG)DyGN01I@$mnTPj76rdbLx9$lzTn1fBNQ+5?QeX;+?WvM{RpU;{K-Zv8>R4-|kR% zkCXguo7hc~g6MiCcv7<4;sQT4bfbnmA^3@K~p1LMmATum~qH&+y4V@zF{;#!0L z92x@#gZNCyWUsmJJrG03{1>{&DaA3^8$2FicE1LM$=-ojhFy0R{!MU!i4~&Q_YJxEo)( zt;3UZa&i%&?W??cHRe>5W$OTLDH?E!ZCtAf(3cF!`|IGea?w% z*(p1-LmBi*kfWt)%aq8+rn!1=oW}+3frPcd$Usf=wNiTXdz=E8L|w-o7z`L?YVahy z8QFRAY+3EZsDfh9-Yq{brn;h}eG9sq#bd>ZUCb+7wO6R~wPFK`rfF;^cDGvHgy47~Cp!`je59 zX1y(p*hsskox9&A9t&~;nseqJz(y@EAZ!92iZNF2pya`}OKmbMZIWUmE(njg$C-;d zRml;1|7a@Agg21~40Hw7z+p{Bywxf|SPJ@#hgV=4BHJ7IlU{O8V>B@VKRP{eIN|_x zCzd&IU*2wIZxZfCYsV z?mLefG;8}U8@ree!}bz;#$NU=cY%RZZ1$%due&CrPIM$*joQ8mO@FNAS4nVg&8 zHI`w3tt;fL@ow!=r;@+E7I(ddTDv}N%J+=9c&nUo&AvT5acg*Y@2C~3IX?<1B(UM& z`p%I=1A3imRfeggCxfcOXuc2W^UmLMHkR%r#KVtUGYp^ooUb*KP?hlCj!$=gjgd5R zSwzV%2&*z&Bsx{IwU`Qf_|l)btdOx|ON@wv7B30&8{>~b*{N;=PWR_wU3+F>{(1Xo z_O|WuVjUR*2HF_*T7S9%Kf>H&tmL2>GFVMUYsI@g~^fT}^W`*VSzzb&c3EP8j+Elt=pe*3-!xzx)M=ck^qTBRyK3svK@u9WTrZN^@fd zR!mcK`r|QLi*E$NrF2a|4;Q&Qv@@Jaa%HiFC~gU61<$=9qdb86KmmtUM|Lli-+Ga z#o(w6Qg^mGo|&rLLPW+wjh{>r(bHybI|FxH3@)QW*WRsvM1uEf=+*%I5K=(lW9dosW)r&t4S2ksO)3;W$t=FQYJ{x;>tB8~ltxbtb`8l|f&=h@=k} z8=&vvDzhAqSqmNA!}C$+e1*S5bz9!V!Eo-!_X~>;pB_t&4N&@)vu=Ot9;dRQDwsh5 z1TcwX)@|QsflZ>#?9mg*@JPLNqxsEC>7A2{+*f*2E&u964>vTgY4&;{&z>+^+2h9o zwPy*f%zehKh4?RDfY@@4(j)Ex$G@$BUXgH#@Uv>%iT7_Qepz3+jAcd(wvGGqpBcS? 
zTU~C1!YVf0Atly#*TUOl0@=ivx#Q&`BHC&~;v=!7efuGtm$cOc1#975vSNT}y_;1b zjar^QA7yJ&VIR)D*HJwqRnMbpO(F-=G=T0dgWr6Hm?CjInY3Ph~c+Ms_%@FsKp*0#E*%ctMp>2+v1d!x3!&eCe7jGVi*ZLt^!#r7Mkz(i3q@ANVAlj@+`M0pXPQ_WyO^Ut9H=f3I{zgVNGpT zl2{UDAsn9N6%5D-Tqzcy!>FDVc~AV|>cZmSgM3=?chHCV399iq z!1zZEiRldsRJ80vp-Eu^lzl-2s{#$fKKhX-I|fFs#g8>O56@u{phSa3^o4SDeLHAZ zPMoS-1xawfS}#!#X2|P{rFE^@?K9>whdHMS9YiXXX*R}$?&E#i!l=plF-QpN)@ria znc%eXs~b-nDL?d+lzf=k&HuNXi>`T%yGlZ zm}8YIBOHe*wntIoqlLiJwe@prCLS0j6|>~DS#P|`C>ek<6feyO6gK1_UUFzTDP3-Gr+MLO z2hD{FEy)oQeB?A+s+Zm?Km^iiDm!Hv|RZ}q}d;{A3`DqP;s}za!x4hpW zMmbHOs-vUM@$h~A1br2k3PHTT!&&%51RVDyWQuC(Z9bvEg8eSF%3;GHIewVhkQHqt zh|zr-_G{D)Ct`8?W;ZfZE#_@NN=p=VWk+haeX?x|+N!bi{@!|Oeoi>$1!pSm|5F9G zwUK%!LP@Gf1bX6DZ^THuCqN$=-mDu3C_da{)@&|73vh(>Qb8Tna*0}x_#b3_1yEaE z*KUOh#ogWA-HN+Y+}&M^wKxQa;t<^3Jq3chySuv+xOw02|NeVt?#!9YnaMsUTe8-E zmhANq`s7g8mSztqL)V7@0mD}R5x44`S&7|+a2xf@VbZ^F_uV$+e%&9GOMS9f7kYa< zUuW3$p+1IbUGtyaybPzBYM;&8^?iwZrj(il;PYYWYFDBB@`CcBRA^Bsp(0f#fgL;7 zS)TDQZBl1jBn2L}llfkJvQel~Y&Hn+OlF-Jij z7=eG7Q?U5u3?^RHHLbDIVl-mbwxYOja+@kwMk+p<^!d>8h}1lajD`cHt?L(=9X3xY zqueC%djl>6j^`4)JLGsX5kVxosON<`F9g}-a$}3oKU2ZU)s}tOQ{|||gr!a8Cc9T0 zwpA;VHX5-hj3hbPFaq^6=S4iWJxu;>4iG$(+)4y=vb;r11Xrx{6jR=CS4T@AkI2?tBXgf=1~ z`K<;>BTTNU^i(30Evn&B6nL`thWp{_d=z`pZ`D4svOWA;q!p$(PCN#$nW+iS%c973 z`uf!m2y`S55k4<)9;S4!*Tms)d0!_%F@@C#JJAZt_;Jo?GMpu2tt1`CV8FeTc= z6kse%#?_06T|-qE_a`%gVXL65=)A4c>Mqano3H~_1qYv2(~WOZ`KovfMP_#`kFIC0 z89nwNSI5;@OAj=R z>E19)X4a9%j#9R6)R-`nGWT30t!#B9zZCVXFOu}ODq9=4($o7DG)Qd(mQ;-jP3ihY zcsZp7N|&cw+uEJy+F&7%7xaOj9%~CZW+IflisRU)AXC@)*3#S3+?@$E4R7jC%p`w& zQI;pcKPk*B2abUGgD)u0MI|LVBR-a+C(W)? 
zI4P4&Wn)7!@iaRce?PEu1!{;l#%b4v;kop&m_1W++OJ9faG@w$Ux_L_mikP_=|Rhr zH@3Z}rHY-I9J&2nMA~$8TTZ&>+=cmA>~_b7kx#N$*xp zU)1ePZskOJo^KbqwPx1LAUBIKerlIZp%%sk`=hUY>C8A{oOk) z;p@gG7??h?HT8O5mLtwTGt&^O&5Zl$PgoTo=?Pc+MB zivN7Wkg+6g=gUUZgOqwlxhO@O(N)kCV|a2FRlce;_PR$v?`wvaRvFm!YjBU$Zbbw$ zQQk~u@oQ~HC~!rFJglfD>8X+W^Sz^t?6bjyo2w#1g9Kbj9tOL2>{_(8HEsuX`C%lr z0JN6QK^Ns6SBfKI%;lu?0@Tyzx_i1%Cuq3aM~mC4y?8Y@oVbiSh}RZd#>5}mzCHT6 z795u-5K-&HC*RRSoyij%)5utfqOl~kr{{vO=r*i*JqAY)C#n2{IfWjKA#c)sz*;z- z)^}igzoA+SZ?3Cl!t(~_CAZKP{^xkOq};)DI-Zx)k{4Sp#m*@oLvH)3%$&v7^>cL0 zlpa(Pgt33+AGZ>Qi)prdvLFdQ5DPsojw>Zjvw-xVUp}xa?-F+itlr$fgiDxE$D@>y zW#!kSqv~>w6lIIVQc8aPBr0Y~kTCjt8p@&eVrns9EtBZTEXG}ERKMiJYpW?&)TW3l zLoYW$L{5oU18#BRr3Vh6VK86{E4#s~^HF|;s2Ykq1R9wRhV|_B?Q;M0P0cGAm?Rc9 zK-c0japQ5~86k3dtqY-*goXYRi-||8W11qwBX^iJ&M-DMmsqQu`vlQ;uZ~GxWd2FU zBs7;+A{5}+wi`D81Dyxzn5kJRCsO!0lk0k_GfwyBlT}(T`v9ua?YhuLuzi%1_>Dv{r;+X!kB?LEU9}fA*pI279dQjjIg@(_te>t=ehG= zQxNTD!6YUwA)%+^e(fJ>!R+Pm5zK#7OFeU1CV2smv<#nbKXZy*GpenH8jfDzUEPyWBRAU9U(G+YOMMiR)c=Ii9F`BO{`dN*g z>+7IxUsmj2DlMO9wODvc+e%N{j}YS$mVhkYyR(v9QE5_;D}8UOJ(RUIM*;WV)>P5~ z*9@4-vh#g7_Gw$L+kL92DX7b`7sUbthyEXDxfN9a!#^ez^ftEmxB4BUcf=Rqvhd%u zvEs$~%h@)p_!7xO`(oqcCtmtsKnlkuHsJ8vKdp;?6-$u+^Bfe#hvSED*qxJq%@zxP z;P*6P?YgsWbkLPWE_6-RjqtE&4ehOX-eEe7l2tDKcCt&|R;%?7Cx| z8H;+ke}tYTXWm^Z)8r=E51K2F6YXG6tTJ4zL`J%Nl{xMfbShwVG23PPvefe-pmbYTDTPjkD@_i5#OmG7Qg2_?Cy7A2qRLa4y%kO5y zdXgIGYVaw#MJ`$<{|;B+KQ5bpMotnx89~Ppvz1VeNr5B3u!UNanPw0B&-CJm(#O*M zXL|qrbo>@>_&<~MpVguv|L?;6y9Qs2Vqw2ev%foWe>QrVQB|d6Wx-aTDNz3|pNh+mkm#Y;qCXwB$}KDh_is}m zN2C7eGGsS#p-PC$lxPWZ^|41P8djC!yar4wRyRbBX zAbohTr74TiKPA?>`%H$yVn@t8i!X4HEaB3@KY4zAQF!YVwA0wF*t*|MB|ncLvMN6_ zt6t~5ZO(aKTv*2?aR@8jEbIzpBR~SW53tYv9kD$x+e2e!HwKMAIfpu6P2h;VyI+3AC zj|C2B_dVf5c8Wj$gY-RARd3#){LkQn6xsAhR+j(HHD&Ezg4ze7XIxrNUkxr6TY{UB z7DsS}BCUWeD52ZuKG&7!HGRpWiOZBMQv#O5-EaH^40EHe@($MvNPa`IIhVTk9sLh? 
z7~9g~LYbOsa=rB$GR|bG8GR+AQNLkxVAphv2 znNL2K=?42xQziwaQq5=yG`M`x<%vdeDcmO}$7PP@lMUE<7!H^H4lCow9TYxy`=I?b zoPLELEVy=wV`(xOJb_-<0HMMx=qP+x|0}JV(^3f2H6}bvFNY58ShU7xe++i^X!?Qk zB&l)>Xmx|b)JDe2Z;`HgqEX^uedsrVI!Bh!{f}BsZtk;{yrimGZXC0+IDt0E4S@oa z0zI0P?#Zcfc5(-MQz^w#ZkJmJPPD)V+)DR&4q4xw?7);TxF+lLUPE@yAcvI|VI2%l zND&%ac@ssTz$YOkdOf|#ds0?&($H#}L3*ksQcJi1P^iNA{w2?h=OlC-~1&ag@}3nz%fqQl_UTM%sq#nr*OuXqBO#5cPJUBk1rGaYhdujarVA zl2KqxwP%}(>z_64?jT@icb&k4M~OUI$haggvPeiz4?3M7aK+$(Qge60#&88O;rAK> z$VfP%GTM#EH5F!y9uD*AgqPGIsRp3M5SCMs&Wk_SpR;&f(P0OZL|%EOsbyjGVkULk z5+ZPk`~suOyIB%Vg(d9->|XUhbfA!1(889qB%+f$6Z7ZwztnTFPs|bgCA!R8-gZpK zCx>UEWSZpNm^Nitksf_j_vv~k@cvWTsIaaKqImBicl3%d!i2i2q@*IU196zS`=*SP zv8NLWO=I4e98~4M|F8P(E{Y;7==ez`iv?1&=(a2zZwY(U5?z^3l^$&2avUE~n{rKE z+p}wbUP)-zvFV4N75?@t(&^LVxFn=0-H<43Un&PlD^Tk*7(pU@d4HW?0<%=QSbL+l z<^#&$l>JO5L6O&RVs`wBO;*{S-d2?%Q#tJ^<&PnfMXL_E=h4@k6@D3rL+2%BSN}0- zoYj?l_Dt%cinKCkJ&?mmo5Z+Rl01A_gw9OcFRcjCx8`@Ee|Yj;(Z=*}WtPMyjYj-Lk;rOsY7w{hf>W(SFUIPs9zD} zN=UX98A`;pXLP?DbJezgXGowYp;Z`MImA)EpfRFB*Z~|jO-EYfmU~WmyC?#+%XZX- z4VtjVEql}$>3u;1yaPBm>J7L%G%qs zNaudr1YI=1Ivubuag+f!lR-a!_oi1_oCQN#Fsy$yt~y*k5E)Qw+AeuJ*(j`C4BJVHkpiID`=w*e!LVq-rFhM*keD~TgW5rDu}=0Ae~=ku3AZjU z_C2@z0mVivF>goDE3=M_^DA%2F=LA2(e^u?AOUcCx<>7=KVR=0Rc#R#0P5D z{S$J6>R(lfOZQ3jmOyKUteM0(ImSoJ+jO$bQ}*);iF9@#ir9gj4b{#_q34+VirXsn ziWD|Jtf^vaY*}S{#N(h_`OAIJ%h92XbLAuA`lGHv_NTw-!0zh7x`Cq-cVla9V1k2zci^sGH0rMqwdqTakvBt>JX{l|KxM8 zZJ^1DYbn|K{6KyX5$UjO-PkLaa=NX_9pvWs+47V4nfW7~H;y+%WUGN0!FxHb+&sj< zjc7d`hzO0R7Mbn^>w3&rj3|7qep_94+UEjNxp$2}PJ29mWbnm{=RcPz9)boBl7B}ZdD4ZWqW-#dSCD+{z@bE!Up2>;1yXm1%F zY|f<@%MeEn%0W&2AJ`3KtuYL%OycZT=Q@k@xeDRcpF`iV3`&^{pZ>tI7&aT%&jT0n z)mx+5!-7rvao=Tg*5v??Q5KAVH$1!k%%(vnpvK@IPxV%dUa(QO^3t+MPB7OT#F#qq z;k{4P5BB%IZ0YQ#JYh|Fabv!8o`)?~DO5;gG+pw0;zFB~EfOJ74~~D!2U%ao5ViT* zyq*txBfmvc2=G8_wl%zLohNT#3#zQV1H#DvI4rIpGh{{uvZN`CRzfQBf_)zfxVP+h zTW5lWE=BA!_P+z{O;*qJ%Xtj8Y?@+@#${Lx*<YmJiIpA{4J`SNMaxt zwXfI5$e-C-bJ13tqO6JEHsDZrm-%vR5XN7(8C3RmtWZMC0#1wz~YX@B_*F9?G;|tg0D99t*ba#t4w!SRezf0= 
z;LY?!V>?UyJS<%w9g{-E*``!>R4W%RXe!G*J@zn>lQz03&zlcZ_R;6{uq)_2Bej2^ zM%1gCPvgx<1$F>|t4=&{g)wGgArjYV`wzEGoxO?%m6Xl{jqzlG-N6R?Sof<#W^jcp zjoj~B%yAUg{F&}wtzn~vkcG&NzQ=ccH#Yw!@h;uWazDBf^_XUVaGifFYhcs+AWdd` zsX(pFof67~{@$3u()yR1Lj{Fty^8zcI~(u;ufTC~jbeOtvK$Y2*{4|eoz)8+!j}D? zn4_uP;CPtCC5o5FO1u_G)iNx>WMaz+-pl2^hx6#8ODZo5@WJp?)!UWqit9Y6B+@e1 zry2V1qWrJ11!Tqa;6Hp6&!;xZU2|_=y1Y|@^-!;A^FfPB#j38}xv-2lInDL$SO2*( zm?+1%cd&z!DpenP$B5CymJlD%Lxr3}chksK<8^2+HEedYBAx%S6_;$o%5eRaGw}vz zF~;#xsH>^`0EHd7+Ym!;)oDZ9+Xeb{)d>4QK;>GsgT7`BDVY`6W-`&PU zc^)PdAz%rbpWoliT~qrd*|?I3ZQbQPrT<{ zrlq2Q4e*g7rrgc3SJ7|0BCkBft^{4aI)$Q7`RpMCEfSHH6K(rEuZ`qVLqTni zb5Y{HLUSfFpljK)2RDj+$SH>(^CDvN0CiU1?)pg1ZB)^N1pT2QrF^|Li_p`S!C~v6 zLHnzJffOn?n*`f)+Ou?t-=BH^-9D1B%xT#el#8cu>Z9!WuYr@1y=Q-|nhg1axpri# zLkazVDM8l?+tJK4?xZ$?b-i1f$Htz3og$!$5!D6ZzqtT)ztfI7PF8B82rzD4A4Op> z&^RSYQ^nCzpsDxMGIh#jNtZy2WW$zglrvnYd*H=3FOQCdAEDwQ0|!07O>k;OO)vSf ziE9Egf~<-7PZv{{1!s%f$0|@~W`tdO@zGc2*Ri6<%TI$Ty(%|FS%f{ynmv_v$56r$ zyx9st^1x`*Fe;B3_PtEL@P|_Wt5Ch`hsorz9A;H96(KDnT#s_|(V-u;a(PgWWehSJP=e+CYuy_fa*#zQug!r_gWAOW6iyZoeI z?#OEXI;$&4@|tk=)Gq?(QR_di&UtPv%(+m_cd9A(89qK56%hqKI1WMm1d$4{3RwBw`Xa-e9!9WUgYWcX{-f`4!%V7F zmRetaMeBi@f zxLCqFnPS`}h4kFu$s=x`Tay_w87~E-!hbaGRJg0~it;GSSRp5Jwq%fKF3RMrN=lqn}7`D0p2c`9#U%wB9VC+8Uu+bCV#%NC-n;633jEZ@7dLXE8-l<@ zOf?&Pi@uWVO*emWw}fE3)E?Rw?TP94*1y8d5ksy_rq2y)AzUpkig-IwZNhNge#c)` zWz(zt6D4{5#ST-Rnv`rT4F5){=0=GBdggpli4y>Bc-u%#OXcUE!DtIYI{Y@d_lN%} z6Qm;e*v5aInJNh*N{6^=YiPSKhR-&WTC^nB`Jo^Q z%E6u`_3iOv<@{9tpSa3t%cLNXB7nZTMOJfO%2RG;zfAj+_gwzj%g& z-AICRVO4DQiNcm+bQ{h<5cu9ixEq#5Gc&UN?7d&@@tfyHCeXzodPo3HG9d#yw8#2; zOvvHsQFebPV?jaB zC%48z1+wWTeUe^KxZ0C; zri@RsFZ5m3PU4I&dcmZHm|^?I)6gyNbU#Z&N1C zdpe>I<8vTrXnf`B?DYjga88@vl0D973A0fUIJC!~cI6*fAwfUJmS%sC@YFT4dvhcB z|A|E{tTW8O*w%W|A2YQ+XMBAw175{nYWuzXMUWege7*>w?~D~Uc>AVP)%^RO54G9K zmDaw0Q}k(~a=-aJ3(Il+k$WBFir(|NO^1(ca0YbGyzW~cx3~^L;Tp#i9ZH2Hw*Ub~ zW_40rr#nJSJcCj=;(pq+Z+RwD+tBkC){8gmpv&Z=n~W>S#P 
z-r9>O&L0+^!sVzLh}qU6R+J^7)RiIKpLB0;0jcpEQdyYJd_n^&$Z33n;^$W?DP}~zt+^cly+tSEY2+dqa<4&`1&m@M4a-kwTgUO~os11+IPttzCVY6DJ{fLHbAY_`uW%a0 zevRPCs_%Z(6*@Os^B)#}4Pw3A*@6;2IgUMiR^<0)tLzTQ%VIu|fgiu}zdTywEL{Ga zj&BOy(*KJD^u=_zqW<{dI5BCFOuMtHLRdO*1m7iD)bC7Ix+4K|E3( zz;`r=6@58^4+L-9nWX9;^w|mcyk$DH_l0z1j&L4HSuA7abJW~L&DvJU7^QXn+ANnr zn6#`|krhprTg&*Mq9VkAgBv;(emhrvbhv)iMx~B|?@hP>(iq-Y|N!2!PdXi)_`N+;9$pg;pPIRD3^quSrH3T zgh#Sac*fUC3zY7vDxO*w`ucxbm4=>~!es&14p`8!IV~o%9O*zK?~Om*lh2I}B#~HT z`Fkw%yK;5l24(rjq>skTMt%tG)?AY|PensK{vOB_-CQ|bOo+d4KRtErEzdVKi#H|O zNt!tzce@c^xm|XFb#IuMA`o{bkIza{SW#9cK&y;&(tXMla`ejv?1k9pVp72#YMFvF zyPDc97+@1P(s7#JLQ1$AYrVHQX>rppk0@0n#`X&7I;EtSYJh%H=5)Otf>qgL!!wk)KSK4tTBv4~6V&2X@%ok`2 zmLKOIZXF*lAf63>CO>~~f^odK4IA&QfTQJf_Pc5uh}>bp;83xk5X_4!&l*vlR0qcK z-c`=ix1Q?+lL>hzGTga5_91l&d1ill2ir!Mg<^&#rGsWXRKH22q4CxaP1CV=migSN zrLv2>JM4j4v&Uv4F}6{G4Ns00^#p7dMsV;2+WCbC@YD^kMchoPDma)Ag1DwqeBjWg zSGd2xbEZ_c;pZ!)4N@09UzFHc3+(QW!c?H8sP9mo(c?8Atu8MvO^A|vpioE;Hjfvg z$=%m_OMWEigx|v|ii`7X6mM0?!aOsa$MfHgD_Sq4*~j#Az2EMa?(u6l8@6!&gESxs9+6+2ORzIbNs74rS&P#++1 zYI*b2HI7sJd$09A&6UHQ7@VQL?m;%gdJ8stea>wKPXpM&c-NQj!$xhIj%MgAPcVKQ z4AL#=2=(^Y-$$f(q6v@sx=Z#=j?=7tgyGEUB+&Nj5k1c<294D8B$2)xgclc6=~2oQ z7!A9pedLr-r0ez#SylS%ZkIE)O}4#)S{wh_lbI$XZfu0lgdxIX{ipuZ>bciqLT`N; zXeim7@XeVX8t+DB8&IG#(;g&tBG;2lFbV2GG46k;m~q3PKqV<~?GXTH%0v7tdhDJjOqq*P7u**@S3lrL6>4`>* zg&{5#Mxd4%L)4LyB3m-pCg4fGhlhlVh2BU!dBnnoc8krL%x2Cg^%!+u#VuP7Y!Y`l zx?YU?WY5h|ZyKiLrSV0`^bXzDAJB}X2z&_K9C`V}Q#Qz;HWU2yS>PdWD7$Pt9e;6z z-vU3}>8^*d_2<0#-29gbKoJY?S`;LVuC`JvT*A0Gl(U>ptcRo<1?P0&W9il8vOhU* z1U?0RIB8j6as>Rzl0reh;t&{>^$pl@A06G+{Pbqfrv2x~>JOdUH3prt)r|-mM#Hoy z1QO%{w4c&amcNmuz8?M#LJhO(LtnX$<<)9qe|(?9pZIbw^fUVc&G$#}CIz z_Og~zLrW3A57zfwZO&9!Ap|*ydpfn@FPtjy8?*|2YbWGV2t{k^e{N8T|5k;S-1B>N zv+sJ`G;BPypNBbw#N;`7Gh4@ojG8qZ7L7GzRfSjC!+mn9SoBYtjJh9hPoDdU0*z6e z3@fWKWCi?5Fb1!v=lAlpBG>h`UC?~suu=b+tARET7Uztf-VCRdwA`49n)Tn@Lt}3k*rx8%^I9IifOhIX~D{9ZhcE6MA1vEEJPI)ePj2mtp?EFXI-7 z$#}qrhz^|tZl_uVv|u>~C1jkf-a}Xa21asz?o32ogPeVcs;2<=sryNE)z75#kSpH$ 
zKIU%{MLA0FTrjXc(Ak}L)N}e?RJTlEPy*10hqAPR4_&t(i-z{U>xHiR#f^C#wpLbY z-M9UAD6jzZmlwLPFTX&tgBLF6ov()u4bJxzQNPEsMr9oKVqG?T1%3&Y7PTn)oHzNq z_mF~`H8JdhJDyE798u6i1npsk)0JU%rb)*UMm#<(K{1IIIlVE^JqlJ8gqR*n5+hH- z#s>&OMl!eilIg2HVort`isvOqo4UA29{gMqEXc)a_i+{hw|werVkx`x6q%Ea_TfYt zix&Pv09m-K0u`OOQ_XP3#U{JW;enC~D=+h+0E|R1DrF1_2VY={><%lsAd|Zu-l*Qs z6LmWC(3sc*Zez5mL4`6qHH+N0C)gpf!SM+eWBO<%x!9kNXPX^DG1~86&#LN<3)4GQR?E-$Ub6G@(^T)C{9d9cF9QCr%}IWZYRu|xmyKt1YB(aC#1YH%>SVa~Dqxb} zyPU*(9_h)TPkIkBdQY5bZJ9w-6nQE^DAezR(Db~?Ml*4rGL4FBod=`;7*5^gp3WN8 zdl73!v(jXigDtxQKXOZ6EX`IFG|m4S#tPGold7eKX+qnXoTTm%4T-&PO;emMKBFLS ziyc{b6YRvSDhaUz+M8Ht5gl@^4WHncNImds$P40ZvWQ2@j*`ey^x7PYuFi>gC&M~u z@|W;ffrm~Gyk~9>7`SzjQ47gQ1fqhPsRIgrU2HHPPFU98;EpU11pNI=Eb^zO7!@r% z>RXHDIR+u!&!`Z>D8#_o@MS6rcG4v93^}TC&zH{3YZF(aNt;7un@PNig$`o4C{jV5 zRMQ{uId8Rh#1ym+pIxRtNGafPvc`nmMN=N}_DH8D3F|NQ8mL&xu+;5Ye6K}C_B{Fg z)4Ns427Uow|3lA~Qc2>#$4~lJ*fp2RFi2?O`b9V$jR5-!l(O0Agzw$ksM2zB|6yt>xwtRgZ1KNp=^-Y#XcPO7Hs09` zO{Da(&Z zfY?o($Po6Olf1FE71UuzvhO+}o$4tL2RZLY}2&@7gAch@5YcI}T4b2@uzy0GD8=J4#O=-Tg~tScvilzV zyiFlc_!>pR!xmbo^!RYr^rQE5$VWZdp^~q zE+E?H>0&8Zt6&RYc$cWsZx@ZPjO|`xl_zx{cQ0u5#%1DZHC5p%stM-RVWj9NIOYll z4HJ0=v{g|PFBweUd)3&C)t+{MYC2V9%%us#Q9mO5DN?3d8v%^dQ}KyyTw_};UcdK$ zs{iICfz4qfNOjN;tiAgp4)e_4%r5Em7DF^`>5*5lBHW@Q!)w<5xXdOMNSf-ie%|8m zgACy?xELG8vjPl<_W17`J~=Gh%*mVT=3jc(+{`FF^?nw7*do8X5b47xajYlY{3}f~ zP4qMA3nm5{+E?NW+%nC#Ehe7gG^v3xE}o2gICUYOkZ8T*1-Bt7Ieo^-J+k&|_m8Un zbxG@010Zr9;P{gKXpW+%_pQD*nIxNu8Vmu|5Q}1#^vmn0D}!$;sA@=@^7gp}>8b$+ zeZ<5k68+G_jq1Ta{I9J!6ZJ+y>uNdVP=I|56!GX{&Rjwsy3>rk^&`AE(MZe;%aM_% zvcrn&yh89hpfYBT?74UzCT_AL^#08+U}HlqN$`SLBj#Iy*|32ZM2tA%HP6;+#CSGP zBgdLwLaQxiAp`w`bz}9VD>v8c5rsVwRh~0MXhD}w_>gz(NmY5}>E(Kbh0ftZ2WHYE z4D|Y7g^2N*2;%odppfqAWRLmeCW`aiXm5UQMIcPmVoDX9U9uo4zwqI#&Er$@KFy)v zD~*FgS+?H1^z3g{b{*rd0-bS4OYSJxe3^Xbpg5dInR1S{hm9V{&47r~`D&p3$tpkO zLSAl^-vaD9S$#-B=qKM}rYJC~r;(H$|GTaz5~0c=fABPW(-Nn#hRbm{lDd^{Y>ELY zZF4fc(H*Yl6+8Peh;6ocNY~c*&CHL4l15G~uv_+DoFTe$TW?diqi) 
zkL$l;#_AgCZ(F57^Z?CSe>?*~XlJb+zM#H>&IYv~dr8nFjzK~y4{^8^*IKu!7PgbH zIxmuMUnpPc!O>x1%e3g!2wgM%pq!{UUUUjM`W$X_<^ieEq%Xhcy-GxP#*N3UN~3v@oey`UwKbxh`F7(+_BJ)s0&1rtIqp~#^bHDP zJ4TN+4-Otb+-S$tgW`q3z9<7FGv(M+oS2ab3B9J6n_>IS(BUE=SEAQ*>w$f7jzOH1 zx-j9SJ={}t9sZ(6A{xgj_S2H(6kW>Cc^Cc`>PMX;QvQH{RQy{wamp1A8v!gIa-hsTo?Nf?gsFTJS@UiSR_R$XyGuh1h8d_zLuh44;$ zB%;oHo+5Z7(;ad$4_9Tk(gbk9z03VpxGA-(n%^rUo{Y}HteQ)HJ)xc@%B+sy&h1Cx zpeX%8(WbFNYVF~!djktegLiYKqH15pcgNZ)iMu@0@(SdeNq>N)mKQ(}dK9>`?S3I$ zG3hBsR-@m;#f3GtNI?5V$p*%+KNL8nyLgXiwZl_xKl(ZL(0|)zGW~v~)yC7Y{ZVj9 zFGe+0M#7djWRBKa?{Vb~$aDoXQpH($?Fwz+c28+O@dCpY??(o`R9PZBWwK z{4q7i3nH|blD;jsH0mcGIIPg=cld`40GoCBK^@}FpJjrk7w8*Tge1VbdQ`9KCilRW zi-n(Jhrpz;fI+0N%ruwn>aaV87{Kt*Puz$ElGs3O$XeJyOX|4gTEziK`5~7BJ-y!E?h}%i5 zZ*Fjt1?Xq^8NTflh{z1FG@}+BB ztke~FB|RBH^xC543UpW@_Txek-elXlDG-E-QBbS!)s&MNL6!CM)x7!X`_X^i0fmnB zW3U=3HD$XJgD&BEta+oJ2Ou)rFz$Yv)yTt(Y4-YSCv33?Ao)z2nsT=wY+*o&BtZt| zcMYlUmWWe-odaOCLEFg1U<=}X)N3=385R^ovDI*;Zgiz1=&>KVm38b#I+taM)<*_H zhr2=(H@*{dx-|iYIX7rE2dgG_xMpIf{~@XLzx}lU*(2NXjw#1{PJa%5oJiP;=r*m{ zDEOGG?!Va*GERif=CTdm>E{9r5$;#lXo`6(hHgz#B=p`K6QP_FoFhv#A$MD#7QH#e zH1j(|KOFWYju;o3?)*I>lN-F5k z2GSoU1)37K35kKRLs zZEXlFH#oTRV2o}5n+p)Zzy$eD4?1?Ld762wl6k;qFg`apM{BC^(QL0(B+G!xElb#u z8&lXh6G)c-+GsQzA40i-2BDe@qrxf*kIgOE>bDvD)~_++uEX0qdGE()pDLiU^6>DS zt9y(B)dQ6$CYLK)aww=O#F#=?(Z#n9O;=w!W;MKQIN9G>&p3lsCI~i`5)#dWdNiX*GY@&AB|7-Wq!0cbdIk7RwwXVGNUwDfZjt zRZhXCNz6-%C~8!Li?Yi;Ka24v?%S6`_5k^-xN5hHd<9l#Xfd31Tuaw9*6U%rrGo>f z>%_-yt0K_ku@D72$L_%dXj9@-FUjCF?T!m9SwY#Bhv(De@ZOkm6v^d^k7kvGHP5L9 z#zzrCVFDJH^O>+zXb-h*iiU#l25o7_mO}Hic>2%>y?3*_ul9@coX3okkM}w~n4Q0Fcp;YhJ7Q zkkH=va}c#4PvPCPs-8&4HILKvEFG9hh!$-%P_$OvpWM%9mSd|+!y}xx&R|uKDeMnG zJ;J;s1`%bK||!dB=Uk-rinS`F%xO?4`Ec&5vZ& zZCDB!t$~}Zw~ubw7lwy|J%!1@&2jI5l;;GX%Uedhum>ScPTXZAP*3FhGK%0sI|`FN zeV_gil80)&C$Do6hMOvqerinY$gVUKc+dnwgd%Cuo&CnP>EPE(q)X|@r$QyJ9jyP&)pb`++hL^ zhM>}xTZ<)+54Ob^^;CD)e(h9?8AreD*_OB0#sU4VR9j*gc*lt3y*~fg&uGGV5;Z%GV`Mq!#Qx94xJ*#7J9#;vrzorZ9v;=0MhH#T1o~isej3&Shy94--+iBtI?F}s 
z3eCA9AoSY6;H44-%Oc5B>}3EVoN?i<0%K*SQ(36K93szSaB)0G=TasO@9&{7ql(^~ zi*D)qT{X?N-?af<)+^>+dPhnkhgCQ11P9v2SBE=5kVx8!TjQ+!PBeTQj_om^gpnF~ z&VI6DHcxeER>mNDsWGy^Dt4xV*dFT{$cC{ud~+ZcHC`}@XZUPy4p#l{{Ltd%hboO! z{IrU`Y!gQiw^=o?tRSJz%EQPaS zw>6OC-X1qd-W!Az^b5XQMN2`iD!jQSPdZZlVb$~NO(+yV`jDyu;H0nAoJ!?G8AwyV zdL4Kri{%K&+odBa&T(ID8HXo+sMJPOlc5<}ohKITsOGIA$I)5} zT{envWOA53GbL7k9=ggDncw9}qYfP-$8D}VdiNjCn%!9k_wmbfuV{wGDfK;qwy9*T zoHM+mkw!~Q@R=O?uoA?qeTY9?SRPwVA6565O*MI~d-lCF<0jOK1s)^13@X=uP12|v0+3flmEn7?H( zWXt!-+n_Ztikc(w?6VY{HDO9eXy`kfXaIYr{*V8wn|bkn;FnAoQKX95@O%+87^fog zAL;);0I@($zw18ay${~uoi+Q=Qzk0Ng$y5B=veb^0-)nf-lHY)9Dk`nQWv?E+kel)RV+sxtR@R1{4_y zL`MV-=nv*9jx~CSiBS<5>Q4&nrlG7BZGR|*Md>J_{0~3y z%&~J>b@P0tWDkZO697^P60;t&+j(K$0K!J|uGe|-51*kcXE`E4r+OP==gb=tL4mxWZv6F*0z=4&XLRjcJzM2>QUPsX7&+^ccjEl$@2VCpibX3)0rDTxb#)ntv z(P%9gEg$pdrz#fY#}cWCq41tp`TO^h7#$BDn-9Gyp6jlkgxqDKe%l5rJ<(jVY&yyz z5AopYKEVDi8R3iOGTLttLI_-16FJN8W=wzwUF8Ng)JjOYbs19whxXB5@pqp5V>#~e z&+^2L*_WIPLeO4OjfL?!G0NqyxnwaoCxJ~<`qbg4B;$j=g8hxeBpDLJ2rSEgy`Q5 zSt8>X-^KXFcY&>srs`65eey1EuHDX=kKSSX_vewViXhrgTA3fCiC=hc=fBSE7Otl(CsWJPDgoevytVq_@O5l z7m!ANW&(=ygB&C~>knm2Q5w-hgr}Fj-f$*Q8$(871pP<$aI(Wl`qh)jPKY^wz_V1J zqM_e}YHlv^u~7r_HyV)^&0tJ!3bCdW)O8wg=PYLF^dd6O4JvL5-+yunAh6ZG!%sfh zOTBC+FJ4|mPTU2niA~>+(e5F!IG5zO=pj5(uI95esC5M96p|4e`(MY`fj8b9#|Vg9!-+t38a|!UVF4V(8xAPFI(Q;>%_-ZBpLgULHW|^qYBj z`pw`qQML9re74~z-tiAGZ{Ac=hefgpsTASqr@Kc_j~+^r0X*2OChWa+R36&R=7Lzv zc?rbC6tM6|zhj9!iul9}tLw7Zu$VQ}G#p{u`aEl#s2N<*w&V*GRuBjta-lU2|Ef~Uh%dGr3+ zWGjP)CQUG&;@y}2!Ljai9{AHD3RF=;&wdga!YjeuR!f`1h7$&KDswrdHF_Kw%UL!# zmAJ4AMjV&ZipS?g-`R`X;RbYQWfQn^Y8vs8R!X-Yq{$S++8xcm}a@`H-xhzWx)eeeZKhJ0e*8qjbV# z23i|6IPFdhU8QU~=3>hJ$R|k&lvzWG2QDH}y7tIxYg0D%MX= zHe)c`u{0iL#}@^}223I=F`l@|tN2Mt7;*7&D8F5apT1Fi>x?bJ>%(btLSP6%%49AP zx_zluxXc=wjvt^kV;NWF1_3@Sz3m)Y^9gNXNnF41YKA8$@;Y4@+S=)tNQn*(K}s+& z$4*q#1(; ze+j|S$;^K2Ps|n$`dTVETx};bc_K41{Ria2sja2aAVrcKMR=gVq0?~it@o%4RC49w z>ligmpZ9q@7+YG=NhBzP<$en)BRF^zm(R~->xLaXciUE8jLV{A#v*Q7bt^Y79MCLf 
zq0!8I@^xkkFZRBAHg2rIo4lM=^W%rDiwFI&{nYs6Oqe!hh#^KI3nAv37kL$d*V#w; zC!f$D%jDrzvxX?VUXOwOe|>{9-Y{m}u$+?NXA>bXHZ-9H0)yqqkRS~x=Ki-{7|5p7 zaG1TV5~6Qd#lj)!{shi5yEv|wk$U@VGKY2RZC=A`J9Ut7Bg?0S4>73tG>6$+F9@DK zk8u&h(mETCu&366tZ+W#qeu8!zIk})?@;55NkAbFVhB%JXe`+iuOWNlc#>_)xaZvl zDx2-t#zFM?^OcL*qi4}s6kIknjfe<;K_;X4hrgm2z*EEevprbklbACm7G>Ba7E(2j zpFKYCUq{)S>~1s=I&}eK<6}8zYY{ysDQ`65%N<8SbQJ#Gc-4FOWKS(>#_>N>(@9kL zZ#Ht0M^C%h24|ZdxP`ZJX6ts!>(!_h%^)u|$k}2=tE!d z#?N;1c62I5Gp}IDO*eD>{E0+-gI-L&z7D#Y8|es0Vtz&zd0(1s)k(*ha*pmjfj8?$ ziYJZrlS5u~YBjA5dXy;{WakgRrBRZcUd;GH8UOg=FWk5N4Wcv0GIQbethjk8bH`s; zJa1nY-Hp}sIQj`{X{0hfmFp(B(RFB$C&d#HG$IH_FoF?`;NJs*y8I}ub_a^t6G;yZ z9d;=j#e^$okn{3>cCOt<-OBN#Dr5b08$ti>x7kps#xdqj=H)~aHB4R!eJfoiCr+81 zAZfsPk5p%MIn{kOV&{w_JtEvsCJMS5JL$JMkVh&Akja2dZhGpGfztln>^RziIbk95 z@?$S}NqSm1bG8S|r7q}zsQgJx&fmw{eShVVov))v&u8MoWvsa6My|>T9zsMb^rt9q z>&Ke0fc)raB8G^ebgupJ0|WKcpHk7kwn7ra)u9=#LD$L)DD3vi2N0Wc-L$>|tK|Xse zhO|*LxpLVGR$e!cv{3(*OJVf*(c~ZgfE|B%n9Z+8lQwE53$I(jEz9PS`dxNw;ag~I z2?&xA6|W>fs)yjfujiKSv(wvhl(lcJC+_JR2LEWoXsYDEaWfGyIZT@|oMqRC!)Bo7 zR0Ez61(~_oLvCihHVeHckJ23!$>JGB!^&)QmmZ@#G>Yq|kMb)MxUBuu>^(?tR1(YQ z6R>tYRP~w)(qR_8`&*`A=)KMA;B06U*X}GZ4PqXW=o{-SXxO`5=u+9W- zmzk>LH8=w0WEJH4jYUducpBIL^3U{yyv)wi9rT*?wCsA7Ck`H>_oMB+Fz13+E4(g@ zm7lS$I*^pRS29yE^hsW~jfy>Ia0X>?+3b`{uY>U7F&yQCO>Kl1{EU@ThPbuh^_=4K zeOjclam>FW`{L$48)uGGVwD6jrnqp}&7`3J@E6p&Lz%f?+|V-Nq9Ktv1kOxSH?g!pBDo%G{k&R+XK{yeYVnqf-cLgU} zEl@a_0;PYb(WT&|X3tTY453W9yomUi(DUnVK68pDlbzHBqlr~S47u4iojOWgpB_;( zl~JmguSN1D^c9r1_hZSJOJ2-y;ptbe{{;I_bz@GsmWgQz=gE5Uv~i}s8+|}7>V z*sM5xK72+U2CrvWc)6X511IStQB z?1BzJmU|-)KV~B=VG9S(cF^0`!}0f?qqIzq?6pU^c0%&`HkPV#>iSH?PajR9BI@F^ z;BTEV+IW6H|i; zl1C7wh(I1F`;xPGIFnaBz`ee3;t5b^<~S{uKk>pniy5m5K3{jzE%$Mc zFPzxThpFqR%kE+5CH6xbHf~CwzBgXYEEm(9_?AX_hB6B{ACxqa3=xDBK#Wa{l-0QHT zEjvPsJc7})#`zgXd^l}-%66Q_9;RZ-mD5QQUc62%UB*CyBl8)THGJwWY<(x#aacoG z>=>@Qdcc4od~QtL_0%+I@FvaSC$Ftz+nz0a{N`_X;D#}T*&FzDYpq{L0*|Yovh^Ei 
z4$kJ<i<+Vv8o5k*YOyLcTv#!@!z?LZbehMTV)T0!`1li@9-HAovV4R5W26EN`u8#f6lD8O4MTi4$^Cxw9OdB!TI3GLT9gbXC@( zbqNG+YWA0-_Qi48+&lsT?5L~i(7OMX9Q7mkDu}*AY&)PvFT7u|6M1nuY!Fq%=*&<; zgZ^kH>4Q5EH)jJR01Ukc}hXKYa+35rMp z0k5Tt#x^Y`o9BEzd-Vw#`V2%*EFm#Yd0xm0?O7^X^*A!elN%RHsJVk?wH}jWaOR$N z&eW^XNeda97E9cvJTGh-G+Haxpj`4(lzv`AAo^=L+pNQ+8bwZG9FYz+EuFm>tRAq| zQrX&rF1DDQ)C3f+?h9rAEr`k_H6e-kAR|VD35)HY?F>E~np5mM-H$w}kdlI--WT@P z20Ge02+m3)Ei>Vg%9Pn)#%Qt-nVLvkvTE3&-;1g4EcLYw*fPg5YIHGaa`3rKXgix} z>2VP|btO-{{xO@juIHUUKF+OkbMQ5tp}MLDJ%HDNQPWOSYY*P!nSAe=f3SY@X4b8K ziAPqn$Os>tc|v z#pWwu#;9n*LWXN>0(Dg-U3NRZ*d*d*fx~2_f$lyNZchxOauWy*86MJLqReae31jpB+g=$c3|lFdSw7nLbPbQgEo5O*T9@aao;s-C?ATDkd*KmyEPDlH!zvUDD7Za2mR3Zfv2) zlgt%A`ZaHF+QO!F|KK-|UeBZy2W7`<=YHe*i4TniTOW#p4FdiX4AUWy!7ae zOipugvb2Ww4&y~n`7YsGXKay#MWY%wpLt0hEG=b}cRIhKw!mZWrSbSab{z|3;oZx) zV2_Xz5FjU99!NlBG*Lr-Z$4ZmHMQG6X0Iue@mJr;GXEGO;lygL<9MBih@>e@^Y{N3 z4y>js4%fK|%NWnpbpIkVeHEXxORXS#{u0IoS?nW0iksGfoz~?qn z{%{f%|)N(!(2F?WphuOJYvYW9}vktWXIvW%hc z!Cvtp>&^y|ws-~80>P{KjGx{677b!x1Cj^{5K2aV(vW9*P33Gm*oIFrop}YpNF1m6 z>yKVwf4>KTnqzxg5y2CfJop&FHv!Yhqf{G=nC$}bB`;Ojn%R4> zo$yIlGAmU%#AC#I`Z%=)2T3KPi4O}U0D-0cQ-1r~k16XiFd(O#RF_uLYmH@eNiq@P z|HM_%)kSlc14m*pnUUcaykMLSoT%!?6kN!poJbT@u=<79*;U(#ML@7TfWQC=itq>m zWtVUm^jJ_g)YGA};Pe4_XgpPq)(}eJ_-qu)P{2X={=f6P)q6SHWj$}y>uu=Y*#ZLuDCF8Y8hKMa0EM(zp9H{=Dum)jb2WXQH>7)2(_O871V!50d9z^nxY$ zq!L6-GBM#1$j{etqpLVUO_!F)!a~yHVz8CI!(ZOt$;nnL?v^SVyS2!&3rUZS#$Ngk zudMlkvKGr%zu)f?g3}p4zL4T98%?z>w6s~jVs8+*&7GXwwwp$g#^@Q-$QwdU;-;&i znYMNhNtx-SBwsp(bYO5GK|z5;gvkjD9qN^t5z&zR9Fx}!GmTi zR}Wq4YU-SEBxV$l5e QN$;Z6llj~HDJR_U-f=AzV`;7)jKXd)^8fZi`!vF-={;T z)1%kx(d%^R`g-Z@?L}`mx37#~1pf;NA8v~dtycTh|9W-wn{Bv9Zk$H&Z^C)Lmh=gX zPm&|QNS(4$b-WRi)k$z%A_=lUWPnTGPP@*I%NvAT9)wgHLqfESz#;7gwwA4|-P46T zaS=bbek`i!b2^n5k3=A%5{Zot@@wa$Xa81?^tgz>W+hkTt56Oq0y8oI03ZNKL_t&$ zfJWL|d$5y9QDzw7=RB;1mCDjeG$sXOi;{^7vvKs*U-S0C2HG3;^2Rd{@sl_9($qU3 zL*&sJjJx7q9$h(&?2Kqa0s<~v+6gshtGY1y@)(n=Kp76W`#ApIA9&-7D%wqho4==x zirQ}aW%-OrQ=s(o_I5SxXMbfczEN`+my|@L@fdIY;azrDbzmhBWlR_$@<_tMWXLWd 
z;C*%-tqm<`jaEFEsM_)}KY!#o)*fl3-vh|PiOrtE4G-MK(uoBmhldP?TbVff`QLcv z(dSrmupXUzux{35uD|~-mQF4rC0vdy@LLyX^Nr$LXKX?=zUCJl8v_hTG4hk`&rxUgGlIDFE8L`aZ12NgBp#i6O^(0lK3DryGHe|8UZ6o4cU zdDtjsmqdcOmtNzAIrv;A+Rtp~t#>Plz3djA`src@SXMmPZIx_XbCh;7c>RQ;&+9Oub0U@JVFi}Z>MN5z2!i}Kt z+R@f-=Iz}r5^*D`B9V}@-6t-E*7AW7kx8z&IxGgJM+$Mnp+ zih@Ds!}MumQOJUEJ1^Y2do5j@T>m#duJu8|EK0(q*vmI^FmeLL(NZLIa;DXZFJ}gm zV!(E6J-Z^NQxY!y7XE1d6XBxza20yP8IB%nr_bWP=%6a>ebn!KlTTZ7xpw6hQ6v1g@SI+Un|PCy|uo406AeuLTHFqEaQ29ut7@4jO2M7l)yZvi*D6bGRLO zd;%%yaypv3(Q3V<<^<4GTh6|{4fNWXmo|!h;}BMK>PilLzKIPVz0W`1_&aZ{ zewVfDHnMr!cJ`H?rThOUyUz%|d+>Q&^i}U=%V(eR!JDt}mzQ7R)we!m)8?&g-8%4Z z%jVBm_wgruvSAZD51*p0+lc$0cm3Uf&rV-+J@sny$YtBV6>i#(AEw1-BWmJQ(nG^8 zVc6HB?J?nY`v{L$5*#Q)A}pL;_eb73)Q-*`P0_?6Vk6x2YAm?i7i6AYSI-x(y+f%t zohu)GkgEz3{4Vfhq~&H1739Hc_u};dc(JHUS@ZsJtZ|of-+lARONtnF1`x;yL>d~3 zTz1Yt;=$Cqm(8UuXhjw!Sz&}bk8_|UhV=MwsQ-fPCu`|8x)6S6G~wu_z12))@#Rd- ziAQnHb=9M$S!2hRFq)jGD8g}~tK7oI8U=+}DiQ~?E(~}(_v6YQM_ydakkE9^sd5^1 zf>ASzNlFN$bKgEXl_|tW#}ST<)bW!@Pn2WU_G7iW{XF=6*!#OUyZ<9z{?q#$s8eI7 zox?kiajK>diyOZ=c(8PJqjkqJVcIBCk|KZ(j_x_e$?87LZsFIj2a8%wuPcUeQ;SGV z{y$_M{tv*n(v%b&lg6YKKjVSspYzt%-}Bp8CwI=uC0Y@IJWztyZlPbRrn%|_M^5z; zzvyx9o1Z#tGC?R*#awgemF#=HlJ%dKkvL05kjIRszKm_}Z9-jgJ5N6OFmn=zKHq1z zqCb6to{(6spF51N#coAgSw>Gp3^&gjGo(x+4Ins7g)%6DNSdf9KgzbcWNv$Y`anI8 z9Y@;{PHQ9t=6E@^=4#J$-W?VpTSPgaBD4OCG(I_aEBFnh%bWdsQkS z!j7q@j-9W(!m*^~{PMRiuy{DV4gz!OUTWPD%vmywuLTGURGi@_*wvYAv31tvGrgxryCoI4^krZwQOB~277P-VWA!j>UwtVZzgf-9b_a9X6PAp zJvUxaO8cS1l$K=>AuUbB|Y1~=Lr z8>vmXl2z04N%NDEiQ^_PI_DJ4Ee$j@Btn=CP5Ec+Z*$|7DF|`((sinV{gynY<)@Pz zB?UaR{D17-cVJxAnfLMUbaiP)y<3)L$#Ua_qE3t4sLN;Wkeq_(D-ifl*@_fzxg^Bi;-FeYXZ6&|9e;WZBS zXHs%eA(K*#nA0-JOim{p8K(c}W)AkHQFzg0@{=#fUn%bxL&kEh|M(}d5B-K0H~y7B z#)MeEyoij1IHF7t@(nZ8*G|p8ogD0oXUt`S5o#^4b{81aX4c>$@(CJt$Wz`?CZ3}UCQS^^GU8*l5yh1t(2@>W-R{*p=nb{ z_4m-xp(pGc#$LCLeXTmCt-6x63r3@QOL7(krK`BySi+>F5!yQf5b`tJUBTA!ZlY#i z&Q+JrBI)eYdt)lVu%n-zZUllgK*jcmQJ2U#C8|pAnOnyo< z<`X9Q&V2X*kIWdGi 
zPHJEOKmJ%5!^K~Eh_78ggUr*4>rGy}o>kBOmWq8xIhp|JIy;9S|1Gb&OSs{?-{Qtu zIj8NC07#-_*<2>=ZJ?pHiAJkNe7K(@&urqDX2cX9L(tJhP3so=iWYKBb{bhxB}|_g zX6lj@B0f8vojO834};BHc&@?9C3jrQ!h-aZrv>Sf$jh@~^C|rHZd$AA+4V#@iED0S z+4TGqCtUh!kI?G$k$-gw8EFZpd^l+^#u9H%A=NUBy=5n5y93NyJC|wsHVi=gylYv1 z=>h)l=w1$%Ws@{J9djgrXJm-BBindo$1zgYew+ox1;k^gsHBu7>7`6fGT`m&pi2wk z_4HG@VJr6BbzFb>0*X?MKng`ur?JF3g<`7-e_sb(Duj2WpQ?@9=*wKg`pXwnoch6j zb~HTn&_fS}!(nP_YA7o!j-B^35BHPC1TC zXS9%5Fqhf3UiNIchO=fZmo(Tz4IHJ$44_4$(9HbA0pGqS0#AdIS38DdhU= zIZ|V1r0)pZD;4X$^DWlrn@`+VZ%8M{)IwEDA8vaE`-d;)o8P>Gtg$XSy@A+*#VpBo zvg_GRGz|}MtZFBlHtu3@*1i1jKkny}+_RJ(REpl+&+&TSeD3?^?UbJK;kgc5t_^$r zQMx^D8a5vx<-Y&mv$Mv1^cr2n;`uSuzO<1;&LKMYZ(?g(79acm54gQ}koqGH^t)>~ zIIx1Rf9Vo3%zAXBQk3JP`EVm$&PFOcEBWddSCMIvu;siMIlyzTcv=39A945eE;hfg zoyyie?Dj62>JPH>wT-;6f0!8`{Tg4qvXr#AGyg{%lbws%Sx;m8AkM};9B7}+wfEk{ zl0w_b?J(WtY$@+2_li5XdQm>sGxqPSy)PfyNX?VkJ8fHLu=hZc5m9u!LAq<-f}OWzH%P9 ziN}v9G?9^;fV;1qj(!Ji!M;j+ck$c)wlURnJEDQ`swXzq3YmPHf^n9AY~~x+p?w4LxyLStQ}2;ou(1n!4yd)$vsSE0~-1FZ^kxT-5D)g(v^~1TXK{&fcT#44X5^Pl+PrXy!;+ z8HXAM&?ilzq~P?`SURH9vdK=5!QFI(1BV-FJbIWqZyJkk{|q-Rw&H1TqPek`U|co} zKC+%gY4N0_rjVfeso1rhs;+K&TWdMAa~FqtEX=s#quhB#F^PHr#c)>>wKe@D&G`t| zUs=lN+h8UkA(|+?i{|=TYP;QFo5lK@R-O31z2kIdViL2MoE=S|Z4VpvN0@!>7x~G5 ze1_FCN+`@t=R^0Oo;YuGdLwZeQ<$8eMBDR!V5=jG8-DaC-@0ZdB_$=4l$0=S#vGO` zU&9sa*R#ah%!Xh5f+uQHSh99L*)bpRq_5#g9)9!yp^VksvTiaKoje7eSKT}K-EaQR z%P(!=s5gQ1+yde~$EdHV!fu_*th^**Ovk_HQDkQ7=<0AX;5U(K3)8S^8;<#(;G?Ui zk{)Y77hlNY*_jMiyvFXvPMoa`9N6*#`y7+F=KgPT?`5T=#Tw6imQS2YX_kw_I}gy> z(N6V_EgbR|bMu$K%Ey*Y#&*`{M?J|?rxNdPqN1XMp7uJ*_Uy-AbR+lOG@qD(HahKX zR5n^zebagtO-&~ucN$Y@;lQ3l)HHR|+g`_^-P_r6B*g4n?&0=T#iyS7qfaF}-A7+r zJH5^>Di2iPpLGKty>cOiwvz>f2afJ$UrQp(Zn%c!rJ0O>Y>rQj$Lr{!*V#`?c>{(e z*KzH|v&lPNf5vE~WJUo|y+_zz-9T4&EA>bAuyxaJngUa~{L^=H^U7kz&uIq9k?fStpMC!c_cCWgMue;#f~79W{s9wRIOw zrUl&mv1?gXnl{1n>4oo=S6)FW#nPoqv0AMY_R}e)l;7{?>8GFOkw+fki6@>QH}{OC z1@9dd!Z+AK&7mq9dL8&oDP-qNrl@!dlanVne&o!H5D9v5_VhBSizOo~gM@QEn5YOL 
z|1ge03z?ZQXSM}~ag12VOgT%%NfjaN8=$A(Lv&g`C%Y)T{q;P4_r-j@Vj&N`zL78F zg&6Mdz@_3z&r0RoCx@!hZ9tvfIJGF!bFxpX;c{kwr3G;JbIXSstjB7GR{alm2i#8I#k_SYZO7l*UM3EB#>`c{vk*;-OTKfsS_iLKZUe za!=a>c@{wz?Y$QA@+Mfe9dgms<{~b03KuE)(gK)6ZxV|AL`9_H|mBS(rj>l!5S24~?zOv>t1u#~V#*?i?X$G*YO zUIh2x2w}aExP-GUoj%Km;0P|ei;6j}2Jq_Rv87mvF#v?{4vlUZj zPP>D_pn-(sRFZ88XBOiPk1*o(6Es+{#h&(h!#;)w1{exku%#t)dhH!Vm7{+_dJZx)kC)6_mvwH}0 zY9_Y$C{DRl*iBzYAA>%H*_J_8N*tDR+$-$vr`P4jY)iu$H+KC~JJ9chgv1!4qR#RQ zD(t4a&xMLgB_%25l%u7`zm5@mAAJLEBGHMYr=<`-;mN>Z{Db}UI)(}AV@S?OBQYjw zg5OD}_O)fCku+i7_mBJUzn@4X!UGRHKtVylg#9l3u;qh71#q^$#J{b(i;Zay@cg## zGVcSw_EHETgb@EaA1yl`=RZGxA1_2-#TS3_XTG`O!&D0Sv z$@3SD7R(hw2qA=cBjEx1`x>ZhizPX=h?1hW5ws|caL|w6A0&Lv6J<0a;UNA%h*J)? z9vkoV`3ar-0;kyB75md3eVM9uScIVnLJa7P~%<>4W9ejS}=gm7Z@u# zI*~8|pN}(sM6@6Qj|Wd6a%OQ_Elj}Y!FRT{bKNQA!{ZAOe#4#`A)k*xc!H~IA$(`K zjur{v@puRXrJSx1;sPy@`+=e&guHedk5p3j;ttxQOw36P(pcR{#^fT>&%Jj@2qAmy7o zDapyCr^VnO3KE}v`Zz@->}8;*iyph1kS>gS#E-?Cj4gK(g;{ZD-ClZO(+CBe?BCW5 z=4=)(o&LsK86!c^DOA9PeE>MEShP}Myp9gqI$ey6`0;5F8xw^wA&V(fGKo8Rt}QA& zLPt#{&2|r(A&r7r(ixLdTInA0GVFHY_Zx{$ z%cFF9;W@Y0huz&YH}zom4&w_$Y*G$WC)pV8brF|WMB4bu1OXS#jV<)q2N+SYKQf48zym58TV4sJ#i>NgHgq9Av?OeK zr>P(#-TQd*cTZA1n90gzS?C)c=U0E2$+chlHdp4q*Ao;%2qDCUOW}3)bF8kGq2ye$ zN~V+dW=BT+40lzt`{n1^xU(HCvxtItKRx|U!nR~Ab}uX=V^-Yhe@H4qUI(pZukpe^ zU!=?tPvP_eES_H4`=ZEB_0p;4@x_Nf&h(Izmc1MJ$Ft=)%og0$2XHL-7QcD;Dze9( zP(wrQ9QfXTrCILyy2fbUp7Q?NeQ_Y2OUi#SPN3;)0S&!VK2_+a$bA(X6zdgXs3)8UxN{cX9 z|02Kq{wwHbStu$f9v z0=bhD@t6IB$M$7&|D%86a|_OXF1^36hCLhp%x^ce5<7D-b2401?(CxQ;%OXtZXYXt z_8gy|dAzre&h1a|vnP&_P?}8jum6JY{ug=v3$uyQ>wo~g+kVUU{yfBt;!Ym>b00VT z_Sf7RUCyTG4{$6xlgSwtnl}E9SGAAw>%V=I)oEtr5F;Tzq?A(1=kuw@9(znJUAk1Y zx3?=Hgb+dqaUt-k=Iy^%ch9t_^hGzQhj+N&+%PSmhT8V1zkK@^wS4JK>cM~3E4NZg z1$$NbbN{YZq!?7vv`f{Op6VX$A0AW<+a6U9-t%SkSWAaG_}sVE5?h*@y!cs&LLn)I+`3`a()6O^qDeuO9vM z3}rE8sM%NlNRGjC%0Z9_4J@t$uxdi7Hxhzj|Td_|+oHS+`yN*QJ>% zZ{f$)A5MQn4f~a|W|R8m-50A_m)xsrTXI!s-S##RAChSv= z`j^$u?_H{9uK0|4WdGPWPnRlx>=UZMU{Pr$>($SXo}5R})1k_qd{A9Cw^S|t#81`E 
z?$B7%uxc;+KXv=OtJKfx-rQSrA<=@vs_W(ZRG~3WrLX>PRr@CTcRzizO4pfHeCBQH z`GIk5VXx{r_@w&wni;D2im#|A>qf>}BFaCoUwwb2Ri!Qap4vO4lv0`s`FhpHyO*kK z9&J-WZ=L$DB?e{8UZrlmoR6qmZu`1=rgl(8#x4|m=`NLJPE_lk=vV$TCnkh=pZnq$zo_oJ?>^Pi(sIsz zQnN(}A%qa(LKSw6)oY0%HFXLz-c-F7MQFH<{V)8U|9z^SjLSd4J?p0u2Vk<2no-K! z+&FBh1x%lvIXYsbm!7sJ+Tt!^MS2jQqZhkJkzk#|>QCLplB}__uJt-FC_(`b!!GZs zx7GYOYj^VZpZ|)NJ@dHr^Pgf_ni0?uYqL=_e;TRrR+38=FeTJSd&5D>g3Gw$k_5c2 z7OEQDL?x$@etheW@-XbGr?Os$HG4h_Gf%l0erG+q{`M38=hYyKKK>;>wP18Vi#|4i zr#@XMxR?)}g2b5rs0<018!qDxtSSv~`Y|HvQq z1%L?Qk%MeFkj<^@ve5#aH1#O(xrn{xLB4ZY?#b;;Mm+{1IQML3;JSzS`m*$s6H*2Y z8VtF5@t-zVrH#<|^tbr@e^pa>&yV=->f`(Gbi^*ajF~Zjbvdi1F}6iWqvxrgv31sc ze7e*^q;n73TOckghq$pGk(%e&z?IxE%>up0=nN=Ay^3Yuc#!qQi6>XEm<;If;_4ki zsW)E_^b<_7F?DVUv4JoK zLo_Ap)>0UB&{e;U9Rn#$T5>5fjt>hAG0=XP3U3k_vllRB+~#)`9-_VGRetxEa`Z*3 zx$U~CoVZ&!NMN83mno6t>5G^?E(B4bVY;ec;s1Vr2xH+IK6XR#$vySq00Bn_eTH~! zGZr&CeGQ+Zl|6s_HP7_Lk-vB?C4ml_8|$g5Jj{WeTiCR*ny7Vma`~Ix-*v$lt=Dp3 zdo!3*ShTYA4eGT-2znaXw7CU?#mby(u4FWiQbYnSYX1HpKi=-8=%%mm>6x)-j5g{( z)11^c_k*Tja0tuV+qfiEBivp_d7lZ}@(1|#b>mJ&)fAzoYU+F@tSi6D*RCJerZj?$ z71W3HWKPQC)H^CdjxGGZA3RN0>N>t~&zw`*m9v8$KSVEB%c2tv5xVv}QnJV_nN9Wt{Xzq+RF@r~#~d&TFAw7E$HQ=Z6Q9iBy15v2qA>H zAO&&scTrc_g*9y+#j}ctI$Jjtps&7yZ5y@|zU68H_EtKM9Hu)Vjd`W>7@vuR2JQ6K zRWO)jW9_UWtfyZ~8^P&pqO!w8>e4yP8MkADpp*Xg103v)Cu`MGW}h;61lK?VWz}J< zB~zF^`()ID@-gUY;z(@(ThR;_j$Z+yd^r2-*u8&{)S^XPa$;GF#>k+PnzB|*No$!k z%{FdOgpmO|RlDlYCCy~|vKiw76%Ft30F8&5F(l4m_QK44Wl&vB(;x{1cX#*T?gV!T z5Zv9}J-EBOJHg%E?cz>wclW)?^Stl3zqfX)w(1sj&SB1(o}RYp>F%je9CQoL_;FDt zLMEH<_%v>}gW9^CSvfFQV_pA6YaN|+x093NpZw} zAt9@ik6e)B`_MTxH~0;UF-E4lXDs@?t?3Do=e~ZDW`ehQI-B6~iIOnQtQxD2XSyJa zbM;XRc74K$YS(GK6Yc1PZ={i{qdZO9cAlcoq|(phGoy$@2er=D-xc(WDsQMhvQT1H78^perASBq2w3D`zXuEU2gw~QVm9qF~e#>XHY?^g#230hN zFRY8dLf}3!Ki6g8Sr}mdnSVwp_C{5ZJPzEQOF^`CoNaZ%$gJTU*I~`x?S`B@H&)lm z1z1UIV|cxT#VJUoFnYHs+9Il$kSPf%D@b85&_2&K_MAggud&tc({r{2io0Rqn&z~B zso<IbF*6_=Vfl z*X~YzT;7mYN8&ty7cKT@X6r=PPFMB=!`n6eW#7~!cZ<8o?6kYaAu}dpGEKp5eT9ff 
zZ?lEY?z}O{l_G!d{LP@tbO@OFlD8|Q!NNcV-(G?&9Ib7@L@)%Stomr#m6c@QW}`1f zv}u8sJxQ`En)xH9m^og6JY{Ps_x>mH8q=QAPw=s#&Zm~3C&UY~%mO9R`AV6OvW}PE z?;kfg%E;|@GQWl{2HMRj+gb|SwLga~G^YRhg&hjB@ZA!hV9<1lop-`o3^m-QJ;5{o zlaCzhJTD&$)jDy1a8v;8AD%_TAhasU?kti#ZrFu7X)qOFKZ9thGqS)lzc=6S_m%aP zO$`Z^BBg!V4MRl3E&!JK1%=a7U@Bs!m>sLRR&l#Mkh68HXf-wM7SM4b6AB$k7*A)S zD}mCzu-03QV%xsnuQQfs>cb0mdje!%o9;nU@`BR{4+*0rrULl(QbF84^E|Gk)rbwmYN$Kvm&aVgZJToSBgr8j0n5X4PD`yX@ZaR~Z%^&w92 zy+2EnBvCclAMh}@+M-1JJe}|?m)f;2oc+$Co%=FA-r`7iRZL@1b2G!=Xb`PE{!3-o zVA_N8Hl6BgWfNnab{7O8#2}ZS1L8VE-X(fY6HRM>T?q6c)F(SM0jb2r1e)A^)_Hiv=y-fY$h z9dEFYXeA8qzUcQhUcO@4WkZ%uC>1g7j?HSZ663w^_+hy!u&nZ2{M3 z8U;oql#2&Z)JZv&BjwdkX@?M+mYU@E0P0PAqriYCtBlcWGOWX)U|Qcd%MJDjy?3~ZNe0B)uE|Xo zwH8l#F7S?13gqp+eXmZCzpmnKP@u1GctgfQg`F0nsn*Gd8;}GWp+_M$HufoSh8S!O zo-4{yua7@#zYrS+=0bCj%plN~_9#B3WnU6~{yfL-g3kPx8>PxuG>@k0Ho*9oT;coYMVoY|DMq z5JtmY`rkq6$+NYkX~~WFe&d8ugIYXKO_8{#(PTE0u;>}y`f?R$8b|i~6#P;bu^n?0 zG@=LRMep6lY)wOP>AHe9x8K#2B%I`kPWai?DM|TgvyB#Y2Bmzs2G3~|rL)l@n~Gye zG}x>86UKg=5C++@D=El z>o=m|Rjc~))70_^!|sJb<@;R|El-En7k0D(y|70+1kOY2wW)oTuTT>oG@IIK!-29e zFLw2FfOFfKh7+oLxFdkkV17fg;s)P>gqd(JlINuRXw#{_OGD=PSl3WCpAVFQ_Y2pMl^A;l*0njycvh;+XaV`=c@J`zEOXbsQcNYZ8hh`A+^RA zPVG(C6e(qOClrvnHG1`86+3} zsH?5qV0?ENh)a9Y~n@n!+@`}6#w z{K%kDuAsPNy8{y&J#S5diAcEmmX4CS@Rps{RMO*56X6}@Sz)4hWGb6I4Rs2#biNB3 zA#AE4_m0zR0GaIBQ2srp^XDZ`g+VSXeM>{>%$|f4_|eAGoc78u9;8`$ajI#f&k`-S zik|ObBdUX4+FmSnP*{co93`~$#ko!4tzqV^6LrluSvZuqW(AeGkv%M__XdXur8dDfu)nRLPxj#3M#jba;cAtPpuOBgZ(Lwy0 z_gHSULP)HHUD&uoJMS>2w!?UJw-Xt*FuBO~8Mnz=?yc3GtlKaQf`94#XJT+ zlP+o;76EQP6Si%Y(^Y&Z62U7lQMpeQ18@H|kY@N$f3@zVCfd>NvcYO@wen!o=aCZ~ z0-N*u=9stz);cr(Z;i!WT|V!pxz826PIGr~3_pN@52X)R>(Pmb9z04=>^Cd?z?_Ty zF;8zL{GuzfaL!E4Uy1ov{;s$X%Lkf$i=A#*Zt_9Q;iu3<8%^47duRy?G+M2w<#!PZ zHx!3JzwE`s&F!q*%`i#yWj~5sOggt+D9s-dg}m}cB>#=oYxJNeElo7f)p{)g^Mq-) zj5U)dF(J*n7mzRB9lXf=8LG5D+vv@q|H7zbGm3sx)Ls1*hb}g1seI{r_~2cew}6SR*4O%05`%h%o?N~a_jOZXX_{h_82cnD zuV$+HpW&0T+rEu0c4bAFr~T1dEu1v<$BQFphat@&17t+4iTHaKl6)AIsDKcgbr 
zHT5Z4<)=kF+F+#iQD=s}@MYbTuT;w?m~fBb3f;0c?L@&z#ujWMC_W)n)45Hg@hT~=gIaoyU&3gU zGaTw=UXiNd>u93|R|D`wQ>}JoIO`NGH&7)ha1Dt&s$*`U)D31GI1O?qAMe)$p*MBa zffB3}#=9r~j~gwZ*_Ns+ywP9ord;QhUhcFm8ZWvW$wU77n0u^EsO58nuUPq@s%} zIHNk+w^GuX_l6ywOmk8TVQmV{gi~7s2(2qOspa40KcY=KBawfv(V~OYtTu}e8P10r z>eM%St9uYm6|eM<&Mjm~%Zn?LVyaIXTSjY+RuE{${4d!r6fa`nN5{vc8BV+i8!j*w z;ZrqQ5`VrM{y*QPBab{%@r?b@x}jQg#^@s=De8BvSa?(nyXY*wWh{_+f!uuR8oPvB zv2z8*l!odrCXXkS>jUC)X>}DE3xcZV%T{6orvWZMS0)1ql0< zFU1#oU}bAyuYdkbl0a$vz4*U+x8uS4>FS=6Hed)4zK-JUfiZnFt${2)R*&T6T}=L; z{ya-0maaPskgo4*FXZ^Q)I4CJiki5zbAMoFmj*4*?{ke8L z549i2j7>V`uaG`xxwG+I%fYX|nW67?qB#87Z~XfXUj}f0a&ty6Ox6G};o{(Oq=AQ{ z-{0Wh*frOMf`anjW^)DKD*ua+emBy8JiLED@&DW1Sa9OKw%oUW2N>|E%hb7uf@l*h z3(Uxbo3*#aS$*C-K7`~`L0cCfnI*OU?;t0?LTPLyJ7P{KUz-$M9%pwAJ!~wnM)W6C z<`CZV$jSWoi!t%ldtM{oe&|CryJGJ42i(pwayZaG4`s<@*gQ=HVVZ_8DMoa#RV1nMoo!h)!^HJyZn#K9>A z>;EBZ{2_lJpxb>?x%H5-)j>hVe9lFckxIl6kP^80k8iVYlAE1&)b_C1UlABnK;` zu`n(^TTHnuvN@EV@DH}Gd`bRt$yKF%Ks)GdJ?`Fg)*EgmZxC}qpJ}7+w3)CV5iu7` zDCfYqHbe4^G%wzZw$ZFS$_>9g344JIF*FE2?>zTXGcXIo1%c`G3S|N01h=yDjm=t% zJ@T=MnRh}==gpdp=MHzZ{3VoBMh}3t=&-&QPASc^N+JaI@tr^h?PaOO07FbYn2;|r zA%(E7Bh#@he>k%%$)c*wuTW8u;RF~+gSZuf@4p;yg*UE~fKG%8?Y|Ksp}hEq*tfOF z{}}-t;FV4tp<~ox$})FUPj6<8OtZn2PVq(|1Q2)$w`yinnp>8awmvmi{dKp87Z?`B zn%$Ddb}mVd%tXO_lNUZsUB;olD(Lg=?F8kvLD69s1ftC2%JgTs?=>hy3XdCgJVsFl zlB#ff$J~W$6h-#BXajc=)y<;)M8io6GFShY9FJ(Q!Ox^x-qVp`WBVTSYgp@NjdW9- zFk*{imTTmVmKi%=Uwrkn)k_NW@9G(>=n7DOh@$;pU9C7a&(a%~c;lj10xB&T8DwP&wFly4;YQA4LfG60Yiag`JeNRGs{E>H3zh5&{PG z=>O=KL%cqBy4D?jc2v<}z~lx6_;}8kk~Ap!6=wxJAV>EI&QNOUj{pIZ?q4Tde;d(~ z5HOC?9Q!Pw0)w%mIfbg!BpM_l6zC{*U?u9pcz|g$OTEl z7BZ?rFp||DjVP5)HRQf|6l9e)F~Oj<>NCZxF*1ya&5T0gZOjlXn$q~j&I@Kh!JKc% zq{fJNj1M%B0)uaFm^VX9kb-#WaD~!%4*wDWFVPesPzUt4eRF64%rDCLfHE9%In7rI$FIr_mmr821>`xQ(z@qo zVI#4KejPcS&gVC}3nyBU6zz06-9*jZa>m9sFFCmunBiu)+9CGNppBeUX8j$~|L-I` z1pM2dU~m+mmBPF^l;p)F&Ny6e?^ZJo#UO-hHI#Gzhdnul!m*=4WL-u};sG2btu!Sj zDP~xXBe=9NvvEAqo7ya1;0y~;rlu9;nqec!fObID4fsj;Z089B#hq@5IENqhnC}l# 
zK@Q-1xL&{9{Xh8&M9ac#xvZM4w1uyGlpjKWt1Uu;`T_+WP4Jz!@8^us(YM zq8?PB3%?yXV%~BA*&I0(G1Syl9ZW`%vV&T%Sn)rx07!LY&~F)46?2&NnKVMeYIQvi%;Ep6x3_6y{yGd@F@)8umg0~@0i2g5 z43%kT?Y|GVmU>0Tvt#8%?TPK=Yx)NV5pi&Y922ne`I6SuFdSqAQcC*g9BjC;#RA`8 z5tfe5&I>n>^t;csXIT!eE=wECdpn=5$r&X> z{QUimo;DF;$%6kqjAp6!hLUoe_}r?1mU>sOd!w%=iHrKd+GxmE6ql%bcVKz;+mklQ z(Bcy2j>7OHy`e%Fep6lg{Q~|PCgU+ICTzqP8NCF~tb_;&RO9}vW;xv@0TmAzFKrlV<}hNWZC?5D|@p z(UL{ZB^WALO7y2p-fg3)4+pQB6<1YdzjyMT3Xue$nsPBS)+o=^ISh--ka7!N~9LrHjG%MIsGgo>T@&UN3RI@FmQDNIomla(sxZ4LkQ4XAr6cC zoUwaTPI1{7xNes{sK#mH5BSVP*P5ikB$bk2`k9btTK4Wd+3il8ePfGg4Ft@NMWwoL zd2ox8SGUk`K+^a?#B`}XG%QOmC`dLdY4^-x>oVz6T3%FFC8?ycretpc&GyD%d?@x& zuHL2;PhDMV?v_A%Y)w#g`gtp8`mb4_}7Kd`riJVoQv2Fm-B1jg) z&rc4PJdJrG6`iLmc6#CZ1yzBdIPd*q^Rhv&TzNYv5~FlRSH~8qs3^I6-BLXF=AeCj zLa;A_l((ukQYj6(_@Vdt^&GFJq9~$T0o|7%vIm5GLePr)bX(g>W2uS>B9awHmKW3l z(eahfeW#n9nOccff3h^P6GU3){n(hk$!Sn{k$^K9|JtsVQemj#$P91@V*g`7yHa4` zyC3ET_iiz;va$mGPto(`D*0wB9=!B_xW``vLrUKfbLtyLCU4&($#Wn{HafPe>v z)kMRO1iXUwyr@QB5O*n_(5o8Z^eOGtf%T`4<+h8*<}&83oi3`hJS;QPNYM_Lk+Q}$ zxaMhT1hRx_!Q(bdXGL-6x}#+o=Q^SQnc9&efeb`QvV@pS<~=LDwBFc$t})5BLr^r_6F|N<%x)Qwl7@BxkB6WrN%YuRZ}%!&?k@CN!J$IHi()$g3yFs0)5BRr^9S>X7pwJI&1! z4km1*Y!3h?j=nwb(Wuo1kV#Qf(N2;BjE!@@_jzXXFqzQ7!q2m-$)jpf$Go5G4i__1}ZUTapblh!(q~% zuI)X>nd zWXY!dOI^g$@|0>dbR1m(5lnzXj3zZ)t*u9hp*c}&0#v#cR2*iZGx1XP&ffKeP>jn1 zt7QwEt#X)R;}t)8@|{RM{Z3fBG1h`kN5UJJzUKyURBBeh9pT6U>I$q*b97{2F9A(8 z6eo47yv-aIBFFL@|Dv$QE~NW&({!38sqvDyH1p1B*-*(BLx!c(0(6?ZifK=U7Y+~P z@ow=GjUfm;aE&!Ut)k9Y9~X;;`m2U*-O+FgSgk&? 
zxExcyG(oOYNfZ{<&&2~GOcrDu1D?{39^^n;a(I98xZ4?4O{eueRFO;$>nTd(hQ6?o z)v;WE(8o`aWU0O3e7!?9+A^b8nVqzxsK0c*3a|#a`%v0m?CuxpcIb^O7@aGtGMWsf z?-@7je*U6w@9p#^cy9I#Q_&e~z}Q3LeV7tJ(IYD~E}Qu719h|2v@5yw)@7& zsd+Is3?ly40-i8*r5GGuA3l+Afa#!V8Vr6wyX!hWPtTXi+Ol`6hW+T)VBroxXD*uo)T&~wQ)}Ge5n`aL>{5#Ml$7;wdspZkQ2#5V`Uf+ zmjiWLhY1`5`%5?&!p7l$DNE7a@+eW$TMX(eMeq?&bH0cpc!3&7k8{|)w7Y7`W>^yl zNPHqSUNPtVp&{-q`+18XYg*4aIy^gFv8@s`INZ+wi%ZBe8Bo)i#qVmd&hAoKj_R_Od$(-QrrX=I zBT_Sf1zI87u+Sd(GzfYU3i|CsUJB78Yd$}ipOD1Bk{CmsB=(@DA@%Cw;x+lDPFr?z z%sdQ%>S^~Xrbz}qZAc<_ShEvsKhPEAtvWOvJSbWm_8L-IQ_qar} z%lQ+8?gGqzhl1&c;U*0Vu%={m=Z$%pM+}k*CM{ub-(!b0Dx#CdpE=p$LG1a5sUZv( z88WqYO6<&b4au&f6tLBKla_?-4Nyk3eoS1G*<4*S`|Kw+MKRzg*vU*VaqkF|@6YLM zsMBJ1+zV=aaWpo~6Ca_|zuKlz*FNjUOEdf}&K$g=9HV8`$37z@=76wOPxeEp(7^NJ zYZiK@c65*DHZc2+{5Nl7r<7olA6WE|dhd9YbM*;5-SJ&9bjv_Z2@kx?5A0MZm|qUa ze__@9FRZUZPVG;<);xuawif2ZYC$d*zlM?;BEnVfnWx$N&pScNNJ&PZnFt!`Z31H! z17~qWZ9<8hdwxYvA#(m4IvOOwsUBE%_2|%3^yG3QQ$jtx*qZ*4vYd;rete8YN!hcy zE`3uHnlV;J70T}bpb<%m^`|iJGi^}6MH+gyJDfg&O^L9rSFZ`}Z%12umwTwb?PvsD zG@U+lBrX>Lib%vs6vhX#Kg4km@Hs5}sz$BIxR<6G3LpVXT0puj&eLj1*=ssz$lwJ@ zkR>GTIA48NNZUTo>41GPrqjbL{j!nlwsj|TOZ$Uc8AC{27D{Q4Duv~zPsDqa|LKxj z%>E!7&(%|ck}*W2Z9x5{vdSOPLYM(cyhQvLz=+_UNj9!F(aWqs|LQ!-k}$s@$ZcfY{-M=(zB+Xl zY!>W^P<3XCj$4L}R3vw953E$!*ZCgwNYQX*hil|?gT)`9vPBSFhsOj1XxfjS4*K9< z(83QIC*Ya6r+X_Ws%pFB#6%^~j7Bb)Z$y;@I0woV+Br-_F>yJup8&$ZeW3&)z2_QT zDKNlQ>287{Qt21St^08XmTj+Odv2!ARH7AN!Zput&bFSfe}o94tOVFUq>Fp~z;17Q zVGZvbxdT61?1`yNMla;m2OHfwEnK9angR@*NgXTYC?ZDba~;aHJjDA5fU_=)kNYv2xx29loZ(;5C?{O~58;r_%S`nw zx^5jEEHyqboUOk_$WH#ge}W3~0|Vt-t22PDKQlV;pK!)nTo`K&Ml&P+@oPQm<#rmj$>2?qRV#dd6Pf?I>@sun|NeoZ^yC7EO-;{f43VlvvMt{P10UWBrg^ zBzEtv_*~yl?j|h#1qH}Hqj>2os*1IgW$J>?3GmZ7(Xu4PC1KaM4?5`V9IWCezU<5M zJlzEpAr)nm3Lp={^l&fEbq;dOvYDUCNumoa7HYlS&?&4=D;i?cy8I#+D{~2{usb{g z&eIl`Yk@sUV7%+5*m4MWpBS2#ga`53)Ebz;7}6WNiHru@E~oxsfLE_+ub0)$_eN5Q z`uh6I1r3`T?>Bc;k#YY&T+y3~ns%7eSsIVTnVQ$jl2@pQZDC$wbzJ0QnNeD5Hz*c- zcyd(G;>=!UvnsNq8`2!tV7b9 
z^Z9kYX#=6uW6_XZAW8LLvHoJjjCNb_;^vZ^lm`8dEdLAu2H7!+acOqQb^4p50y5)} zWTdIIsw3f3F=qPRwgac#lP4G%yAWKk*=ek}nmrN*t;Eo?iY(~SglPS6^U$E(6VG@v zPjd(Ij3ovf+(ZO6v}%nroY7U0qK+^|s07L&`-<#KNI*w*T7!zFq}!_*4N;tCgkzI! zNh1bA4l_2u95sVPNE(7nFw0EzWtF3ThgI9lik~+wvYN zhN3tg8{$(F#KS41*vcwa+7*iJ6a(XooQei7#KXD1P9KQ$#ab1o1Jhi@of!y->h5b>}-&yPcq#8&JQyMr8*jH+H1)u`=`}-g`=hg;nZBT@M z5%t#kiRO!#FzAL(8VrOPRj8^-c-x~=*k~6Z-nXP?rV(T6k>r~)rVd3>LhBu{$VO+! zv(yAGCb@c;2?kzZdarU1 zV)sF+u{~P=A6@45O+zv|FM5cyXG{n9*=h$&p&!VB z<(0eJ^Jt`5q4E;KxNhO}2hJ9s)n4EMTpXcI4D6rxt5HI`@yUhF^eohvSBQ7!106iy>oSs$I$k z*)*!2Ox|tW>M)JEXkQQZ@yavSMY)k!Q$!g0FQPb8G4lDvA9a>?NF(U6v;U}sc*w@T zOu(rtOKZ{>S$HT7Sfb$~OfFQ}t-WP}>RKJRy@jRI!6b%N%`>PO5;kDWJoe>`8{boL zn7S_Cd>J%4X$F=kGrMKTPGsM$yAFCVPadi*zgBggH`BZ(<;p{bR&$df8e*v%;|=YWD|z|>elJQ`M4lioo1f_Y}1-#H;+ zc}4bPS9R*U3vi!L8P;Sw(4ziO20c$E>B0KzKZvFH$6_SZbp1i!_p8nAK9*vcKbNf2 z5RZNB&93q*BTB}3enV-hw-}UH4di$y+#cu4UH09yY z#XZt>#EknTNCV!d+A@{eJktAHaH%k9j^6`&_rLFgqb~LfM)mx(5$K{3e8jyMrT#$S zcezEx^jDP%G+&xlkdhk9r&yR&&&m}@L}MQseWcUkR$dH!@5~xoP>T0#ZBYL_8{x=O z-L(h#hUf3P?@h~1amag1UiHj0JYJ=1CXkVG^v6yM0NTxe$p;7{Q*268fD`*8y_qSasUP_Vl}@9#r|M~P}5eJy!_9_ zzrS8A(tkVu_kl^wzk~a?MTUSoh=@E$wJHsW>aT^HMUubr>d#yOG0`Oe004$-r72O} z7oFWf)RN?t(1QDX=grCZ`9Cro75+5(_qTN+Vr)!qU|{g*+38|c3=t9%@+}|0z{Mr0 ztc?C=yQ?7nzd!*meCXG#*+uhXQ&Lh~tSErl4*%OfGX(xM?f!jc3m&TbciHu~ku5xB zfIo6sS=L1qWUpE=|0Z5DVyR@xPm4G}tX+{7-)ADMNWRYoKX9pzQb|`~OYAW>UD>@P z$;nFhZ`O?j5a}qHyo+IM2t)EX(c@Ks0}_!3q*^Ui7)Q8Iq_$*Y_p|bb&mIjDz#_8& zivys_MGSPOHv41WGcfP;A6+9u22P8S#qCz37$M)l`PphyF>U>XD5eOpZ#^)rKZJ`F zUM3E8{a_b{ZcW8a@44{AYTz~AeA&2b! 
z-0mN|WJlPa2PY+7mpYw!0q?am>}_0JoND(13GO!>3uQ;2k;$Yp?vE`}@;u3Y`RZO$~$*2r_+)t>aB^LiaMce8$1CkDsN zy(Ey$9rYS#NId1@1113C7tuIBGd({sy@Xnqg&5Z(C~Zj-3yxG1> zd|0vG*NNB0IkzzIV};f8Jie(qm*qVIOv4aZ5y|7B#=r#bml$}6_L~Hs+o}_d6b5iT z#!=O)ZQKffJ=kMiWK5CjI3}C|)SLbKeD7VwVP(SspnRH>YHI={_v7Z3oep@*7Y|f2 zP+O^{JCPD%YOIcV|gy)d0W^{_9e*zRjL z5^7fa<)1!2fqLFAf_K(ip*$oOBa(H7O;vOwPI$P~-u%KoUUi40X-&5AzBWfU5y`yB z(uxtT-plwdPfCD-$7<=b=a%JNh-5QrU)g>Pf8JT|H}1;JMKvrub*3%alJYZ;-{3@( zwN+eyC!V40*=)lj<&l^^m-+cKn7||94<&&lK`@i&5}!SJ?>cVr!R?dBIbMud&sPr( zk;GS5@*q>*_VhDuE&%$b)YdNo!X-cO%niW|UTim0{wnsRYU>CdGlmL)h;Eo%KR#N4 z*8@H|dg2wX&LIw^Zq$H5Or}M?&h)MAuf?W6A*`*#91Zj`l zkY)Pg4xJ&oV`|03!J4;=?Y%o6FGBCrDgBArZ(KUw7zwH$H>{o6U-tfQp3=Kj~V5Jga1i<*m z&WVhPPsnrJ&dna)scE_|7C(+_olsV1M6Qy3ukVk_^_GI8aj4$jN?p>U>8ErU489!V9nJAm_K>dEumIJ7)>nsXZDvn`cf+It(dfILa! z;Rf_V_g3-3<6c-^Zq$qB#i82)+*7X>2?ouj3hmVfC1h^*DQhvRjD&I$`Gr+OY0HX) zEB4U?y3$w_r)Hx&C9^k!D^d+zB}L<=T&nm2q>mD+qy-i0pwfwaZ$~Zl&-Bl_pNK`% zO;VBXnG_y)_+Os8u5Jcd_^#)JAE#b6(M>r;^M{brry^bGDWFl3w!^9FGH>Xzh14Bm zF|HED2#jW{_@=g*Q@w~AuD3~6kFG@Z*oZ&V`dxk!=e=`;kJ^f$E#%?$4X6WptVA;l zJAP#|%25}#j1=@|ZB~~+ktuY5uH~Z=SiykJo~&^Eb&O5__-QU^vv4aP(;R1+^OpJ)c&K4s|xXwJ$k7<;z6ty z&sF}#^=czW<>S!B)dV-RTN4y=WKX~^LoyL`m=>vW2|kk!UVavAxSHia!=a4j6Y4&0 z*Ix+wNme<61r*S|5QNMtD}tMIs7NAvy8{%q`}Q*Ezs-tUEK*8K^r7P>JsC*n7JFlR zkQ|qv42wzicpMcOXIymZ@)@xhNe5A01THAtY@-+KZ7JiH z@)A+I489j3-!>oeEC~`)CzcIh!Dxvf20Zj^Hbsi*C^v6UM%R+_qsPnY>-UDzWILN# zZoQ?H?$}PTY}8AJA)hVvQ|H}?f@w@*2gcMTYju6_A-oTWtcVaCgu5hnTI75I`Gs!f z2n+84UA|zLU7K1q3iih5**V>&e5s<8nlmN#&E4dyk`iQKfy+fSHOj5~<(G}}Ou?m- zJ`AoWKFOu%#WUmFS+cH&jw+9B$$)n2_Tpu4pgzVH0+*!M5ga$$YY+!(*M23s%B6=C zo0yrK?(26=dA-)eg<`z;fDuZB`r`TcnTT|6-K5HoSH=+`T90O!gqA8R#5P8kGU65g z0FMj>9{dubcNk_ejtaKH1;BnxQAikGu#4a{nnU=iCC|ew-GH0Ni7TYJpkIbZ5hc;7 zCc1KRn8DS$@Qvtm2Wx2FD{MrV@bm@6BOk#6YE^t`(cA5@)h>OhDxn2g#47KwuRFy; z08y`E(%p+mikdff@_|uYA}%mu#mL<8vjo@M0SM>e(9RWd4C;o3VNQU* zD=**cr%**WlHDAFZ2ky)^__^PMfl%3x|$>qpsu{ypNRsSF3f3zL4grGpY@(@U9>^x zY(cuFL}<1^Bh 
z>L7uslh|oPy+4KfvLAMotolp6PM2q+2Qy^nx20()2DDH=EBZS+!u_5%hvMqU3H?S8 zFJWdx&d62sDBk?@iQDWjO?NH5r|=6^|4>JCl{xG1sV%TP&d#{2R{$#SLcisV0jghI zsA{nP5@lce@5pgo3y8NJqSJ6Xj;x-n2q z2tI@X$O-^8V7T5GDeFeqma2zSQsv|)(F}NM2L*AIJ9V%v8`0^bf~wrW2R5>g}uD7wu3yr6xPo2I}ah6kg`CaN^=x=u$ymlHO zx%f1piY55!=79noyyZ5?RJR)<8h*39^#oW0J9k@m!2n_PP}$LZ(lOtG9Bwm7rK{-K zb-+}?Y&ZpB?!2rZ1Id440gf353go|M_^u<{Ij-eCrXC*Wc(}%?sUSMIQzZ^i38&u< zRTa8Eo0($?BzK0afqgA%nTolT#*m@)lI9RXjF(YUV{?v-_st?2EHbfs@fx-#wR0Pw z8MW7imz!SY9xZS{ZyJ&kk0hll4-r|062_5>wI+T^!9C6QnUkSBCBux1WkgBV681Ky zLGNp4jKJ1){|(vUugLMnCnyO9pjr-aVQ1#RlMoPLtFg<|9(w{V6HEdnhj}Gu>$(h@ zW(Dn!(AHX4yxohO z+;aH%lVYw4$RiFeGW9rP^lovANrAndQkjK!Ju#N-@9LGX+< zk(zke_!h?=*V*s?kiuDghbZfT{}f8J@q9;uEB*Xgj_-{sZdH5xq02M=sGc~19e+PD zz~^9qe{F(0Nv8bMAa>Ku8cLUeMRa0Z;e0`0>(aVujyV7`eN*~?JH2Z{L=+^{S4*zi zEE}U2ul+t*ey-W5+5{yX-h7%=TSQa|9DSlD8i2U-%jRk&oc>Dk4KgV~U($YO8Ww*$ z(I4FUZ3Ll(nI9R+28r$m#c{(A`K&XJH+S0FWWarg<@*iHDKBw+N^-ase&M1Sxg>TD zz2+qC!`i6QnBu{gV3Q+L0$oc%QSKjO=d^XV{R8^84ff4SMY=m$Yd5jLD*ado;DX4N zTe9X&Haqhjktprln~?f>h;=VcGV>Z`ExG=&zQ?>5i4&tg^d+nsR(#@4IQ!|XG|ko z*(jlSs_Xi_?5zX@BGEbKxzDPZMuM~-%yjb=13TXOw^X*a*qSi22fX7BJGXe_ z6UXZ-Og*V*fX~Z{SH)MyjKde)d3Mo*Ev@$3?R1uC0cmGC&MWA?V2i`>t|Xtw@dlcS zw6Pzt>#(e6yMe{pzq_k_Bl0vYJGEQaV!_PKv0`tU`MA;BN*3+4Lv_cNbAm1>8*9!A zMH7Y{nHIEV2Fz4y7m@WsNUBWVFHlK8ARFl42sS@1CWcSCh9B@-UsBCnBVMc@ z!9UPr$6)wd!ho?*FB`k;(azuRen+>=FAP6?JTi27zQYaj>xiIZ*FRG#V4xw_)kS9Y zQ2jQo7E$VzZm{0M_;_~8+H%K#o_A^C`udrSqwo5oH4JXEQe^T8cC@AfovOP#>0{6C z&RYBRTAhox>)-(4om(fhJ*chg2DGzbP?n=Q`#qbl-|&QJWr*GOa7zrcc?$`@b7gR{ zlHh9ABzy1MZ|m!Q4EgU$8S7Oy;SinO`Mws9BSi$ZUd~`80(9>^;L2W3^p*W`_!khE zlwB8k-d+ThUi|tK+0KkL?^3VEv$XwY8~BX!wv9GBDj(NI-kh2P{JgJ(*&3Mv68WFV zLtB}$H?7{Fn+}X~Pypy#evWzC;g!t^Ypz3P3l>m}vrpvdogawl9eZmph4l6}VYqpB zv#0KtH?L!Ta`Go3`hB!EQaD-Vi%k3R5^Ra~z~dC{-Q0+aY`>=CKYsprX&VBjl1RMvp(}+ z)3$COXH$1?T)FdM#^H3eF*(#jN*v<5mvl61^<4wLpAzu6)bfEXb3)D25-?BW@9UcK`)Z~|xC&-ZsO?+X$s7Rtgi5lrH^ zJD(D8*Lkz)yn_gxdY^h|X`#j{$Io~n*hk5BjXEQEf`;(Ntr?Q^MS+Vqt` 
z_+4zb3cms&SfJr}7;V%5YqJ%L!K(S1=&?+BwDc01eX|-oMD?pVW9qdpxw)CcSDg12 zQ=|FlCH>K?IjBEjtodPMs*ao=zYcz-A#&|~Q?9P?2%5SUuVB0i96zn#?oR9}nt*S} zc0ufMM&;Z<$nJA2S4^5uZ->s3OrI|6fEt@V6ytntppN&6Si80A5dX)?o2FK5m-5(*OKyw*lWXig}CyVuMeO{ z%|aMf)$Wd_st1kxw^DGdN}zL7owXmg7N67%h?{&)beg$NOw}uuO^9vWu;kr&<&`X` zB0;{#_Qs#OmIjw;EF%AMJ+i_Pvj{6VT=3buv52B;Uog)vX}%>te;?vj<9S9+52D|D zFINOStt@M^N7B*x@5URaDWU2h(7!JxKlS*ko2A+FILP~UQ&dqm_8f3DZQ?OZ7f&&2B((`ts&csq_FFqZ@4vMI3Q$reYQ z99y34(*@LCTsX^#i7*QmSIyuYnkPK1im@e&#E=9jKDs#3ofYp}R}WU3`40 zm>u|MyA1Jqp^EgR*t)CnZI}#(v3il+CA{kOh84L`R3y<6uFMq5bHk3EkEkz0BqP$1 zy(~>p94xjW_+DIrr8TO=^~1dBZbeN$%jrf*=9vVw`Uv^r{Szoj26wQSwC3icw<0U_ z)kIMNq9B2QOzzb2@*)O66c1AbGor5O#9on{PeM349&lPsov>ME5F7A9HQmNoD>7i} ze};(`e;Vma(AeIsP1P$p`55=#d_nV28!_p!#lZRVP6-i3pO7LY03e(map-&Lh&T1V zOY}a6KBeTcInp}wgNEy;tK?{0Xki)LTH1(X=JK+m1}(}sy~{*{7m`HdNI*Su4bGLdMekoDAhM50o`Lltj^-I} zT8kL|7jbU^6i3_jeI|iG@Zb)?T?g0T?h@QxgF6IwcXxM(;LhLXCBJ zyWsm6m;Lj7fWlUVg_|(~mBjcsnYISkH(N*(X?xzJ)y+s5?HOZ$X>upE@H)4P+Z+digimv^MgTi=z5mM(m-|qs9_%6LN?H zx>z-0eZ3)6o?8uwtQlxECH3!Dm)PlFm9_y7?bB%VBjiMO-Z<&~dQ#l4hRh<1)kW=$ zk=xks7GJyMh)?Xc9Y0?LE4OE+ySZN@B0`5fTFLT=%7Oc7yjKhtU_y-grW*sN}|;$g8GYLXu{f=h`o@C+C8`HKhLc`Y-=%&G}F?5v2tpqQDG zr(Q^k7i)>_m@J?ZPrWfm^Tr2RsuM^^P!;H2e%R>hmz6bH)gp)e{-t>|kY~1cZz!9> z+?@#oM2u6i+0lJJ7DcI7tW9l%H0|55ZqKL-z-s82ytKGxc^Y`{-s>M$fe(dJ(WC?vlbdnQexMRAW3#2%6p_y#^%qOUgcJ6n}YoaV_=$JthmF(6lm49;`oo@mXDmDL>zii-X^a7E>0Kju`v;rN6d>IDDyPR!4&zSB+R?QG5VR~>CO%LU`kYXU{w zw)@CjuBzP`6pUfn)ZKA0c0f+b@c81)Z)=*C4&HN1_f=fWmfx++$jhZaK zR|sFJ@88&h=6Yf2Ti;AmMFYn>;Fr#+o67#fQrGOQc<=ze_akHc0*T2J9%T-)3GW#O%H!2s4Y zBo63qOTb;!u=Qb!IS#dwB1?To00$>HCF$>6hoDV_XFS99?{jc)W44VFy5Or>J@k_+ z-L(YP_mI!|%^kHZnQkB8(3RxxEqZTVtYS&*GS7w4#T!Bt`~@D*;9VTD=tYE9Hq(bT z1nh{bS$bQv?KHoohVaDW4vW z)zaT5FbhVQ<2^QVG((MEpJ3M;&l;&oCQtS*F`sJ^JpH-L@MdFr(s2xBQrFS&WmMjxjJ>F=q%Ik@iGwFp! 
zk}VLkiK0TvW7}F6`&i(3*^z-RL)9>?Fb$0EP1enve>#Ee9Z7ES&T#bS8Fi%K+cm+JN>1;jKNNYse=v{1iRv-zk3mMqB&%aQDqKtjuA6&XhS)Zni#-=Vwu@ z^d<$j6?841=S14DsBW{+UJUe5f%l1NjhbfbDxFbx=J7P4Q&|~m+@)UdO`yUqp8m=>DrFPnM>`G9#GY^N)51Hzs*oR%!_Dvea$Njxn!mv(oBGH9Qna4{l)LQb*KC zUckews!|LVcs1t*;p^j^ssBU(l@rsh^Jgl?{);7#(~_#ZfDHJBjuEN8UC)!F)on2& z34H_CPxD^|L{)$*epu9T8zPenbdDC*M!IjvW&32El$+lGjojwb8l~-?oR`-VaL*K+ zHD15!(iwU`iX}6)LLH>f<-5?D0AH|ziIH*K!@a!R^DwJ(S7}l4ERB4EuODuWKr*Ly zT%QIFROwvWpUc5>`i?t}716xX_ER>=Tk6hEf4q}>LhCA<+?f4au=p=nNK6|N5YW5qftd#TVhdItvv<^F#B3 zLb8zW;otD~^BQx*ftTM}jRQ%^Xxr)9POJ=%k9ma)O=A-E#n^d!3pOI-;^mr1yltfJ z^4qn?9U~(R>1atwdZKt%1@5~!>OZ1uDM@p7V9f#~!lJi@D_g$#N7CV8+aq^5Vi$t2 z`@eqsiio;E5%VKMN1^ov&?(F{2HmjEz>X2-jSN!Rb6)G#17 zD-_$o35vxTye8yyZQ1!3PJ&2m4|mSwb>(qE5?CPbsPOAY(F$kSA1d}dHYkqUCzZt$ zh9tEXoSb3NsXeM`K`)d#m`XCNH(-)tRmC^@KPDCVSydoILMS4hDjh^Zd;Lg;d@i{G ze!zF7*)MPPPqJTngG07*J+5s*Hl4mJ^z@_@Oi@cqOYuH<>PDm@UvsmKXRrko=bCjP z&&_aeolYanuUijC`Auf)<*tjxd`6njH^ghL!`L^hhISD@eQSd@ueAL$Y=ws2>=l{) zc4?mVvTf`7fSYaN)fwB9$$en|`^!YvfDs*ZwChwtWzmd2cP%{Ss*KDi2Z0pTsC|Bj zs+gHMf!+e{mKuY$)AM20U3_ar$OG1>1k$Whum#5!e4L8*OOqi1P7^`SBKGJ}BiKoZ zF|L7x?@QpiAoYD>Vk-0|8OZp#qsJOpX5vnk)(Ol$&H;%A{rMU`lkE(GY9`|)xPpu9g4f>&;P%75G-V)YpejTSXnBY*yK;kDX9K3%SbmW4qn@_8 z9ZueI*gVrFlTWrKsQQy}q4>*6X`QTKAkpY+`9Fhhl<{LfAGhqITBddli^m!bwgtl+ zqEgbQU|PibkKJ`n?A02t*6Ob_iq88AgtJj)Cv4Y3qm!K!nI4xnl#ydps)zBO_KxFu|uG%VD^+S^@++w%$#9a3-32$@0wAsJVIm~ zae7iGZ+PL`>E|mcmH-k;?Y1~bm7;f0_0=v#bR`6W!u_Cr`?A$S#c1ktW_lC9S9mqQ z8>EDauhEPWNw*o9V)GO;$>(!DIuD$|y=A!G&*V#j+V@!7>y9I^@e(20-{R>wM4lJ; zuN0#LZ&vY`w{9rSrt`_j*995|?1{gVw3?NgZw@mWVHU^7tTU?W8yGB$eOSZnl>PzXM?qDz!nO4+iBRz=Oy7-ld@bv7A<-y z8Jk*LpFAk=@2xOWB=&H9t)+0a6C>IFTT!#GqPHNSYYhNQGm$3JZ@%ut2DE<^Oouioi@82|k7hlr+;!ErAW%WGqm8<0=@eRPD(a1!M z!Crl|?0hYRei*3IG4X@dBd4!SaJqj%CDhE4wy(o4KeHAHf1`im^*$N+yhJ=cL>SWN z#d>mX2QFQ5r(2%jzy8CZoA1@%H?8wzUTJPHbDYya8%TNl!W0*sydVt9gV#4GUedzBd|G!+LKf-7!FDT20fn{ zinM2Ec0KcY{LVawZz+L|bMZS{#yP5+zX|{Skwj%Lw8X|FiT_ 
zq?cP`5;ywYuM!OVxM{V$xZH{-3}eBeBacUi&_G+$0$Qq&bAW(V`}cY;!)p{4Q2Rn2(_OFn)f| zB!q`Pk6p4UpyOqpaU`{OwDPB*+2s>4jhe~_9MTMYhRYRGR;}l0Nq8KDn4#a1B5Gd# zEJ%SIR9VEW*w&y?UGLlMmxoqa!8&kV8YY3ykbWBUU`j2zQWAQ!;oEOq&Jd5SFHW{E zzQNwLshvxLVRg(Q(|+`H$rZxf3gF%VT0;B;aho9A=F8RNMn_o+`Iy~qf@xXnXbek7 zT9{gmka1EP)+}+5*f%U5cOBQDGyKUb$#~pn^!uON1L#H?A5f^M7)WUZ^sQEb5w`0D zQ1&18^=LeFT@yBVY9pWc*s1H`<=Rqejbc+XLY)45lOf=B*Q7?Q4N?6%K?u%@3iAge zB7QZlSJH}&KJ1v}5-1iiWHX$?VmglE`$R>A*?0Qvwi`+B7;8>D5^3bz9{n3C^a_PL z_g8kTHxh;vVtC!FGy;2#3u*Y#Nh^}Se9Y9rhwT&z`Vop{R*8LIcS;7#Moq}g4oV^u z=wsKzT*52WJ*SFMaL?cFeFcnV0!*c{ znO1qESc1$@lD++GJ>5cV->xQK_Wjx3F3%FtI8KB$Bh8HY0o#2M1m^qlG7wd(D-X*< zi!l{G&rTKnRWBU(mFGLP?Ad?j!)^2Xt0)*b#h>4bANw>hs$P2+AD6ub-m^QN;p4Y| zpL@QaW_T2N6j7MMLvf;fK>qTpAcb`A8?eAwy~n<#?#JCxm65K;`N$7M%kz=Ad?Shb zDq|^)&tb$OiUr~^kl%MclIz(|lzN!Ba}AaKW?=k)3>6FU>DznueotTUfs&K#56s`? z-cG%ruM4nfF-Fe4L+B>Gvg51I_6rJ894x|WC%jI=*M>0pRY$p7Ly*HANC@9gwA*hx z{9m?CkuEm{6EBRAhICyMzmKa@i;_!x{>|yOLxz}2uq~x2H(x@fz87^`tUJEz>%jof zh09U0MoVb(ryU_c@SF z)5XfIeB?-6P;{*4#anhr;f#-eL7_?g!+~#m1JJ8BFqJ_FH2GC{(&7LO?ae2Cd`XgU z9IikSb=`ZdP1!hk5#05RrI|dw=<9o=jr3F33yHx6Z@;^fIYKJ)U`Bzs9`z=RV{F+#; zSOO<0g@dAI88zg5ru|=ZokXRToF$Xy{@6%@qcrg?HRwV7u8E{s!o@M4c8E9 z$CRK!>?ow7jW* zK)C-P9~NPv*R=Z$8ZiZ?YHhfz4v$t8Uw$lz4i~%Tv>!9w$dkj5Dbt9aN3>5aTCe4~ z`LRDU(e>25Tk!TN(kep5zKvH<&oPD)+hEgNH2N*#E(7}EWQa{+hxc5w&>gyMfp>E9 zDQb2hDLM`**SGJbU5R9IQ&6E7-ua}klqoqgyR5Jq(ACkE7_`Yzbi1OKlJHf7bvHr|*To(TG;KK%6mgf~gn$K!~4~dxF zaZ+5q6bjPkRBQ{wjdgP7^sKF{TNSjO-RlFdHZz}&@WJ>UqG4=BOBrWb0C1+}0DobC*W*2Be;EtNqyGuKYFzr;}Tl>@RvI>eTPwH`w zkaxW9S`oQp+Rt`gl-fY%q>_+=V;&eRB zGPg+j-lro=YP&t#^-fAd->RI4*pVA!|4iy-SWmk5n!(F}0%wh8nAe zujfi&EphKl2OalE^eFg&nw>-?Z9P&pfbfrFiL~@|^9ugn9w7=dLfBjvOr`oqJHE>~ z8#d+LvEU}>k)(x~bb_3E+n7x-pJusu81zD!{qSHj;z;y@ih6@|sNVXl<7`v5JNJbQ z@R+E=kw@ZqbHmIyC!*BH_=JvhnY!Vf!JV|5dCekuUXoL_9L}~bV7f?&`iA_Z8EtnX zOqlz!;s%Z+_tzs6TVy`S!gi3)4YQzb3&9tTEY*3ib1$IjC;Ds%w(m2?+Hu{#$Gp2< zLh||x;Hfkw>}g6wIZw@VkH8O9XlwTFV$jU1pR~Zz3`a-LOs|fP-7s3?ZOg^Sc^C&{^C&UXcQ~0HPm*+BdNX?$hja7}28xE*6cyLHa 
zffs2Ax{}~)oDE(r{$gi0JJdDV_4Z7|5_JfyF6qU`A&;p~!ykpKO`q&+m6*Q2x_{Wb z_Ary$QPfb83gqowlII7UsaTcV82}D_@s3n6P^?_jgL;rniYTxTIe@Vw2#}+M``2B! zM5;U7d%-BjK+R70`z08}%o9T%#y&M$3K=oP*(|L-aK2Q+nI72Z{N_q7MI|*F3F)zN zfCPzkWK*QyKhcr>#&VxPJti%Sd~!})_awpRzHc9mXa^wSzu z$3z8CaL%Ed4EW1Aybq2z=sI9-g~dX0+A2DnDBa}SO9 z^FQ*Fcu|=1I0iIfv!`dj&}O&W@)p_vxbT!Kwfq390+t(;h7j5;jh`vokFoh)=T2SY z0n#!u)7*MSvqDI<)=t>;hdQ+fI$pObEHZ}Acc-pAS>Z{u=4nM_@e)~Hcj}#w*?5P3TGCkiqmB(m>?EQqsjCl#rM1(X=@gh#mxg4A0?|~fqqkm0 zZ;9NBy~(h_cwoPEB|(V^V|rYcMCG3umo)^oU_In8OEU|!_bE~sIdqj)u$-e`5+V}; zv(iypFPk~eboV#kh~T=J)o`t0vjTs^hzZ~3iU^moh|P0CvZKmeau;7ZfWVWOUkdMT z0v+Q_t0~bsYv7y}n9htCyy!n_!{AGRvb@3ZaqIjgkcF*F4* zBfeehge0KFyN=X^94kB0e?Z2&7AYC%I?c&gw@F3*iD*mhau0LcDYeCl;?^pl6q#S& zKYfds0kQrgzB=(6^2?!?B%FOs+T>{_;224N#A%x`eZo>IE{8viiQUw9aZi^po7`%o zaB92TeY4-8*dhc995lqTE?w?D|6Ccz!k|qf<2*IxGAE|s$1I+AJmMqAHMZvoIT|M6 zUyM>$CH+m`c@fBvLkX+#{Sh-+lV?Ug;e{_V|8s!gm#u|w`r#bmqHMeS!|zWx{ORL8 zhMhsYW)gy_WS&No0p^XhnQ^@rJV+xOz>xq)5#qGRJc}y5#!7J`iwOHSOovmPqrk`{S z&42#-|9jN_uP>5;T`BwTkN;dH@JoNBar-pFS9VN%uA6{E&^t2&lJ@BWHOP!YhzgYk zelg$azdt!H*Nb6?fe#g*{QGot>3!7pYTR-;m|!+Zo7Rcee)d0SAEF5+Vr|j7OW#=# zJg;P5FGbpf{rBL9$8r8|0KorG?wS92_9VZG%)yW=*#m{X?}2^G)gwkh|}SR}cZ$!4C#Iq9@y3bMr$XDUoN zlGCaJ$o+ZJ|CLt3`FB))$PnQppj<$wzyyT~=k+ULv-pFX+akHyF>M7tK5`;Pq<-6m z=nJ6ZlcbisOxbhE9ZXG4(VHP8E~PFh3o4`5V<^aL&)CwMCDZ(=U=en4LrRh#9{_;T z`dpZRju~Uw(I4@sktFPz)SZxh2;%IGWxRBS6|wC*rOC}J^E1$;PhS3av|NM#rKZix zqd-zXA9C1*kRY7GPZ%+&Rj7@-51R}ZsMNSqYBxniT9(-bYA)TE_d#0n_oB)rZCz%q zPqJu>`3x}VmYY(dk*5vkl>E`UME2fJjaJAw?l1yi6vd{U7Qg+N1h0=oMhK55RDj0w z<@m-j%cW}Q<^)7!nAsP^W;dHTe>SA%hGb>r5wE7I4+aONETGr8-{!+Oi3t~ zLwuj3Q^VBqB@_dTX=o5Jq^Y8^vTs>dBSU8R44EaURw_8ZG&SkWF7N!eFEwCooMoP! 
znK>D1CZajxkFYhqlM@+jt&SQW-~2!>X7=u5M;;+AFlJ`eF;_jbEyPf}EPp1{j zvF*S8P~2P6Qj;c~!oaB?iY&JVrXg7tx4kcUGQ=NGYP5%CB4GjPEKI*y6*~^NFVHaz z)yh*Qgv5#uihsbWNTE|5BmaQEFXe$nr^ur0Qrje6%ayTmGek3*a$ z-KvtZJZfs{Ux+uNQ`v3|P_&#P3A$Epkb~yL`N*bbo&tTDozPMVp?Iu9{*g8*_B2A5}dEBRzHq<(%apmP4bsggn3X zfB}mE1LhjNY#bR8l{zCdbQJ}8G<0-)@_O{}fsi+8rq3e4JR@eK`B`uRa!xwQky-@y zd-0GSqaMq_bRCS~o6gY&m$270rJWWf=&`Q!aeR{c{mi@s#8N8kCrs_1=euq+Xw|3> zI97<^@(yoOq*blha53U(>BzvNM~m?<6_#d; zoSz|i6_13p{wr>-{$xF*sK-)^K0+heqwA_Ho-C$pGHUZ|)Vz^!c2@-5{j*xJ%a)yv zn>ENNB()S~P?Y@3*aV8Ki97<^>Z3@4va-Cw4#}dkjaf_zwnL0x3^4u%VHbKJIa$7WKmsR{!bm|_Kv>>q(q!x-1bNfJG2{E@wTRF z*+lcGW5_wCrM7##q*wN7nq8@?b$VxDC)-+DrsNfoO^OGxu+xp)*@SrtbB-LrC>3co zO#0v~WyYv=&AuQyC)bHlXBILMPx|iuen{}t-o&J@zn;1*PoGiKZXXkDtb_&hkw5ED z?A15_K!qF%J~+c6Nsx$@FcmMJH9{beJYJ|XR@r<}9>OM`&A)NE0b~xxj1&M1W(#T+ zX0S$ALv&;UkfNx?lX- zTQu!HznSA=2U9^yM^@d9Qw4oD0X_!2T;GkfD2Oo zwb>~k+6(1L(|p-2M7UYyq~c0@e!{`C44h`y-O^j6b;zZp`uvl2+qd|-gqOxDROV); z{hbv@v=>~xMfu?fuTT3{7=m{x@>hA1wM%8OH11CKu}s9oI0`VUic02Zga1=Axf!>Y z?wMBaf`@&nE2e&UTAE?Q={MUBw}dKnRJUE@ghzY3l$(NHZ&B{3=?(t=l8yd4kL(-+ zL;Ng2IgvRf@atO-8xA7jj-DZd_kfJL8`WnwdTGgw)?5j4#4(h49@-9TXl|F)!I2Tc z)#4#vDr9nxXfXI20zP0r1%XT!KRsjj#Iqy>!L&Dtbhgj>#N*9a!&Go=LIh~`t1%+- zo>R@6%Ss{d;Td=7dX*2V*I_IGg;I@5+lOLjpK-k_`jw6Zk_3zzgZ{E*4BUfueaT0v zZ=A6!a*<}kHmR!)G=xU_S<{K&Ga?`$Ld92&Tg)U?;1iG)rBB?%ta|?RC7R!!!V+#H z6C; zrgh?9s%VQ~)JUV%`rPEOT1ryrm&1CK4izOPlfkDH?Bo2~F9&p-XsQ)EP1Y+*O-^); z0|Zvd_}d>XJcHhfydE0Sf=2LpZs)ixuJFwqtlOXR-^VAto#KE+UMIC~myfWm`ODr# zE>g1dT+aMQ{T{CT9`}p0xXKtM0&+IvP-jnO*tlXEp!-1r9osb0@W9?(&DQ;OZndLN z@QC);t7`MGo`J_O2!#8U>^g=%Kd@>P8a#0cfBQg7;a|7?o#A1QL31q$;}C(g}>A<33Dn zM_+d@aCx%v=EO!m9&*0nbh&P_Mz!CNKs|}M9$W^48k^vYks0?Ce_K#Qr+PK8;TcS( z%RJO$vY^!{sMW_pXE`f?JX4z2Ir_g_`5K6_4~)8898g>xm~jnTRvO z^(gG*px)sI-y_uL_`H~tWf(|eyG2`OIMBzM@_`P2V8baU+2;Hn zh=YV8MFm&IQcxr)eS^1`Ows`_%QtKoy*mE;N{cobs5#?nqhT$3=5Zi!wxW7JfnaNc zZA?anwk*etD#6UjIDAl(E$xI}%#Iwn1Fir zM1m!`Yd4_&QDId0d@@7O{`o6=^sJ)KQ(aHUs8WBN@qV%wW&@)k1h1nrYI3szYxx?bteR$B40FEdXgqXd&| 
zTjR~9CNJ*qnP%K$e$XMVt`{%q7>iZc$847q5A6?BOiXzf!1%c44&vCkN~$%1f=96o zTQ)f9WSL>&NYF+^HdTedl@E*NhtC?)B z;s%!7TGe&$U9xpEgqX%1g=S~WXdZitQHOn`s5J7-5R|lRZn1GAiGl5ckMT)F%%Gml zAG&`U2PG6S2uD@Vw{np-WCspoq)i9a+cUs@UX#i`{F6?1(+`6LM@cFUa)rCg{BnP< z8-(n?;VJcjLmB9$JAMZv(f+~=&|&z|FPAa)XM*e135=q9s(D?(=RRxaTd=EYvy)p*w$- zI6WZ6!YX*&;b@~NkDd4{&(YubuL@g>q`T(F(I(#BxLfX29KxJx+h3cTcH|I%vB10l zAqPl*2VqomGz9Ue8I6r@ps^IyhGN*MqIq~_NZ1y*rEj7uaB(u2^7_5J%t$5ivsmkD z*p0K64a2$?0K%6bs+~jNhZp)^^s0h}xPC5DH3g3h+@l^RdSZ*HIw$Q2?*s3|*6zt$BaV$6 zY0p-_4JYdf6)X3E+7Fs|><{>C$^L#behv^{GZX#N#(C~iQ5GV}YaT{Qs3t#u1F(+^ z|9}$$KDyY{$7%l2W)x*sJ8iiRAI^;jswEswBkz?Ay5L9q{}U7cOQr|M#0k=AwC?a= zXfHmNHGyQZE2VZF@&{IY{R#e_Ki>(pt;bo|n`iks#T0#a)cA$^NTh8k3twKUC{62I zeyU|I+15r;ooB_@9jRD>RwtHLw53J!#v&S+0W%{^A39(xWri(+D7wKqrFalnXey<`P97M9<{tJ;NP--Rf_3ZQzDK}{t^d0@ zH6Qf(shm=Oa4@fUuLoEF6y6K*{dbnd`tRyq3$b0%(=t|AE2VB-LftBykKnBd<~lRx zdL0*=&HC_Bv-gCY&XU-%D?AylkqP~nR)kbAojb34$5|LuZF|>2ZE~gFaWJ76Qzy`O z;o~QOKyNK+X+>C(i8z#)IX}mQo-KbLl6L~dnYLJ|kQi4yQ7q5ekXW#+t*}d|zVbdI zilT&R9BG^cJzk8H7nU>>E!(v*pJAdEn+UTL7xi@REEq#xbu%WfkN{sg#~n{-<{8d#=Vzik$LEM9z`P#pQuK?vYPZ?-$1Wvw_fFRbiH z?+N`-zljxV79#&M3Din?St=Vvuq34~@>$MmOsIO+kb@>bo)uk0(`!~c7l{@uzX>^q~l^zByr*jmZDwqPhjwkOTlU?sXtpfj%!T8#J?PHv_3%#HeGdt(VQiG_h8(HG9i$be6rWtnJH0O(<|BX}Q zl-h>4)cqHL;WoSr5;7y$wo zGhSAJq7EM<1<9Kh7B?Oomtk&KA;cfrR_;c5zOqXJrBM;r+=wRzzuids&1oAPpu zRag*iCWv9Z9<^q9Uu6WM&!o?htvjm+cvxkB8vN??wKG9oOyMa3I1+lq`aCZZUW;y% zmICnQq}8s>HH3QRR5MPgP_cQz2886VU$Lpew3sN1A$5$0^gw8qo*T$@6N& zx$+1zdRhcwcCcpW&ES;5O(2>$9yqy2B!fABq@hMB@-NHlSA~Z^U9YKhbv*XG$ViZ1 zNDN8oXUxT90#lDHb1#hu0p>NX?Fp4yBBif$xDq0UkfIdgIt{bYqU3Dje}IN_%@KCm z>wiPh%a26=PQh90W*rv@P2PRu;V1efbXhdzB_4vYu+m9m>f&_U{Fck??QN%Zb>cyO z^H!OEx-LPK#BAairkIxq2t>r4iOC<+%z;D~pu%3Du|l8Lb$Yg9=wquuodW zI+evc(>nP@U!0Mq(p;O%1znG*2vCZ+zXc@ny!TDZZz)AYBQmnUVbQu$C)b*+QIF7? 
zCIbvu)$Nw|_-SQ?=I5?^StF(UCzr$-gr)&LPGu*J_`YxlYk|rY%VSAtO6N4FcYwKh zagT3qo3#qF0iv>3dz0S7^|lR+iJvMS6XvXq?|-AXu8+jU~ zm5hrzmSk_1vPAWwgms)>%@FSv7w1F@PPwsj4k6#3lA=`eFj8Tp)9I++{26*;~JxdbiGsG2U^5F(z*Qf-EZa|O})npfQ4xCA-bz;_^}3>!x$Yy^F;d-qB}BswJTq(ZH-CZ- zS)$AoV=I;ri_zSirD5S&T0N&zV9Q^(VF=EQFbP*3C% zsoH2s?wo*uh=I`goLpg(l*mDt$-t0-JW>+W29^eaCc*A64h2kK!Ua`)Tr0x8f@%~&(D);u~Qjm+5jYL!^DaR+u zj8u>(>sw%yv>6!o%Eu+gHp7QEv_n<+9k8z5PxQ^)jx<$8hDN2~Usq(Dau6{Trr^SDq* z4mh>CrjWv7$=ovaIoCL84Nle>k?|N=a`6LgGRYm3aPg#>zXgbNm--m%$frll*e zp)79}n^D8zBGG0$cE!eUypC9FUwa@WL!@H;6>E2Nab|f=+~c0(?JXYfP(gm$kWure zs_l+YW4U^pGpiFBCQ^`Nlfh9FPj7Xb6sKoHB9Tu>?Y0M&s(@+JCS8^B>B4En7{Y9= zZ3I)?!G7}T!r@GRyg8bP$Y_x`d0k4TaYcK_RR&ia5c0{jpN{t`KUD_GbZP&8(ye}Q3rWx?st|VjXtWftP%ct6!v7&)%W`V0W z$1kvdiL8tcFAB#dM4{UoYDH%A<`eR$jrt8NhkT9X?5S979YdA9^mRg zC%esmGp&5`$_71*<{dC?qdg!{Xj&9ijazlQ%`2Z;61+%w#JDt8Y;)HLUn(4-#^Ybsy!msvHnlpBK!zw?@~d2dyd(J- z8#i0{C5|6tq!pJ04-QG?uO5@3-3G$|C4EUASw$v2Fi$#d|6OaSFrF1`y|F1`CvuLY z)lOwkemLcsQ%a_}@(7San1rLZ34Qu!#X-{ZzHOthy*-1$`!Zbl#}D0xjGKRC468y2 zf+;YE1glfSm~DbnOT1e1|A$^UgIU{fzMuxpBXZbn8tZu5c}_${XMeDOZaW)j|FU#0;t1)VrylJc-PgIWr7AK372{at~`Nx z>Qow?aRw_0)=gi}b+{sYxTKlOb@E~e^5>&y*r9_#-^{P+=%i>l#>X6EeiDmFIw`8@ zNb6@o%tqQQKWD^!LCv@X;77$vm#OiJpe3Wnl0d)Vn3tqN<{nB64i%>$3`iKO+?e22 zy`WIjj8gqfl}8o$gYUk)ZGdqPlLC=)kRj}?ai4;+*wQ6sqUNw@fE&MFRc z$z(!or-+R{uhl)1NM>4fh*3$-DrHpXf0w0R2L&PO@|otxk3UI-!1N4BE9W3_>Tu_Q zl>?^M>x<>c1E)oKLxZ&1QqH_-ETAKl$26=3>}d-8HMf%C!_cRIe1$C_sk-TBnzF0f z)E%RzoY^>{Pk?$zAtsbk_`P=65x9;P+Qpy5>w-2_H6k*o>4$s6mK0c7=?0D*0kuD2 zxU(PO39IRB zwuJXtfOWdt=UuP3a}&u^3Ghne902wi^lwC4w#v`}f-H=<~7J($kr z2n-m)!pi8=QWLQ2gbUxqan2)8!NxP<+0JXI_!`~UU+Hr+ka|+J-<|&K`fP**ap>f^ zbKosWo1P*b#6~?il=LEx&yE= zNwH)KD6qh@QB<`PCsnQXC&@G}iEiu!F0P2%zE8ej!f4uj2voL;*yEk7@5&9_hLEsV z^$L5xY(~;Bc&6KZ0>v`s@lS_ZV-C|*!%@J{)+(q@? 
zJ1VSF=CZXqz*mtwVZuD7j^!fJqRHN^11kW`uZ`9@CD_4NU2R- z`{IocU{mD*p9y|qB)okQ#(hHGUnk$Y+21y^ACd{#QFU@EfCfW_<1>$==NFad?_p7g zETU3~XCf_73IG_Br)Y-*ac)3zmeE5|9w23yp)7CYbpHYu+B_5Rho|@#fs_Czx-oeW zBEcU$7g*DYNQ+Zwxto5ZhfxNf)avt0(!`Fd{jXcFSk~al5}t4V%g)AoqAlOzaK(g# zfCP-Ou`#$}K{3UUn0LKjO833`<UH zv5)KX_T%M`9{+49oK!(kn>mlWUuWsp((p{hTsED@iT6K~@0%iN!9rSP=aZFvA#}wd zb)G3_<~zdG*~)|6g~e2DzycF23w~LlA;wuC?pg{>adR7x=2l-)+WyLwog|kQqm`mf zlKOC%bS+rt&&K=VvFEm~&jgbXq9xyzrsrXG=ir9Vb@pRh*9&u3YyH>v>HUYh=FWlU zu2*aCt2_9|!Ytgihx8axa=uZ$ZK9|qnTh)-9V@OIQHu3m1J5V@sCI2R+tvQt_5fxq z+8s<4MU(8Ei*=HQN$je!AK^(+Ott4Lp1X2gBQ8<8KgkfxjN$ws!Xr$`sw=|U z9oeCg2#gucMT-IAQ}*>z(o=RXU&bM3g_#$NVLJvy^v$UjKm;0y&CKXhx!OX1T*_Np z-J1?*Ij&XfUS)BU4tbX(k13;`4qaa{@m^0?S{FSQpbZ+ft2+II34lFdg45>1{$qOA z8y0%_Rg|)#gMa%igYMqBM)L*xKn@>&mH9P zDE>bF@y1Rl;S15^3-JEoxNAJO>FV0kOr7uCi9yuq4(t7LqibY6Ze!E7^MW#+8pE9} zjhy3*A{U!6CGvpY%rMM%@jbEH+cd>m_X*&Ao#li^d!vn>YQU2uTFh=Z)1~XZbdo^3 z5kX;}ko|i+0b@>4BQE+;l>L~R6bk7gzv~%;!c*?j!{X=#?0-I!jr1 z$8NG6-^1d}VfpQ=aV>g1r|B`-3R$^p7siPnptm`x+kP8`SFr~*n2qlgVlCR+(dn#McaSDKmNBL5dX-Z^QC9* zV&!$n^bNjR)_X1V{wxQZ)5*%pds$)qm6{ErEJ|= zb;0XeHbbOg9~~JRD|c++13BXA1}XmNf8!rEYn0XGW4V^Wvf?u4XgtwRG-6@H?RDh3 zZ3~22R%*7dp(6JeoZSBkM@K%+=GuIG@A0=`xbbz~KG{X6)QH}Av709c8$R|WzWR^_ zkH>}M{9b#?2uEK(O6x#A+YBZyx|@o7`0O8VMutPjLKLH*+7>W4fif8jc>_1Y8HFKj`xFfkg1+Mym-QoXM_7X zhOQdPXo%J$y(sPtY}{OW-D)~rUpg71tGbwgJ zC@E5d`*?Fmhwn4nS><+P{ey4uz0a9(+l-*Zi4XmfAMYO_{&eoG)pPIzBz1B>%`+L) zik9H$BhECT@FX2Q8oM5P zfcqcWhJ9|3BQL$q@cI2h)i~kcAe~dCY+kd75nLutc=Q;@=NuHS+e+Onbs{LF5~I9* z(2vD;8#`BC*mxzApz~k@AxkM+w^iSOHhzE^^I1~slc42#iMLVTIzw2kWy=~bZpXr3 zap}1*iL8P`ij1P7z2_skQgksgwghwGxJm4&>LV)Am%9mjpkQ>N(Y9%3?H%BspDN^hkhk;)VU14E2OOQ@@?r0SL$ z8KN>XJi@@>ES|~=s;d{j55fDDnw+8k(6ju1-~Itdxs9hk|N9pYTILyYeB@WJVtHX9 zH7l22haMZZzDzjGMB@pj93EERzMSj!uvAMC3-xiLC5p4~Hnvw?uZm8~l8B9P=;$oA zf=%pPdX*QXIz2|KkDBVd1)r{J*)*ZndWLm2mTg#p4IMh2{Z!BbsF^hW!w2ZoIazn- zItf*8N-9H#>lw{t$=|k%WhT=lpQL9^rBoJKF}#bXF0tmfX_?DXarsuJ(AF`^oPIg$s`Bw&$3Q>c(?YL*7Wc|JN(#Lnh&lFdzOY4! 
z5}N6!J5skic!XWv2%`-z^UL1&IYqT(gz!iwqmB)1D!R!2PM1|PB%=}H$;;2r zkd4QfJbs)JOAgC-tXiOp=&2dvqp=uQH{?-6Q`5v_G0vVQLeo^zu_)n4;-dT0v@FSJ znAqj!mr&EB{YN;^r^mf&H)}8WWmHHfM|k__6lUKhcAXc-XqrMY8YOYb6Iw!Fsu|+Z5YhN0_MeGgUHxbFogx~IlR0OqjSQ)1h)^_%a{kS` znj#vFlDy!B^Y(Y%UB8-55DtfkCzUG?1k@~vSd@7BDzt^C~u0dGRhVf=E?T#N)|RVbDct4!549HxMCzeI3R5ISc$~sbmrf5~&Qz6`c$! znN)&UG(kFh@p@4+BohfzS>>F6JDQRr7L5>%r%*0-t}jR-os1Jtq{&`#JB>^-PArxn zlT~4^gO0&*X7g51TUtW?RR`l5N-9YrktTCay&C^Nd+!|{S9R@wf7C{8B#nCSy*FFM z9XAXZ8w>`A-a-wX5CRDa$%T;IB-{{4LPGD|fN{fJE|M&(TGgsE>V0~jv)?}?*^+ES z$bIkg%jL5EnrF^Ad+)Q)Ict6Q+G~B6-C{(q*JHBT|7rc;uv;(~4VbLoe2vg_(%!3L zRFOn{bPU1gu*MFX1(VT?)%I;Plt8jtFc~daXYoP_$%e(K$6z#bzUCym6{FFB(Q5np z@e3_VRUe&$-o(blkre*jiIw^v6=7w(;~1a4{wklf`crh#)m*hI?wg|pT-|V$2M`+_ zO?LJ-fE^?pb_+&>3Cp=2w2-jd%ot4;W}7b|99Yc;v^vANuRH%u;c!?m8jR=d&tWrT zFj_bRD+H3mf=RDKZ?c~IT@wPyX2D?4WBf*-C1KEx(_Gz)t0IxSoPQ?KWeUd`3`VTq z0ITNUN4p+}HHw zErzchxIj3jaH{2Nrq6D((th$3HrFt+(&xWh0=rd5-QgB&UP+V|ebc-5z5KtCQ7X#X zu-QCVup$!=*(^h7Qc-cRnQ`k(wIT;@F$>6val=(IVbGiZNo+7%?6g%>Gh|gzSe$@| z$JvJ0P=AC*wVkMI?(Y}}n0m&0PhXtaDSMg^vIy?zQ82^-xF9rTaLNX&{SIOP9EnC8D-%!Z`b zFxlC{NO%z$fpUV(MXX;P!H&-w*!@L4H)NEa_N-g=8V1`>Vb9xmo)jX1!>VVrr-X_vl%?&C|tCFDDN|U+RbW|b+xEuX)G>?m=0%^uo*`=Rds?6wG}Ht zq!kyEGew^DnTB(b?EY*RTbvvmG}V7o1#th|Mk{C3t4g5}M8` z>N+OT8}wKM(pa`6lR(#58pbw4eOV3am_;m2@;y@`Pv67w@)m{-1}v_T6fR#th`x=w z_5qAW8&-!CPBJ-3g{36djtiG ziind<(%sljuTFUoVM?cg~ zOGiKBY7@fA6*s?Fl4IP^sGSIkiz39wgQ-`=j;6DghV~IgbvC@CvnfpX$2`zQ&&UJ@ zqZNm6L*}1IPO>{=Jt~IPMy&cub~Oy4aZ5zqTuZsNm%xM^(xZd$kqLAIEi`uwGBK{h z<{d+JVH(i^-ncqRXq1h#Do4>;tn_?wl&(P=ewG1Fo;ZxQJB+mK1j7BjakWg))piQy z;3#S%1f=AXn;3z@;~cAc$%1;Yo6epA#`IPshchlt0*~N0Qc~gw@pV7rnKzHn-q=R> z;23%rKjJfUNey*H)6F?>Jqi+no!;X`yoUrh46z)z)&fbJXMxvPg zF&X>lQFfpd(X5S2Ci45jIRBJ541Lrc-og93l=v20%j%8GiTh{rGK4GM{)yx)jHKY4 zd@TZpMbBt&H(mXsm?S48vlU$BD54TcPYp-z4A_lK4s>#|v4=^A6W(FT>a@dVkjaKCtW{U$mu6X;0k(`}Pq_;B&2NwM(%C=T|MolA|+mdD-O)MAv)`t88BNJ?<(hDvmPgZ7j(^g*xTcce{>f43BI#-6(#Hz zErZ?dboXm;xH@Aon{jsXBPcPGoJ7SeP&|zvmCZDFpTUoEPdBW!RrI!$i8C(;0VFkzK1

;+I6}qD(?OW>sdikI{6jmhsR6 ziW38HpN);BL5^4UV)4vjQHD1jXNAk!X{)GV(CWw9qS?+E37xWm#?BGc8XXqD1Qr%& z66P&CgAwfGv{cqJ7LY|@Vh~>c5_zf}eSaHGr}~*RI1nyQ$b92SiE?2=<3wm;G{N5U zqH*kchPqDCIiSPlFk`nnut^>SrsR?pl>iwTS5PEc|Vc}d~;dYp$C*lu9Br=8Az4OY7ayWNgW@*p57 zo6Oiiv|Zq#+giK{OkGDxZa8ANnY|yr#r`2V;hr6A|3uCD7vJIW<l8~Q6#%j_5L2*u6~T`LpmsLu@IOPjlx*V z`)^d>d-Gp;@8J?WtQvYMcJRs8ZPe+*$j-^Y-`GP z#}zm%qa6INd%5SpB3`Nb3+ukMvqc~ohp0ceoozdhqO=7OpOsF8>jZ5__E2kI!rf2a z$A%2QGYVfgPag`E1`Q z^xiRK6r>Ph>*B!s`*3V{j^`i9$JfaT+h8RhzxEC%ol{9SeaMqvhVkt7cepWLcG?Ld z^?uGTe)t3<_Z;BB{Tb87#z@buzw^Q_cM4N_c=d%=F8I}BESRXGYS5XOv?TmgpYq2y zyIA_0t^Da?H_AVLgHJlWNsdtR*6-_C{?||WkBg%4baetuD8G7zzwCA)Cqc%;vhz@qAclY1;aA*a;eEBIZPx9p~`(vM=X{MkzEO?9+%k22V_hqBIbyfTfn*B)k~ zMMm6(Aw+}(;Hhur;Flj!-set`{sf;?xUu%pC%9u>7GWL&b!Qc22O8-e@1|;J9ZE|G zMT3o0?^Ds>Ux0gfD8c^D3|H-C>zv;U3>sX$wm_bYB2bRLaTmK=eYoK6hq>*NJR&^izjZd$$|lP9e#PFJE{qOr6_P&!O#~u%~_$Swt!c{u2FNWf=O(sp@tjc11j4;omNCKPK$T8mcFI~Z0qnR zXQc;05n;HFG;w76E{^v}Fy2UYw<`tLJ-~fi7M$i5(svx>?ce={;~`gb|BvrtttSX$ zA9ee-v8Q1Oomxq2kAs-vW#j~E=^JyP2=~X(bd0iEi3K-5zymieBFNB3OZhH7`>Gn1 zBbcP*K%B>VIjk`;p)ui+u$(Qo-N@ouo_p(HH@#h5SV9*PAOAgH%ReU$wB0AE*}I!2 zDT>wEOSvH9pPV^oKjQP(aCL&yIl{Xo%$fmOj_qgHf#bC6+=z-zB+}Q$aCI?cXBhshYP-NBAiAzbt8Kl9)cUyNfNRP5f$7yDYU1gDc3FGttgLATZe zAKM_>_&fQ-V~g>{j!C1UZtpfKhBb_|9%BE9D5E(e}_>e+pa4{ru*R;^_)U0;@?+OGQ0u;ezu-4O_PlV|5QBLgApN>?3wg zEasK&z7O8yt05nv|AlVJXm-|zP0kvIO% zE!lymNp+>ZojiNr&*-}HUA8_@Oo)8ea&h7~M_cts2}{Wd^f()j&gN@LKf~8zAE{%< z`yX(^5K4@BH!tk<;*J+z;I@U4_)a%34ea5$N1mj4?O%E4fu+RCzAgKbmC@!yZ2#y} z%DM#+8QH{mjdQYLKMJ2Qsyl+Y;n@dRpB9dM2J7p4X*jx%Z96ManY{^1Od>wOhN|i) zrxcg*(0%L4i}1nKK0@_puk&%YKOx$qd{nJq5^DRcur zdEz!MNC{^C9*tz^r>T5DU+yZWTjxP=LL%{j4hCu~Xp^tw?z=Wn6sN#N)5@Or-ltkp z;5fO1GW`OsxHJ`~v3@2{5EUDq!ar___|eDx!X|_e4vpwK{)V{oqFcl}eP=giH>*Y4-WSDP z7ZixaH#{fG`fWl8Ntj1k#rB662p`u#k+|YlqS`j^Ld&RV|LP%;8yGDLAO1>cXSb;z z5_KQ^Ok8vKOQK<7QmBsoLM#rB5lf${nRWfR=-zps$n=dA#g85lMj?a{4xt}z6mQ>t ziMai=^4G)z7i5dP zE1nj6d#A>4(+-FuKQ9r!&fX#@_a~xa&iL&@H(V}$R_G)A)9(K{3X5Hu`crpI 
z{A|T)@ytn`uxdXRmnqzZZ)~2p4Jw?)w_6xPqKHXl0_~n{);+dLpVbpvot_qb4zw}k&s#~5CUo?-M?$@^G zZjqu05Es4NDb!|Z-rO36a{u$AO|*;| zg+qLY#ZV@GvLsRXyTplWfA@~~^57A16KgaI1KJPluQ=OF{^nP`!7zSQh6eTmv?W7ml_IjUqKEph=|F%R6O)ng;1%jLWoo1)mv7I?1W3jZ@y{~BQ_y~FpJ7( zE)#hX$>N5W%SESd_6YQ%^}WZ$eZP239B3aG>Vv-&m*ggi)xY^l)Q`;_tYh#aasTQx z5x)Eh@oCMVu+BPWA3Z9bf9O&1yN|0xkGfO5efJ8Hl6Q-E^-#yOPMT>%Cwh*(DIVRl zO02!=G4a;X9-*FTXuMK9cij?Exawi?<}sBpNJ0oxi+KCdd&E<3?-ea0N5nImOGR94 zp15GsgW~TyYQ>;_N-@}LUlg}4PZgyPyde&$W{*q=Ata$!RfxAA+botXyHwoy;%?D2 zYMOpGwc^mT*NdBf@`BjeIyF+`V6}Mn@hxKI1y_kD-l`BPokIv=7el*#BW@@P6Efdr z;-S|Li@tBAlO!>I?62bSpZ=$Kb6=0>{NgEbRbi4yELbb9|K%HEcjKrqI)o5nRP6iR z^g~tH)oU&kn}6}XsOX;>v!!R7c;?!LBFHsVtaFY&FOo_Pb&$~tM><`mADyrW6t$6b3zlm+FlcM9(r^U4e`C`-0-Vt?%nUgG{ z>D`};D>D=~Fn zc!%iQ`=YpOX}nl)-815gF8jBc`2V}&m=F!0KO;6pI0<>$T5<1NZQ{Q_bCR&ARig5p zUyAEjmWq|P|50o^rI~KmB9uqp5x=}{gLrJmxDd8+(Y)tn@w2;rB0f^8MCHeiiX#6I zk+}3)@!0R)71cxLsTN1-#LlPIh{XK$;+Kc@XZ2^*4v3nMo)I^%DiTX?dRA<0RZsV4 z75&v)#ee)@jd%&Hvbh5W+6Z>QmytOAm;P3kt+V55FR+MrQDjT6C5FRa{rHNj%>+ zeYROGPEF&-MK}CW96F63+othjfQVoAOL5F}j;_A(ky(XSc}P6Jd7)Tx|CiHx%OOnT z&0_1Lt3>9?Ux|h@cP1uP2gOfv-G$6MPAt0Rw_@w5X$@|j6#ZX4AhP}A#D#x7gWDaZ zanbV0&&1~2UJ{KuofxkEwJ7or5sQCScaCY%i?N!gMUjuch`)Kem=75prU}vf=L>|w zCsJ&BYw%yV+3)3lUL2zBcT0uB%SCvVJ|JHIbeq_@>of7$2d|0$`soId;TJA8z11l+ zr@@X!)I5KUxZ<%dMZ4A_v^#DQDgORq!y8I5VLF2`hTq;Kq7@3U^3U~R)G!x0>i6C+ z@`C+D#smAsfO@`t7eb7EB5p_x7K+8sit+*N)S69bMg3oI5kGkBQ*m<4C^Wn868Yi5 z;*yt|#en`yza#HoE7C#(#ESo{5WSjjt05gi*S<$QcWtpKxbR2f)rvl$wN7EUp0{oj zg^_aM9C)R8wXRp_X1X*DiWBcXAue68K&-v<1+lMlQkbT1*H-^qv2n@m;1skH;bH@67k@t4PwANZ-}NL zQTyR9#TBK+V$E%T6gyi+#msWRcJeu~Wz`+xrK4?PQaHr$&Yz1L@BfE5&^#tuU%5#X z#f6BN-1XwYH;##(NsADoUA+Fo)gm`>y?A56CLl0a<6KDvfo@$pY(!@YO1G}INGT=DQvXU)|&aN9KtkPp@I z(I0=yTgq6jdF)55ikcd~tIU(Q{8YSU?)c{|A$|7vB@5P(qa5jU#``>cEjF`~iepoJ zEi>e~4yysxuJ>qLa64DVyJG4)$dP^*{F779>{vSN*t*N89&sb2bUA6?GA2q$HbyGn z=I4(;M{U%N{Q9XI$ekgxbaKWyawQk0+vwi@GJD4W1d_g!ozFeO6Ym*W`N-2eydq+H zS6QHKt6(UwgjM-74_`;iC%@srC-&jJ>DN5-qas3PwxCC9DAz_%x+)z7;H0nS<*m+K 
z^P}a2I2)+hThD|nn&b$VS*3OL96xXpi%Tkti$idp4tKFNeZgM$i@7w}1E+yb`YZwz z%em)~%SiW|N)2+4EMenyck$qr;T(JSAR$|RK(4|S^}z$QnLLP$4LaSPy=6Q5WouZM z_)OyY6LeN+2?qsq-980tqv^NfTmTGN`5R5Px~{S-vta<*o;=B`3sl zI>jZ7tlUW4+`LFH&LhBG_MP5|vUXI_JgUR&7ly5~j*0_&*}Hcyd%yaMy}Nd?dtVhQ z3c2Q|f8-B8-AqZ8?_AP0b03FZ`vWiR)sS<|t!ylg#oyfta3wM$5q}>qqv z{3Hbdvv;aZr`Uc-Pf}4HsnPCqG_*0KcPAv!`z$tpZBrc`qgsM7(2S1 zCV2+=8Ht4Jn`r16!{Qr;ub20%a7GDz=RsckuRrlwebA~Nr((5@LTC5JuS6;I0uFkn-Yj_a2&0A62ca8;YDj$o*9IvGk_D4 za6&Q}Fx$+`@ zy()(MlZlCno5R=Q7_Q}bj}710JWAq?D7(Aq)p-yY;E(^zv#}4-)2*aWA3WP-^F3&D98u>O(@DT(n0WP~Lba>&Ril zmFrm?GxajMg+vn?6-$tB46)I1#0Sk8JiR?idIr6Tjs2dl<)0SI7()Z?G>*6t5Ee;t zcIWm z%os-p7`3<(6q&=CYp>gnon(?sd$}xmIu}TQB%E+KtmwxyGZ$di4^sYREipgXLb{WY;j$z22wx&&&aOlx>{g6z zHK>pgmz^{(LQP<^=%_o?jLkislEUx#TK+v{hOfnw#5h4q^>L0Jt)SxgagLYQ(c?&F zX>I_rnN%M`J#T#}xa7J;M0(peS$2|f!bk{n!_{R5V;HD8(u_fhr>Gzd_c@^}7TS(g zGiddvWbxPWwb%$REF{Wfnyc`NJ?k`wt)dOW(zvkG_JVe zVoFm3@S2VQ5pE7}XwjJ+%%#7yGFts1zyIxP)OarEj{CN-JT(YUmnj{k>>ft)_ebIB zjvE?Ilqm_$O(Q(iol)f|CW{j>tFGnxRk_3{WPm40#cR3xuA5k!sbHe)A3XQ>!`SjR zbHlnqVii+SLGJOH#02`|9gsy%d;o#-q9diTdg^+|FvR4O6cl_mUyG!rW%sN6*I)Ob zPQHq}Zn>C+F+R8fHvIryeR^baUwph}I3uAs^%XC_@H!Pz4x6^DWnug@KJ|(xEi#ZG zpD@z1VhIj82U+2FRQ|2fh$MrWN#!(O%dE-_F1|s;EV`6siZTw?Y-dONW_}oae!dpF zeu&lsFZ23gJ&{*m&gQ&XE)%Pb$$?>Paz9dwGUnymN|^L&8p}Iz_FhOyZpfTtlB*M* z3G29`$P4{gBL}wEU<+Nvy48`V&oP@OsV(ouO}>PM`O_v-61WOaVlUZ5v9B3pO&yJT zUt%}h$mWb`!|CFQB7Gx2`|U;~^B66k{FT4#8XYsq zFD1prg~_9b>5<&Yz3~oK1i3L|r~FEO#4{xToxB~rk?WTQA#)n!(+`g@=2^h~7sex- z$^W$Yvhmt4<%Ki1cMPNR zTFmY1^N9_eNnwg&$q#?RlG#0t)lkzri82}0qH(e?vV*!c8$Hz}QYs@$WrH~g8L{9hwO#@1L%?Xq)Swd+};Pkf;NZP7v zNm~VyzKKts+riFyiOf3|vp6s0%=iszH0lXVA#udUClWb({6-bZz7BevzUOQCR$|g% z9PeY?E+^7AjOfVou%TEqjP$nC+%t~VYQt)CAmN0wiyK}+;lw7UlN9U@2&^NmRDJb6 zpR~A;aa9`eC~2s*U@+>?40qF5*N0oq1}@3~E8KYj!8N3KM ztQJNZ8yJ##lAAo^V3H87c3k5xVN0nuCT%NwKCi_ZvWE4mqRtwM!)B+ip&gBzKdE`q z^N!lFnOdo=(IN{kVQKQeak1_)|QT;Wxce0NT&CaO~M6iX+`{n=#Ki zGl46v<$_o*-1QY~+trB_a}yWk$nlub^Oj~RYsaw%UBQwVcjiDxsh8vBDlDG4=izIy 
z(srzdA*+J5ixcqloXLB&;J@T5mdE?zWvXT8o(`;Gn^>Ezz$))Njp!$pl<(G%vi@>%Lj&+c!t9dFMQd{i3xw9%Hq@p7R$sq`(u7dX6u#8Z z)-sAg3L-Yx9naae)2d|Wo9}U?T~FMd>3HggIaSw-NpD0y*-Pb~31U}YO>5e@5M#8`Xd=tG}IU| z=}oAI8#&zON#XKhGUEbpu?=zb{SVkvt0UpQHRPp5pP3uS7{lXQ3~tF}#fB2<^IhKl zoh$!F&K7}5qo%jD7kSn_r;QB&&T@hxi&(!df*o7y+4V&=ms}xeKPO*{9o<+LWgmXU zm^_(v>y~rIWg=nLPtbCr6K9`%3Kq_DM+gf>?I~)S%*bO3DV|dmS%xCCfNLKs0J{cN z&tY~pLg?CyIqho^R!rI!PPEwYjw_~g1{1h?6OeuxkEMe{J4kocQ3l-n*|4_c93>nb zm~_1ycxxN|vLH5ZSToCMBCuIBG?usE@JytrboML>Y!*GuN9xeIN3!&sd@VLKHOFYN zd6IIPuLWE^6)e2>Sr+2JG~UUM&zcdD*RgroX;%w&n}(XQRwU0HN*4y5)%MqlWXII{ z5icE_Ko;`@u30wU(;^Vo4qCcw;OvE;7f8}L`(J*KX1kmVuDqJ$IbXjMPKr!ctT;1g zgP;DH54s`Xnj2UbIj?!Q2=Z3U%B_yLn5Ta^^;2{7J+}3`k$TJJNU{h}A{bLh&mt4)o39{3M z&uhse{Ks^AN6PBZx+HPQhD^L=vl*#|Id-rEn@0glvpsQ}{neUI^6rPVXzdBCxhMts z>|F+XC&#M#u*lc4EK$ZeHXy!3qNe_M7iygg>5I|{4e-U??c3b2hN@3@aI)V(;@!)M z5AvTywrZxmrkOFFA9?vP_?|A)9L2@=+y;Q8e2-eC22vN19U6$w*Z8$G4Yjn7=?Pnr zMVP{Gt`ZI`=GNN*Bs2&AMn#(%|B}_@M~34&8`C&cG&c7$;gn2XVgP}295F5=Ty`f3 z0G2M^Jyb`xC7ElAQ;42Lyy)mZ%=V*A^w_hxd_f{H!ShV4W01Z5ip^8Q9r}tk~e9pCOS1ti7#tDhIH~tR*%wa+Y&mpsp{YqPl_6u=Qjm zCo_BPZ0v5Qr*{BZ!cwAQLg$f;+UPr_q*v)6=E4|aW9RPEyCtsTvc#$LR6FZ9*)&X0 z&Q)Y&hMxvOhMsOzszKZnmJ=NpI{kkj1?x6a0Ep2eZ0k`msz_x+QtDi1$@KWOEyp=g zSxKM&Vy;ZfB5sx;HT89)>Q*A6R}z~LImgCY2UV!Lm6$>o5*HWyJzvW|E0Tn;JCK|_ z@%9WL_&mgb!mMVX^%y($wldz^N&WGoH0u?_WtDQ_`n9Z*CrmqAY>X&7soGzU#U~M8 zPaREVW!TJm)EX@o0*EiZj;k)nBgPx>P9guI6!Nh%Tz!<5lS3$Cma%wo%$W!qVPRsV zmy;)r1V&_%n=o6E*_i0>rgBd$M(=3+eT=k}m0`0OP-`?;9DYO>Udz2}3W@awh$m~^ zZDiqKys@6kx(oan&WH}`D(_|J-~H$EvBu)c5yuO9y!j~sp#MQkQTOD^V$>#pL8 z6*D(5&DTmiN`qm^YrZG+&YDV(TqvUjWw;SGyT(19lF2b&7uxl4vPYS{} zUcs8Pd@WOzhT7_sxXPB0o$5LFu)~VE?+8amWcY1fLB@9)Km6|%OH&yqCTvKti&;F| zsRG!Mld^UNiSDxqXv5a<#80PwYWIG^E|o;g57(0;_dIQ1Bvm=bdMwD7m6Go5&K!Tr zL?uT%^oYds@wKR^s!(Ef&z{5ALg)qj_>rlfx&vRZr^iazmW#+%$WKQYOTEXb>@nhz zS4yg{7qYM0A3IAXso1)Wwh=QaHaqpJUpM` z*m<{aGob6M;J8{&z@^2coM(3I1|0*(cGDsSv1av(Gmasf5kuDrPKw1v!R8f)kZwLQ&?I&D@RJ$u^8GZubIF-bPG!oW_RGgYVM}8 
zd<0p*YL=wW?_6NF>S;P$i$)g7;&V7-kZhP$d-$>ug0H!h($hW>2XL!rFFJ(!L8*X!aE&JJBtH!-> z9i`#!bB&wwD98J(*n$_aBteF|>+I_)sqC`gmbHW&zj+4pyC%jaPPB}pwfWq_aiPSVy(LHV=YIQ0>{_?L$ z53tZ)-NvZehn#{a0{qWYibNWta(6lH>M&NW%p^Q)?nfhy)>GFzjxlyQNkM_)^nn{hslsavJ;~SnRAOHv@3@hRr`~_@_jMMlA8kvmGinMwPwv^%w}tizO~$wnlX@s#Ma|IYD4- z3<*i+uwmcJxy9WJnQs7c7bl!uopC*nigNagC4c>W>_ zO~NeQqGv+WOIr_wXDwjE4Oeq_PvpKnc%LtlQPMFs)%(13zE?xtkEegihW1&G<1a_iX(5&x*4Lis+U6=;@!~ z@%B!tyN4Jb>Z9heX*TZmsqa8V%gb|&e*Y8yExfqLl%;K)^m|;Rz}JX(}A32C57KRRsStT zLRof#KAVkzk|iXaCc&K&5wVI}lM3;5Ki9izpk{j+DtjciUYIs#qi*8(F*+>Hq%Xb&l5OkITb)ahNW^%$v+{%l& z__9k`7$w8aLynKH91pj-^Q3M(N$1cABIOEFLPO?M%Qdt&(mgVcA}y8Zkihd)y8CX* zzfTO0LyLCo6sL?SEXX)VT(XBh!I^8=nCHZJ>rUQ%|0q4)`4pbT*J8z@8Dc=|g;!`6 zS+gyS&45l-L*1C1pqz!Ix)@RS)=_zCD&N&^)H8VEIDIld(h9Tj0fgO*rmvpzrg5fH z?*ta3hNk0fIDArAyyQ$6g~McI_{1>=JpIXBl!MG-puheY%@fln2rDLSE5}-Gctp_<%c#{8BPyRt$YL9BEchr3j5waHQ86dERnz zbn+)YLKFm3^~y;W3PThJ{`(BQ!xf=h4+?gV#tZ`|D>xI=JvcXtc! zPH=DB-R19`bG|$Nr~7ojr@i(VEmgB>)$U%a)|~L_3Fm<_t0E%q?9ZuH)~XcWkY}Sa z#{aNbJ^t*AZhXNO3t9WD(Qvv7Is}4EfIV=2D!t}kQLp$b5KsV&ux4HD6koc82!9h; z62+0{-rkLuFz98!%PF#*2m2wP;_lLQ?=E5i{Jrf{aJg3@o1Q2Xn1%LIKGRNC?I76(ITe&Z1phudk zxPbPcWHi{ehdGYN1W!85k2hP1^H_Ff(73;4jL{|8u|g`~53znpGE)ddb0p6!kFjG$ z0-B03E6!JQdp^HS`z}n+Ay{1K+Z6Ddk(eaXk0!|bK?CR^TJ>EVz>m?`sXFjkab9N~ z&C@HW0;s2Lz8N8v8?X-|&j$j6182g=5AIGdmXuG>o$TONUt!r|NyruQ+GxvLv8a=@ z+StSDB{J55Z)Ya#-Bvy$3@3M7VQ1)LwZ>zr^#F+iYQUq1`rL_#RrP?!*+6b;q}Yd@>C1(%BLXm^Ss(tLD*pyv=~sMlYl|I!^t46ccM)syi0?O}r=A zL^5fPZ|+@zi$Qi9p+DJIjue5ATQXZ%;>3?&US)R&@7Tm_L530bliW{nR#^ihMAh=A zG(5MsryEo33>SkXdze++Z`A5nP0W*@o+&_TzwAm%${mKQGqhQgEVq$vR4X%*j?PTs zL4qDbruM-~0vT>JB9cl!j2!7saKBM8FbtZEv~o>@f4SMY7X55_TtZiGpcUH@cZbNr z$`sSBPkOhvr{0UEji1%O9kn^$+$oeX(!*r3-#E{vtfX6K_!Tv~@TOOULP({-#-gB$ zv3Go$5y(QH2WkKG`Y23MMl(i5uY1+j5HV9LVFC`ts;VNNh^KD|h`iDug8hnzZ2&L{ zhq8oF!T2L3^;40rC?o_Y(*>>FDQg-MsKnAzabi|c-Q5uwI?rP?Hzy4`xnEscFnDO- z0QvwNeyQ$Q*i&G(#n_IUsSI#JC}yqjxu8)sAoYem=&=dJ6ugRS{Bs>!ydrdjfn2^wIC6_4cn-fvT8Bg+k^KTM^+vY)z;_9Io(nytwwcltIqCv&*_RPHn@ 
z8XL~l+dhi(eDDL#Zn>0x$;5|=g&sww95+vE>d3mAQx7~XG@E*wppyBI0f+KVqKIVB z8%{fUe0aF=xAp+5fQW|qj; zJ>Hn83NV?Jo_0EfPGk~~9!CxqOck3h6T3Tw9&``|w~cFUzV4f&%J^y@5-SAM{S4q( zrW?3qsjM|;_mwH%kITVHq1yR+Vc0ch%GFY%Mj{=q$7#b}*Vi@@>J9Vgg%(F_z*8+; ztm#CH5t;4k0+t6L&36#8jt%Hdj2A1N4{sF)ry5vrqT@BYGDJLNXAa~Pnv7sDJm`Af zm7KUqwCUHAa1NYSX%%idNYcO*&3VwlGQjt+QqNH|CEzhu z;aAf(%&`$ttky)xK(!_(Ymn#L$E6}P6=5EiTb9r=6n9V&-5`R~fxTDVM82RZ+gcX= zw$iq@`c=g~$%N5lzp8^rO(oB=>FVd#3Kl^vE{aCRf&+)kuBfNuTG)vPsrhwOeh6$* zo)3>L$!J8&+U~??0SCaODXwG$(oRW?530Ytl>tu7*EKf%1h0&E&r-nO_)Ps3c%ElD zIAM5)iEGtV7}j18-*YZl#7i7UCO)tOnB`PvWmizslM->~9Ds7i{%-q`vF2zaw2fUT zqp}2dt=dt|5x?HIlqq@4u53_GozlKE6}T?En^x!39qwb0)jhinb{;0UFV$RU@Mj*9 z&OIpV)c3>YHj8>CI)kFq`C#?9@h5nYP}FRWu#&pS%wD!>C=3=hpGyW#X6hZc<}l*q z%-!q9lv+%iA8oDpFTW;x^dhdN3mS;oeh6U3)4AH?kP-YC@}M~?EW_L!%IyLLn~tox zOg(lQ@HET-y-4)liVe;FF9FaKHxeB!QCws#$BbzDqQFR1XE~sY_Kt{^>JC>~#5qBG zq>vorhRNZPGLqVHsiiovBPIJC>pkwneOQ7GhjPg~ttTO?ZJoPz!0Yzo()V&PBy-G` zU!r$z^2i*jRqLUimom5=sP&p$-K2xm1enFae^csL8L+bHVqQ;^5J`1E`5eEj-9+S8 ze5aYT5lg)ph&kM>%`%og@g<#mfS<>aP^OR9>-`317y`YF$BN>&RZqily=B_;BQ;XC z>#xC3ayevm>Xu28;{w+dG%>C#6Ey@oAc$5Opis3c7_Vx-M}g7xd)tL5VSUukgM4(0 z>Ajo5JHpTH*`a2@`x%?mo`BA^@mKpXYTmcJPKN6MKeD|C-(S3Jh|8^Nk7M1BpYkKZ z#(FTPQh|}Q7E4Y;CgZq&61xES9ap_L+QB+J9yxHGY|=0MAM9N|8xpLEQ1wAPji^## zi22u}%v2=t_&%+fL2lIC4-2oOixK}Dr+B)>sD0^Z8Y2@tkYw=9ofAycM0g#OOjkC+r6c-!MtoP zXJ&bAWMjkIrf&C_oWb7azr!nk|EOC2DO%a3eQ+2~%e2Ut$=kQIZjVkw9{y@r(P~mf z+bvr&{YUnr0K&+KRHcxOgn;4FmZ57TsPs_A7YJ5xiXbz-Bd$_cRTo)@QO|W%3g7S` zgIcySvOg#a_qyJ3{f5QqV+GBL>iRl7=iq3*|-w-aKd0efYBTOMxt1vmr$ z2VBUCNhrk32vd>C{e=HOo?Ynm;A;CKZ@0Pstap z8ap?Ax4QU-LRwPB&JaPld`R8b%nX?+Uoq=htg``{Iua^6yoK9@J|NN64ks^3e>(ms zuI%Gry0s>5tZMJT`{*Y;ACseLe3m`}rx!S?GBZQRJJ1<4UQyvsK|x#zxNkJn4;qRQ zHbpSpNBH-;U5jKqHv7-dq3RK4E2^HxE=+iR=0w}`M^M)mt`0fhkw26vZM_cgI+!sb zZn1CK!XuJa<|=1(xsHuoN&@Z$r}pmSJ`p*t^FofZrMb>m>uO2lf)&E$%Z4<%iM$m` zgMX}{o65@OHK^7ZOYwd3MCN}VT;#Bt5?~XhVicT`$8|hXZBm6u#qPSg`J0m?+so)B zw`O#E35P0X4bTX-mOR2FJAe)#tB#{Y3y)8Q&+4Q7?vj=ia43^*7y3XglE_wwE8G{v zq;*}$zxVO 
z_jJq@cA4#ZD?sLp{p{PR-c_PeR-r9PZD{d>2TOKfmhbY(_-~b%k4lx zT>oG@_Cy)APIp#0+UYDA(5S%~dIo*7mo7?xO=it8_Tb~**3E##-SI-Bqn&=ya{0Hl zR)v`=e3`bKS1^ian)F#yIprhCI{t}Me8U+)FB55f$Gq!#vrDF#L5PN1>?IXv=NR+e ziZL&y@kZ|BO^@>ejsYuXiqWAf`6(qR)ANM!5FGzy^kb~P)24T1Z^p5AGKE*7%mUtd z5M!}YoQSe5v%jK!Ny&USlY&whiJ}wLSeHqJYa&tlT>9rWX~@xaKh%Ko(9ys_0An2cVtv^kdY?MKh^{++RH6yOfp>vrE3-g=-j5n4?iqrE*i82)iv=uoNw%WL6Q9wY( z3GN_awTYuJ)4&Ss#^b^R#~NQa|INMQd)ZUa=O)0My;pr-2G@7dyoxN#colkubokQs z8|>ypvzcQ$R@?3-UHF{&c&+K*O}GThZjnFWz6vvU{GKjde0-0A5y!P+t(u}d%q;9A z6&;&&_$tGU1F#_uX0j#FDyuI5@)dDy?%E|MwqNMDa#{+M${S3d$RDyMyADYVtVL$} zrY1k{9NsL}7t#vp-_x4DrY|ei-?ll?j`lvWaIm^tZ8RGsrt{@Y=QLLtsRd+N(YCF9 z&>p2EU)5J%o?*3V%&(|-x2C9@1;99%mm3JRczzG`6)T*s*^_rF=L#6JWGQ53FCqF;mo@RefBNB1F%4K+0ik>SP>A?Ci=km<6>dhY>K`&g&AlGWb2^L^oeU1uq_H*Upe zBrYeFgQ2mW4A0B8@$kLXL6R|^wpyEjw#K~BVqjjg!<$I9Yzu6)Ye=tgAZR|P5xTpl z94x18ATT0Jn|gwb_dHiKYo z!S3!lPmhzD-E<0J2eC+_xgO_4g$0p)M13p?`WwX`MZr-k%;A1JWWs~i13Bh>Mfo{x z+31a_R3&=Ja(JdaIvsCj$8{%J7@+9n zVTwz+hO6gqwlkcVg(XD^UX~eIRZy(ZXd;ESV}6~v6`oF|x>Dw7jfs1^)o$rswFX7_ zC^VW23R;%GeYz0E76g2Z5izezN~;R;XhQ(ZB0Q;3b^W}+hVEw?N(O{@y2^rzCCSSj z5p8Oy*1f%5u!wUo!EFD$GC0Z6Yw;jcGyavy43g|NL`1c48;kE|4Hix`7;t;Di6nS8 zj1Kqc2koLc4Us#{->UpjUax4~{}dkjRoI4KjhJgEaaqt~O?4)Pg@;R8&`cC4QX7U9 zzEi%=IT86}0*gH*&=VDPRr_}?8E<;1I92|n^nUW!C6cftE-Yj->?7rPjo4LH8(|K} zmJ~KH`sT`i#TYdrA^Um>wL$^W@mBj?nC95{AmPzD}Zobnxi8Q2l*15r^ zKK62gWn0s}NII^>H9U)5Z;N~LoiN0cXY8R9-RA)uqLbbs9CvB0fdn-fcFP`l_MlF_ z?nsr*B2zl@e)giuP|uN2zaYnsfZGK#{(g#SUwcNEO=OK-cvFkvdfoIripaGsc~uJS zmh0#uO*nZ|eC(LToGQ|U77@>UUf99jeX&|PF4XatQi$$uVitDMtFu)_=vL}Q@uJRH zlgD=e@JBxU_z#DX%Jm-sTr%U_5)UK!PTy>{yAVC5_YF(eC%Y&JP+mBcYB3N^<7nH& zI6ej+9+Q22qT^l2Tc4sp|8@@(uZ;RzS$(|jMjx4W(=~AU&s&z z*y+tU?OtX*@|;Kp*MdORbMLg>_1p0!{(5!h*T#dbJ%l7D<`JQbuY8qZGL?%J!lOkJ zl&6uNtY&zwHG-Ms1ddFP@ur;U?E#dUY6P9(+4cIXC@VY?iizzlcWHYE{F#{bvVNMD zKg!j!tfbwKe1S0dI1zC=i<0pQGKaS@IF)9Fs?}oocFbHvq$-V36HmB#2YTDu8>+aD z3{ZMX97P!g%-ZzvmUr1x2Q3*w-y8A!g2eqUQ0(w)b%?j zA7@1!9WjDb5og@els(MlS`S}EqP;)JSB4HoN^J|s$7(ed~ZYGQF 
z^#Z%1s+BN`T&yg8F;yVJEup*L;woiIGpmwyeJ&37kN_8!k|1#s9$COgnVT(9&#Z#K zV_!qFUctxOnAgGO=Tq*Ulc7JXwq}LPE;0`ZtxKb>*Y7ZFv+JFHnOdq6M)KG-N#|QL z$QOo>l%{S^hpIMXa$BkRIWPSp%oFXU*y3C1EdpA{kVXqHPj8sKzXZ#hr6l@HvD zqVEhl9-7UWzyM)GLfTeDr>bN7{93(r)C0fikAzD>2 zGvlV^y}f?5MNqrhF0QVaQLtd0Uw7wk&;CFm5R;wG9HBf<1#CrCUosq^nQ7UWPH|T( zI+R-XMax~a%Q@XN+j>VF#27DYmXU9#8lCpSd%Z~0$GpyD-yCFb)MR|m7bvaR>e_s# zJI8t9oASLGnqSw8nMg62IT-+^ZSRc4-xgf#$@w@^;_CN>e1yt%z87~qoi=kS*JOu# zk*CUoRWFVK{_gT(yY0qg0Wu9mpd)G%Lx?tdbFrBsOMxiGdXj&<~|t9vqP{PA*;Q`n~8N>z;-UkT>u zlMrc|an8QEqwIwC;%bvqCIAnhHaE*@G4mhp<$$o()h>=^R&)JD>B6q$UiveFm`KZq zu&D#`zLw2|JB0D9Bc@U&QK~fW4#s_Vo1VgVe&Wc;)=-x1S2`RsyIZ6h(uBO-iK*MM z67r0u=LBkgWTs!O26*sY!$OjL+chy`x>VO6X+ITVfJ@38@F1np`k-rKg7z%DBJchl z#y!V7LyaK|xRlPOh{#E0CzoTm_aKgpWwi> z;F7fcgAi&@vf5ZOS{W!SfyXMGLBg%wZ zEr={$%`mtvZQdFKQBqqo|xZr?ZK_xEzRHDv4JuBBj3wGbK<;zLJfZk8Xun<5!$ z6!+F_vn{#qiH6UTw0|bn?jPT@A7%M%`FsBH9|6IHNB;kSqM#Y%CSbqU=aWKxl z!g;-hVqsG~SfEL^YtO{~U0QDnJ*i>`de{-9EO69J#R1l z3o6@N!hLbAq_~EVk#~C8hmcZBYkgK+lR6+Ri?wjcgO`K3Sbjwkz9Z+!Vmc?0J>$x9 zuxm~Hy*)2XmQO|@#t}4}r0Z2r@wwsSGSKVek1C;gU5-S^ReRqBbMMBE(wbv!<64)Lq@rHH%pr~6gjgm4QdDdz;e<&$3! 
z=^43KdQd!}CR9$&_4-9t9(B|=yJD4fhKSP2#_9(3M@Bq)vsZr3@#p%7nxSjb>-~Ko{OMrmK>GpuB z%Re(w8%-ETk2n*=m-V8zS-hBR_)>{igX_#MOd)^+!~YJu!Tw~l5}jQ@rW{TFUFu+l z$kAeY%<#NZ?T_WB96JW0_rteaUe7GjAv}dZ%VFc#n#BN5k6$bY!6Tg~Ako)a$g#ql z@cNjlBaz!mPc!-&ZIx;$Z&+hYGy|Q$+K6IKUx0s^XEs1eWqa1XX`Sai_vPr#2Fo^+ zBaCn6rv?t`>D?A+xnu2#xS^S^rzc6$YV0i%!3osAtLU1On&&m=oP>8|)^%Wjl=Vc> ziM7J1R{_vP>zQ$kbQEMUE*zjzAuNL4$m^sV7=(yYPTtJ}b2wg5Q1S3H(@z`OVBsi= zv8U)t^|ZAutOXpcB-KBCB6KeasLtjd8vzC#HYMdqiNZd1E`_w26J08}Zv~v1RTN1( zvm@j43^fmNA93h~Hz^9+wZ+FD2&aUHmwqn3l-AuHZjC6xMVeM`W`(^gm%ZC#@O>I9 zRStiD_gPt8b>9i293Ip9e&KdTF3bTFeSL}?&AcV8B(EY8yArXUO;X&Hh#0hplz~yw zXyIV(PUWE4^6`FGZq->VV9ltoHZ3j)2%LO(?|=tJP8OHAT{?Q5&rD#yv5dE3YDAVS zJ@&{ssHGyw=gb^Y)3BgqxNgA(+R1P#SiOD8cK3P<*WiY-$n@)d#=Y%P2p)Qc)CM9< z1yZ%Xu)OZ~pbFJ1wP0dhQQ|#b78B-Ja4y9~x?^{9h+C|qy$%|sQ>))uI01;#$saigopE_gCf1{`LZil1Z_M5#Ao3>Ufh6q&-%4-HOF^-LUrIc_B z0+q7&b4T|miG$!0Oa0-ZLP|B*XMV!Cc&AOR+Z(%smgR)>R(^C2_Srp<#fC;~Z`cGE zQyvoDD!v|OUloRwRkOHtqd-YUMag)w=joXpto_VB%*%|8c66dkxcQCM)sMBY>2$}o z1_u>frgK^)!RVG3btRu7vd96UqvI46NX>D{s`t9)J+JNIx$k_ssvF$>f}MHKm^;l1 zd|P+Y#U|Ii1IvB^7IVty0*}{yWmv7s$c!&`^GIdQe14w$aja&)vt(IJ>||vO5oH-r z9K5}rQ)<=d!Zj7(HxOl$T=dA@;JuVHy(gGOl(qy$jtNp>8u|1K9>Ms;#Mn%Tc^)~N z!w^sw*RDO7^R-sP0Y}F%qoN!tv!QGQ)d7*UyFPqx%d4eKIy80Vxn`?`O}JQxN4Ulw z9^jsBvD_#7Zoory6{1(8tPMSh^VRZ%w^?{Zf*sKMP0KW=NKsDsN|8r}1V3L0n#_cW zMTGRBtw)iVgzg)~@hzdK7(T~A5jJ5OS?d)_mdEv{g@fQLPvMqgUi9@R><6pTT!HS} z)ZoGPqlZMhmwe>f_L<--QR}oZ>barMZQwt=>Jj89)gPwO)|%^)(%)xY3kFn6$T2^o zzwM@)*A1T{w@ioWIb3Ug9;x43vFuOZ@KJx@yVr{p{=7q%Dlo*N9PmV0X8rm zzJ&*=DA@Dp$n_W1=2$+Fd*mv6a(uaJoYss$BYBvkMQ~~=4yaKMHt}q4JBDvCiv~j> z8*8zG$Z?F%X0tvj4bK{)F+9{LVaAcKPWQ5XE(bMF%dheR26>ICona>83NvQe#~KYfa}K0Rr~lp>UO$ zzE7l^jU%)CS(Me9B6GaCSt6K|jzOB;oEsDIOxhucxxpji5Vy>XDqGN**VynuR5QD- zNnDUmMANTx{e@!-_8`sg-AOt=7_oHU6?0Pyqosyk8olj0$-Fmdd=nsNXIpecH{xC7pLz>1=oeM7#^9b&UL7iJ0?{NZCy(3tc^Y5h$al%b|CM;ADTG=|1=0ohw4 zVlIka#urZhSsJ2exM_sXV93@@I;O#LK{LwYGS+-oN3!&j&vp~iJK~AR1VnA6Id8+e+qKkJczzVS6yA&9;%8Y%10r` 
zLcS>2Dbr&hw~!%DG*-Z*$P7gFp$OJV;Jt^++EBTSh(-N4 zq2Zm7>$(FkZfnPp+nrll`{AgN)@sfjuf#%SVQU~<`(A9d0`^Nv#}P_nQ3Uz`6LuVV zj=f>H2-(*!=7JS9RqHoan!$n{gPd(J(JD7tMm5c3>nDfHf3au)XcL_;`v?E}C z0+GMhIJV1R9xvatN~ovlx^<=O=S<3!;Smrfm@k}J*m;KqOxc04;4b|4=tO)qVTyB! zc|EyTgwS3Cr@oJZF4f;U&Rv!)v>;Asl-)!r)$O=a@CkdWHu{CLAF&@;cTun0U}>e% zDmx-z&X**;w-p7X^wh(t(S8GAQ8I{(Xykj|hY0njf>G&h>k20GyIL`CcF_CcU9`_R zO(>p^&TxAxZT7c@_$FU$N{gp+AgxiH??kq;0JBU(ueVf2vUGGH-D~8vWdYgo0CvU* zUY#Lp*R%Y_T>V7y(rzAI-#2HF^r`2=`G7^IECIYg?1V{ut$HNU>lTdHnWY|Z*)+Sb zuI{=Gq~;JOLmBq0<7S9UI}kLcWoIl-F64&!S^(NqOPycr3I)*q>?0V^r!G2N$=2ii zCCw$e)cNyUS&ybd7pweakwoy?xTlgj&1m(?{kL=p6l9OjC2&+-=k98R*%XB8zj0Iz zsObhPy5?=Lo7Y8p6-9!cj&A_7_B<|>=2Fz;`_0X<69r*zr;oEi%2l+`sS{rwrkKxW z6g9O1DL95YD8okq`upFI9Us)tc-}hWU)Om*HUzYje)LE6)rKx;il?4Y(EKJNT{=U; zjMsDsAEg>=S7@OO+hhv+297I5yccA<$?|@NX)m{YWlWc#f~6}vCk0)Ua*an_Sr_Z* zoDeMXfIrfKI-Z7+%FG@I$9Qm=PhZk+qO6N{>sMkL9axHe6Mj}){rvQbP$f-2K#u8D zeirR?HIb%Jlw1AhK3sQr@CTb2@k=ms7to%wnOT2~E()-z^}nB%q*+ z)Rt(OuVk=BJTPm+m6nbZTomf5?M4C@DFT&po2EjfJZFzgFNfdR3Z@i&eukrb^ckL> zU!oBwo!s?~#H|39Lnn}#bNAq&ib~YgT1#+k@~eajQWH4Kb9NjmI=6(EMS#-zMJ;I%MS;I}e9|dV%=xnDO@RApwxYF%beQ zp#yOb^-$<@3a>q^xRqHMe?*y%sC2WlQ%GPevvB$>lFytHeoo0n;*TeN8zKYO<%uQJ ztcS8ZZ)9PAs9}x1i?@LpI9jCi>~g3rJjiSM6NO8%Li_c#VHb zNIk5uAy6eaVJBG^eJT<{k1Kg+RLDU^%4k~IF3G$*sn=n3&$pcnLtB5c`nRI0K$Yg* zJ(DHPaIY6+W#uoPOpk$mG=()Kq~2%4QEN$|8~DdKQMiW8*C$fB8=+?-FJLSGcGb$O zYggnU7kKp>{e$njh9c23+vd~GCtNtSOrdLNoAx(G@_rda^@NZM7E-VK~zZ7vs7-|fd%~Wj}Fa`<0m8w3iPgI~uiCwn5 z7Ti6lO9zjoUfWNBPqu`2c)?S2KBryK%KkngOBv}&$_^3q>~xv^(P6{${#Tw=%P$ds z{ch0J-k+W!wt+C>Po#mnbgJ&{&i&Bzz6R3)w$BlzxzERgZ?+TH1kw98k)oEC=Q_}* zFo15y#8_egKjU>!@9L~eGwXSbp=Jsk5cepmwSd4s`uSN*R-D1SNa(T|@;=k!I)iB_wc zKQ9J0_a--w4&Hsa`_(R5H{NfADl!w^81>#(4{$1H7$uoY1QQz^%H?d;VfDO3;gw^8&D^Ksbl*1O!R^DvCZJqf0&n? 
z1@s26@%D<@#0o?_K80qy(EEP$qO1(^_ABDlrBdBw^6y+^ACZ16$X#(0nYVID-B6Gh zZSJIg++n@`am=KJOm+<$r)6Gz;@RXsBJmwU<7p`T+2NW%N7cU)TOja>O1>1{Y^5tK zELaD9(%H^sdc^8OK^MDh$LQmU{|e@%0BnaL31LpGd+4S*YHrCP3nEFb=LNcH-{&u;L!)k|uGu@7%c9b+8B|i~ z8r}50`)8siV%#Zk9EToP#uO(+2mD^!<}L{-zC|NFhnGw~kNND_T8ravbfVy@P(BA= zOtU?4JeLLeK0M{>%6n;WHgo)l$kcyTbbVn8qt~ui zA5SlQ0@YsGGBW4T`QFL-KGX|&AVah9<|Zkh0>sepaV~bpwIhFWhq}B`K3=22n9(y= zX?~oU>{@yJis3;q;i%LvD~^I986TgFGk5dT34IpGRnBWf|Ei+d?Wk$A^XIT~AVJ&rL5FyQ1@#u~|ETV&3#0y{ zWc>GC#DV`icgGKH5P#DJ!#LD^->&8D$ERK7#{cOBKuyg)Lr>GTG9FkO7iG0v<^0D1 z;tiF5?7E^?2b?a!ci8#h+p31h-pT3_QGQ{oeY>J`v)eS0+_fS9$8evp;Y$?|lT)KGP^)qoSU2zTLU_$%eAR-Yp(e*|9alRO|}FQ7~avz%QsxL=DF+_ zwyN!#Ta3;<%rMbDSN!KiJITM$hyOa3RQ?Ei&D=fAKp);?YF>)!w`ocaHS=kX~%jqd+|^~F-$|9{~Q@t17z z2!Q$U&I1z*OM(pb!nyn3Z~vzp<(f5WlEi)<0bB(uB9OGkZjv^P^Bv`+(=y zgvkh?jtU(vVf2K^b?$8x;OFXA1^TH1(k>&UO#PIo8lzjh2fgm|0H_DlYt(_3>2^C0 z{r_6qFZpj0ZHT1ql6(U3=iSKi_7CGeR0`0%uY2Fi8UshHxAU7?T&| zwUQ#>exm0FX_%NETn)_?uz*ZKFjpbOXl$HMI>>#(c}a?FNli}$m=FtzCd;|YeBT=| zabgcuEdUyL$-*FopjGpWyV=LOaRrS2l2cIOb^gAGp5Aa_@tYLmijki?Y#om>aq^c$ z@koJ6O8B=D^D<&RQTD%xxy6jw{rG!T14uw(rQjdtot;8KQ)JSX-C{>VvxXy-x<#tP zH9#e@+HMFr!Y-|D?oPyN-}AgeVk8I9*=O{fz3%=Yb(-{m4a_HF5qM`Kd_NSUdeaD? 
zJ7SD}kMFn*Nap;$OKwz^dzU5Ve(J}|x@W28%TG*+)@hx==bSdFz2fh7-2)jjq%c=5HP%ac;vA-X1AE z5m-2GH{>gQKH!23T^X6DlK|BWiGaeou!Yq*Gj96nWtKqFw6_OWMy@`2TNnain>?J~ z{IafMSTZtr>dAVKP2&kxy<>m_VxPypMf2WS;9U?eO^czT54Q(k)j}uG0&u z!rnJr_bcEW!7n&>c%>T+K^_suo0|v5nvP6&TDCwWV}5U)Gbb4_^#HlQark05Azt&B zWmDa9nPbv#4xjTDQ^!%0jn9ogrt^a>?Z#OxnwmPl&K`~1PqVfm6Lz;tu#yC=>=LPhb0;p4-Ddn#A8zjHf{Sp^LR zJD>2yEr;c0*k5ynDscYA+>*}3G^ahL$DX0|wX?$3EXTw+#~7-yG$CpAoDv0g`(wsF zxY|>ZTrv@EKvN}Wg1WfN{o88ry3=wW01>oTvpU>nF-D_>QFO(*Z!gX81;XJvf{LDo zTn-jpwKmt8!4nI{zL>6Jv(C>WJ+0rLsC!B{u&(jwf(D%iO0vuA+1-!Q{ zF!WLT4I_RSNj;I`=3~lCI$RE`j|{F77JKEV3rL3iQ4JNrGBTO$8E;5WI6JuY`KQ?V z`280!qbvEqEyD!JpPM}e>(Qi&lE%@`ZSA+%qsJwc6?ta+2Ws9w%xe^y?_7?X-Wy2k z0W3K5$#Du)>aC`YibiDJ1>s&`%>A8c2)B9wGkWNMJ?XC|{tsX2T=_gjDh2S=OEO;5 znAq%mgO4OGNo$aZ-=EdwV>9?t3oAoQKrBgwpR1-UTe>W2%s?o)A^^j}37VIyGWd@g zf$>PeT!Etb>=A3~`0R(m(KHUiAw7E*ai(Ov|R!Pzn&QFed&kPVWh;148 zx1)GO$tqU#>v4*4cN1w*Ufe{#I++*u z8j7e%WxKgmPSbc49(b1`O4HPbXIRm^0%nWIB=GRjveL@ZoFxyLOA4IdOl+fB1Vf4a1K(IRAcP1 zSww)6>HrpN9fS!6@Gq`YjBe*L31}dp`UNHBIq|vRHk?pS|$Xm zX(~tk{S~-dvgzst&c;L}(9t`S^NnJ77Kx{iWH2p~)?xa5E=EWi!9xkHmFv%igo& zZgwJT{GIg4691McBS}j-#5-qYXStaRiC^S>d7cT;>|#e8MWVJ%hTmw5^C}{*M;&Nr zzLl1gGY3#pLdjA_E_fx^vj?XQc0jxp1f1W2R?k8svC|wvBqecj5W>&qc&jaoMy$V$+R7O zsoB|yVK>u32?eWd15TDOjZlQ6{kz3=`MC3e-uj~6(Z^Sut*$l-)&Z9DVsXq@otwqe z#KmQQ*)+=$Np%xOC)aC1Q1jb8Go}xR;1?OeVIj?#RLQ2#=+#j@O*Ba7%VS+>vV-64 zcXt7C_(XoXcLoH9IaOX*s=geYHmObir< z*WB*22I=Wl2JPR=ipoG$AZd}s)5VcV#5n~I10q0IG8uMjaHt!s+J^dw*mb%{Hz9lp z{t`I8)z6(xC>~lgpLn-xOUF~%LygBKjjq9QRccL^Nd<|R5OU~pvnW!fD^uTcLlG-YD5~fx8bM2Rphj8 zZIH9A4d7z1(aGi3{tj``q36{quP6yS?CWGM=6%Nvm#qzFzW`O*Y1|)e?=uYA^i0_V zLLS}_w~IY`O^;w48?NCl0*PIWy;=^WaafH6U5{_E8lVl;XQWl!%pvP~dgjE}n=0UB z3p94>todr68|D&Sdq798H9D>oM)l6Q7*gSiy`GC1#gnUHoUhN9zs?`h498pCO z_VMb8Qg<#yB%a5J5EccOXWdgS#cBqdG;}OQMmqskw0zmYA0Rc&CT;7D5k(IX6kFRb zhOdNC_#GuPb?P-ys)WF12=LrO`U!-mMBMFE+z9615f|M!*|_e=9#a^Z@mYReZoD5z z8jg+IB?M28v45r&3rb9moO>gJjGjJe3=3e$xois*08_Hy6RD3F;X4*X>>!NdDh=8N4Z@%_#1lav?p5hsf{m+Wmm6Xmarw7y74L>+JX 
zXDtiPn(n}zj3BZ@F#jcww2~FqU$V_`Y2tCSri`90*9tw#ZiX~a5vn!6SA&I<#b-KD zuXm!oStd#eToE&@3U=igLl;j#-A{}lL9mA=k(#fRAmR=)5;{!UZ3=|v&!`Ujj#p4u z*mWd)&%ES+y-mYpUMBfFD2~c1Y2eK8_2z@0A}Xpb52&gle0~6Q=Y^IO7kpwr6-328%WrOs zCA{eWm@qIQ$s>`gX^3TXB%6ta{wgrc1lpH^s9b;|BQq1IWm#Gj6fQS%xDwAZj292% z^7!_ANE^=35>)WYSFxfV$1*Fy>U}s-6f}b>sBJ+L+h!ojjdXp#xyq5k60nxxq=%JZ|9Lkjqw*ONeHw=m+sUj!lw^m6Dsn2vuBZP*w>WXT zhmdPEt)#pxr>-lL*b^*Q^XT}rsCaYav}FlRPNV|157mZZ%_|S8sLAwtvm3*2Y+v8W z%&HDP8oE+7M%Fb?5e1Pe^HZiEG9(x~+W{2>=_heUzbQXUtI*mJdO_?7Y}#0_$4g7b zAN}#V*mBQDOO@143epD8`4e=^!1XM^pymS@5MSd&IQYdmqfor!wG`lb` z4S_*Xgbjiu=Cfu${-XQBVdTO;m_im;BrC>y#(K!g<-x7M{^x_1fa0U&XWaR}LSvVLc58ZxdCp+dkQYFH zhJ?W9e?_5^{3W2z?48Gif`TI~^txNpYY?k$h!)1;PX`9+4awueGYZQF^BF}5vGjB0 zr#F5DKe9jpM442!SM2arNnbVY0Fu;R4bFuBkFu|fifdWIjSvFC-Q9w_ySuwP1Hs)P z1PC^`yIXK~A0WWs?(RCc^EmgO``+)j*8b6Jcdy>Nt-8AE`?|{i=9&cysrxin5|j z$EanlGp(N9kU`tNAK3?P1qial6#UURA;kPM7r z?iGdE9#OVyuIM$*Na)j8Q9;jj6=qiPB6n|um{;?_~bEX9@yG z6n-hdA8IJM*LOQX#CpkhTL~#M6GLutO$ewI05eaDiHql4@j7tf!y`m)Ul3NV!)!W7 zvd+xQ>M@0vlzi`HH_$`;>bCsB)NMSL=t{ULu;$eOnjRu1E{S5NbiqTga7qRr#eJbb zi4n9lz`-Hpj+6q96_b=SWmcH04SL_TlB6Z{Fv#*huH^S!ySXvPr3X&VDM$`7ky=1o z9S(w1pB)jcy91|<`_?$Y&3%H}Sg5_{jVv?EvLYf-%9G4mK;wc@y4~K9kebIv)n!&G z-C4p$vq5KLY3Q(6i))ddZS34)3#6~ZN7!>gXxZxEVK`0awUvVWPZXkhl+HDM6aD8>geK@ z2-K$&@FGsxvBw#Bp=cU39!XIQKnVuAeqCrVhx0yOI>2$RC`KZrS%`S^fLYZCP%2Y#p~O;58|F6E5lM}95(I7o(B6kORyjq_$Qbl} zmbJ-F5c4u>X%+&0j>xf~Nh2|$YL-2hZ5JdZW`*69=0N$fWV3SDU4MX@2bqaU#8`mPO{E zcyi|WO3*a-_y5RT-^aw{WHbVT(aYVDu*gXMO{d!bO0J?*0n)JY-a@S{OsDst739>P z>u1!Jr&szn4q$8>_@RE0OdK(#^oD|Ux-)uJFzHg{72<;Pt&u2cwApa!m8n}23U&!f z6h4>7szWDjJMg0meD_#+~m8BVtjvlR?~2IGlXdG_32zvd1u4wA9;JN2**|J3B_Xy6}zeWyG0$t z#}11logkSL#*1t-b`m5ej%@chkB@klAF71hH6FoO&|`^6w&H=vw~s=C-i1^(t|(>N zjg9$xvE+?vFb1VW=0o;;2qTnNeCh?$^q^N7g<9~>@rXe@u~}@GCM_6NtB;9B-64Z$ zA2Jd14WbR~g^H*1XFvT2&B;TaNrRauvWp<98i~~Wm`@F4lI4Qt7Rmmv*r3Yxi35Y> z&N}W4oSdB%n^(AzA^$o46$6>WQ?_sBCXSdT2VzihoCnff8ZBL*!I+=-XB@h43B2lp 
zrRvOiIDTu`lk!c8-3zvTsUTGKATL6)6^iJ=R;Y(S>CPQ7VY{l*$7&|2@-El>gD3{`aOR_P?6m{|d;HrP};=OZ)GeG>Lz;?|%!hMgF^~|68FI z?$Cej3IA3&#rE$J?BDVQR{t6v{@)KEA%6>o2^PVIhyK+=3>~;>)AWya{J-Zg!-OSO zNj*{NyUWz>b$9F+c}g_Qh~Sx-8P+MO|658ruD~CInVA`eM1g+MgbAAoH}ZS9h_LX- z)YMc!LG1r3`=|UHO>b}SKi`WSh-aW0{Qqp-;kespfyf|S&~g1XtltXzJEhjE*CY=% z*@{W}5nF9#+jODBPtEbqvE`)X02+}MC}%cfY~cO7zEFud^I^6&@PG}FtQ8v%d=X5Z zNq!j2^az#3d#;VJx8TdRfCyDYay>LS343dcjLC}q?|13AS8kR?merXLXFW1&9>!#+ z2A?icTH8hD`{|QCqqNM@|Y$Ya*eQ5Ftq-9m~hbjEc z2dsaeO;WjLjvTSYF9Z~aE{@as4I$xC3_QzE)n302OsAw3!+UKe2w_px*rmG7&CNXU zlMO|$rb!%r^a-bG31i)klaQc9{p0Veu_9~(Jszq%+a05)BaGqWPIQlOkefo2Bovw^R5GvTJe4!0Z*4(>QNs~QP)Gtl!wth?T~%F_aV zNlO=2`6W2G!~eqqh}(ygGH`#)EDOHN_D-8$qXqEMMV_4CJ2}-3jg1krQ-9KvKk>Oi z@wV2iu{H{k7-{1{i=M3sfAik;ic z!XgI0VsN=Fec$gz_3Y_|)yB2~jmwG}%%)nwcCrGVOA}rRsBMN;nf=#H)6Ccs}g4XFTe-g_c?4NO~5}ba1NXoe_=R^02ea9I5gM^wE zH*mNm)=O0s-u{W~Zlxn+#GoT?>b`CghEv`D4R+m|Ib!|X00aQ*XbX6bMUcF;(+<5m zaE?ul59F-$ZXd=S_xrz)696PQ%&Vc~2Rqa(K&DJMMeV5F+E&g8<#Pw3dlHrzJnvBScvk#hyV zDnS37_|BgGIwo2bRkIPXzW-Ib2Jk9Dh+FJ<499n^N6Tv05|RxLjaM8OWG2{zq@pv< zZ=5WTT%~c?p`;}I zYuT#E7SQ>7WBeF(EVrz7BCFg}K937|^<@;J(~Y_|C?%?MeAQZ?fJT|J+l z1H2zOJvW}_QS7;|1ovlaM9Uwp-Y)Dww$IN`O>yY&Qvoe@DCw}^(J_*}x|ln7+3zNh zIt)|jW|N@G#J(=6dS|g*%i&sID}Nd-i#dMBl3#oXkXQ3A-IK05Bnf*kGW&3b&NDu# zyVjf+&X*F5Y$es9lzf0X_T2C9aQV82W-cJ0z}c~uCGm`EiJjpE2I)WDZtz$-1G%Xj)7@n!F#&dDYyKQZmd7yl%J zloL$)Im`;ZHkxT+PN+>FjDmHK<;vs5I}(<>U@VtwXlO2qrt$Q2e~kMJAkTFHy7l{V zeF)(VE*+rkyk;`GPvZQd1gC0Pb2XSh z-WQA6STWl!xcwAzAZM{PH7zZm32lwy#}`F8eEeC=oX*thanU+lql$`>EZv10Iok!b z^LM6N^w)_UdQ-nRls-p$Cz^aw4G|Iy{lC#fj=x*yyi1lfquFDIe|z2#oLrzGZka|0p4Mzr5 z+LjaydKX;CXxMpvE5|qC?o+E>!SjJ5fr#)5BCSS9bi8XD{#r{QbXV+Fglw1q)?=(xv1P(#Er*^0 zzjHDsG6BY|aA{dn`pfd4g9cB^!DQqh^KpQJ8+p>u5r#Ts%gGb=ca)$)H14}$|LHx6 z$bnvk7cv?^h5U%kO7ii?hv?<7!Qu=De%I#k@HNK-oWdO{5E>+RTA5eEKeV)1v|Fp~r=<&@Q}d>N`IB6DxWoc-(*Mg`3@v=XeUE1_3m?=kq8b2c==gE(DN+u;y`dO#_Ix0x!*zpF z(5Gg7H+%-hp6$we=`RQN$3<;aViI;7+Bn53jd-LRcN=a`Fg|MOBH#R)&_enmIgxpU 
zl?cqL%aX?s1*x^UcPvH>U8V=e{~8G*%x;qx6)+c*2X04>(|;m^znNEJH6vzbk8kOZ z*G57Sf%R}~^HuHqOheKivAo%R2%B-iOaFfG84NovagEj<;2g7bXTRy;nzBfa%jbRN zSW#Vg<-7X?qUft#67-1> zpM#g-e6X!Dvo;40posI805eWDIo99inN$tE3N86KWr2Zfu@Y^-jV63Oc>1jOO?gnF zgevS5N(5oUjR!)6w@e*hley25CPRcO6#;PvSn^@v8CH5*}<4#mPqr*G!{i&t$D zJ;}6LT07+zNOMN>3ysNeBpab3#=4VCP$T;(ypbUCukuF)v*>p?%=2<)KK2^!XH z^vLc&m0aXJ`ZEJBM@Wd#0TiJ2&xY%wqeozdWh@frOK8f(`M~Zinp!b6C7-$s)oi5i zLS#R9JdErN{ZXszBa&S)+&Xo=G{U_DPX^cOM-eLAe96Py&X#CLTM`QHF!C%~>2v3V z@~2dZrD{u-@wUV&3qsvS)ZTY!bN4GD*qHS@zx{1UHq=pHSehrbd-LPM%`N4p0OD4# zc)3AbP;nzL?IY4hb#XjYY?CXk`r06<&uBe~b;+%w?Dg6h@RrDX-&yuBQQ2oAc`JfE ztwM-G;6yqdmk{xD{ANPdkEz_o`t`KZxE^-2kS`;ZnoO}q$KJU@Y{6xWMt&wgv;G9m zL^ne`0ynO5DbZ_>p2Z7%rp$_zAa@BhEl!~^FdGXUSpzvq@Ty45YhG}X0$FianlZj$ zEN-*&+}6A_!Y0Bxh9E&~@|?(gm)Sqt*Ccn$YBeE&tAuK{p+-`lp;@)x%}Xhmza{cn z^d1PGyq{rEa_mMw>izk$y+E?&*@mkRlG8Bcim>FqS#23r9($#vMQr%_Kp`!rC2&u% zVe1tiLUebL_!K7%gq2jpr#)}S5Vb&!<-yPea3_w6E)3$|lU-`H=cRxb91xMFQLrmb z{Kc{IH8=xGd@vc)Mbq3;1|Hp@B{T0o3GjU+fCR&7@$tqlFulNzC4QJ&!^BPZs;12e zCH^D%9?U4>fr6V{92Yi{&8fOE9;Fz`zK>^^XHW@lphU15p@BDR#q%mP6;oErGN|at zi|6Y=O)L7=x8!O|4N6C2qMZr;4vYl$=Fm*`kmq_XN&}VZ2Ex2QrP&{?e_jAhFq6iJ zhv!9|tvSgwI0d@kkYeBMWMa|=-;}BTTq>`?yK+pfFo*MEi@^j72`OF!P4@Nx$V7gB zRJmIYtEzHaoUtX1G6HpJ59^Oo`IW|V=_itz;#=x0Vo>b67q!LZMLGFR1>}Z;!Hh%Q zM#p�GJTY&_6#pXhIhIA9l!fCs2A>rxZ+0X*==_pmbc4*qq+u-wFqClO%#?v=HyG z;kmsnZ7lOT{GkBdB6BBS!1Umazd`DqC_2`7mS?!#QNdvCLhxu9Xn9(~`i^+pfJi{| zgT5MO-98p8EB_Ii#-0a`m32XBONe7vX6vZ8751WZ z+=p4Mc(-#dQ~4EqK@TE{y6z?Y1|UZkxPe0O#6GnuENn?;wuezKDQR_x1E8xUWQ&(i z4r&2dLp0(G*l5a^Y7| z)6vRMjxO)lX1IRk-&-ti-_fb0i~Q2xZ>fp4JA1l-PM=ipNk(=``Rh^XIX+nl40eNYXBO4aBMz8ROUP& zK~|akV-%h}^lP{OUF?ckK*8a{m!7JFV%eLA^25&V8n1UGEl=s8|$i&k)DdZH&mlxy4{*H(iCxk zz<7tja5PTr>kS4ye2Wj1dH^aaXiR7LGBqp(K}V>cbw$|Lmen*IlN~w8C{#&|;cA?g zl)g7~Y`QP^a zH=E`A=R8Bhi;VHN-F#vn==7E4B;*!iQzit}hF-(%25js;DSx5kw&y5K3rle@Vxhc- zb|FWnH-nVJnRvWMUv;JBkXB-V5~eQ0Bd=(*&#sbP=Rst%1?G0Q8CYRPZL zwzSSEb;dXR9;zgIALwOrC7a60nuvWh>6m$hQ&3tIcE*h5C;k&x#Cpg~!-_;378Cwe 
z_^1r`0M~8HJ@HDx6U!8A-dqHuxfK)5{*!>4KCT5;I;Syr| z$Ybrd+s@kGwSTW3z$;-4@;@d--`21^_abE^CZ}GO>^)DEP_Ya-v&9Ht`p3IF_dRwN zbwXWgA+cXQ){`ZZy5dgTXsrlfb2tMljwoI)_6b5ClIvLZ(Qd7gY1WcP+v`hzMOEU? z2|i&~yy_)YO7+aI%bpwk1d2!e<+|h`Ik9ciy6dguYsyXg2!N@efFJchTb^t0{y6=E zfj%nhl8q^-!G!+p>EKvRhg~a=bi%`{WbxutRYS4E<`Ew$8%;;sT(zA)?W*nh<>iPv zaDdT5yfOUUc|+(;S2%JR8aI^WoYr+D{(8THxYVn8byKAWJ`rQR@8J54xA=IyZNpNr zQ-_rhLue}&7+Vs=MGAzT$#t5m@D21O5Ma~C^Eg@bg@VImrlLiRF!(%7>}}{1=ZavE zFecCU@GO9D@p-P1XSfU(+aY8EmP);fZzwHLK_vnZpNZWQ(ke)+p)<0!J)1mNyK6)% zYDW*<@vFg=^O%D@e-uo`%MQJ2#I5UgQe2yxc4}{%S1T$zcj7|#CouOTRjfAakQ5tJ z{W<+yr+cVCSICm)>sa~?uEiKVIW37#t=XjLEZp5y-Ma07CRcU~tLnSd{8485A~;vu zwBDn%h}YiUf>1RuD5OiDID4w~IS1lid-H(h*`=>|TkbQp-79b%r0;sC+-A*hJy#BJ zx8>XpC;UwKNXuCVi3$8$j$f-vs=%CoUQlO)<$bHEukq7cg0mHT4qLOSf*`?CztTIv9? z7^WXS0Tb*u8g8Df2+p>;f4eUj*i#%3K8{F zlK$Gta(lX@=-Q9ic*-NB$`l5yhmsp3fnG5tFEWqK^k|Ng?OD?uKXW(OpN-@#J!?b} ziEy}6t}*?ZNEVY|DAl^_AhZK$H%G!V&>h_SL-I?4jHh4XIKcMAw$@mVkz7#Oyc&Z1 zx9JXewsUnC#mB_vw21Wt?y-`;*=7L&rlVcE%bLu;-QTZ;*%=VL(v53FZ-5? zOH=X@DL-?!z3CQXZGlHQntfw!xO+3Yd(DL&WFTu)640@Hr*uAQWNbwvF^;`D zySz|c7QXc7v(2(r6f0kP@3TY3OCVREu^Fa?@Veicr{g;!qa8lEV)q=6R|%)cOOw6B zVB-Fft9p+)YxSJidRH7>s~y;Mx}BM9xDjbvvCv?gBAU;ZU~4>G*>U1&Po)ppcQh78 zCx&VLZMg=)t?FC{l{f>_nzFV=&s;PN_|moLzn`I`HCAo5G^IN}@hsk7v*KS;pqZAL zxoh*Ua(tmz;JIt>T{pBPw^^gXEb^}f{(@=7E?pl^*QK?zqucM8cVj9TFDf91z7rD6 zS0H!~+El$@t@Spuz93*itaL?+)V1%*ZZG$DHk?)v!*9`BA(X=nX>9+G0T3xxU_~at=c39u9&?VHobqU;LExJTVZl7 z-;)izG(#rym_FZa8CJ=~*lyX|izg;U2oXkI$*Us|P2G=b{6p43B|U;l{mWJD<65^Odj;aAw~f`dFK#qxOI+8tv^G|5F={Gu?bWO+^NLIr zL3wKYqUZcQh8##gb;jZRy;6%}l4>3U@qgfgtYJdNT%R9eu}a`{|AuQ@i5=Rq+4)Dl zQx!8i2pHg#m=bz^G@h3iI2}i{SC2~_Zu!KSQntd;iiUVxCC)Fd;|NVRWC;p(5H(mb z_iNV#9kb=YSKkFwY0V#y&C~>R@%euMG^6NRBZSc4;W(8YIH){aqP6?{m6sU8uic_2 zih>nHbfxQlanW8<$4E+!ckz9!)_%M`Pc49G8&$DT!2I{ za_{j?_I#Cy`A-TawjvgO9lag`k@d>EoU zd2=#Kx`s^Qr0HYuY$G4&0Hc0iu5U7$3@5&HA`5!?>hm6 z^gF2H2E$RZQUN=krN2S(R-sc^$>Z@ZLePIx0x~@s*n4__QMHyvLON}SWvwRyll!f! 
zPQLvj7!HJGc#LJ>U3hs?aC;5~0oOM6PXo&jcP0)LL z<{v)$z`Zprc^huA3`*U!_m=QEzJ0^AzIBIv%6g-!%!sFS!qjK5dl~Nv8$~ef<@c=Q z5Ohn|N~sfD=_nGp_KLR67IitjFT3wal1N_A%exhR5WuHhwNZa(ybj1mbjtWgu8j*k0E>S@K$eoHo_>V$0InHYxX z%lXJd_qP)1lgL=CfA?#u(Et*)@vaFgKP=gU_8D|27}}2gK}(kcn;CrdGSlmzRZd?~ zzKcgc=4)m_agw0w)BB41m2ey8^O5Lq`Oz%usvyusqhL7E>wWZAVP;33>#1V@&6SGy zQMqh?6XELjA0h1`p$w2c-)yWhI7SS?=O+c_b!Ey|Nc=LWA8Vb`?{-CCavL7@{d&~r zSJAC9+NogbM;}OE*D3O65|SXL;C#0VkBzRkY!4s(L~MdZ8p7vc8s`0}H}vt7j1@Hd z2#*&|LCib;+i)&6HA=#_vqVzC1}VC(GdswE^^;hW>6RJHY!|VgS07)^1sqc7e7esY zNSRAuu%{S=R5gSp&iO3IXL!grhWf|hPCaz~b$?_l9IgsE`)#x=uY8@~Gt({N67VX! z^0G=4R*dN5kXKsi1sMx8#Blf99Na2>F(oAqLJlBNz6C3S=sA7lSMHqRQ@ps{G&+d= zUKdGY{df7dhFC&kiXv!iZjSp!o6O;yox+-*+Z03H*ZmFK-(H_guJ%SvriR7g?A$ z(0b=}DGgo@#Z7MW@cI?!*+Ms$Znie<4Qa=8J?rkP{79vEo8#Ii~hA5papC^L5*4W8eG zy)Mo7-^tRGa=e*1&PNlWSIq+DGWv#7ESH?Rv{KcG8D4e+4kHZJqOr@pm3_E56?P1+ zt(66K;qo6P#HQQ2Fgbm1qw~D#4cYh|^FulNLsR$twV$1A;~l=KH;1Hw1Ut39v&dFq zBH>?u8+Qoyc<8c6f!jA=@M|w}ByEKA1UakiL|G3T)!9QTcWL{SL;V?LS?AY4JFl zm;lwldV)N*-@@U@w!|PvZgQqJRGAU&Pj9kxQE*tT3FD#bmAw8 z_m-82xbQQc>BUf4=si_02%4ew%~pjOuD6SUTQ(RV-9?nld%XDW(Gi+mOp%Cu4Y7Rrn#O;6vVtN-j z3$qp&Xvu7CB|clcTj^bTWQe!i!M4*KCJ#Ux$JuP8P>m-3!kfH;Z# zHQAOZ1tN^zG$3ufnxWQyw|NWG+k$@(%#l_}#zuy*n%=DHEUOjgJ711l-qC>l?wwZz zlWS{!Q@duId8`|MJ?M>%>2#dd90#mF_8;c~+;KAA1kn!zXByo%nZ;QFv-&>gd!(c& zP?0ERnf|p;6DC&}C-TV7i`pKx@~BnG zVZW%;IxMc>^OqTg{yg8lewa08!=s$TqLCTbXuPow>)hPhj+!5@Y3uzT7JxKCxbfwD z{9$)mpYG|@@S&eTT(T(eCTd1`j{GmJ9CxdkzXO`T?z;F+2Tl;}Pn2$PfIs;NgP}z2 z-htZHMP&usePn`Obr3eU!AMsO(tvep4d16@Au5D?67=l&fJe&K&p3Me2~smPt2!$; z?G2*FYsvfZ$xyE2>`W&SCgLhw7J<-8I13F}`(WABR8++cAx5_Lgb~@1ADXHe zo1DPZ(wRBz>kHarq$DE)PDYY7uQC857>|5!Y0@GL)G1WBUcVLT|puN@WVH)aM1^Lx&l#bt2a9R7)K zz4QZ@Mhp!{3}`Ti_fh|CzF=MPSo8$TuzWV&p=*xOHR(;^6%1dC^xqwF9Ti)@$ky{; zd=9Hb6Y|l(g@o0J?V!iSrzvE(nLHtQTVH_oY6=QIn2xL;o?r_nqbs_A#dt+t*x6c1 zLuqMSg#%>^20AT8{yB6R$tJ~`xROSzT%u56qb!j4sX#pvF>`%|Kp2N z61;IWO#z2tiwW+Un|>(yBcv>nGmDbSmp#5#2P|~1<$h-VbNdnc$s0$WFS=0K;OL!Z 
zJxvwYb@9#a<%Cp4F6SM#+;;EPnUU4I#YeSl=#@dmajSY6xqg|soD}otGs>kY-eIx# zUs*E?8dd9zV0+-?naqzs*qu=WCi8a80C%&X*F6R~*)w8Rv%Qk2{`Rw$t1FAfKj*$c~vj?=y`-b6#qfw^Q z2nE5E(h!F1m)wh&eCz2hjo#0jJYY__Mmzqh&-ISov1mdTw0}_EZh+EP@Iq3{)xWX6 zZ1{-i>b8v#0qA)!+|lL4L=Nb-kD|Qm<-LE5Q>h$snW{dldVVeC4_bwMUvD$6ZI!kL z;p>W7rNz2?1FFpq#oS#{9E`13K;8ino*v?KGIzpmGi}TidvV5})CAWcGHxz=j(y=C z=<{|u&`>;qVb9}!A>;J-Mlcp6=xR_5>0h}`uZtIVJm`BqFn_68_+kB>^N{inY9Ngt zd+0>Ew868vpW~3q9=X;8kpNCc$ihO*!0x(CU!GO2tCH~Sjzd3Z=NJX+Hq$Yi(U|!S z;M`=gU3N%y*<-uM@M?j9jFbP8$j4Qk(2Ux`iOor<4C78+TJ(7Zw}%+txgH z8V3C=%!g_%%8hoDcw{pdz{J8aA7Y*93Pf_9#-2VBYm@h2F3JZdjh~|tZ56eu->o^1 zKXvXSSh^mt4XyB1kWh}d%kKxpSCmSVmtu$oRNE5TOHf261|OLI-bG`X5ab#`P2|(a z3O4BdnJpbhFX-MhZ(XWuzW4jPa$y>1rGl@nBQDe)Xnks0;;PH_Sd&Rxhw@#C0#LoX z{YIsmC#+Xj@w_VNoBrBN`8r)p*uvdm$R@YI*%zcNEOMsdmSC_Rh-#jZwaSQ}i%K+2 zAh;(lN{Z2S2W7n-00Rn+FBO3BEl-I;EiJWJ$QNu3jYHa=>?n2eHkquE%cjgjS zoTz zYHurTB9rQ?ZwAQ(O?@HextZ~6tIrTD4(E}M!PDN)z|L2S&IHc)Aj?_Ly+7RE4*dbL zUSA3whg0m%?RXU2w%@^Z?y`h3(v3bY&B@0NTx7F+Pc7qD!ThskRb~Ww=l6$j0-$A*w&J(RwmwSiCD>zRJhUwB4s6q)bMX`Nf=i{<5%&V=T%BUh<#)$y3 z;)t9QCPwC#-9fBz;?i!ULrn=f4pVniT~|}P(w`zFhv0NlPddRrr^A*zKx%{Pq}E8a z09)qAl=Q%geOxk1td_Rpp;;?ayE)}&M|Hk>Z~jEjA-YcQv`QSt-9Zke*lfq`k3Y^& zEqYH#?}RA0wXJCTem;|HoNVO|$b_@8K3n<_M*JXP7^hHxAx+(w9f+-&-6I z0R8BU`TDcUiY#V1BGF?S%eQaWwEN&_(&E&lG?k;vmFsq&$k$XeTqF*q78NLtUrDXD zxYolsb_U(5o?FkPSE9YV6+! 
zwU(u8t?BaSoab?yLZL?815$V1&llE*3!IutS}I~|mC;vN(M7RYgD&}%-?uow)% zxmLfN2;&DSl%Q3or5>IQBo!6A$CA*6Qjng)A?Rb7$HS0A!@8Y41}vuTe8kFx-CNg`f-IvnVg&+cr5@)oeX zgzKWY_F;3@=Tm#bCa0CYGYnl#u_pxjiU>~-x~Qp1)`&ZUPFiukuu{(%n{qNLLJ>R8tJ703#Q|5XF#_jwj&j~G z6x)L?S4~GoOmXyCnd&=#cLTDoGf|b19#oI7BLMNAKt(W0=NiT8y8bMI%5j-3=oa&g zSEC=AW1mE=$`{s>ZbjYk`X-}xr02%}^xfQ;GoPU9^2~=Nkt%8e9Vv*!AXa9yrg$+9 zvwMD3I9gN`0vW$p|e^>`kt_t}Db9)fE)u$%FU zpn84j^r?!Q8qBuv&V`mhS+{15g*;E6(2a{ZZ`XOIX6p13flWt# zqq3zX-go0s*zd=ml+48Mbq*^kMsE!9b-(Hmw2J+NAsNLbSum}pUCR^z6BJ|pX zN*}4!#9jYT4v(1PAbFE;gl0DoghDmV-+@i1@O(FOfGTMr&)V>1q;H8VBGUz%?tUan zDYa&}05(8+?e#U9OVE<)4y_QxYjN1d7ch9(Yn5JHHYn61!z!O(YK6CJ^`=I$-!+#PDd#v_yr zfw@W~_QP7LtX`UCK5tFnvA;@5+|bg?*Jm8%Ph+q;{@1E|DnA}7is$INq^M`f6AAbH zpSE`)U40dR6b%cfoqIC$0l$oxB z+g9GT8Ipkd7w@|j?rbwQT=T~U`>CT3t8-qc5>-x##0qT~O|zn6hJ*I6r^;^V>u#Mg zgnJK?{(lAA??hksko<(448B-%oNWKSBHC_>TGgngSr~s9p(N#wt|^uY+tCr=c@|AR zWNY6ZYG?p=u4;JX21x(ZRL89GcL7d((64x}f_Idq_0NYfGti1AjaW_ZKLQ{4h|( z%myIIwLqZodZC8}Px$cedr_u@Vb*G%_Oi`|(QovgI&6g%#dU3SXRWPCNn1?$HUTBT zzSlT=HM^$;e4I$D^muJF zAM}A1{JqJ%w}+8n$4Dah=JhuhFm0;%0hyy?u&E6?Mc#U^hMhg3(kxPK_sJsle;#QXJJwW6VHbDl+Xi8b#K& z$Si>n54NV|)>Dgmjfy`ue%3+aUxh|>9##sWgV%5QwZ7lGd5tR1U@{#)XlcbjM!UD4 zkYG6SII3HE;E|a83PEKo-czZ`4StCh?5m4DrC{9C3Fj$FVFNPfQ|A}fhvQp4`Wcbl zip!C8$VuFkmsuLw8a(rVwRB8XXA{&DXC=X87-wE+6Z3lQD>rlpaL{cT82x$s1OT8) zt>#cIPW53#5H(E)EMtjhst#@{o|}8i570M^RN|lxSuKJamy2mX9?XbN9En97}ZmK6$hxbabw!1$h|0c7fb*pW1N?c&r08QO}6FmBszuK5Ad1P99ZcKERI-+MFNA7G7 z=Su6ZLjGfD^{VZe8l`<6B6VJr1?yAvn=?$x5JRIUH6ZG@zbe-f*H-a%jhK799iCbq7t8VrJ{5x)7-e0*N@ z@isy!cB6fo-;FB_i-fJ3oa+judP6yKo@$9L3-xkHjRcP`U#zOuQV=@*fZd~eo1R-8 zI*3be_r2LEDqe*EVMgmO4VnV_n^8GR%E1WkM%Qf)+i2;bv;GG2IOJtn33-gepz0NrXiX5x|ypJLEHJP#5N zC1ux@tVeV-B@!B5z^4HL^L^wWLrh@7Up1fH7w5e)q!Zy{gk;GiEB{1e}tTvLF1*%!+VfAFLe z|6Wro;E)y&F3AS!Q0rR?vXFxH&Wr~RNn8gdhS}3}<7mR$Gx?qtbS_!fi#-5l0nYaI zESrqmhatxkfR)8JI+kVe;-08lKYn$bC0z?=N4KoKrSd@VYC84E#A@0MGZHh)KB#o@ zZ!Q(l=D{9|iaMmApeQ~rM+b-0DSZN+|Fg5!wzXyDM674B)eULUNd4{THns2Xy)8YK 
zN<)|c>DJ#^)g{(ix@lp4i9B<+@8>C8{T)FyaRoOUWh)}IQ%!7bwF!rUsNVK^-LZ6l zNlGdO>kQv0Sv2A9q$@Y5{~yM9IHhpIky^vuF!#*(eMai_pC8|xuHAh@c|)F3v)J>k zX~?>Wx5thJ`9>!G(mC(J&uJtprQkV#>XrB{S(I|MQ(M>k^V8|6rWf*dB1h}jqzQYE zB=!dJ701UwXL!XiCNzdkAxD%NRwhZ?&)0|ubvdQGLh=+}IPKzTYOOh(t1YfxG7~T= zhnOj;0LobBIR#w)i6lCh7MOdf`f|Az>kLtR-az?78nv*Yf**0xN)>*RsumUI=hI!E zOM~D272s?MH}Q}drixOMcIhNPt}=B1%ID3SG%zGK*?zS5(mE~zXIhUmTn)k+FQ+%YlN7?HUp-Y}vRQpX6-lpNQLrQwH&_0jJPXTj`XBp{Y zhUan=x-|VRftd(MW0R&=xPxm$jnvl{_CmL#gAJ%c^F0L_I;j%kJ)g%3-_0Si+)0ES z``$2yHR0vPXeTD6gY+Uv3d|bTp%QB$0p*=!Hx9!LeD}b~j=*4fe|fJjm~{<|J7Xn3 z;R;zN`FDB}8z3oa=p}6Ep!j|aeg;FyGHx{p7&LVfMyb1gd{m);$mF#lN)9IWTqUNd z%XC)|2zOeR)eKF^pHHja5~>{KpFCR=2LqpW{kec)h3}N7>K}?8i2cxv$plSp1;HEy7Wcf*2*mM^^0wZpsMsc z%ih;lpuTky>HRry1WSvnsZ@>LwIE4HnY5O}b7tN4zu05m|H3k7C#OK|)XNPDl=k^} zd|5N$k=1hNp6*e}F2#w35dwy!A$@tPNm^#!dMAU7czl&{0)^;|%%&KK!*M(@=!5la zD;O4D;k-(v%G>pQdmpt0HwI{E18H;8Qsf(s2*fN;&^DB{UMmmASYm zenrft8g3mu8}*&}m4(eEg_plT)VkPv*N#Amg#nNJ`WYh61Yjx4`NSG0=#RLF3ASm` zt&&o-l)!FjkrRcV)8H}eRcCIlyhVhuvwa#W2gF|tQM+E3Ez|jHjcD;GX#xuFeXiIR z62dKm;|IOViT@-K?k1j+6|@Bgm-Ht?@&mKV99GcM-RA2%;}-=*a63BC6i2pH;f@6frV`dBcgIXHWgu6vj1UQ<;>h2nTx1nO#~ zaJgQ)Vf&P1Rznns;=$BZK`?$4dHIqOzz0@V2y>>@wWR#-!5&#c2W_O9f{LBNwfm~S zTOxrWfzQyFD3Pc6-kDFsedkylJgrQt<#P$zTH@YPOj!MPWaiE$UtR3(#{9#5x2-36 zWt`S)HRk~6Qva1d35{amSRqqpB2OEsXJ@wnF@H7 zhJ4sm)4>>@ugSC7PZ)`#$W-X8y(!8SbKwnSY2$f(y9DyWDP z`|M_rS-pwEEV3<8SLC%dM7Htp{kUX|c7>xaWZ0aU&)mFcI9mT*Qe_zx$I0w#us!i8 zG*~!Z)>qcDB6G_mJjkj6LW3!1Kph171}vjHb4Lwcp9~z#ooAy3{5XWCljqd5)I4Z} z<=f9*5y%PNwA62U=2+3Vxar}BO~A73b6&es02LAiI<56(=?Tn=`_Lnb(%zSpJE zB$or)jH>#3?dR|jiwIo;iXjJ8ta6Qi4MNU+_OuQgENVT5KVR#aNu58AxHn?p%9WVQ zUm6z1t1+rD&|(o*QB~dTdTra#es#MM6lt-iqw*vb0Ofk^>EACBc0pB~y&vJSzcmF+ zgbG*-KrxfCE?AsT%Z#@n9dmVTVt>sm3=ED2HBfPNtB_hzIcTTLn{ad z^Yz8ek;CvHxWLWtp7l9RwE6*GTKJneEqaMdEZjP>W|pGXpB1RAlsZNm5D@GrZGW;0 z;Lq(i!B|-vI9j&#y{lV^n-}8-Ge^@01nX{|HBXbYk_js~t2;%Kkg<>XwT*(yT4X#xcAT8=zId?IF#SjU2ikOft%vs8ekq?RN6w{So%Y$Wv`M+KbiLX-P9o(efi?f8M3>>A^lyMxnO(NB 
zGF#>$y%OcR5e}cMSvZ=ZP;yRu{9N7k-stNo9iKZ?&YC zSX~|5DIT9B591M}rT28ul2p{wB|J#hjn^;A*7kNdgor9JJCMgFM1+UM>k}Sw{VueQ zuJgiWhzY~kg-9!;N!9hCaWzLw{^p1YOdlD?=C%&~G6{`E6vIsKLqXbvode1Ne^JgFl~-gNd7+`;EF;>6(%_zi^0Y-wf(QQA1IV9=>c;q1GXz{hqn+fY z@hQ7!EYHDx9QA2Lv>P%|R1{$7&0BgO<<22_r$%XaSa*|>i zkhG)M4~=+<(ZwmvMDo&Y+NtY>`~g6Odesye8O#(yybU_SFVvzw{_!WMrU}z7J*$+9 zD<6x_By;mne{BQe6jKhrNp(y>^JI8QYo#@<;VILF_a+I1z@+&WS?*Qo%;}~w9PyZ0 zq29&ngRIXp|LmR*n38w2>E)V10Uj<$hXDF2oyu4`#X`0WjABs3%)j}4mt6{n@gq{_QddF@@;xlp zICnHdbaj@Tt)nn6xSbo(=n0T`sD?qdBo6vDxUKvw5crb5(My-xSCvEex_b0(DO-%5 zB2LYew%y+OQ^uEY1Bv}?xb2JR>0rQVl0DU=Y)L;e9HxRGfyQpIxfa4#l+th%HDQxs@USa#dDx(95BC)-^ma9Onn>m{Z>i+>)Vlp6z45b) zz%;>2Oj`BJ&>Jgdpkj=7e!&3ec4v=~?_z48#_5En2Q%1bE;@al zcyc61=mn}cCr*#h^q~IQJ+O#Sv-owxK)%=OzC->HrDr3H@E-dd)1`7r88mH5F@|nJ zD%2kr_R-6I+I0dK8Ii3q%bJ9o^oDzN%8s6leR_c(WkJB(J&z>2AT@Qhdcrk70(0(w zGxEz&xH5RSRZ$8nW;2U#ys3*#a`ZAz4TBNy^Jfh50;c{EmGNbV3KqDOP}cK{F8`3K zj>V4UA#TrabHI_rq`)Pt?>X@|sOtHSKHv6c3;Jd(UDyQdzHK9NYci@Z1u+{wVz2QQ zY(+4T%7!P(?`S7)Eh1DBlQ+A~*8Y@rlkZ6J-7;NY{2-rHP2`?W> zo<(^f(nch3DdejA$w=;StUsc`&~d5c4rx>cm-Kt@C|1IMEFKz`Onqa)S?#Y50Cy78 zR{)pU$lp*4}9WzWlQ2`Y*hLQw517`oibG#n)cfy-0c{*IL;a z*YNZ}ApLNsCub0cXtrsVs1>wUgU_O*wx zVFRM{t}d;N@MI|nQd4D+xFG?5`-o?IFStY!1vJDsZqS)P6V*F>t%qU<7#@#0(8Qsk z%1*B}xZpH>`~0E%Jr>NI++k~UPI;Q`kgkj|6)iJvl>*l#qGX$Zxoe%j=rxji#xJSJ z=-RS}HhvtDQXd2W^uZ0kp)=?3p&?$NkCA|pn3hKbYhp-xf0~Z`7jHTrWmX3XjG4g@ zjXnSHHjDXh3mbh|2C|MG!agCC>M!17m@TqHWqEAM2>XmxWqFMTF_x2k?OeOtVIA{@ zZG4YIp7*a>Ec=k_b&go}Pnawvtao)`_vz>ImZSz}1r5yuR#4g{4b^=2s7Ay!kRRW=RYrcypTGN8>I;=Psl4s_Y zRjnw#@;XpR#VqOO-I$P(?n#HS&lMxl?#_cc1C?CE5t%=xyZvMs zX7PmV8Z}m?>;_Iar`8(63r%p_4r!Q2(VU-4v#)*aglC+aoj(LSj3LT-&r}W4km%hr zpQRW3*z-ueQKfWz{E#Y5oloFoZZ~$g6sZi#wYc=Ta#{B%&wx0;fnI5@K0IUcHYLfb z_4_cn)Onq7_Dbm&D*t~4?tvyxE_TgwZ5r^$zqHM(X8NY@*CM$w|Q-OR*9 zJkI1F1^NekCzJ*L|HuuKlSb9sn72_YeVO7o6L0bsNG2}fT?_>I7DpboD@>gQMh-v} zP?=l&Ei{tjlMAUIq=~EOlQe}T1EF&Z`K*2f(6N@~ObO_@QRY#Rvvcf>M=@y0n zQV;HBU1hp$sMCIaZ@PH5x`1}Y+}j)l*`GFRP_KYTJi@gHsxwQT_+vd2vz*Mv0u&Di 
zlAU(z(5yzctFE@b&^^p@JaKibq6t;KtTFs#$lz=uT6U}WqIvd?^Tq`%Ov2G;5%Ilk z6KtYTKWZJ26k(yglX$G~YsxLF$#>PP`k3eblDrJ*%dNss$)?Vb#jELNDF6OBEBnkd zf%%{8Emq8mXaj9V*|lrF=ko)M=e;uHt(_s6l4$^pznmHTT5cn7kTJYlB61(IV^(<2 zB3Rp$>FiIIr~Ob(`)#}?*tRi@=h4;cIq=27qYyeKnI!&a7k%GXa0JaDZ`3^JG1Fs4 z`3ZUa4_)OHh&iBy+Uh~C?UT-NbcaWZOIxZ3OEM7rkyN}zj!{+bb z<5H=Wzj&79kN&-Fz{^{E$qs|s>Q@M z*p8vf6`1=Y&RoKRQZWJ4r z5grjy38csOxfmV_Z`~zozvGuqOvxoj*RW=FlFQ8cE-XZ7Z4w_b*0jHW{=>K_JLZ~i z_%V`%7?NeGsyooTQ;^g*_N#p22RL4=QR$nV$B)aZxKAaOCewxJNh<}XV?wHh>Kba2 zwySs;y&ts471(@Q;#6S#Hz$tt+6XzcRo)Cplt+6@+O53zhqfAuy2aui_VhHNlcv%H zNgAi~qhsjaw5*yF)sBP|)=a8PjB~9xzs@WMKZ!zfvU@V)T)eEWM~MsY^e&~qJ9luS~ZgaNes($>bC|kKBEcK+b$8c~)((`F{P+;$&h9Q5wUdj1><#K-b$fk6t z{q_UPVUIo7WFk%0$nCQ9TJMeQgnH{U7Q;=<`GO&_{xM)L8nb+n?@@LUpu*2JuDw`Z)`ZeB};L8zrxn325XmXbF zO6L~Y;a@nKv(BD_K9o%d#L&k!;>+Z^mdU6|?(XDE?gu3Ca-qh|on=YE2xR`qNc96u zAe;LGt-to}Z=rM2<3YIabq_t_<4hX1N47?;{+n{u9FPKinIK^U*T~=G>F67Sem(z; zwYBOYyu({6Ab{9^%Lk-PT8+^#-N>uB)_{?L&U%g{pv|r%#%d%%Xc()3($x4mJWFqV z+oV(9Z?8ofzWl2J@#@4ewETNSl+B>9h75Aj!j?ns3IBt){(BDPfe%y{aJit(ttkLt zdaBBuJ>Z$Pcdu1W_taaVINSn$6X!9V(MN53^)-Z`sg`H$$Wu(e*0H=m8Mo%S|%#m6d43rgK;kPu^T5p8cP>x=QBn z@>#?WZ9w)$_mHN6VczEhAq|Uig>f?Q`sc2A(Hpo%0z0j$xwk*s;`ZW1M)vhjGCFml zo#^4_{$i!Fj{ca|k;sw?@h*qdWir&vo>)2r?1HHD>4*`$^NY&PN3SOMIV63TJusXq zXditr3Cvjy5i#XR8&wmN1LQjh5>NqSGn=Q1LsC&DNB6lw7@>fj*v9}-VA|Gn) zt;D6JAAws3$2&7Bh;5W>qv^TH3$z@wmvLADF_bS^T)SmL^{R3NHLDhQqcN>p!Mdls^{pz5BA zS4>TjTp0dv$jL16M3g~cKb@F16C-#n(AD2-sd^kmpJ9pwJP{VYR=5NmIgz=%O1Rkw z>5u_~0yyLceeXsX^e3v8%c-f5roVrOKg!yzV<0gvG||e@ppVZi>uFi1CGw~>8Q+;d z6CO{m$DBKo_^I+kO(k?zWH^XUVS;@~!5u04c}oV+<${98IHSwyP);4tns|zUNd;xb zHK&P28xy;y@jHCw#m640Thpf26f*r1)n}Cs!+cDLFR8%}rT3rdI7bNZ_3U;)#=}QQ zZhNvh0_yd;C+Ng?*DEi_)F3st_h8V=yB728_u7< zo$}dvV)A{55=tibXtCXWVGo%j5S#_SV@8(b7Msc{CrEA}y}}6%biS+by~FVvQwQ$W zV#+F>uSY5IItepCPyS-w&Z+-)MY+JCk<@IRv96a#V_tw zgHzQT(!XD1b4KOCrJZA3qQ3{nRV>b2#eClNKD5P$I{<1@OB0Y(X|FYwmBx*?$_Z)ptidB zW-tTOBFYx~yF2uYC)bu!JWUe-z9cce7}ahLY>S%jSm*%WmUz}oE+m9tV;vJm4~oEN 
zEexS+_@$2t5zXW}ibn2sdh;y};HjCN@zftFNZ4R94el|bJq++Lo>3WAB0Uy(JXqu7RlQdCNo|(f|x@P4XxObQCJ;|5)0^1 zHk$Cm3cxoC(85l@bQOPD&e0c?ajX$5oRn zbgFr@au7vTTe#0l@DkHnWJ)v~TfGWn&9bG-{gc+QwPk`Q1H;+|X6)%`bS8g$JK>nitoH6)j}TUP zwtKI9TDboJc#m`rRAN6Iv|4i;pHTBkb<0zU|Dh+TD20-;0cS-9EJu9TxJ|F*cf+%w zjiVcTLhbk${CGFIrD&7H9J#rdC+g*R@a~2h8k;yV+#$Vs7wv3^bZN$)oJe zX_+IZh?C9Dgh^}Y7AHR}%rI>QLGZrC+D0LnqN9uAEzwxVkS}>}G>~!7sko?yD%&d# zw~Q+`^18u-ZeUHOwtsj;l&3M{d%LqeM}Ns-+ibRap;k>JH~#HB7GfP?(Vt&4`=4kK zc}kA5x78^HB>g?ud4?iwqX0@UbK?svch*mi(bijTtmi5fSxHAR)}|a1LeiW|aV~z= z=(>Y2Pw!`1bv0wDR?iINJo2SYG;=r8&loR=tYk*=KEFeH4=g4Vy0f;zbhXpP%*ei` zmuJuhRqmuYk0t$1*@D`Xj8M@5x#^=PobPxaNl+(B&0{|Zg`u^335hMI@C8}MId5)o z;aM+NAWaB?Mj4E8yRF@}+<<1E0LA`SjF=T4=`N)e#kq9xzu+~AHDxtl=dsmnTnDEa z-}A5BkQ!vap+T_M&S0v_U8RM%-hcjYiLG%m6A4h*rz7J@_P85h8 z2|5;N{U%L=EttYrZpzZaSJH%NG5H=Y47rRtF{55nyul{;Yj5~u32oV@y>1V-7#|&W z=5@uvJjTnXMzTI(k?{nEa8Mb8$kEurxZ{OcR#q>8Mtu1w^~aC*PelM!zE2}d5z51K z+K=4>wUKd!)oam%I3IK&5%hBYOEYVx$5sq3B>>toZ9BLNa~?hClBpC_#Z{KUt)@Dg z+6@e7Vp1gohvujueFWTJUIIm4IV$y3>DZ~lMZ3Zd4ECf zbU^}U&_JZSlZ^D9-PK8uzdh`RZ4a3(yjoIUG@#G=9tRiT=H?|qo z*FopRNLERvOs@6vZ*a%_3iiw)d;(6OM_b^$9rW`pmf}>dqM{MljQ#7dSvR)Q2gNce zH(%|;YRs_FSmk&HJw2wTGx~jxbq0(tYA9vbJsAA<^UGoywu?Kugr|G(?Q!Ab5u+Q_ z`5>vXKhATAQbAtFTIE`p8}KZ|#Cn{b`n;P^%LcQW`wzNNN=0E&0JgR+9Q+?OG8H*n+?A#nA80!s?8l*oK+ZL-enX%O9j-eY#t$>X8co?|&J#lcX{Idm}Zn%FBLPcQ&u|nm~2X{Y5dEkm#>( zPl&wb2{hHVTD8yY`m%EyLmCo}?+P@Fuo+a)#4T!1m0bw?G8R5s^g%3DF_wG7e-?Fv zgT?HoM0R-egA~3elw-!416y|Lrz-Ta&XcmI$)X2GK{B<0Odjw?97wQx!r0)$&iQ%M zX-V#PQ}7@TEu9dmV#4R^v`>l>wVxX#|6Hf3$fs~e1h-k{o$3>HSt6Ioh^eJuC~mGW z(7$hEA!5M@ij+KC6&qzCN&g1d_PwJW{0GhVU(Ml2 zvI+k|;>G+ytN-6k@2Mz0{AW*sq6z=Og7*4KyYN5ETiBadC5V6WH~(?kgiEl9`oFqi z&YkfMT(~jIr0koeZ7kfzb)@B>x0aDv7wO_t6#MbWRKj}HUWyz<(F6wHej_afGbJBy z`nlEviYbn}k^lKnFgLnE0&E)4Z|nTmO#eF(kG+IUua_g4JPL(IapC9N7w`%|B+eX9 z+>g_XNaz|zq*L&lB3ROF`Kj^@`dQiP30CB7?}_VuQIGRa3sF+xbVd?ki+3YbcDNzW zO}`||fKCISa=NmeUUnc?Pa&@cyNeqgl(c5xSoQnNT~Var)^#-Qa-+XTM9yilG9|g{ zU&Z=QoA$BB(dzd(;PsW%bCL;mM+`g~@$KEo;^JE 
zTwx1xFpnNq2mRBoe(-YN4E}$ew*TEvFtzSd{olM2P*JfzJPZr}mum>8{nz}WBr(=Z z++TFToK9}rJd%D`Cy7<1KQw1UCts(SKdBG;2tE)T{78jMK6u;W(;_Aj*unIR5?akazwYn?#-Rs^%YVk^op~2TvuED;Nktw~-bk zO5g7C!7SL6V5*KPmMwf zm!vM%VA5d1U%Maklm&qvLdf#}eQlpi$fQkveSMPXhx5+i{&%$W^lJM0uUeyz0aQo~ z|Lzl}s%~VY Date: Sat, 2 Sep 2023 11:40:12 +0300 Subject: [PATCH 39/57] Update doc.md --- src/genbench/tasks/europarl_dbca_splits/doc.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/genbench/tasks/europarl_dbca_splits/doc.md b/src/genbench/tasks/europarl_dbca_splits/doc.md index 879242a..a32d0bf 100644 --- a/src/genbench/tasks/europarl_dbca_splits/doc.md +++ b/src/genbench/tasks/europarl_dbca_splits/doc.md @@ -28,7 +28,7 @@ print(task.comdiv1_de.evaluate_predictions( ) ) ``` -To compare a model's capacity to generalise, we assess how much the translation accuracy decreases when the compound divergence between train and test sets increases. We keep atom distributions the same between train and test sets to make generalisation possible in principle. This means we should evaluate each model on both low- and high-compound-divergence data splits. To compute the generalisation score as described in the accompanying paper, train two systems on the splits with compound divergence values 0 and 1 (e.g. subtasks "comdiv0_de" and "comdiv1_de"), and take the ratio of the chrF2++ scores. +To compare a model's capacity to generalise, we assess how much the translation accuracy decreases when the compound divergence between train and test sets increases. We keep atom distributions the same between train and test sets to make generalisation possible in principle. This means we should evaluate each model on both low- and high-compound-divergence data splits. To compute the generalisation score as described in the accompanying paper, train two systems on the splits with compound divergence values 0 and 1 (e.g. 
subtasks "comdiv0_de" and "comdiv1_de"), and take the ratio of the chrF2++ scores: `task.comdiv1_de.evaluate_predictions(predictions_comdiv1_de, gold_comdiv1_de) / task.comdiv0_de.evaluate_predictions(predictions_comdiv0_de, gold_comdiv0_de)` #### Using your other data sets: To compute the atom and compound divergences for any pair of training (pre-training, training and/or fine-tuning) and test data sets, use method `EuroparlDbcaSplitsComdiv0De.divergence`. To create the atom and compound distributions of the training and test sets, the frequencies of each atom and compound in each set need to be first counted. The vectors that represent the atom and compound distributions of the train/test sets are inputted to the method to calculate the divergences: @@ -59,4 +59,6 @@ The original data source is `https://opus.nlpl.eu/Europarl.php` Our goal was to create a benchmark that tests generalisation to novel dependency relations in a comprehensive way, not selecting some specific types of dependency relations and leaving out other types. However, memory requirements of the data splitting algorithm did not permit us to use all of the atoms and compounds in the distribution divergence calculations, so we opted to leave out the most frequent and the most infrequent lemmas, and the dependency relations that include them, which probably affects the results. ## GenBench Eval card +The motivation is primarily intrinsic: it is important to assess if translation models learn the systematic rules that characterise natural language, in order to get some understanding how the models work. Another motivation is practical; compositional generalisation is important for the practical reason that it would make the models robust. The type of the generalisation is compositional, and the shift type is covariate, since the input data distribution changes but the task remains otherwise the same. 
Shift source is partitioned natural data, since we do not use any artificial data, but the train-test split is artificial. Lastly, the shift locus in our experiments is train-test, but the method and benchmark could also possibly be used as a finetune train-test benchmark, by finetuning a pretrained model on the training set. + ![GenBench Eval Card](eval_card.png) From 8d4ad5bc96e0de6f839df4955b678a473921a0d3 Mon Sep 17 00:00:00 2001 From: drndr Date: Sat, 2 Sep 2023 13:18:21 +0200 Subject: [PATCH 40/57] update dataset links and cfgs --- .../codesearchnet_adv/config.jsonnet | 10 ++++--- .../codesearchnet_go/config.jsonnet | 8 +++--- .../codesearchnet_java/config.jsonnet | 8 +++--- .../codesearchnet_javascript/config.jsonnet | 8 +++--- .../codesearchnet_php/config.jsonnet | 8 +++--- .../codesearchnet_ruby/config.jsonnet | 8 +++--- .../tasks/nl_codesearch_mrr/config.jsonnet | 6 +++-- .../{webquery => cosqa}/__init__.py | 0 .../{webquery => cosqa}/config.jsonnet | 12 +++++---- .../{webquery => cosqa}/doc.md | 0 .../{webquery => cosqa}/task.py | 2 +- src/genbench/tasks/nl_codesearch_mrr/doc.md | 26 +++++++++++-------- .../statcodesearch/config.jsonnet | 8 +++--- 13 files changed, 63 insertions(+), 41 deletions(-) rename src/genbench/tasks/nl_codesearch_mrr/{webquery => cosqa}/__init__.py (100%) rename src/genbench/tasks/nl_codesearch_mrr/{webquery => cosqa}/config.jsonnet (57%) rename src/genbench/tasks/nl_codesearch_mrr/{webquery => cosqa}/doc.md (100%) rename src/genbench/tasks/nl_codesearch_mrr/{webquery => cosqa}/task.py (99%) diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet index 842d1c1..4272171 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/config.jsonnet @@ -1,7 +1,7 @@ { name: 'Natural Language Codesearch Ranking (codesearchnet_adv)', - description: 'Natural 
Language Codesearch Ranking (codesearchnet_adv) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift', + description: 'Natural Language Codesearch Ranking (codesearchnet_adv) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness against covariate shifts', keywords: [ 'codesearch', @@ -15,14 +15,16 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + test: 'https://zenodo.org/record/8310891/files/test_adv.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet index 9c98587..990651b 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/config.jsonnet @@ -14,14 +14,16 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_go/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + test: 'https://zenodo.org/record/8310891/files/test_go.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', }, has_validation_set: false, diff --git 
a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet index 53ee70d..e97580e 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/config.jsonnet @@ -14,14 +14,16 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_java/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + test: 'https://zenodo.org/record/8310891/files/test_java.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet index ef89c60..3a691cb 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/config.jsonnet @@ -14,14 +14,16 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_javascript/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + test: 'https://zenodo.org/record/8310891/files/test_javascript.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet 
b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet index fb59296..3f12d27 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/config.jsonnet @@ -14,14 +14,16 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_php/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/clf/codesearchnet_adv/train_sample_cbt.jsonl', + test: 'https://zenodo.org/record/8310891/files/test_php.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet index 880a7fb..e3d7582 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/config.jsonnet @@ -14,14 +14,16 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_ruby/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + test: 'https://zenodo.org/record/8310891/files/test_ruby.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet index 80355cb..ee9854d 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet +++ 
b/src/genbench/tasks/nl_codesearch_mrr/config.jsonnet @@ -10,13 +10,15 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], subtasks_order: [ 'codesearchnet_adv', - 'webquery', + 'cosqa', 'codesearchnet_ruby', 'codesearchnet_go', 'codesearchnet_java', diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/__init__.py b/src/genbench/tasks/nl_codesearch_mrr/cosqa/__init__.py similarity index 100% rename from src/genbench/tasks/nl_codesearch_mrr/webquery/__init__.py rename to src/genbench/tasks/nl_codesearch_mrr/cosqa/__init__.py diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/cosqa/config.jsonnet similarity index 57% rename from src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet rename to src/genbench/tasks/nl_codesearch_mrr/cosqa/config.jsonnet index d099231..846e115 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/webquery/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/cosqa/config.jsonnet @@ -1,7 +1,7 @@ { - name: 'Natural Language Codesearch Ranking (webquery)', + name: 'Natural Language Codesearch Ranking (cosqa)', - description: 'Natural Language Codesearch Ranking (webquery) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures robustness in covariate shift', + description: 'Natural Language Codesearch Ranking (cosqa) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures robustness against covariate shifts', keywords: [ 'codesearch', @@ -15,14 +15,16 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/webquery/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + test: 'https://zenodo.org/record/8310891/files/test_cosqa.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', }, has_validation_set: false, diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/doc.md b/src/genbench/tasks/nl_codesearch_mrr/cosqa/doc.md similarity index 100% rename from src/genbench/tasks/nl_codesearch_mrr/webquery/doc.md rename to src/genbench/tasks/nl_codesearch_mrr/cosqa/doc.md diff --git a/src/genbench/tasks/nl_codesearch_mrr/webquery/task.py b/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py similarity index 99% rename from src/genbench/tasks/nl_codesearch_mrr/webquery/task.py rename to src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py index 17d40a3..37c5307 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/webquery/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py @@ -8,7 +8,7 @@ from genbench import Task -class NlCodesearchMrrWebquery(Task): +class NlCodesearchMrrCosqa(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: """Create the dataset adding n distractor pair (original comment, random code snippet) for ranking. 
diff --git a/src/genbench/tasks/nl_codesearch_mrr/doc.md b/src/genbench/tasks/nl_codesearch_mrr/doc.md index 3fc22bd..3cf5ad0 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/doc.md +++ b/src/genbench/tasks/nl_codesearch_mrr/doc.md @@ -10,23 +10,25 @@ Given n number of code comment pairs (1 true pair and n-1 distractor pair where ## Data Source **CodeSearchNet** : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ **CodeSearchNet Adv** : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ -**WebQuery** : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in the CodeXGLUE benchmark suite: https://github.com/microsoft/CodeXGLUE \ +**CoSQA** : Python codesnippets from the CodeSearchNet dataset paired with real world user search engine queries, introduced in https://arxiv.org/pdf/2105.13239.pdf \ **StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public project on the Open Science Framework (OSF) by the submission authors -During evaluation for each true code-comment pair we create n number of distractors where the comment is matched with a random code snippet. The distractor samples are sampled consistently by setting the random seed in the get_dataset_raw function +For each comment in each subset we sampled randomly another code snippet from given subset, to create a fully balanced binary classification dataset. \ +For the dataset statistics we only consider the positive (matching) pairs. 
\ **Dataset Size**:\ *Finetuning set:* \ - -CodeSearchNet Adv train set 251k \ + -CodeSearchNet Adv train set 251820 \ *Test sets:* \ - -CodeSearchNet Adv test set 19k \ - -WebQuery test set 1k \ - -CodeSearchNet Ruby test set 2k \ - -CodeSearchNet Go test set 14k \ - -CodeSearchNet Java test set 26k \ - -CodeSearchNet Javascript test set 6k \ - -CodeSearchNet PHP test set 28k \ - -StatCodeSearch test set TBD + -CodeSearchNet Adv test set 19210 \ + -CoSQA 10293\ + -CodeSearchNet Ruby 2279\ + -CodeSearchNet Go 14291\ + -CodeSearchNet Java 26909\ + -CodeSearchNet Javascript 6483\ + -CodeSearchNet PHP 29391\ + -StatCodeSearch 1070 \ + -Combined test set 109926 ## Limitations and Bias TBD @@ -37,3 +39,5 @@ TBD Husain, H., Wu, H. H., Gazit, T., Allamanis, M., & Brockschmidt, M. (2019). Codesearchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436. Lu, S., Guo, D., Ren, S., Huang, J., Svyatkovskiy, A., Blanco, A., Shujie, L. I. U. (2021, June). CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1). + +Huang J., Tang D., Shou L., Gong M., Xu K., Jiang D., Zhou M., Duan N. (2021) CoSQA: 20,000+ web queries for code search and question answering. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing.
diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet index 27927bb..0ffe3e7 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/config.jsonnet @@ -15,14 +15,16 @@ authors: [ 'Andor Diera', 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', 'Florian Sihler', - + 'Ansgar Scherp', ], data_source: { type: 'manual', - test: 'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/statcodesearch/test_sample_cbt.jsonl', - train:'https://raw.githubusercontent.com/drndr/genbench_ds/master/sample_data/mrr/codesearchnet_adv/train_sample_cbt.jsonl', + test: 'https://zenodo.org/record/8310891/files/test_statcodesearch.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', }, has_validation_set: false, From 42e2ca38dd9728854af8789267d791cbae86665a Mon Sep 17 00:00:00 2001 From: Anssi Moisio Date: Sat, 2 Sep 2023 15:08:01 +0300 Subject: [PATCH 41/57] updated europarl_dbca_splits/comdiv1_fr/doc.md --- .../europarl_dbca_splits/comdiv1_fr/doc.md | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/doc.md b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/doc.md index 50a2694..eda471f 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/doc.md +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/doc.md @@ -1,19 +1,3 @@ # Europarl DBCA splits (comdiv1_fr) -## Abstract -*Copy the abstract of your accompanying paper for this task here Europarl DBCA splits (comdiv1_fr).* - -## Examples -*Give some examples of the Europarl DBCA splits (comdiv1_fr).* - -## Usage -*Describe how to load your task and what is required for evaluation, if anything.* - -## Data Source -*Describe the data source for this Europarl DBCA splits (comdiv1_fr).* - -## Limitations and Bias -*Note any 
known limitations or biases that the Europarl DBCA splits (comdiv1_fr) has, with links and references if possible.* - -## GenBench Eval card -*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. +see ../doc.md From 0f6cfe861fa2fc14c98e557cb4b8f0ed0834e9c4 Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 4 Sep 2023 14:21:47 +0200 Subject: [PATCH 42/57] remove imported chunked method --- .../codesearchnet_adv/task.py | 75 +++++++++++++++---- .../codesearchnet_go/task.py | 60 ++++++++++----- .../codesearchnet_java/task.py | 60 ++++++++++----- .../codesearchnet_javascript/task.py | 60 ++++++++++----- .../codesearchnet_php/task.py | 61 ++++++++++----- .../codesearchnet_ruby/task.py | 60 ++++++++++----- .../tasks/nl_codesearch_mrr/cosqa/task.py | 62 ++++++++++----- .../nl_codesearch_mrr/statcodesearch/task.py | 70 +++++++++++------ .../statcodesearch/test_mrr_task.py | 36 +++++++++ 9 files changed, 397 insertions(+), 147 deletions(-) create mode 100644 src/genbench/tasks/nl_codesearch_mrr/statcodesearch/test_mrr_task.py diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py index 75788df..63b3ac3 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py @@ -1,14 +1,34 @@ import random from typing import Dict, List - import datasets import numpy as np -from more_itertools import chunked - from genbench import Task -# @Task.register("nl_codesearch_mrr:codesearchnet_adv") this doesnt seem to work +def chunked(iterable, chunk_size): + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] + + if chunk: + yield chunk + class NlCodesearchMrrCodesearchnetAdv(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: """Create the dataset adding n distractor pair (original comment, random code snippet) for ranking. @@ -28,31 +48,54 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: output: Dict[str, datasets.Dataset] = {} # Set random seed for consistency random.seed(42) - # Create 49 distractors for each item + # Create distractors for each item for split, dataset in raw_datasets.items(): if split == "test": + # Convert dataset to list for easier manipulation + dataset_list = list(dataset) + + new_data = [] + + for idx, item in enumerate(dataset_list): + new_data.append(item) + + # Create other_items list once and then simply exclude the current item during sampling + other_items = dataset_list[:idx] + dataset_list[idx+1:] + random_items = random.sample(other_items, n_distractors) + + input_parts = item["input"].split("[CODESPLIT]") + + for random_item in random_items: + random_input_parts = random_item["input"].split("[CODESPLIT]") + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + new_data.append(new_item) + + # Convert list back to HuggingFace dataset + output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) + # Create negative samples for training + elif split == "train": new_dataset = datasets.Dataset.from_dict({}) for item in dataset: # Add comment-code pair to new dataset new_dataset = new_dataset.add_item(item) other_items = [other_item for other_item in dataset if other_item != item] # Randomly select 49 other items - random_items = 
random.sample(other_items, n_distractors) + random_items = random.sample(other_items, 1) # Split input into comment and code - input_parts = item["input"].split("[SEP]") - for random_item in random_items: - # Split random input into comment and code - random_input_parts = random_item["input"].split("[SEP]") - # Combine the "input" fields of the original and random items - new_input = input_parts[0] + "[SEP]" + random_input_parts[1] - new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} - # Add distractor comment-code pair to new dataset - new_dataset = new_dataset.add_item(new_item) + input_parts = item["input"].split("[CODESPLIT]") + # Split random input into comment and code + random_input_parts = random_item["input"].split("[CODESPLIT]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add negative sample comment-code pair to new dataset + new_dataset = new_dataset.add_item(new_item) output[split] = new_dataset else: output[split] = dataset return output - + def evaluate_predictions( self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors ) -> Dict[str, float]: diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py index a93723e..c4804be 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py @@ -1,12 +1,32 @@ import random from typing import Dict, List - import datasets import numpy as np -from more_itertools import chunked - from genbench import Task +def chunked(iterable, chunk_size): + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. 
+ + Returns: + A generator that yields chunks of the iterable. + """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] + + if chunk: + yield chunk class NlCodesearchMrrCodesearchnetGo(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -30,24 +50,28 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: # Create 49 distractors for each item for split, dataset in raw_datasets.items(): if split == "test": - new_dataset = datasets.Dataset.from_dict({}) - for item in dataset: - # Add comment-code pair to new dataset - new_dataset = new_dataset.add_item(item) - other_items = [other_item for other_item in dataset if other_item != item] - # Randomly select 49 other items + # Convert dataset to list for easier manipulation + dataset_list = list(dataset) + + new_data = [] + + for idx, item in enumerate(dataset_list): + new_data.append(item) + + # Create other_items list once and then simply exclude the current item during sampling + other_items = dataset_list[:idx] + dataset_list[idx+1:] random_items = random.sample(other_items, n_distractors) - # Split input into comment and code - input_parts = item["input"].split("[SEP]") + + input_parts = item["input"].split("[CODESPLIT]") + for random_item in random_items: - # Split random input into comment and code - random_input_parts = random_item["input"].split("[SEP]") - # Combine the "input" fields of the original and random items - new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + random_input_parts = random_item["input"].split("[CODESPLIT]") + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} - # Add distractor comment-code pair to new dataset - new_dataset = new_dataset.add_item(new_item) - output[split] = 
new_dataset + new_data.append(new_item) + + # Convert list back to HuggingFace dataset + output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) else: output[split] = dataset return output diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py index bed70c8..fa79e43 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py @@ -1,12 +1,32 @@ import random from typing import Dict, List - import datasets import numpy as np -from more_itertools import chunked - from genbench import Task +def chunked(iterable, chunk_size): + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. + """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] + + if chunk: + yield chunk class NlCodesearchMrrCodesearchnetJava(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -30,24 +50,28 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: # Create 49 distractors for each item for split, dataset in raw_datasets.items(): if split == "test": - new_dataset = datasets.Dataset.from_dict({}) - for item in dataset: - # Add comment-code pair to new dataset - new_dataset = new_dataset.add_item(item) - other_items = [other_item for other_item in dataset if other_item != item] - # Randomly select 49 other items + # Convert dataset to list for easier manipulation + dataset_list = list(dataset) + + new_data = [] + + for idx, item in enumerate(dataset_list): + new_data.append(item) + + # Create other_items list once and then simply exclude 
the current item during sampling + other_items = dataset_list[:idx] + dataset_list[idx+1:] random_items = random.sample(other_items, n_distractors) - # Split input into comment and code - input_parts = item["input"].split("[SEP]") + + input_parts = item["input"].split("[CODESPLIT]") + for random_item in random_items: - # Split random input into comment and code - random_input_parts = random_item["input"].split("[SEP]") - # Combine the "input" fields of the original and random items - new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + random_input_parts = random_item["input"].split("[CODESPLIT]") + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} - # Add distractor comment-code pair to new dataset - new_dataset = new_dataset.add_item(new_item) - output[split] = new_dataset + new_data.append(new_item) + + # Convert list back to HuggingFace dataset + output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) else: output[split] = dataset return output diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py index ffdb4ff..abbac4e 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py @@ -1,12 +1,32 @@ import random from typing import Dict, List - import datasets import numpy as np -from more_itertools import chunked - from genbench import Task +def chunked(iterable, chunk_size): + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] + + if chunk: + yield chunk class NlCodesearchMrrCodesearchnetJavascript(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -30,24 +50,28 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: # Create 49 distractors for each item for split, dataset in raw_datasets.items(): if split == "test": - new_dataset = datasets.Dataset.from_dict({}) - for item in dataset: - # Add comment-code pair to new dataset - new_dataset = new_dataset.add_item(item) - other_items = [other_item for other_item in dataset if other_item != item] - # Randomly select 49 other items + # Convert dataset to list for easier manipulation + dataset_list = list(dataset) + + new_data = [] + + for idx, item in enumerate(dataset_list): + new_data.append(item) + + # Create other_items list once and then simply exclude the current item during sampling + other_items = dataset_list[:idx] + dataset_list[idx+1:] random_items = random.sample(other_items, n_distractors) - # Split input into comment and code - input_parts = item["input"].split("[SEP]") + + input_parts = item["input"].split("[CODESPLIT]") + for random_item in random_items: - # Split random input into comment and code - random_input_parts = random_item["input"].split("[SEP]") - # Combine the "input" fields of the original and random items - new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + random_input_parts = random_item["input"].split("[CODESPLIT]") + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} - # Add distractor comment-code pair to new dataset - new_dataset = new_dataset.add_item(new_item) - output[split] = new_dataset + new_data.append(new_item) + + # Convert list 
back to HuggingFace dataset + output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) else: output[split] = dataset return output diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py index 3790767..e8fd995 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py @@ -1,12 +1,35 @@ import random from typing import Dict, List +import random +from typing import Dict, List import datasets import numpy as np -from more_itertools import chunked - from genbench import Task +def chunked(iterable, chunk_size): + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. + """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] + + if chunk: + yield chunk class NlCodesearchMrrCodesearchnetPhp(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -30,24 +53,28 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: # Create 49 distractors for each item for split, dataset in raw_datasets.items(): if split == "test": - new_dataset = datasets.Dataset.from_dict({}) - for item in dataset: - # Add comment-code pair to new dataset - new_dataset = new_dataset.add_item(item) - other_items = [other_item for other_item in dataset if other_item != item] - # Randomly select 49 other items + # Convert dataset to list for easier manipulation + dataset_list = list(dataset) + + new_data = [] + + for idx, item in enumerate(dataset_list): + new_data.append(item) + + # Create other_items list once and then simply exclude the current item 
during sampling + other_items = dataset_list[:idx] + dataset_list[idx+1:] random_items = random.sample(other_items, n_distractors) - # Split input into comment and code - input_parts = item["input"].split("[SEP]") + + input_parts = item["input"].split("[CODESPLIT]") + for random_item in random_items: - # Split random input into comment and code - random_input_parts = random_item["input"].split("[SEP]") - # Combine the "input" fields of the original and random items - new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + random_input_parts = random_item["input"].split("[CODESPLIT]") + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} - # Add distractor comment-code pair to new dataset - new_dataset = new_dataset.add_item(new_item) - output[split] = new_dataset + new_data.append(new_item) + + # Convert list back to HuggingFace dataset + output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) else: output[split] = dataset return output diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py index fe7761c..33e71c3 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py @@ -1,12 +1,32 @@ import random from typing import Dict, List - import datasets import numpy as np -from more_itertools import chunked - from genbench import Task +def chunked(iterable, chunk_size): + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] + + if chunk: + yield chunk class NlCodesearchMrrCodesearchnetRuby(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -30,24 +50,28 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: # Create 49 distractors for each item for split, dataset in raw_datasets.items(): if split == "test": - new_dataset = datasets.Dataset.from_dict({}) - for item in dataset: - # Add comment-code pair to new dataset - new_dataset = new_dataset.add_item(item) - other_items = [other_item for other_item in dataset if other_item != item] - # Randomly select 49 other items + # Convert dataset to list for easier manipulation + dataset_list = list(dataset) + + new_data = [] + + for idx, item in enumerate(dataset_list): + new_data.append(item) + + # Create other_items list once and then simply exclude the current item during sampling + other_items = dataset_list[:idx] + dataset_list[idx+1:] random_items = random.sample(other_items, n_distractors) - # Split input into comment and code - input_parts = item["input"].split("[SEP]") + + input_parts = item["input"].split("[CODESPLIT]") + for random_item in random_items: - # Split random input into comment and code - random_input_parts = random_item["input"].split("[SEP]") - # Combine the "input" fields of the original and random items - new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + random_input_parts = random_item["input"].split("[CODESPLIT]") + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} - # Add distractor comment-code pair to new dataset - new_dataset = new_dataset.add_item(new_item) - output[split] = new_dataset + new_data.append(new_item) + + # Convert list back to 
HuggingFace dataset + output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) else: output[split] = dataset return output diff --git a/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py b/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py index 37c5307..03b238c 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py @@ -1,12 +1,32 @@ import random from typing import Dict, List - import datasets import numpy as np -from more_itertools import chunked - from genbench import Task +def chunked(iterable, chunk_size): + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. + """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] + + if chunk: + yield chunk class NlCodesearchMrrCosqa(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -27,27 +47,31 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: output: Dict[str, datasets.Dataset] = {} # Set random seed for consistency random.seed(42) - # Create 49 distractors for each item + # Create distractors for each item for split, dataset in raw_datasets.items(): if split == "test": - new_dataset = datasets.Dataset.from_dict({}) - for item in dataset: - # Add comment-code pair to new dataset - new_dataset = new_dataset.add_item(item) - other_items = [other_item for other_item in dataset if other_item != item] - # Randomly select 49 other items + # Convert dataset to list for easier manipulation + dataset_list = list(dataset) + + new_data = [] + + for idx, item in enumerate(dataset_list): + new_data.append(item) + + # Create other_items list once and then simply exclude the 
current item during sampling + other_items = dataset_list[:idx] + dataset_list[idx+1:] random_items = random.sample(other_items, n_distractors) - # Split input into comment and code - input_parts = item["input"].split("[SEP]") + + input_parts = item["input"].split("[CODESPLIT]") + for random_item in random_items: - # Split random input into comment and code - random_input_parts = random_item["input"].split("[SEP]") - # Combine the "input" fields of the original and random items - new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + random_input_parts = random_item["input"].split("[CODESPLIT]") + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} - # Add distractor comment-code pair to new dataset - new_dataset = new_dataset.add_item(new_item) - output[split] = new_dataset + new_data.append(new_item) + + # Convert list back to HuggingFace dataset + output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) else: output[split] = dataset return output diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py index f3ffd6b..7e09704 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py @@ -1,15 +1,35 @@ import random from typing import Dict, List - import datasets import numpy as np -from more_itertools import chunked - from genbench import Task +def chunked(iterable, chunk_size): + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] + + if chunk: + yield chunk -class NlCodesearchClfStatcodesearch(Task): - def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: +class NlCodesearchMrrStatcodesearch(Task): + def get_dataset_raw(self,n_distractors) -> Dict[str, datasets.Dataset]: """Create the dataset adding n distractor pair (original comment, random code snippet) for ranking. Args: @@ -22,36 +42,40 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. The train split only contains the original dataset. """ - # Load the raw datasets raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() output: Dict[str, datasets.Dataset] = {} - # Set random seed for consistency random.seed(42) - # Create 49 distractors for each item + for split, dataset in raw_datasets.items(): if split == "test": - new_dataset = datasets.Dataset.from_dict({}) - for item in dataset: - # Add comment-code pair to new dataset - new_dataset = new_dataset.add_item(item) - other_items = [other_item for other_item in dataset if other_item != item] - # Randomly select 49 other items + # Convert dataset to list for easier manipulation + dataset_list = list(dataset) + + new_data = [] + + for idx, item in enumerate(dataset_list): + new_data.append(item) + + # Create other_items list once and then simply exclude the current item during sampling + other_items = dataset_list[:idx] + dataset_list[idx+1:] random_items = random.sample(other_items, n_distractors) - # Split input into comment and code - input_parts = item["input"].split("[SEP]") + + input_parts = item["input"].split("[CODESPLIT]") + for random_item in random_items: - # Split random input into comment and code - 
random_input_parts = random_item["input"].split("[SEP]") - # Combine the "input" fields of the original and random items - new_input = input_parts[0] + "[SEP]" + random_input_parts[1] + random_input_parts = random_item["input"].split("[CODESPLIT]") + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} - # Add distractor comment-code pair to new dataset - new_dataset = new_dataset.add_item(new_item) - output[split] = new_dataset + new_data.append(new_item) + + # Convert list back to HuggingFace dataset + output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) else: output[split] = dataset + return output + def evaluate_predictions( self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors ) -> Dict[str, float]: diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/test_mrr_task.py b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/test_mrr_task.py new file mode 100644 index 0000000..90c0cb4 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/test_mrr_task.py @@ -0,0 +1,36 @@ +import dataclass_factory +from task import NlCodesearchMrrStatcodesearch + +from genbench.task_config import TaskConfig +from genbench.utils.file import load_jsonnet + + +def main(): + high_mrr_test_list = [] + for i in range(1, 11): + score_dict = dict.fromkeys(["score"]) + score_dict["score"] = 1 / i + high_mrr_test_list.append(score_dict) + + low_mrr_test_list = [] + for i in range(1, 11): + score_dict = dict.fromkeys(["score"]) + score_dict["score"] = 1 * i + low_mrr_test_list.append(score_dict) + + cfg_file = load_jsonnet("./config.jsonnet") + factory = dataclass_factory.Factory() + config: TaskConfig = factory.load(cfg_file, TaskConfig) + + task = NlCodesearchMrrStatcodesearch(config, "nl_codesearch_mrr") + output_ds = task.get_dataset_raw(9) + + high_results = 
task.evaluate_predictions(high_mrr_test_list, output_ds, 9) + print(high_results) + + low_results = task.evaluate_predictions(low_mrr_test_list, output_ds, 9) + print(low_results) + + +if __name__ == "__main__": + main() From b0677b08b078282d5c79a5b4f72a574387de56d3 Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 4 Sep 2023 18:49:37 +0200 Subject: [PATCH 43/57] fix style issues --- .../codesearchnet_adv/task.py | 47 +++++++++--------- .../codesearchnet_go/task.py | 46 +++++++++-------- .../codesearchnet_java/task.py | 46 +++++++++-------- .../codesearchnet_javascript/task.py | 46 +++++++++-------- .../codesearchnet_php/task.py | 47 +++++++++--------- .../codesearchnet_ruby/task.py | 46 +++++++++-------- .../tasks/nl_codesearch_mrr/cosqa/task.py | 46 +++++++++-------- .../nl_codesearch_mrr/statcodesearch/task.py | 49 ++++++++++--------- 8 files changed, 200 insertions(+), 173 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py index 63b3ac3..269be7d 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py @@ -1,33 +1,36 @@ import random from typing import Dict, List + import datasets import numpy as np + from genbench import Task def chunked(iterable, chunk_size): - """ - Split an iterable into chunks of a specified size. + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. + """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] - Args: - iterable: The iterable to be chunked. - chunk_size: The size of each chunk. 
+ if chunk: + yield chunk - Returns: - A generator that yields chunks of the iterable. - """ - if chunk_size <= 0: - raise ValueError("Chunk size must be greater than zero") - - chunk = [] - for item in iterable: - chunk.append(item) - if len(chunk) == chunk_size: - yield chunk - chunk = [] - - if chunk: - yield chunk class NlCodesearchMrrCodesearchnetAdv(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -60,7 +63,7 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: new_data.append(item) # Create other_items list once and then simply exclude the current item during sampling - other_items = dataset_list[:idx] + dataset_list[idx+1:] + other_items = dataset_list[:idx] + dataset_list[idx + 1 :] random_items = random.sample(other_items, n_distractors) input_parts = item["input"].split("[CODESPLIT]") @@ -95,7 +98,7 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: else: output[split] = dataset return output - + def evaluate_predictions( self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors ) -> Dict[str, float]: diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py index c4804be..84eff2b 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py @@ -1,32 +1,36 @@ import random from typing import Dict, List + import datasets import numpy as np + from genbench import Task + def chunked(iterable, chunk_size): - """ - Split an iterable into chunks of a specified size. + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] - Args: - iterable: The iterable to be chunked. - chunk_size: The size of each chunk. + if chunk: + yield chunk - Returns: - A generator that yields chunks of the iterable. - """ - if chunk_size <= 0: - raise ValueError("Chunk size must be greater than zero") - - chunk = [] - for item in iterable: - chunk.append(item) - if len(chunk) == chunk_size: - yield chunk - chunk = [] - - if chunk: - yield chunk class NlCodesearchMrrCodesearchnetGo(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -59,7 +63,7 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: new_data.append(item) # Create other_items list once and then simply exclude the current item during sampling - other_items = dataset_list[:idx] + dataset_list[idx+1:] + other_items = dataset_list[:idx] + dataset_list[idx + 1 :] random_items = random.sample(other_items, n_distractors) input_parts = item["input"].split("[CODESPLIT]") diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py index fa79e43..7f755ff 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py @@ -1,32 +1,36 @@ import random from typing import Dict, List + import datasets import numpy as np + from genbench import Task + def chunked(iterable, chunk_size): - """ - Split an iterable into chunks of a specified size. + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] - Args: - iterable: The iterable to be chunked. - chunk_size: The size of each chunk. + if chunk: + yield chunk - Returns: - A generator that yields chunks of the iterable. - """ - if chunk_size <= 0: - raise ValueError("Chunk size must be greater than zero") - - chunk = [] - for item in iterable: - chunk.append(item) - if len(chunk) == chunk_size: - yield chunk - chunk = [] - - if chunk: - yield chunk class NlCodesearchMrrCodesearchnetJava(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -59,7 +63,7 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: new_data.append(item) # Create other_items list once and then simply exclude the current item during sampling - other_items = dataset_list[:idx] + dataset_list[idx+1:] + other_items = dataset_list[:idx] + dataset_list[idx + 1 :] random_items = random.sample(other_items, n_distractors) input_parts = item["input"].split("[CODESPLIT]") diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py index abbac4e..d88eea5 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py @@ -1,32 +1,36 @@ import random from typing import Dict, List + import datasets import numpy as np + from genbench import Task + def chunked(iterable, chunk_size): - """ - Split an iterable into chunks of a specified size. + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] - Args: - iterable: The iterable to be chunked. - chunk_size: The size of each chunk. + if chunk: + yield chunk - Returns: - A generator that yields chunks of the iterable. - """ - if chunk_size <= 0: - raise ValueError("Chunk size must be greater than zero") - - chunk = [] - for item in iterable: - chunk.append(item) - if len(chunk) == chunk_size: - yield chunk - chunk = [] - - if chunk: - yield chunk class NlCodesearchMrrCodesearchnetJavascript(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -59,7 +63,7 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: new_data.append(item) # Create other_items list once and then simply exclude the current item during sampling - other_items = dataset_list[:idx] + dataset_list[idx+1:] + other_items = dataset_list[:idx] + dataset_list[idx + 1 :] random_items = random.sample(other_items, n_distractors) input_parts = item["input"].split("[CODESPLIT]") diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py index e8fd995..78af97f 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py @@ -1,35 +1,36 @@ import random from typing import Dict, List -import random -from typing import Dict, List import datasets import numpy as np + from genbench import Task + def chunked(iterable, chunk_size): - """ - Split an iterable into chunks of a specified size. + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] - Args: - iterable: The iterable to be chunked. - chunk_size: The size of each chunk. + if chunk: + yield chunk - Returns: - A generator that yields chunks of the iterable. - """ - if chunk_size <= 0: - raise ValueError("Chunk size must be greater than zero") - - chunk = [] - for item in iterable: - chunk.append(item) - if len(chunk) == chunk_size: - yield chunk - chunk = [] - - if chunk: - yield chunk class NlCodesearchMrrCodesearchnetPhp(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -62,7 +63,7 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: new_data.append(item) # Create other_items list once and then simply exclude the current item during sampling - other_items = dataset_list[:idx] + dataset_list[idx+1:] + other_items = dataset_list[:idx] + dataset_list[idx + 1 :] random_items = random.sample(other_items, n_distractors) input_parts = item["input"].split("[CODESPLIT]") diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py index 33e71c3..687ce20 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py @@ -1,32 +1,36 @@ import random from typing import Dict, List + import datasets import numpy as np + from genbench import Task + def chunked(iterable, chunk_size): - """ - Split an iterable into chunks of a specified size. + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] - Args: - iterable: The iterable to be chunked. - chunk_size: The size of each chunk. + if chunk: + yield chunk - Returns: - A generator that yields chunks of the iterable. - """ - if chunk_size <= 0: - raise ValueError("Chunk size must be greater than zero") - - chunk = [] - for item in iterable: - chunk.append(item) - if len(chunk) == chunk_size: - yield chunk - chunk = [] - - if chunk: - yield chunk class NlCodesearchMrrCodesearchnetRuby(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -59,7 +63,7 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: new_data.append(item) # Create other_items list once and then simply exclude the current item during sampling - other_items = dataset_list[:idx] + dataset_list[idx+1:] + other_items = dataset_list[:idx] + dataset_list[idx + 1 :] random_items = random.sample(other_items, n_distractors) input_parts = item["input"].split("[CODESPLIT]") diff --git a/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py b/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py index 03b238c..f86b0a6 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py @@ -1,32 +1,36 @@ import random from typing import Dict, List + import datasets import numpy as np + from genbench import Task + def chunked(iterable, chunk_size): - """ - Split an iterable into chunks of a specified size. + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] - Args: - iterable: The iterable to be chunked. - chunk_size: The size of each chunk. + if chunk: + yield chunk - Returns: - A generator that yields chunks of the iterable. - """ - if chunk_size <= 0: - raise ValueError("Chunk size must be greater than zero") - - chunk = [] - for item in iterable: - chunk.append(item) - if len(chunk) == chunk_size: - yield chunk - chunk = [] - - if chunk: - yield chunk class NlCodesearchMrrCosqa(Task): def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: @@ -59,7 +63,7 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: new_data.append(item) # Create other_items list once and then simply exclude the current item during sampling - other_items = dataset_list[:idx] + dataset_list[idx+1:] + other_items = dataset_list[:idx] + dataset_list[idx + 1 :] random_items = random.sample(other_items, n_distractors) input_parts = item["input"].split("[CODESPLIT]") diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py index 7e09704..bb6180e 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py @@ -1,35 +1,39 @@ import random from typing import Dict, List + import datasets import numpy as np + from genbench import Task + def chunked(iterable, chunk_size): - """ - Split an iterable into chunks of a specified size. + """ + Split an iterable into chunks of a specified size. + + Args: + iterable: The iterable to be chunked. + chunk_size: The size of each chunk. + + Returns: + A generator that yields chunks of the iterable. 
+ """ + if chunk_size <= 0: + raise ValueError("Chunk size must be greater than zero") + + chunk = [] + for item in iterable: + chunk.append(item) + if len(chunk) == chunk_size: + yield chunk + chunk = [] - Args: - iterable: The iterable to be chunked. - chunk_size: The size of each chunk. + if chunk: + yield chunk - Returns: - A generator that yields chunks of the iterable. - """ - if chunk_size <= 0: - raise ValueError("Chunk size must be greater than zero") - - chunk = [] - for item in iterable: - chunk.append(item) - if len(chunk) == chunk_size: - yield chunk - chunk = [] - - if chunk: - yield chunk class NlCodesearchMrrStatcodesearch(Task): - def get_dataset_raw(self,n_distractors) -> Dict[str, datasets.Dataset]: + def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: """Create the dataset adding n distractor pair (original comment, random code snippet) for ranking. Args: @@ -57,7 +61,7 @@ def get_dataset_raw(self,n_distractors) -> Dict[str, datasets.Dataset]: new_data.append(item) # Create other_items list once and then simply exclude the current item during sampling - other_items = dataset_list[:idx] + dataset_list[idx+1:] + other_items = dataset_list[:idx] + dataset_list[idx + 1 :] random_items = random.sample(other_items, n_distractors) input_parts = item["input"].split("[CODESPLIT]") @@ -75,7 +79,6 @@ def get_dataset_raw(self,n_distractors) -> Dict[str, datasets.Dataset]: return output - def evaluate_predictions( self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors ) -> Dict[str, float]: From e6e0e3511c69091faa5375091ae4c71ab2e9ec5f Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 6 Nov 2023 14:30:38 +0100 Subject: [PATCH 44/57] add example usage --- .../requirements-usage-example.txt | 4 + .../tasks/nl_codesearch_mrr/usage_example.py | 416 ++++++++++++++++++ 2 files changed, 420 insertions(+) create mode 100644 src/genbench/tasks/nl_codesearch_mrr/requirements-usage-example.txt create mode 100644 
src/genbench/tasks/nl_codesearch_mrr/usage_example.py diff --git a/src/genbench/tasks/nl_codesearch_mrr/requirements-usage-example.txt b/src/genbench/tasks/nl_codesearch_mrr/requirements-usage-example.txt new file mode 100644 index 0000000..ffb4c93 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/requirements-usage-example.txt @@ -0,0 +1,4 @@ +torch v. 2.1.0 +numpy v. 1.25.1 +tqdm v. 4.65.0 +transformers v. 4.32.0 \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_mrr/usage_example.py b/src/genbench/tasks/nl_codesearch_mrr/usage_example.py new file mode 100644 index 0000000..db5b768 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/usage_example.py @@ -0,0 +1,416 @@ +import argparse +import json +import logging +import random +import numpy as np +from tqdm import tqdm +from torch.utils.data import DataLoader + +import torch +from transformers import get_scheduler, AutoTokenizer, AutoModelForSequenceClassification, PreTrainedModel +from torch.optim import AdamW + + +########################################################## +# Data Loadig Utils +########################################################## +class Dataset(torch.utils.data.Dataset): + def __init__(self, features): + self.features = features + + def __getitem__(self, index): + return self.features[index] + + def __len__(self): + return len(self.features) + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + +def _convert_examples_to_features(comments, codes, labels, max_seq_length, + tokenizer, + cls_token='[CLS]', sep_token='[SEP]', pad_token=0, + eos_token='', + sequence_a_segment_id=0, sequence_b_segment_id=1, + cls_token_segment_id=1, pad_token_segment_id=0, + mask_padding_with_zero=True): + features = [] + 
for ex_index, (comment, code, label) in enumerate(zip(comments, codes, labels)): + # As was done in CodeBERT + tokens_comment = tokenizer.tokenize(comment)[:50] + tokens_code = tokenizer.tokenize(code) + + # update max_seq_length to account for [CLS], [SEP], [SEP] tokens (-3) + n_special_tokens = 3 + if cls_token is None: + n_special_tokens -= 1 + s_max_seq_length = max_seq_length - n_special_tokens + _truncate_seq_pair(tokens_comment, tokens_code, s_max_seq_length) + + # change sep for eos if no sep_token + if sep_token is None: + sep_token = eos_token + + # [SEP] inbetween and at the end + tokens = tokens_comment + [sep_token] + tokens_code + [sep_token] + # CLS at the beginning + if cls_token is not None: + tokens = [cls_token] + tokens + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # 1 for tokens, 0 for padding + input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # padding with 0 up to max_seq_length + padding_length = max_seq_length - len(input_ids) + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + + # check + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + + # convert to tensors + input_ids = torch.tensor(input_ids, dtype=torch.long) + input_mask = torch.tensor(input_mask, dtype=torch.long) + label = torch.tensor(label, dtype=torch.long) + + features.append({ + "input_ids": input_ids, + "attention_mask": input_mask, + "labels": label + }) + return features + + +def load_data(tokenizer, batch_size, seq_len, train_file): + # create dataset + comments = [] + codes = [] + labels = [] + skipped = 0 + + is_sep_token_set = tokenizer.sep_token is not None + is_cls_token_set = tokenizer.cls_token is not None + is_pad_token_set = tokenizer.pad_token is not None + is_eos_token_set = tokenizer.eos_token is not None + + with open(train_file, 'r', encoding='utf-8') as infile: + for line in infile: + try: + item = 
json.loads(line.strip()) + input = item['input'] + # split at [CODESPLIT] token + input = input.split('[CODESPLIT]') + if len(input) != 2: + # skip cases with more than one [SEP] token + logging.warning(f"Input contains more than one [CODESPLIT] token: {input}") + skipped += 1 + continue + # skip every sample that contains special tokens + if is_sep_token_set and ( + tokenizer.sep_token in input[0] or tokenizer.sep_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_cls_token_set and ( + tokenizer.cls_token in input[0] or tokenizer.cls_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_pad_token_set and ( + tokenizer.pad_token in input[0] or tokenizer.pad_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_eos_token_set and ( + tokenizer.eos_token in input[0] or tokenizer.eos_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + comments.append(input[0]) + codes.append(input[1]) + labels.append(item['target']) + except json.JSONDecodeError as e: + print(f"Error: JSON decoding failed - {e}") + continue + logging.info(f"Skipped {skipped} samples due to special tokens") + # tokenize + features = _convert_examples_to_features( + comments, + codes, + labels, + max_seq_length=seq_len, + tokenizer=tokenizer, + cls_token=tokenizer.cls_token, + sep_token=tokenizer.sep_token, + cls_token_segment_id=tokenizer.cls_token_id, + pad_token_segment_id=tokenizer.pad_token_id, + eos_token=tokenizer.eos_token, + ) + + # Convert to Dataset + features = Dataset(features) + + return DataLoader(features, batch_size=batch_size, shuffle=True) + + +############################################################## +# Fine-tune Model +############################################################## + +def train(model: PreTrainedModel, dataloader: DataLoader, 
args: argparse.Namespace): + """ + Fine-tune the model. + :param model: the pretrained model to be fine-tuned + :param dataloader: an iterable data loader + :param args: training arguments (and also some other arguments) + :return: the fine-tuned model + """ + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model.to(device) + model.train() + + num_training_steps = args.epochs * len(dataloader) + progress_bar = tqdm(range(num_training_steps)) + + optimizer = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay) + lr_scheduler = get_scheduler( + name="linear", + optimizer=optimizer, + num_warmup_steps=args.num_warmup_steps, + num_training_steps=num_training_steps + ) + + for epoch in range(args.epochs): + for batch in dataloader: + batch = {k: v.to(device) for k, v in batch.items()} + outputs = model(**batch) + loss = outputs.loss + loss.backward() + + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + progress_bar.update(1) + +########################################################### +# Evaluate Model +########################################################### + +def load_data_for_mrr(tokenizer, file): + # create dataset + comments = [] + codes = [] + labels = [] + skipped = 0 + + is_sep_token_set = tokenizer.sep_token is not None + is_cls_token_set = tokenizer.cls_token is not None + is_pad_token_set = tokenizer.pad_token is not None + is_eos_token_set = tokenizer.eos_token is not None + + with open(file, 'r', encoding='utf-8') as infile: + for line in infile: + try: + item = json.loads(line.strip()) + input = item['input'] + # split at [CODESPLIT] token + input = input.split('[CODESPLIT]') + if len(input) != 2: + # skip cases with more than one [SEP] token + logging.warning(f"Input contains more than one [CODESPLIT] token: {input}") + skipped += 1 + continue + # skip every sample that contains special tokens + if is_sep_token_set and ( + tokenizer.sep_token in input[0] or tokenizer.sep_token in 
input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_cls_token_set and ( + tokenizer.cls_token in input[0] or tokenizer.cls_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_pad_token_set and ( + tokenizer.pad_token in input[0] or tokenizer.pad_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_eos_token_set and ( + tokenizer.eos_token in input[0] or tokenizer.eos_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + comments.append(input[0]) + codes.append(input[1]) + labels.append(item['target']) + except json.JSONDecodeError as e: + print(f"Error: JSON decoding failed - {e}") + continue + logging.info(f"Skipped {skipped} samples due to special tokens") + + return comments, codes + +def mrr(model, tokenizer, file, args): + random.seed(42) + + # load data + comments, codes = load_data_for_mrr(tokenizer, file) + + # create mrr chunks with (default 99) distractors + + chunks = [] + for i, sample in enumerate(zip(comments, codes)): + comment, code = sample + codes_without_sample = codes[:i] + codes[i + 1:] + # select 99 random codes + distractors = random.sample(codes_without_sample, args.distractors) + # create samples + codes = [code] + distractors + comments = [comment] * len(codes) + labels = [1] + [0] * len(distractors) + # convert to features + features = _convert_examples_to_features(comments, codes, labels, + tokenizer=tokenizer, + max_seq_length=args.seq_len, + cls_token=tokenizer.cls_token, + sep_token=tokenizer.sep_token, + cls_token_segment_id=tokenizer.cls_token_id, + pad_token_segment_id=tokenizer.pad_token_id, + eos_token=tokenizer.eos_token) + + chunks.append(features) + + # make predictions for all chunks + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print('Using device:', device) + 
model.to(device) + model.eval() + + ranks = [] + for chunk in tqdm(chunks): + # calc correct sample (always the first one) + correct = chunk[0] + input_ids = correct['input_ids'].unsqueeze(0).to(device) + attention_mask = correct['attention_mask'].unsqueeze(0).to(device) + labels = correct['labels'].unsqueeze(0).to(device) + with torch.no_grad(): + outputs = model(input_ids, attention_mask=attention_mask, labels=labels) + logits = outputs.logits + correct_score = logits[0][0].item() + + # calc scores for the rest of the samples + scores = [] + # add correct score to scores + scores.append(correct_score) + # create batches of size args.batch_size + batch_size = args.batch_size + for i in range(1, len(chunk), batch_size): + batch = chunk[i:i + batch_size] + input_ids = torch.stack([sample['input_ids'] for sample in batch]).to(device) + attention_mask = torch.stack([sample['attention_mask'] for sample in batch]).to(device) + labels = torch.stack([sample['labels'] for sample in batch]).to(device) + with torch.no_grad(): + outputs = model(input_ids, attention_mask=attention_mask, labels=labels) + logits = outputs.logits + scores.extend(logits[:, 1].cpu().numpy().tolist()) + + rank = np.sum(np.array(scores) >= correct_score) + ranks.append(rank) + + mean_mrr = np.mean(1.0 / np.array(ranks)) + + return mean_mrr + +############################################################## +# Run example +############################################################## + +def main(): + """Main function.""" + # args + parser = argparse.ArgumentParser() + #parser.add_argument('--dataset', type=str, default='./codesearchnet_adv') + parser.add_argument('--model', default='roberta-base') + parser.add_argument('--epochs', type=int, default=5) + parser.add_argument('--batch_size', type=int, default=32) + parser.add_argument('--learning_rate', type=float, default=2e-5) + parser.add_argument('--weight_decay', type=float, default=0.01) + parser.add_argument('--num_warmup_steps', type=int, 
default=0) + parser.add_argument('--output_dir', type=str, default='models') + parser.add_argument('--seq_len', type=int, default=512, help='maximum sequence length') + parser.add_argument('--distractors', type=int, default=99, help='number of distractors per true pair') + parser.add_argument('--log_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='INFO') + + args = parser.parse_args() + + TRAIN_FILE = './codesearchnet_adv/train_adv_clf.jsonl' + + # logging + logging.basicConfig(level=args.log_level) + + # load tokenizer + logging.info('Loading model...') + tokenizer = AutoTokenizer.from_pretrained(args.model) + + # load data + logging.info('Loading data...') + dataloader = load_data(tokenizer, args.batch_size, args.seq_len, TRAIN_FILE) + + model = AutoModelForSequenceClassification.from_pretrained(args.model) + + # train + logging.info('Training...') + train(model, dataloader, args) + + # save model + logging.info('Saving model...') + model.save_pretrained(f'{args.output_dir}/{args.model}') + # also soave tokenizer + tokenizer.save_pretrained(f'{args.output_dir}/{args.model}') + + DS_FOLDER = './' + + FILES = [ + ['statcodesearch', 'test_statcodesearch'], + ['codesearchnet_adv', 'test_adv'], + ['codesearchnet_go', 'test_go'], + ['codesearchnet_java', 'test_java'], + ['codesearchnet_javascript', 'test_javascript'], + ['codesearchnet_php', 'test_php'], + ['codesearchnet_ruby', 'test_ruby'], + ['cosqa', 'test_cosqa'] + ] + + results = {} + for meta_data in FILES: + logging.info(f'Evaluating on {meta_data}...') + metrics = mrr(model, tokenizer, f'{DS_FOLDER}/mrr/{meta_data[0]}/{meta_data[1]}_mrr.jsonl', args) + results[meta_data[0]] = metrics + logging.info(f'Test results for {meta_data}: {metrics}') + + logging.info(f'Test results: {results}') + +if __name__ == '__main__': + main() + + + + + + From 407cb5674ee821795dd65a2df9e244686b64fca3 Mon Sep 17 00:00:00 2001 From: drndr Date: Mon, 6 Nov 2023 14:50:17 +0100 Subject: [PATCH 45/57] fix 
style in usage --- .../tasks/nl_codesearch_mrr/usage_example.py | 196 +++++++++--------- 1 file changed, 99 insertions(+), 97 deletions(-) diff --git a/src/genbench/tasks/nl_codesearch_mrr/usage_example.py b/src/genbench/tasks/nl_codesearch_mrr/usage_example.py index db5b768..7b108b1 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/usage_example.py +++ b/src/genbench/tasks/nl_codesearch_mrr/usage_example.py @@ -2,13 +2,13 @@ import json import logging import random -import numpy as np -from tqdm import tqdm -from torch.utils.data import DataLoader +import numpy as np import torch -from transformers import get_scheduler, AutoTokenizer, AutoModelForSequenceClassification, PreTrainedModel from torch.optim import AdamW +from torch.utils.data import DataLoader +from tqdm import tqdm +from transformers import AutoModelForSequenceClassification, AutoTokenizer, PreTrainedModel, get_scheduler ########################################################## @@ -38,13 +38,22 @@ def _truncate_seq_pair(tokens_a, tokens_b, max_length): tokens_b.pop() -def _convert_examples_to_features(comments, codes, labels, max_seq_length, - tokenizer, - cls_token='[CLS]', sep_token='[SEP]', pad_token=0, - eos_token='', - sequence_a_segment_id=0, sequence_b_segment_id=1, - cls_token_segment_id=1, pad_token_segment_id=0, - mask_padding_with_zero=True): +def _convert_examples_to_features( + comments, + codes, + labels, + max_seq_length, + tokenizer, + cls_token="[CLS]", + sep_token="[SEP]", + pad_token=0, + eos_token="", + sequence_a_segment_id=0, + sequence_b_segment_id=1, + cls_token_segment_id=1, + pad_token_segment_id=0, + mask_padding_with_zero=True, +): features = [] for ex_index, (comment, code, label) in enumerate(zip(comments, codes, labels)): # As was done in CodeBERT @@ -87,11 +96,7 @@ def _convert_examples_to_features(comments, codes, labels, max_seq_length, input_mask = torch.tensor(input_mask, dtype=torch.long) label = torch.tensor(label, dtype=torch.long) - features.append({ - 
"input_ids": input_ids, - "attention_mask": input_mask, - "labels": label - }) + features.append({"input_ids": input_ids, "attention_mask": input_mask, "labels": label}) return features @@ -107,42 +112,38 @@ def load_data(tokenizer, batch_size, seq_len, train_file): is_pad_token_set = tokenizer.pad_token is not None is_eos_token_set = tokenizer.eos_token is not None - with open(train_file, 'r', encoding='utf-8') as infile: + with open(train_file, "r", encoding="utf-8") as infile: for line in infile: try: item = json.loads(line.strip()) - input = item['input'] + input = item["input"] # split at [CODESPLIT] token - input = input.split('[CODESPLIT]') + input = input.split("[CODESPLIT]") if len(input) != 2: # skip cases with more than one [SEP] token logging.warning(f"Input contains more than one [CODESPLIT] token: {input}") skipped += 1 continue # skip every sample that contains special tokens - if is_sep_token_set and ( - tokenizer.sep_token in input[0] or tokenizer.sep_token in input[1]): + if is_sep_token_set and (tokenizer.sep_token in input[0] or tokenizer.sep_token in input[1]): logging.warning(f"Input contains special tokens: {input}") skipped += 1 continue - if is_cls_token_set and ( - tokenizer.cls_token in input[0] or tokenizer.cls_token in input[1]): + if is_cls_token_set and (tokenizer.cls_token in input[0] or tokenizer.cls_token in input[1]): logging.warning(f"Input contains special tokens: {input}") skipped += 1 continue - if is_pad_token_set and ( - tokenizer.pad_token in input[0] or tokenizer.pad_token in input[1]): + if is_pad_token_set and (tokenizer.pad_token in input[0] or tokenizer.pad_token in input[1]): logging.warning(f"Input contains special tokens: {input}") skipped += 1 continue - if is_eos_token_set and ( - tokenizer.eos_token in input[0] or tokenizer.eos_token in input[1]): + if is_eos_token_set and (tokenizer.eos_token in input[0] or tokenizer.eos_token in input[1]): logging.warning(f"Input contains special tokens: {input}") skipped += 1 
continue comments.append(input[0]) codes.append(input[1]) - labels.append(item['target']) + labels.append(item["target"]) except json.JSONDecodeError as e: print(f"Error: JSON decoding failed - {e}") continue @@ -171,6 +172,7 @@ def load_data(tokenizer, batch_size, seq_len, train_file): # Fine-tune Model ############################################################## + def train(model: PreTrainedModel, dataloader: DataLoader, args: argparse.Namespace): """ Fine-tune the model. @@ -192,7 +194,7 @@ def train(model: PreTrainedModel, dataloader: DataLoader, args: argparse.Namespa name="linear", optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, - num_training_steps=num_training_steps + num_training_steps=num_training_steps, ) for epoch in range(args.epochs): @@ -207,10 +209,12 @@ def train(model: PreTrainedModel, dataloader: DataLoader, args: argparse.Namespa optimizer.zero_grad() progress_bar.update(1) + ########################################################### # Evaluate Model ########################################################### + def load_data_for_mrr(tokenizer, file): # create dataset comments = [] @@ -223,42 +227,38 @@ def load_data_for_mrr(tokenizer, file): is_pad_token_set = tokenizer.pad_token is not None is_eos_token_set = tokenizer.eos_token is not None - with open(file, 'r', encoding='utf-8') as infile: + with open(file, "r", encoding="utf-8") as infile: for line in infile: try: item = json.loads(line.strip()) - input = item['input'] + input = item["input"] # split at [CODESPLIT] token - input = input.split('[CODESPLIT]') + input = input.split("[CODESPLIT]") if len(input) != 2: # skip cases with more than one [SEP] token logging.warning(f"Input contains more than one [CODESPLIT] token: {input}") skipped += 1 continue # skip every sample that contains special tokens - if is_sep_token_set and ( - tokenizer.sep_token in input[0] or tokenizer.sep_token in input[1]): + if is_sep_token_set and (tokenizer.sep_token in input[0] or 
tokenizer.sep_token in input[1]): logging.warning(f"Input contains special tokens: {input}") skipped += 1 continue - if is_cls_token_set and ( - tokenizer.cls_token in input[0] or tokenizer.cls_token in input[1]): + if is_cls_token_set and (tokenizer.cls_token in input[0] or tokenizer.cls_token in input[1]): logging.warning(f"Input contains special tokens: {input}") skipped += 1 continue - if is_pad_token_set and ( - tokenizer.pad_token in input[0] or tokenizer.pad_token in input[1]): + if is_pad_token_set and (tokenizer.pad_token in input[0] or tokenizer.pad_token in input[1]): logging.warning(f"Input contains special tokens: {input}") skipped += 1 continue - if is_eos_token_set and ( - tokenizer.eos_token in input[0] or tokenizer.eos_token in input[1]): + if is_eos_token_set and (tokenizer.eos_token in input[0] or tokenizer.eos_token in input[1]): logging.warning(f"Input contains special tokens: {input}") skipped += 1 continue comments.append(input[0]) codes.append(input[1]) - labels.append(item['target']) + labels.append(item["target"]) except json.JSONDecodeError as e: print(f"Error: JSON decoding failed - {e}") continue @@ -266,6 +266,7 @@ def load_data_for_mrr(tokenizer, file): return comments, codes + def mrr(model, tokenizer, file, args): random.seed(42) @@ -277,7 +278,7 @@ def mrr(model, tokenizer, file, args): chunks = [] for i, sample in enumerate(zip(comments, codes)): comment, code = sample - codes_without_sample = codes[:i] + codes[i + 1:] + codes_without_sample = codes[:i] + codes[i + 1 :] # select 99 random codes distractors = random.sample(codes_without_sample, args.distractors) # create samples @@ -285,20 +286,24 @@ def mrr(model, tokenizer, file, args): comments = [comment] * len(codes) labels = [1] + [0] * len(distractors) # convert to features - features = _convert_examples_to_features(comments, codes, labels, - tokenizer=tokenizer, - max_seq_length=args.seq_len, - cls_token=tokenizer.cls_token, - sep_token=tokenizer.sep_token, - 
cls_token_segment_id=tokenizer.cls_token_id, - pad_token_segment_id=tokenizer.pad_token_id, - eos_token=tokenizer.eos_token) + features = _convert_examples_to_features( + comments, + codes, + labels, + tokenizer=tokenizer, + max_seq_length=args.seq_len, + cls_token=tokenizer.cls_token, + sep_token=tokenizer.sep_token, + cls_token_segment_id=tokenizer.cls_token_id, + pad_token_segment_id=tokenizer.pad_token_id, + eos_token=tokenizer.eos_token, + ) chunks.append(features) # make predictions for all chunks device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - print('Using device:', device) + print("Using device:", device) model.to(device) model.eval() @@ -306,9 +311,9 @@ def mrr(model, tokenizer, file, args): for chunk in tqdm(chunks): # calc correct sample (always the first one) correct = chunk[0] - input_ids = correct['input_ids'].unsqueeze(0).to(device) - attention_mask = correct['attention_mask'].unsqueeze(0).to(device) - labels = correct['labels'].unsqueeze(0).to(device) + input_ids = correct["input_ids"].unsqueeze(0).to(device) + attention_mask = correct["attention_mask"].unsqueeze(0).to(device) + labels = correct["labels"].unsqueeze(0).to(device) with torch.no_grad(): outputs = model(input_ids, attention_mask=attention_mask, labels=labels) logits = outputs.logits @@ -321,10 +326,10 @@ def mrr(model, tokenizer, file, args): # create batches of size args.batch_size batch_size = args.batch_size for i in range(1, len(chunk), batch_size): - batch = chunk[i:i + batch_size] - input_ids = torch.stack([sample['input_ids'] for sample in batch]).to(device) - attention_mask = torch.stack([sample['attention_mask'] for sample in batch]).to(device) - labels = torch.stack([sample['labels'] for sample in batch]).to(device) + batch = chunk[i : i + batch_size] + input_ids = torch.stack([sample["input_ids"] for sample in batch]).to(device) + attention_mask = torch.stack([sample["attention_mask"] for sample in batch]).to(device) + labels = 
torch.stack([sample["labels"] for sample in batch]).to(device) with torch.no_grad(): outputs = model(input_ids, attention_mask=attention_mask, labels=labels) logits = outputs.logits @@ -337,80 +342,77 @@ def mrr(model, tokenizer, file, args): return mean_mrr + ############################################################## # Run example ############################################################## + def main(): """Main function.""" # args parser = argparse.ArgumentParser() - #parser.add_argument('--dataset', type=str, default='./codesearchnet_adv') - parser.add_argument('--model', default='roberta-base') - parser.add_argument('--epochs', type=int, default=5) - parser.add_argument('--batch_size', type=int, default=32) - parser.add_argument('--learning_rate', type=float, default=2e-5) - parser.add_argument('--weight_decay', type=float, default=0.01) - parser.add_argument('--num_warmup_steps', type=int, default=0) - parser.add_argument('--output_dir', type=str, default='models') - parser.add_argument('--seq_len', type=int, default=512, help='maximum sequence length') - parser.add_argument('--distractors', type=int, default=99, help='number of distractors per true pair') - parser.add_argument('--log_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='INFO') + # parser.add_argument('--dataset', type=str, default='./codesearchnet_adv') + parser.add_argument("--model", default="roberta-base") + parser.add_argument("--epochs", type=int, default=5) + parser.add_argument("--batch_size", type=int, default=32) + parser.add_argument("--learning_rate", type=float, default=2e-5) + parser.add_argument("--weight_decay", type=float, default=0.01) + parser.add_argument("--num_warmup_steps", type=int, default=0) + parser.add_argument("--output_dir", type=str, default="models") + parser.add_argument("--seq_len", type=int, default=512, help="maximum sequence length") + parser.add_argument("--distractors", type=int, default=99, help="number of distractors per true 
pair") + parser.add_argument("--log_level", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], default="INFO") args = parser.parse_args() - TRAIN_FILE = './codesearchnet_adv/train_adv_clf.jsonl' + TRAIN_FILE = "./codesearchnet_adv/train_adv_clf.jsonl" # logging logging.basicConfig(level=args.log_level) # load tokenizer - logging.info('Loading model...') + logging.info("Loading model...") tokenizer = AutoTokenizer.from_pretrained(args.model) - # load data - logging.info('Loading data...') + # load data + logging.info("Loading data...") dataloader = load_data(tokenizer, args.batch_size, args.seq_len, TRAIN_FILE) model = AutoModelForSequenceClassification.from_pretrained(args.model) # train - logging.info('Training...') + logging.info("Training...") train(model, dataloader, args) # save model - logging.info('Saving model...') - model.save_pretrained(f'{args.output_dir}/{args.model}') + logging.info("Saving model...") + model.save_pretrained(f"{args.output_dir}/{args.model}") # also soave tokenizer - tokenizer.save_pretrained(f'{args.output_dir}/{args.model}') + tokenizer.save_pretrained(f"{args.output_dir}/{args.model}") - DS_FOLDER = './' + DS_FOLDER = "./" FILES = [ - ['statcodesearch', 'test_statcodesearch'], - ['codesearchnet_adv', 'test_adv'], - ['codesearchnet_go', 'test_go'], - ['codesearchnet_java', 'test_java'], - ['codesearchnet_javascript', 'test_javascript'], - ['codesearchnet_php', 'test_php'], - ['codesearchnet_ruby', 'test_ruby'], - ['cosqa', 'test_cosqa'] + ["statcodesearch", "test_statcodesearch"], + ["codesearchnet_adv", "test_adv"], + ["codesearchnet_go", "test_go"], + ["codesearchnet_java", "test_java"], + ["codesearchnet_javascript", "test_javascript"], + ["codesearchnet_php", "test_php"], + ["codesearchnet_ruby", "test_ruby"], + ["cosqa", "test_cosqa"], ] results = {} for meta_data in FILES: - logging.info(f'Evaluating on {meta_data}...') - metrics = mrr(model, tokenizer, f'{DS_FOLDER}/mrr/{meta_data[0]}/{meta_data[1]}_mrr.jsonl', args) + 
logging.info(f"Evaluating on {meta_data}...") + metrics = mrr(model, tokenizer, f"{DS_FOLDER}/mrr/{meta_data[0]}/{meta_data[1]}_mrr.jsonl", args) results[meta_data[0]] = metrics - logging.info(f'Test results for {meta_data}: {metrics}') - - logging.info(f'Test results: {results}') - -if __name__ == '__main__': - main() - - - + logging.info(f"Test results for {meta_data}: {metrics}") + logging.info(f"Test results: {results}") +if __name__ == "__main__": + main() From 3acd9e4082c63858038e86bdb2d5a885a2449da5 Mon Sep 17 00:00:00 2001 From: MaikeZuefle Date: Wed, 15 Nov 2023 22:35:36 +0100 Subject: [PATCH 46/57] Latent Feature Splits --- .../tasks/latent_feature_splits/__init__.py | 5 + .../bert_closest_split/__init__.py | 0 .../bert_closest_split/config.jsonnet | 57 ++++++++++ .../bert_closest_split/doc.md | 52 +++++++++ .../bert_closest_split/eval_card.png | Bin 0 -> 176039 bytes .../bert_closest_split/task.py | 99 ++++++++++++++++++ .../latent_feature_splits/config.jsonnet | 58 ++++++++++ .../tasks/latent_feature_splits/doc.md | 52 +++++++++ .../roberta_closest_split/__init__.py | 0 .../roberta_closest_split/config.jsonnet | 57 ++++++++++ .../roberta_closest_split/doc.md | 52 +++++++++ .../roberta_closest_split/eval_card.png | Bin 0 -> 176039 bytes .../roberta_closest_split/task.py | 99 ++++++++++++++++++ .../latent_feature_splits/test_hatespeech.py | 8 ++ 14 files changed, 539 insertions(+) create mode 100644 src/genbench/tasks/latent_feature_splits/__init__.py create mode 100644 src/genbench/tasks/latent_feature_splits/bert_closest_split/__init__.py create mode 100644 src/genbench/tasks/latent_feature_splits/bert_closest_split/config.jsonnet create mode 100644 src/genbench/tasks/latent_feature_splits/bert_closest_split/doc.md create mode 100644 src/genbench/tasks/latent_feature_splits/bert_closest_split/eval_card.png create mode 100644 src/genbench/tasks/latent_feature_splits/bert_closest_split/task.py create mode 100644 
src/genbench/tasks/latent_feature_splits/config.jsonnet create mode 100644 src/genbench/tasks/latent_feature_splits/doc.md create mode 100644 src/genbench/tasks/latent_feature_splits/roberta_closest_split/__init__.py create mode 100644 src/genbench/tasks/latent_feature_splits/roberta_closest_split/config.jsonnet create mode 100644 src/genbench/tasks/latent_feature_splits/roberta_closest_split/doc.md create mode 100644 src/genbench/tasks/latent_feature_splits/roberta_closest_split/eval_card.png create mode 100644 src/genbench/tasks/latent_feature_splits/roberta_closest_split/task.py create mode 100644 src/genbench/tasks/latent_feature_splits/test_hatespeech.py diff --git a/src/genbench/tasks/latent_feature_splits/__init__.py b/src/genbench/tasks/latent_feature_splits/__init__.py new file mode 100644 index 0000000..8ceca21 --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/__init__.py @@ -0,0 +1,5 @@ +from genbench import TaskDict + + +class LatentFeatureSplits(TaskDict): + pass diff --git a/src/genbench/tasks/latent_feature_splits/bert_closest_split/__init__.py b/src/genbench/tasks/latent_feature_splits/bert_closest_split/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/latent_feature_splits/bert_closest_split/config.jsonnet b/src/genbench/tasks/latent_feature_splits/bert_closest_split/config.jsonnet new file mode 100644 index 0000000..d5c8c01 --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/bert_closest_split/config.jsonnet @@ -0,0 +1,57 @@ +{ + name: 'Latent Feature Splits (bert_closest_split)', + + // @TODO: Add a description of the task + description: "We split hate speech data based on the internal representations of a RoBERTa model. + The o.o.d. data splits leads to an under-representation of parts of the latent space in the + model's training set, making the split more challenging than a random split.", + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'non-i.i.d. 
generalisation', + 'o.o.d. generalisation', + 'latent-features', + 'hate speech' + ], + + authors: [ + 'Maike Züfle', + 'Verna Dankers', + 'Ivan Titov', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/MaikeZuefle/Latent-Feature-Splits/main/genbench_splits/hatexplain_bert_closest_split_test_new.jsonl', + train: 'https://raw.githubusercontent.com/MaikeZuefle/Latent-Feature-Splits/main/genbench_splits/hatexplain_bert_closest_split_train.jsonl' + }, + + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + best_score: 1.0, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + }, + { + hf_id: 'f1', + average: 'macro', + best_score: 1.0, + git_commit_sha: '3a4c40f7397dcd7d9dccf0659616dc6b14072dcb', + }, + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + finetuning: { + objective: 'maximum_likelihood', + } + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/latent_feature_splits/bert_closest_split/doc.md b/src/genbench/tasks/latent_feature_splits/bert_closest_split/doc.md new file mode 100644 index 0000000..f345ddb --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/bert_closest_split/doc.md @@ -0,0 +1,52 @@ +# Hate Speech Detection (bert_closest_split) + +## Abstract +With the ever-growing presence of social media platforms comes the increased spread of harmful content and the need for robust hate speech detection systems. +Such systems easily overfit to specific targets and keywords, and evaluating them without considering distribution shifts that might occur between train and test data overestimates their benefit. 
+We challenge hate speech models via new train-test splits of existing datasets that rely on the clustering of models' hidden representations. +We present two split variants (Subset-Sum-Split and Closest-Split) that, when applied to two datasets using four models, reveal how models catastrophically fail on blind spots in the latent space. +This result generalises when developing a split with one model and evaluating it on another. +Our analysis suggests that there is no clear surface-level property of the data split that correlates with the decreased performance, which underscores that task difficulty is not always humanly interpretable. +We recommend incorporating latent feature-based splits in model development and release two splits via the GenBench benchmark. + +## Examples +{"input": "wow do not all speak at once niggers", "target": 0, "target_options": ["hate", "noHate", "offensive"]} +{"input": "how long will jews be a majority in israel", "target": 1, "target_options": ["hate", "noHate", "offensive"]} +{"input": "sounds like something a moslem would do", "target": 2, "target_options": ["hate", "noHate", "offensive"]} + +## Usage +For the task, the model has to decide whether a social media post includes hate speech, offensive speech or normal sentences. + +## Data Source +The dataset was published in `HateXplain: A Benchmark Dataset for Explainable Hate Speech Detection ` by Binny Mathew, Punyajoy Saha, +Seid Muhie Yimam, Chris Biemann, Pawan Goyal and Animesh Mukherjee in 2021. It was accepted at AAAI 2021. 
+ +It is licensed under the MIT License: + +Copyright (c) 2020 Punyajoy Saha + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +## Limitations and Bias +*Note any known limitations or biases that the Hate Speech Detection has, with links and references if possible.* + +## GenBench Eval card +This method can be used to test generalisation in HateSpeech for LLMs (pretrain - test locus). +The split is based on the feature representations of a language model, therefore we assume that the shift is a covariate shift. The method assesses the robustness of language models and how well they generalise in out-of-distribution settings. 
+![GenBench Eval Card](eval_card.png) diff --git a/src/genbench/tasks/latent_feature_splits/bert_closest_split/eval_card.png b/src/genbench/tasks/latent_feature_splits/bert_closest_split/eval_card.png new file mode 100644 index 0000000000000000000000000000000000000000..5a6877dc52127afb43723665b78922f67b7bfad8 GIT binary patch literal 176039 zcmbTdWl&sO6D}MG5Ik6LXK;6i;O>y%?kOCMLE{=61k~&)otaLe&2Vi8z`V z04?lniB&CZOg_n35HqtA%iFmSvoNu+60ED2y-v6UYu@TyK?(FSJVt1Vo4RrHaDF zQ@&T0=2{-6hv#`^U#meF{xHQL2M80t$;IJ@o933mKe6&b%ai5+>>Wgcn5HGUsby6p zRJJl^`ISGx<(;5D3HTZ2AHukdd|y;u&L@_dagzC%g2y%-F)2KM@hhAV20Gs(>Npgc z+#oYGR}R?%EJT>Xp=YE7p-|$bv5(}uY+expSF!vZ4n@%r0ob*9`Q+$EBxBGrz>r!- z6Z#ABE0;69;# z>DSGf7*w;>Y5UXoSq;gg}n8~(&RCa{{hB}oXdqa*v#jh(fZL}zFgEG z{$RfJkK=~hgntQwZAKXt^ACFIJEI<_8Nh~n#!1Tg<-OencqdkaTc*8M*mzH)mi=Fi zGW<`Y&{NYY?)A6dl0jcmXVKP;``?WHiGuszVEbXt`qZk=g4M~Rl+R~{S5k?O>u0aH z4*Lu9LlZ;O0KPik87pg>n#P9bX&Nh|jrgiI<4_P+A~}7=*M*`hHl(^dSrkZIe`ly* zD4Hp#&x%4qqCu2b=cm(5j$bp1!Wv4u9w2-EWSA-Q`C`}A?~XL{^&qL-H?N=^-ZJwO zys&v`d2qRa#I{B};0W#`Ud$jUK(GO618hxdU%11)!!r1@WPgg_AYXc=EUg!KE`j$4 zQ@`~Y@SEjb!Vx%vA^bK?edrEbU@XK8^8Y~np)tfvR@c zqn;L?Mxh4F9nGV~GiAB!7DBLl!FZGoonCM{L08I;dGe)<^KtuK8~mVdM$9s|u#jeD zJfyy&Qa+wS{)x!P{`*N^bL-=_+=f+~ar6Kk-<1@(nnaa%&b1xAa;6<~Ze(&z!|&vh zWD#A{6u?m{`9Cp-s313&2G=xCo`R|F3ooKSVV_jw;swfjtJp~q9euZW-&S2-EE)>) z>_qHz4+{OyfwB}h=d%KWsB(d}wbwALz@YB{2qkQk2M8emtP%7$6SM5+Sr#VX8P)Fz zLld?$mb3GEr)&7dn)Tfk`5Xg>aNQiHtVo<_MBr#@(AsygW*OBOJnZUdJgr0;F-Hvo z74-f*1q>+BFk%lwS!yv>MK*L91*Sx(&6v>q)ZOh(L<%huJig2=S zWkVeSSng|BZ^dcjV(VszEa6O92)d{*$bfcV|0l_Nxfq`r7$*mfMbHlgHB3;S=Wt5k za0U-_!~cy^k5r3B$qHl0giVD z9!=cYo2dbM1qZS zP!zNH#GAy)trTUcQ6ev4_>qy@fX`$weQ3eQi_ zwz^NgdfI~e?I-Az@IASG_Rj*(4(XA0D%Cjs*`Qi}(okW4013sQJ?R`UTy&ATl*mp_ zUYN*ks&Mga=KL;Al%gxXSkXq22CEE{&id9s$50f^eAbMEBpxXY8zbCuoXDI-dV2#! 
zcG}SG{M{2{1_=qik=)WLVy=P4{a+8m_*cbn7*i_PX^>G#+}6|NPE)_|nrq9a+XzC|CiXwnFjM3loBU7}dNxh(y<@)cia-+2*hs3SYH1sui9+X<=K^5w zeGMumJFBId@@hTWxu+6S0ggClHV!S_lL4IqD&xY`iITJeWi115vm7d>9Os_g9$aqfi(*EU z8O?V-UL1)2mMXkcz3x8>n;S;7ma*>F3hq0`(HGTUv>)v*>K`jA)>P$Vx!-7fF3q6j z)2Qp)6o@fi`bE{uQd+-3^pijtt(6VfgJb_N_s_*GdLwz#?%VCBBQLuyrn%d~9L z;Tq=Z?UAwCho5%wE}<)+bkH6#^{O8e<%!j5?=-|;6WQ=2;(g%BsFXdagd(8HMXpRd z6Iq~zYI;o$$DcnOdZcayF1lnRAUiE+6x{0BqY!Lo2lP^OKcSgLm@%`GFVyE!lQ>M3 zc)en2ce+L8Y|Ku@boU0qvVoxgz5esT)0%@bYDxayLsOe#+US;C{sJo*a= z&O7c2n_YN*8!=%iBxDH=G8c((oX+6fTHFnuI%}g9Nz!ynro4;BRv4!Ila(B8#_!ll z#^01-tP9gtDwkH-*IUjEeG*2&KWAR+=zFu5R9e)Vy6L(6wXM{=jAxh z&s&Myf40?YA7e)xya6h0}JO^TjVU$@$ zJw2iARf@<%BPpKoneJgnR8Vp`8D{NCCrb;6RqIkDOvWmq-2f=Ujq*ifVIimv<-fJR z@r+(0$#bf*6cZyuf~Qme++VRJNzOxj=KmqdYf)aJprof$XbIQ5FVMbdYd>Wscr^v0!8&&bRU%{SLzgg>q3u8yWT#AglFkdwDc__v~riyZeAjDi)-$_PLfN zS%2n`Y4HvkE}KpqqMYjNzKvVGoc0lB&=ZL%O*?5Q5AeCm^!za0`;sk=y+yT_2C zo~do;Sg)a}lQw5zPA3f6Ish5au4o^>6utOq~P)xAV+;UHlU0NmU)yWU=^vV0DWjL$Y@;I~FL}?bMQaie`Xve(a zIs_m^l8DxL+mOkfLk_|eBSqfq4#IMa>=hVyiCKRo+0cY{4~+vHN!1Ui#20RUhqh*E zCAFf&DxLf`g;FXN^Jm@2s01|V5~JTH0g`E9))8ZOq%(WO|J*mp^CgN$$EA9sc>Rc3 zS(UU;h|^{!So-)-xynStcm_GSIPU=+yuj~q_WAtQ;Bxus4q@t30{B-{Q3Gw_Ut||> zUa;rQ6TPi<8xY1Q-+ZhmZ$f{oR-MUwmmP!c)X=YY1~SMo$WTC`^*DBz-}+HaXm$z% z8p%&s5<{XzATS5Tznkyt;f#Q}AOjC*&392Lgb=o-UaV25wL0A%jJSeGnUEp7!>OXpb zkgWvzjXcrJXQ7I9_HjADbr3#he6cn|Y!i>Ewwsc!skb#NQ`h%0U4}yeBZshY-r+>@ z@eO<$7oC@*(@8^;gTfB>Gtn?l4@Q%d8hnm~(P{buha?i~Ro?xROs7mmgI9bffPr_= z*L2BjiFdbWoui~G)WSx3wmA)KPiLg)V5%S>orG12?-P_T92%K3P;F%_8nbUp#B4rm zC5P>f$_yza8xIIX1q_@0IK4&}2o>mj^F7N#-GS3DFD(u;NL7b79}8Atkwc$lQA|uV zXCYl#&)?K5!n&fm5tL__1%i5Joaxjc5LMMca~N#Ul2B57p2KU@QZtlsAC+_AWFePE zUIPn^_a)Uz=ziMHOdveHrdrfYzC}Ibn?gRB z3bW3C$406z7W##sb9`wF~Mx_MkDoY znVh>mlp_xPBqKD>rmE)7riYH9swSawyZkE(QWC~ZCC2Yd(xiY>EI{h1_un|rN}V16 zrc)|o<7!AWttGOb`U?B3_1ksum;Q>XJPuvfMEs~_sluA`Lk4Ud3g}9L-WWdj*DqKK zF{Ot?gX0F7n=r{x!yt7kg$q>4uiXlhhFs2=`?SCB>etd`(_}qb+MAc?N{tiQx*Ejm 
zlmRBkhZ37LG;pvvSKEZ+uN>nGpRl61bov%P@Bc>S%8r~aLJ12y&3K2^$b2eAGRLG#yjYT4$)CVmFdlE=6F zajggna)RvB^S{(VkSmqWDn5Llyt0o}RmDW+Rp%xJx3OY-nZm~y=Z}7mF0DdH*GTMc z>_%ss;f^_-x+0ZdTOeK+TqwLC&)(5jzkySg;!wM{{AR_`(ty7lx10)JgFZbh~!3 z;}cz@V^}#wZ6fTr)HYp>O6fuw-qx@4&TyE*+B3v?OF|HbnH?o$9H9T#+%YC4suDh_ zC}cQVFSEFr1&?&ow{n2HWogjfx-Bjk%X^`fKtR zG&xh6XrWF+EjJ)*MqO20qhk0zwf(!%69*v?T znhFapfGeOPh2;YS()bzIs^cd2H5i0BpcObK!$f2InWZpL#008|z|ak&lM-rA>aT>I zgawB%;?90+YiJ0q804(t1|JZGB+F>xtc4|Yw13H${L!8OUhVh!FKerovxtcKy^yVr zNlBIB;&QM7p$`UAo*fka1r&O>(>5?M*%^eC{X(Lr?SXy~Sc6asH~#PH^WV_tNQ1PK zgEYf|(jrn&QYffF%p~~Ew$iZqu!50NXo@O0W528sWRJ;I%Jd`*7j-2odF(8Z(0o4?O+$pkkB_z{&3HY_4zZDm>7|v#SCy|2=8UIka zr~C^;Bp(IU4M6HW7n(xzle+VU2SE!<<5#U1Ek1f1da$iO9wdbuS&D<1C`w@I5&Y4- z6B}lVx+`%_MSrPzxxWkBhA}==%PT~|Mxsl_@IZOMJBBF3<-Thzd(jjN_aZ-3DBT9}x;U zf5i-_3yC2nfk73)pM>Ndv0hW1?^RX3DM)`hPjzS>{=Qtmt!S=6IRt{UlDIb*voVnoeMz4FkF1qk}M8J(8X(N-WrDcB7@;Ukv%i z${(jIJ$}gx=mscMCjq3;{y~0clzc;feYNAt_nFG?6wGB`E2ueGs#qL8hMq}dqwedG zJeoUJTUaPxQ?>-`K`$Jzs5&+H@uK?(AG+WmWw3qaxe36&*!~Gkmamks@CMOtf>*Cm zU&eA&l%lS2G0UBr-$ZuF!{++6%$y`iyH}_JWXlVf8O6X_Q0H-tpWowA6A@COE7lCQ zo*+qKoen^mJlko$sBKT$E>1HlEH|ddE+-IwMcsiCh5#^6mx6w8nr!V26(MCxc#3_S zqjM?l{VN?c?(OePRk}!9S&cbdG#|r&1NSX3I`5>A?lXO)l5mW1i-_6e_6mIh&`xyM zqF;DG71|&krOiluoL@YNCZRsa#F#k~vgo(eg!17tobHA1#2`Bd2p#O0Uds|_F519p z$b|$Oqsh24Wj1rdOqgtcVxc@Qc3d)9x1f3$V&qaFvi1g%G0yEjhi%pd5}GW)GnP`r zom*Vwc5?9>u!N_DgA4W=%wiB>?2n3BK=YAhvYyS^J&aaSR|+{Y|HFjw0M{Tdw}Ilb zX2ev*jI3OUbc?XoO)E9ioOPGt^Z0R@P;_|SGyJU9Hn6pgRxS(7r;-rK`Y0I2<3BO% zOfa*n~l^6?FCr(aA&tDM>2IW`Y|i`wN<1t@roLE-N$(b(EtLIkszBi^_^h zfZN(2GH1)xHovW{M@!P8I^Q!)_kHJ1rsh~2C%Y<}DAY}}1Bjo)6GFvRm}S|Bi~Z<{ z9P@|F+$_U+W5il(T~FgWNdgq9YOi`98@=>30#~%$hKF04r%R7kAEgO0H_(RgH+-ho zE5?Cdc8Mi*41>gETjVNK6+wIt!?=a4&oP<5z`1$cIX!v;^`4&?(CY@X3EOsn{3 z%B*5@A2otM04WblftO1W4o=R;1i?piU5}%p%{IPe@CSh2p^;*?4wOtOSN%D;wM4wqK>M$O!Dk$n|@4E5J z^e}&fB|lqCs+#UzIP%foD@D9vSLjiyk6Z(-V#yU^Wie6#1}6f(=7PjQ|Hi+7G=a! 
zLrE)J5U4o)hc3QitU>*^10uATOUy?J$Q=Fr>sgy-j$H^>w>c;cGnL<}fZw5O08nIg;$pmy?2G&Dj0(Qkf$Q1J~1K7vOKcMa|D%@ zmxlWSKIksWW47n0@+Wm20ScIH<}?6eaO}8-G0Q&XAaxv6e;%k%P#-MTqM4j%lQ5Z% zT^=qk%XY+^<_6S8>T?)MX;?}RE-R-BPaO~8`h%yChA0l@k-j9YVvmR~BchTn~XDT?b$^BhwyL_nk zY+(R2L~9d0k6GI*s?92XgH|KN1RUW>g)Td6+r91EJuFXp59+s?{_Tw!%PxBgS! z0rXwKrTEav3T~QUbfO&S7=<-I{8u>@!G=VIEP1u0^f=fza(#ycRY>_}=apq|x zg3(MQ+#1+#1H*C5Gv+wZ^GuRB4V$-2H2I}eZ9z#3Q@{vyc9e9#qRD+YzSB=NUU!bd zLP~5Ha{TcAG{MCMlsu?BKcs!E-}tx%*?&*IAb*>itn2#VCkg(oOb(*+kEx2~M*0o{l60pW8{yvj|Ye<1s=FV34IsHp-g%r z72%ipk6|qci5VL+W>HTLD(25JOY}P0!VyFLU`}nDWx6*HmgcKuulCl#kVZB)7?uAGt5$kxTnS*SqS-OO+91qUkDO|cVCcUAaIoG9 zA|ez5Fq0y~^~Dm0N-3hi$0Uzh7i48+9{qqHXJh`Rm~-$=Q54`1c|ZxP8kxwwnl0ee z3l-NNsQaA`gGuFsw7tr&0q8M*cnv;U5J?vi8UjjbX>gbUc4}~NvA5212NVe!DI;k3hON>q9DNraw9#q%L2vb=0KNFy|M$CSSr-@F&0bJhWN8SwOjciTs+;;bReLx3A%I zf-#>dcf^WygJJh;C;(yc@C#<9M?|Y-a^p)w>3sz49jImfi}gn5Xd|sFH&AU*I(gtB0$){%*uV z_xbf&m7VIBpR@iLo_5wHC=cU=MWo^f=f08?SzMZJ9 z;Ag9E5gUm9;*esyYR06RYW$b3(1xlmtB{_n>>%(qrkLv$NUOD`d_rzO!Q% zP(?6e_9oM2{Mp2(`ov@C;!)(A^vJKQP1>SP1X!po@GX)3Mag>*!cUjSk}OE8n(iLgw;TEeM;K=b2{qz0YgEE6kk9io{oCC- zA*eG|6=v?BGmPshv{Rs9gXC2;TnJ?cG{(`qNktq6l6@wC5y>$Jvm&ACo^o8(WSeqx zH9Cj4-j_Z%ySu*d;4MJD8#wq~Q&uG6?^s!$L4~3WW4KP4*mIHNw$JrqlVtahP(|y{ z44gz+H2=K;5i;l#bQW&LpZi)W))-@dgVwDOTbyL_0f%I|IRyy{S_z|(q2&OM>zvF| z5Cppv#v!!cNl5?@hTx+_iw2PzCh*kVWx?3b`_;ml{$_w%xo*0p)HlT-G#>*nC zHOa*CmQAxMg78#PmIul+WUZ@h_(}Nl`z;#girn^mp|oBjwCVT&HG#Tw<>IkYKecyaPDzrRfXICtUd9` zSY40J6Ef^#Q{_=2zcw(pwxW?k3%sjy!RP9yV3l>2r>AA|j6Pu(=y!VyzkC{j zO+^}SE_%{QBqask(J?gdr(`Ra0Lw87J zG3R~(6FT$T-wK(RtNib|xi~!K@D0y`8IgCKZqFrT`Km#JrF*HK7<>pzLHGynr`W<& zuq*q^dy&i$$OC0P zHn)AIN<_0a`!E$@Vz$p}YB@dh!nrHtV@5tht?WHzqGGdic8h)&t;DG=JU~}1U;zFc zUZ#3OYK=gbFFUbJajE?aQW8)=CW^6Mm&RA#`_`mh#!OvAqI89o=S@msw%Pbu#y4=o zJ7T@v^Jb3J!zS#uO^mE$?0j2R8(cc6kIw=p#}b6rY7HY378a~i+$Zq5jg5s6F0(rx zTfSgWLw_jmTt2D>KQLB_E_W)#RlG_tLnM2}_C>N5Oh~FmI##JPTAFwka+|RpfGayF z>&ZW8@P|zn7X8bpyo$|kT|dg8=US&r99~L|<8~*UkyUmB-k0539$OI*Q8e~FWs4C? 
zu!w1yJVO>Ks4wd0cQAU3O7JU5^~rV+NtlLcW5gT+W-H|+asgbOVDT&tOJ?vxc>oPx zWud|;9Vtr$8%wN;PO3E%eq_r5V?!c?qoF+c0bhaG3%=CT{nh>ELGM7u%k>#{-5n2Y zP1kU%W;C~!gpZ?O>0z#MX;pRJkw!fNI}6|117S6u$csVF_MFivrCXgCO-9igRRV|e zsl@X|!DLKG;kca#KVq2z;j0F*0>0h1$62uFI7 zfp?eW0_1kPthh*UAM7^hPlr%|0jwe6lXX3}SJ35(>jbVzd)FM2WOboB(Ot88cqPD zm21o<+etkfBD~CD5Z!WY*))9?BAWCLvNj5uuN1p2y+3c6?Q`XibVNPHD0-wxc4xzt zW}^U_fpX)pb}EqDQVq%Zdv9u2&)a!F{Nm zJ=oz9*vn{YjUr(AWm^%NjC`IP65Oau36%|c6LUpo7k4l3gt@9#Y^YjnqS8ON5MQbALd%KBt#r+k=JyJ6g$N4VYz9T?6x z-V2@Ot*lPOQJ)u{5?$U&kA?i3uxk-$S!o1>L`+P=q|a~;Tc|DQ;+@C=6C|%!iEm40 zTxv`a&c~MFkILymk;r4gqj2k}ulo?s_e4|Iq%dLO7k-w77NQ^vHcPg8YUQw){oA9n z4HK(_*m0>SYa3)sF0GIh1-;3FAK%VZPin z+d4UEmEi4hXUNy3%Ndn5Y#m+v`qO#jaHnQJku=1`K|>5)_Bf0Aa4@fxPQt4F*nle3 zw8kAaY3F2X=;sU$zJZ_LA5_4v?TE;{)(_-V0+)yRZ}0QjOTk-#gr}) zkH%2UJ0=*swf8NwcBLSfaj@%zYMGfzV8Umfd&a+So3DZ|Csl!Zh?7@es;EId=6Gv`p!cy%aF

rGBMg1P0XLbu9em@=xOI?pNqjWDV%5;f{HK`~W7OHwGV&}YO;#k`(?*;uCjKmz z@nARwp;Rgqma?S}dB-~>;1zjYYDiTsK{lf_;8~QG;!Zg!^4QQ*Zbj>UY58h-c~;}m zZ^E%LvVy0^)Di%u`ISX6)^PisZ#`a|B zN-z)yDB;ra&o3Nz`fNybD5rt>N`cumioX%)(ka|^P@<5r2?Vw__)`LOLv5)0>w->- zHB@36H0kw;0}h-lA%8Ck^6gGUE9@pxVbU7!d@5>}v70j8nNetUx$>%(O_r3;+L7?F zkG#mH&7H0IIhYh>OMVAJ2}vzPJ{4>7{*F{cZ? z*6lZ^EX-GI_KVBw2ocTdzQ43)Wt@n~{$AcM2{Ii{d+=}XR}!N0vvgMa(f6>cM1Rj4 zV^6ejL3%o-Z!N*Bt)sWDc9%Qdow8S(HT6x%U66Gr+qQPB`0kO%ln(IUl7Vh@w(A*L zTfdt)S|20y8rH{Azr{E`0fSVmIEt^JO@m=cj;6bO5>DR#(%xm7v?0{|74HKBJ0;$a zV_IFFMc!&ERJz^=9o|5jI)9AAH&?;u3O-}woxT-ii(JmuL7UuhMUN9cCOb1GZzn-2 zODA@h&ptQpnN)*xTeBTk9E%LP2mWtlmNM|8cZBf#$iXVMdTk^7w!$@zkxCwZBlt_E z-)UI&FIH+=+$?goyS2a{(DSAgus)5hHHgQeU33HAAWIN*#rc7~sTXTUPN_iN_n5da zYJ*eua})(tw#R=GSmGoBZH86_*Y{qv;IED$H4B7#V91XBJp;5-inY&UG)lW<%S;m> z{3pBywXhy;e?*@t`LQxC4i%PgX$hJxp8VClV}CPNE7^#Z%kJqf@-)i`D3;;u6wlM# z9mB`4M(GJPw@-ThY5Vv6b)BI?P}-?p7r=LzOSD$f?G?tmjwa~2Nm7+_uvW*;)*hZ^ zq1eo}Uv6@2CAjsF=G0NIU69>i52m2f!aqO%8zUITb>i7!dExZFQnS2@>*$_nfBb@5 zlbK=G;u*2f{g!$^_U>E%V{h*oPFk0Q0(|*a(|TJYxc#BT`1Fa)^+Bg#HVHCYo%Wup zdlwu`tfM6P*o**R2hv%y`+feTpf=m%tDx@wO?Y_At-`77*6|^Ic~=-I+E`22YS##( zPkjV0mg9c^*VM(-M(rSam}jk*rvuJI0M73O#s+N}hhFpR37eW>Q5&w4i=rw z3F1)WaLU9?)SQ1h-vVRIBmWrsYKYO<2*uj{%E#&cq~{mo%PD}u2g!`C%GZpr`1D!7t^LjPG2Zsvq2{^uAk*`l`@QcT6Cgd-1b*AX zE^*JuSw~*7JFL{v%KZ&S$GP~sKYIU|@eWKFdACnXGV!Yt4@IWHF6rLdN(Q~Vmh^XP z=^Acx!vSzKlBNz) zQ=q?HL#iAp5y`u`oiKRvl6j0^l$P#Nvas@pI~ZaAVuGLjpzn=TR&r=|~Gabi}d9 z2>CubOUuT*?Xv5$DYWQfZP$xV*p|HJgrcI-IE4WNeXb#h$QM@+=VTimOG0BM>d_{l zW@_+nsV(*>O`xT#G-+H%wrEn|Z$+}^T3O2zG?^s6fx_V8z)+#j_q)i?2RvSo!bCSy zFm|%9S0aR)-T|Hx)Pr>jW6S*= z>g_OsqK08>x-oT4zPD-L?#{B;U`aDqi&sicw{vKxCTUwbNxU3WiIAwBeZ%zr-l~*g z7$ ziHmhG)jgdHK%xO1Eoucyb12tAXlFXMmc5tRV7^~$HF!W$S|r253Ub1UJe~*&Wa%0Xju$CgV{4ca0eTx6B;^B z)LJ;yY;5PV4x|87*F~qvhPq_=;)?2D5hG~XlO5qm+amcOH?5dXv+b=GLF$ppRucC7 zLZ#wixHPCD;tS9}PBQm9GcqT-13B!rLLfEZzawL_E3~#;Q-E4WAO4a?$%j%bKUfa6 z%;r11qs~1W)7eA9mJswV(BHY3cmo^y@0!W@eoCTR^w-jTlS7Bi`@|By+H;h1Y2F+) z87fErU75dEe`My7dqQPN 
zWHyWABO~3YCK^i*65N`*x?-PqPIxoN5JiE5;nM}le|xe;ySV+jrYGbmXB$An3*&oS z3uM#LJ!5w}IYWMOwONpZlbZfhm-ptopVpHR1-aQIObTfXB@AVrKG3phe6S#JR74Md z{S8RlF9M46H5h_LW#SP8zih}wJ58c2%CN1H92Lst_LR=&UR_4kvKvl7GWt%PqpQmi z3U>6p@_t>uVnk0HEjU7J;?H%iRVY{)Xb>*;4&^*%`E>*TB_`s<&_3y8F@Bc@6=oPV zQX3LF@bY`TxuAFdc|}cKg;$C-yF~dXA%$K=Yaq$gqan-SFJ^p&iZ59N=~c|2&q!)@ zaq9?#YZmqfRDpTn-0#j!OljG7(SS_oHUwfS0 z1J}_|6N#|x_~)lrMDH)KHdic*@6R>$Kcr%l2>-$gq?I zP2kSFw}n^O7{0$EA`uvnxGEIs>VCb~{MfT8Kzdj6MwzVpf=jyx2nr3y5EyOP)FA8J zZ)njdKD<}VsQEjH_x0hckIO`FGe_%HyPm;EXA=E!=^K75t^yneHQ&sp%dM0ALhgR**oYbFP>9pGh zBd)2#uRdwaEWhmE59u|=a>^7s#1x$yM_;4>(c>cEVKg zhbdEsH7kq8ltIlpy$sTJypABhZ%-auzn>#j`yB8@6j4#b486fBaMYq%?J86MNI*&G zY1R?1BCOW#(_Sadm{2r!L8N~a7y(9}cmSDDjS?a=jP@Sok{eLHt>8_^D6?Rkr`y!!P@`KmJ4S(_8el_pYG1F}~S!XF7mZou;cVO5ucOqrvFOViDtOCN{JPU-|ezp_XYQwD?fI*|q%aT_)Ic+Tg}_qzT0^bw)b z6sxr((};nzpbfqF45sfbR;tZEnL%lu+%Umtu*A;c`XdRjN!XRSR>g|gC4(=Ig#)C4 zM_|&3Mdc-?dRhA)5;xSry=g|w8TmgrLUD;B;FxmP2^@cI=hGhyF(iiJ;2M~voKJjp zzijE#Dqz_hOf)tH(!AiD_+uyiE9TOmDAA9yB$|QChg!S(}>88wCgwC z;ypOzSO-rl8m~cP`5kaLode|JyTvYrNlL;$(%waAB^LOWhp-(RW-@*5mk^2?WfodN z!+gYwNfZdtv*r6XO|R>1XDMfj15FrDsXpU>VRdR*~6%0^l$=ZA5{XX5f7uHh_N^bkqGRLrO)8GJ#cp;C-wT0jHb+?0guDpvvF!48Hk z&!jZHv_L`L&?g8LPwD#(`EK69SxZq?Xz6Q{k$I}?BT6%p4V)!2#4ikHuXMw}^npk0 zVv<-$gf*VNyaA1$lv65&;qLGQe2?Z?zt<@eflpT^%4JYV0Bm@DKpcDqMnX+31EIag zJ4ttHg<_j_ZhDxq$e^ll?;oS7JX= zLXxGyqb-~*b1h4S^z`54Fq>vRs_?fy?MfVc|7 z6O1xI>?y2ck5nXn7N!f|07DOu5v~-?YyLNN|4PKA``w6;ui9Y{rfYVF`51f|&xH{KB%HwFVj%|hLj%mjuGT~01@8&qqs5Ouzp*dm!p??j zj-MZZMfNT}rh1L#H2FskMWVaUWDNTKUC$^3(o<<@hepeLA{KLtkn$mq-&W-PVV%6D z^@o@QpRfy1nyzM6Um*#4(C&Ytce{H)`phus_$m9u8Kp_be>R3JvNNxhbOCqnTV<%X+Zl#;s zO^9Y-YLq#0v?npdhu&+=+~8z2lbCsGlqFhDJdxdGE=4zB;}qJZrK6!ngc>WGOc3Oe zev81{%)yKGX$y-bhmsN3hyzv#8Y?FY>!G1Hli!HT?i|_&w3O{UOi^isr6Y|MLNe{r z&>iu5Ngd(M8Ct4#yn&eC-q7zs=^drJxp?O86ZU{&p4StfCP#JM(WJM`Z?w0ZA1|e( z^PN0ZrkzG5n$85izlNC+s#2^-+jwINX+es#es$~GqC=xlskKHZ4uY~*DM#U*tJy6^(^CyENq`TKh1#HGY*5Uu zgSg8y-MGF#yH1`%SwFL?$CyfDbcKq`O>A7f(4;QNM0td!10n>F3B3QR`NOT}YjniD 
zWqJExqS|(%-zVA_!yE{NigeA!G?lY~>kJcbH|QJQz%WweDalLg+t4U>d~&q7hb8&& zuD4fFiy;F?)&WW+Mh?xWc-t|>tr>o;OUmkEu6ZdYUYw>Dv#nXKm?l)vpOOh$M)ix} zk91~T>zfJ9o>xH0%Wqcg7yOsCPz;YFaB>oqi>|KM)V3P6Oe13ci~{t({G+KO(DFm~ zvPyo>N*S#hen8C=J)RD*OO}&g;bMs!F8MV#0O)mXS~%&pig$4x&>aWh{UB2-v@XQT zyTd|(^)Z~{FH5g2)J zq|4MkvjUba))SJgD(|aRu8q^hOd)gX8HnGVI$!I3+eZV+i z3+?_cL}+c}v*-BMSXKmrw3I0xuFPUpMO)5UAdk3;0d<7ou~|reGyfug?9`6j5V8?4 zN=U&ns-)4)b&Gm04yZ5(eJ=}YdG1Rzr8cMpQ-3cKrOFe!EnVz-PNQ|_l)Q3? z-1fjBb7ZFg{0-&lSNp!d+_UASRhp&N+*>Nd=CqM0xj&;Uw0F*w$8W^N_nnXb){9S~)!VL728wARguL7fFu@=8 zGGQg+Tq&`F&!6~P`tQub+~l{4TWT~mE~SIPs=eGKldB6R+m>W3r4VVr09MxHM=72G zDm)g!Zke~+KR)p+^Ul|Xx~>(zi38`gQQj2jh~CI!M}}j`NQp-_F5eLPR@ z;T_J@RslgyA9L%(cK8~N~fx@j)R4X z=-x!bB(d>DpHW6y&7?+>wYUJ#gKh%e^Fmq>$ zP9@(>I1bKc#B<~Lo5f3H?J?EQfP~e^k(YTP&j>_;Mt~9lVfd4D@#rt}Z)`OBBV~&#QN6>jcQ9P8Rwx>!D3B%J+cmUsgV8Ix``auHA zbU>CL)N2@H1B5e7Q3_}~VZ9WjMrTAJx4Q7g_=+)56^aEDdMMYFFV`1oj&q5NXNaVU zN(Qi7s-~P44LIfKznuG8_*WVLK*S0WlOY)&UE6iY9{lbKZ-+qO{IRn1Q{s=s&BYXX zHu{7cj^AvHtqMyF?99I_TD0}De`XD{?_F-cZ;i?@nZ5-b@nEirVllVMN2~~jZeh(JJpsoG^>ADyS1w4Q6%LgD@Y^|$Nydc z;UZ#<{`Tt6gkxeF=_Ief&a!+|=-z@B$3VVOhhIh*rM946sqO$(V}pvwx_Adro(j14@;0GbWdpiP4|tlM6MKCA zCYgRZAZ>}tcls)|Fg^N@t-9%3@RG8zmKd5TG{5o1O5w5^Max%qCb@=Vwf%02%`F&v z=4A_MeMe+10x`&iF<5Jfx)gf@OO%(IL{8hAN5;}?oUUR?E1#CeD_Au%X%!+x69-kh zLoFd%VV5^1t9_>{dtqPCD&tuc-{#Esi=O@L`2^Q$$v0r0MYURa>d|#9*hTeoCFt>b zx2t7;BM>QDhH>+K1*y?9{#Fl6Saikg9t_Nw)88MGjX{u5)x&3I%~|5wHNE>YD+SHe zZ7@rdxv0&-PhIAlHH@GQwbti>g$v#Bc|$?VPT~s_L&wdp$5awm$1wy9oOtAQ?$Gc2 z+H>6mHCJBATD%ty0GqHQ!wYz!2ePWz!ZH&epNwy}~dv?P` zdO=l8`^AfU_Y5A5_yeWUa8&hI@C>D3*%9nF0y@uBP8{lilKaq8_wLT?BJ zR*Yi67)BI9l?(+cPtwtpziHI{fd00t{yz1{xr}+AoyNhcWBvMzU?cDPtazw}ei*}C z+TKz71F`P1?xwU7P2q40_TX8)3(@L3F9naz;zxd*G#iZtPN8`HoQdVQXh=fLCj*Ua zx)TZPHQwy6_!W*hdejs%>ayBvnRCCkc>C>Kut|uF71d&G20N>ME%JX}yTiAGBU%;g z{U)pYmp*Tc?B;1c?XK+jbCR@}e@3m$%RQUrj~;xGS=p>?6{67>hqd(Bxd5IfOseQ0 zY&H%YC;+u=zUbi%dF$$R`tk7FBSnYfFTwzm6yo|sv0$+)*f@6YnvXC7q09+(OUHM} 
z_mlKLoT-ItbG*(rexv{kzjB3*v4sH&!sRix@My9TpWx*P_uQj$aVld58&(Z?h_;?c zgzpl}UShVC&;+$M7IP6|yPmf2*$Iel0*Lr|y3tgpLP8Ss1NR-QTIg0*|75#L&f9n& z|NW#fuv0ih!lUEgP4e`FmN}uREw|A~LK88@<#K|GM#5)cc{s(krxeUm(W<@P`0!z{ zmLQC%i`CaUAj4V6yX6;yh%xL;u*@fEHme*}BL7<5)E2mK;I}Tx+GcA=FsB zbo&eOMtG*9RQ}%$4^eT@wuW{YMnn>+=i}BB>+uGfF}d<(&8=%I+s@S*GM!XVP&SGRTK$DS!kI zZtnyR*bUGZ+Algb<{v7-pJEoB%KNn3>MUhxAzCJTys)NMsI?v?0M(z(OSGlQo03j) z9z6#~lnP7hl5)syUcy%`MV^om3UAj9VX1O>N|E3*i3B*#fTvozFDA+B4+VTs{ZyBQ zqQFq-A`kImTP*_fyc|tvG~GlCjZiW&X0oClmXp3%SWud`2qgk5)%_{d#JAWV)vHAv z`SdM@@6)_@6(DXQk)O!16tG!FBR#k}2~w@>Y~;1>&Q&owW5}*6YY>Xa5eV3buDRxq zST)nI*r>F}P55pC;mF^;4Mpidn|OS2gZuCZj=BpqXih?!Zwy$88Ze{8%*+5Zq%Zz3 zTxhF3A_GdQa?~iWSXfn?2AVor0nI70sGEJh*3F`hK%-+*m^R-7bi@|`B@MvBZvX4j zd~gRg)^E#D2Hrq?m>={>kp!`p9{RBiEUg_ov)n$>{I{p8)JD<(JQc9q1P3d^r#1s(4?WJ*Yrbg(Ehi zG)yI@hykf={o(#D)_tr{zxd^NVgiF`begQZFjwAj6q3+6fBAMwQG*BZK^q_V;kwx* zO$1hFx?^;x!@CCRK(I^_TZs9ni4#{rpV8{d*^kr%ors)aVe?RY1Dg;z+3oWw02&fdp+>-*+IXY1Qd?4fF+>a`-$NKVb%) zsz7eR=oBa&^_9)`aPW@Y0(^#+WGWD_0j8f&loiy3AhQJF+Y*04#O}~^!eD)LiK`7YU&D%y761E$ZYlFkCqU`TaAdeB< z&k?>J`wb3(O_HZDOU$t6il9m}RcuBmHfP*f^?dr`!Hy4_^q8c3OUJ?U8v1vuNBmMW zM)p#tMasnCj;SUH33EcAl*oF@*5s6{8lys|a;cdh9?098_$56!U?Dn-%nmGYv zi>k&hC{|obk_czg*z|_Wf~%$mE_#N%yVRtT!VI9<>&Wjfifppn^EusWN^`Fy-H zS~rE}D)rqJ=Xr9zyW}}EUiBk$rL;0bh)+|Dp`D{kh-BP0AadEi%>PQ@`Q$l7 zG|qBvDlq=ogye_fi`H!%-x`1#SEYGv zMXh616tiU;B=?T_lH}Lp6CSXsAXL0U6Y)v=6PBDYqGM z(euSSy?-wYC%y6+Zx4@q`IMJc9!U`%`?AvPJ&{CblvR6QAqja z&_VI@-rGucrH-+ueYi!Il-cj_-dG%o=iy$8CtYqdxC4% zV`8*H5HPo4`1%_1+6$XHLYQa}78XdBFT#atU4l8SkL?I`qMsH(oNB5+BvLYJ#j$(s z)btXkgYogK4@zrpp1FBC}CuEm3$9V?q;t5~e;FXGiU;Det(ctnir>i1Q5A1)8 z5>-<>tgV@D=t}3Ghj4BoV*cj{6<^jqoCLjqQMS8Aeuv}FqGbG`JL8+Y&s+GMjYugiakD1KZ4w+8DVKmHcYY&eZfaFOuePDQ3uWd&VSk z(27&sVJxb;(M4uWEDPSY;B)tTX&(=3Zhk+^ZXY6&DO9PEqs@#R(eK&Y{RHsEaB5Hq zIhgPKdxam1$}D(C@r|X_#)wXV?w3K4n@ex;SGe)KlNdH+bH2M;CUpk7sZIBkPzpc{ zhZ_wbZVZ!!Nttd;DV2e(9g?dtK)!@&lM9PiW>HGJvoMLU#76OVc3^E582y}Lz&tXHDrBGMK!i}BxN}hgI*cA<+|5R3ydhiF 
zy?25P7(_Ewpc^xF$#WI|Wxm8HaXe{Yi&;d5eEe5sRbAKPC7)!MHie>YBKf7`@YU(s0K&L z($jKWhIrZy17vbkS{QY z*uodoI#8U_T#jMAe~r?&N0F9ui(i$QlGJO{b-_5v|{ zlyn|aPE$N_6M7;1#S!Mbq24%=Q-loP(l#DznV3sQ+2bcbW>~K|5tKC6w(*TEts}O@ znpwd@XwYuW5lsCRTf-$K<<@5Ikq;^IvpBL%XwogR`djp$k2q)JgTWL?c?fBI?Ba?o zL$|lUud~2v6A#mvQ?Ts%V*&EFL1s%UBg3=@W$(~~jql_tG-J!PpEO#l23@ouQ&1h< zgb(v|^1n}z{?_8w%>~Cq(I@?t1R&G)isMBQa*K9;qEn7F`chU_tq~fnEbN5st(bMR z6NqbWp}PXMZ#EI1oH3iB5A~O@hA)!UpM7N%omQP3F z7*O#2Nx;N=-$D9=9T{!cSJ*NO=1T5#>huXVTme&3fDi)T4>B2Vu|f%s@A{S6Sz64% zb_k_D^`sBiP8bv3m$~^OqkauD z#cE_6)dR$8n7fVBN?>$PrsIi#iWs*0wO+`kmI@1sNM zeJm;)t#lP4rs5oDF1{P=3=Bef(2jBw5ZGe9)rKQRXG$Y)=2KwKgT%miX;QMc53zq6 z8p$`3wd}GgVyEcb)Cw|S^CIyzy#QY14qKFtRHj8_ghT+(pKUh+1fR(*+a9RfuXx+L zcZTENI>)2C&d=teH_G20%m0I+sH?AUwAn!#=f5MewY7a)|LAV)`%!;=%au9p)B5Ab z0eTVj2~`5kz#peLegZJeb;xPxMP(EWm+)KyYbO!>1O}R4UJ64E@Wrq(?ZidUppRd1 z1V2z2XZ=J9rhs&P5J8_yG?4q{Z}>@kE{J-wAW8@iN>fUq4}bGLrs@eAiW12fM>JAt zhsO5&mxyOqhkAk&y|rlP3>ZvKzJU!#QpVYtCQr}|%$$(wLsgX%jso}awqrlSr1|g? zu-3Ufst~aM#tKT@2TG-l*9gLDN(2$m7KKuf)wpnl53`0+6ce(E3S(kj#!nu>ta9(7 zXnz_{-Q*Fa_91_9NLSL+3*vfE4p!xn5F03{3Z-aQ2=WyWUf%o=*$zy*X$1c~p$PUADVN z#J5$6C4LyZI4Am~N(@p`f<-Ws@H32f?=xkWDav5z$hO=ch;bNF$ty;db&eE_Xq=LG zAOe4%VSw;jXXs~qOk#07g>60=g|45T(FsPd*j1Z#@8nH5pLksv@rt+|fB!jZ)!zNq0oxYwOPe-*}G zZ&q0zghb-9Rv2XkAw(9inWL>c$v&w7)Djm|#K}T1S`h0#PdKV_h&#{0A%288m2FGK zAZMU`WF(_leQSofKgD)_!gt>fHQ{05$YxK?A7I}lqywB=@R@X(nzGkIhk~I-Ih3f( zjQfo~Ho$ldn-Xn03*_glh#?h?HJUjS5&jN@~ zK78=&H}UjGsS0KG)H;&%0+^dl0;(`G8p#HM!DVD0FuhY$5fL$io#O9ML4R^G36QFO zW*sm`L6mAVPY2-@3aC{1y;CNIl!RU(R}AJ-RU)#HtfCj-&K6fpP~0R1un1GwmRv|= z?k`PBCTU8!qTRIy;R4E}jv@zvc*2}ABBrMuwP5r3?JYH64j3fS6P+~Pkp@G^f zM_&vFXH-ndxbNX*99Ih_)3nD=glv$M*Kr{*+nl;6Ykao5f8-O4$Sg*li0lQcEPAFH zg4)ac-Ib9)00*2&<|*A&oU>0m)KBXQBBR4ph)ml>mwXur=G8dr9Y3L zzkU$m#IW{D02l^q`nurZ#zx{2Xg)YSsMgN)G>eqRwLA%P$apeI97RJ>3_)Kr<^@Om8ULi)(* z&x|-FBJ2c=D$mX!M}$4;R@!8t6=)CN6Ql!R&{uu9>h5B?5TH5PVqL;aByGG9jfgav z3{-e_H=gWQc-9jU#zJ~zdCH3V;5_Rt_V8|;hd>0QW;l|YKeYMAT(lEKeOM3*!!L-O 
zS=*5il28K2M7RZ_cM;0(r_@xapm~w&Kar?>Cj$_0YEm@Y$NKpRV1K8{r9UJ7?uq2f zw`BLCQ$ARApCfbjds{#b#xILpAR6GFROuA_gV;Eqo>b=+OXLL?$Ffy{`jxqF*YIYe z`O)mwQ!e?%op9zyRmy*ABpZaJn zV!@n1@Sc22*>cIyZOT*Pu`1 zXCoE_NBlx8F^ihI;aC>iWFPRfq(${#w6kl2xkw-Uh z_%^X`P{*S6cHs;-V(RDp-^2Zv6&s-%)BU4VbOZJ&GEvF*5iGk*Qk#@12e zHTQH~GiM!@GKIK-WTxU%lxAajrR23ZUc(}w+2F!ceh{fns`OlTG6P4IG&cEM;F?10p1h5&_4`hn60;pMsFlQFbcx= zl>ZZCstQL6*XD*Z&UUU*J-U#91)pddV#l>pBe+#jDw5(}zC@4Ebwv4JsfiUMaRV+d z#gG5i&ue=dUBUiHf}0?P@bYZkQ4*uFRY+mHY6RIxd%yRP455pG{6DS!byxbo0{)W` zi^2)%zZJ@T5z_wusQTFSRp@;b?l+=?BD2`?9oJ0WIuT=Vr5};oxI(1*@AgMkz-2Am*Go`@jl2 zG5vSEb(5MNzlaxiy}kI4q(5QRQD+aa#F11pQpxZsi_?thr<+|e zD$++>x%R6bE1xM4x=1qOvRxBiKwY@$ce@oy+>a}0gRLWE9iZuDy>*IBjqsg_U>+BVL>05=teW_UWlil8TT%h zW*lfbhHnjqq_x0=m?zfO&locZCg!!wE<{>H&$#>^=L@dvw;$xa4wxqz-MhuxeLi%fMNro1ztcpY>c;;%GK+aXRi3#nICrC+eX9x*aQdmWI39Y4q zF)4a%(M~=u1r2+-RtCqM8WsEhCIYI_MuOAo-u8LmbTrzDun<}DelC!kSD`UOBnbjX z%!BU09n9{wAcic7zYz#}$=|pCJf~b?KWTR(m)q%E@fO(XgBF1nAVvVr3$eYRDbZ=@ zSdxoQ;qfXMUww&~Z@NnGDHKuF$Q1bIWSHH@`;OpqYzsjd2=qqq#LLoxOUk8$1|qJr z|K!iKf8sw)b>btSOJHDEXVKXh|NXac2i0yp;O1cP4}?oT!TFsMx2`c{?m_zgj{N&R zjeI}F=0bACrqQaa`z>Bh>)q_m{+w82W;@3o3I5oUsO*v+rB1X@sA)dte+W#S{;0r* z-TT?FUnM7ck3$|{wp(IBjE5_`7(?84M>EIUn- zE%3)(+4G;meTVQaKTi536V-Vta>K{tX)u8*w0PtsE<^2^9}{_bdbwR%P}HTrWv=96Z-gU?Ytb9G~mL9qf$ z7M^^WP~mEZGKXeXw*?pY$;vmz{cE-0^FibXSn+x*dZXo(5M-3JJ5^11PKC@SG20g? z{R}MCc+Dc-7+d;sCTC+h&Et&-c{5ATk!(piQVUKx-QQNY8g4+aP`{N-%-pPI6rZ{z z+^%%kjtj|m6%VVEZ8KIX{Ccj1rvB~~X7my&kz(nn;Du9|4TZ3O(%V)Q$o?i0KOnD_z;|st3=+ z`IA4Rk-pOnLXEn)k<*@5FT6ghXg(`zmm4p{rzujh%v9QLE1#(ly3GH5@Z7=}(WLsH zt`!^`?zUsPPeYxHG8@%W4v}P>lY{9zK@2gW2HT3Es7asud%i|26mCo597IRXrN>86 zV@3+noQv`aQbckn+FSJZaiH1}fX$S&Xwlq6)@Y>31T6WYz^EL^GIEtOfeUtH%g7hl z1R(ax6r#9H15c1#Cg2hTh5j$qj4wVeh@FiXUgcZG*mwJC4)RMAF(omG&~43yG@B9! 
zsw6yJ7{V@l3&OKT=NRvNT@baG2$H4<{yoeUw+9ZU1e=ydcn zp0W#+>{HrovDVh#{emW0&39-q;s(rE`xbPCSCr{Z3h~FZzOC80O97CZ;T07bFkqSy zvELa^F;YhVixgA>Zx7Ibu(IsfLJ{%GRd8TcE`z%N959kasw&zuE@`GH{7_D#m%)zG zlV5*w@(_xl1DJ+fbpYAaKry1&zLvuOCLy6F7tL>e1FM#9b-}DHFMtgG14W}vn0J}s zm0C4h!5>k3BUDL`5ROvdEckj4Zaje_js1jpn)hh>R2~s0euQ~i+q_Ek>%dWlOvoQx zS@_WLF??}W!80EE1LolAj9rO@+nCi0M4Hl%D~gh1e>Jq4Wvc;(|5Rl@c@tgUv=-hj zz}C3I)Q|q}U*z}rLq_|HCXS$U36s!%LpYKig=ufKUMr-MSfQ}KYk4%&hz$N}jpil^ z+fhJy3jFxYJJe%8EoFi@Y+r%@c|rt;#8pc_97b>6m*ub%g8M=0BdI5kvDK6jVZIYc zA_+wM9__O?pjdfxb)J|Dae!tXzvR^K;T#->~)vejQ%pQ|*sjJ(4^4-Jw7&Cl!VB%pktb5-I zwmiCQi-?bRnIgjX5%=`6_{bar0;H=-`M}`P!Ghq&L>3-`9Fj@5f%P1@dckaVq{w9` zm&y@k!=Rr^ly8Z@f_5o)yV;wjea)Om_;8W0p=9iRev(L>yfg4gO6hXc35x6bwD|jr zD);>!|5{4I0erV8`*`2_PS8m%ito_(1L)GsvJ{#^$83mB;bW4Gj&0C*h&L}{%pMQo z3wq(TBa%!>PU7GUpA? ztb{T&^ge0+EK@2bj49cR&zIhU=(+V}OlSTjVFjV^aZB3&3R{foi^|Suo3DRAO(h`u zPqn1!toPZuxU<&|fe}%?;!07h+Q`NGSw%y0D7P9l=!K0`7Mi;v%H8p}DdT7|V{TPW z5>JDa;ppM>2hu%FqQ4r0F;O7Aq~`^#<2w1Bu}ATmg?ojBNI>$Ri#yjBh`a&(#&lqL z*jA6kJmShcbZ-7%wu=N)NqId#y|kSphUdOIZ;c+!Fwqk1k?YSG*|hYIc|?@@u(?zR zsWB@_HTk}vk^ZubHZPl=Z|Rc|&Mq@Cm6qZX*jZmCF)>URW)yy)kT2|#tB&A1tF&Lv z{W_Z9%Mt)k(QL_8D{E>dz~xLcF>?uGxzvDX3t$df~5N8+>m6= z5@M;V_p+!qTg0ud?Z*{*Zer`*K^Y9ai8ICF5AJjOqR)!&qa_h-hjc&E^mM$smcN%D z$__o0%)z4FXmo*-}$ommfwc7ZH8^;AZ<`P#H4I!|`R|?IcCRwXplrq>Ey>lk>%f=bZAXm4M1p z&@Hb5`J}Jt9*&M9?G|Rs&z8l@ixxKAe3_1Qy{(@7K#*nmu+3N4*XirH|9jOgvh$Zia$qX=mN1pUBl{_Y~J7k&d>OCy7$uV`oD*6yaOW2OYEN)yCnoH^cak5YDxX z0kvZhuk_OI;7JV6iqX~|Z!D)I0K2(2WI-3(Ai?H$G{-A(>ggBPJ4uvl&N3a3WMw7F z>OMOX&yJNhr}Vc3U2)CELC@AkiE1v&v(bJ2<(>1FW}+UW?b`3l5b0-GPE7}#Ec`AF z5jB8an7H6HqaOza611^yAZ}idrhgI5y$^P{$K6{sXE=8tu}0iH%H?v4Zoag?gaik_ zk=r>N>Fc4pfIwg(;~UND<0R11k^|wCf79-5&aK68;tR74!{`O}LdMVU1xdbHRI=eD zRZpG7r>J^4%)%E~t^Okggwx4#2e$!sXw9;_h`L28pN%b~7`AWt?7zJPtB;GH%X(p8 zgJxNA6sxY>m z)5*8QkwOWm%FLuCH{-}_f8iy0Ii-AUe?l63Pnz@VH72RCf3c>w|IMRo1v^?Ad_}pC zx6x!8eC=~+%YaZG{cpvZiw9diJ=1?h;l@HWEYJ#HPy}qsBlCo(RBO2v$UgDh+PM9A 
z=MU8X<)>^IRx1)7tV@loPJ2p1S&QauXYxsPT?iV(-hFM|-b&yO46rh;r`G z-(0@G+fX3Ofv8vCt!bUK=cOlYw%=9SKQh5_mA1HXNJx_IdTE`eP!gD{F7Zihd-1W& zeB6XqX9N9gDBAp#CnuAF9$oTlTM68@xdES4rAqi_hv;|Y1CmQz-O(pMXx*&Il!lg!E(?9#13z1F>7AIHO1|% zZ2W49^eY-3*uaqZo|016&@N~QxgErn2*nPXDA3bZ9qWcx*AB^8 z5My9gGSxNAy@r(KTJcFI7NgA<&&-wCY>gJjl<>idc16WoUs1)`0%=IjE#6(8Xa|8l zrSG`O8+Z%SQ2b%k;3M;yIdxhdG!ApqXt>x}g8WkVy0s@JheMU})vU6;$`h(0?trG^ z3Ql=)kRFM_tfivs9q?NWx3x*c%8xgNIMG{tgc zz)``BpJ_E_)YThYc2JYTD8!LDa)VVEmD#{e^x#S{X-syUIO%HKY0}9l;W^W^@`Q_7yl#5ge+SU5(QS7K9j1IZd zQKBZK2BV90);E*qtO|jGvs_PB$*UU#KD5R_5?d!ea8p^dIHHGZa`g+-DKUzv5$@Cw z7VKQ^24RD0LIbz7%YgM>%~GbJLEZ70z+K4ty61H(Q1JnqDKh3cd06 zB{7uY*p#$iZI<_jT!O4*s80+xLE`o_>sl}vWK(c^Aa8rme0gf&APv!*C2t9!;_xa5FHrWKG>;>3k8=oq3m8o&~ zyaBjKwRTS~Q~c(^i8qfqq9PI% zE{=iB9pZ$3nQD-Gn$&R^e&;SV`RjfeI1smq6Yh*r+QBhERE8~H&S`o;i?<-H{bhLi z77l9fo~7;k9Pu8^#dbk>|7R>7RsnHh;@78@_l!5ZTtHJSsbK$2^?`56C8e`gC`Q(+ z8VrEfl;Tew<9hR3gEN$9dp*<4*yAQRMmX0o9BPU1+%__tWfmohEI!#!8G94N@ZX{* zj6NjF4jMMd%`*SkSxoX9B`S7BiNMv;?>NZmADm?l*a5DPeuV@uZMQ}&@N*d#M{mXl zC;SdFlFW`7*WZ`fmWAadv}$OVE7sa|azLE}(!QM2KO+Bc`}~~_a&Cuk(&)<=g19Tt zMj9#bbWSq{9hIi1^)b~WqMuh(VhaWYB^Mou8$=d-5#_|D{1uuz2-I)l6ixENk@w+8 zLZ`%({k1Y?zdbE&M#dH$wLi0A0oglDoj^YULXBv>Kh67dlAGHQuiv)`rU?loy(`|+ z15gSs@g5;+Vei9^7=uwq8b>RN>L|*>{i;zm+t8o{SSwI$Ca|%M%lK5BkQd6U-9`FE zV<2KQ09MY^&^p?Z}(mVO&a+@>j*PDYAWik6mE3IH~f-jozmlI_T{ zJaqg^8z=7~)?aUyUb~P8lEq4GF;K2i)4p<>Y1a2q5t*|anK3y`XWdV?_VkpOi_~!a z(oqbjVPw&twJo(D91ciM@DDBTH48Nkn}v zGU799n=b}uj5+G*826Y8O}HaX-LZt*wJe%hFjZ#R3)~jsog(6WoetXcoS(7a0C13S;NLZ6b4@S4H3>L5ksFMzq z3sh;p;T|USn|{gti<}7mhY(w#IdmLte(m#Az6=l)bj{aGh1knFm^?*sC$KlfLIBlAnBz00pa-01DQ+HI`U{^`|xb953jjq((ifOh>qg3d0<0re6si{;?WXr9n3J<(A~W+=(^>jC4L3D z)Jn-kSAyX|n#K=PavnIlrqon1B4{QaL;L5UnmE>rhtp|3?E^I6cNxKe1xJD@vRh-Z z6Uujce76*Zuy`DYtd7LD+$%P9!YKXBA*7#iWK=4~(QGvN?9{b;HAFH3)74Y zNEV|s?%>|=h_VzmaH{-X`Q{g0L7t5Muhd0Q1XZyd)_5X}&SJg}+U=N=C^3e=fXPoE zRQB;I#M^jg1rcRcu$*Wt7*F(r5l45&;CEB4285|jjJ40K2~Sepbc+j+#!t~;II;IF 
zGoGX?b=paGFLWOM{$E1ewO)J97*&^7=XXz#?LSZ34{0bn@IqJJryEqN12b$B38H>6 zC8;=%TYp11v7Rf;=~)t*@Ab|#o+AM??9W^}Di8brSB?Ni70Y#Xmh(OZ&z{fHBs6Di zVNenzzHETRfVst8j?cEPKMjjQc*4BY@|{~jMe2ntxU5F754p@v3QrYdiOwI{1$FJv z5&Xg3zivq*IvDgI6uoRpI}|%vvLC}@gJqO7mwDvctmeT0O^LXiy||S~9B4YvpuSz$ z*J%k^E>aBn=9FXp-HU8Wc@snVC7ng=^vOG}$}lA}<>iLXpM~N}`&D}nBk->GTZVAF z&Zju>nlF<-Y_3K>KgX6DZd*X)X4i@)mQkcy*s3Q9PNJKS>jQ{t_$B&XQT*AqAS})B zFR6jv80~^xciva0>A5xvwOW`r+ zr&N@vvvXM3=i=>$V;!;g$vVjNcaT@F(9Jr{4lzw0_4@hfg5unN8g1o;Kl~lG@J;|2 z1#-QcMq~#X2^R(lI{Eajfz5ZgfNXEI*1pp8QC1r&`esQYhrSfFJ>U1_54~1z(5ePV ze(9Bs%InI^l!Wic|A{Lc=Qr&zSf#XO>j!)5QT#6vTwQuP6x!z|7@1strjV}Zzg>+G zT~AGurl0ww7%42#zkQgF%Aq{4NAja8)1|e-%RR24=sIQNCniZ?YxD=TDfQ54tkEyY zOl6^+Wm(>C&Vy)at6I1Mrt?Rqfl|~CnSDdGazE?BpAM8iAhQ3U{x(bNZXd?-e_3uq z)-^3==w^pC7JW4#7)AtF2cd&jvKY9a`y4HrsGQB1Uqq7Gze(AX9R3fU-Z?mu@B1I# z*tTuko@nD_W81cE+qO2`*tTukcAmVyzxq5?^G{Fpbl-dXO!YnIyfE_FHMkI{mZC5& zbBFQQ@pF$lGa7M@zl1iQFFRX&w#P!=O6Pczz*c@7XPQqTS1dGjEPF(vEOPxO@{OMm zD=$fHZRH?3chf*JV7fV$Py4>0^%17`{=A6EoKpNZ4x>fA7^SPD)kNFwp$UchT0qepGkWx}+ zxy(eH2*+dOiF~=4f)<%kplOHj(r7!U{B=5LEIzQuEk1j1;bj`BgZY27|^5&g)>FY4j#df6rc= zzTQSl&w8Frm$7S+y54Gb(P-A%&|Xy}9u8JiR24Oj%!nCXmsr~&nT@{tlO9JjbM4-Q za}R;J52lZR_NVc>YdI1UT~{ypk8M}u6qGB56J9aFC)E8stV`82>4HDooDkv)L>YDcc^VV(__e;$uvoG?a`EL=??hi7p+QZtG- z1r3J7cK^Wnzy@*__sBGg+;NIrzOl!h+mA$wP$q2Jc8hx3tx@{-g&d~MX9|U@z)@#4 zx+h8GijJ{yac`!oL(&EpOV@W~>o%7$KmHEJ_`u@1)rWif+@g|=Q;hrF4ieA$J8raL zpP861r}0V#61iN9?=yY(SJ|YHoao<$)t0)UwP0p0NrO8++k2%9^Md7b&L`MNEFvUx z^j!+9-T5mFNnjy~jFO^}NUTh&&@~6J;eWrb(X1~yN{=Ix{&osIa+W;V4RGQh4S`pm zAUl`$MUW_cxU?qz)VxaYC6j1Npp;=wO#Blp6B*QsjA6Vn`#3}N`~>i3@k^6r%G8fx zM$tEQ=cI|ngyOpnY& zG~y4JP=>2c>X+Y1lpZPd9bta?;LJMSeAaF}qpT3^(kM`))5CvA2Xjow6KS5`zcG{e zAxya#{UIivCP|x=yg*Jc-uKZzJocSJ{{-)9SxO?!Vp?0@b2}%}q~QM?0IC$u->LsI zH?tSFa(|n6aj!&?bG=}ynU?b;`*N|LyP0;;Fg;p-zg`wO94M4II|`KjfcLNly-d%c zzNto$oh`peT$MNMWGhAyeH#dA%h?0NZ({rGx9&+o{QD0M{h&v@u#R}7$$aD#fs{#>UyF5rm=yZ~`YGByWk(Z|Q*DBGTg;_br}M4SEmnXvKWFMb1JDQBrk3 
z402F=u@p%o1&K3vfTS>GrO|JsW_gq9bduaGo4Cm93G#{?2JpA2ffSg_J1xp{AsM2J$P57PJ}_ z7$FT1^*V`>S{AuLNvYcXUnlrKyp0#0Bv`9Nt9!cZ*V{4Z(C|YD!buRGNk89}b*_G^ zvC7|rS=ht*xO5=%HpSJg@TG5g zbM4m-&CZeP&*a}m$reI{__1bqtn43(g1?4h?HP3T0w(6oTLth|GfRvlg%kKn*HD=x zks0wkrH6YB7?rREIgQw}=g#G3#D^bZ{kKShrP!jh|t3zF3d zoZ_iQDqEDh@j2Zh)2|Y1Xh}nwtT@&-D!F!^h>X$4(&S9{?%>AZ6PYNn@6!E#>1V?F zi57<0%}dVM?pOs3<&aOUg{eMwhPUohgyFfLD)(rlf25f8-~Qp{;+|B|E$V#nP1_dgmf55sJlb2R9TfW(@_s;<%E zaiuqr*vqA~-HWKJC+nsIq6%MWofxJ$(CyS(ToBdxS%76JVF%;k_1D$UXT8JD(=&6u z*|wphL%LNnU;z;`{@*qV20;>E<<7>@Rxc-TC69m zCWO^?Ykz+>dV3*i@l0NeL-b@OM~l04G>kEbrT_tqt;WeAxCd`<6gX@M|Nd;+>rL8J zFS_RQ)ol?RNcm84^IylyHDovnB3>`RdEoQQ%Pv&x^pV>!5e*zjRzjqTZCuWVN!vS9 zx9^Ne4Z)6SoDsA+qJmb1zH;hlI~jD7B;Rp-5WZ>Yk4o*3k=<`g-^j~L5c~_zpkTHX z-!~X~c9^O&mXAV&6>qoDZGJO*-iY*4L0pjoBuVme1Eq>1Mklm=FLtasP% zjJ;QWdv;@9%z2wwNC#)q$-FB(BryKrhA7hCl7+v#%Oy}_plrK`pO%1Jd04y%g@~h! zS+z?#w1}-Pe*&CiW7Foo=$bAkaWqIMKxrgVcb(8ufb`U=!JGTwp-SA9*OU_<(L>wi zicin8eKOrA@UEESHPThJMPr(h2dX*|D7%m)fK~r$Iz0!UIcF$Kvhv`7NyfDAma;(L zi71Uoot?!sz20z!w(P__moUxVj@5uU8(GXSq|omN^VG=8f#=Y1G#8@%+p=sWMvKJ*xZh*xuo6 zonBIy;6lK>@lHpuvAVnU6{i2oK1+BfAC-nwHoo9FsO&TDSzyb!ozDWHG&goN}`jcq#^86KUwid!NvBkf<5bm95)?A^*n? z{Bv%WJ*!e2j%|a>XX(dcuJ-PcSbPCdVHtaZLhmAo!&{b+Z=Vyd3IFAs?PdLsIe4r~ zXx49AlfPtk(Nm{pIHR$9A+OQ zB*2P;zv$#X`@mFSz`-7oPq+n^ZS37qX-%>ev`IUAf-B)twBeLgXAZfOBd;B#%nj%} zN8RulABp>%hVk(X)X{!}&(W%bl zU{*mSMlu5Dy%M8Ir+u4bPh<_q;{&eH?SG@4VW?1=6S;^MFJLL1fSSp|wRa2zM;lkD zgwTBAQZLtS-(a}<8>3Ay7r!B6(70Q9;&HvhY*kOI`z-4|fXYiV{$^dGpK&6cI3iPI zJEFir-=7$=$EDTolUGXmhqH1x&gzJRnvzk4F703errbLEkKm_7;JkJpJwjV!?`{x$ z94))UIXhB4T=&S9IhG|r8hmw#cB#zfkJ{*|L_(B-vfVjz>b62%YN*x?t zI-a{U%E(MSTj<;%95x!xGmR#X{%k?bE_+SGlTvuM8SKi*)GZ3wV7Z|(dy7Mz*rrNt z7bCNru(Q_05Oc9|_E5)=#Z*BB9}ai7*Jgrx|6iTMF|hOiWJcVWG%{*`tHc^>d0m~| zu>`5nxarh#;&S%aAM0zQ$ZjW&)-^nqocEV->7??NL9*XcG*!A)^RyXi6(0$KA3t!k z$RzLaa>1|D+^=xomr&oGY}c*bv1FQpL8X`Zq`^4pbh)7i9ujh$0ovd3&M=!!Bdzo? 
z)`qL(ZN!D061%;${sY6$CQ)Ka#G~X)&`H|S8~5Z_UsH5HC9^`L`_nA*g2FWP;QGjN zYTfnbD^cS*@x8}%55}zXgRv_EF)YDR0|{PorP!~Le(I#!4c;U7*|%%qJsHXUxB;hl zqnQG#hH`P=llXM^KQwZyi?z|FTa9MFaVJjY6p2}@41lCmrvsG$*bvh1C?Al>m0`xG zsM5jHpZ95Eh_dPLOe6@>>%C4^PU*G++}zPb59!lSWP1UC|4 z0V8Y+%8+o)TKVa5jN;K(*1~kqgs1{)i7mr`Ta>TILT0|2A>D5RxQxf+#p?%u7X0s{ z<&SrPkM51riIo`X2g~ndOWW^&@2_!gj7a7!v)@%bMJn*Bfi?Wu=-y5wwlN&|Op&NZ z<+Aw~du`<0+ncCm0XLy1=BstSAH%)}KWufh9}bS=4Z+ycDKuCM99PHO+Kbsg#VLs!_3bLRq69JpSm(NBz}EB z2sAjjGcmOFxw=Lbi#nE9n3ZkMGkH=a1QlVIoi$+OgpuaVt#?m94I{YKDD`!{fZMkU zx-<6jN6ye^{K?>g-KPQGo_Em9U79AALaFD}ksd4lSxye?n$&n@Jnh9-d^s?om&E_F z#2-EZ_ABYBpH1`lY%f*kY+Qbl-Uom0Z|MuS=*dcYoZvy!>ggX~afp#5z)R;h?f8yw z!?TWVg7$Luk*oFq+_iVnbFzAuA21|C9O@#m=9!Iok@=+AQdP0fnR|A(VUA{l%5;W8 zrKeE_@hUPEcAQ-h-5<1ydTEc`-OhIVOI{;1NZ}xGJ&+M%tnCVq@DBU^fmQpduT96N z)%97YsICx`)srmVohTJ*@hDDhu|CIvFeDPOEMnw>lOdc4#F*hQHSmn;`D#~rVkE6o zdtxF1CJcgxGc)~DkhewZY?Iu4Y(aQxfEAd+T--nCVuz$!Ibx)zJ@nxCeH^AeRIKRk zpUXvI+DE*^8HJY@H?(NkWVMq~w_zxSGx6sb?oN!t2~F=xCB^ohiVP z-Mw8uzym7cuV@;cD!~v!wf_*Gw=(E-PseZ0msLbeACsU$Bf)}01qr9oi*+-5EAHWbXmx4IW&>8 zlc4WF7y~*4E{f9fjE6RO+o+%`A^=I~7cK-Y1P%}>8Y+W+cch>At7*kd)XPKGN-uK= ztOteTkzlhBq97`w&lJ2(TxZaSO~6(>9zGl{t+yW&?##J4EF2CD5GatsuXPu@ER;Ip zN_;JUMtspel5CfDUuC;rFiHe^@E%U@;NFvc=wtxNzG}!FPPLT4gFE8(&KF*0deOIQ ziT8ayERYHPOp`*9_EJM|hq5n3tX#i03F;b#81cLF^-t)|JA7B$EtC7aJ|DTEcv3GL zEJw}(b~w90)XW~fTj1l81YVv)etVanaCkAWqpi(wqUQO*>5dKw1x6>t9Jr8-BPVbT zxLAdZaQaJz(cUJHbJ0%^$73EkyieF!)oMJP{bWU5y+c09(R+wS7zqW0nj*OC24PW` z<03}krVIzKa}E|pq2j-W>#&wwD6eZA@1O7?{_$IHNEC!gIbg!mS&jG7Nd{3IOX_;( zv;zEsd(4|ZRKZYAeu(e#n=PY{o@iHB&X{c=ZXo;)P#kIKpHGOkF6kpLz^2E02YdWl zAI$svfD`zPb=K#t-C==_L7rxuGTkp0^uN5L944f5ceYJx?tq|D5Khc&=Kdid&Hsdr zFsRtxot!u#&V~*XZd*ucCB@Ghxz;vHRZj|Da!Iq1+|<%tfBq}E0#kXy-O5I;mYJP} z#1M<@eA)w-&-)Io@5NM6!Ug(?}(fU8epiAhU5Mb~X zblf6wo&2bS!385DDLu8?#oo1|lBbFi(JVaHF;;dJD}F-FP7?QKq1B%xPmyfc-VoYP zd-mm&34T%zaaG8dD$$RdL|-`(cD;nIYm?u}!oL6jR?>W7{d;nbZR#0FKnT9MU6uVD{SQB9E0{Q&L_Wn-SaU%wo5l4g9Z6|!9UGrIn 
z&3upjOt0f&a)C{Imixu+{XLXpcl{x`1)AI{Gi->3*sHtFZ^z7k8ffEm`kwrm8;bu0 z2`Q)AB%HguuaF)1!c}MQ5XbhBa{J&r=&T(P z6LL;2nu}-X(^00^w?Ow~yC?kA;r8L3;rK`8umdP6RP(O@mb`zCiI28h2hld!${(hAA;cUb|4?Sprh7Qd5j!b7u+1_IB?1 zy5X2d?%|AVDo696PddV>FZy~{DfE&slYG2Es!SW>u`#n&7t^{S7V}m#PuZ?5RO?5} zearhD-8Blf?*i+uj@Ch+R>~jN0ALADKLRq27`I*6!!1_ND@gqsZe*TZuzu`^*p|m1 zH;DJQ^^R>R{i%ujm0&XZ>Dc&iQF$_;SAV<>4g4v31zVF9d(rTi3%lQ(ev;jdGWTES z9glC;K2Dm#8d^Jjt(8?1a*WyG$j=g##5*f1ceMREgLerqbf_fP$&gWdJOW@8p0ok% zf>ju1&CV4jes0_{F2quldlV;E0j61D2wwfbIPb$37mFwS=tBdKQjlao`P(9tcr(2s zeB5%Z*rWuAcG(4&6H`EeD&dbRu-ecKy!eDCpL@nTB!s-yQAW5yrcry=X;!-oCu0QSskR1#E=xg%5H9x z{oj3~2ufZz9K%iazi^Ngc-=pDADbNO%fD}a1^WdW3bai3!}$sUdW3d*A7g$MgxwIT zedONdWh1OR&g~`cI4EFpg(1a4!n-^}-k-4PeD}(|JXwNe`QGwqc|VPN?Vpmq=H2B( ziT=`hDKIa=_Y4qojIy8*o}<|1d>Tyy+A+cnCthia-v6+w%85cdKG>d)hLasyVZfG) z`AO3gn?Iv`9X>;cFGFik5J7KE?*~Z21Lqm^F}4maQ+1BIy-d-Lbw94Nyq|DBrsgG& z_OD`BgnCh=8SPJ*xT7f#WwWH^gF`px`S>>g!#+Fb{2zPM@H|ii zRd>9cjZb$;Y|#JH1#o=!vfGB}I#=kkXl8WbC}*b~GspcW$> z4)W%WqTO&)JPbvuIPgav53X~$Xx?JZjD04F)om||0`R9@#?d#lZMXPrj3xakF?vKo zBif~NYRU&>J-3I0No39t2^GR=%vj?QJlX|e97Q}r2FcmtB}5s1oR7OHf`&_m8DEe0 z=xv158VFmXU8a2_uN2`V9X$iHA0>=22Yye2?@#PJ8|oVopp-V9G>aXajO z`++K#4TYj8a0FV09lW4C)bFFiYBcvdP^es}b*Y3Y>9kd`s1Uh9GP2Jk-9ovajY*0S zovb`y9N%xYMCW|g>Iv%vyX3-OFUM^}od1q-;^;tO_Jr*e?YaChB*_^H3Filc6MzCL zg5-dNq|E*+`b;5Sqb?*QXjeinysQqi%PS^LitquFH|1LiHc!s{9X|8>hHh({i3(72 zw>JhmogIyMKia(le}za|69?DouH%~c@KooaZLtXc*d%M1ha3hrDie%_k2gn?bRt?J z-zPcL56y25=@DwuDb+bE&H|2E02AO|kpHW-HsGOmz(n5y&m)-cEE~vJ1bA2s8Ejw& z^h-X=OKsqs87SJL4)9pk_v_Xlk8~rRvP&zbj2h*YEuzRF#QNfH{bI{KvD>qjZ7%i*i17T0e3p5pH6QXiy{DWL>yF+SUs<*I>``LVk%CH-pK< z5vblC0hv8zNz`)5%-NIGQv^IEV&^V#f&vPn^MzmHc3+P*_LQ9lm)**vEt=H78niYU zQYrn{>4{{QjnugwD?Akk8$1qr0%$>%9+kfEk9STw;h8!8wRezCq3Gcjd2|;9533OS zp&>6gJ=ToI%yyB6t>cR7t?|53=^W^vV1(}v-1UL0#S`cCq;>4hF8_RWHzmWDMDsmr zUM;~3c5c+`YbI23DA_0^C+@WO%8kv++5S-c3C8%yIJp=3D15PIk?N(qRSYv6>U6Cn zC-Nv@$bNb;$J{l+{5*=$AwHu5n_n0D2!-x|;@SN&^K4$h-^eL60RlUw)P;IW&t+A* 
z$D176yKB|fF-pvJgP8A+s6M%&Y1ff5w@wvGm4@bO)y~j7&99@H?^Z9lCH_O7Yz%S*F+_b*Aj7;}g> zaI@{A74MoG?VdsCIRNz=mcl=h*kCbDPgE|XZTi{i!IHqeus)&#(b0zRnDA?j8qVC8 z{F^B;HZHRdK#MM5vnc(D$D;!xqURO0prD1%(_QUok+dIAjIaZN?sWw#3iFbfdQA7JB47D;$g`VS{j;(AFm@|I8j#IC4q=IKK}dB^pyfOv#r*fK__jV~Gp$bKm`8Mt z^=-1om3#sSOd(cb^05kd(^wwq#hPVG?Q%N<$z5@%5)7Gmd-0>I*HN#n6>$=84>yDG zVCJu8&Dc*Lq&+}Dywwczcvzp`2Uye(ih1>w`)D-u7EYI;t+v*S);LbyMIbfs6s}n$ z-XPT#4ET{2ZS-ASxmK#{npXQzoJgR61Ail^nncO7Mpu1sL&@PtS8#vLuf)&iZ^JMczWgjqf4wmB4eN);KknR{CU;|8~9T@ zF6)2*a+5-#n#GdU%R+OIkPNYQHbFy=jZZg^x6+5 zV0w^T<-+wDELNA<-5(XZk|B1fDc*!FIn0~Z8a=(aOZ=Bk^El3N zU8RdkM$*FxSIb~T8O=B}FRD^!KH>8l5lmM~oM#T~9AkyIMMZfFyv?yRv&rdT?YYq^ zGApeXQJX^4@l`hlScr^hqjaL*hS-|giLN5}K}CsBoc#;)oySnGw+_py9d|$#PWSur zEMifj$RtHSX1EIK?v?&_{~LSWu8M!LNGvm&Wx)j>5_ci%`X%dZ@$Vl{_9PB&&GxR& zcraJ`@r6=kQ3L-Uei>{Kl$r9XuD+}4b**_7L?tC&SLcM$19FF3c;9=E=yP!#*zZzP zPTwJ^5i0u*pKR5y@z`3O8^oIfNY+@%Cl2$4D?88Oeu2Miw`zt13z+{5A`9Owt-9a6 zsc*SucA3A%iH?ppf(*!qGs&(x@7NdRfs`K%5BDK^If0(xe)I5ryof)qxb2xIt!|q# z5i*mXsa3Tglj-1XmeEq;x_spEj{*hullES6so3ek9Ye7X)llPSO3H)q- z{}`;KHiTjE`aEsmeC`rA^^L~agn4*JKDGk7J%Nut{|;}aj%~2e-9f+*IjE*Szup#7 zQl>qIUY%~{@O2NdF5Aj$X`ZUg6v>b>JYe9Taw8Vg2$1u#5qct|)G1oRzTC9d;b{|Y z(lFid$<6kx+|cGJ+Q6n*MNhBZE>`EO>)%Lb$@28dY3!7-bc|l?_KD7Zz=H1e9S?3y zMazXxcBT4TeXm+m{?d@>_4z!tx8ZvrrRAQR4+49MOflEe)MpmcY!iH)LuIu^w}@EUjz9N!G}1?LSMDSN42x8NzAkdQ zYq(Q65a*qGIyj{9Q(L2XdtZlcnoq`nye1JI`r>1s2DiyLVqUI+mwz&upJTS!a2^iyn(afdTrvDe6uNmyEKu(b@CV%>>MGixauv0L`tu?w{+fp{w_6M;XnK0oG}qY$M{NGia9fmmTq&sDUPr*FqcT^0^2a21ICT+{_NZjI`Qc5bT+PG z4sz0Z51h*{xfR-=*PKV=JtcBgd&HADDqDy5_wC&jw&}AE(K+>&15CY31+JE1gs(U3 z{tMDo0iTGE8~pq=^ma`sY>?1lbnT!{e&kicE<5~Gj^6Y`_rE%$Eb%TV4_>!ai?RxC z+-YxqZ&&bx*@n~_eomylcZx}XKQDhY$HmUm^dIpno39-2U9!KDg!z$4aHCONpi1c<#A{T4yYjum0W#XJGCHx`3%A%S}6<4nmw>f)$AFP_K79G%=mkw-A zKC5*$jBiGSnz0T@lZuhjBQDB(cpzQAggqaKp!#AP_Qn!N@9~Le!9V3itGCFNs~z}^ zi!vt=3Gj%r?)1M?{ZPGuo6P2@Qp+#@QF&4akqT@YWeG0n@#^h3F4je&{jJ_K@I}l& z)e(XQgbx)An9cm`y|T}f4j2ddqO&dxgqUi1#93oGTAew9u*MBRQG)i(z52nSx*-(M 
z;s7(6#%t%-b=uPo%bk5aXoQHU62*oOB>c38MQtO6VSE<`=*#7(v1*nhFTzwM=q6#o zz%Trh2ZUps0a2UOER;4%^ha??xR3b=TuCG>hhlIK@oVQBTbjlCFXfUnw4gs&jg?+5 zr&s9&nTxL}H)?XtP;rcv|3 zZj*D)Hqs}kB7QIUTBH(>MDJ$CgaH)1tf(MZK)PO;Pe?4fCwP< z^eezqD(A66|D;`D^{RldH3-P!@GoqQuQbYUl@R!AGz2476G*!D%*o=tYP&0zCrc#0 zU?OC%Xn<(c1aY*~hw@=BsZQ3#GUE~Y`B`Q{E9d|K{O_O0?)HktN&Np|Y&8s@3lu1~8z`srE>(tr31 z7R%}s{HBV6mXF2YKySr?2S9+MtKZ9pIjF7#CQ|k+v4y* z^xXjM9aHbkPpi-+Gt;fLXz7V=yWpKmK$)&fGjIaPLL~wA3BNOqSOB^=1rk4sU^)sS zH==bYNaW5hnySik|n= z?ypCrwu`AR^V_+{s}v2Syv$>Q!Fs5R8w1*w3V)v|U1-q(a1a>Q^)DglK%M^F&0z#s zy3Z5?9oWJPMTc(Y4@y~?=O_&ZR?z%j5d>@)z1OO}79tHHvu5%v+7fm4 zHL-cp@|)Wf^k|H4d@<)U_Q|IRs_vidHFn<91~-Sk!+nQn_~R|NRKQK;dvIZ439|O> z0J80#T9y^Yh(}y|>adZS2PWR!Ca;JheJJk?ZH(xOD0PcMdOAe! zU0$i_^q@y_*-N@D-(&jk=jSm{hUxo-T<`ScqTjOM;lFHFWwgxYFBVp|E^e%7{ZQ;{ zL(#?B!&NBdspafyKrj-I({gf^DW~n7|Se%A@BUw^KyGmSU zB5m!yhv>fU7NPqdFjK!S+^1qfu@Xg%J{pY>n2HDWYlenYLTMxkihAehqGWNZ)e4a1 z>Xw=y2U66+$XiKGm{1{G^KE%+Me+gV4Kl5XLNKQ3ZbwkKsW^0Hh+e4ucs^9PdHy9B zt27T>7jBG+ND+T91s^?#{#u)b23|x zUqGj=47UO~ql{S3zu+t62G130$A%jhK%hf}*4QDjL13$;E0y%`^JJWSVunknSoWRw zWkE6J+Ivm|*#_n^%>qFzlA-&ffgrfbi~pn zv^(Tz?O&g4ZSF!`lco9Gf_>d=e_N)2@SE^b<2Mh8@GbFmi9AjU94F%#cBrWD$$cO4 zPJ2^{&yf_uZX$*iWc;uPsFO8P3q5kV+;der`%Gknn+%ME=V_}*TM|<3BAr)O^K$B2 zOrFbCcuq1Rj8B|2K>&j?Ci+?Ov_VFvkdvN`5tbXginCr!DkUHTB=mZ}N@A*;F)c!q zD%~F@fgB^TrlQ4L4{i$;x`|n%&VYOxdAYUPH-8{;2_yCbVMV- z^?W^51T*IBlm!V;#gax862pO@8R(A&(X!)#2a`xr#+c)w;Z({Y8o6tPP*S}q~UNwHRfy>#%klSEDW%!nN$^a%O{E5pxJ*ByOl@%{dsql@XZ z_xnU25n;4vuosDQ=AB*Pc`fnAF!9jC&CQ%$-6EqwP0k6Z%b7XEte)|!Wqp7fq{kEz zpTjz*M4{@yr3NOby~hfGF!NtI9N*GKrZsU?Y>^6+Uu-;*#EGV~s6@mTEgH`7TUv*D z-mxOvz!=O8T2o-76?u@@D|}83KSW%nTEIp{0BJa3J5s_upx&51L^ofhrT@9>lYeX2 zz$m3Y=$0^&FmfllR8re5!KpmZ_uklMLv|PD&5b>s824r;8I<4psEYx5y>-EiBk*Be$&=gtl_ZzP#TD?7=QWo_hQbw=}y6O40OJI}R!bJC`B`=Jb>!KL#;@ z(=IY>Ed-`X>QGP7F|zyya{_GuCGn6c)L}$cbVr}iA-%xY*1H?QG?Nd#I3hy=WCWxI ztI*wsVUd(f!L#qgnMHsd^F+fa=V%^IEUnMojK4TOgcaQQ$^2zHEG*hAj+nD26r@+g z_PS(;e)DQk?LYAvj>rqC#HrGeXo|3bPPaz7hbI}GwZHb(;$$!Dh7wJEh)hZOX 
zd&ufly=E-Etr}zxx4v`#F3z>pooIr;N)Ne2Zo9dhvJ zv~)uI6Ogd9O7L%pmp8UHC>5w=4jkqZaj*~dNyiD657~6e>c^2+ zl&E21gn9H{;W&+nVprp5PwayZnQ{^!g}3(1(oVRbRL>Wag@nHg9~2O7wD;fQ)YLBd z1U#*%Psxa?NmC)Hu&`NG$~3U$Z30aL5OZ|_NMpEK-%tXdnFMx*zd}PGM@Z?v*->spq+VMYogIQV4Y3F_J)GWBZ=AC#M#p<^dJvr zj}cX!o{65%KY9Om*v$j)@h5NDr;Y!tULG6Qh2#TRvYYhazB}XY@lj^Xpfy zcGr7q4GV@x2+Z+tIE+^<+U0cAzVBP*M92Gb(eLG!rz`cw9!^tV&#W}n!TVL0P16YL z)e}6tLIh6!KdF&%2uX50i9C4H4HG`){j#D$xmiRRlMP9zP_6AJ(af65wsmh`{FAra zrtU7`$#K#>gOcI04Wo7|J1rD53V(=^5OiXJk;Bqi0H&F|Y)^bXcHj8hpP3w^uQ`Xe zG>HRXki4~lSrdDnccLa+-{kv?1a%&tu-P}Tk-B&k`#jI^w3w_ZSE;hPEX zeY=+mS`JL|4iVA~@&!Oz7|XVRnWpD62izA(k6S%k@wCEf(t;aZ$4Q0qz&8>ZELy2p z=&;W(V+?h8Av%PtzvmIZH$?Q<714G>I*;N`KLAVY;IG&`uoMqN5@+(dN_O{3sWach z$zP;hsStZCNjf({NsKhx!wQZHuepGYewz+8w~qx};0J@_Sf;SDQfjv%mgbf9@Bm7) zg+8g0ec4Yh%PDDP5D>IJ2~jQ)D*CX*y-pThpJV9GHQc9#u24Uc?HN+3m?DY7{WKa; zD!?P+q+xti_$n*@p|Q^ipJGfDL0s@-J*K0g{h~zFkq=!zZ<2S~=%JUEdqfYfahSH$ zOHW1-lbE%8rxn*4I_w_J?Qxy986a9tK+Km-5g+Z7$B*kBW?w71-_rNDUXYe;P^kfW z)eocuL@~_FBxbnlRvDV%@^%a1J16@eDMK~gotLaWHDS&}I~|g4Ial0k4!Pg<4wV zZVCWU7{1D**KKCE0FO*C9X~cK@RH4y5^mopa=w$sXO2z8HEzgaGt`H0iAac6&Te9a99)^ad6U{Ex}HhIEzI#qP+$L^e6?&z&$@e))P0o z3Lc@2FJ>vGtg&Z=9yxL#?V;_a)fAHAsD<A&?{@8F~13l04;*6JVeQ?F5~J)1r|oLg(OvweC+y7A116b*(k6Bp}9E zLg2Z*>OuFo_1$)ERp*$%r$g#s6f~U*DjvDoCi9=Sz?56sX*uJU9}FqMiwPD_9O6`q`-9 z;Bja27-}#QaA*i#%|0j`sxudemLRZaWMwrDxVBV_@hg0jHv(!lo2Nr7yCj%%$QBx% zs84ui!S~s5n1mn7lP#y8Spp%wvnLLRh&77wFLy4pzEq%cIxU`N;b9vt&H@x|=j$#L zVhYE|GQKZ3(q4XpIBvVAi|tlwtYrw~xiKvin5OZEK z_wS(>cZL3?7+-v4Wib@env`GGyz;J z;_uxtp!5a1mqG;*nUYM;!b{buUDDsLe2X=`0-Bp3Xd$dkyofJTLP*D7MCPUc((bb@ zyFUx0P;GVhsvL6Nfuy;7|MV;bANCauixPeI#JvY>!fi@Lg&ZYlfUjaH<%V(h(y+ai zh5$pA)Re*qy}WrU*LL44vFD>q4#y>upqnrF9kwfHGaCX03{`>#S%4mqIp@8LOMQ^S z_E8@N(;74iN9TyaRZ#uH=*7o5Corh!>bT*ZF`VmJZJPVLY7vurstF3@kwEwu{-S+o ze+JQm4@X)X{ClmV_6|k*o2yA+T^^Jk4?9 zn;=iw*HRj)Sgt^qMAZ!L1mNhZBq8o0+vSlFWV zBFwMF9F{Bcw^%6&f|!0$?r8#ADHM%)dC5HW$?fox+t4&HEB)!~FC9^)&tTssQg$;MI5;-+vD3t3lpr>;htG22FOgRY^$wotsF%F8_E 
z4||l+8RVhJTQc|i9%$VOW~>aQ5iq=ot3<1O@$3ci1jX%}a$_LyU!%z0pLSY;U?-kF zFA8byA0$zr_9ijcZ@euUh*d|K+Y1&LEq)yFWN?ZpA=Fz?UN;aOQpr8i_>Z`?HS%`b zs=0AqNlM3H7!)LvK;Q(z{inA$?K6<@w>%6{D21<<2#H+0r`kXrEn`FPSPq+%+6{Wr z+z2am&bHoKfq_I_Vux4JlzD2Y_~~Z8ZhKC6mm1;o6aMS&C-1+PYaP{mqqcm$DFUyl zFmAhs_gg;b5?`2S7bpjJWc1*F6*kxRo_JiTq9>UoL~rl(dkgi=WMZyu=9#tC=4nVp zp!ai#lJ~J8+UW)sK7}=)kjqIc)x{P*u=+zj96K zY8WY^n_XYldU{1UKLK|c1!-L%8}2{5*+bcsb5EKO^v{^Ffp2uFkS9en*coGU35Yn| zaNds+7&gB@ys_iKW#!t=kOZH=?dP-!dlZZW&v@bzT;6=)RCc7L7w1at9rn|hiqICy z*js0cEe~bs5TU~{U4eC~6m>QWO|lI!I);p<#n$CY5qldzNG9CA0Tx0ldt7ijeJ3uT z$Ez;Kr1>7nrnGp+8^wE9HVkY%VeT)$7PKO?4tUe+taEgeT2EQHY#y-SL!Ed;uCy2m z!2>3A(|fbiNe_4jwa&;iHu2Yb3W{&z$GE!B!UREc8K%bup`OkxJMNa8;ayGLE?>-Z zVx$24$c5KVQYVYnR~*EwVfD-y6#0$u70>1iY)zpJTGE4DJjdiG-(bvg%1GY)8}(ku zv5>Ii?M}mqa;2l80QG2h4-1xyb9P*b{TMG~2*5!iuWi2e3? z^|IX>drjZW4wSvYYB?V-pwj{zqZYk`_LbxE9Ce&X7^QC;JD7TCCrz7I_p8#~z za53T_MERQ?gyHmV&T+NQ0Ect9n8^F8CVEGz9*Ff*f<=rmh&O}eLdEPmI$#v0L(O-}StU+&#G_02-9)phGn`Twi~IEyHV%YZnN1Ig1gDaF#;zsA#gaWM?A=34(h0M$S$ze%2c zW|F%HDyxkynr>j~I&yV~cB{*_G|45G#Ee?NG~!hHKH?rXZjVA(`EY>INo7V^l0G;4 zoucp*5Z!))lb4t}*X5bZb=I5`+b{os>%DRO?Ia`lSXioY&_3=P$H@rSZ1F_y+fncLG%uBu<4j0YNv|X+~&A$QtbOVqZr6$ zoo2C5Yrnwpfrgb@B0o3DV`s9=rcY8#gyS(X6G=80VmHe)TQ(JYh|{lfSTQNu6XciA z5u90KI^iYi5)crOWD!aJg0m?UG5!q*&l|8cvKQ@7n0eb-)?Yp=MZJ+ zN55b(^~JXEHwQt$*86mJZ*jN*x5Tq7w5=~>OG|Kvkm4&$E${J_7uI>bU#7VFW3G=i z5qddtK^3=Sf<8pYVQ5$k+9r6sr~-%#z^+j)bvY;q_%7tg%;pKXm7fV_DS+Zhkjn;W z)WE1$>6BI49ucj5MD4glRm|cZkMP7}3oK8@2}(BA;wHy;-lyaW;@78f1#Ju|fEvmZ zkA?|GL{wLgUh|mk>#L~w35;xvlxTtAMGdEkx?-e!GC}3Ew+H`u1jUaOKg0Oq5idS} zpVymBHtzh0TjO;+#w7WW8xIy3CqiLp*$kRGw&Xz-1SG*mz@}5KQQAI6_g}=9nI#kS z5K|tknoE8pcY<^}NUoz{w(4}tMOq$_cE3ogazuqBQa;a>ON%VbrHHr$jw`zqR^OoH z@}rn8m_W}&$fzb>N5vIN5lP6T{BFX^7h6f? 
zi@}ZCpJe>}Rn9g9uI}w%R_^iBAH9My9cP&FpHy?QOop0;pm!1VAzd6K-$|)ayWhb$ z*reIDDF>E`CE_e5B>W#bo#YBf2~95$*F=J5nNF)gsUqOBdNhx!6b}XD%slhcOPrfY zGT}akjl1l1I{efQ<6rXN3xKWo@kb;wQkY1@gM@?CI;6FKm)lkVA!d*|<3+Yz$i6Vq zWSERUh}ZMcmG@J?8DeR5I>)OVtlwj`(4*jUP+bnT?V#xbqAcSM#z?HJkh%9RnciJW zoA-F*`U;VyBz~_4e`t(wOk*KjMA6%nigl8*4ucZiLV=p@LtTEJ`Q=3}&IL(v$g$C- zF_6(L37>0-(bp&+wdhoL*g5Xg2`-VGU1sI+NoK<`ZnH?W(V|_eG91=vwj=E9D_Ein zIjrJX;B<8)qlaY5R+G#yrlAP?GE32`5u4uz5jzB z?O)>d>LM#?m5`{@tX45S8Hg=%@!Sl{b2(x@1;5}RPCm}~M31kfuG6XS^WM!Jy#6M^ zZj;S*4LLo}SO5J5W)gnJWDU7=K)+gJ-^h~;WVtw@ko1dKeS_LzfoipdnV!U#%#jlW z{AdjA4&9DMyV_wW@6n%{pdMKy(z%IrutmAk=WrNcvQwsY+-9$T$o)Nw!`2iFPp>dD zmuD>O!LNvj$mHf`&|CA|xK*KX?*{ub(+uT0#^F7VnWnY;6i+Ybn9fBAtK#WySvUr0 z{~vqr5hdAm-g*9>HxVzqkIYD)Sr+AhDijD11ezo$i5#(8YEdnJ{pw@j_!9dE^wcGdU)r8Ubks-&vzr9>4CkqxK8wlA}{woSQeBR}*s$Hpc&QHT;V1q2*=)!USop68We2CdX1I@P089&mGb zhForv=@S;oaD~ps8he+Yr%~ExDOcdMxJ;_GO)W5kmYL??c>I66t7UU#qXr7ZN6Ab-zo?TgCgCX0VOl!ADsaV6GyU4=$49CYKq$~kN zbm-TXsjfWB>u!?4K7=Q2>{5rDgA>Hk(>(N0m~5=ZU~QAqwU;O=Syr-RoG{jy?ypkS z#*wDSIW->QXp{$*OZC@*9E=k^{asE8QT}7=FZtQ^OZ?*hxk2Of8LrP47>~3O`d!M! z9(rnl*z{S>98EDl5y4bs+>JJkwN>iF6z$2=#A6Ajb>79{8XTWO`TDn+DXvqx^(GtZ zadfdvrnkmcMdEh*Yn*swj`<^Fq=PEzZkuj>jrCgy*|NV#jk_I59&e9=NMv{leh28MZdrdih1Jz3>{B zDjoK{7J7A!S|h`D8}dO2TVeG1v!uq?Iimc8jmxj|Gd0JU`Z)2EQ6|UEQ<&)TOy&}e z+9q$jwSpO}Q|Z)Mx@9A!=lItDdYb7}kb(ws-v!?XO~dv2*wqd81~qQ5$I?oZju2($ znWveV%yKj-VMyRLeeAYPyR=Pr^Hpxk9)CNQ=E*ZM3-dDDia@j6rM0w5J2QqInWA6o zz}`)6EXx#)8WZXmR=GxLUBsHs@YwM=@+k{>u*aaZL%Xs~bs(`(>$6;xnTUwQ%rDNB z<%{*i6GQ|hKp;88#OY^vTw6fR?BE2Wh=CCCfQGKfgcIW^fdqOa&vY(EJfvfaAYp@FQ}f&OGAS~qL{MfFfjp?vtT(y6 zGC;K~CKs|8io^qz0nhhfP^Hyqu(&!vGJ;GT$r4cI2dkd=AhvP*2KzOzhe7g%6!C=h zFT?FBh#+Vf@kvxwTs0CLPd z_ASI@9p&5uN~xImAeuh&4o6FnCm%Abom{Y)mDrMv>FAoPOq;*ugkzRwL~e2nHiu_(ljR zm}E9)5|JKkjVBz8Lv$Q<;c?D?GlH2uPDza7>LF4Q9aEN&lX=og7&#DUtdJlRF$k)l zh(4eYnZL;VZ|KOWbI4;+QbFaN!;7FJ>nX<0J`Jf7`e2e^ID%;zsA7O%w1=3TBA7^! 
z3W^6~=o6Vd$)If+1Q zoNy{dE@5Hm2U^5X9%KGDxe$urJT%S2a|wdNyEk$vm`5LfjPp-UlAN=NOs0rMbfUsE zQdGwr3o|pBARjf*056b2nS6pHrpmvY%y7J55e+Ch(jYr=0&UzP6;QtTIV5OEdXnhOW1ReEhCiID(lra{`D2`&ixJU4 z6#>aY2+TA4@H70*e+{u0T40cw;Y2n_#4wPQadIav@QpvvsibD;#RH@R@5mQ=B1NJQ zC#s5Q;t=Ev@xnPC|J@*UC4x5=AZIGXq!6>l5MK!qpU#nr1+e7NA=lANV#l81gi_!K zr6@{bl9~AlrqT*YE6(X}exJx(lT)b^WM^`WM^vobJig!KVj@FBPLPjUXxbe5Ss06wwp3K{i(Bgi-}gV0CiN#j`3ZmaXMe`= zGUq|_|!v;5-gIFS(6 z#Yaf0GBLN0*SNvz;s$^Fk8MI@1-|ur7f1$Ff-oZW@JGI?0s?{v_&&bp9|$=Ilfoa- ze~dO<0pTv$?Lm3pKe+xuS)E4r@Gf(~!My_Dd-&cEuYR4)m34mjj~(<>o^Sp8^Q6ND z7WbMOgpqs4JumTnZ{)u&j9eiE;RD`xr(Orwd<4Wh?|)Z456a$iZS+nN0T2Wa{6%(d zzs6tuAODRdXB_Q6|L;6?GRODEKg+=W`taNVcB@3A-k?;o5sWxSEJwv+A4P(UeBrTQLXCRip7oI!BwOjOD85@g$E)e*Th0X1| zxP1rN7~(P-2SHXbEFDD<4#vU7^?htdM)LaTwCMI!x&s5tlF{`q?>KxWxLy2VlUk`p zz22jVg%%Ez%EbsM5}z`c%6s78I73=38($DHEDJ@F-luKy90$j?@f8tt8K-6A^j$D4 z6jepPXCHNJ8`pI}m+@SWVcW)+G%zd-`BQLke+kiRx9QtGFf3&4N9Jtj1Enb_&HcyL_|@+ zG)D(mN&H~9EkP8Kbsb+kD5D!FiVu?XzI{|w6a-lUQN$N~q<}QqBP5W%;%5d$(-0*Y zC)2StFT*kD$xCBMS&VrYyc&F>$Z#aZeFRQ4pjNvy2bOri3qy%&PAmhfzsc z23-f~0XY~vV1nmpgHVhRiV@<|x(Z5)f~4Fh1F3kRdBVpVwi)(|6pLk6cX~*3Cec(3 zMGqiL@+UMA2_VXdqeJF{3sHhl7BiJ&I_hl;w?5ds@$lhh~*OKp)Spps}!%@qB0I@ zGS3-(m%;unH*W1AOg>ET=r}AhEz>SuV`=;;GotwQnk4OaJU z%884N7Yy<-4OQyl4IAt(-e7yDfqg7SDyNfFh74MJw6IAAkR+dbTJbh|(v@=pstu!KyAH@nMUrIP#@GaCZS2 zMfwm8#Fq?$AR?;)WHo^KIg>pLf{37Kh$sone_j5vO^kI~u)qKI%07qIjZ10u@rd6x9AXXHMud?Cm(k}Q7$SEzer zfe-rmuIzD72}wGbYX|sHA!1Zg6hVD}(y=|rBF_Vek*TO(-=MYe95+_CczsvIYj;RH zO*)QF&yz78ct5dE0YQP_F_M!uzwxar@WLvqZ~T9}7;Iw;L4Nn~Jm=>kXpBteAfbgq zSlKkD7A0zG1kXOik+Vmb%0-BTz0v8oahulKZJysgN1{|`j4N#2y2|w%>y+ww5;G>p z=69*>+~(Q8x=!pnDP~Vz#1_G~8@L-UvGUpmfAwRBvqqBG6A%}`AGC1GZ?kk`ho8UF zBzfd0GiMe^4=zz(UF4_#a2X2cx?z)woJGp!aOEJH*urpcaQ&*o)op{XhkG=Q z7WJ#IbE7)J;y__$GR(|`gjRf$wZ(n@`Ax{q7dZCFDXjK&>dQ;~?XNCT4b3t!`80WP zWb){hO0;(OXa$a=C&tLB3em5)mf;YGIK&|i@zuhi;Q1NgBjXDJ;?pOYvkMsWIi?d5 zfdf;~S4QXr5F}9I1XBwfyZ994j>yu|%WSQ`!SnxQk+mMjFpqE~p%T(0KtPd4+74HU 
zAQ~~I=L^iurb)+i0=f%B1;0_kF0Qaw%;ShMRa-~2vZN*z)J%d{NFXRw*=m)!z0qNz zJH!ZpEV~R_b!xXRvwmxzYt>1n`XZ+2ArD)0D(kGg_7+#yJ8TYfg!M3ySe%%fKx|cr zh6Z@Lgd~V4GUysOLIiY^sksT}=h9?i2A1L@*j;MH3VUl6`WFCg)zRHs`kar=$ytZqA0{b_Pm zoOm)xm?4f|rdaG!9XQNsps61CL+ZN~%3D>uXbLlzC#K4nU#XAn5QjL#ArA3X$D!c) zsW8&$so`lXEy*|2-{fn357D&HOp~xFeQ!n(5gz)0TwHB+_h@Z~uM|QMVWmX#_eQT)Rxe4bjqqjE%*a9y3Pu^!xO? zZ92Um{h>%cX^~H<$dW+6xJBcwSJ`d_*bC%{#dWgbk^h_DAK&f`BN#H(L3t13^-e z%>;?rGo1X+zh~S11>MeNHkRJv?UxeVn!QLck|ZgETd&aFUZXCB=_MvfTP7J@L=i#2 z#&c`zmRf9|P?M8}5)+%)| zgq@foWd+D;;wWukvxZk*qvEL4%?PJr23f0*>^IonT<7iAmf0IVhM1lqWtt=;9|@0o ztwwQmlTLRAC72;ImL^|F5lforvWw@m*}ig}<<~B=DIF!0Kf+utN;ae+`vbb=61BZO zI>9N7#29H+CM2}*hC3AZ`)pTq!WY71@@LZZZyXnjaNzseF5~SlQLIVS#c>vnPq46%L|5)mefWS)zt>=EtxB`) zlbRkQlF$&nD#g8h);8MM?Fb<=$^4W_GVbAdP0GbO`+HqfD?~UJ#{5u95*O%EEmv7x zDk4}GiMawnQ@9HWHB7#%F?cU`2^-W&C(n1Qw zNQ|XW6&bNz!QZ(>si#oV3uIyjli>pbYLBj)z%=8`q#zs^Vmm$RjSh`Q4<&A32251p9X?gh8ICl! zl8hh>k!Vt_G+16~Bc3x!PR3Cb8Q*b1@@baqY%lK6Q@(+g9U~|>h>Z$+yZe;ueVVRL zI2|TG8AeuQoPHC<-=th^b8|O{_Gp2zlhdR^+B>vh{VHB#k?r!3%~pg{af^IPN0vMs zd!KIQ7RxL9EUvkj=Plyt5O@N^YL&s}B9);|EjUIhAdyiAxSoQ8fc%Ah!X9`7`aPR= zqr<=jN0g960a5Vr9T(5{agmTD5k&&maq&DKUoug3gK$Vgkv_k)u)gQu4Q;yZ7VSop zt{B2otWm(2@h`mG`5yQV!@f<=wz2yT{h>eN*;F(P(?Hi0bVWRLnfS88cfofU^zMwY z%fR(P99h8>Sv*LL;en)$I64gtRhE#1uSOuM@AUA7UD^Ygfu|4*s%VOIsMR{ehvG{p zcy{S`_E~=EZ7N*_PCQON9bq>5@srr?cc|Qcj;(5sMeRI=T#AcR=I4{$=)S-M26QX8 zC@;OlFaLR!H*VOZe)~Uha2(y#8(T)$048Kajy&O{=>co3w! 
zW8i{U#Zph!C*k&N}#A6EH)|E`Ydin(Z41WOATq;eX5NiiHJ+1-ehyLgEAK)nTa6P z*RlKi)D)kl6J}7)kvB!gVr`mTnf-p1w0oUuwa;3AhSS+NC+9S*z&*>a-GN$(P9NG* z5XF=+3nH!HYmodI_ZaV*J;My>#P+Gak>dhP6+-R{?bHI!e z3R$G1CjOw$px1|J0V`AB$dLm1ghfQWcisE)!f9^N-@C%A+X=P@DK4JPF_E!QIW(yr z;)C%e6g>Ml{VJuUTWmEo{PbD$P?*`s#~mMi+@Vc-XPKStHrIl4h(;KH()b)x9Nj0l z9mL)yTep{Z<0YFDk2e_;Mh4^ig8|T>)7)csbB)H88-&CVfn12J?xFb|N{id9F7**p z^GwE$F&PVx6gr5VEo${P>lKA?w~y#{7z#Q=!NRlILJZHhm>P+xO_UdJOv>LeLe1~C=(UH)A^(fjn7Dns+B%KhRc`Kd z=t>5`tcoJF@$7xpE?r@#A=Aj7WqvY2Hm;xw9Xh=h8;vZk*JKG!w~cp&s?mp0gV z^?8<$pTkRtw4E=ar$(82gkck@y~NtpbzZpEMm_p8Q=w^Q{SmSAA-=>A4pQk3YS6gF z;#-@%wc%kLd6ent2I2Y7A=yI!GDGjKWsIzyL_Z;FTS4(QjLw98HQ-43nZ z@L!_BzXl{gBM=CpK4u{Dq4gNkmBWD@e5+n#~5=odNcCooKO+IGIEZi3nn!%3huI+f|hD z7}7}$8d2K_;eVx`aC&wh#V=-0@dI!1o$6(Ykj-mpojli}LFN;+q7 zGGh{b=qQf8N9^xjW%rG3{w7>v$}%XZGHUK9v21|_Q6b>ii1l4s%{trN0o|QCq1_td zL;^J|B1k=)exGimNA>0cTTqW!B}{z-tf>&vW+7dUa;XLdFR+I0ra8r!R7;?qs! zg*3h}q*ZF-Rkv}BEmms<3O$pP8Ij~~YN`YwI=+^iUf`0C+HD0l|Y%sf*U9w*dQa3?JCmUy7;>@n=N*t@w!*AWrt z<_KFR33-%NN7gLj3l~^O_L0mSh1{3H#3ka{U7XT3U0K7 z;oNs)%%~CkV3<@w$5bR3j*&l_z!(?FpE}O?OpLfGBYRP>PIB~XLB6k^A@tBGa??r1 zEDckV5Ri$?USQ@Kk?*9>A?D+xE#*Ba#UxP2h~^*R$?t2-*$TtyS!U-76f#j_+67cS z#_t6b2G%%pvl+&t0SrY&9XrFM8{~Twsdy&l#02S}PR3ACVsU~I9YYb3{0Ib(Gk?+K zxBZiZ9y&#KCdGJ2Cm@LkgvcDbz$3pmAY{zaPRudCkfV?W%Y=uXi1K^VNrER&k(y3Z z2x)|kJk0+l7py3pp5g3l97}x91XB{o;p3cn`nOP1$JtTGkRw_0=?oJS3i-6lW8c;( zOgB+ZoFG3LBW216lPA!=@ke~KJHc^fic|A>GSR@OEnlF<%o9{>F@q70aV|Z_B2qIq3W>_uJ?26b@ zjPzKX!bFT%RKijmd@q1BcZ_zUM&Z(HtgeXs)6b70K0eRnV_7u#MCoB4L`b1Aw5caK zt0zbeQ%uB73=Ylghxl+jP=crL`y)r}doK#YC>5hH%9HNdmSxC4h!!6O%=ia_>F7Q0+%x(! 
z`gdfi7Z8L|*--@T$OI#Ns0YA1@A3VEHVAOXczW;qhRp`8kb6mBpk~^6UC8cEb}z!BTq5?q=X=dNYXo=5_0M+ zh2$yz;B=2(-=SrLVQ2({@6CXl0xiYEzj2Y1Pdn(kjQXx9Ese=Xo?+slC%7O7abyGQ zZaWJ+@>>@<^Ne$sMr(vwNX+sK@XUv`O^zX1Q6Bm3G-tj&pk;$ED;SoJB8td9jQ{RA zJl8|kWyCvCLkkzk&YWf(9am7%Wxl|GqIcklI<~n$I3VzECJF>J4cYe@R*KkLYizeA z8d{vAg%rueK@xE@h9N1OIGzG5bWM6^-rt#P{++oXzJLDsKK|XgbAW?2LJ$DQhV~Y< zMxUFtF)kWm7V;uN9em##m3!Bka{tk9wWzJHQ?)I+!D+_w5$5yKCkluJ-n}OJ{@oA$ z&VNUT=X=(q(Q0zHPki4y_-)kS57n^x{@v(*9}fEIo_`O1xw}5z$sql%e!ORF-c{~h zFB;u*clo^Y&V#(t?<;iY{yY6}=bazNn|$vYd(clGTBlzRy+(uT#wJzUWDppmFczSY zkr<^mml4DOk-76s3lbNfe~#wcm-*3;Um_OPc=E9-RkS;9lW?e=rm6rzxTI$>V5Z{9qo}E?2)539poW@ zkk4Ot+ri!bL>R48ch{o#jm`TW@~&_3gV&%t<8~)H;Q$}-><^Na?tI_y;0LWeAcCg@ z^l0pt+21VELt@~I;7TaLFoArIkTN9d)~VE6)LK27g8+Ik#Q1m|-K#-slk0E3!sR!v z^730l29YUFukDgBe0o`hcC|sP)}qq{LAS7`@5Rqh;fIg(=MX{wA^J$Z0NC7-r^%m>5c5uaWq~<2ELc#m&xILmwQWObM z6h^|85WtLOkwXylRCHZKmp<4zUY17!gY@fo&Px(fAb=nU$oE92-)Z%oXTN`DHTDH zP111#JO{;X&?q+8FV*QuCXOJZdldx5U}((~3mN1s01tOy(=BdLZ!{?lC0yS}!bP;k zF+yo_5eci`#@)U})9$gII)Q2h$@n$w?Fy^yF3kZ5W&|aYA{|nQoA+}G9MN_)s}+{6 zZqsnippVazKR7skyz$=yJi7HVom!Q8$EEN2C;^ET0jO1(zQ zGw3KO#u6GC%crrqO1aXZ=*0;q;>=9O&?WJm#ou?p>(H#!+25|v^&AEWpvEz*2-%E9 zK$lSP7&L3x^(ysNhpwkVWRh?wKqdr|VBo=#v8UNZC9z+TJ&&nr2r@~5~&dBWDo^--z;}J zc>NaDdXrkirr88F93eiLC#1;)Kexqm2=u9y$}C?i(h(oV9Ghc26d%KLLH+aN1o-MQKA!>~!C-l1CU(i`*`2qIW{!r?Iah=Stx@dq8Ml?v5{ zO>1ajgu>*}$r@GE&JwVF~QJ6^*3(IJ3mqxM3{#KE0Xc8fuWiqA^Hr~@2!zP1n zll}cVP20m311NqQ-K#RN=8;1=a@G*tY0;@xXf)c?TV161G}HoaDjW}{BM zUWdpGYB)_M(8Oz%*;(GECCBJRCYed<+v z0v?hmA%zN9p%}S{h@p&RrOM78m13D55{@Thu!Zm za9UJa6>Q(a@#n~kagw@^W7p{HFR^lKi_Maac{D&W4@z&J`ua_l-*}Uq@Hqxqjo|4d zk>JRjDELEg2ej)|nzb6$YKvyOM|^OCuQ)!MADFo2)F>@n#J& zGvip6d7mA(N0dpD1S5r>2%?5=Y3M-cV@gYsaE~VNvq2C=1Wo$@;acRLgWU%d6hKgr zL@++4lRmPZk^++Qfm&rj_%+)n%8249_51sO`hXo#7!AY&F-X0eMe4e&*WPP>S#$7tG1v5mOJyPwB_A*e0(b*Fr$?(+rBSU@t2JqMhjfNMLcfI9 zud|__MonnsqBh(I!cAr*fNWWLb9y&CAn@B2$vXCPp z_tE<`%InuyS>I>VnI)Rd6A1dqopm}}m$|xVQtBsggCeeMV%b$nYd2WGe3{MI5I<`Y 
zIvyruXb6Ds*m%PZ&1#KCtxmPxq}6xn2@3t%HvRoom=9u2XNedtO23J{zs%2*WDf>JCfS7pe4ITBe2At0Fo>dZ{4sT$ET4 zG_Qfv*_73XiukMi#x4Rb2% zu(bBdU&C2?mgg?#8Du2VUmM<48G5aC8auc7)h}1swGEQzA7>&r&U~oP;!l3W z?MqkqY2!bTJn=B!UoZ&vOE}fnxcuTKFJ9?$%E&M<2eg(dI2*U=?!C&TMS(TlU?!t7 zo)2IG(x49ZCRblvUme_jbGOzsbM{FO_anGFP;!py=2ks8FMw6@S zLA;|8rl!IKgNN~ghjg?y<9c=R5Pnl{S*F!Ht)%bGsa3YU~`5 z$uvITl^5wQKFf2zbXe}?_^op%h>abX4F_cgwRK+q=S9}{JtF5WkeyOFX_mP954U*p zpI&13Op5rKvUIt#kE^|{Nr_v?6G-HJ$8h= zyGnn1kzf4Pk61Jc6!qWdc-rHb)S-OiMQ*)*lh>lZL$)I_EwAAF+w3$qdHba*J9`@B zpll}y75C{bzD4EMb3BhuGZW-!W`Xg9h6Fgx4SL15c;S-3O2y>F#aZ(CAy)f3FT8!7 zzx&}8CjRIQN9G0kf{N(((Vb;BZ!Yq8KiuY|b&}I3U?76uujAC;=JuP5{LPQHIrRrg z=92;JjWTR6)82lAo0WZj-ZsgHPjPHEjKN18#=bZ>)eY<`KjqcyNnUT}`9aRZAjEpH zNAczgmCJw6RyTrc&oE}hDTGJN&7NJstu6A_>)Tvg>5zN)VUp1(`Msa>#&(MzE@d&F z9Op#bfc;n4y0OZiKc|wInB+IVcLHho7j)Kda;g0g?T*9=SHRxALH*L7^J?iNulLXJ zJ98l>#X3vZUci=Qs@^#=UvPhY5phBn z1r-6qx53$G=k^uee*P7%Y&6&&1dy#Lkwl7kGDpa(Arvoh?Zz5!F4k%FJcMBfrMtmi zX`kyGZA$w!>`sTFs3H0uX0J%AR%4@N)9$(W{t&!6wVh?IKl=hVSNB+VO#DEUXfi@7 zEh9M{cCKyHZrF$c9YK~3{M9ySG@C3nl88o_nK2#9yw4lvArA3b;IzQrpjfK0wl5$j z#z+*#iE1i$6ij|^pYHx5*DhV>?Q7fY4OP5gjCd+T(((vVX0O;`dvA!O_^1c=Xuq?E zUA)DOTkG7|Y|+&t81WRDRFXu*BxHah*>t;2*4J8e2O`<|6sqLVt5oTUCSo*ADw8IY z2$KjX7|Q)!{`_GVueL_DT;*0ZfDlSBHJc%98AyDX;OR1GRVd$hjm7IXczI($d1w%d zCWxgHgiRT5cZF7^L`ezKmozjyB+;hdtFyUWVrRWXt6iZv7}E9?WNCopHYpV=>{r^@ zo{t0g?ttFjO?DSA@#57Ii)DdeGDRX2B@t8bDyx(huX3x{WxFZh`y#T}MjRB`*{ic& zwDBYlRr5e_Xz#4C^VX{@ZdO=riTEC<^bx!&rInjpfBqG2Z8zBQ1IVEmv7|*hE#P*V z>|Ec$wjD%M14+bb?bBI%g_}1P`PGd!P2VJzN%8-)cV|tKocF!ozqwUr*1lI)Rafub z)7`UUu#tpFiXyd9adeKuAzhpsNBF`KzVsWAUV?6X!j7X0g{6q&6Jg61MIDmnB2pl@ zfY@dMre~(Rr}wq?Eo;fe7d-?75QGSD&=`>aPsGekb!KJNQ~T0Y z@xn<`nGD%{ieyB^-+PnewOhQg)nT_i25n6H;%Z=F5H zad%@#&+)LsXLo#5Z;U=TqEV~z=Jts0(8CU&-APj1A-Z!wqjtir?H&gwP5PY{qd-4< zglSmCY1P;~bZGWn{KwcPkBy>&8Y1X-aS!ftQa$8hv(IVG!RdOaAsf{n5mX=2Y&1CT zc{t;u;0NH13EF!!5AJj8tz9-w zMvUWmqS*q4T!u{CB5EmUDvVmEwC=yc=K3yo_FWt!PNI+_pHGrW*hGwfJ?7%c^+ar< 
z@A?e81KLNY96wm+q}ihrNl|+4X)b>5I!l=_xzGqjZ_{x+T8>S!TxMdngk^@%wGq12 zq3Hx1x1%JBMWz-eNX0Eo<1C~NJAL}i9%{NkygbE(5hCF=QJgj2_?I8?_5b^493+;A zKmSFpOr@ELg-{g>E39KhJn9EE4mWEI1_4?S5E~EhBN4i(0>hq)ISw&4_(!8yd1&MAm!{5%_p$0KFds{z+BqKNM|r& zaq@;jL^XAZ0)iQoBMmMdvy zA|9GLU_1^n9_5+H#h9stuq~BA?SRu)zsq`U%v+h?VX{==vqha`Xw29j(HRG*g(8`& zS1DxDWSly|_P2QRXD{-f|67+M^8C)9Tw*3&!>`@v)=8dYZ-z@V7L%FBci|rN+qkk(2$7tN`%xz(oH9HLn;azl)II?13*B%;&ET_IYS;+X=Z(iVL) zOt;}e5T*DG*gV+MuWRkfAL=C_lLAS|>5S`2d>2!kmghJHN&;y9N15`6aCtt=Hh0t0sIXBDV z#d)TpCfR5hyHFr(hp~7B$N=Dc$Dl! z63Y(3xC8CG+<*DU{O$kx9X5M0a$ouia}}M~;DFa}b~tL8tlpR+UrG=JXY)*t+bsr1 z_c=M)W3A)U&t0N$?T>kSsm#+85DkGlc5ylb8V5%lZ*6hX9WlxjnEAqISiP~zVpJpb zMBK;zU{6F_hhj-G%U8)KH42B{X8rD6zI(Sz(@YU96i9^(B5nsd+tiu?$Gsf6Y@F$m zg%wgzMje#a7WJ;iiB~3*jWbm+uuTlih>LDI9_{` z`#*k_?I6vtw8C5|Lpfn#C@`p3X&xTY4QEht74l|?#G~K3lA0o1UgkooOQv&|pML)i zHy?E9r)G#I;}pYV?C}x3<`G*}pM!^6^l$z>YgX4z0)x@obv8f==|8&4cC*@f&Y(y8Q+(-tMsLL|M8zN2QV`Wd($kD$bzFcGbcO zlFZM>$)rpa&xPg@!}=jrUtt(Ylg&+2$mYq%!)VEVus zjO`SmXp+38VRnw_@7`sv7ou)YGdq`It{leJ6h_S!&4WXFRssrBWNnM2*Fwy!)J178_6Smmj|$x{l$ zLwOpeKN`y~YbYZk{yHmgeJbaGCU_i7ph?IE8aF18Rm9!IKb^+%XFEnMibtxl$O!U~ zIF6nZ;)WE;$f{YNI|@I!#>ZkCnYq*ri=0}cmC&Z$qN1fK?%Z30ii;4va3@cS@6*EJ z0i$N}xA)DC18S=ZW{fCOr}=*)i*L^zGG69U zp0BozFVt7fM?v!iOLk2S3#EMQu+(q|QRo;^DrmbzwP#JsV(v+WxMlRkNhY_bFcrpScLz*D3jLSp>mOjPm$uhMHK;}6fu5XFx@rx1<@~lK7 zLsR9h+FB@GI8~{NpA$7HjN~R&C|b5{=x!0|ToQ{P8lRC@$^DkZP8gO$h0jL_BUR3z zgciN-8LRfE&L=+hg4|HSH_g!`>=+(TM=nq#-uN$AjYT-(cPx%HQz|ESH#TzNyI3-Y9IS;bsc0BD0;yG#_ZgDI=QURB6vAT( z{uI>!20}o<^c*VndZ#NHZOQd-JVV!kUSj;3eHNkt5?j>cmt33 za%>lb{KgutSmY1J6^zVT_43QxV~~}svztkcs5EI*)iq4{GvlN+=3F88&_-J1r@jOw z6LuhP8JFRgU>O_?*rp86uVy!F>LxE-h`nl}{p1##XpZ31NJ-XOUD@0(A-vx6l4Ucu zr5`%+3|}DrM1N9(+8FnX^C=j6$d+U%2GRG`-%&l(PP%)>Ig*W-WlJr{)8<=+$R|s) z2i2TeDTdEI+9@JeRXc&vYaO&xhU#}ocJ}lu^>~Eyd3?88Q1i48UeN0KrDPqtF!=*sX? 
z)eWEF?lM@02mi)v8MP0#Zds|SnVux#ut>b9PeH#$x=%of1YEu4k0|eJwK=+l|9w7g zeO~KlJp23p?UKPa)s|(t@T+dsgyRPmb)ZZQy8rpX=XP1Uj01FHU4TC@Wl7@4i{!2yG>-jPU^$Cr zP0j-$`emyiIACt{mm*vHHEt=K4%`o7K$lq4h2xbc9DTN_1=QXg)I|-1{ia~y znohp+A=!&)91IvJ6)G0s?(ZD_i!k}~K%!baiU5oXknNYA6Ry#r7#cYY7iqf(yD2j4 z(E8=t>UWmy_GT#&Ljl$?Z7G?KuvD(PF;OC46@45%jo)cN%89vQOH|QkuVb7LkVYQJ)ju z;~v$jWpwxbbdC_6>c`J+^j|zGM>>_0XiU~gk=oTm)EUw;G9Hn;6SX708#|`vT8I28 z`0*PJ+s`8`y4@=`XM))}W-bNnVS#EL-!4Wq=v^Lb5^w%OooAK5KzC_2812v}C`0D! zdZkJY(e0nA(R;EP7B}-$TV5F)s}1T(iqte(Ym%{k&@( zM+9&INqbh23BT;0bECq^nokj($lujJ$adSK&GxNtyF3&X)BDbxoZmou(>>hx8XOVx z(7N{aeNw&$oblr*j53##cp}t6WE^7qg{>{qa2d&VcprO4d6}xYRnO#w z0tGI%VR#oFMhyh{BBhJw>5;6m=bIU|u4@Fgc8uZtc$M{{rN}S2V&Ja zLPr z<;>|?Dbwmd@TwV~%Q<@BVq{jST1tQi=!8CZ&#czE^E_nG!g6gKr!M?^h8^n-BFNHk zXi%9%f16^6o?%j+7^%;J2>N(y?m$`}kJeW>(2nRBx)%TT;FT;PK`ei-M)JC4nKAiY#ZwB!czvnwEaj+ zy=T|GwXPL-3lKR2))96Mf5z{1IlWW?VY9y8Iem@@3Fa9`gKkGhC{{_8mF?&*$TJ!p zJpDbOQ~QxTcaiSpseW7lAtq$JVyo<3FR8}jBpXdZhNWW{6BWK-tK9uJptf>ft28o` z%4tRd5&Yc{{qG6b&z_M6K?N#v6`5G&MWSE!;hVuowYcOQ8l;#==#;{PqZiq;+rBx2 zaU+TC6I1#b^mkOKA1RVm<7;tYN#)G`7ifq2Nhy2zZY|a=gqKg2Fc~aZO=7l1B1PU4 zAJ7VTzQF@~3>ETd^w-9-rEbn+-FnEYbv(=*p}4VVL~*lSj30i{7d4CmQs&MqO6>N# zNF&d=e=lJzLZ8LkQSPl5&o+8Jq@!sHZVAxGVbG~g;FxWMzsCmuNvP3GGNV&8A0RV> zY#YQ=1jNSv0$ke1uoS5{qr+mbOA0`jX%0GJ@OuYMsLiOLTC4#2HOYrX8bxX42)@Ol^>?=<%p?6cv;-tech`1nBJfg^X(bzhPqKkBNF znyA80nl0e6c3FNZ`inE$s0wlK?CNFlw>9-oiv$~12IYKHna4E!M5$ZO@9~QdjQFLlyMQ4cfpD4Bsl-g%}rF4o;E%+-2jA7Wa(@xLVr{YBWOi*F2Tw00VcxNV&_}& zy8C6`i-(Tq%aped9`%Fq5k_lTr7q3*#4-84=xE+LIlrQCSSi=It*i-tFA}83uyQ>8{Ve<5x4D9r zVPS-*z1%N=NC7RUM_SNE_0nOX=CZ+Q&D>v|Le(pZ>F?>)&Bl^fDPo@0RfLLN-e8=} zHiNQGHFNKe#22}ln|rop&&M@})Fk!Cp_qJ@;B8pp`#$k%gT9F;pXQY}< zUq#M%>(QeIOU-tE>!gRXMWT%&+@b4eY@KQ|oe5f?P4thN;g2^Y-9q`pTgr)39NJc7 z>hi^fSW z4iLt|N>bnRa&RKKN{Z{~%cDf%#*C1^-@tAvrpA5*h?MsYuqmm~9*$d)$9xKIx(A1E z!s-hQ+n9)NJmLsU7%3$gFOJsP&D*%I5-6ZDDL7roMn)FLUf$ze+Rt7hm1Cwg6vE2h zGxL<3W!L+j%--x_U}=^!=$pEq;w|U;rr~ojHn5|T;)e+bs`SP$SnVMo#w=pM=e_Y) 
zO?qKvg92UqyMa~*80Xum3a}nE;`v$U^7E?l5B)0tmpg7?#k6WF*zft zicUy#no9BK);14!yoCy_4M*SS*{T^7ooUyoH`Ys63F?360vNgaQzubbWpeVj2iI4N zmz-9n4R&-CN>*0;Vb=&A32g{NO|(a&a8F26jnC&dK|z~Kc=!X@M}mSmqms0S6~U4; z#dp^q9Gp*f*`^^SI$whsrMwN2LNOAN32^C$c;E=GA1juXwDml26iUYAm6GA5q_0m% z*iOVgLO%P-Y9!|ddEHkeGzfVgAmXmJ51TL9=5JGy=w7Y^Bo~a;*GJdegFSBG*3Q`v zhv+hHN%K0dw>`h@Idxt#cFio*I4O<*dU{o=E0Fy82&(8&BeqGSA z-f;R&6%xy#9s(+JvVHN{QJqCU!iv7eab#_b3XeUF}kRawf zX?Ff4H!Cd-f3 zuZVLswQ%OW^A#n8`4@AAdW`gnIJI!U2|J!jO^`{*nAt$Dp-FV!UD)huks9Aof8+pj zy6Nixd!*UD(a#$kM=RUzsmt%%NBDKwj!jJi6T}~|%Xz*1mT)Ffq|=utXrdYl1172U zzMT1)`N2ZGM>SkZZ9k9HdTtmNmTOtDwQ#nr>pzZ2{SM^Juxr^eg%+ne!ADa&ax)&m@OE#b(BkbyW&uofCe(;^(SI7NS881~?KU85Ry}f28m2j7 z4|2l4G7h(|nKm?yY6At+O|irn+*A@&kV^9*-~4(gi-qv zjFxc(0C{QF8 zavPD^i z)gj_&#GimjPGe}g=YT5S0d|#Ex)E05VuZh~?YpMM=qK%{#b@oD*6{Zc_>DsjA8|XK z*2r_N%ll`o9T$T%r;1KEalM^7awYCzJ6SP$OTdx+7Xh#19fiSIR2a9j!B{^pplvr=q^U3&%BIKg=R7ba0kM6j=m?z%4L5(@?(P4= zQ+JGO#}*9Qu`dO0bRXR506F4LunR?1)y`cvHd|=gC z@vaWzsHyRw&3VYW3ToB{uh*$|`lp9`EU#2QRfKg=czb7`b7-@-_+K&^{2jOFH&3q7Db1)KaPQK}y%Y%*hkVOJWcyw-D zgHyI~r{f@WM&#n>4X@PV8=x|7+O;=a=nq?qs;8{*PA5VZN5@Yw-6EWvHPW-zEi-17 zp>a%_3_;E3IsWX=FOp@h-o9~Jg0c@lzlVTK;shdk@kK!#JVLP&Aw0G}h&M&NVMJs^ z8rJUdK=dUJnP0%w{RuJg>?P#lsEm6nQBoA$ zebd({bW+*5fW94u0v+5b&p_wffMBQ8;Lh*W$j%gZSQaqs8kX!WUi=Hh&^x>_nWKxB z+ZK19yje9YYP26~;6Rt;YTQp}2wotVHf}juKa#DDo)y5KBaJ9?h7Pg7Clo%&;Ydkx|#vmCrTHYX5wsp`4 zG+{9~su)LUM}dUEZuOJx)hVjbb>+`fDT+6bfz53Tga<3~OaJp5>B1z&_y(^ck{=U) z0P;Q@!X})fLxFJ95vPN(CO0T8#zcoUssx9lck=Xz(0yO$cQbcOK|uvwV3jGpKti z6%u#0FU`W^Q!MO5W(oIWpMxXM$AlSAI-8l2R(XlFqY zQ}@ipD}mw7bqLNapU~h8oy&OejSmh*Xs5@tPiV#(^)}`_QafwNCB~0mZ_Piz9xAGa zn!aMmN6_wcGJ*>5i?mLxIPutmPIhs3E3#!MGeEB5j`2MReV_XqVDte^(mpBQLI@KY#gT-7A6Aajo8 z@_%+S^gG02Y8A2jO}p&A$|KvvhH6rZLW-tXgrzxFVn}_wxm$DPms9ibjgwwYUst8g z(+lmKW8?x0T2CV>TA_Iu1Jb*>)vxat$)JfqkN_Krv+n+uF&KGwUwWP_Tk1MsQPB7b z=T8oIs%liEDC#{peR^4eTlhMYZ+PeT^_aiU@z16dzpm1^-w3tY=_oM%^tsV<1^>Bo@>QFxKKD8Ni z&TP9LW$1LsQ|3`t(O@+5+Po3{RLu3>|1Q4vX2#d4?yZt3nTDUryx{dvUHzwCZdRb* 
z319t<6Ce2vgUTSO39kjSZ_M&Q2hvbj)4T6OukNOGo7hxIP|WY;YtL(0-hR`Jct%{n zWnBL$GZBcwSLvkZlg^h<{<3{0ss6jX>pE~Bb#pEu95KD!B4)L49K@Mh;+5K6q}BfM zBvze@`>0t4G^jyYkztLR@p}M1217Te+t0=i3B$vHrv~B=T13Zq$EW|@h3>jXwk~X9 za@@bm+pqF}<-u~n+pR}h#oS}F%gyi##U}3RfBK9M_&B6})SNU+ri?;2$4WzAv3}bu zu@Z_R2@pxW>=)R2-jeHjUC6RU{^8gh__XHHwi?@C3TB^fUk2i(wjiL*KkOM;m|3LdhvdBV&aHUgYU9?qcRingPKUHuxkfvC>Y1ip(Z7hw zVu>6shZ=f;1ijrAK?}xHy zr->3zo_?I$p3k{yjTv-`Gv<-^>tt-D@f9we%~!O?e5|X#;XXdV(QcMy+uyuD@GmX5 zJYR1ArS5sU27jQ3m#08aku|5AoJ=7@mFM3W;VnC3x^2vo5^#!~;cixh-XJufM9rIQshL z=(XA>&K%QXz@^Ainl`NTcJHi%0{sC;GxR(fHN$G<7`-7e712l@**h0~-Hd!-GczWP zB)Kb_sO=$}x@KB?hq?tTKB+bBGX&-@X|;||{^+VneVy$5lsFU)40Q)cQhXur$occH zx$XQj?`B~>0|Tt;Q`+DhoC=It025w0IwFEgNy7il1)#E76>r2uJM=n|gPovFPAJWP zzFf`LyYWg|sOoaGOB#-(2?LK3qEg1#SV2gfXPpuFId!X_;mj(^fHBa>Fc9jM)Q+o} zZkb_}ZDG?P&yq@l9h;EXD}FOTSzD8mK}#i#BM67plWYFEM@OH|YwMN3F-N|P5Q zV>;~6wo~0)L`?Ini$l*hIuv@w|B*3sGqnV-uScUiEgreb+fiEhhcg#+VLQ**^pmWK z&4uBY1~7+_nTIFvl#)h%s{Ve8BMp0UN@)o8ja)P;ki$FDr(#Tgp2B9dLCxGk6I69v zyU@y8J1XjIczmk5-thu0`h~_zZ* zX;fZ8rR^F+E6*>itghP8tzl}zEYU|$NidH*Z!(!)UbUK`@pS`UzrA_r5H?ufK1jN8 zYGaRp4I3*XBg4E>J0D{~`iMGG(cox)7`~cOUNwehNFIL~Slrw^w1j^rUpg?PuKbSL zW6Jhh7Q-@KyW2hS@RVOo2mi?`_%#@mSSG)gNPAmiZ9ykd8cmz@ic;b$tR^?7f~KxY zYMK^Qc!B_pYFa_&qGSZHHjhkwp_P?MRv)5>U`oo;1yyEcY~)ufPKrXAV;^YX*1Q2< zjkMO(vdGW_npUb7qQI+3Q;ivTW=_nrE;Am`itBgfWT`7vomZltoYEdylx8p<(ol)5 z462Rfj>e^vTi{L|*HOVV6%aH1Hc)lme-&t$~@&nvYs4 zDk&%(l_=vF5l5C481pGEunhE~6qzJkY)Vdz(meJkh%Yp72vVE{o*Bb4f;01`k*DRN z+PGr*tOy{Kf$3)T7)zUmGLkOe3jh-Gn?4Cw>y(QYwoduM7_}|K<^U2k&D1xw@~I|P zuHkpvjMEDK3hA&m1LMm-m{?4V4ZNv^Kz0_)>qQKk>Vey(Xnp+*OD8fJMzn!z{)iM< zCBuW$$p!sP+3a6g_|F01*o-Dvnr+Tr8PU;d@}=a(10$nNLVgr*C_9}*fu<>0Q(CuXMWh2AVb`G1MjYJU55o;bhX%2Y-|c%En^`y z=xoW_r#u!|mO2JgiIF(O`luJn*B@7)D({Bgu|n^ILI?9Da=nkQDI_q0^Rep=es@{# zo7*3R*i428w}+D(ZBD_a4bP;%clMwKC#XGV7;A^mK&Ww|9jhAL# z;6u;~70Rtt8_Y#C=hUa=b`Fd!s5lAWbG#W9?5OEAXxhT}YKx1u@62y^<=wfvJpB9? 
zNDR8T4(` znYHruVK;&eZ3G5*{8=JGfPvQN6Nf9|Vk>eU(azj=o_k*Md-WLQgE{BV=kurh3{PDc z!t~EwD!uJbp;fAx-+%I40IdCYUybXP9{OjVSN%U`e7vdqm+yu4{og&bUW`5m!Flrq z=p=cFK+N^N?0hjqv4TG?7D%u4H$=h!Z`%7pIL^m$|P{%)xj! zhGEZ%aN+&=M?nw?2@Lq>9@A$~@8?guW?p|;~Y!&4b(!=LG?p`t7_ zA1MRo%Kl(W)ZyXbmDg3IT-p!fudkp0f4lXL<{QjMBBubRg_fgQwq&_O;__*!n9F6Nq z&DD)i?M2JZEE6X)%pzNWxuWNzH?CA*t=7YCWz{Hz0bc{YAZ$9$myW*@0Z>qhXIZmW zx?R`UBbc9i9a+Y4UjE}zBobYY+cS|drRi)PU499lVCW=Z#zUr{C8Zzit`h*lA> zvtxmtJk2X!hu*(e@=FM=^kJ2FkP~l8F2#6sQ8xKjc)VbVFf^4>pO1o1(;gOo@*$z% zSmgD@;H^(y%kJdf&4Q_gu6!52AR0h?qzibKSmj$^b@oS<7FhS%G6{TdLq6|Q=fKby zeE->l_*Ho}rSiNs(nw*?MW90v z^5Q_Wkou&rQFrk2^j^B|!1{t7bbdq#gnfb0%BG_9Te)~&ys!$BuGR|-%;wpm-s=F; zT(05;#FI0kNT>8U<&q*Rnm};f7&EW1@q&@AbP{xqzf11fga8c%N;9D}74c(56?Ke3 z@k3k2h0!ke>Amx}`!7N^;ZFj8E8#L8n?eQ+#ii2YB7bJ>Xa`IA0IEQ&3y|&*%O0@ptQigg`AOSYkx|} z+J1B_Q!B3dvT(M*M>uOe&D_c>RR}tm_l)77@~6cFl}W&Ezp+hgRV1+Yd$L5B)X8>^ zNmvdyOrCbi_T!!*e*fgJNtO5ULjLFwf{=p)g~ki6qtL^jRtfFlJ5p@j4_ zyRf2?k;Cn*?W*tClR|zB7<~g#N9Fpu{i|orKa8^xlilxBx&@026~(R>#Enx{a*N4zW2uJ+*w$YPcxEVeFtVz z=quw)sbSXRD`j6kSA`V3ugWP&hssj^?Qi$^Srn9uQ281eZbCY=PwCx^A__wq!i|l_ zANo&gs6FAI7YDVbL;raI)N*2^@PzxP72!f!wDxZ=LDsy!C!B}37)ij0;4`s1W)z%F zL;^0EZTM_TQzKk&Ne1AOX%1C z*%J|J8jQ=RXLMI3-jOHVBveq`@gB!DY|(?SIPD1dG2w!tk+U({Hk?NRoBNN*D8xG9 zb0-P0(0u2YNWf^Rh%-9g-ieFbFOH`iHUwP_l2U3gA6slg!a-&KzM>~_6xbt0L(uS_ zVU63+m2ensu-GB2$5$kK&;i=Fg!YI5j2$dQ`Az@NUnPEgoy^od8AX6n+@dU6w zKpiGuM)7~In|`%_q!P`B{~!7G^rf#c{+0At^nYadf2-!7|EbyU8(5p~$Vb4B?l2Ps zkSsQwnd}7V4e*bnEwM^a|Eq=!s9fk8Z-VW*yBdGp!6;o~GUN7p8-ch>E?SFS-Rmr0 zKHdL*en`p!fMY$8tOErRUr@03Ciq#`Z2RTeDow)2Jx;zsT2lqqzYG68GU-1HxJfLj z2cYKuwF&(m%o11|SeXAlF$oPR=vgaDV~fth9ux2MUC8=>>_(6kLV-=%jGRL7RN)2V zS-~$x_+M@Ci~L{4^Z%iO{{KEyf&b!HsUp-I#p4VAP5X4ix+m^dg?|<51OBgPAirQ{ zY6`yILOl6H$oZN8DV!Y-(dH-KPu*C_A`xx^KL&ite=SvzM+9Z(O>oXCrM;a86bb`$ zuB4E1ORbZnDaI_CD3WDB1%17}z2nV`{_Q?QeS9pCyS~0|>)^0(;r`#~2%(}BXt49A z>o%=8Sd zqx5|ui{|@)%`<81IocQxydAr~l*Sy-dFC+$&nk=rEzkF2Bx8O+P#K2c!;WRfnv{+o 
zt6D{c+*eO4ktIq~_{pL1qel*@MF-&qp6wuX-v5Be+$4=d-|h($n9Z35P$!p|Sn!HfEW zs#T^lQ(~TyEfJigcN82)33aKz#UW>M!?e*zH^!#Y?x;{&mesrAqN3~pPi}f53%>;Q zC6DeRUs*yriHAtXjO{B>qD2l}l~8z6p+%OVkIH8~SrPK@q(4cziY4Dq@Vb;>d@l{T zy2nM1Tk$_rSj%A)+a{;!7Vf&oYpGFDXonoMQzR1Hd#yp|MUSUV5PbS}G~F}BwX86o z`HFlcH4A+Ota;kn>u$Yl6vDi5p~sNoWy;Gs#?Sg~-4_kvsCSRl(snblZoB0B2!(aV zq*?Ke=H5Xc;7@UFaY$#$RDHXYek2{qB8$O_D<5g9%tjT(;M)PtmL15~*KbNy+@L<2 zl*E-N{hliu)hkYRb5KazAXNI(Kktpp!4bfukYKPoG0d>4nr5RpLKuLDXgx}v{fNpZ zMc7pW?mS|7P(cDCeTy+;?fkyO=)DQsjnnwbQ%V`Hg_Y+hu;U43l5YPIkkg60f(&V4 z9%%10CR!tpkbR^`-0LCy{RxX}%PSq4Fc~nd+>T*V3?8Qks<6pNK(HQRkr}O38DW@b zym7qKH;Rs6(-HQt*v}TUV##U6D(ELp{d0Ozw#k;dpA97X=l_V_g%s23>G?$Juz51e zXkhGhh{p6ytK!hrRYzEjL!M8mHl<9pJMz^-XjeLr@e5iK89?ipW-*TTwSCdAo4R5Q@^tKqQvJpZPhLp9)qn$YLuD`` z)wD9v;h4FiWvxYfB&Csxr1OQb({3GKJrXU-hX)s#{zG`yl_Vaxg#%sILFT5Ba5y=7F?0Q}FM+qs9tFEx1ArB~85A<8n~*N4a_8$#Xg5;KaHsrkIuZ z0!EuEVdN9|b$w4xEI5kxt_n6qP?P}eD&(9%Ze`HD&mn~~_!mM5$Od$>|Z*{2u zG(O`+%_4Vn3LXUzSO6e}A*)2XES{`)5eG1rUW7oZAnamvf#`>GlS{)2uvygehRfxi z9HWT^eB$+x$7zffR^4cBu7-C+2$ zl0VPtq&{F|W{5byF<&g;KFj-}jyKAzTeD`cXHw*UYUTyPz+}XR%S%B>0^%!((P^n@ zC&`j0jmOAzS+~{65}yev{+T-bJR&4m5|ARo2&Ex8Z|uMu7n%MWaCL=Zpz8P^)=DyR zYfjZe0vBu8B~}_B>yH0@3whqG3or(%xeOxiQRyT`^aoq$-X7jzW}oRCg(cirVfgVq z4rkmcx8R_TG-N~tffxgsxUE4S{u7SdpXM<1j;06_@IqWqLZ}=lYQ$ty^Uy+ERYxRl z8~%e>qps0in!>B^JFzz--_{-w!YFo+5GYihpLcNILM{U?i?Yr)Jk!tC1v2*&U|=0q z0P5Rifw8f$QHspCEZkPi_o7t&)~*=hUsYHd;LYJ0V=)y~O26^epl&)zljyEUL^qo3 zKI+LUt|4?M=%AoNNVY^6pw3cJ!+eN6#*N&Ul8N@n_vjY1D3>Js9>;|%{6fZpg8DP`ICSEDNs@Mu}y~2DC-_kih0fitj~^)1nvY53+`7 z-vd#0VwFxDK4P9aka;>ZEKJN}hf^_sYtgCSLZz7{vWipah^k2lGI~lD@$Lsdwawu- z;oqNn=r)Hj{!)o>_YQyG;AQG{Pi;EWI6}aTq{Ry-*yP*!9gKx8OjB!YQ15vdGvvHQ z;%KmO6SB%M^gtTNlk=DD9>|Ln+z&a3Bm>Jb!cKueu8v6odu%K%pVVQnRqk z?usEHr4DIoH`44ewHpl8gj3ul$LSXOXa2NVu960w4sOmWhKpeT5Mx%=sxhiEgnKW* z&^JEk&5oe2`X%KMEt)5oXMw_Q(sMdwt_Yo11Y#SHtPF8VHr9m)*C1o>$%DtsHQJ-j zU_ZB2c5vV2`X|eL5C2&IDl3;&-SuqF=rHZ%#t}Rj#rD_uyt98U< 
zggBcM-u0CHgyYa^{nfX0ngyC#K=v0db|X}2#z^I_#D6QB5rC?kJDyUplzo##Q#-jY(zU7pU2rx66 zg>-EJkPHS(u9&2N43eW6F;BCNoEV)#X2P{@w#_iDnAp${xQyp!I5&=*jrXDIPr31& zusTN#g(_quhvRKzD1agP)C5OMP$%RtH?@N6ct7LF)uoHID`NqRMUgJDhAL&qhXE%E zDkl9-RcdZhf;j&UAtIN6N`bgJ%d865`25M51)gM)U&;pLEdDhb&()Z3t6F8>q%w#5 z(^ql}q0Yc^v1%R(i*^X9QOKC#@LCNHewE!^O$19=83S~Fkw{TI`Ve1eQ)ou79m-#l z1XUo#{6}@NvUyGLAyTP#?!fUl;nGGpk1@Oxnt-t7gcq!_xKIZxii|vx)Cw<&tRRf- zb(6CRbZ;)1AK$2OzjCvqc&cu9yQ+EDBW|L}I5*nP3{RkF$*kqo2q98IpK2 zgFXkol&?$YVn2K^XFFvw+0VCCx?S0L+!G)gKXnk23pB4j9-qL|x!DJ=52-%Ic|nLO z$fL633AQ<)F~@AAf1< zuT$~V6Tg$kbdT%wL3d-75CB7{UKg4`TT~K7lX*LSR{#ed^!&U5mQMypSIIyk=?#Vrognua-&v0&?>} z0f9s^YD+8r$9wsa>&7e55N@cTa>clK-Ys=1lqivFq)~xk#7T2rIq_22k zq}?t4ahO4jb;h0S<#^57nN1Ee3O`GI%Q_LgvrQ%5uXp&PhLf^FOcR%E$#u#GOTs&| z+GA~51H}wOMcA*mlpUroXBP@dkr{9webJm$nL$#+Z8BO(pNp!J?&zsi|Y(dtQJ!|7dvT2 zg4Cvxw38uCkqhLqVq0k!S*S$Sojh?blr{Q9C06`EbEnl(9BS?bB0%Z%rSbG3Qk=|@Q3DXg$iQJmYNWR3ti z)DVw7^1OR$1SugvI7L~;Oi8SUI%@_(93j6Ru&>I-zC6I;5$pb8k0XtXB=QK9cD4k~ zH{J1oA&`U^1bb~VMN0sWZ4=gy^3_E*vu|?dfW((r9=OaPQhFFkJPi#NKO?Pf9s`mS zfYlg?{jy+Uz!_FeE1`=N!5t_Ap8O7;X<>TT{PSzs*$DJp%pAkOjpARrQig+f0_!4i z#b3&e=Ab$(dB0Y0QC#D3e;`5ZEN|^H^Is6-N%XeJgJhBBuP7e*#~ls*;&?)Q<-+RK zXW|!Iok3j$M{MKnd?;V`zqtUIsF1KasEL?~+9)b)xaTF9jV%(A+%-SFg~EMN3=46B zPP8B*1Z&-|x!%{@O-W)%VuW5rh+iqGK9!O9FTT^l44iIcfTKw5D8X8al`aV8q)GXo z05BP*Z1KJd_S99#{}HdL}6$1IuABTX}01(nR*h}Ih$ zmI7XM} z&V`yCWqP2nyPey=7;=2o7ve|E>wU>bokXfIAJ+vdNQDSax2MHJ-w`yAvK( zC)?0NsPc`)@VaM+2{GWc;p_3h=*vyeflHk*G)a(uO&MkodH7Mh<6}r3g zTwu|QkGhFdt1y$J0V7eJO6qWmH`Ii%YRU45>x3w}>*a^Iok?hWsJZ$&)i5|Ot6C(Gk%w`!AhEYzHn9 zX$)X@uzVg-yEwz!mQRW~fBg=2pWLm8h#UBV>YGK~ZV1w+>+Wv5Bv~lQl1aNC;ppQA zz7Iw~LyJ9j0I~xT~DDL}KYyLbZ>QY*8IUO|y8oOWp7B58oLb-M*L`uGcFAQIMz+;&c)4 z$bAH2Gv~&@cKWU(wyXk635+kfaN_mf<%tC15-AYh1^cf1FNi|`uJl}$!Ru7C$1^UaT^wA_u%rv#=7hFMQFB$|{XRRjA_`PNo53pjh z;Yr9n&we3<9b%BIoSG1gE0U&O;*%paGM?TkIu`tZN!lrh1l4jepnA$ec!L;e`Xcqq zv@W;kasInLzusWkkD*gG^G%yxa zLy;yyacuvVRrr?$^|3ZYoau0!XX|ZoddH5iNx{5Z_HuQ8i2Dl!EpRHxao)>g$s z$aO`Gpm?UdS 
z8TI-rJe!{J5`*g4nG#EAQbhSG%?qZ@ch>apV1QBT2=RM$Jc&f1Jse13NriOf1J>1-U!^AGhH2KX-*iXBidqYw<$Cc40Z`32T~dQj{s2N(_lC zOkoIxob+C}roB(DG+tqOg6bE=^jn|TSvOq>+A3aCNeOI4sb{&uFAeZ2@za+AMDOPY zNX3*Rs~n#@a+q|LN@v<5zsjrgw8qPX4sV5nf=H@>7UD4hB(h5G2thIBKww%N5kyeP zb48rcKLESlfA}6c1+2`gG71_bq>OkGdmgulIEwhecDu|A`?buTL&)fCuuhhU zOJ2CgRRcA5j5A}qe7K2X+1c?}#aGRFw3b`gNT#TWq5x*W94&OR!r9TRUoc{bBFc#e zWeBwcry_DuDGYX*AeX59U*~wp^4oQvHS0O&N^L7oq-m)@xc4hkO{katJ{@)qk4)yN8{?W6h_uk#rUA?QTtDff{ z^TBW499~^9H6y(3u~;;rm6{ia^X#cf=ZLD|B8yBSaP#PDuH; zb}xeY#{uh&pSs37C5>Hv+6+1BP(qnf&MX{u0$icN|gbbAkBL{`ae6(;$5Z_ zAoXPy?i<998Z@2dt}O&L+reckM3&jP?18(QiWG@%SPGi_zrW(mFQS{Z-F@-Hk;0;# zbwm2-kiw%XX{EjbQ;1h4x&L~)uaa?)8i!t&)XnCH_G;K0i2o#~Auzf?Qi5yWlh4+i!G!!Qa{6z}z&dVg0Fkc(XPt8x9J~83@ zo6;52SqIl?SE6wVYHO6kZ;UB$i7wzW~ZdgCqAs5`}Hn;|2nnIWqv9_y{E7XyIFvR1Yz1@_ihx6rC3Ao5!A z*jzb8=moM~4emd*V#~d~j8OJ~<++I+a#b49@%$N!(aMT_?Puicr0nK-V~LJ&UgNq$ z7U1)gd3u}rXnOD6{UHm$qGO=5 z`72#Fq-7Ud7XI-`VA1F9W5DC6Lz6x^jnLqBe}>@GRfgAI55Q`g1g`Vxod7_`xB?u-AR0X z+nfz7t7$Iu-i(0Tp`9s2k;u6yDUT=)zhv`>8e%XW`br3iyy9uK!L}&UB-i>Q_s&dRxa@&B zJgetTvVI{rD$;auan6Eu%j)3Q2LL7qC)>E|;WYjPn?bXvnyEW+qidECQk+>8~mKZxYC5hc@WgGFTqC|U94%27$=51v<*?udYG zqoBYC#j|)c^cW08mf&eZ8rGK`+&9=l#VHlbLj@Q##4D4-dv4|rF|OpmnN4q!R*63e zEA4|Je**e8o`Fz`4|9PsUbidHD}=_|l^J%L7L()|5-*j^Hapl!alU3;KsaC zFeL^}0WpH96c$pSGmOBJ2lXNZc_Btk^P%Af*jrQ;goY@NS3634r3F9Wkn0?z_6qk< zT;JBJ8&FbBPt4?29BZ?<+rT@jh;JJsjkmWXB_W=QHO6YoEUAY#1fCxI#g;Ux5AwfD z-{2^<8>wU>ox`e4OVosZ?|4ED@=lN8=p6Yn@rBXZM-uM|*Ammfj@RGqn^Hvu8{fiQA(d{OFTBVH47af`^50{+UR7k^4SZT`fLLSi@nTF4)- zodMT<57NMFI4%u#bS}!1zWs=u-u7Q{TYRnt!Y@Ng?5{aBQ_JX1W!%fJl+ee2PEp731 z+MDIPUnxvX!fRwj*0zfnTm|)h7%0^XG87DfjtF|+k;IldkduuxLx}kWn9X@Wcco`{ z+PvY`z;C6Xvszdhtg3}~R`gFZNF}L;xaMb9LPmL9qo}$?;v`iqH6GI5uQ{)b+l&U= zb5+B(DCqcK;bv_x2kgm8COP()*!QMxic%l_ys%V-nmxNtjqRsMF`Z2#YzoJ%KO>?7 z?=VD->HV$O3&EcZ^|vIb#$B*GKNZ=%IO7X=4zXK}=h(@AkmNUR9TKQ! 
z|Jv&qroh)JWyIrU!-yMBAY}OZITJOi@J-9!>!kDhZQh_h)I-K$*=JCBuZz zvyob)8u*-e-@kW3vf@P&@y%JnsF6ESJDd@c62RVLmgH18NtD$6?7@^cO#BthDe9)w zAesvU0}#U}1hzQl{-RK(6ecDAAWc2-bSk%xaBh9mi2LJs(dE#A_Pw2;x4#uPTfD@T zhz29;KH)_pm1eNudq4v4nnOEc zzz<$;?~{Tpt&3(zs44qI@7?Kl_*e3H*VW^BDhYI-vVe_!R^1fAkVqBJE+bW0Hq9)3 zUCPR1qFT(P7}7GYpxAHhCdY5BxUi%!H=MH`u>;;jTNE5ISG+OZl@#pF>YmMz%VMse z2hQgTOZM!@Ugu?&)W+x4>W+M8f4hbJOtu_Cys}1Z!P2Wf-Y0ivBT85+K$}ml?x?)v zU^i6v9s4iuGMP*W7j{^JJzvZVaT1vG(sXsxO;`XSN@teSEv(`4o>Uk$O61)F#JMe! z5eE9Bqls;SKbn}>kL>V@B7FQ~P(F zc`S4^$tyKeOe@}gG(^)inHlrxich=B$Zbr8<)2t9iny6Hbsv9W;mdBl+p7UT-X|&1 zmSx(!KfSzpsdUB0mZf^N_s~lZr*?)AnLx?ezLWy>X)+Y+6JJl2AR@l(Ay%AZQ!Ni> z?nP;v00B7ry+_&u`vF#>kl8p&q8Yx zO{nJt>vOGIs&KE5(e>Ohj+6%;MNAG+b1cHr09~2GKSs}d*+T8fpQe$#rMm(GWvMY& z*?TQ>LPF@xsNxuGGR4W{=d$*y)rVK?cO_R%Qq&-!JK7m0=ATZf_fkO>eq9p*Ljkgu zd#3qHGGAR*%@DLS*vxDZ8RfdG!bKM$mI`(7=^r z)w&3S0)l>E@biVQ*sf3ScKo%))eY4#?BnE0e>k=yNBV>%jIIyk6Ju*#qvDzRq~9V@ zoNCs;KQre<8(ElX(k^ZX4pqsN9U5~kDdEiU?g2-Lu9u0I&g}mX>eF!JWZktS6he5J zmaf~Q=0oW-A`Lty_|^QWviXzIO^uKK$y%VCmE|PCh`oWHiAAx33f#`Z`V7&=A?G2V zbyI7@J2*oD(6dWeJt27V4c)~L7(~Qz9eAFRlx_kfDQaUCC@S<(utT|Y#cBiexl&t4 zhbeEKJ6zJ~*ex7!StTk=BD@Mek)en9)_NhqM&P3lP11A!d(;?<5uAfpuR4>Z#l)CP~9jWbCS}9JEKz7sBa9?VO=l z(9Lt3a#yznXy*7ChJt`;Am>*R7vxQvcYcT|7~kw-PA$9C#3a(+(n zyF0%0iQTM9;?O*^@NxdZ@weiZre6MfRID5%v58d^6a|~PSS$}KpuJ#MNA($ze9G!axHVU&P8br_hkEHIJXJypIoi|9h7Iex#pTQTGxz zJCu)Bp62dnf10VI9IsvdA5V#PE(DsGq7qJZu3W&(#v`48Y;|3ybG$M>tz+Nr)QY{d zLldf2DvP(a3)1EmhN@u84N~tjl4(HPs5CaqDQ^7n^#?K!Ou#Cm3_shflZ1%q_UriH zSOii?B{-3X-wqkMC$-D5#&?HbRubh#E2M*~*Zu73>?QG6x{8ip5}kZ-uji8g!Nq=4 zKR(YWi&Z)@g34d&KJuM;l(iQ!k`4)uY57s^ij{tEub8~1?(qrw4aQv^oqB9(%nh zL&Uzz)Co1%E`S_&hpuiNE0LEE7?(mpL?+YJgLu+bHpS?gW+S31vlsvC| zyf{oNRzXQBcI9xo<8K)55`u1uZYfA7sGJ9e@-}$3ManC)u|4&9|M-|+Q5PlYO{s>J zj@S=T4I%`tCXrrGq!XhR(`^GOBcPd4xWwj`Ks=HAQ$w8e{t5Rn!Qyc7rNLD=eD1G^ z24S;o=jiMMN}mV-g_n{gn+!w5Sae1tcqG*vQ4L$o`SKaV`IwA+71i5wV|TQe`>>cB zd-LX45HoOWe2v@Def~a!E@x!*aJ-HO;PmW-$&lTvqu*QSz`Avf#*&MEiAg>M6c_j# 
z<*>p^lwc=`3#6iOI%s+a*vO{TRH!oGtRaV7y1~T_+`p;GdW2>r@ z^o?m!RLrjWvjff^(DF@k4)3uAz5$$$O(HXVgN}NHzl_2?l!|0qFH_pXHb&=bJ8`5m z+R*2~?PnINB~~AzIH(eY0U9OXBYJuW2i>X|FQNYl=)*nm`{VU)h_6!Wl)!#w0{JN2 zkq7C0Bj2=o@V*XfF_-(>XAF}&(UtRWmkVNxW?Oa}+b3U)3zqN6>w6snuD-^X6hcmU z=v%L(A0z#=MYgTwAT`6?>u)%C614O!)ij3vw+~|@X)<05fsl(cwt!tfe7j4yz{@!4 zy14VLb12(}cl8iQYYyJ)&|WMa{M!(Zgm!)3`&`LJ8j`=tF2AF7hqvDc4sVu1ImA8? zV8_R>7A<6gO0bA#HUc9~7og#P8XYHDjCMJU#=l~!T0^&GucNa3A$DapL*Ak=O!LCC zM3XIU+uuEP==)Y~{J5a>8B%AzC+zXt(EQYaJOJ!?Vc_(IeWsmY<(yyVet~!*H}I`7 zBDv}#T?<+4dd})!I>?&mep$yYTn70&snS~67psju!ly3}$2RdSUkr`tB)TkkiE3Up zi3DiWMe9TJxWvksvg2f8J)4LhNR@1@hKhK`hK;LAhQPh()=sN#4-X#@C!W2Nc&yZ$ z6Y2mI9#NRbv9e^@xlIzU%wF-7&7j`9^(_wQjK0C^2FU4h#Y)#PZ*r0Rs!%dRf}b{C z965p5^V`K^pnpzEO;<7W6j+;Y<5c1v@f3%X&`M?8LQ6eDbKU*nWQ`O8eWr|fxk5DZ zd-gxLRk7~4+3)v``tv7C z2Tp9=$b7QFd+J!tbH$!)uH_+R1-%IWk)tg$_eKv_?9c8U5_TM{_{3bFqhiqOkAbhEgV*?M)6>6dkF{TqKutjcT#9oYW%(D-D0_12rq02xu3Kipbj845 z+YI}vV*b8_A>@0KDry?*?2awr2x-Wo`jr&E&97B3(4(v*-p!<%DqcHwdfEy#l9$q* z&{)ni^JbHrPp>8MR7=Ne@{90kA2jDTTl}`Q{)D}7c6~p!PGVQhdU2#jQtz7+(ktna zhL}EvI?7bD`zG+jscxlY##x?&BRP*HY2TaGDPRM!D|M@;cS1WPpy7LyPgq>Pg;8ex z@Lw8?R-1u};BpHcE=soUrw|I7h2GP~i(R^&Bb*YGH{>8AH)8PuC z%p^j^GXVzBvAsBMQa#66N1%YN$0;2tBIAQP_r(drVE-2&tA3Q%gATnkf!^aH6fpQm zpN^3;gR!uO7xW{gQMyVq_EX>H7GL~X`#7fKy9S;8u?G@TOWgByK9b(=z1K)qN-@#< z1%3YUXC1vS3xQ)HUz1*2s_S%ftoqaCV#w7fd0a+zU4+sFG|a0wjLILL{k0j`f^XSR zw?aG0QuH>}^-=ye7N9DbX^#_eD@xXA@HNxPDM9w!5d0TUa_K@>{ft}an?8GK!!?`KAr?M5LFXeQTi9A67}6}Vs)L;wPP1w zfv*QdMW?6`!il=uZMymS2^UN6{1`<7Vxa-jxXU)Kmq!A9v1g7pZ~u(*8}@?zeJ^h# z36Z5j_vz9vLJ8W2`>`mx{r#@8fYaMUHz+Kf%NYMVQ%Jei%x1}08 zsrKpT(0Yl`+RV&2xd#yQGeTw>wl_0mH_A9S(p5#u#4KuCofw`g5FEo0>eXis<8bdB zlqyebxkg$c&G2$}4?-LDu80$U+_tC)L(M@l7emg+2_ob?#)uk6-`|%;T>^wHN@r0_ z+K^qw=$p|yNAIz`uR}cc>#~?M=O(V78A~ohN}8&co4C$;C-pS@U3QdvN;`YOcE52mfS5$_-J=|p_Y-N7F~12OxPrF{~Cw07c9OT zF@|IYCq3BKtlaUrfauA39VO)Kt-v|P;UY83Li+HRE5@kM?Qs}Q5E`N%o$P8xSt_%= zks)1mPEp(Uwxfa~p4h7eMuJKMW}AaU?jjb}bl#D}H%$3cEl&iV(6i19&|jQ{ZMNT4 
z__}lUkLMdD6xoZ%)#_}7y7Pe92hn$)>pPCnKY`m*eW44`_i=mD^BO3Y2UrNq_2yaX zEq%A);_Q&|$|Vk?)LZ!EmIw-Isc4CHUM}}j0TTIB%w?tZ*AKTIAKI(}fMRx4mmzE+ z3NlF4UH$G3!ekkO#Ys#AoV;XOGd~gsHDz~^$O>A<8BydPO2-OCmF?d181CxX%0OQY z$zLHW*O0*A+6N@kVpg>jDR{@#RcckcRM(L{=hLEIIh`AP&e|FGQ+G%9Y^6b11se9H z!zzQOdQ8sFLej-QlBHZ%>ow@}_(PsavZirZvXzYDqYQFpVA|QO-?`agKv~S(7I0e) zAd43wd+(glIeL48-}IZkRy%2`(|aQcOik{A;&DDr7LK0EETD~A#a24mU^RPU4fl%u z7n2-<8Ms2{*Eo{4Q2m+BLLQr%%W9GA>R#oYcfYAgeNdVTm9JsZ@Y}+m-W{8~>3u|Y zKPb*Yu;Ti;?dUaV~T_>nyAbsoTaXQBTMpx0($iIHAC~2gk@hC z4rXvwRxq9l@wU>mj^d<^9EyfcFkzr-(>w%OV1cqMB>Z!98=nmQKbZHIA6&N=Os|&M z-w7lL-GX!D*1uk+jXs0o6jyavOk(4SJE71TNb?{lj^b(%E$7+VusAxiZwHWYD!Hu|$Xmh46a<>S!^SyB}VoOic!*SGAUZ_iZ~7RxOQ3r-F`|%k6B|ptul^{}CJwwjgZJTqA!&7Qo2I8#gx?Me@J4k3 zw^XxZh5vXrdk(0cS1`74XH;{jPh=e=!Jl!R)|3As52N)7AMnX)DDGXQw?5^{mddrx zDEnOv8JQEl$qgbNPuh1t>SapEX7>5r_b3zJ@E5dcond@3ioS!M@yFFX?hWYjLj8Dm z!D#>8MrW6zzQr`HwGn2Z&&Z{Y6<&spuACZ2l+y*Z0$hf*Q&-K(-c%R$@LY?NdtO*S zbC_S0LAkh@d)OBvBNK0`h=G|9g*U^I!{$9_0aC2GRo}FRbWx_NWCH^;EM5DYb!0(l zOGf-d`lwN23!9JprJ~?8B3&WrNo887vd&D?N*X|ZE6YG9o|QgqmNqtYVrcN0WGZ5OyQro zdky<=r!Qh3F;o%y^R2TIH6fP^sX>5y&S@Op(Yk)_D$H!~@bgiT5Kj9sf@|;F_p0{& zIH$_EN@Hl4cH;Zi<=gQcj6n#FdPz^3MT!eQEjoRJ3Jem_b+!j?ZqI^$-cA#p)W%27 z^-bOniQ3wpE0?gjf^crKNikb)Q0nK5`LawTo6@+zXEYB?-?jw>GlF_njw88ObdIJ> zLSwVzBm97C-_a^fyyPijhVzc`?%_9)mcRn*yc z2{aJ1Z4~~Id_r))S1mc5;n|L-ax4xTvE^hYEvMrYiV@N5A;Bg|K zI!Nh zryiwfr>h~ufKZr?Vj)_#Bc0RWQ|Im3UhoJ zH82dWdRx0fbM(scG|2Y4RWl&{ad19aT$m0wzC`fd-&nZ3tw&5<9ldQCVi%Il_a+yL z*}t&G9Dmneo@*}#2qgsgDRs`3m{rv@){Ni85PCED2jOcB3`+5-?{Y6kw67KN1mALv zbAov5`z)o+xI!-Aq7C1jdqr$5eW$6Om@`eQBqc^&Dy8s9vzIY-l@^B{{OIWmDI4|@ z-chTMeO-ggNau4=!1Z(*>y8>9rj5(cFZ#?}tNXnQb!*JbWz}@|UTMLI9ySM1!Ig%H z>~5Hu@$@I7c5V4=uD?*FomC)!{`)3L$mz%Moye<%SN-&3X(M(q-7WU)Vb-ZcPFFK&tgf@>y3gX;#aQ8S_>Io`G#+M-*5(OfhN-)b0 zt&MvccHfJqbEvFGHs*t>*t*4*s&=!{Xn~XiOj&m70Y=3`dweO$Qv)!#dl%s?ryn~- zmF5as?5VV<14Myt1I{zWR$$%x_4o4}-zFz%)q4qUm&o(H$;JK*oEsYMPTL~sq7pYm zG{pN9jYuoxyhjZW;yvN=b4oc=GBIcy+a94aY6(cQJNzQYL_*F~r<)Q>@>9Wod52!W 
zvB@S(i@?!dLzgjyN_@aV@W=(4_bxY|&BWfI;4`bnmNdEz@U;Iar%Kr80gf~V@mALL zjN)|CVa`St|0+84rdEg*H+Ts-aMq+M{W2E4u|bvSam*g5Gd0XHCvP=Pq#6tqBj)t? ziaBHaTJu2j+pxLoZeBfg`g>7u77Wrx{Wa3&Z~x^MF`7wqn0>e&m$Ilx&^mUn{HM{; zsiC4WJbd19t8bpQAabYa>`6e;kj48WI{XbqC&P$Tp`R;cu1p76?c1C)=JN(~^F#}d$vHE@ z#3cf9#WS<&*fP7DD$8z|&sWM*PokPU83yePYYKB=c9TUqESz)m<#4H^7DfT1$9{+m&-NPkX zNp(uI0YJtS80%fZK6YW0a%V$Oew|MIW1@NH5K=n*KGv%(9c?}C$prbGidndA+D)8$ zQ+jljXx?w?tl*8(6|XJ{`duf{LVuAS-3bXsTujw0BqvM$Ards!U;Y^^D~{6}8#`hN z3;x}EUxHHR=0`oF! z12UDZn`B}maG#~ZA4$sXgX(ysZsN1vV+sMR(V1LL{&w%%zEw;@_W4L3p-k^X*3vqD zE0I5E5K60jfMFO9qE*kKPivLu#H@+XST!LOFrZRmQ>)O_H7ZjZwJs`#L>;ejcDuu~ zI*hFnyDYn8?ej&C@=Y)CD&TFtzq6tEC&S;jVb-Fen$6Tgso)UMKf9@@H*)XUA9Kgo zs4b^uy!}|roDsu+mggzB?%nq-k@Inpdk;J_;PqmL^amRD?!!}??>3iH0cQSBQj#Tg zg0)$o$j0E|AEC$LziuVm!>|+ED&x)`Pv+13Z=?}<;wt2ddZ^gfM(ndkQ`|m1ZeHsL1V0;2@*MXGpTW(4{RC%wlAsQ*q z(>TXpNOjULFF6Ur5Y{v-d&%2-P@R8|jkG0f#~(Jcza)*0p}^p(W@gps_osiM$0TpGi8C)cA+ca9$j{}vE6Keb5b4-3C6G(~?U=-| zCy!geT@>iil_M6A!NW1$(+R~iXz_?TCMbf%C=sWMXba1>o0}3JJsfBT_N|gnlY7VJ zAlwW*1fsn8Jd%5wuQ`+2=DWo#THQ*q>%t%j^u=8U{Q&>g5y^Yf3Q! z0J!lg$ez%w!HCkf6>VZ9K7};*j=KS2I~L-SUZ%6>He4nf3ZC;DYZm+hx9CQLcfNvq zQ)U#7`~x+n4UO(@EQ%FN)WZIKH8+_d&f!rV1C24#O(gWm2=(*d*LN^f0*GB-xJg^^ z(6ZzDnDU&v#5p9HOVt=dVTgwzK_TZ6eaTS;giPi$c=f^+)RG6mD;6So&ivQTYbZft zX1poLI4Me8F-g-=Awu)DNxI(t>fKr&k&pI z7UpYe?G^heu)*0A8U?)xbZ+U;+cRZTWUh(o{ciK)M;SAdU5;k?fX5Q86|+>oFro2A z$r5*Si*HW1ZAgehy0zO#+0s5I^eHt%;2tx_uB|Scj!sPeAla>hwxm2s;dqxhMw0-R z>P4UQ^(7zIxQ4^fqj`gG zKl-GE;=-bp)0&%$%>1`;JOB*MO))%V*j|S8x2e74_Ik{)IJ&=@1#VuST<+-;0z|i= zoC4*9Y}4xDj0%jLG9DPjEBLqV_)V7Z=%vHfrZa#+JUz*M7Jgf{2zxEHJ!N+LIU7$3 z$o@Em>x}4TryD`}uDbazJnfQC{24TbWx!V9unTl~gxZ*yjncpij@7?+-#uHg+59o> zV1CDk6tj|uCCDCpuTauCr|`;F72mG7U6gG=o|rzmu5eb$$0uUXK!OWkkp_VaG3rlQ zdUzSz3Em@T;e6Ls)#l|3u30RKus+PG6YQC$-mSt&oAXF*yMgPgP7D7%lIl*)9d+eF zO$|K$-?dH$^$)j(NSLOt`;Pne1l+9PnIaxy?j@0|s$h#;;9#p+gs#;m5r?gP0wJP# zPE?XPks_lu0&MQFD^Hb? 
z_2=ehxrx~yVy5fb5tj%zp`k1{GGiG9N`Ge-UMMRvKBE}@PJRv zROo?oQ?cCXS|L`tlTq4W zU}58*a?|KAkIH;g`tgk=cEt)SYsaWe?cjR*-NCIPePvGDb6x+jV^t!v82vBL%ml-e z+K_Z4N#~?ihOuIFSgPe-KXpdMNHw4gw=02<+PtpyeUz+SMLhwr<}_**N4?v9=4!jI z+nL8h;2gdFLdWR-2s2ALVAPv2-nVa&cX;aJ$H7=6eppM@Hm2c)UsXYT$-7DZOA4iwzBi8~vly-7w zn6va7{~<;3=b*)$?GNzr6dw!t(lZSof%mVs^rNIqO&$TmN6!Rh()ZlTOX)B!yLtZj4s)oGmF)U0- z#HnmcnOb8UGrA>-h3zT}g7QGYZI&BfBBK;1Y6wBdKE$>qe2A8HpnxX8z83)XKAa#Q7 zCl-t87k}2L_U2H|#ttizW;86U+}BMHC%Q+F&t8m}y$@y7GdG_ruVA)h#eQH`z*0|t zhhWlIn^Ih1o2mg~jl7_fTNPD1XTSUQao4I-(7eV(L#-R53k`G*3QJ^k%FJ~qx}k5@ zOaCewackK6@*ZQr#@u(Td1c!P%x=rO<7Sa{_6Er$O;KRp#=x-PtIg1{ui0; zS$%32hz-PRCgGY6QDQzX)TEUTs3AJ%YAQ0qJfMqBb=0 z>{hlx4SAHRBC}z9b&7<#!`Yge6j4kX0_G=2vm{!!Ieh9R;efEs7TRQqD!sm@gk7IZ z-4Fx!6p2Gw`*sL$>2VJh^_47m+URZ5C}Va5H_ zaOd&nxb5}vn3bJ9U}^8kO&%dB?&HDa(gxU#bH|(jhRvB>@sowQ)#OJpkGZ1O)g0nT zy27h`f|3XYZY-v?hls}tcfUfqM0a1LJ&t_;T!lH0%@HD*?&A=#|MsWpy#r^UufV(1 zKQNw(Uut}G(?=;*{TNcX@n@o&2bWxAP#8s+U~Ym)@!20^#W3c8YKnIsD}h|<`o+hCO67qo|_~@tk$z+7EALBkZ3kyGj)z~IJ#RWP}O(H_?P!6d{k9H zqRTY{ivE}p*%KdSn5;9mL))EUx=NRC(xlCkH)D##KJ82*GNo`_wpbS3tPwKrw7@;~ z=4Fu-ax{hcTTd90Q~{5RDO|rfuy>ov($ozqRct7lyu3R5+dfM&YnP`n&+=gN?ZE6+ z;D-p5xH!IaSOx+{$(gZ9*%>4k%)=&H?C7FDa_$X1KhdZ7JK=O;ON?fZWd)9?=eAW@5x4-uN zT4G|KC}fx^#3LqTjUWDtNeRAn6|au2VL|THut+%&xV&?iATI+uc)0e01Sz(vmo8h^ zOfx1gh&+env{JQMV$>JkK zG24W1!i0|%wMLd8b;!nfeWchQ3~xOWgigmYtrmvA5H9L5B`KDxrnAIFN)UR^7A*+x zI92yDDwvR3@c+@)`CU?rgqfuTAfJdaXHAh&vl1oQ_I)ewkOE6jp{b7;bbLd55r^e?ZGFFz!j z>iUipLQYP&rmtgtXN04)wjQ;U+US06PWj${52zn+_Q=z;Ncxo=T)c?&-plPcv+#xL z=m;sSA5pu)n|lO6LsZiV$qXqZkSvc(X0G`>oHV}2HG>$f8`4%xjm=5nesI3JbuqRz z^{MdlxZ3YMCawCi{LzDT8UWp>-z8k7qJ?&a5bNIY@wjSoD6RZ`Oy@ls4F2~uy7PIo za~A}C1_d6!bYylzaJRdtuISOj`BQY?$mDC%OO{)kSa(`>DFQz2Zh37+a340r-?DfP z2WldCc?E@@vgouvp6pQpLlFl<9*Gvk5C&+!E^wecj|XVCOctN&Lq+quv+U3FuDUn zhz8$|a&wMbT|3K?I|H`NzPr6X0@aH{9UCCR2i`D{yq{SlG77Rmn3fljLCE-7nCBKM z+nGZUIa(BSc-TsTUrHN3M59D2g3hbkAkL9Y8z1Ed3?*_)8^S~;!$e2Z72v}2YP~Q7 zAV}rE3s<0442}0(5Ba8NT@id~Qv8NGn6k%N!d$t(23=EB7ldbCv|)Q;hbP_V99zIR 
zbj3ib2^73_2FSs9Fzp9Qe{bKJpG4tqDuIWH(byac;1%$&eaIO{yw||T#!fqxsQDX_ zGg9(=pZ9R2`BanPA^8`v(N}8RUw@S2=n}t>5~}@gEI_KU zCvTb-tuj^dr{GXlMT$^K-Lxcp;zKmDI8a7G3LPijp@uJjR$5vf%#}E4e%QvKp8RPl z>1|$uBZK@8F`MpUtFb8NVbthvR5VF)I6&Vc&8^DfBe^;Wk}QM=`8*M*8eQU?RV-zy z+h=?wEh>3Y`wQS{+u8A>_uqTFPL{fG*2)7_Bi+ym$3lpQ!i+h07J_b)s5&Mn(6b}^ ziJ>W(7O&Uq5vcl#lM3lhr%!GMJ|of$b4S0LQ&zM7JV4DZ2&wx1eCZ*fuBG|#+>P*= zbx)*z_==&T-`h}KE6Yt!I$6}m-q)onl%u$5s4kXu6eK89A(t${j3`g-Zwnre*wdOq z&x);}Vghf#XF={{`C^TK-EDl@h}~pj`jLBA^(;dODS|{lJe;q+a9$4s8{^zw5g!_M zAcSbS6dVzK)h!Kd!*WO7=M`7OgJKVhxS-bpku_}zb>KZ1jSK;R)E}Xw=8V6Uwph@b-S@c$BB$JMBlvE@2U519EeRfMOhboisu@ zECTt&ERUm#Ifmmmkag*urzXZXE=Tcj_Xl#~gL1L!1#Mw239Vur0{Yd_Qu)Kb!|^9^ zw#Pyc|L4j_!xlmOw-gGNKv4a+gs|0u)vfVg1uu4~3%vi^svX$4Lr@h|uvB+$Iq4?i zCE;>dm2Fu$prpjq)_8)pIhYa|b{P|5i*!;)2~cI;T4Tb=sDv^@!@IdqlH$;P)JiiI zHNNz7r~O;AZBgptIoj)&@Ad7*4q>5i9wW#{X255kBmPCVsCqs$pkP^7-#TT!mEseQu5n#_*(BwMBiW-4Flbz5hhuB?^1=$dFuJc-F94 zI&>#E9Xat z+}-afEun>h6LB;d=CdVB5T~9*xkrT<=6~cLd|?o|`GBv646`R?KR}1ho(&(p(;99c zaLjR@q;AbhT#{zCwX{>Kn`{1)C@2UfA?(Hm37;K4c4pK^&7Z02lW8Dgk(5Zr%0!-M zO~PpVd3SSXjnIF`mqYNM@jW{7EMs5a?DU22Au`w97HBoim!eHpDkz%dL%eYf*ZyzL zZD>(@1xVA*NvSDw8+(fXH_-xQBhmh6uJpK41(*K&)PGye^l$qqMF^?AQ2(cGVaWOa zm3mw+_1}SD#$c?=f3=V|oQ%DM={D}NR7#cTaR+v|>X*=0nmWhoQ2x8aID}SSE*W98 z&YG7*C;g`QTn{S_>-|`bxQNFS#n3K@&<+WQ*sh5 z%7cKf32Z2~gm7RQ-!UgqFxgA?K2xP6_C!d$2&>S{(nV*9HOvo{4Fg6_k?ck?Vo|jy z``+JRa1axAhO;H&#$3*uoz;^n{$fDI2}gZm6tUzrA~jK$X2-B+jJX+WQ`;pR7(Z0R)8nPED63FCi1dsJ5(rUlK zcjxaX%2ASGy70@j#B{YtGH8iSMi}Zw|GQzqna!0g8(NEytkhI1{xiM6_WPr^Ugjj# zyr`@9|}LT#A`T zW9U%RW%SZ0G31jok<+A-)x=I}@L}WAfdrdpJ%7~fQ@lQ6IChK|Mlb*>j&-oxg)s4`zE$FXz3ANE zK1R#LmkW$1pwH)v`rzpL)c!Hxc9aLer1))zIZrbzV*x#S53)(@tx@6&9hnI6eZHP2 zE}W&Gz9MbQ4p18Yu=UG$-89{3XPV-b^f!A(i*{;aFtPI{_s=&9(Ycr2z9z7xJhVC( zrVt z=D9*V#audzb0v7tW;2*s8&S<0*v)V{CR+)j;lhIo7_H^J%7&7|f0vbArKA$YU09Z$ zoaJ6xdyd(&Cyf>{u`PU#$6Yl6em&cw$8B}Uk@r`azOWH(?w`o}=p#VDXl(FR2rb?E z=AS|5FF+v(bxeDc$(4_>Bz81I0nR$c{#lh=MiPx4jsAm@g+#j}xXn8(4F>9C1gyKf 
z7H*8CWh$$Pt{53g4!y3eJp}^I=MzV(UH9gw)mukHspq$mauf+h=Vwe+BXiklRxEZ0 z5Ng9|?aH!MzS+dc_zc{nf|B)0{kjYA{#dtr0-rEFn_Y!_7g2|#MTl!TVC#yXA#BYL zb;PG2;;Ll;|QBfXQz~Xyu0c>O!B*5#pG-K)AzSJkfl zJkQ>zz-OO7i#i)I<8vBxe6ExAqj$9@RoQ&EGt^CdRY;j%c{fAaUnBEsvkBe8D1kP- zAfU3NK@LW;OGR;uzB{bPXHokWkwdzMevkWerS!mbU&c^C2+ZD43bCmhHhU9wZ8Hj$h>AQ=*+by+dwX1MnW|DCbN#MeBi)Hp~QExy-y z`C^U-o5{%&91TsM#3XW!tB{mhiH73j5BwbVcGnlf`MOU}l{k8|7@!t!`*9KrkI)t` zBX9ws)gDRfwGx^!c)72+B$A7lLNO)`y}+>k*eoIv&j1U0U#ieCs`8a3l6PgYF%3yf z+Sq|YHZZYndRF~5h)4ZbP?7&FS;W9+;B*#KjWFKT$V6>CbRDoOmLg#xjM9r|$aq)y z`TkRVMLf+9yVt(EIiyiRqX{l_&o1TIJl3xFAaGGs#4R>U1ex~ zADOS6>d*-DTTH(i3 z4N1mGc9)>00*5npqPg!gDVR$pfb~Pm?5S}jwT++@owS2^;ASYOKoi6~IND2YMPYdW z;AqP95h;^Qi1B0P2c^Xg+Onvlf1-iI0rI#FHZ7P~r0y$u#-wl*_5obp0uDYo+%hRd z^08GYAeYls>zQN+thPv%NfJ`tb}(u1{GI`VOu#XsX`=~;wFH*muJmu|s7)S$`rA|u z?DChp?KBYwk}YGdQzpE~lSeI01`0_uG%)Z%PKdS+;W1bN)LwyOmj z&369+B}RS!815+qBsrpvWb7cVIkuq>%KD;HkV%3~(A0Yu_4=FT4f1_f4qZ2lOh2Z% z0^lf&DL~3(aY{_W*!VN%$eg(Dzn!V*s`71=sYjUp@C;!ir$q+ehtMaaNnyNlXb+D^ z)zBc?KR`$Q$u{Q`m4xCNqusu@_PYyPs(jVaw ze8xoo&lIZcm@C1SW#A@%l#AWN_^o;DxMyeaIJV%F>59Ix-s=GEXVan@b;rY3f4V4As(J~9#@wV0uJ{gU1R0Va8V1i|hJ4J|%hz98Evzk{pFYSD=P|JR)6@7mf zh#*| z+!9`O511WgVqlQ2$)H?+JHmH1XP*Wo57i`6!F z9Oko`4{1RT9c2;h7XX52v&ImP$<&!#VUOC$S)rsqRnjVoKwgKfqm-LIDq?A+Na?u5 zQ-dJ7%#%>-Il=Tl%U!*P3tUA(JjK?zAPUa%Fac?zfEWxjTAkD;gd+~hJlK&-hB)Ql zboOKN2vu-UTy^ZPeX0Y+LYCjs5&Ep+Y$-y*B;S5xG)QMVwn~>}QltQ8+*Dloe#&q! zPlTLb^6YM=1vh`-V^%ROxJOIi7PtvB-C{AM?u4Lv?+Y`P__Ph(Y-Fr=cJ^)3LadoB z*8QMaK>PenzBnkYfgc@h{49n_9#(dUIWhuWmn?*6ERsNBGU4lu;v1L8?{q^37s--t ztqV=sp_hPA(P^qK|d%&=`VbaZa%PnyP(U}%BGwflI6+9q{b%6;f;Pv`Dsf;@ zLI(I_uf#BWIn$iDb{yZa`A;MfvWRCqNBedNU|jADe^%S>6s5d;B5Ruz#1lV^_a}b? z6#lHislHux4t6?7+^0Ic+H&vzFbG-Z>$_0Ld($*7+62kQa$%QO z)Yur)K?8^->uwsNJ7!PAU*BtCFT|$(EolNYaxhd-Pb{h^WMIyy?+5e2;n|wM_zWD>ru+HtDXEans1m$wNDq$ ziKq-WMM>l;sSx{s=iVmWn3@+m6HE5B7nF?gTYB6Qvs0cT!>RxkeRo@w&#L|3ZV`{q zeE=&##|yz#_d{%QoEWpTwA;UZ!xT_Whr?x4F1AWbUEROIBt)8lz?zieM0ubY(oBO? 
zJNsb+YtPtyD#6w5p&Y?YsRLWC zuCgWw!x8@%F5$xs(CQ~M*qu@@V5!EQ;e*lWt+VZS-*v z*62?|igVf2Q_a3kFwHs`;7PI!+g)U>WE}jfW|+^WcSlgAl8|+o8J9EoTLI&iVx=LP zW{gS&y7TwHc_kK~aZ6W;lFol0bDSqo3Gv>a1XSVoCu8oC@OAmQ=0{U*izdIu;OmNW zspPgG;09`jX=xR;)phMyg(4ghEmX07NgcOhR0t-_JrZ-9Fmu)8#l;aP0Oe_pkKO&% zD{CDOey|~isH>XT6^x1j#4<7&FIir`R@$d1fR}xfcU$*qNe5P{s%s~gH_WMEVEI9@mS3`z3rMk z;w^>#?^gL+SlWnOzlgM>ijk4A^2b??s}6NO#}huv`0@ys!)+_4QD~65ZpSKO%(~ zgsjGe5``T>}I2Vvd4bFH>B!C$^t+zCCwB zU*Iy>Rd|VXG9Ol?q_j912k(xXF{ML>N9tJZT&{1I%9~h_Z5QZE;*_OEzIbQUnG^NT zg7n2P)k2+mJ&+VHpNb|;Bf3nYF$oz}u{l~qNkKw8WG2~}JS68= zJ64Ik+q_qN!D`+O9tje;S!w z3FtpfYCE)?7jxkT86Qam6PiC@%UHZtdo!9dKSPWb#xtJ3jUC3LKryl7O|=a}5^tJc zY+S1O3jKOdeMqm&xx8WQD^_kgHd1^>|DnC6e_efDlh+)y21Z@J*1D{E#&=O>8 zhLSe&lSM}8nMHWL{lU7NhIdx!E~oRTVPH3)A&ihZ&uzJ? zK`9Kw7JT8)@BK|>fvVHdTP%BGQcDWrAb@;uE|^r;-=sRak^IzR`(D< z)8cJnI-iv|N!`H10ZPcfn>$)=U7jX8S`#KM zrDGIVh~N#Ri(tP>U;1*`BE>{0MmlOLMfF?Yjq2gIC=rsF=>NVXn$5QYL}wSn%?*j0 z{sBD7p9u+Jo#gla# zs=phSNMclP={uc;E(8$B>VgW;SwIDa?#H_Oca`5F$|J7@x5l2<`uI7e;r|(yy)rE1 z^ZAJ%tE-Xb%)q`wb{CuZpcSHS@ykKU%f@E#kIn8Kab)@Khxq70qRKPNvqC6(4D*A8 z5BHbC{Y}Vjso*rPx$eygG+emHCrhR2$ilwohNAgn^+^mcD!9c{`?U~HFZ1Zmn*)yh zQNVj4T0W(RhpJd#ELurnT+Z_`jj79_^Y!D0ui`fO^HnMoZJduEkj+$>EeRe*inkq^ z{E88O*IdKF{zG9$xY^V>oJH=rgZme|-BMp%Zbqg6P?7=b;2$34%O}elLSWI#7_Y|u z^Q@hsVj<9Jy<<)@+?~!t4isTVWDyp{6xeSmR*E7WjMxuHgZlWW=hQ}H@ny`U&B6&2 zA&`;3juasfwCwh%N8;olfZuHynpgf;nl zA2UKZOrC!pXgUW``$pjvJ`ujRqSI<}?3P_3fh<}BEUq+^L(+- z^5=U0k-}=x^7nt}4tfych4di9qQJ^e8&C|`BQaYOO^fdbSSNL}*O4R)7l%^rnF`@& zpmuHTU)KPwn&JorMZSi z!o^nKMHNqO&a((BsqROj39$pa(I6@3roP%NpFb-C3E%kqUhitSP@rerHcE{FOr8FUmAH6hi1?+n?(mgUBDjl|-TwoiHArT0A+TC=R=a z6Jc_pLsub#%KKRoy1PY1 zx(SBxjsorvLaVF4W9@{#eN0xi%trn3)#hFgOKTE;0D~blr7~oD1o!C-*3h+9j)Hlz zQeLmLCo*_UGJA1n=b$#vNN_Fe02cOw$vT(*wAsvp2@1N~Av1{`9ej4&Z~@hSkHZ%z zNSF1EO(ILF#;b`E17y;Iy0bC>G6eA!>zL1BqvLegTBz_Kw4+0eI&+C*s6atqLh1B` zttl2gEEGsSJt7noypa5Bj^EIMdE&>fklAur@Q}Ux0Bsa+6rMgtvU{)pxUTv23_bDs zU;liA14o{+yXJx7k41$7xf%(u#gZcmqa1$+bKYjWY(62lm?qd7ikPZ1!Q^Cs?`Cs- 
zEx_q}#d&(W8&-*)D`PR96$r&1A6K5 zs+F?JgpobwRT^L;HUxQxZ`KQm7BQR|~f4 z)`)ZE(eV@}GAA%C`xAnVlBVW~jpE4`$81W~?CtRtQDbGs4|x1^buTLLmz!#54|4I;l+(J-JN0%8+jisAT))*EeAU( zJX}=Llsy{a0jIcYmRsv3tM~v4vK|w)wS()8pDpa!l$WxK)TxDz_{8$W30sHb4dF}9 zCo80FPn(7Yj4>>#dUoVvqlqGe@B28-(~Y$n?40IKZf2UrnasnKBEcC?W|oaurr$Bf zDBV1%2g3aQh^P`}`3Irod)#c29u89{H!3V&STCQcD<7OrEyVAs6#k?g>>o&F@-s^b zq5-}Ms!Wej4%0cStK)BL2qRSqH*LCsRk-CWD8-8~5+Mk%I6`s?%n7mw&pTUK7VQ=q z6Y;DcTzB#|Rqzf5y1Fh0K#Q|i*Yw85HwJAqTwnxaJ;<^XMMF$kJ7Wq#&NAZMxHR=a2^@~Dz_v|Yr)BbU zNtO(@fWN{+vay+5f^LUoa^6N^t@`naw(If&3cnp3_77=gs~ZH$NSpCJD`eifqE!EY zHgc&Ga`Fm_Zo}N=LfX7hNYmrM{zeiCA77b= z4Q)&!$dvVO+7LnAx2LyW93&U@8h{F6GP{oi9-l12x@*XcL&GLpGN5}t-AGYU^F2SZ zlp-_@dZj@~H>#!$AG&Gk*_AAs>MZn6yrB?jN-~y9d!&aKw6pI;=NA^DARu%|X16%3 zY^*fg*EeLrg4ukrv@y8h6)77{7?4oN>{e%%U@0lQyTtI#klh)p(Sij~D-S62?Qy8J zf|-IK#O947jgyX_I6e+%NyFfCgw>1?Z12-$G}aA*3=CE9C4>zOP1v?dFJuNH$cVjd z6A8i6p4;VE7l>7?6wKi4N&z4z$kJp{m!Md>BlbcNJQ3BCW-=B{)3dUyD0(^R`{ke`u$CPV4K+70WRxm?@+ooJglP5Th&d8=K98U_lXjFtHxi zt*Xz1YRCPo)cZ@$7pKpoiO(bV6?^#&2n~_v!%Id>%ZmYV$n;6tPWPfU4{ELyzEbw9@;BmzI-B z0ppu%q!Id7~?brj4GD=c02EKyKOQ7!xcf`4f&lkqjrf$Em@QBtekcW=t-{ zl>~B#Uo#O&MA(UuX>^KoviEd{Xb>t-{~uH?2nMpD{a(6i$R`SN5{!bh4Ks-?gh4FY#z9HHEDUCzD?7g&Gh^FrYGq^|$SynXy?; z<4`ZrPEs}`g_IXT;hQkLA<4}>W~$RifhU3ece70$i}B&=n{+*}fCo>Kk0Y#zkf`Rm zF1lwRbSa)g5h~#hF3ddh5BMvU$om!Kg)Kth{a@rt4GueNNIiu7A%ju`RPrdrOyor3 z%{eI}JmDgPC8YIvS-UVnV=LNfYr5iS@EY&GfYs66elJ>ju^gfg-8BrGWH>A(i5rHX zr@{SSHEWXYpZ|IC|D3&Ri}W)8SArV!;dr2eX6648q#nK!#oz&@{rvar|9`_x+R=@t z4|Nd{ENmZ&JVpUI^Z(vMK$7Fq^)}u+*JSlScCc8%y=96e!w2>`Yks2t+mcVl&1%ie zv@|_y>u6V=&(`xN=Kn%g2Cgz$^X0;mphglBq5Sn z%=*&g2Ny(1NpU4A$`dF6O_$DFn0jCmKxU&N4XQQmv;5ItKp ziIN~ke!IT*|2780|1L}@xBo;Bx%F^kNyq?uL+SIH`H26SUy0CHTItyXCFpzP2H9KP zm5|dwce~=rhxq72<@bFv)cPZKu$S%aA~xD2R)=$JUdc}qf#u|5=#Q&&&g2e#cdxEhCqb$t*2w2k-2;DIG&eXW*LF=~~74J9e_ps zJ;19>XzJhDg~HYHx((&DN=}8N?w3%ui32mz4}%IV+3dT*j&FTBS9&Hh_B6q zE!Uf6)z;DE1gn_CRQ{ya?uj~xK&R9W+LDKbRLi0XctmXv;r-(UBbSIO8awcfAr=-Y 
zO$5P^H@ZNX${39{385e6l*=c~&A`?j&#JPnxoh&&7KairHYYd5tQd*D=R&o&f$Fz# zVEcjD?!kKLEJgNe`(n zCjO3inZ%VasJ3_(`;OXB$&q%?KoIcY+pab=SKA*o=G@+vPrWiuy?g;i(Ensd(s)w2 zM&F1jN2}-@bXHirzdvF*ecEB>NZq`j;JqZiUbCgR3U_SMdtAMfGIZGne7#GCuUvVC zeBCE8mk!3d`ow&=e8#0{w@m7ErRyJRY#s9R+TkcGx;lDXa6x*fJU+Isx8AZbbaajx ztLj@Or!nAWrpBK%;_<{6s%9G>oC6yI#bnXqz61#Kd+>Mhv`jn-hGG_tTU>7-=N!xs zycMq-^g4!S2I={~Y-A);Chgty(!&u3Ar$`w`^NBFGp%}rJbAwxeBSB2$Id)`G1zbE z=y4Hy`}H!DA@Ka`G5*~>^?Ayw?PU_u_yLxzPxOm8_*~BohmYn!(>FV-ILAABtmIIg z>Ns>(SpD|>(~T61pvLE*`(||O>*wd+FS^0IEt2|Fa-B$ncltMHXvi$BCY>*C@6TJO zPxmis%pAAu*&z5@IrwZ9h;F-x3Y%y(i-8yS!F|J35f~Anf73(Om5OUEL%10hQ$*whY`SS?r4njXhO6|x<`B&JCa%CA}hh-du;)eO^8neF9@_WNtZwW+G=B!?Y-#Pf?`_?^uP|+Czj_vDK3BaEL~<=(U`GA%f9x?v zpz?hUlSAks+=*Uw#9^J^#Pj;O4pScZ;~U%vBjYc(-8Asv6w)+V`LrSAqW}mnm!L)Fd>+pd)yqxw5Oi0ZVJ+& zQ@RxBi3(sZu5ml}wAeC~lUK{l(o#fZ#Uo_;iS;`9*^i%hwZ{Ul&i3HS2AN#)JpBa8UoNYrrCcjxXce=5>}2&@qjPMO?9 zcIttn5#xk@QS2924Q+I)1A_#sRx&$+`|10GYh4{-d1!zjNfwDVFg7+gvO>dBr!ycs zqu?XFlrr3<*wPI55o|mn&b!);a$oivR+H7E zLc7^hqJ|~OFU>FK0tySN4(t)L32dXcK+v)m9+_gb37rNoQuq>)R^Wsc%1WmYPET%; z#M2TF7#Z@)q9hKN%+abij&GHfpDn>%U+_6U@o9i6#u^E~(3=xW%zmVBc6+7`F-$O3 z-9+5#Hb|PV8-6WR;5mv@*&%X%mLd)#!12J?EcXcx$}3`phu*k-paJ2rK^Xbz^`ZB0WIwo? 
z7jf1%=UzSuC+y7^iIkPFk2(L8q2@z91WS;V6_n3Hyl%W3g8tA9GX!Qah2w2?%=Rjt zEowT4#|oW;d^p9cEnbZb#>hDja?DB5WAaHCx!K1bAob%gHS2+!RRsx${cnZ59Y7STOj!9h?4Rh#`@)G1Z91MN`Ts zdAh!y)=+to+v{?DtLRfL2ry^riK_GWo6UTo!yS~f7?%t4mR?cje#OCvilAJ6_A^CE z1pU{U0rZGtCPD(v6p;D7-4bu5Lo@5>UMS8+i{ny)7(13@;yg(GQ@B?QSR`?R@@DT9 z%8zdWEB-IJY$EUOPpW@dg@Uss&`>-|* zf&9lj@|KtBkA}*AgA^RYSH1vrz$i9B?PnI4b+y>u>J%s0koN(1Chc+Nh^O`&Ap{~$ z6@i5Y_^4+5x)3IBu_yf-QUF_H0iSrI?|K5~D6_iZ3p>mAewyI}$f=K>gF4c+3~U(|diMy9ZI@Uu9cu&R4xC-;fqf z1)SPtT`)q@BKGgx4);*ruE|y6W7TwDl>9{ig7mm=2-QbmtidW-QC@L&)C05jevTB* z9T%Z_%157F!!5D;W@Qo-AL`naw{`^AmVj2P26v>+C_dx7{(q_ok=hn01b^)g)?M$cPJY{RvKxY+UF)+K5)l=Mh18T1NmN4PM2Gs` zd#S*$X7KdMrK-JBC>%Wc&x}l2p9}1-ksdd}-$VfwL?%0I(R94}xhwscK@Kr!;9S@* zVtr?U zuVd>xXl`TZu}WD|Qk?U)f!i^)gKV<7`iUTD5_K;w89eO*w}EP~W@Os_tPLwc6$bLC zLQN1v;JwDD9n-gLRm+YnG$&I8i1lONkcaq<(1W9&Ks}Ndth+v@#w(_vOQyg!-j$-SW2E}Z=Sdw3X~#BW z@lbiw{RDo=9O_hYN}kItx;i_jn9BcUW}ALM9__~=gJ)3U#WX{jVN^_0!y9CF1T&6- z2Ajbe#dLhAn13RDE*xJr>%TfnF#{OlWT9|!2?+y86OodQp&w46M-rz@a2KR7sO~6j zY6iF#h6XDHT~q~>%*Sm)(nvQ|fWNHzhAS<3fu@JkWI3+O1?-q7-0&yTk!sRe_x;ye zLN?>3mdtgU;DUm%P$D*wx!5REXy{gsJ_7Y^W1p+|)}6P|56^Iwl!T;ZM{2`ZLji2j zZ>L;Hlf(b&=xCQn=M|~ifTOX$`PT{A5I`OL*9uUB$J33dpOjSqzxIghC8U@HQyTMrwIqzfp6^%p;)2(3>2Tqml_ni_C0j-W>H2qS z#!tehl<6&UP)YvfcOE}>(y2Oc8^|9M7or>=nU$Q_j47);(%M+fTi>s6TEKd9_f*bj z{QK7}MfF7rTnYVkXO-}1n_Ty|i&xaBMc?QyX%f}LTDJrxX-FRE1*>!q7{_7);fWM= z**A#-q<~X;m<4EY-tWfraie7F$<3vNh^=i%4nHx~B_`saBYU9QMXyQ+7Z-`Wi2ca1 z`#X8bl>0{c2b8q;65;En2cMo+DqxhjfuIzfm-sL$cmbHHErYKQs{w>o7RU{)@;495 zOV%>xfVhmEFsUEKO8BTTy)&2-7jG=LDRH+BnUmkCkQcs%`S!S&ovW;@RnE-O$I?yI zXeLq*>M^NGAun0WU$sTPBdjQ-Insm?_-{XAxtyNcD4QV&L)+s_*yuuwtG$%W+}g<= zdFpG&=sCsonMVbnKv=C**n&C#JmZQOD? 
z8@z zm@NI*LZJ4m?e+b)3_*jGSMQ`#0lnpxkPerSEWMzcTeJ8RTITCk_PC%+tgmZ2q^pj9 zF!J#J61gW5-54E|{E2@Tid=pdfdaW!EVkcE zVoOvwbQFfiNP`=B%@?3O7LfjpbLuk#lJ7AUTw>aYOWzoig(ExV(u>y@j;4JBL%JaXC3d-73`>fCzY$AbY93OjIAEZA`Xje;|La!zi(BXuOZn#T>H>p_vr%g7qrKe#N1xwVE- zCVT`1f%BF_kZ9HMr7~yvQkfHstKtz*dcwh5QuyP#+6`+P85bd4+)|nQ{sMCt@$#bG z67k5DCdtt5&%bHDR!jIn{K``r?gi8{CuH^PaUPd)?l_Hl@(3m{fa~ncW8G_y zt64|eguOe8xqE1}Nx+h&aC>9tLRI_7gx@P6>)u6Pws5%=DUF7!P?GC5U>WBIDD=}G z;x)ia&VEKpBga zo1fY!3^YPJBAc}mYQ5;}Pqcj_Kc!O2SqA$yl33QX@!Oddqj~EZ1G%_` zh|F@>{t(tx?hMNd)>3Xj<-42gIL2`QIPf5PL;vcMptDx`{oU=B6H3b+%c8``1=;o5 z(HF^T>F6<61Yj3lPZ^I@@)cy!?9t(QhPlQJJe4bvm>ED!KaW9cunJNaHjvDbewWhS ziEf`?RN*v7ES2PRr0biH?9qL)I`w#RD{&$)Sk*lg5*SoMO(ctF6}x0bRY;jOk!8x5 z0wg6)KY&$%CT$0&tW{t_zQSQ4L+ow)wsNb>h1TEh#;Df|_@sFjhrCegRw}WW(?PAj zS{vd%V&u$OM(gbpn<_EvS3Qi@x|vj@QB5bTr*VB1>8!m`q1x3&yV#EnY0MCNPlbE^yJSV$>8C}Bc*0Jbw37qH(P3f zry7*0BULq_5}LPh4}1?BteYM;^smfJWkm#gn^vpsGMgO`lwECsT`m4c(Q*EKbVU-W znS1As=|PSN!ilJvI3KGro8Co|4Bq}L{B08PSVRRh1mXL$7em)cc$9LiVY|d{s#~|+if$Ml! 
zr^W9WKw~~UG`D(KM4j~}6pj+Ks{I_Op0RFL>ueiSQO-@FVqy&ENrO6gHZWjaJYIZS z;XuO=@>1);jcANaxJDpi&)uq;#lQ0G;tziPoiLP=zl=+ZXe6aVEr?$ zz|$y1lhC_;clo-z!wF&Ta#d&NkTBtm#LPUFkP<$gtlz!f@rmkBM1otZL+9GbcM0-X zIc1xOj!H@ug5xdjchcU9Ro7XP;MF0CBiwbgu6MIY`TG15l6FLnnSj$PkB`L8@y9D` zLN_|P=SuM>E&h_e`wv)6xaU>YGqvRB`xsnn`~IMTC2rF2 zOWxf(I8cuWRd<0M3(u-9{MGF46D=(&_shj-)MKVag zX*;{XN@{W!jWPyjMVEEFEkGOMq|2C-~N)oPBv}v8{Kwe8mX= zdfVWoWEk%kPyVzJLd>#*0>TG^Tp5kx=)N`CeX!bv)}qb}w#yk``_He$F_WX0LF#~q zh3Pr zqh8#we|_t~jI^;myJ&u%*|Kr4f5AOIg6#M$G%Ix9AZle1Wx?+iV@V-3NG-qCGWuv9 z^2?={qrLzBnx*y>k;{esc}P_sLpEnmhu*Q)RQwa9G7$slT@YIV2nN=zC6Q&R)_}6^Rf<= zmC<4Y2NBQknyeiT2CQ&eeUG}%YRD)+99W?=2ws-MwT@F~dz+_$q|L*_PzD-H2?^?M z4!>)jC7;NBXdGVbJpmeO-$Ks2Z%8+_aqk?yJb2zbgD7)!`Z<<$ySsb8?xtK$DCH9P z*`rv<^LG^5pD0|kJ;Qz`xP|j(QBJVM>(yRxR&KP64pw1?jBWL2@D06?k?jL5`gFgz zxY@T3bFS!wKFPiwK#5W9KqYcakHocDe|^EVY{F=``lL3Ar=jfA`ut3H4QUwuzg&QExkc7&wXoVm zf?a2uS~m-j#B8-!S28e`E2pq)ZfI-SFREFMkmWhgHo2&bW2-wc!||i0y=9@IvQzQk z%G=uE21aM^=14a7_Q{m+^7i%E4%*(&hW^wvJ)*KTd-oR>zpHWf_$&V9M%UmNL-Wk5ndH~Z5VN#Mo&ZHb z0>hB}Mns~l&i={))0}#O>oKW1YkIqj>l27Zn9~&axD+Lb**FaB9MI`N-OIMPeu&^S zmZWh%W?XKcyj;k+sr!%#P8)ka86knQfv~uM@4JCAPpVFkC`p~+%Nyjz3g{f1JVpE* z)7d!e_o2NT&&f2`u( zH?!EF#Ytg=Bsj;!+%0IJWU7WGCpT6hj$n2ho8W?pXW7JqXoeN9IG9_Bop+Px)ZXz8 zn%4{G>db_Z-<6%RLcYvALeL_M-RGU)v|gS56E%_yHVbugll#2WBY^Nj+5kB2APFfR z*yDtak2h=Kd}2Zc40`Uo1KnW~aB%Z%%Ldq%RTeIs&6kF%- z!x6$vvH0G5%gO)^r6-6*sjUzU&qge$shB%m7&!~bEGs<9TpdlJ&?4X&*K6AMan+FV zj^;^QpR}J>ZJuM5OcqvqhEmVBEWzdZJ7Dx7S6%&s9;&WZj8VMxqpM%f5~^pT zpI$_+bIzZ>UE5sUQp}xaUkz36R0RQDxIn3`!`*im&IC@u+ENGmcg&|v?V~K!3{w+X z`xlECfU0a98ds<`p6A{R*R zv8div+1l07UOl{E4J)IRtFdPi?`!vu>;mb*3M0v#BO}CO3ywQOsQZgp3s|rrr^x3PFZK0Fk|G8O zj-J#%fwg&n=6^Q}@Z*}O=XZ^)r!0{`CwVjA0rdPDj~`V|RV15bf)wh>eS|s$0(kPAb0xg{WcDV(j_@ql3B^zpn9D22Zf@Y@= zVAY%46QhdXdF=88_{#L(z>*L0jOsC%ONI><`kw>Ud;fj&#N9I2e_&s>nQv7N%6AJM z9c2OehKCIxDyipg9oyU5;IdPZ7$2HK{h+zw(5Zs8Jg02k`DKU>x4VK-PDA>faP_zx zCI2&-BW281{|`lRfMt7s6*L%0)-unEg5QB;rl<5jN4!G4q`Y7DrSuN@O1tZsKzjND 
zN#vt*jqHo*mGuMDY8mk~^7fX0OI=Mp)DF^{Gqp>|u?u8n;iK~*Dy*pfK=#I-#uJJX zDwBFz6XJqajsQN7=BxTnbptimbTzwhC@_VVmdnt9D$J<1iLa^6j892>Etu@?<&?-_ zuPIN4EtQAza8Z>w3~G62jV9XmsBFQ$s7o{CVn1tL-xm&_0KgXX-AE2JRu9DZw@d8% z9;}#+;Huz`;XhOnh5be0ja^<7UQ?0dYDdC+@uGDS@`+Tfk}q!x0$D#%*{^u*cGo?o zc!&NfODNxh21R7Zkt_%Q`Ly{&t;K=3A&I|rT&s7&!r!R_LcDxOS+_IVf5YjP7EtD@ zIU(^ecGP05caG5uQvsB#H<89jy)S_5i~+i8Vs$wEbOFmdoPz=`jZtovKQQmxDk%@+ z)SMp({H3Cgt2Aj{5G0W|43A?K@*;*4d@vubcjA^e3B{U;K!Jc#h z#Um0k2&Qv6LSFGCNmKF~Qs~VBKMol!0NEAyeehF-mMZmV>E}#lS@K^qF`Q>9Clt*< zGTPBa(P+^nK`fTEaUL%DNJzNKe)tS_Rdpb)DWo*v=g;V^PkNgz%u=bd_fMbtk)jJG zGpJO#RJm6UcW8N?Je(z}V^k8<1m~E?Ue0JY^wtK+#c5zTDmds;%407!^BskKo-;8z zN-Rp%4}DR;3JaeBUEBWV$yL0p=yjpV;tc!(mwghCkBB9NS7-}QQ308 zBz_NLlkK4ve14N-MOolTlTxL|xT{RIy1((t1f}L)J%)bSYaBfJ6pTKkuv3}q-XA>| zAUOycCAW%jrRgph#*MpkiDNa{W@2A3ie++$RZ3sH(xga5+~b2gfH%uIa1?Dl0r?d3 z8??!-GE&1OFkD>=h$l_Q$2Wz>l+~kt8L|+eTtj;qck*c;y6rAFm4|VtYY9dPk(fv@ z;>|oTJv*mO)BypgC@+fXPuhEz$X$JB6V<5Q_DMcZ|KK&Cz9+$ozzL$DWPyEl$SuL8 zz7oW<7_mW^RO;b^hb012H+y8WY}NVunf3zHrRnn&8z3-d4Axu3-x~RkLTrj&1SJN|2Od)S^*K*7byZ9}h%vcbyZ(>aSAW+J6H%s%y3Z zcm9MXmZK~|@B15EcHcm6eJ-v=cHJnK7tsLie7W)LuSCQ<;|~TWXj&V>&S69fP`xpj zrVa(9bVV?e?32|hCq!Za%27hB0Y8>d1F-7{t)j0w2DdzqP<9l8Q$CG;{Bn-o+AP`W z$13A&QdO|T!~~SgcuI3v1ERr$?Gfb0V`;VbULy_$3`g*p+MY@}UM}ghI}=EJI)|5Z zHg$RbqTYo_Qk&i0_9LOcZ1L@jCW+J^k21^-YI~DR9{*YOplx|`t3|d)tzH(J> z-~wI;uVN>Rhs6-1-;j`TDMm8fRQ?`Fq=QYqAc=oHFLijiXR#aW>=o%JWoX#xb7Z;` z1Q1ibU?lxID~#u-hKhzYrBVp3V#8{Xb9-|Ru$cOx_N&@{LpE1}sr-EA{E=gY(}aFP zz77Ch5SYf;+Qz1z$asCk2De6$5tCQW)WVF_km^cf^??>99p9K4Smw`RCoe|#E}@^Q zNty(#&t3q6k6MaNa~qw*vs_3O+N;wa{H!Y(8|?~-Ig&La$X4LT1w`@o9W+XLzPu=h zN6vWsKLGDQ5WgDzObbg(P$_5>;^O;plTqLk472exP5Rec+ zF|pU5p?un9+jxV5U~Cth1Spv6=-<0fs%O-=pV+)Z7m(r~6&BVhXcX z#nLsD56OCH#UzEbDwd(4@LoW)h9t)+u4gE8TTD*R>7F#0206UG$mC4MtjJhd8C8F} z-Y~b_-23Z);MloI@XGH} zs8-p~7AcA5qY=G_59!S-%xc@jtR!g`6OLdyz&*Z8x6|c3jB~SEVe8roDSL6schRc@ zqUex%$LF*iCz(l6Dp|;~2t-7YN9VlF`ALT;`_G84)W|6c77d+YIP9UPv!ttK 
zOhrZkh$7t9DdW=vnp&E9w#rIeAuG3uoLz3ceZcpAJVkls615kvku`M8i|ZKn@jEU0 zuEfMLu`Ll(?89(}MrX|Kz(llbq$@S9mrW}3KK{X7x`9DAy+S5!Q??xxslncZ`+Vm= z{fIN`cTlQ1rl+^q8HKc)E~0gr-}@i8dFD!qwQ!7g=er!-e!y$tvs}(z<@dH^)?y-& z?-4Z5nDhqJ1`G`RkB#>fQsW`Q*I3^Qj z^(MhH>sZ?_a&2}d^1tWVE8A>bjXj;$wJfS6A~M07x7c~@ z7WdvdBB=cusqJT2)fAFqND#QRkDD}4d&tE+@p1uM64AmDbGOb}Q>5NaQOLw8XLTe| zz&*K3_szfJjYEewhe^H^9V4E#`QH5|Qz6CttN($oJad)xRe_wm&+h&%-~8(~{c8!X zf8#ky6&n*5i54Sg#NN4yP)@SFnIvKVgq;}41oIlD$`QZ)qQbX&_xZ>F_?JAV{2I0V zRkAS|OP(PLeY*WFyGLUtD+%n(3R@dFYJ~~%?2u01=V5=9^+ba8RSDA&@VWz>{kwGM zDy_;D>|~Nv4Ga-{$6XEr*;Q0*B8uYqj=DFukGX)9=nBzl+ z^|yF)zs=o#jO25xWQ#duKn%Nh^D~ay(4NF8mThu*ZLvBTc5&xtoV1}mjZ-Yg$>+5X zP6!DyNF^+*z~!39R2y(-w@W``;HLtbfrS$Ztf(>xb$PJplXya-MIyk%na(*ooZ-!& zS~IcY%0e-;Jc(R|8`txcj1k?pzR$b2PI>bH6hos}&`}hTS@)dIyWi)a>vO1HrkKmJ zo!2oG5r5dg*?*1W&XirhgdLAjO-pF9KrrqzKYl>ZmFOlmNF^*PG0;SC2LpyX59#|N z*uC#Hv+nO+$}n2w{s>XTrg$h#5~% zUCm(X%3{?vY}4vY*gq9MkZH;z+fovuY>=+x$rh4G?H112F+HbEb0*T7Wid2^oGm;> zsgy#%XVh+Uet)07A44c?vA$8`>Q0N;OhAoV^cyF<{i6nb*PwLs zD!F`$gy^G$efq6)?!0wGdjhe|O=9slF+d_@J{r>6-J?GZaPyZbR!eM^;>30NgK?Pm zh=#k=`w?eOj!ed)l2cJ+0hkd*eVUCvr{^wOwm_y@!ct^Z9EdtJ8xzjXz)GjdSJG&T zv?$d_z^KuneR9S$mO&}3P&5>hZkwoeoBg8^_Zk+7l_G1GbJ&`Ma*_RE(rVE?X)rP} zi1`&twnD<~;+@~6G4eT=O2o?*YO6VlNdN9LsOf0WMi7pqsYBp^I?PfY|-K@mbMM!1eke=i=`?F;lbB%Zgyzx95W$~Qrw_c&QM7iD1w0C_3=iB?A~v&a~Ke-Rw=HPNGc*)Fv6dnvA5S| z_jrz;&XOu*&_oew-X$0wQ6H(a{UU|5NhvKNi9Un#BTjC;#mPir7+<5DjZ;mUXqru` zvPtdcOI%$qvSqudgL~9Zj`+uY9Y;&^%(Hp2X#>S~hGb^(hJv zMG(n^N{&=@gPUKw!D@VtaQpAsxp%_brvYvlp-EE&@04zT!cN^pw)3oRRHzi zU^W`j*gIwBi^MiB5sxRH?ut2nkNNbJgZhZ`iH23%B41shV8%$AapK7=mCKu~ZmyEk z)kk&}e0*<8>!d@sJ3}p1uybi_9NgYHjpIX3njxbgL3uSxpJJVIB$}{}# z*JReSpi3}nb~t?f7H#kM2-a_skEf_ei=@v^{e=EM{w@2>1Uu$B-$+&)iJO{KZKW@G9i1Z|k@visnW@4w!~ z|D8DH+9DQJjN0HIQSW;^Y^6yi64a_Dx+Xw02LEs&YomYd0c&itYnkpiB+V>5CSuL%`Url8%)F(h*j5Fi5nEv$E3*#9GLFX?VR)G zk%h9I=Eio4Y(j*{f%zfLvt7RX>N$-qiSo-e;z{fMaUTyy?B6?P;6F?3*;lBQi(HO@ z`RH@T9ip=bG=~xmq0Gx^ooa>)XMao-olopZE% 
z&NP1$>+-W~#tpLLHqOp1nsbd#dYz4}BG=X}3{7O*?Q?o}2S<7VtyUu)GfB?ch>hE{ zTV3867TC_Oa_z-yT(9YD#TBAJL{U^!0~B$?ZvDv5=)K`GiIx5hD0AT+jW)@}$vdh<~_Crx#ME%yuEgMpZ{w zUHaWF_ii6D2rpx8-C*_7O>W+(kdS<2MFB-eS41QN1iCm*ll`L^BPB!i%U@z;wLnSq zkw$f9-3~{yJTbk%wX#S)*2C+buy;J=uoWYD;}YvPo@eWYES4-F2_WkRiuzt|%*CXu z-{^DlpiP)qCwu7ywvsmGi=7pmI)3LaJEsu`{T!Fe394m%k(zDZ!0X@P_QMWu9|*+0 znk1WzBZ?vnTZFA$+MYpAFObjZ6ypJsIH%oc@$lX$j`n4ewVP~gJY1nw_O2k72S;yX&&ksjyyM<++Nr0k!n4*3(utc5!`;-EgKG0&3QtdT9}kOdHt5EPw!sR)3iJn96_ z@!TP2$2~e-pVae3(#06E(Bq`tV)vlWcxYi+1vXb>6m!br+CdYqe#p6JGcYQwXA~;- zMLg!9f!8?X>_WDdQ9qCYi&0Q9V-~9$Ijp-9t=%)aD+-gmj&EE-wDU;HlUn`l7sHd; zH<1VPQ^wN{4`x}$QTqK<2n$4HD}%NDBBe^6E7BoI3g_}FmKviVhbV%JGnvpj>)}Qc zYBGhXDrmk#G&rJLKjFb%hfZKaW(_M0h)sLAp+l$Ja3Lmf-s;rn&Agc27?~=ANoXQmbIIg$YyLz z@xs`<6X=~X8oRXoIBHBM6&H}@InJm_LAyiLd`NHNa~zehErH^DvWZU=z1NizQnHAV&QYXG)H&nrdwu#Iq^mY& zOnGE${wW}uKyXT{)8^jc3@?@?U8|9a8Cb$&UYNjxQH$AdNMmLaC*C5O)?d6rE}tYOe)uG$+wU>h-=#0an55RQ zLmz84WEzFcW__aGK8>D0eU>4UhJ13)$O)MA+C;5~G$s<~QIV|alheiw=K*7PN))wd zw#S^eAX%`nCrwrRI z+IAV=Opu?pkY+u4-jspkGxHUURE9)0M+vdK8vWPH&FvVb=069jXnp3At~Vmz5K zHS#EiO-2dPy)KjXA@|>Uz=Puv-OM_=X;Gc`kvx}42to9bAq)e;?g8V|yF6@7IPp_R z>FcboS9#@{f)dVXoR68$eZuJo_xK@Q2ToJhNf+{L=T&S?#2rp(?Hu8RU|ilrmj#qT z6W^=ToA%f}3GoD-waqw+95EhF2>c(z3$V^jKM#LD=cA?X3(K|b#ucZi5d2+!J%^A?1LZ{xM zcY4A=PvWNvg_SBVB-+?1n@MVmxMGndM8;)4pV9Bn z2qMr-1xXZOKE&(p(5j#C?wQ9x%wS||#O5w)zs1ZS)9m)yxqU_`h~(C?MDCnPr;Fo9lE8I9XKJP--gELXm?K_(af*y@JU>(SfWV<5+v z<<~G~Gt@zsQ4ruvdqmAWT0@yekSCk+$k-z~W0ygzO>}mj)=Z|Zl*njPGGdqRG+;O% z6LCtbGo#*C$rMa-1%=_rVLWz05D`=zSzzOst^fca07*naRF7fE68gnqw8*Iwbl%`W z9qvvpbK`|=zWz#uY)oL<>2q-Fh``K|d*%gdsT3v2MG-pm`vZ1%d}K|hT(xi}b2`m2 z{@gmEs>=jn$mIM1jXVFy zJ9|C$CpxLM1ZGmE*BUc(z6i(DKrwQxKYxks=dz^7_ZXe;@@CtoJ`D-Oh%kzXqL3)` z7>6l3v;0?}k zIvtKC1q7?a%hyb*IhiQ%aq6cG+f91u0$$7}(LN;@_2~owjvs+2qgobPOh*+XL;?ce zW!xJvXpfkC0bzKt3(}qNux=4A71_F0#EPjOd?@AGI%Q!4q}X*EJA)y4@kJ+haVRvfq;MC7W6;hiw^v z3x1u^WWdgO5+8+2)e%H9THS!T`?NWp(IU>ge#&TPm&RCSX5?5cn-tQYVXq?W;?3)v 
zv{ky31eKhC6%09l`xvL+M>GW{i2|OLA|JKU+%D&{h*%5=C9Co~lpTXYuFFlea_a^_le! z*!{r|c>RqN?w!<7YiS-f9o7;mh4=I589;?fWrMGM?Gj#Li<6Bt67d)kA_5wADoO21 z1!H%I_N)I7-&8hfTq~1FB@yQ)tA>L$I>PBa;K+HA(?Da5KAtn+u<6l`HF1*1jMp}B z%s7HLC3I&vLy!K>yPQVf=B?+8$h9gJV~<1CVd6WS)raVNeQK8yn5Ki&eaN6cW;eRT zRw~D{D-y{^_gal4LKcfFAH=K)B1ma8BS$HtFw+x^ zwl0%SXG!uD1-yqoctiR}_c?p}Z@BgL18z@4gl?N;>x|YQK{soX&_9xwP1Yf`hLt^F zE&c|ByWioj{#fO-kmPbkB$F^99S7f`civ_1L6eoMI$B&n8tgOacG(YVq!UGM)MTsI7{~lM&A80mETL zbCzYd6;T{W+^7oJstewT+1zC~beZp;ku0~E@J#hhx}6g`M@PK#j*O;H zm^aU<9}2`Tzr;6QEb;7CiK;q95gN1yLv9@!$mkT+u_Qe%9ph9bX9tT$kAf0tg$!l_p9)i3095uy^qsC}JFFQ@qLISnPN;O#sqJ8$j+O$`SE^%sPZhoe(5E)OBu4rD8xvYs$Bn#=ec$JoO`eS z-+W8iroOjAE^d=frbwi+WW*Q=-@~t;GHiF~Wj9FVYHTVZDG|aj!WlbEdu_tuF8y(w z`_m0Blc8+ey!2d|X!elrz4~W-uW=joxiZ;!f~1uol}nRO$B|?San^(GyYy#pu3hGp ze4Onfn2&a$l|#yHP>FwsgX1@N?JqJk?{0B7og}aO=;aKO(ZLz?+20?NEzP;S|_sduK@twQue%Im8q$}KBE0D8olBqPwbdHRm6W4Y0 z>Kd85_eeLs&4ceY`9HNC3U-Qw1Ck19ZGhN$K%EYK4{U63K+Zv^Avu@CcV1q_1z2PA5qaATwv?j%oKD`fi=6 z`+)Y^CE8Mo)o_C7x=dyfgJutBcbDn*3chI)1*bTZIrY&hx}M=u4U#tCdoXI_^jn-y zC(xV_?haYsOc2Y+$e&i3;0eSZxA6MM{OHHG`QED!IqR5A%mMY26Po21qj&~gT!gnh z7H>}EEFAfEy~peyz6)2ckS<>***hT9Y=bwZJ+-*kmPk|;uIFLk%$T%#MExUXTAl~p zELv8loQCdv#<L2511s?XY=sBHI3am%p167Ai8N(h?z+=6Xe4$++DEUKI#V<&MASk?j*t7H{+aVv(g%#Lkvj zdG1A4dsA}FUFydXcPxc9V}%v7NUc&~4V}8{f}t~$OjM>Qnn9FYCvKQX@*LgN5n>jS zG)FURtbC2Q5)gCF8FPuLZj(=RSt+K;Ztt5zlq*wYNkR4l%$SHKYDA)qol+^T z7|4-MIF+#s6*Djvva~{R#XvzLnn>7&O3ZvTjz$0sR6R+yQoxH>nQhc4q~ch=m=XU4 zvB>A=4o7qkI?O{8eSMom#w4%K34@TC5Alc(FGtEc#A~FgHEvw9x$6!%y>*9!{uj#hT=^zVhVCL1DeXp)(KyK`8ED-+U5SO+w8LfUX^S@MYRN0 zvL-4SBy)w0E1NWiKD*sr`ca>Ey)^4Dtdh!Iro4I&G>=Z;U_>s0WgsYD%*R@gb+p3s ztOyqWeBQ@={gA(W?JbT42VZcRCUg?Q7_WDalb*}DeuMJ0n{2-F3|sjG2}D3AUR-7K zm0#yl_ki8w9hxT!x0HbOYz9v>kc&w&vWgygOy(deHrcCJsch7!Dk5 zk_J02Gf|R=icKz-;QEcL%$yPL-ace};BhbJQA!sn=OoNx9?3S5WDuu5%%-S`Jn_q` zY^$wcRmOw8jkz|>e6ho_CrMxoc z*~~4R(IIzktMpfHuxcf^T&}VvIBZ2BrY_NsELKDr^Aj8wpNP3XXVz{Z2pav~oRMcT zHmqbb5j#(}cSP3LYz)(eEw)oUJG?6#O 
zA2(?Yed_Z#sbYpyIf)t05au)7kWZ}wMkpbPI>~&J_{hQUw(;DMsJuZ;D5CX}tdwO! z!NeCVKqsZbT#KU^IVwqoaa>>~ScIZM+5pE`$jV8Du`S>bBNTKp=0{Q^i7JtiC6*df zElF&yme^cP5tp9SZRB4pLKH=xYyJM4-~1+j@+W`7wQJXS?z!i9@x>P@l}bPF_I;di zjz1gIZ=BK@%xJn6b}CJ|oW)i}4CTYa9t_U#Iy;=o>kPFDYYBnW#Q_?B+{Eo2vVT0} ztm7hO^Tg6wGKnOX*Txv`vhO5lMH#B)9Qm|PLUS3Oo^pO~hpAm6NR=t&lB8`FQ+Ao2 z9@4#kmvbdY-z-qhWy!~7(#n8tqtEVc7bTY`mM@S`#YkvA^1Ml_Ib;7MAhTK^Tg?&^ z9(n1)IfN4$2M08c&S^(BftnzdNs>+~q?6FzZPPs&AeSpx`3(6?3^V8>kB+#17IM&$ zD3^2Oaw!rC9o6q4jgGm07I4s!D3x>MFJzh`eJ~@5JaCU`o}BW-@3$aTWbLajQnFRD zPty!M3L$ivkGhOoXViOB8Y2%SUcyS{DCH6)V=CH56F5gcgcF9X2Isqbv}Y2!=#61@JH=Dh|{B85^alTVwZVgkynNv|_u|0KjJWXP=*hzan)`L2cI=`{8jpFZHg z%P^92tdz6lk}+&uLUOx=(-wF4#x(j7scMl-E=e|FqBwP;!6A1}6&e$R;%c5;Hcr-3 zQA7vAF~@g~Xw^rA`6^~MMh{XipK zNE46S7>YqGrentqBz&UzgyG39jYgk?u80(ik<7BN!i@-LJ^WFf zld}%ztqGo)N3{}UEfveM&}|!25zz#npnpVvIAw2~#7-pGsM;u^fZ)5#+dH(oeRf7M zG%HP|m?IUFv6KL(bx60>;;=KvlVcdE9C5?I)^%bD6T{R{1rWUf%v&6t`_#J<#f<{# zTpWW(60nC8{P}?T&H;_)kfvax#gY_KX_9dtQ=M_VJ772#uqzeP**N*6hTC|^_-vP> zpukMau~NyAu?=ihAeuIr4;vhxc4-ZLd^3+}S!65~)3z~U7N#PgGADAHw9XqG>@*mu z7IQmIHl8M}fi*j1;Dj7Z(`55mD#Z+OUBrBh0z_fJyt%`$-{s6tA;=nOBf@iCX0C^p zE)z?nNNFHU+BoOCw1o^^t;$+PC1**1kJD+;KX^zhvgoOKO1Tudq>iOb7&luS-8mt! 
zQ^>{gB5qaeqn-UF-~Pcl|I42z__g1AiU06Fe3i5ScI1NFrB&~8|3Mcik;bl;$R|xw zF%5~Q%c%*yDdB8L^X!~ftIN<;!AuY@m&hh9a+ZN4d`jgXzt3#c;o$Z$ok@USD3Z+P zD5bL)vm?TBorevXsbG^W<;f;uWNnq%>0O4+6ArvAzMiI1NRzg8;-)3@CSeJGvD{gb3A`qLLUMb7llAjP^FJ6a^Q>!+&Od2LXb5K38d&F*NMDesx0(< z0@npaK~R*7_0tP{7u+e1XyC~jh6wNZ!eJN^1ObvHElzs=v;;0;;NiFsBB2^8vb2~9 z2W~**M~Iq;_e!A>EWfO#eD4a8G9nkKMIT)shr_x;A%%I8Q zQ)L8yX2JkMmzf(f^8_?QMNt9qvKcNSKhl@%e$dA=w*9{1Q2vwDk z-WM*$*mWg>KtxqR`pDx0*Ch-BP*p@pLgd2>x?vFDIu1c3fM%db;^&HA3O$d&^FUP* zWced;tQYNcTzoG;G7Ka|dCW=PMJgxX^_Y%aP!*($e5Z005xVXoHd$2>#b59W(S@BE z4EhX*W2S*bBy2%-o#kdOr7V`CJBKEd1} z5F`T0K#>HL&;0^N5V&~0&v*`!EMprgf)Ei!0j>*CB%rF&;F1QYpNX8LVELB2Z7(x65Z4Wnq(vVt`DCInBy=2tD8e^QWKq1xDzV5|5QY*_Bp^MyU=X-O zVQ`^~(uZ~Nv9iSiKokj!?_%gMAGYb99MBFEG>t7TN6Otuk>$sav7!hUvd7$7Q4k*ye)La7 zNkTIh8;c0y=RT<^$}++u*8d|(yB6)#BxE&0xR{iE{8<7ZA}JcNqyoYteJluw>ct_L zUq+p}3p-;abC~fAVI(ZtCyGd-_>-F?X^5hVAup2u{q#RUL=r&zgwwQwC?RRF#Y9LD ze#R~mKva=ch4_U+F7$5kuc|ImWQrd$elmg}p)M-NKc=FK0HTg4LQEDuy$>V>bO{3i z4FO5GIC3wkNC@bBhTnIgFGLZ7C?E+kx}|>Nd+>>f;^JP9=;8+=0YQ^-x*djR4XD-# zf`Hk<5YN@wcx9XNR_UTT*`wEqXg`a+_!UABMMT3u1Z441`?PrH-qXPkn?+?&0EEZL zq{qo3z$XkPLO($An~bMJ9*k1Nl4)+P8dzW4w#rk<)9-j*Sg?;8g%2T$2z>OT`YG3a zn)4_KAV{C+nEh{t;O;gQ(5P-2>kF-UGZs38}J5S};i$Vx;3S zY;BpJ?D1Uai}0Ab(4ROqKjppoWE~bh(V6(P@xN%-PiUJUetw7Zkq7;RJ`mn_)_=l{ z7JdBDrCWa1KK@x*^rIwy-j!DIgGuk_eLkOd{i1(A_{*OWK|nzG*b4e5>)U6r!xs~q z&*C(`XJbEPi{88bGyQ-M>%Jfe2%mRx@spfYAwUoudZ+i<`~D9ZUcSN1PN8@;giMyN zZl$=K(>`WTg`e48{A%&|iRxpV%J)SVlcdMr?R`CcpUElo@A3L=hdU=Bg^e7EY#KA2 zAeXX9=;C7Xv*a`LE8uu8`OHL9@ORmJ^KbZ1|MQtA2xzkVT0Ij%09sL#Q&&z5|EV1m5GrI&R6{r_|oH@kvd-C(O6CoVl^ zC2z@+B}t==PnG*AO4ok4f#xKet|FWC7p&frZam(_+bzB}<-iA|N0LBisPO zC)89f9nYtMi|PW)@AZ-=j|<(sa5{Z|>3A+#vSi7}SyTp#LKim(aC`{DUv`^5AIlie zC!A?(pZ?*GIT1JL8<)6V)+r~J!@A_3KEBgq-reKDt=qiy##;L*&Zf?l+>`gcfAU(q;#luWW-HL#3dcFUG5S+eA_6FME{&WO5W5lSYNw2rR+ypGTah(d%Y%!mYq zu~23u`Tw(bXH9Zs*MaB%zHi~N?~(h89aTW>B&$e~Qmfe#TQlk&y^f8I*_e&>chFbR z%e;)qY#v4r(oAco7BA8$Iw9rhTG#tg<&roKub>BLiU 
zA`u;3d~8X7;Nee)j7KB-!zq)A%WO77vNLFQg18R)_y1Y};4|s>84U)EW;4c~z|7Z) z*cOSjh2l-Y9ntR%7&tSWnTi>Wkj~mDf`I7H@CG%Uu}f38!{Qr7(1Y6bgZW; z9Wxyb7!C#uoe@L|s8)=G=HLyxG)^0gl{m9VmO@M=X+3sZdSil_L$@=aKXjRj8UiyE zzmKn{@QorVbBYmk=^j7i@ZL@CeE2qZW*cmq5e`=)tmIAfSr30QWZ;bP^aO&HBVmHA z`M{Xbz@b$cAVzHTObXL?kX(m;Z$RJi7)({{gheW6F|Je?_lArkd9;X4){;@AvraJ^ z_n3?ZbOvK4KA`FdV;6DYAm?*v$t1Q2@@UBPc$co@QcGXNj3${kzzAz?ghIb$&YpQr ztAW$&GMK@{m+(=E#G=I0F*Fg>$Da$q4E!mB-hg3$h~td$WCJ-_Bx;Bx)hW(kfHN2| z82ALLj+swmiet=ai%QL*GXy;d>7V?}=T39m&VbG*@a7?*AqT)m;EEO@+5iChS@uo~$yEGd^ z?vB^UW|E|129sW$PWh1i<0>uRgryfLMJHJDHhbGX4^CyyznCML6zCn6>70ycRw4cD zdGhfnMaF2u2Cb7aC&vTIWtZgD^W>u@+5QQmqg`%(TxA+xA$cv&M&|LrGhpU)7@a)e zs5;lw!4p;yu>tD;Bt0M#yjEU*5C2gw_fK@ z{_0bPi$m-$o}=8(bKH!O9F>`NPC2eLaTA*eb2-ju!PY#WPrqGd=VlpoZk}W!NzUz~ z4JuUk%N*6F9QUHkZ|1p_EzsJ(M{ECtX6Z!|a~d&SItx650M}{JYaFv%Zqb?m(Iir< zk6f-{zW4&MSdv8njNyRc{zJN>B(2hAtZ15&1mTzVQ0SM9N8r2kD+g5e_b88`JJWEn zDXlEB8jF%t1k}erxcO58w@0^D=H&2{YPH4GOrxJmu$)myM$g8^O1Z}I@eDtjCKs{E z$t{%LJs#ewvVG!{y!K`0SE5+TV}WPjf#)#pH>e(zshl>bRh#JNUM9YlLX|vXvjM~E zAt&V%_KvHt_&i2Y<-B~vtli|}Er>48b3UISC4d_E;Efn}Yg8ZZQ|(NtjuhgN5xNnu zKU*dePm!<`rYE=9ee=)w>%abl?|j_C_+o(zu~iP+GZImVXq-*b1RhMfj5{?>%T-P) z6ROpK`D+(hh{ee&j|HB=h(Yt1>cJ7^o=mT?hkU%nL9c|fyiO#MW}YWEfO}Hg{locP z#@#C0@7>_2Iia53B$ry`{3V6%_B&J#-{af07qLp``Nm5{ig5)6pTXfx>PI`=s>K*7 z30Bq?Dd_hQAAZ1hKb)}r(U||~Hx{|JrlGqHj_&Vs`@@I$g=N%2kz%@yT0Y>T|Nc6s z^Ot!T`A=L=Dy#`U(}VXZKWOsC>?JO1X`b;xlpLncDG%QME?WnEZkN(r)YDuIWE9gw z(z_ho`h>kkfca&Ut~RFK{FIybJsvhKq{UZR*JSka`yAeJ_~urI*D^)&*8mSZr_E&h zZSLI}^0yCCd_jR|x{pn-B>(^*07*naRL6sQm-bGb)7!68F)Fl1D}3X6j@6hAcuc#e zbavk4&hC)6_oFOr7TLHGLqC3(+Tl(9_Wyndxp;xguY7^4C7W~Uu;k58!k@O8wQsQX z;U53^R)e)~Bw4Z~W~UAO@(#W7hum>`-1=cZwZV<6b zF3zye!NXIA(>S^FOT>~k8ewtaQ0SM33k<2;{+NSz-(pKxqirtm>}s6s=!nLRZ*q6X z=1;$~$tz!Yj%!ypnKQxmyZD1+wzp6DZI zt*l~1TddV?@aBUUCzU9%ms2FNpv5P2ZtQd8o9|jD}e6Ehggb%*cr4=kNx4A+xoj~Hr-GcQg2|OQt`tBUl*uTxqci(5HZ!vx4 zGO?v1^UHDKr{7}S`+$$`X}Cq5=P#v5L|kOILuKa{TQ_fUV|opBAv8e)uUCtx`$5lB9h@)q^& 
zQ*Ikqhz|rrK=itd`g=V1=tDl*nQ-vs|ACw<5d;#lIzyCuob2p!e{W1`b&ZJ)ZmmPD z?$ICkI8zaKJjJNop}IHajRz{L?J24VQXmqHhfI$?;poAbH}0Jyx32SS9l9NvW@||Q z@KegN$8Dv}_2n2~K`>&}IpFYvZ}H)d#@~0o!tb6-^VO9C=HOd&Z{6T~-x)Jqnlf8? zmVCl|BFi}ZLd<&5f574PF0a4UMZ9`MbS=ZA;UHI=_?>OaWr^=~7WsEyoM#$%C^Ls~ z^N8x!hupre@cw>|uWckbHy0z-yiav_#Eo|kNF~}NH1H%D2@N}%4q@ zfz^mbLK-4Zn+)1V+BI$6I(SlmKN;Zc-Qe)nHgDWa^4!Wfz9>N;=^#i5tsOc? zRc;=pkOG|-Vu7!{rBJIuKFs2^b!}7CFZ07-rj9G$3Fk< z?gASf7gh2RgCV^^hrQi4jww>SGDkFF^GleC8VddD@*QwbDBt~nxBm1m*uC;UBVK!n zFBP-Qw@z^D@36n6^2euL)XY5do9krc3Cf^C>){QyZZ~-Qmcz68RiX<~v}uiTZNP1( zPBvwOVBt0P>280>&3l^!1&Nm`D!ysa=_|BP4`}b+;*RnEp_+vWd9gZH{ z;p0ynoJf&pFUGJ3GmP$iP6un;GCO?zN*qxN(2NoNc7?6)eM&q#PdXRJtu+``+O%5` z8}t~t4xRP^&f#5d-mK9SvuvEtF*oP478`Q+&klI-_D6K%{{agF4;f8UNyH@7XaPBH zvUzTWXD(zZS}LYE!XNdi?%ihR_Dw!&okL7pEMJbG_EE)M8lK zV(;!g@4t6S?zIvNm*&V>U6ksS(}M}aq{i~b9LZFaCwVo;QxbSiff2_a{3EyDev5a9 z&%yk&eB=Ba7m5~w0Gg>`SsGrWLN_?1GQCWpTR}g4op;`E@aF9VSATnz%P%ZbvNfX0 z3eu(D;nh@ul9S~^Nya^S!0kW(OKuO+RImItS2osoDQA(?MW)3oJpa2Qqj(W~N{?udU%DCzriOd|=H=~ro z+0LJU0LUml*t0Q$ZqUzIjK>P{Kwx2G6)W1oIaNs6CWe~DFbp(zK+wFw&dn|U{I8Bt z*Pr2q|NJtmOL0k?ZuApW*6i zpD)Qi6RU(at-^zE^WI&T|GIUB*M5D2-+ZaWeA+;&`#6I!o}NTmKTk5ABp0~oUX_zU zlZVGTq+8qC`qBq1$ zNQ|Q~T7v?`Y?O=ZG9TZG(c4zZq@%>sQ6yP}Ne%ity#MVtdH4N8c;&ZPcxIE0RG0cz zjqkqg;64{+^O-rK2?N1*kS0T1*QJF4jzP>#v-;c;^IuVUZ3uQ!Ag%3j`|Y>6_s$lL zOTSL~(lsuYZ04*cf-u5$4ZK;FV%BD%q@k%Ij7QKaGdAX!ES@8u&9fXeiOM2kbDQz; zKk)jSHU9CAz}2rWarttVY_P@cdW$!!7sweoo?U{ZQKxzEDewNjf5SsRPviOD;@tW? 
zm*Wyq-=*OOjAt1lcAAAvlWgkA4&9#Y);|)ATLjK74?fxAdmo(8dA`K*`UN(Naf-Hr z00e=DH=8n>jG0Zxxc-Pvb(iV8-{t=CI(y<3zE&!+F&`%;fhuL7be+vagu+B5Bkxc- z+T#894>>L5h`)N7;(V03hyqB+@i|tWv+#8TDIsFu;Lgq#1og)jxnh!R)4`Ds?F^a15{sKfELlXs2L{v|eePF>I9{6c z+6u{3k_dn>V>+AAY7H2U74&!i)&|z&;Ee%s7JZ% zGRzwk*9)XlI*QkzQteZ3`pC&8vZVz!(*`N?Nq*n@c@mgSri|)!##5Nuc~mPw+>Vpa z$B{<{!_f@QFvt{3L~IqDHqEUM*|~enN6jSHu9diQWsaf>+PKTKQ^y%i86_7n<5?Ds zvmnuPXAG(hM#Cx6{4&vefr6o;0^-DF+G^l9V}_}Dv_yt^BOo$qGHM@iZ@bLHx_~;j zNIaV%0Z2?3xC0usA?>z{q#0Pr9Es8f%W)`K9{4@#_bc4{;C&u6RymEmO1wDF`S}P2 zGe#qa=1G?+1>zfP#N%Gd6&tr2JjMk0+F&67+@%x8z(l61z?ILTC$T+TpI1V;5khWGwEpM1K< zySp=<{gQ~TOz9n$X>@%W(QA|zmME^rNkt@>j`3Tk4EqDBzC~P55>FK=E*ID+Mo|H; zbBF2CTkJnLs_qNe=)6Lk6mI_(awYM|H=;?j`b{vPjq^L^^Rj`iGgWHL$8(xW6_$pAe|A|9a> zchOomsO{b6&C?VOrNm2@qLeZkXR}%&ie?hc<<1_gf`I0?nNChQXsA>MDXjffRGdw) z1_~zx2o_{;f-|@e5-hk43=rJig9LXA!QI{62@>3Oa1Txh?(TXf-`@M`zxmf$XRWz- zXWpLfYOk)R`Y9B`a8Vg?oPpT}jHcI{&r$CYH{C*wB&j}rmKAkb#to9XExFburs`j5 zrryDKOv$_3WA1*Zg(k(-fFX57gE(>+4vQDs_1*(b>O%uYYeG6@o*Ga{-QZa#u4HkL zuzta+&(0t1a0GF`PpI8fn%sh1YSQUP0JDJwm6>gP<|D3)3(069u1tA&f=MWw19W3Y79sCRqSm~_E7P-onr`i* zuncYbnmlH@x^H@#K8;n|oqu|v(ickJka2)h;Vo@+x8od0?GG&stH>IOoj_z;D_+9m zJ;0SN%4fpCRp~EoZ|wc8|H`XsUu|^X{3c}b4YT+z`xUji%_;K_9e7Lb z?YCKTr4EH+z5Ju4lt9YEWIXUdMP0Ikfm5yQab)MZ-psDv0kzCMvHCP1iBh-|IXHWo z0D)LrRvcz4cJZTeztZ+bw@p5Wj~rbBHQ3~WB)0k<3|@6O|*!8gTcPtJ-Oil-KBm6RULvCju`CDQoYP%YY<8;A&6F5o7Pki<;*{F-Axgol{ylf#JzA;L)>QaBjw>j~VYXoP z{O9g&h?zB~hf}(06ngorHUk|vDG3%2@Ih1G@he1okhl@}V3d}KD$B%D6vANs!wS&yF|I~ZpyB|BO$O!+LV8MPu zdTCcYs@N})sZHBj+cT|0GzbNy%PfR{G(qLPXz)K#&+QbegjW8 zb^FyiON)Z*n7F&CxRZ~qQaYsgoT6Zxuy0<4^`{MD$v!3{*%0|tgA#`i)-oSiW1B4;C_kfts_v%=9z*T#K@3BY2LI1M3H;T^QeiIH}8t$7Ii zD$u(EXfTb9D}vVV-FOJN(NS#WF)&2!UlJcLE7lnvBOTr zPe;U|%@QbOl5k|-3CCt=9{IV7&fViaCNDrVNB6T+({9WsE7`ts@(>kFMxgDg}j99UcITgt~L*0W5KaVJ8d*p`H1T4!>Hi z4TZJV?|6iN3w5-b@u32Zj4gVx~mDPhVDp^lRxEj0uwW zZ#}K&FHI70lGP;QPLSozlPhbbSEU_%=tbzQ-~^kB(X^MnoHkP)djaQ%60m^iptqAI ze4n>-D5r0X5_SLuGc@L?rvs4U&%?nsOC;e}{gj~@p~Us2a|cJHPJ}o4;$fP<@tA8Z 
zx0#;v^t&s`?55cX_AlFGOYF zG>Lvm18bR{PfqO&DSA<UXf<*Bi}YK2rOh{-Y>x|2Er*Sikk*-*wlIMQt;oF-!;QcS%0sf6FjHh z&8n)=o54~+l}qZ>PwKBE0|~xJxxBZ(ap)XNlr0yiBc%A665<~~If2whSa%-L^;pc` zYYrmbbPBeZLh^s(&p=TM=Z0;THA2UeK0$fbMZ%VP@8tbaH7RtT&AQPdLoG$TDhEb` zw2HmbF_@_r;GzHoWyNXMhHg_{X`z$UDKPQoOdr+h{|;@}p>*{@ADO-LTC63sJx&p6 z6`^8R=$5Zzv!sj!%=Qy{+^6WZOHen+*Z%BD%n)CL^WB-^tC-=l$=|6qUf?4p;}|L? z=%d`L-{ZW!KkF3Q()$#olVWT;x06c!qclsjaTAEPy$>C`b3ZRyC!RUIT09xcr$K3Z zf&}QuP^7q=MPuwWwi5RzgUg9f+uyYrrowLQ80WDy$U(HlXNurE#15}w@>j*Q#IQBP z>*8G4`;lq6zNfhM6!?sW;ulxb@O+Cp*6sX>y5t3jv#V%5+x& zX-)=q(Z{P|9q#XDtG6_ln?_YfsK~;}X|RE>mXE}Ls8ZbBzDws2?VWR~^$jG6;Ci>E zhR-;kVSO>;7A^j;tj!rZ%~Hvp9A1a_UM@%^U+QwkTgNgHYDBuY&ryy>FfhpD(}jjG z6k#|pBkuG;83hv|d&sWu=VneHWeFBSDv?<6@QFxQAH?qDo(mK>kwnRd$}%`ZU)eDV ziK(Fi#W8K5d-BW$XAr9`>?0t_Vuf9TpF(9DS15blEqx9qaZhASgVw4U@NX_Z{vJcO!5#F%rQ4>(;j5`Hfm(==gh!*a2&R&G3pg%yaBbhA6a&>RApW8*^RA{va)65nJ0$jxi(6zha6 za@BRrjtp$c1B)kzBCs7fS*n&rNMPy5pS5rWRmrHb5h}pr4EA$NI4S1|rGWOFLsAB3 zwWw~=**ETq89x$2#9qw5@7AjtPufng??_wG#2S)yvk`>7ow-0>^+>mU;6*7k9jB&- z-GrULw#eqRRM!f@LQup`;cS6eqw<>N-Ps^!VmH{T^PGA4_uxR@u_sYjE{a3m>EdNC zv>0|GednjKAepkdU35)uilMuBDgvwh=t>2ZClNL*tBfJg_y??3Zbl+rNwHwV6}=?b zcZyVRDrHAl%v#NUwDBxSMQ8j0m(H|Wg3 z9GgU2M|X`x{9UFsdY6wbZ4g=?t3qPDB$5K$Z^n{LRMr}#MLU}RB5TDo$W@!oY=-G~ z`f#^~CXV4MP@rF$`=C3k7dfL9Zsyvf5=AKscT*5rJ+>Exm@uNN#heWF|IQ#*{abQQ zuAVNyYFR8-AN@N&xy7lSgVx{GciG{ev-8Le2e4s)gqe8nB&pm2JSZ>6Fomdvg*_uO z73~p?k2DU4+>KP;JR*lqqapN3CA0LpyAS-IA@@7}*HnXr0?Y7bH*n1`3 z0z^K3GXq^wTTh8AJ8e8Hnz6@=M0b=;W+6yV!w+Iv+~TUZhwV)ud13v*Kq?ZGga!FZ z3vjJf*tSbwjelGG1LwRX!btcVPd^*Z6)g|#ODU2- zMxPRwiX3^3ePrN30wdW<(e31}!t3<9gOU+5%a7Cnd9#%CA0_BWZ1yx)>2g7XpLmbm zydKBvbUH2U|F(_pF~>gIB(J@&iypw6U&A80K#N^1ZgZI`FCA#cCG~pTtTAfIk?bUu zGeqzw=x6rvn)TzGti=Yh9CS*RtZqV>SmQJ^Xeq)7?qJZG!PeyQY%2Wie$foE45LP# z91Bv6lrsE;nEWEd6{FPq@O6FS5A>{Z3+|BBJFt90XJ>AV=e~q!$Mq%PX((l?TMQO2 zy0XVT0ut)T*VkW{SxY0IeV!A<@2@2^@!ZgVzeU`yl)SwqwNlzKq3vsJ-_iu_6-^7| z&53zyA1+eYyXF#U#1LFVJov4_o*~$Yt=jbi!r-gEDv 
zw0c#XZYQM0C3&mjZJpo;+Ho~q3Ppg%j1;lzQU>u2`=vXm{IWnLa022+$-rRd^WE(# zz+}_=rc-_Qb$WPodnZ)^%ESOSlrWOQFt{q%IE=!2V2mOYoXb(DOTxz6CiSUMSx?_k zFDhn>o-jc)x7hEr+X=0Vn@Zb>=Vg6@x9QrZBU97#Z%>`T?LoPYu8cizxqr%Fgtgta z()XJ!zsl1mtZyV0J>4jl{GxoZq`yIqK6irD z&6^V(aX64E*=Og|?}CRp)MT{1?!&#VuL{wfi@Jucyi&taF+=#!QD)33F3xWu zvn#!pvpgwdhKmwex^Y7Tg8Yg3Z+Vq!c#`Mg*LXaV6<%g(9Xk46z6N>Ghvg4KHx$b= zrJ*3n5>gP}cf-K390_W_RoSa_~z0g+JNY zm_=+wK6b=(XoC97HL?F6^BR*n-#gf?!F%(zW+8>2M)CSFla#X13%LrB=(QWMrDo3Y z3lxsyq0HleVv`7UZ0cFZYWwE45y*ZV8-iI?I2nA184*{ukxdy(wAlf3iO0dYUzyR9TlbpCs$97xolcq=m1HO|PTcvOHWr)GNMABZR<=HEdn`@vNOpQ8 z4$eC+&+i)p0qWJOxur{NsKS#mkvw3}A9LhAm&L|Mg}fU1LOL!ME@;kjk(QJsX6DDfl6k(1nHB zSd|!|bu-mmj5<1br`yVk{WiPv!x;j|u{xYQ(1-eq6%!a!6d7nT)Lxh_6iLM8R29WRfbTla0v&MEMLQMcff=xee3(Uil=Ir zh5vf{nuuRrb#8<<%eHu_kK@Mj()5Gl#!}9iGD)7YEc06ETbPjTBuPugl`7K=ITi;y z4w*f-(PYUK3BPhXAv%xc@F*}-oI$it zg-sQ?n~dEVkrTrT&3wH`X#p!87R6>l%Wd=Rb=D|kw#FEXNo+s`fhH3#ekvq3P58Im zMo`Gp;-{!CD!bqa58b6X#8u(^Y6J6{;ZV;O}4>?hkp6d9sTdKUGCD~ zTeV$~bwFP)^nD_U-`al*;=q)Rr$76QyK*)X4RA9wlel!+d1n<--@If(TWgj}H!P*O!6)pD1KB1S!= zB8wTlmvVvTzEm)G&0{$v9*?zwjIEsb#q{lW!WqM8`(m&aTfPuh2yl}C3sp{O5{FP# z9rL`~G1ckNiQl`78N=xvhYjpnNDz>%>~|{UbDIVchY?mICyt`<125zSUPKrZ{^{lP zRnYe?*cVFn)J=Bg*?H^vy0a;W%aIkr5W+BJ5+J=|3`*U24_8)26}l0vfk{Pfi3Ma9 z?fS{N=_F{$*kONR`7Ooc`3*qfH2W`pV<(}|UjUz6Q@Ux~U z6*&t7c}Al61q?ljvUQx*&1V_A)XEbsHOc)O+d$LzAv-hu8%6!&-S4vWakWIh7;zcFUN*~6U1;LFPD zn*=Z|n74x!#}9ho$cgBRBwdDj@ZkmI!~p$u-Rg1x0^}Vd^G@u3vM2dy^T|!2qS07# ziW0@qfKW?X@I56uN)ct=hM_d+_YX zw~-6U8ZtOTxN_u?=`|sa;;!`*+KuMWe0ow$a&l>uubi?yG5}eHvzRj=iNsb-vdr85 zh};zR7x%I}KO^cd5lBfTm?7DjY;aB=!Hq*wv?gtY9R(C2pkecYDh>c0I~qa+nWmf8 z8z2tHccX!n=;o~=(zrq@Mss~Pc)9Nu86|X^;TQS4?Py=W7f=`!i<=N`O-LxClqCxP zG9tjnpSX^3B2n}?(!ms&W{X3ZwZP-&LeP}toG!N&QZIOXFHK#1lmrOc&!=$ynsP*m}`W0%0{^Oa?WAS=E(F6Q%l6Zf!( z=F{~pi*2=2tXDhNlvWW)_&lW+Eoqh*VdP+FNhY`do#KK{4gEPiQx_ThX8*kP3#N!t zEOm%L2S+J6d8yD*PdE}>iO`1jFY++yqR_t0aMy7k+Gp*K6={$(i0|yabtCvzSp+l2 zM@395can9*8dO-LUc~z)l{v-WRoSI6%BWySx24Bn-t3sHYzt?*1|UK%4T=N49%kF; 
zq2)ap|6n?wa(pxwgBRyimgFFNEhekqeh;Jh^pO>^JrZ~f%o}tEXCw#CBZFY8QoOtlGsgG}LcC<=$nWHfagWrVB@NhK`Y>tnnT*(IA~r;WdhSU(KB(I_{25LcZ8IYVvZQ6F>rM)#PO3>StCtme8sw+~q}73;pnmWJJAG?3SfbRTj zWWzGwJ$*(r0kLfRB(~>!RoC2quJegEB`bk<7N(YWUP#Z{0pHE{*#C9`V0q*YB8lGV zAU#6@^p(W^CHU@e)~v*T|-?W(8m{b;lJcQ zcs*SlI38a!RH;+sh$UCi$7hYVo*B`pzLKZ+Z6Y=l1~O{=%lV1}8!-!3@7{M0Xf0ef zwq$IQeV^pueWttE-@wsO5nxE{uvEwBk7FbFsC}IISNfj1Hi27^sKN()m}zrmxAN4> zpCCL|u+eW+p=GQTW3=Rylc!xf-`G2I$RSJhpA`Ru|M*Y%&s<&x{)hKY8Az!*Nr8dc zG-m>jDy*u(Je!FApUj^2O8%?rGZU;O=&tHxZ9*zywKe`O{R$V7Sa+vcJjwTYz!K#d z`>z$UYhcdVfmxFNetv#uC3}|&bv)8Ys1m{dBz&bcB$@c1R?D57kbsH%B6M?dITU-@ zb@}wkn`< z`=2MIi9n>R|9(U2ais7s{`VHi|4b)jo&LY4|JO+u%5^dI2mm?4JpSFrH=kOb?gmL_ z_d9(M>0|7?V9|fkneJj<8NwN1r97=+bt}UXn|C{c{BB?zT!9$q!xmn@*Hhn6{3sHC z)!3Yn{28=|hXpT!nMHynfbcgk_if$mgm=2{bM{CCAcTVkG$Z}6U&NgCotB-Do=I2V znDl2y4*(Tbmri~0Gn@eYAe4mC@#NBR{reVFs(&>jqw<)7VP*dRCcWz!p{E9m5)T$w%ESQ!45cyQCu*(#w=L(%<&>v65rk(r zP3{-#Rek=h6T-j!(f#JnZ7PH&BP(m;@r8pv;eT8E+8rP;!)fZ{lLeub91V<8J_v{aR#fEtu*x>M1uutkPL)Wf%ElD~4$Th% zg2ChH)L=jyNH;?f+Kt%X0VG&H;sg5v-+uZ*3*I(sAd+DQF z!ur;X3>U&j zVVW?JA~3@PBp{wSY$m?ps!c?x8*#23R`xgFiiNZ|Mf25bi4D|1zU~Yq?3N@tjwZ<^ zuwtXk;Kcb^Y;X`i1Ss}6breSctx}qai%boMy><+kVN>Z*_}}OfR0Q0^{}AJ=q-hO) zCs`_4U|ypN-HQQI4IB%$_XY#c38t~+cNo5&6E|0c6*WN~mWfH77kOQfzV|_mn01V+ z*azo=z-q5(Lrg@NJaP>uD@LBhLu+69wVH%KkJ`Ea-qO*0PyRs)xTze4j=r$ARLIL| zN0sS;t5V(j0wcGbLnyP_)htaJ#dRX5jxvH{6<5Mlk(Juvh!B$z>ro8uzM%=W34TWl z1b^V{Sq0m}4y$@=> zeeCNORJJGOlh-qr-eKnm!%$yXOd1`LcYr*Do}Y(#8i+}yxw6y6@Dn&8NUO>Z7(TPBDS_@)^c-y&R$sUz{!Q&WwtrL0EewaIID$XA`MH~>(RG05{!&LvV{=&+ z*9?TB$=v-#6{o*zR3pJwNZk7st0MuEHYP7m8-||q^-p8n1N&sj!zlB3D8R+V_Yf08 z6NFTpxD=iw#W$MCu{irj!W3&i2hZC-pu0p_JW>#FH;bk+d0NB^=0o!j(G;Wj(A4YT zW96qk`-T*wUCBxl@ILC4bB<xV6jV(t)Hu-zvGerCL=O>bZZDVe3W$_DosDNhTd8P34^CA zF$>bkm(R^8E)gap@QLk#@`YvfkMGha%3jf+n6WDq78nzPV%`X5EpOTxN5}Va_afD* z;niu-l#}EQ(`yX~tKle3(4u?6kx2%-F1p+Gu%3+8iU$2sSFTVXd8YqiZD?{aI-XdP zz_BOYNjk^?o0I~H|27vdoiqTyZrg7P-HCj%rS)KOL-DTqjXPBg*lNk$d-Nnw-QFNv 
zu*B&;VD*dMbaX?a>NQm6+9-R(ICFGVfP+zD=W^!6fdDH=>_NT@D(iEt0_IXQuln+B z^t^p5C08%_fvB81czuigw6nGQ@gvI^IJMba(+su}{x$w@Q7GN0kzKzcd{05;8}a*d zj-!^rCY=JaxzZ?+1ppd{5+-L@B(k$ajUJ?Gq5|1gmIN2ETY?NX+#yfMiVX2NIq7&U zRk@6S+JraAhDeImvEP_-6NpR(8B%q!92UD&l&4KZ_h2{w9w_SK0!0nGYLFbp*?F*_tAW{-Wk~{XnU%ZiU0iHqjHBQJ$ae znRqRds%47+yBVQuoAOsgM1^=)fMRF(3?W+>C5=7b z2WzPK*C;I(QAHTtg>nlmv&SY9s~~^iC-S{a2M;*ve29zLTgH7O4-B`h4&ivZawN--Vw8QhjWf@F!jp>_Ae4^GK(YY24zyLu7zNR#J>H&p--+4 z<0zK0@QME2HF%y^m(S=Iq+1%pm|VlfHo@{atLg5UNKz0q0;!`_O<@r#{b@uN z%s~r=o{b81r&wcu*-Hp6X18s z=JIg$;63m*0E{+8;LDjabK;*MHU$VvH9wab5RUMyRd~IN$=nod2g3z|ijs_EKezDP z+>kLW)P<{jtYezMho%w@EMCS#jh#G%cdruK^979;$(a+_^YDgCdQYZEqh~>_yW@4U zKQ+_kQqiIbPXx`%@h3^a!a>8$&Uj-e;T|#)VMv~>^%x3^r--mStv@YvTD$!Zfn>*b@}9l+L$_C5Cc>s_-2PjUMZ)#;^M+x;=j%Ezhy%pF^e&#bQXFz2t&h= z&iI$_+I3q#QVY^?YIJzS%g(bpUIz8+boWg`mAuGNI&q`bGNoNPtlS% z6JjhAU%zWHMu?CY*KGysl-bYGDNUl`5XYV4rJsIYAD0XlG|;=EFTzh9Vq`L>qFE!D zxJ)TPR#-ZVa}(Wz5#KuStfh#?$kyM`;x*D;+=|4oJs)YjIZt|xX+HdPf9XFIR+{?`Kw&>w ze7R@7;(jV9DQi>P3%mT&?42f|je0+BwVP@=8zLzM@A>VbyIs&V z3Xs!vVTH_EsWVt*Il6}uRu@@!FWA|{zivYQj+ccVVPHg^o-hX0qbXhJ?@J~2%V8_v z(D#bO*6S4HTo|~rvVODt?V_u+))1dA(P2f$7q2>*9Elb5ZyUVh0s9|OrnXE(GU{$D z>^^(rn**>1k?r|R1f+SELQ>JtdAwH)Y~2F`6S>C6Oe}DUwKWAR$2cS?WE?fN9d&d@ z6cd)9o4RW+_r@n{{M}%M{oAMo0jH8B!trH3NtS3`M&m;iNkA+Rvwvu@R)VjrNJ&%2 zpkcYl%_=(Sh@wBoq};@OUPCi;`$;J=qcqn_BB42aG>NP(Mb)AOxqbGHW027^F@BX^ zUL33P@K+PT5u*HTN@8LT%e9^E-uG?IT!L5+ z`3@w@K=*HA5O`_1o40MO(ldQ!>2{y!uwo9cS*gOrYoH)k@TtjdAI!arv*_F3gMToV6p>E2 z6Yo$k?-bj$iKK+HHI6Y4+uez-ts3T z8&aR!6ocEbgMWDh;$W{fz!1BIqA2PgPq7jC`4&X|l0Uv4rnKc6WqgQHsim%10-=t; zI&RrKdP3}XffkcuqX`prpvL=rnU0pO;Xtn8u!ynbL^$O5HLUhWq0Ols_EVDkzF(lk zX@L>d^yaWYaMYsA_ifRx-+%E2mjS1VziT3y=FPeCexqjwf3yF#nbKM*us;pzPeovv zha4$p)u`uMEaRvoXYFEol5VOL$Bo5QE@%V=^=zqEcFlO^P?W|ak8&D z@_rN@^Jd=~`_I8pU?o;Dashj3S}y}@Z5FCD3Poio_?$2*5EuS$KlQ&uYPPT>`{kBe z=LkYwl-k<*-~7Q9-+BqFgvr|pBN-x*BQ~CRe+Vep3jWw!oSm>AH^?&WOBV&g%*nM8QT_n| zhI=M9A5=|=?>BT5*W5{hLSAQ%h$1VPmF`8i4s!8dD&V&>voEfJZj zh^Sm;%0HoZR+ZoQ@1=3T(>x+xIM9{ 
zbza!9NWtu5FQ8v-_r9r=s#izbu_9`&eiZHC#sZ2x89)XRSWQ~~#xmv8U1)0WCNMG5 zg+&1oV#L=mkOw|L*rqoePLXy4)6m;h6ZX;blo7*V>rE}4IDM>j&R2$|;uEe{OOqU7 z8vc=oK+UCmLD%=X0-eEsh-!HGwk+}oT7O+u=(i3qR{Mr;=S38Hm&U`CB+vgj$+5^0 zW)g!Of&wC`BfP%X<{G9G9w`MzR4I*a!+0)c8(bDvR}4EjHtYznc-~1?;zbD=CVVyV z)!)r1*mn{z$pA>i05hCzWTh`S#RZYG;y9ll=7b86!9;+#bsU>GORXK#UHd~TcbSq@b*mX$4|xZDY2bcNOo7h=g?_T#CLH3Lx~BT$WyW$cR< zea?BVZY9ie;q&7^=Na#NN)a}f!REo1(4mD#i{q1~?JGaS;U;P?;zl)1*SHk$42AO}r0@<`s z``Bc>8_%l^fNRieC!d`OS&Y&g+IM*l#I>sG9u0IhbKpjx#3^F-JYPJ8%>~&Uw$vBi zze91!*}39m9vT^<+S07 z9pQRmA>R=DWk|ynG|+t0ZbRZ}=aXaNGH^vGsKq+8jE{F1qW!%=t5`4~6BduXpIwkb z(@Wq_uf1r`m)`o3+3E_8POqrE45@Fku~jS%f21Qw+Ug;izhjOpN9sgGBNQ+n&?Mht3sFo3*DH7slkvI5r)(Y30PSUnh11~S z^I3{9Je|z(pqbU|<#Y)NX!kHZCYnZPiH8zmlF{U6o3;S5pmc(*C))pvtUa_c`N)Rm z*&;?4>muKs1jLl;*qsq{6AgoN;r`DTAjXKS(_6;3QEYzZi@CanhS1IX9ih9whd z)1PjV2s>q@nSJZ~$$fU17jr~i$~lthtP$tM*>oM4(@=Qbg|PYELssMwA5XEa4r>?x zNLh`L*)(srGuMM&n7{%1(Uskglp)RWh(UCp(**g!0$r7%1;iH0+&lv_j)g*j-=X0< z@*z(&&oji)C)ySrfz-=5JpphEma=ln(lqcLvYi|SLK&Fxae(*%?16Xhi$C=+nnH2R zOLbcY9Au3uJ-h5TFnI@we;sIqLF7)RkI;EZG^Lw04AtFjeb;sC$dcV06q#G-JlYTXchN6CHG(Q{RR+JP00|EV_^RP;x zB_6@SJd)bbjEq`gM&ii-Mll?rX53G-Y#-nBR-vOQO&z0e&VkKelgd5}-=*Aiqhr{T z&CKnldLL8kxw~g3vznBqV7L9ImpWbDG&qbg>*It`3+HdjrEN25AG)`lg^a#D5@$D9 z46uPIh%xM3P91dA7n&^cZLBv1+a~OnbF(46mYfz?pM8>ls4kSuO>uwXX{zggnJk9A zFj`p4XSMQ*K?O_m`qs!(m&c1y3h|3=J~1Tb4=!ZB74waLj`Vvm7`>i{Me`jQF|j_} zugtWB7x`G+KJoq*ePQWT?xP)YsA%xc4*L6MhRRe`P$A!apwQNV9QMeU`~FqZ;?>rb zmdspNg4(iUyKjvnL4UeB9(0qc^mbc!M@ExXaR^~SJSE09=3@N|i=H-aC}>aU*K$WH zUzkZdr{(B1n8`RJzkTBPA}9atNy zC6y&$eq(?cm|gOsV5Xc+AaZ`2xbyMyipFRyK?&3_%+kDO7D*IiBL0gg0zHSA`&^My z#$Z}2w6KuUSzC-^U>?2BCac_EfcyC|FB*X5j1d=|#0knstWtw{1C*6O=TaL*V6w{6q=ALxmNSJ6J zEgVH@zMOJOYBtJAPENHk8TtrH`w9#5mDnF%k|tu^$m1+hm9hoF)yyjFP?N}>qOa67 zv$c#0j04MOS6$NY^G5jhtUSm{OTPUzU9k9({R)m!MQN^&VO+^(ltW0pP-{3&k=kS^ z=t$ra{~MqjAwewogoj#ua5Qmf8+BBbQiW!38%u=J*o;GCkpX6jOwIF8GSh60W!t2V z88P0iTS$w@SV-AMNczTPIE$C^S*X`&(Q81K4`$?`jGMkHr8*UwyX}1SetD>VO-uju 
zt+t~>kfe(xwEMIFnW(h#z%k8>g}6>x4grJO(ccg6(LBA0g$#+obXs^E$yECEhUt6_ zWjRnl7}HyNnBjkb-99c3s6g`*f?t_nN~ozAGja3!0yQ4T`vv()!rd-6tM9Mh6}TQo@Lw{U!M^{bQUywinh9DKkULE|5DVre&x ztqZ~n-gjKr((wU`MliEl5+z0U+;%N7V1PXeHH^tn8_*5ychz0dpix7sZ5ALLA`uZ1Ds7w9G}9p_)bkPTw=YT@4d<{gpee?fmJHk|EPO$ruK zGO3RUs4!P#iowB^n=jT~$;Qho*xxulFc+u<(o2W=SJ0J&`4?GOAg6=|*_IHrJPh$` zP*MyrgjM8}Fw_?0S`t!%sgj(TlqFsl4!!Wsa7o7EhK7jD(6AK7i^-`k7C~1G08rY~ z$K3>_iyO|6gC!?bB{>c@K&ZIdl-xTx#V@ApvAUQ66O#W7J6STnDx%del)w zj$(K#b1|v(_B`pB;=zpgzgQbHtN#Z_BlnX0{}9&n+DF)m2figw9?3m4D%SfS{O?-} zryMvAe&oSxS;s4ll|&$)4u(nL;p3O8l~3eB{tx`y1D|>QUObU~Kfx&%3k-$Oz<_Kw zJX65`;E5fQ3jW2Gg3EFa*+Y8Aalq#T+5f>D?{?z1&4qBKAuRu!GV4I|6N6wp-o!2g zdPqK|PzdrS4mc^c04H@{yqFWg#C-`)76pbr7UU4orA3tvmkif^OO=Zc;mN=(2IZRM zlmi~r?MVN+s-hGC?Zt@$0}LSq)CJPgwGkCiX}^S`0!z`ff`8*6=xd6FOc;#>goa)b zE-g2|1T;MFt=Y31Pg7tC!GRA@6elPwhzkJHLmk4HMB<>*wf=dQC1DXaqQ9|cUNP(m zu;k#BRHde!R0#E;Bi>=0Q6Xa4NQu(Z>?-?Kt@D;J&4A(`n=m^vpcLp`peL#%ZHlX_ zJgI^J_QyY0gIR0=gAgM^AcB~BjOl)0+wVWBf11__UCE~Sy}DlAI{R&D#cOOp9%0Py zo1%hu`wj3XAs@7nkp@Xmy2Lbn5_=AuUo6_YnuqI1s^75Lr#XG{9q%saYj^>D!mHD% z`Z)0<&~sXbbVk7Kx_|U}XKja_e^5+-iIZO{SB*S=AVtJ)w>jc7+58WPapb*v$DoJh zXqiu6<*ja%Cj(=(ZUMV`9`Fal*S>F27Q$@SwU=is(XQx=0_Q2xgifuHMfsnRIPAr4gucp zIO}QFYLp9}Ch9AZQxN~MQ{Z0os_%V%d_?=1vZoowfdG*ZwbuodOKRzC4EMXM;WL`x zPU<&h%OZJ3?|;H@+bw`t&)wbp(j4Y}S3@&#c^QeC6@NpzI(F;!6St0Iwsej%%psab zVC$qEkDnGRRhE^5<5O?hF}7>;{_!cB^nu22>Z&nXeUVb79>wgJ-;dUjAKMaj^OIoi z#82&@%Jq0VKcN-F&DF;H?PKhg#Y>f>WH)eNZ5-f1F#r5b@t@*fe?&eO!5>yDa|dOh z-Yhi#*?GwgY7ta0YY-Pf3%WOh!NZgN-_~}|#+iNYrf9_$9Og~wFa$FZ}aNb2mXogn(*72uWp6H5WwACrr;07pqT|@ zoPdct*l6Sa@N-DDT$+>Z)z;6~PUiNa)70|b`Q^z|c65QkAua}pEjea<<9o;I#GI+9 zC^|(`@V&R>PtiO(L4;Tv0U!~-{I~B^DMI}b(+F;$x3RcmHHLaefB)%=XBF$$wzQ9I z9%r{+XXBnv)vvoBSDIgniE&;~eOoNfwT>l-G-BUii+=0+xo9%$I{6$z?lEOFIs^HG zZ3I({Zwc2Y`vy_-t8GkX;Vn(Rg2n7DOUUAiuOqI07W7PtN# z!>Hcm_Ck4IsQSB{@8S3Rh)m@tMgMpldaeVeYG#mraK>B@M)k~ZB8>*{_!9$zD>Aco zUi}Tqp-3oQx^c(ZQ>)6fh5!7D*`7rGsH!#ph|vopv2r2}Cc0fPOY3_I4{q5_9UH!= 
z8%DnEQx@beipXs7EXMPu>#_vD+{;GFN9V0(9TfBuQ^4FmoIS*~8}@LxE+pUbYM5Pr zB{fwuEln%T5vV@5n~7mNLEp2^jKLX!%E~6F{FDek+@-SelRw~t|3T5qCdDf>a&XU^ zWae~WmNZ#y_rycP*CsWR5loUSi}0Z%Zaoza+iNhl)mhYFJ0>}q?kmcDHRo=bpPbJQ zad~y;xQQuYAziFzRi-dKCYyb*Q=@!}1>VI406{GOEYaKxnMyz3;cu%>OwyDCp_zle zdLt8rW)Ah4fnhv<@N+;nnN;JH>rOmMw;IhyAzY4LS_L?Yc99ZOZ3QQlCca zv?jg`c_*e)z|mZ}q@&3{NRN!9%z+B4EqVGs(UFP!MGLql`rBur?wUc9omDjRmI>{= zJmbf6L}K^{MX5$)NYFmiuLvw%+%{RWOhHRjKoc!GGYSc6&Y^`u&)KO8)Gxx*9YG>n z$o6a|XJ>=fnJsO~j|xnzrj`piUaE=KMBja-T~GbUo@ZiKp5R`;R*7rQb%KR7CVrR5 zEqRGA6VAk1k5`UPYp~>hCF?*&(V|f}zHkQB%G6ZE&k&XDqgB7Q2Al!`VO5kujiCfH z=WaiGKb*X*Dx~Q6AlsOQmeVZcz3w}P_lAu7#P3XDPPB;_{yzY$KvTb_!Sy+bngvp# zqSYGMvoq9WmtMca)~-YETn6|m-gOheWz^c?{@?!ucXk=IJjccSR~o{+Jq@c`XZgZ1MLUBo=}2mYe6hsxl8O0l3wQT1r{h1M15}yb&1WYCBF4w}X}$S> z^LFh&(r(Q0?^d5r@YEVFF*9?GzqmP_T%>98b6?&k8Xn>A|B$!$R=DM?^Pm1`?kf(S z6QIY@t#^3%#zVBf_zSAJGFME{zlM&MZ%{r^0E2wFh>=OLZ4*rnff4FN`gDf;#3l6l zbRCa@7l4sLic=KFMJ<+47ZxZ=KACt-ujepyd^{JlQW1NhPCoXL{4tK>;EY0?iGf|r zQ*RVWhsR8Mr|kC?Le*w=d5&V%BrAV`PhIf{qCQ6(_t?2}o40;(hn;bT_`BO=ix%yu zOi7Z-$N&jZ;Nv+xM(&W2>oV4=#8!=^ia|N6e&m>^O_yeSJWZ~W3K(QdIf{)Ea{qwf zc!PWI{G9h6obrp4Wfmoyl>-;0Zj;UbekPW2r$>8Zi}uLC&ox+F%CXeY&{Ty&DG4PB!GsO&xWu4GHV%XZ@(Vcg=_3kde ze9xt-HMqKe!k_>HGs1No#*xZctx+vzm@gYlKj?V4C&!Fq2`|4u&XlPc6I{o^bw)T& zOloAY7iO{LG1*CrgCm#D5HfQs6v{-RPH4UFgG4294^_J{NaE<<0&jVxwnipx&^ zXAI87pi(PPtz}92`?&o+`#l*oSE9Z&M^0DH>aNcx8aMF=2W;GVhdb|W@a9`X)Fp{4 zyC=-ZW&DbU-|OKGT%1t?nf&xU!Ea*>PdExoOtb>avsv;P4H<}hhtM0)amEZMF&G)- zAVzjv)LIRFu0atGJsi>NIk>|B&jqtq!Jey;i#!NMXYUCK&cvWtEif~iBk3P7>UBBj z%Sf35vrBX2O^xg`N<_)nCER;Vw>e@jY;dJgVRbo2HlrdD0|8;+F?1(Hx{a2rQqWbb ziHq;_=sQFD&In42=yrjc(jgjq?6)9OE>c^nVF5-OL+IjrBYKXD>&Cb)losj~YB{oT zAHTKB!Gk;8dg~5v{bGywJ3ADO1KM*%%45(*eMZiZp5sDs0WDWzM(GfZ#~if4Dix_M zf9QPC2+tkT={SsIg^6roBz;sVB(hg9vjysQNa_vo`h6UC%s2$OxI{LWqn7aqeHYhp zaGV%FRLCyWv27W9(xTUPI5=`KXP2>O=e}Sabk_4QIHj|5kB9HQ&3kYDoXzYX;Wmow zcgxJrUo-1_a|i182l=oG=P)a^(VZ3q^EULVa?~(U9(Oi;)*HK_*RPWLd|W 
zoh6f1uqVf~cJ8uq`yGDy_JD`GGFPvia(!-&aU)O0(w^}Li%<6r3!VzOf6l%&#@up)Tv;dT?$O`9&-)uGu0G3EyU2=ek}Ky90WiCjIs?gZ2@}@;bwef_b$}A*&*j zkobMV-XjjShdkK#FboMv(wO*Js>^e%&DdyzeMXzV;HQ885B&Jffc?e)K(Z8a+VrT~ zCj8z7&uRD3sNLn{!8V<7jlgbDud2-Ce8M1R*gC~OJ)*m)k<@Zzb0zd~7p=9;$@U(* zJ101mYnY97@)?bc;S)Khw0C#d+&Q3ExQbRgM^@Fyj1D<@_!jrOHV^$p{`ikCkga5~ zCD2JBjfn!Ez#r4=wCJ}_7_^UQO)?y6aCNm#qofgzoAmc@@!t3UAAa~=mp7a%+*}^B zcG|{E6NcJ|gU63K^(;Dtn_NDpPJ?F}5PChD8+T~=I?h^+rHV>Tbs4pGIo>>q#P#Xu{;lS$M9^&Ev>hC|h+eF++Q?DLKoUBP zn%f-hp0c?gqAi@Go-I%~-9|oo#CzjalHwxyygCi*6OVqUMZa^(pnXJpEOV$PtSm`1 zie*y2gL`_H+duek{L9a``N`l4D+QnB(=HR+VxlLs4$X*xZkxI>&4#PqDX$b@&0d z#;c67%hIz$NG&aJ;?G#?}$?`PZ@M zmqFUbYjt`1KGZI*A}`jc#1oWY%w*JMFzC_j_BcHna=JgF@ts$hvn>|W9`5m7Hh=Pe z@h@+*_}jPO+cQnRd;JtQG;k&fX7_;OoqaYQA0Vy2PIf^CX`4yA!`t_vyuQM<#VXY_ zL=8RsVTXRZ&Czk2L8vfRvdGRhQZy!7E>NA%QL;j$QJdl6CR=-Lj-8aur5ugL3>tzK-C}D$p*^-((li>fPBPfz;NcE$zq3!~`kzvhXV7OAvfAf;r=EtyiF-)%{ttNb zU*6<@`@6eX-^pkJK&&GpuW09rk3Yn9T=!3`PNop%ac`NOVd!O{#SZX}HJ!?Kk)z|NGzaMthkJ`382=;X=mcPY7>WJUi4{u zhq!m&;e(+|*HT$OXH&Migb&}~aYyBS_Z*j3<~e^pfrB6OXlIvyc*9^mDD&sv15^_U z6}^3z-u*59{%)O>q`>^2q$Fv~WHR9B)(^P#sK=Y?pL1;-vu5qlAGbMZ_jvTnyL6Kp z?fMlwtxTf%4*#fOb!Ol{zXz26j$l2Fl@)#PPx(^ZINPd)(vaZ}f?k94oKL z82LPMV}^`gqLekUzV#B9Z(d|&u1N9svVV5EoKmmV=JDMl94}8~1vrl%up=Kc3|lmh z;ONxirGK-?rTVlRUC}~J+oijAi+BFtpD_HxBBlH_Zo~>ROo(W+_uyTA`rm$r_qvIF zr9yGgraalEGuYzc?S!AczkyxPU|*VJT>(AuAn0*;=ROS zeE5TFT)1)r%dBAL=BODObLAqlOW$Gb$_;Lwn`JKBMd|Nz^wy8KKge_P$`x{T1xO)0 z#BJT@r~mXb_QMM0@+;(vIY=Qmx<~W9clqasGcdEj|Fjen%COlC*}eAxhr9cn1oNy< zimVsP*!r{@iii6QPWSlbdq?yli`vy|w?GC4!hBDwnhi*6hooO7P@#P3rGhkvCj{WGx|VUO!{1_lGe zPrMi-mXL9l6Q=Vflh!GZ{`O7Yc{XFU^IIIe{2Kq&p~_xDM3mM9#4h^ynD4$}5w1;n zExn?7`61)8OFmev2^C!qH!M~+J=$-5fR@|G&R5ut-w@Y3{PlM&v_pfvgBUS2ff_E8 z_Nx?QA@Zdc*t+wO=Qi_H(mwpk0^jU}S^b*twjWR!O)2n{#?wa}pWHB*7Rft#?qntW z^DD+D-{a9d!{xfd-R%u_b5m%3haa6y__uGUypr1Hw_X*Iya3)>M|^y%ct(HT=c?V|^o=Ljr7^wo^L&0&rI7vf%OI*T ze32?CH9#>EBwu*Q#@*+5xLKr{3=@zATya2q_KY9?(8fy)crB^Z?Vd3^dBO(^6E7{Z 
zw-Y7c-e8};!Lv!6W24I5?JB$3CA7Z7`xkxw?HelhLKVL9Ie-A7qo;2VP@l3#j}7r*cV+l3S{Wr-Iz znRKss_b-1)__tN!?gFj%h;hBoo6lg|lZn547pMCH{Mldd zEE?w6x4~D7F_Qi`xwu3$pU2*)vAes^=XUaJ#Dc_q3#7Qov|s1l|L{FRU-l96bhQ45 z%<4VfdPcwY%z3D8gl2z9=lGJ*qi^uUh$0#_ zvacp6hCgl2o&fmpsXk(Kp^en;zla-R3YRQS+9}AN>_)7cKs(caMB=lds;7(0%g*+9wxiJ6ouw1o=>b zXfR1haVV5)EV7^FaBr7`LV~zY`6thBh2L<+(_61_M|CE{DQB|?(RhZ)3k~V zV|P#@S@KyM`?|}?=z=GmEu0wy1yDp8JP}6MXst7zb|U!Z9sv<#JZ#5ca(zncxKBO( z0`Y{O;mF05*Q~LaHgD*zmN;XRiFqp`J~c!i$T4J5A|N~X3pJ9tI~>)D?4$!kWfT48 zlJUh89zSi-S%-;N{A48X&pXVAYwGPVGu^_$!NXwQzv1%xZ*X>JpK(6T7vhkM2n1Cd zpCF98mOF?||Db{SjO-G?P)3Jbn z>PPm6kv*AEC_v%x9<|+VwzDxp(uCPWV{lQYHA|vB1Vg-y13B{>W;Y-3{^Kj!ioi>r zji-npDmarCi^e0)ZjSKmFq8fWAsj;Y{Fu`o%hm~v_z1OTvbNn@uf4sbckzs?<71xG zMeIa{OelajF|m4=^xX$EER)sR`IRp7&x14XqjxXpPAtZDlvKn)VM?zx#h5l2wmV#p zC6?+2yLUdvi!a^frJ_Pu7QvM8Ib+sZgOj>T#1q)-C-Dr2wG_aocu3}m(OHkqXvDK; z9m0WKG8+*TO~adYX?GNSd7ZWEfloyaMvzem_#))@4yo<$vYk(o3<=0<6LH#N)qcih zGmGJ77?fdUiwL1GiXtO9E9}XXcK?`1?;dj#@S~-p1QU7kAqV@sjy|07c6*!61q3Bf z6amv(F}-+3cbMb4te`patxOhO*2bLC`4gI*I9KUcXpe%7Ck{FmhOINXykfb|;LIIn z>tAdgL`6jjMUVxBfD$6Nb3ko>pY1}DOvHyGuJJoloW%`i*ADj7W~Y}z)h$dZh$1UU z!Ww5*r#os=fA55*e8@N%!5`0)PYkiIZdeaHyw~0$qd`a%kWs*KVQ|5uHRQu?oZ7-8 zD0xVdi@DYqUp}K%m$*v5#8%d0GB#lQOk z8=ITlEhdNtRs3k6>MW*xPCvXLwS7dkl*hMpusj9T4?YaMMT_?NF{|@aPP%1$`7C!+ zMY770q}OHE9q{C-LDaAi{Q`n;yO$N?npShbqf$3t zj8alSw-t>)-e%!REmR*5RuS7 z@K+iJ?n+~=NGwD@Q+JEKy&TVdPR8_tSf+`u_cczRwfNVMi&Tyd_=Dftpc?n1x+6?| zjfIR@-XxkWP!t5hc({&(KGiTx2c=jep3PGdL_!j*d;;!TMOlj!19Jk=Af4y|+wnTZ z=!(bT1mks-VoV|x;uj;47DSL0^u>z#U`iyLBeu6kJ{>3ScaVe;$8Y@&@BOF0;Zf}m z@E^X)=Qm@-n@_m-_6ZZ~bEwZBkx0cz;2~q<*ao_x(QEd&dDdZ8O>$U?^YUks_(MT5 ziin~wnd@VwPMoDb&Wn{KTSY(dpn&>c5-ZFng|h(jivD=WldgpD>H%9Xe1`qK7`whE zj_WbZC8#I&iN)f?xJ@1r9SgzG&~-lpqsCD(!Cuu*I3OSiE~;$f5=66#WDJA2FvY*U zLsIgS>YVakQp8S12?qnDWE-C_WmtcYcmMQH`A|9Hc<1-|Z?^Kh5L+_(VS`qEjko_r zBGn>UKvoQf@`~O}V&TLo@0KZ6QbYn0lDo!nOy;XNkY%$NB9~Oa^Dr70OrL&8!&F%$ zvlP<`nW*p9WpBHP_KILa!iYv0FACI(QJ%je^Va(@#-~2gsVMPu6iJrA=)(M%H~;Dl 
zzW-K>z*ql!c3wJQH#MXAexEnq(6PS|<=|n7Nc@*x9r#OyIqWd6pVBi#rh-D%ykyzd z_|X_dQ9+ceY<~G34-#q8*#g;Agot$8p~`bTSn0Ut5;Guy8ewD~@WTCv?0s26cTC3h z6B=Ltzc_P?JgfZyzqeK4^{mb4y(W#50rtl0gtp3LB$0@0vhpn$Y9iBhl-x#{@DZ$g0b_t1*k)0Cox^($U-Da6Y6`&^;&1#%e(-e3pFZ8?vtQch_rFk~ z9Q7ejT~?h#L>DdG{33>Mys@f;i_LFI6pWG;Ez2*C%EEaK=Y0o$V*o+3so z3}RI_qi2N75%2gDpoXa?MB=|7*}SNj2&%?tDKOTfWU58Vn^|Jv+YUlF+};Vs0#g;y z{UOHAE-xLvz@0BC=uQY*AEWlZ#=EC8{^gSmo_}qhKm1aSQq+gC?4vJDEXj|wu}eIa zrN}KMk7e7e#&gz=h+5krk-q&*R=|=)979F5Y|5b-a&&`kY@5yK8F6pQdw~R&FHAWu zlK7=;bqg3U*ZMp=Uocw4h-4*IH~jo5>(w zF+lZNgvQ_G-@j4k?I&xF{$QKCFWsS>2oPDU!J1?FVmPrp#dwHn_@e@vwE^=nh8)IB zmMJ8|RHHIJDTLvQplkR;9)VDpg`LF{3fw!AN%oV>1%+fROfVlu_5UInk+%hBST@Z1 zSfYSdy-O@#VK*KkE`i%=(EaY8^TSh*e{*w>-#gsncONDQ*1yH+#RXS^P0Fb)_97xF zA9!nzwZ3LCnlWr$)3284yW2z$54e{~Vr=A)0}er!tQ?ERz>j<M|cSWKpj+kJGqiX6+3DikPG%aqDt zM0d(+*yn@qoN{`ip+#ROT-stMA0{l>coUs*cgk#P6Dk!+gsl$!lN+W>n~CxB zeV>97KnQ2aBo%5|i_uJ{)sqNqB}r~%2!~X})ivwkk2rpE%6Fd55lRkaS!dBWLGNi? zbBEOCK40D`Q%#4sEvxbqPQ<7k=6S!{q5t+bd9wFKvYBmiWr?_7!7qw9^D##6hKU%( zNNiAuhbe?4ROy#Zz44m?PoLxUPZa^j1fLgKkDCmpW3KfKb|%hewl}!D6~~VU<^+2=q_+&SlCspw z5%OsjMRH*=#-8^XxlxQjj!N1`CT!w4b6S^IJbQe@P<)Y4szNcSkWg$qZ^dM`pgYiU z{2@ZgEQxHMRMI0IonSjYG$TnW9i@<0Q5275yUy%`Z}8}>$9p4@mpubXAJBZ-qBoaW z?|hlvy*)}>VKU)gSF6Sa9A>>9t@9RRa!C0-_P5LImy=`w*%v|yL`mjyBoi^xAqC&x zm3ezOj>UX1WYV8t3RR-9EV)vFLN!h~1=jFKn9Fk>9S>;5lK3Bp*jk&>>4NJ;oJAtZ zi`!e=*-a6WMCA1hciCgG^fU9KY!ssuvnoCrL_ufm&KV3AbO$zWFh)3)CtoO1PWvc_ z9k8d2u3CKf_G3=_DfGf`6E9WR&Ia&fVH!&Yttl%bNn*Q3Je?rqIf&~K=3-1|8pMhx z+1ZMbN%|2y2kaS^F`+*J%?=StrAXy6WYZ~9_7iM-#mb*Yh-RoI9DK`b+Px0LrHi)y z1aeW0AcnIPBV7`77%qH_-5eYF80DNFzYj#w!?kU+;fP6hgy~iBMYCjz1q#&+a`)FIb#tLW9K%39$x)JiRG}%&_d?`yM ztKefn`-cP0zWFV#Zgy#Vd0cCZIi7Po>@gKH6u$5;*xTLUDDDzfG$y7-v*TjL!)zUt zNTtH4^6j~H%@xkDfi|1db;9H#AB|VeQOO1HVKIIBjQ;n&Nuyt;72MV|d-dkRPB_cgaMI zbd*eNMpqOV%u@tIaWX{(X?0EaY({HhVOXCaPANrD0#ORp6sm48Y4p&|B!Qh>;+YI7 z85E;Ur}d1tzWGCwsh-iE?*4f*p<^utd>&EEfiiWuA06$!;x3Jigg(W#FZ_H`03=Sb#L$O4GWv8)c)oh6;ALatOGTS(nn5ylXEeob>| 
z&{#zYrMAhHiaabSDD@$o_BBH>$s)NyAt?};pK$fP|HS)EohO5zPc$q>^2pU!dHF$% zT*{_5F}Ui)2t^Z=OHou+!0KM3z4ISD1 zr2m;JodDiqhJE#n$zaKiTVlId<>;_VIR|+pynu!06^JVd3h|FOsI#17)SofYeROgM zgcEV9AqWWIt!CKGW9HL7ZNM4WAHwnbZ^7XfUC1UbNP9qyRY%;YoF)&trSVPl?#H1D2lhep?}KT;NaK>{brkP zW60Wmj%239-Mc9=K?Tp8pr5}<^U+&;ryIiOSNO93iup;G?+#|n@-LITf5dWMzSE_i1$JTnuFN``aXn74Bpu5&^(-z-ln*9P^!T zH)$qw?EmpSDqC^FcnBCwS2J$T2WU==+{+bW={U0I!lH{Y>2ouVvW_QtSoV>Pi{M#c zbkV0x>I0ijAW0~{N2!$MeonwU>rfx`>8T}B$s)C+jd1<}CvW~OZ+C32CO<#TLP~8R zS6}7Jn>}7n*Xb{VOvD(krTvsrKfY{TTzAc|-JxCYVXW^{N|iV|N|8$kZs)#kGV4F% zjqf(OQuf&T<2zJ$vPAFzgM~h$c`;j`#UJxX;#fgoV6{kdI9qL_xbAo^g6pKg^Fq+2|b#t5f@EAW;D*uPLkM9$U|4x zFq-l8D;Mp-V z%2=fxBEbmpk0N>6WJdk?8eM*!(9QwLNR&8hL`IBeV;-H)8J7bDHcM2u@>FY}u1}fF zOpg0Swu1?_iY`LyUCu5a@z+l@2HGcR3gF))lH27=zyE*&EBa?WR(hPw-WIV`0@XDU zX2DVEb$tut9 z<=84mP-OvaIHX^{!3@<ec<0&!L@fx*Ugs4wM#KrSmTvtSjZV*2F z10EiIjrQf++?<{8{u@ALdDdWV?GoHNAQ?@N5|>b&>vZJ^en~QO%9IAWWp-yYK+r)%+U7Hi|+B+ScGcGz$f+? z3|qYSXo^|OP~3Zt+CiG5tdE_VjxSyyR7z2bDZkz+9Nq%l4*mXwtEP)oE|J;VA{U8} zP*wat_X~bZ!?dJ54-wxzqO`R^-X{|l9c*h!|FS{5KEMb+AYHk~{^mUrr7Z%6i5aUA zN@vMORRZ#@Tw9O&Xg60(d}+*Fja<-AM*c|gTn(^CS9B&46N+pf)!5xh6Y`130(iEC z-M?Vku5-G|K)J{Z8!Cl_h~S#I^HV16r@UPkX{FjwJuIWr}Np5G)B3Vb|w7PwBAW^YMjq!QlTr@XsOA>bn-3g9|egG*-3 zbIz9uR)rFut@$Y>B?QmLou4slKjz(QkA@njS_+d%_->WUWgm0Yr8y6?3S`*X@RLtT z;MrJ%7FxSO%S&LVYur1?Qp<;s1&?vB&!E-CitiA~7s&XOTl%ekB%Jjcz1?KgpVAYu z6w_JmZiUDtMSSYd8sH=OG+)p>X)rY-C4XLlvg#1bFl|_=_pBQhkpmlb`#0a1qyg<41 z5$CJ|g0legoL+yxTW0~pI|=r8)5IexlDC5OfL7z0AH30FR`!!UC=&_?P%wcpli8G$ zGaK&3sB9MqhXaWA0`x0plPTxJB*7P|RA1btl!*|sdU)M4W=oxV12w;-+$KCz-`bOuDBe8 z2*0*X?P!~9z)$F-N32nc#my;AGs8Slqn1}G##{tk){`ry&G&iS^0;!6d_JX;jf=Q| zs|~RRH*{BF=1PHD-bWz`uII5H^w6#^845-0{4R$(X}0oyMA2n77}KeDaKaHnr4mso zNIDtAOXSGg9`+~ctR~7JtH{%LklUwBG?$iG^;=#5yg#r z939rk_(2stJlDY!#9Pl7CXVBx8!D2oL|`M$gS%IFm+$hwT+}&np3?q8nOa#T=q(uy zCe*JcNcY2}H*@$^1;HWZVkO<$TWd*)@Yj4lA=mCLN^~=QhMf5R@PhAo5f1 zSuC_U^@}b`_khICHt|S=h~pxPYnJmlSH~SD>xZcOuTZHJ*iE^p-jwcQ!g<5QJP5FP 
zm?so-SgjUxt~v|`23}^9RP7FXTMx*n5K}zx+*{HoK}5jA^ITTFF0*zEExJn}UnC=o zL~-zNnY4%W>I2MhieR=tT(Dt0rgPP1Fx6QpDydqETqTJtNqEZ<-tZZNna98@^Wxqn zhld3cevk%ZjLRpCon5A>9g?8{c|}7Onha+H9$%O^`#CCy57;;;aHkd_EQ9E|xUPpF zig^4Q8#cCvx31G0Ot|Vw_#PH1Y?ev*RX$O~)SY6PZ5q8bon?&l;USgnDrreT#YVSQ z^c!7z?Kw`oOrm;+osIjHqcRB#iW1qdi)R~{qhkim6W(k4@uG1) zpI1mmL~Pq;*=aKA^%%uoAd)Fj4nRzHaV;C8+ho@6&{vD3i(5Rum!O>V;hIZE?E!=C z7$Kb|l+6+KRATuZ+-#MS4bCU2E0YwERm^w4#pv>qg<8Q*R!R6(5}u2Q2<{BWYSQc* zH0MzgU)ZOzw?SG_2y%O=TZ0Dk_BA(7hG3-3Zb2p&aq*lr>+u<*=2JdsD=Z2LKA%-c zMMWIfL74S0hh18F0wa-QyC{>5gYCE&-4^p!i-A%=DQ@#%H%T?C;<+m(odNy!5Ryqk z`5Z9>{Qqcq>fx9Mvu2xNZ_db9U?-L4ZdtlDE`QFuu)dW~r(I@tj_89|s1$Q-1#k1y zS!ysmWip;}y3FuGrp(@hJq{{ic47)L-YqS!>mrH*tk<}M8y3S6H%^XSe}cWVO;o(S zPX#PinnwM!%hWm~^59h}#XMWVj}#Si0gH16!wK)T5*!BN>~BPfhCDo5!&zR^yl(Ku zn^RT=KZVU4fq);+Ss>75I-PM`myuqKvayxIA5?IRCBpcU*+k=FQRIH9#&a)J$tS_L zTCi@OGh4WH(nt6s2{wWfiNFEA!Yft3gz?;2b-JR%|+OW+!{2V=izcIw*@>*bIp2f;}4ceZ5Vv%D|Z>{&-mK^ z@lCE@-r~{XHR6&-SYM%C*O@I97S)$g;|X$}gCCJ!r8ND`R6HFM#&wpR2G=7nDhX-_ zWumFbfBx)p43kF;C(v~_IIJW%*zyth32V<RdApq|uc!S!YR5pD`rN{ANh|Vt|-_jojv4Hp4-(l8(TP z;ZUR5bcsA4BA5#>9W9wD0pck?KA(W{iE@_(5JlF@Ig^VEW?qF=X&=>c2u_A9qX9Gz z))u(FAYvpzIOrp)f+XJN7P|8ZdLW7w3=(o0ET$_ST^5K&BiuVsNpDx#|J+@QJ(2Z% z!qTx=xC^rG4TJHTi&mWAt1{`d#e8WnRv{TxZiUQ08?1pLGp)n3vl%0Mm*Cc2lF=k7 z;Qu?KahvqNSWUQU3V0Pi`*$Kpio`e_{L-x=;m*da5z)g5R2hQk3|b>v^%22jm{>ZD zKAxfbP2{M=aHP>2n1pI+l5s!uj#)Z=#sLLwoy1!}Ddv)nEa^{Wr0I}I^O%tkWm+sz z&d1n^gDSydG^hWlj;$OKs^ySZ8g^G>nKaN{u#u5MF@o_lVZTa1@(=|FW3gg6UgAqe z@kK%?@GGgp{CRM-5pMSp-O-9GJ4HI1Azw=1^L;`QJMZFj&gjm9%;hA9+Yz?P%1 zWqy6eNXcLpkJv~BDThApl;ycD>%{_Nxniw`5Cb71f`Dqzu@@a0jV|XmD|9JC+8?4y z583wU*%rgbh(ULbn#mE3$MKEFxKf0bw5HqWGw3YvR}aWEkQ^`Xc=p1j9;xxiQ~S^H{)17X5~h-z!NdY_w{E@zDe zOHzaaVJgA^)mG`R0t9*;7Q-&1NC96gPsy6$(-nG>#Qb_duQ|n6`aHSq1GWP`QsNTN z9?GdG_LYR2c$FMgiNQgu#^t0#tGhtvFGp;YYw0ag^YL`^Jz_tvrpDUs*pvPi4 z;Jgl2F3I7`JEU{*+b}QOYF)Mst_TDOg+jz*A^eJrgoA+2V(Bs-Loyj4h{dAaW;)fF zXAd9{BiU==#*)lJDj|9-mVGYjuugt7UOxUY$E3A}*$8uKGA06kAK|czEbHjY0fUP( 
zx?PPqMbZ(KT0+OvB}P4s1lLR_2IqQ>y^zdCcEZ|@(C^O)H!hg2JX*z<_K!WM1YWA{wVEZj%hhe)?s`;hh#|S=4woF?naE-B?b92^Zq2(y-7V!B*wC)fw8AS{RK3>azV*Cavp0@xqL=mZ(I*6Frt|4^KOd!msEQdWbA&c${Am}E-O2hIw^!qa!4IOp6M0{tP zTrfaHSO8G1CE6%*(^+(mF1BXKdsW{v07-1uWp#@X^c<+$8vEOYn)IJIS?e8 zOA}Z)pmiAcqTDoX!is{=u@DUdPYfdZZ9Kh$*6;G+2O9BAfovg9(B~(V%aBOQ zNkVj%VB9dCuBZ?F#4HO{T46dq49EM$D4~8+;n_IW3}bder+Ln!YmIJDLfMER3No0^ zZCjpya7}ISrs(>d?f}fJMP=_wN_UrX*x{3abJ7VoH0d zk}_RF&IH#^Vc^4`NDvy%P|Oj_QHw@1L`0DZ2p}2;f=|T@cqsB3&zmzCOlaNAAm*U> z1XhNPu{H?fF`q9P4_0`ECGL7be>5lLH}C~L9LJBb44~KsvbSJmhnNn>tWVF-%3 z-ldR=Q3~D?+K*=ox>p_6z7=v3Onr^6Nhq-({y>OqE=bE+u;|^;Z%1jP;*$lOwaZ#} zh}iD0r?7HuJX=TWH5q>JA=jM=9otV@5ecXsjsUg?^3OTe_N2$Eb;ZDnp+`2^DTS$~ zq>p%gMFi1DFcBvl3*xh<=;Jmw%?PWIN=OoJ*H)780v;k3FlRiP(x^{y?%JrTfMMF` zrcD%^#bChj>V{S50AFU0Qangj{mF||06|6oWOs=%?bE!tLI}CUtPz9p3~gm#h#?$6 z&^25$j3L2fs4*RA$Z-Kl5-=8P*0CjmZ$+m)r8{tl>}*opsFL+bgajSk9Wxy342Cue zHj-d5)nyX4KtlR7Rm29MF`G=OUrn&@Sor+{re&j>4iV48o((aYr;LmsTByuMG0J9I z;-|XW3K9q)xl7D>pY~N9e?mZW=eKiT>KI}eRS<4FQJDs&;)m&k<#>Q5=LyL^#DxyJ z#yTL->CEVKZ9>~Mid!|Zeuc2Gz+%jJqSG7LNUn`6+RStteeM3|5xh@?+d{Xs)?v~; z#PZJJ?D28oV zj2YZCX&EYVSVU25OjBWLsYDzH$6Yh;^q3CDto%MKQNo?+xPBWuAmFWAEVThw!yrx} z!`=IN(rNkQ@jSp45d;@WS+kn;>0e*ch`Ow-0gKs`nc<+@Awj)tMO~jP2)1DWG!u7uyxz>cvNMdVN%eA{q=36g^}?N7rXGZzfEPFu|gLhljSZ2t>V4 z7K!=)3BQ)I{2#_`e^+yS!{FjQ-u>ne`OX_psB1d%azd$CO{pE<` zE@Ueb2L;d_i^Y6_z3i}bvs?`BP$;da<_A2QuQ>09h`ySkx}8S#%iLO#8k%X+n^=s_ zn~0ef=JpXzGz6cD+Tiy8(6uG~MxSM`j$`NP8&!sOoRk-&Tr3j((o6j48w0$z{!hLs zKcs%O$yPE(u23aW-eW@yQx$03^T$yEx(G1V+$Yer*Yr|@S zwHl(iGtAL~lM{pDeVJS}P4?BhjI4KweEn-Yu}=7(m-pFBMX6-6B+Gjg@^Ln0iJ-W` zb9#*C3!2>(vuKk*euU;Pu~t1~MmUDaayVu5`1>p!fwvOR@j^l3UbM}3r4^IaipKSf zmN2oYRF9E`QWcs^(4)-kLLy7udglrkCFVY~d8FMc-3 zzwW%ryZ@@kf4th|e5*({8l_OJQrOxd>Q_+t^($WU>?QWBPW|{1-~0MIJiFAHI|;OY zn^~vLq*5js3=oxmwr4>f4w>Avm<8`ce2-crNJ07Wd}S0hLhb%OyVD`@uYZdR?SlW+ ze86TVK`osmQQjwC%&;ZNgv1#FeJ0wRM%Q2xYZJ`$&>rM510f{AWYV2-eKx_DNm1C} zARW9-J^Jx)6eNj2rbH|iCo%hg?vGmhOSMg7w1sVgzDHqKG*jGr6iWRUhD%4=G2(lzbn1dl5wf=_0Xo 
znxyuG#=A}aBG93-xkI&3;!yI)Wj$PJNM|wONk=4^6*)+IEUYz4O~YBWna>NH4g}&E zjlKAUx2GnJahUYybChZ+WLW~%;AzZ_HT|K<==7Rsrpo%@4$h}0DgQAXJa@&k{v$5m z{(pG$X@|#61vA@ZG8=H-*V#*|gd;ygp|?#KUEbznI#u$~0?%*AWPb7|1yfLZh3ZC+ z->!X&_dobs{(XN)@2J2|B~LP%qg>5X+f3v0fip6h_Qp(FH#n0M=Bqq)`#BC=KXFG# z8eOp*4yfA|QjsDrY{+CoKXGb&VMuKgOC7TlJ*V@|U-N%!bLyochZ`!pdkKgJ!O$4C z#`vy=9A#suQ4g`R#I`ha=5%{=8kY+;kCGr7+|*kv>^-v2-6fsN5W_>p!FJZnmJ75| z3(I}P@bELVbN0UbJ#L=1 z`QQJyKKrE%yM;8#{04q(AwSKlzhik$&m{Ci1|E(^&H(4m{<{#g*2r~ilFDB$Pv`aeQG(-iQa-@AM2rF_u;N%4|79nuN0PL zOQLCtCkTMJG57TJF@4sx4;myv5(EJd91_(36M-k_?yAbJtnA-pWo70m+YlX(**^?O zf*FoqJjaRE1y0PCsAOC`^)()c_q3%EaX4ah*khPvNlPmnJA09{XHW9t$ypYsixho} zYaaBCBtXdEFkw9Osh&8)+=(?#t<;#C%8*M3j6ka9R9Q^4M%&(v0`4=v5`q(Th^*ja7 z;(pGtN4*2W#4xB_U}Coi(Hw#up18K#RKd7Cn9wTZ}(J@p+{*H^hP>r?bT*58RzF);yg z>``gVP?}qyF*C>P^fb#gOQBd{?9Q>cc7pYjYaFZPn9f?FP8*$>B3L=k>e@1EE3?${ zDLE6;Y()e_boU2@ zNtVi~7g#>A%IVbxGo=i9N0AI#L=p7M7ig@m^9N^YG>YDbzX_9n5sC7NVxMxoPI-QT znVETJryDF+4dqgakvqrSu@kJHI>E8&BC`dDbkHI)S;mD`mX96h%-Rz3HJ54*y3G_@ zSY-L7Z?V2qVAePvWzT@!ApK=BqR8mu^_6e~^9;_k(zKya5rkp_8jr?|V6;#S-t zxO;FCtVoJ$!VT~D_gmk(cdeU0XRn-n_C7P2J$vTKInTURFWq99ci^*Fwto3cGh8)-$ylsh;Zl|b zD%#y_PMT@7de#TU6Elwv@(NnKA=xSs(AXlXpE3*67)`l23S~V`(bLaWAJmv*gJwdq zjbO_`Ov)Uj1An-WjTI~7U+^ibei0mPB0en}QBiX_2ML#5*JdLl=Z(Ch5MYFNhOqaA zvpnh(yArK6e5APbWa!NuoJDlwY;QV4)HjRF$irVc|2T+n_q0Gxky1udO_sb*5}Oq? 
zIoDT>S^8cNJmZ-Hp||EUO(4&k+4RP=J?np?L=+du5PV45=BzjjfBrpN9?~<(AS}~7KgJs8*2!mXquvO;7@&vbz5q6omdRm<(5sOL?B|!aed~XZL46vD*`1*@R%zP5|DB1YfIW#d|1E|sBUL-8w|*2=~0M_wVDCVASM(HZmg3`*JIDA6Zpx?Cn-s=3$O#U(kGoH z;|4+Bvpe+;+L|frFUT)$>oSOjhE-4AW+_**=gm`bQ}zSJ{9w&MEeSqm?2tex%j{Si z{k!;XmL5*Ed%xUK;zhi6kaJX*6?1lk^cew~OI=*OBh(H2SY|^ffZxJJiFVNwjAxC5 zrk4tmgmIg6!>u*?`vge)ts+r(`>&&<8J!2XeiztGDTD7^*{lIXF}LWqS8sl7a1H(- z1e~<vala-c48$;=fn}MWtB)2Bh$N)B7190{rz_Ef+dKNkp`dq zxBgcKrC~U$h0u_HfTlzquxR#!6|Xg!VJR0o(-Acnjs$@{&AtbmI%1YBJjIEesGqG- z1VWkN?Z&P2`lg%!rY4y-3cI3duO~+~wk=-{t?uz0&?pblm^}wX%-WntItej{K_X2R z@;KUUxun5=KG36Qb<_JQAvJ6^h$XTW8KB7!2h4lV#cDqIEdIt3zW`l35LhJaq6Vd> z)iJ=8E{qI;VI>PkQPc@U27{*TX!ft4juqhho@|`3$?-8qzlNAdX0WQ;S4a1`Wy&nL z;|!*-%9BT{*MXcxtpY?Y42yd={H$h*m#BGhH113><}aNSAe7)rZK_a-II)N>U~as{ zu~4%3x}s0OOhLPpn$Ti)ePh>?i)4so4-L#HWK5M;7Elw!A-2A*|J)lCd`Qo~1FA(a z68|JgjQn^IR0jp|cxEfkWh?4rLl|KgFN0NYJ+#0JYO%o*c_<794vF}lZ-7?9KXA~y z*{oQIuyjvoKt*3OK~pPl$%pvj>(Km{Ftw0Xy4-1A_$kJ<0axlDd8#rk)>jxH0&i@j z$v=olRqG++b>p%Cvc|@WE&)s_aUn_`tNO4yra?H1xf8PrL{$Fe6q=$>!{@4p_5?Vsn%(4vZ}3u`ObJInes|~P204JP`@z(tNKCk zX>XbT{oSSU-=MrUC<6bLM3E!9Pi&OyvUO@5xzCC zj2jj3F!HOxL5i(Q`5$A~&_pL2m*24BfhNUhhKrXR%?&e_NwlmV+aqSzSqk`uES!1S zG>*(@zIgk(sglC19`|=0Yy5-PSkdP!viR2e5uvCmu)VvBNlCp7GznR6zG>&z00FYm z3~^Al{%ccnkMOLf_3J`4oQy$A1T8HWSIGon`aQX@QPM!a?>NkxhX6-!YopEjA_p4KW~3 zzilmfe04*6d;f(`&^fWE z!*|iWwL&2QFFNvx;(*MQAvuR;G>YK0IA8Rs-uZ;5D)d=w+)lE#FoF+8;N;^O?ir|5 zQUn*WKtT`}J%@}WdgSp0^4&?>)s1aOpV1ATUFc~7IAu&W89ilan@E@mN}4l**25{gddvk0;k`H<1G0 z8gDM|2aMvnP+MP|My}%mEcuLEPX#_Wx?N>As5sfU`EPk)GEujQKR+Hp>#i3Rj@g2~ zs_Ev!wiUTFe0QU5ICDT*>|n1kD-mqoXHq(B@=ekbzOlutIQ}Y>@PgdVftz(CA4ikY zQ(;c?Q}@p$iwTD*3P!wq5-O|B1+HDI@cP%Y*F^3!Ow?bk-MqhIvbAwZc>ekXf&@5y znn6q+y6>HV1Q=q}3qRR6qp84#bLz)4lQ27E!PR12-v&g)bfU0s1KaQ`TF)sF?apCe zkiQgkj5cSk-?F#&STV;u!()qm5rv7@{MIWZ1aRSnw=BTU+b7 z1l(n@!q!rIqa5$ywh}|JChZNqEQ557o{}I5c&5&7C17_+Rym2^u z(2l|!yFn zaB#7uJtDBw5v`XsFY*h&>0P0AcQn|Ua~LS^VSi&=3uoudZ=*U`X&ve?P?D3!&0y2g 
zsUgt4#2yl{<5}+L6%@Vya(9M61^L=#GB;=yw=bG4u=1u2=1)D%l$6|vH7f4wS|!D5 zNw0JNu;4WQf((Cg+Wf>ngxO*NS_D4CUv;E7qG1=Jj#~hLEwj!+Qro{M^cwDihCA-` zQQMBIn=T#7sF?Ao1N?)_RoCy${8m@@tOi%u$-57oZI)}8E4r@^=I7Y ztuGpGMcUF4C&QzsaQ>DVj~L`W!euW?;ebX_gM8d6k7WJG!3p-EW4nU4$9;>j*z&FI zAi+{4>Ko^p{FUzt;_pgoiX)p7AR9=q*M7~r+L8KK;c{Fx5!3fkLGPKj_p;~VE7Z9~ zcjaOB3D#r^Xw}VKKyY~(x^*Jjru433$N?tQQRy-=CM36r3%oXG7fWg?8&*UD5^LYN zn7Cza$!9&{GD+|1V%GLKWlj%qYII84>x97#;=iG;g;!VhYq`z=eKx`3-_zf8fCI9T zaLw>F@6-~qXx=q&#Dzn;Z=+#;f_A}Ec!X{d<*U{M?x96bPSh$T+1N}9Co7N2iyX8+ z`u_f3E2@KSm_-1Gm_WQU+Vi>sZgH?$O&h6z{F~h|=AITuSL7Ip*gJDeeiN?HP@$O5 z5Dy&`acv?A_bl9^%=azz&U zTkMQFM&9|}U>1M5k-*WwXHQ-7P$T@-BUQ+`zR`l_JGMYyE$;*OedTWWGTrpkbesfynTDu#Tnfo?~7UjbmUw| ze_%YpjTE>&Lv8q;LX>;Qd3}2ul9Z3N_dVIgc_diIzsqTOc87E&QiG5L zqdr5*A7nh;llo?@7z#k&*db=KGSQkX;jSrocp85l)Nh? z9BdTDsBe<%5$a&LHFR$1#fBFbl0>QsTqHux05m(4R+hJNEEl6JbbgWPT0J%UF58lh zcWJa$-cWHetngMxwj@=gO&>9}Oyd$?I|9%LIoKyH%KMcPaZP`g_f)2(ad_)&-z`nRbl4H#2qNg?iVN~4&Rl>|r3{P|ul-oG8C>VWSc-e6AGe!suA zR_5IHdm5({+@o)>bZ!M@OQNLsV~%V67)cEo#XsV&J#*1i9{1uKqMRZR7L0^8G}Q}k zA61DJ{gwtf1n*el8?0FP-g375RyN48Xs_zIg1$!`C$2fY`Df+IBuLYbNzgkEit7te zs5ktxSb9L)d{EOHk%;~q+UaeezV=@0#hM4wt5>vP|GPGItguI>FY?g-Ce<8Tj7sx8 zT?UZN|AGUaLH$H}rlzRwu4ysvuG;23&y~Bcx9(-Z4nN{O2cS)t8(6NgK^Me7A{Jz**X&mw|naDQEO&eDI*2!)>!MNBm^GEa~Ku z@(pkR%{mU%RdP<7gtI6+gd}{}RLa-3|3bY1KXF2M6+WNn7TN!!`EoSV-l6q^04v{h zx2bmIUFOEwG121UtGJw@pLM-@3^J3NyXa(%PE8~zQKJlezvk-xm!QS5t*daYU8Q~5 z7nE3Eu>myTa7ia}OsQth?voh2}hr<_)F%BB~N_EpCy=9Rl_RqHY?#K#0paRHi1@V)Tw zRiF6F{!+7=_ZFw&;mNU#*vL$LJ*saDqlS$7U(EPze*&?szmsK#YsU1!@07Mmfl2R6jrj(&jo^|y)YoIIXXwtP20kWg3u*|0MDVYc{Dx2(HHM+8-INo7p zd7p~be%s5w+G(haPnf%!J{PJtz{)hRyrqNB`Gkp=bRK;5t4?D~JkCUGrR)bYU)Y>l6H-t;0x+AOH_Ib#6_Q*9Rnw_f2sXc}vqtUkb{7HK#g^~65@?$`d5 zvH)5SpnUF4OW^4K^6CQ2W}0Z5Y;gM*obW=`bN4BYemc6YEMPt97YX~TR;_f#^r`X2 z19V;2TP?!b-UH)jC@`l((sp_2h zkiIqfqfB|q&R6HKdg9@_FRfjUOKfcp+0Gd+XlaZIIjaS^?0~LiR}z0LVzfFUBP15@ zdBz*iVSBL>I;@Z3cuMfm>UZO@PyAmy$yn}4$jbl|S&=9-!*#Xyxqz_atJ<&Oi!RX= 
z+1`X*jPbsD`Jcln-&61*T=swWEFkg-FBfdljEOP@(E(GzRf3EFyz2I%*#noV6|rQJ zXj}60{=j?@WiDgy(!P}_?$lW(i*3#LB?T3vYxhpIaO@@ZobrgIIwzNr>XzC2 z^$^(BKjaTG{EywOFVXQ5%`}I-xLd%{;wfGLKrg87+meB|C86QlNgYAy_u2IQaDG9Z zgfUh7 @$s+Fj?OLvFUFVdxDWi*x!?pb!GDfRXltD1T%l-dv$R%VfUzCf{osuhMX zhY80@Czh<@?i82izByEE`Y1qSt%SEjC#-DHa(#Bz*xz-A!5#HqbaY!JEPqCi79!j#XCYZ+p-;rM;Z`@adEF21%*!oi^NJoVfC{?ysGJXEf3h?lq!d%yPeFtUk(qnb;8h_=hSmB@D0rYL55+(?bJ z8t?JRX9)AlUGNTsuYal_APB7ez_ zTM-|&OtRQnyiPgtQwl&}-KXpHIGloPp1n*yj37Ny`iTI4jQbOuihxp$NM-VKnLv}s z6CU#ZuIZf5kF6yu7cCZOf535oKA#}})lIXi=C=c^l~bK@{1 zL-+)fRtMtVc|4`?M%?XriVEI+t1cz4)40qjQq9knf@gN!c2C z>Ib$_eOjhaAtDC!Yxb^j~X~6)Fu?!JdVHt!(y8S(U*|dOR?^G^b8l!Id3!hn^ zk&dYWBmlf=FP@FUg5>JV!A+xJem*si(KyHz<-aNX)Lh;nS9cr(*%rr1tA;s z)E$BkD?ExD^Y_9^d+$4+8-XGN)0+_<(yNTwS%jQlXy)+JvL`6seUQZoHJ?beE#oyqN}?eeOBA_mEky(ap%@u2)yKe>;vs0p3ClN*snLy6lhF)xLP z=?ZEstr~=e7y=@eNnfHRIr~@lJs3sSW#1zZ$a4-PBh!U2Ukudt;~tt?D*U*U%iAKz zn(m4yM&|*uBSqi#caTnwq-V3F=gpNimM0Z;wxN1(|%i2U+gI&QN8-dLNV$ky-q;*T)BRri7aQbH#-*7f5z3bPMg~M znv#~*eTgit8D$~JN|X$u0u|tFf z?4aB4I_`KzE#e&-Za))9|GOBzw_UZ5Pfbl}EGEfM^MTp*|6?AT^zhZj2T}ejqWS;P cK6>x58o?EALJP3jN9d27l(J-nxbZ*#2Q|@b$p8QV literal 0 HcmV?d00001 diff --git a/src/genbench/tasks/latent_feature_splits/bert_closest_split/task.py b/src/genbench/tasks/latent_feature_splits/bert_closest_split/task.py new file mode 100644 index 0000000..b7d322d --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/bert_closest_split/task.py @@ -0,0 +1,99 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import datasets +import evaluate + +from genbench import Task +from genbench.api import TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class LatentFeatureSplitBertClosestSplit(Task): + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: datasets.Dataset = None, + ) -> OrderedDict[str, float]: + """Evaluate the predictions of the model against the gold data. 
+ + Args: + predictions: A list of dictionaries, where each dictionary contains the predicted + values for an example. The keys are strings and the values can be any type. + gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. + + Returns: + A dictionary containing key-value pairs for the evaluation metric(s) computed on the predicted + values. The keys are strings representing the name of the evaluation metric and the values are + floating-point numbers. + + Raises: + ValueError: If a metric returns None. + """ + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. 
Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." + ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items()} + + result.update(output) + + return result diff --git a/src/genbench/tasks/latent_feature_splits/config.jsonnet b/src/genbench/tasks/latent_feature_splits/config.jsonnet new file mode 100644 index 0000000..ef4f553 --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/config.jsonnet @@ -0,0 +1,58 @@ +{ + name: 'Latent Feature Split', + + // @TODO: Add a description of the task + description: "We split hate speech data based on the internal representations of a RoBERTa model. + The o.o.d. data splits leads to an under-representation of parts of the latent space in the + model's training set, making the split more challenging than a random split.", + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'non-i.i.d. generalisation', + 'o.o.d. 
generalisation', + 'latent-features', + 'hate speech' + ], + + authors: [ + 'Maike Züfle', + 'Verna Dankers', + 'Ivan Titov', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/MaikeZuefle/Latent-Feature-Splits/main/genbench_splits/hatexplain_roberta_closest_split_test.jsonl', + train: 'https://raw.githubusercontent.com/MaikeZuefle/Latent-Feature-Splits/main/genbench_splits/hatexplain_roberta_closest_split_train.jsonl' + }, + + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + best_score: 1.0, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + }, + { + hf_id: 'f1', + average: 'macro', + best_score: 1.0, + git_commit_sha: '3a4c40f7397dcd7d9dccf0659616dc6b14072dcb', + }, + + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. + // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + finetuning: { + objective: 'maximum_likelihood', + } + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/latent_feature_splits/doc.md b/src/genbench/tasks/latent_feature_splits/doc.md new file mode 100644 index 0000000..d51a56e --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/doc.md @@ -0,0 +1,52 @@ +# Hate Speech Detection + +## Abstract +With the ever-growing presence of social media platforms comes the increased spread of harmful content and the need for robust hate speech detection systems. +Such systems easily overfit to specific targets and keywords, and evaluating them without considering distribution shifts that might occur between train and test data overestimates their benefit. 
+We challenge hate speech models via new train-test splits of existing datasets that rely on the clustering of models' hidden representations. +We present two split variants (Subset-Sum-Split and Closest-Split) that, when applied to two datasets using four models, reveal how models catastrophically fail on blind spots in the latent space. +This result generalises when developing a split with one model and evaluating it on another. +Our analysis suggests that there is no clear surface-level property of the data split that correlates with the decreased performance, which underscores that task difficulty is not always humanly interpretable. +We recommend incorporating latent feature-based splits in model development and release two splits via the GenBench benchmark. + +## Examples +{"input": "wow do not all speak at once niggers", "target": 0, "target_options": ["hate", "noHate", "offensive"]} +{"input": "how long will jews be a majority in israel", "target": 1, "target_options": ["hate", "noHate", "offensive"]} +{"input": "sounds like something a moslem would do", "target": 2, "target_options": ["hate", "noHate", "offensive"]} + +## Usage +For the task, the model has to decide whether a social media post includes hate speech, offensive speech or normal sentences. + +## Data Source +The dataset was published in `HateXplain: A Benchmark Dataset for Explainable Hate Speech Detection ` by Binny Mathew, Punyajoy Saha, +Seid Muhie Yimam, Chris Biemann, Pawan Goyal and Animesh Mukherjee in 2021. It was accepted at AAAI 2021. 
+ +It is licensed under the MIT License: + +Copyright (c) 2020 Punyajoy Saha + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +## Limitations and Bias +*Note any known limitations or biases that the Hate Speech Detection has, with links and references if possible.* + +## GenBench Eval card +This method can be used to test generalisation in HateSpeech for LLMs (pretrain - test locus). +The split is based on the feature representations of a language model, therefore we assume that the shift is a covariate shift. The method assesses the robustness of language models and how well they generalise in out-of-distribution settings. 
+![GenBench Eval Card](eval_card.png) diff --git a/src/genbench/tasks/latent_feature_splits/roberta_closest_split/__init__.py b/src/genbench/tasks/latent_feature_splits/roberta_closest_split/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/latent_feature_splits/roberta_closest_split/config.jsonnet b/src/genbench/tasks/latent_feature_splits/roberta_closest_split/config.jsonnet new file mode 100644 index 0000000..d30afa0 --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/roberta_closest_split/config.jsonnet @@ -0,0 +1,57 @@ +{ + name: 'Latent Feature Splits (roberta_closest_split)', + + // @TODO: Add a description of the task + description: "We split hate speech data based on the internal representations of a RoBERTa model. + The o.o.d. data splits leads to an under-representation of parts of the latent space in the + model's training set, making the split more challenging than a random split.", + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'non-i.i.d. generalisation', + 'o.o.d. generalisation', + 'latent-features', + 'hate speech' + ], + + authors: [ + 'Maike Züfle', + 'Verna Dankers', + 'Ivan Titov', + + ], + + data_source: { + type: 'manual', + test: 'https://raw.githubusercontent.com/MaikeZuefle/Latent-Feature-Splits/main/genbench_splits/hatexplain_roberta_closest_split_test.jsonl', + train: 'https://raw.githubusercontent.com/MaikeZuefle/Latent-Feature-Splits/main/genbench_splits/hatexplain_roberta_closest_split_train.jsonl' + }, + + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + best_score: 1.0, + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + }, + { + hf_id: 'f1', + average: 'macro', + best_score: 1.0, + git_commit_sha: '3a4c40f7397dcd7d9dccf0659616dc6b14072dcb', + }, + ], + + preparation_strategies: { + // A recipe for preparing the model to perform the task by configuring its prompt. 
+ // This recipe is suitable for generative LMs such as GPT-3, OPT, T5, etc. + // We provide a few options for configuring the prompt. But, the task creator can + // also provide a custom prompt preparation in the task's Python class. + finetuning: { + objective: 'maximum_likelihood', + } + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/latent_feature_splits/roberta_closest_split/doc.md b/src/genbench/tasks/latent_feature_splits/roberta_closest_split/doc.md new file mode 100644 index 0000000..0956e1d --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/roberta_closest_split/doc.md @@ -0,0 +1,52 @@ +# Hate Speech Detection (roberta_closest_split) + +## Abstract +With the ever-growing presence of social media platforms comes the increased spread of harmful content and the need for robust hate speech detection systems. +Such systems easily overfit to specific targets and keywords, and evaluating them without considering distribution shifts that might occur between train and test data overestimates their benefit. +We challenge hate speech models via new train-test splits of existing datasets that rely on the clustering of models' hidden representations. +We present two split variants (Subset-Sum-Split and Closest-Split) that, when applied to two datasets using four models, reveal how models catastrophically fail on blind spots in the latent space. +This result generalises when developing a split with one model and evaluating it on another. +Our analysis suggests that there is no clear surface-level property of the data split that correlates with the decreased performance, which underscores that task difficulty is not always humanly interpretable. +We recommend incorporating latent feature-based splits in model development and release two splits via the GenBench benchmark. 
+ +## Examples +{"input": "wow do not all speak at once niggers", "target": 0, "target_options": ["hate", "noHate", "offensive"]} +{"input": "how long will jews be a majority in israel", "target": 1, "target_options": ["hate", "noHate", "offensive"]} +{"input": "sounds like something a moslem would do", "target": 2, "target_options": ["hate", "noHate", "offensive"]} + +## Usage +For the task, the model has to decide whether a social media post includes hate speech, offensive speech or normal sentences. + +## Data Source +The dataset was published in `HateXplain: A Benchmark Dataset for Explainable Hate Speech Detection ` by Binny Mathew, Punyajoy Saha, +Seid Muhie Yimam, Chris Biemann, Pawan Goyal and Animesh Mukherjee in 2021. It was accepted at AAAI 2021. + +It is licensed under the MIT License: + +Copyright (c) 2020 Punyajoy Saha + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +## Limitations and Bias +*Note any known limitations or biases that the Hate Speech Detection has, with links and references if possible.* + +## GenBench Eval card +This method can be used to test generalisation in HateSpeech for LLMs (pretrain - test locus). +The split is based on the feature representations of a language model, therefore we assume that the shift is a covariate shift. The method assesses the robustness of language models and how well they generalise in out-of-distribution settings. +![GenBench Eval Card](eval_card.png) diff --git a/src/genbench/tasks/latent_feature_splits/roberta_closest_split/eval_card.png b/src/genbench/tasks/latent_feature_splits/roberta_closest_split/eval_card.png new file mode 100644 index 0000000000000000000000000000000000000000..5a6877dc52127afb43723665b78922f67b7bfad8 GIT binary patch literal 176039 zcmbTdWl&sO6D}MG5Ik6LXK;6i;O>y%?kOCMLE{=61k~&)otaLe&2Vi8z`V z04?lniB&CZOg_n35HqtA%iFmSvoNu+60ED2y-v6UYu@TyK?(FSJVt1Vo4RrHaDF zQ@&T0=2{-6hv#`^U#meF{xHQL2M80t$;IJ@o933mKe6&b%ai5+>>Wgcn5HGUsby6p zRJJl^`ISGx<(;5D3HTZ2AHukdd|y;u&L@_dagzC%g2y%-F)2KM@hhAV20Gs(>Npgc z+#oYGR}R?%EJT>Xp=YE7p-|$bv5(}uY+expSF!vZ4n@%r0ob*9`Q+$EBxBGrz>r!- z6Z#ABE0;69;# z>DSGf7*w;>Y5UXoSq;gg}n8~(&RCa{{hB}oXdqa*v#jh(fZL}zFgEG z{$RfJkK=~hgntQwZAKXt^ACFIJEI<_8Nh~n#!1Tg<-OencqdkaTc*8M*mzH)mi=Fi zGW<`Y&{NYY?)A6dl0jcmXVKP;``?WHiGuszVEbXt`qZk=g4M~Rl+R~{S5k?O>u0aH z4*Lu9LlZ;O0KPik87pg>n#P9bX&Nh|jrgiI<4_P+A~}7=*M*`hHl(^dSrkZIe`ly* zD4Hp#&x%4qqCu2b=cm(5j$bp1!Wv4u9w2-EWSA-Q`C`}A?~XL{^&qL-H?N=^-ZJwO zys&v`d2qRa#I{B};0W#`Ud$jUK(GO618hxdU%11)!!r1@WPgg_AYXc=EUg!KE`j$4 zQ@`~Y@SEjb!Vx%vA^bK?edrEbU@XK8^8Y~np)tfvR@c zqn;L?Mxh4F9nGV~GiAB!7DBLl!FZGoonCM{L08I;dGe)<^KtuK8~mVdM$9s|u#jeD zJfyy&Qa+wS{)x!P{`*N^bL-=_+=f+~ar6Kk-<1@(nnaa%&b1xAa;6<~Ze(&z!|&vh zWD#A{6u?m{`9Cp-s313&2G=xCo`R|F3ooKSVV_jw;swfjtJp~q9euZW-&S2-EE)>) z>_qHz4+{OyfwB}h=d%KWsB(d}wbwALz@YB{2qkQk2M8emtP%7$6SM5+Sr#VX8P)Fz zLld?$mb3GEr)&7dn)Tfk`5Xg>aNQiHtVo<_MBr#@(AsygW*OBOJnZUdJgr0;F-Hvo 
z74-f*1q>+BFk%lwS!yv>MK*L91*Sx(&6v>q)ZOh(L<%huJig2=S zWkVeSSng|BZ^dcjV(VszEa6O92)d{*$bfcV|0l_Nxfq`r7$*mfMbHlgHB3;S=Wt5k za0U-_!~cy^k5r3B$qHl0giVD z9!=cYo2dbM1qZS zP!zNH#GAy)trTUcQ6ev4_>qy@fX`$weQ3eQi_ zwz^NgdfI~e?I-Az@IASG_Rj*(4(XA0D%Cjs*`Qi}(okW4013sQJ?R`UTy&ATl*mp_ zUYN*ks&Mga=KL;Al%gxXSkXq22CEE{&id9s$50f^eAbMEBpxXY8zbCuoXDI-dV2#! zcG}SG{M{2{1_=qik=)WLVy=P4{a+8m_*cbn7*i_PX^>G#+}6|NPE)_|nrq9a+XzC|CiXwnFjM3loBU7}dNxh(y<@)cia-+2*hs3SYH1sui9+X<=K^5w zeGMumJFBId@@hTWxu+6S0ggClHV!S_lL4IqD&xY`iITJeWi115vm7d>9Os_g9$aqfi(*EU z8O?V-UL1)2mMXkcz3x8>n;S;7ma*>F3hq0`(HGTUv>)v*>K`jA)>P$Vx!-7fF3q6j z)2Qp)6o@fi`bE{uQd+-3^pijtt(6VfgJb_N_s_*GdLwz#?%VCBBQLuyrn%d~9L z;Tq=Z?UAwCho5%wE}<)+bkH6#^{O8e<%!j5?=-|;6WQ=2;(g%BsFXdagd(8HMXpRd z6Iq~zYI;o$$DcnOdZcayF1lnRAUiE+6x{0BqY!Lo2lP^OKcSgLm@%`GFVyE!lQ>M3 zc)en2ce+L8Y|Ku@boU0qvVoxgz5esT)0%@bYDxayLsOe#+US;C{sJo*a= z&O7c2n_YN*8!=%iBxDH=G8c((oX+6fTHFnuI%}g9Nz!ynro4;BRv4!Ila(B8#_!ll z#^01-tP9gtDwkH-*IUjEeG*2&KWAR+=zFu5R9e)Vy6L(6wXM{=jAxh z&s&Myf40?YA7e)xya6h0}JO^TjVU$@$ zJw2iARf@<%BPpKoneJgnR8Vp`8D{NCCrb;6RqIkDOvWmq-2f=Ujq*ifVIimv<-fJR z@r+(0$#bf*6cZyuf~Qme++VRJNzOxj=KmqdYf)aJprof$XbIQ5FVMbdYd>Wscr^v0!8&&bRU%{SLzgg>q3u8yWT#AglFkdwDc__v~riyZeAjDi)-$_PLfN zS%2n`Y4HvkE}KpqqMYjNzKvVGoc0lB&=ZL%O*?5Q5AeCm^!za0`;sk=y+yT_2C zo~do;Sg)a}lQw5zPA3f6Ish5au4o^>6utOq~P)xAV+;UHlU0NmU)yWU=^vV0DWjL$Y@;I~FL}?bMQaie`Xve(a zIs_m^l8DxL+mOkfLk_|eBSqfq4#IMa>=hVyiCKRo+0cY{4~+vHN!1Ui#20RUhqh*E zCAFf&DxLf`g;FXN^Jm@2s01|V5~JTH0g`E9))8ZOq%(WO|J*mp^CgN$$EA9sc>Rc3 zS(UU;h|^{!So-)-xynStcm_GSIPU=+yuj~q_WAtQ;Bxus4q@t30{B-{Q3Gw_Ut||> zUa;rQ6TPi<8xY1Q-+ZhmZ$f{oR-MUwmmP!c)X=YY1~SMo$WTC`^*DBz-}+HaXm$z% z8p%&s5<{XzATS5Tznkyt;f#Q}AOjC*&392Lgb=o-UaV25wL0A%jJSeGnUEp7!>OXpb zkgWvzjXcrJXQ7I9_HjADbr3#he6cn|Y!i>Ewwsc!skb#NQ`h%0U4}yeBZshY-r+>@ z@eO<$7oC@*(@8^;gTfB>Gtn?l4@Q%d8hnm~(P{buha?i~Ro?xROs7mmgI9bffPr_= z*L2BjiFdbWoui~G)WSx3wmA)KPiLg)V5%S>orG12?-P_T92%K3P;F%_8nbUp#B4rm zC5P>f$_yza8xIIX1q_@0IK4&}2o>mj^F7N#-GS3DFD(u;NL7b79}8Atkwc$lQA|uV 
zXCYl#&)?K5!n&fm5tL__1%i5Joaxjc5LMMca~N#Ul2B57p2KU@QZtlsAC+_AWFePE zUIPn^_a)Uz=ziMHOdveHrdrfYzC}Ibn?gRB z3bW3C$406z7W##sb9`wF~Mx_MkDoY znVh>mlp_xPBqKD>rmE)7riYH9swSawyZkE(QWC~ZCC2Yd(xiY>EI{h1_un|rN}V16 zrc)|o<7!AWttGOb`U?B3_1ksum;Q>XJPuvfMEs~_sluA`Lk4Ud3g}9L-WWdj*DqKK zF{Ot?gX0F7n=r{x!yt7kg$q>4uiXlhhFs2=`?SCB>etd`(_}qb+MAc?N{tiQx*Ejm zlmRBkhZ37LG;pvvSKEZ+uN>nGpRl61bov%P@Bc>S%8r~aLJ12y&3K2^$b2eAGRLG#yjYT4$)CVmFdlE=6F zajggna)RvB^S{(VkSmqWDn5Llyt0o}RmDW+Rp%xJx3OY-nZm~y=Z}7mF0DdH*GTMc z>_%ss;f^_-x+0ZdTOeK+TqwLC&)(5jzkySg;!wM{{AR_`(ty7lx10)JgFZbh~!3 z;}cz@V^}#wZ6fTr)HYp>O6fuw-qx@4&TyE*+B3v?OF|HbnH?o$9H9T#+%YC4suDh_ zC}cQVFSEFr1&?&ow{n2HWogjfx-Bjk%X^`fKtR zG&xh6XrWF+EjJ)*MqO20qhk0zwf(!%69*v?T znhFapfGeOPh2;YS()bzIs^cd2H5i0BpcObK!$f2InWZpL#008|z|ak&lM-rA>aT>I zgawB%;?90+YiJ0q804(t1|JZGB+F>xtc4|Yw13H${L!8OUhVh!FKerovxtcKy^yVr zNlBIB;&QM7p$`UAo*fka1r&O>(>5?M*%^eC{X(Lr?SXy~Sc6asH~#PH^WV_tNQ1PK zgEYf|(jrn&QYffF%p~~Ew$iZqu!50NXo@O0W528sWRJ;I%Jd`*7j-2odF(8Z(0o4?O+$pkkB_z{&3HY_4zZDm>7|v#SCy|2=8UIka zr~C^;Bp(IU4M6HW7n(xzle+VU2SE!<<5#U1Ek1f1da$iO9wdbuS&D<1C`w@I5&Y4- z6B}lVx+`%_MSrPzxxWkBhA}==%PT~|Mxsl_@IZOMJBBF3<-Thzd(jjN_aZ-3DBT9}x;U zf5i-_3yC2nfk73)pM>Ndv0hW1?^RX3DM)`hPjzS>{=Qtmt!S=6IRt{UlDIb*voVnoeMz4FkF1qk}M8J(8X(N-WrDcB7@;Ukv%i z${(jIJ$}gx=mscMCjq3;{y~0clzc;feYNAt_nFG?6wGB`E2ueGs#qL8hMq}dqwedG zJeoUJTUaPxQ?>-`K`$Jzs5&+H@uK?(AG+WmWw3qaxe36&*!~Gkmamks@CMOtf>*Cm zU&eA&l%lS2G0UBr-$ZuF!{++6%$y`iyH}_JWXlVf8O6X_Q0H-tpWowA6A@COE7lCQ zo*+qKoen^mJlko$sBKT$E>1HlEH|ddE+-IwMcsiCh5#^6mx6w8nr!V26(MCxc#3_S zqjM?l{VN?c?(OePRk}!9S&cbdG#|r&1NSX3I`5>A?lXO)l5mW1i-_6e_6mIh&`xyM zqF;DG71|&krOiluoL@YNCZRsa#F#k~vgo(eg!17tobHA1#2`Bd2p#O0Uds|_F519p z$b|$Oqsh24Wj1rdOqgtcVxc@Qc3d)9x1f3$V&qaFvi1g%G0yEjhi%pd5}GW)GnP`r zom*Vwc5?9>u!N_DgA4W=%wiB>?2n3BK=YAhvYyS^J&aaSR|+{Y|HFjw0M{Tdw}Ilb zX2ev*jI3OUbc?XoO)E9ioOPGt^Z0R@P;_|SGyJU9Hn6pgRxS(7r;-rK`Y0I2<3BO% zOfa*n~l^6?FCr(aA&tDM>2IW`Y|i`wN<1t@roLE-N$(b(EtLIkszBi^_^h zfZN(2GH1)xHovW{M@!P8I^Q!)_kHJ1rsh~2C%Y<}DAY}}1Bjo)6GFvRm}S|Bi~Z<{ 
z9P@|F+$_U+W5il(T~FgWNdgq9YOi`98@=>30#~%$hKF04r%R7kAEgO0H_(RgH+-ho zE5?Cdc8Mi*41>gETjVNK6+wIt!?=a4&oP<5z`1$cIX!v;^`4&?(CY@X3EOsn{3 z%B*5@A2otM04WblftO1W4o=R;1i?piU5}%p%{IPe@CSh2p^;*?4wOtOSN%D;wM4wqK>M$O!Dk$n|@4E5J z^e}&fB|lqCs+#UzIP%foD@D9vSLjiyk6Z(-V#yU^Wie6#1}6f(=7PjQ|Hi+7G=a! zLrE)J5U4o)hc3QitU>*^10uATOUy?J$Q=Fr>sgy-j$H^>w>c;cGnL<}fZw5O08nIg;$pmy?2G&Dj0(Qkf$Q1J~1K7vOKcMa|D%@ zmxlWSKIksWW47n0@+Wm20ScIH<}?6eaO}8-G0Q&XAaxv6e;%k%P#-MTqM4j%lQ5Z% zT^=qk%XY+^<_6S8>T?)MX;?}RE-R-BPaO~8`h%yChA0l@k-j9YVvmR~BchTn~XDT?b$^BhwyL_nk zY+(R2L~9d0k6GI*s?92XgH|KN1RUW>g)Td6+r91EJuFXp59+s?{_Tw!%PxBgS! z0rXwKrTEav3T~QUbfO&S7=<-I{8u>@!G=VIEP1u0^f=fza(#ycRY>_}=apq|x zg3(MQ+#1+#1H*C5Gv+wZ^GuRB4V$-2H2I}eZ9z#3Q@{vyc9e9#qRD+YzSB=NUU!bd zLP~5Ha{TcAG{MCMlsu?BKcs!E-}tx%*?&*IAb*>itn2#VCkg(oOb(*+kEx2~M*0o{l60pW8{yvj|Ye<1s=FV34IsHp-g%r z72%ipk6|qci5VL+W>HTLD(25JOY}P0!VyFLU`}nDWx6*HmgcKuulCl#kVZB)7?uAGt5$kxTnS*SqS-OO+91qUkDO|cVCcUAaIoG9 zA|ez5Fq0y~^~Dm0N-3hi$0Uzh7i48+9{qqHXJh`Rm~-$=Q54`1c|ZxP8kxwwnl0ee z3l-NNsQaA`gGuFsw7tr&0q8M*cnv;U5J?vi8UjjbX>gbUc4}~NvA5212NVe!DI;k3hON>q9DNraw9#q%L2vb=0KNFy|M$CSSr-@F&0bJhWN8SwOjciTs+;;bReLx3A%I zf-#>dcf^WygJJh;C;(yc@C#<9M?|Y-a^p)w>3sz49jImfi}gn5Xd|sFH&AU*I(gtB0$){%*uV z_xbf&m7VIBpR@iLo_5wHC=cU=MWo^f=f08?SzMZJ9 z;Ag9E5gUm9;*esyYR06RYW$b3(1xlmtB{_n>>%(qrkLv$NUOD`d_rzO!Q% zP(?6e_9oM2{Mp2(`ov@C;!)(A^vJKQP1>SP1X!po@GX)3Mag>*!cUjSk}OE8n(iLgw;TEeM;K=b2{qz0YgEE6kk9io{oCC- zA*eG|6=v?BGmPshv{Rs9gXC2;TnJ?cG{(`qNktq6l6@wC5y>$Jvm&ACo^o8(WSeqx zH9Cj4-j_Z%ySu*d;4MJD8#wq~Q&uG6?^s!$L4~3WW4KP4*mIHNw$JrqlVtahP(|y{ z44gz+H2=K;5i;l#bQW&LpZi)W))-@dgVwDOTbyL_0f%I|IRyy{S_z|(q2&OM>zvF| z5Cppv#v!!cNl5?@hTx+_iw2PzCh*kVWx?3b`_;ml{$_w%xo*0p)HlT-G#>*nC zHOa*CmQAxMg78#PmIul+WUZ@h_(}Nl`z;#girn^mp|oBjwCVT&HG#Tw<>IkYKecyaPDzrRfXICtUd9` zSY40J6Ef^#Q{_=2zcw(pwxW?k3%sjy!RP9yV3l>2r>AA|j6Pu(=y!VyzkC{j zO+^}SE_%{QBqask(J?gdr(`Ra0Lw87J zG3R~(6FT$T-wK(RtNib|xi~!K@D0y`8IgCKZqFrT`Km#JrF*HK7<>pzLHGynr`W<& zuq*q^dy&i$$OC0P zHn)AIN<_0a`!E$@Vz$p}YB@dh!nrHtV@5tht?WHzqGGdic8h)&t;DG=JU~}1U;zFc 
zUZ#3OYK=gbFFUbJajE?aQW8)=CW^6Mm&RA#`_`mh#!OvAqI89o=S@msw%Pbu#y4=o zJ7T@v^Jb3J!zS#uO^mE$?0j2R8(cc6kIw=p#}b6rY7HY378a~i+$Zq5jg5s6F0(rx zTfSgWLw_jmTt2D>KQLB_E_W)#RlG_tLnM2}_C>N5Oh~FmI##JPTAFwka+|RpfGayF z>&ZW8@P|zn7X8bpyo$|kT|dg8=US&r99~L|<8~*UkyUmB-k0539$OI*Q8e~FWs4C? zu!w1yJVO>Ks4wd0cQAU3O7JU5^~rV+NtlLcW5gT+W-H|+asgbOVDT&tOJ?vxc>oPx zWud|;9Vtr$8%wN;PO3E%eq_r5V?!c?qoF+c0bhaG3%=CT{nh>ELGM7u%k>#{-5n2Y zP1kU%W;C~!gpZ?O>0z#MX;pRJkw!fNI}6|117S6u$csVF_MFivrCXgCO-9igRRV|e zsl@X|!DLKG;kca#KVq2z;j0F*0>0h1$62uFI7 zfp?eW0_1kPthh*UAM7^hPlr%|0jwe6lXX3}SJ35(>jbVzd)FM2WOboB(Ot88cqPD zm21o<+etkfBD~CD5Z!WY*))9?BAWCLvNj5uuN1p2y+3c6?Q`XibVNPHD0-wxc4xzt zW}^U_fpX)pb}EqDQVq%Zdv9u2&)a!F{Nm zJ=oz9*vn{YjUr(AWm^%NjC`IP65Oau36%|c6LUpo7k4l3gt@9#Y^YjnqS8ON5MQbALd%KBt#r+k=JyJ6g$N4VYz9T?6x z-V2@Ot*lPOQJ)u{5?$U&kA?i3uxk-$S!o1>L`+P=q|a~;Tc|DQ;+@C=6C|%!iEm40 zTxv`a&c~MFkILymk;r4gqj2k}ulo?s_e4|Iq%dLO7k-w77NQ^vHcPg8YUQw){oA9n z4HK(_*m0>SYa3)sF0GIh1-;3FAK%VZPin z+d4UEmEi4hXUNy3%Ndn5Y#m+v`qO#jaHnQJku=1`K|>5)_Bf0Aa4@fxPQt4F*nle3 zw8kAaY3F2X=;sU$zJZ_LA5_4v?TE;{)(_-V0+)yRZ}0QjOTk-#gr}) zkH%2UJ0=*swf8NwcBLSfaj@%zYMGfzV8Umfd&a+So3DZ|Csl!Zh?7@es;EId=6Gv`p!cy%aF

rGBMg1P0XLbu9em@=xOI?pNqjWDV%5;f{HK`~W7OHwGV&}YO;#k`(?*;uCjKmz z@nARwp;Rgqma?S}dB-~>;1zjYYDiTsK{lf_;8~QG;!Zg!^4QQ*Zbj>UY58h-c~;}m zZ^E%LvVy0^)Di%u`ISX6)^PisZ#`a|B zN-z)yDB;ra&o3Nz`fNybD5rt>N`cumioX%)(ka|^P@<5r2?Vw__)`LOLv5)0>w->- zHB@36H0kw;0}h-lA%8Ck^6gGUE9@pxVbU7!d@5>}v70j8nNetUx$>%(O_r3;+L7?F zkG#mH&7H0IIhYh>OMVAJ2}vzPJ{4>7{*F{cZ? z*6lZ^EX-GI_KVBw2ocTdzQ43)Wt@n~{$AcM2{Ii{d+=}XR}!N0vvgMa(f6>cM1Rj4 zV^6ejL3%o-Z!N*Bt)sWDc9%Qdow8S(HT6x%U66Gr+qQPB`0kO%ln(IUl7Vh@w(A*L zTfdt)S|20y8rH{Azr{E`0fSVmIEt^JO@m=cj;6bO5>DR#(%xm7v?0{|74HKBJ0;$a zV_IFFMc!&ERJz^=9o|5jI)9AAH&?;u3O-}woxT-ii(JmuL7UuhMUN9cCOb1GZzn-2 zODA@h&ptQpnN)*xTeBTk9E%LP2mWtlmNM|8cZBf#$iXVMdTk^7w!$@zkxCwZBlt_E z-)UI&FIH+=+$?goyS2a{(DSAgus)5hHHgQeU33HAAWIN*#rc7~sTXTUPN_iN_n5da zYJ*eua})(tw#R=GSmGoBZH86_*Y{qv;IED$H4B7#V91XBJp;5-inY&UG)lW<%S;m> z{3pBywXhy;e?*@t`LQxC4i%PgX$hJxp8VClV}CPNE7^#Z%kJqf@-)i`D3;;u6wlM# z9mB`4M(GJPw@-ThY5Vv6b)BI?P}-?p7r=LzOSD$f?G?tmjwa~2Nm7+_uvW*;)*hZ^ zq1eo}Uv6@2CAjsF=G0NIU69>i52m2f!aqO%8zUITb>i7!dExZFQnS2@>*$_nfBb@5 zlbK=G;u*2f{g!$^_U>E%V{h*oPFk0Q0(|*a(|TJYxc#BT`1Fa)^+Bg#HVHCYo%Wup zdlwu`tfM6P*o**R2hv%y`+feTpf=m%tDx@wO?Y_At-`77*6|^Ic~=-I+E`22YS##( zPkjV0mg9c^*VM(-M(rSam}jk*rvuJI0M73O#s+N}hhFpR37eW>Q5&w4i=rw z3F1)WaLU9?)SQ1h-vVRIBmWrsYKYO<2*uj{%E#&cq~{mo%PD}u2g!`C%GZpr`1D!7t^LjPG2Zsvq2{^uAk*`l`@QcT6Cgd-1b*AX zE^*JuSw~*7JFL{v%KZ&S$GP~sKYIU|@eWKFdACnXGV!Yt4@IWHF6rLdN(Q~Vmh^XP z=^Acx!vSzKlBNz) zQ=q?HL#iAp5y`u`oiKRvl6j0^l$P#Nvas@pI~ZaAVuGLjpzn=TR&r=|~Gabi}d9 z2>CubOUuT*?Xv5$DYWQfZP$xV*p|HJgrcI-IE4WNeXb#h$QM@+=VTimOG0BM>d_{l zW@_+nsV(*>O`xT#G-+H%wrEn|Z$+}^T3O2zG?^s6fx_V8z)+#j_q)i?2RvSo!bCSy zFm|%9S0aR)-T|Hx)Pr>jW6S*= z>g_OsqK08>x-oT4zPD-L?#{B;U`aDqi&sicw{vKxCTUwbNxU3WiIAwBeZ%zr-l~*g z7$ ziHmhG)jgdHK%xO1Eoucyb12tAXlFXMmc5tRV7^~$HF!W$S|r253Ub1UJe~*&Wa%0Xju$CgV{4ca0eTx6B;^B z)LJ;yY;5PV4x|87*F~qvhPq_=;)?2D5hG~XlO5qm+amcOH?5dXv+b=GLF$ppRucC7 zLZ#wixHPCD;tS9}PBQm9GcqT-13B!rLLfEZzawL_E3~#;Q-E4WAO4a?$%j%bKUfa6 z%;r11qs~1W)7eA9mJswV(BHY3cmo^y@0!W@eoCTR^w-jTlS7Bi`@|By+H;h1Y2F+) z87fErU75dEe`My7dqQPN 
zWHyWABO~3YCK^i*65N`*x?-PqPIxoN5JiE5;nM}le|xe;ySV+jrYGbmXB$An3*&oS z3uM#LJ!5w}IYWMOwONpZlbZfhm-ptopVpHR1-aQIObTfXB@AVrKG3phe6S#JR74Md z{S8RlF9M46H5h_LW#SP8zih}wJ58c2%CN1H92Lst_LR=&UR_4kvKvl7GWt%PqpQmi z3U>6p@_t>uVnk0HEjU7J;?H%iRVY{)Xb>*;4&^*%`E>*TB_`s<&_3y8F@Bc@6=oPV zQX3LF@bY`TxuAFdc|}cKg;$C-yF~dXA%$K=Yaq$gqan-SFJ^p&iZ59N=~c|2&q!)@ zaq9?#YZmqfRDpTn-0#j!OljG7(SS_oHUwfS0 z1J}_|6N#|x_~)lrMDH)KHdic*@6R>$Kcr%l2>-$gq?I zP2kSFw}n^O7{0$EA`uvnxGEIs>VCb~{MfT8Kzdj6MwzVpf=jyx2nr3y5EyOP)FA8J zZ)njdKD<}VsQEjH_x0hckIO`FGe_%HyPm;EXA=E!=^K75t^yneHQ&sp%dM0ALhgR**oYbFP>9pGh zBd)2#uRdwaEWhmE59u|=a>^7s#1x$yM_;4>(c>cEVKg zhbdEsH7kq8ltIlpy$sTJypABhZ%-auzn>#j`yB8@6j4#b486fBaMYq%?J86MNI*&G zY1R?1BCOW#(_Sadm{2r!L8N~a7y(9}cmSDDjS?a=jP@Sok{eLHt>8_^D6?Rkr`y!!P@`KmJ4S(_8el_pYG1F}~S!XF7mZou;cVO5ucOqrvFOViDtOCN{JPU-|ezp_XYQwD?fI*|q%aT_)Ic+Tg}_qzT0^bw)b z6sxr((};nzpbfqF45sfbR;tZEnL%lu+%Umtu*A;c`XdRjN!XRSR>g|gC4(=Ig#)C4 zM_|&3Mdc-?dRhA)5;xSry=g|w8TmgrLUD;B;FxmP2^@cI=hGhyF(iiJ;2M~voKJjp zzijE#Dqz_hOf)tH(!AiD_+uyiE9TOmDAA9yB$|QChg!S(}>88wCgwC z;ypOzSO-rl8m~cP`5kaLode|JyTvYrNlL;$(%waAB^LOWhp-(RW-@*5mk^2?WfodN z!+gYwNfZdtv*r6XO|R>1XDMfj15FrDsXpU>VRdR*~6%0^l$=ZA5{XX5f7uHh_N^bkqGRLrO)8GJ#cp;C-wT0jHb+?0guDpvvF!48Hk z&!jZHv_L`L&?g8LPwD#(`EK69SxZq?Xz6Q{k$I}?BT6%p4V)!2#4ikHuXMw}^npk0 zVv<-$gf*VNyaA1$lv65&;qLGQe2?Z?zt<@eflpT^%4JYV0Bm@DKpcDqMnX+31EIag zJ4ttHg<_j_ZhDxq$e^ll?;oS7JX= zLXxGyqb-~*b1h4S^z`54Fq>vRs_?fy?MfVc|7 z6O1xI>?y2ck5nXn7N!f|07DOu5v~-?YyLNN|4PKA``w6;ui9Y{rfYVF`51f|&xH{KB%HwFVj%|hLj%mjuGT~01@8&qqs5Ouzp*dm!p??j zj-MZZMfNT}rh1L#H2FskMWVaUWDNTKUC$^3(o<<@hepeLA{KLtkn$mq-&W-PVV%6D z^@o@QpRfy1nyzM6Um*#4(C&Ytce{H)`phus_$m9u8Kp_be>R3JvNNxhbOCqnTV<%X+Zl#;s zO^9Y-YLq#0v?npdhu&+=+~8z2lbCsGlqFhDJdxdGE=4zB;}qJZrK6!ngc>WGOc3Oe zev81{%)yKGX$y-bhmsN3hyzv#8Y?FY>!G1Hli!HT?i|_&w3O{UOi^isr6Y|MLNe{r z&>iu5Ngd(M8Ct4#yn&eC-q7zs=^drJxp?O86ZU{&p4StfCP#JM(WJM`Z?w0ZA1|e( z^PN0ZrkzG5n$85izlNC+s#2^-+jwINX+es#es$~GqC=xlskKHZ4uY~*DM#U*tJy6^(^CyENq`TKh1#HGY*5Uu zgSg8y-MGF#yH1`%SwFL?$CyfDbcKq`O>A7f(4;QNM0td!10n>F3B3QR`NOT}YjniD 
zWqJExqS|(%-zVA_!yE{NigeA!G?lY~>kJcbH|QJQz%WweDalLg+t4U>d~&q7hb8&& zuD4fFiy;F?)&WW+Mh?xWc-t|>tr>o;OUmkEu6ZdYUYw>Dv#nXKm?l)vpOOh$M)ix} zk91~T>zfJ9o>xH0%Wqcg7yOsCPz;YFaB>oqi>|KM)V3P6Oe13ci~{t({G+KO(DFm~ zvPyo>N*S#hen8C=J)RD*OO}&g;bMs!F8MV#0O)mXS~%&pig$4x&>aWh{UB2-v@XQT zyTd|(^)Z~{FH5g2)J zq|4MkvjUba))SJgD(|aRu8q^hOd)gX8HnGVI$!I3+eZV+i z3+?_cL}+c}v*-BMSXKmrw3I0xuFPUpMO)5UAdk3;0d<7ou~|reGyfug?9`6j5V8?4 zN=U&ns-)4)b&Gm04yZ5(eJ=}YdG1Rzr8cMpQ-3cKrOFe!EnVz-PNQ|_l)Q3? z-1fjBb7ZFg{0-&lSNp!d+_UASRhp&N+*>Nd=CqM0xj&;Uw0F*w$8W^N_nnXb){9S~)!VL728wARguL7fFu@=8 zGGQg+Tq&`F&!6~P`tQub+~l{4TWT~mE~SIPs=eGKldB6R+m>W3r4VVr09MxHM=72G zDm)g!Zke~+KR)p+^Ul|Xx~>(zi38`gQQj2jh~CI!M}}j`NQp-_F5eLPR@ z;T_J@RslgyA9L%(cK8~N~fx@j)R4X z=-x!bB(d>DpHW6y&7?+>wYUJ#gKh%e^Fmq>$ zP9@(>I1bKc#B<~Lo5f3H?J?EQfP~e^k(YTP&j>_;Mt~9lVfd4D@#rt}Z)`OBBV~&#QN6>jcQ9P8Rwx>!D3B%J+cmUsgV8Ix``auHA zbU>CL)N2@H1B5e7Q3_}~VZ9WjMrTAJx4Q7g_=+)56^aEDdMMYFFV`1oj&q5NXNaVU zN(Qi7s-~P44LIfKznuG8_*WVLK*S0WlOY)&UE6iY9{lbKZ-+qO{IRn1Q{s=s&BYXX zHu{7cj^AvHtqMyF?99I_TD0}De`XD{?_F-cZ;i?@nZ5-b@nEirVllVMN2~~jZeh(JJpsoG^>ADyS1w4Q6%LgD@Y^|$Nydc z;UZ#<{`Tt6gkxeF=_Ief&a!+|=-z@B$3VVOhhIh*rM946sqO$(V}pvwx_Adro(j14@;0GbWdpiP4|tlM6MKCA zCYgRZAZ>}tcls)|Fg^N@t-9%3@RG8zmKd5TG{5o1O5w5^Max%qCb@=Vwf%02%`F&v z=4A_MeMe+10x`&iF<5Jfx)gf@OO%(IL{8hAN5;}?oUUR?E1#CeD_Au%X%!+x69-kh zLoFd%VV5^1t9_>{dtqPCD&tuc-{#Esi=O@L`2^Q$$v0r0MYURa>d|#9*hTeoCFt>b zx2t7;BM>QDhH>+K1*y?9{#Fl6Saikg9t_Nw)88MGjX{u5)x&3I%~|5wHNE>YD+SHe zZ7@rdxv0&-PhIAlHH@GQwbti>g$v#Bc|$?VPT~s_L&wdp$5awm$1wy9oOtAQ?$Gc2 z+H>6mHCJBATD%ty0GqHQ!wYz!2ePWz!ZH&epNwy}~dv?P` zdO=l8`^AfU_Y5A5_yeWUa8&hI@C>D3*%9nF0y@uBP8{lilKaq8_wLT?BJ zR*Yi67)BI9l?(+cPtwtpziHI{fd00t{yz1{xr}+AoyNhcWBvMzU?cDPtazw}ei*}C z+TKz71F`P1?xwU7P2q40_TX8)3(@L3F9naz;zxd*G#iZtPN8`HoQdVQXh=fLCj*Ua zx)TZPHQwy6_!W*hdejs%>ayBvnRCCkc>C>Kut|uF71d&G20N>ME%JX}yTiAGBU%;g z{U)pYmp*Tc?B;1c?XK+jbCR@}e@3m$%RQUrj~;xGS=p>?6{67>hqd(Bxd5IfOseQ0 zY&H%YC;+u=zUbi%dF$$R`tk7FBSnYfFTwzm6yo|sv0$+)*f@6YnvXC7q09+(OUHM} 
z_mlKLoT-ItbG*(rexv{kzjB3*v4sH&!sRix@My9TpWx*P_uQj$aVld58&(Z?h_;?c zgzpl}UShVC&;+$M7IP6|yPmf2*$Iel0*Lr|y3tgpLP8Ss1NR-QTIg0*|75#L&f9n& z|NW#fuv0ih!lUEgP4e`FmN}uREw|A~LK88@<#K|GM#5)cc{s(krxeUm(W<@P`0!z{ zmLQC%i`CaUAj4V6yX6;yh%xL;u*@fEHme*}BL7<5)E2mK;I}Tx+GcA=FsB zbo&eOMtG*9RQ}%$4^eT@wuW{YMnn>+=i}BB>+uGfF}d<(&8=%I+s@S*GM!XVP&SGRTK$DS!kI zZtnyR*bUGZ+Algb<{v7-pJEoB%KNn3>MUhxAzCJTys)NMsI?v?0M(z(OSGlQo03j) z9z6#~lnP7hl5)syUcy%`MV^om3UAj9VX1O>N|E3*i3B*#fTvozFDA+B4+VTs{ZyBQ zqQFq-A`kImTP*_fyc|tvG~GlCjZiW&X0oClmXp3%SWud`2qgk5)%_{d#JAWV)vHAv z`SdM@@6)_@6(DXQk)O!16tG!FBR#k}2~w@>Y~;1>&Q&owW5}*6YY>Xa5eV3buDRxq zST)nI*r>F}P55pC;mF^;4Mpidn|OS2gZuCZj=BpqXih?!Zwy$88Ze{8%*+5Zq%Zz3 zTxhF3A_GdQa?~iWSXfn?2AVor0nI70sGEJh*3F`hK%-+*m^R-7bi@|`B@MvBZvX4j zd~gRg)^E#D2Hrq?m>={>kp!`p9{RBiEUg_ov)n$>{I{p8)JD<(JQc9q1P3d^r#1s(4?WJ*Yrbg(Ehi zG)yI@hykf={o(#D)_tr{zxd^NVgiF`begQZFjwAj6q3+6fBAMwQG*BZK^q_V;kwx* zO$1hFx?^;x!@CCRK(I^_TZs9ni4#{rpV8{d*^kr%ors)aVe?RY1Dg;z+3oWw02&fdp+>-*+IXY1Qd?4fF+>a`-$NKVb%) zsz7eR=oBa&^_9)`aPW@Y0(^#+WGWD_0j8f&loiy3AhQJF+Y*04#O}~^!eD)LiK`7YU&D%y761E$ZYlFkCqU`TaAdeB< z&k?>J`wb3(O_HZDOU$t6il9m}RcuBmHfP*f^?dr`!Hy4_^q8c3OUJ?U8v1vuNBmMW zM)p#tMasnCj;SUH33EcAl*oF@*5s6{8lys|a;cdh9?098_$56!U?Dn-%nmGYv zi>k&hC{|obk_czg*z|_Wf~%$mE_#N%yVRtT!VI9<>&Wjfifppn^EusWN^`Fy-H zS~rE}D)rqJ=Xr9zyW}}EUiBk$rL;0bh)+|Dp`D{kh-BP0AadEi%>PQ@`Q$l7 zG|qBvDlq=ogye_fi`H!%-x`1#SEYGv zMXh616tiU;B=?T_lH}Lp6CSXsAXL0U6Y)v=6PBDYqGM z(euSSy?-wYC%y6+Zx4@q`IMJc9!U`%`?AvPJ&{CblvR6QAqja z&_VI@-rGucrH-+ueYi!Il-cj_-dG%o=iy$8CtYqdxC4% zV`8*H5HPo4`1%_1+6$XHLYQa}78XdBFT#atU4l8SkL?I`qMsH(oNB5+BvLYJ#j$(s z)btXkgYogK4@zrpp1FBC}CuEm3$9V?q;t5~e;FXGiU;Det(ctnir>i1Q5A1)8 z5>-<>tgV@D=t}3Ghj4BoV*cj{6<^jqoCLjqQMS8Aeuv}FqGbG`JL8+Y&s+GMjYugiakD1KZ4w+8DVKmHcYY&eZfaFOuePDQ3uWd&VSk z(27&sVJxb;(M4uWEDPSY;B)tTX&(=3Zhk+^ZXY6&DO9PEqs@#R(eK&Y{RHsEaB5Hq zIhgPKdxam1$}D(C@r|X_#)wXV?w3K4n@ex;SGe)KlNdH+bH2M;CUpk7sZIBkPzpc{ zhZ_wbZVZ!!Nttd;DV2e(9g?dtK)!@&lM9PiW>HGJvoMLU#76OVc3^E582y}Lz&tXHDrBGMK!i}BxN}hgI*cA<+|5R3ydhiF 
zy?25P7(_Ewpc^xF$#WI|Wxm8HaXe{Yi&;d5eEe5sRbAKPC7)!MHie>YBKf7`@YU(s0K&L z($jKWhIrZy17vbkS{QY z*uodoI#8U_T#jMAe~r?&N0F9ui(i$QlGJO{b-_5v|{ zlyn|aPE$N_6M7;1#S!Mbq24%=Q-loP(l#DznV3sQ+2bcbW>~K|5tKC6w(*TEts}O@ znpwd@XwYuW5lsCRTf-$K<<@5Ikq;^IvpBL%XwogR`djp$k2q)JgTWL?c?fBI?Ba?o zL$|lUud~2v6A#mvQ?Ts%V*&EFL1s%UBg3=@W$(~~jql_tG-J!PpEO#l23@ouQ&1h< zgb(v|^1n}z{?_8w%>~Cq(I@?t1R&G)isMBQa*K9;qEn7F`chU_tq~fnEbN5st(bMR z6NqbWp}PXMZ#EI1oH3iB5A~O@hA)!UpM7N%omQP3F z7*O#2Nx;N=-$D9=9T{!cSJ*NO=1T5#>huXVTme&3fDi)T4>B2Vu|f%s@A{S6Sz64% zb_k_D^`sBiP8bv3m$~^OqkauD z#cE_6)dR$8n7fVBN?>$PrsIi#iWs*0wO+`kmI@1sNM zeJm;)t#lP4rs5oDF1{P=3=Bef(2jBw5ZGe9)rKQRXG$Y)=2KwKgT%miX;QMc53zq6 z8p$`3wd}GgVyEcb)Cw|S^CIyzy#QY14qKFtRHj8_ghT+(pKUh+1fR(*+a9RfuXx+L zcZTENI>)2C&d=teH_G20%m0I+sH?AUwAn!#=f5MewY7a)|LAV)`%!;=%au9p)B5Ab z0eTVj2~`5kz#peLegZJeb;xPxMP(EWm+)KyYbO!>1O}R4UJ64E@Wrq(?ZidUppRd1 z1V2z2XZ=J9rhs&P5J8_yG?4q{Z}>@kE{J-wAW8@iN>fUq4}bGLrs@eAiW12fM>JAt zhsO5&mxyOqhkAk&y|rlP3>ZvKzJU!#QpVYtCQr}|%$$(wLsgX%jso}awqrlSr1|g? zu-3Ufst~aM#tKT@2TG-l*9gLDN(2$m7KKuf)wpnl53`0+6ce(E3S(kj#!nu>ta9(7 zXnz_{-Q*Fa_91_9NLSL+3*vfE4p!xn5F03{3Z-aQ2=WyWUf%o=*$zy*X$1c~p$PUADVN z#J5$6C4LyZI4Am~N(@p`f<-Ws@H32f?=xkWDav5z$hO=ch;bNF$ty;db&eE_Xq=LG zAOe4%VSw;jXXs~qOk#07g>60=g|45T(FsPd*j1Z#@8nH5pLksv@rt+|fB!jZ)!zNq0oxYwOPe-*}G zZ&q0zghb-9Rv2XkAw(9inWL>c$v&w7)Djm|#K}T1S`h0#PdKV_h&#{0A%288m2FGK zAZMU`WF(_leQSofKgD)_!gt>fHQ{05$YxK?A7I}lqywB=@R@X(nzGkIhk~I-Ih3f( zjQfo~Ho$ldn-Xn03*_glh#?h?HJUjS5&jN@~ zK78=&H}UjGsS0KG)H;&%0+^dl0;(`G8p#HM!DVD0FuhY$5fL$io#O9ML4R^G36QFO zW*sm`L6mAVPY2-@3aC{1y;CNIl!RU(R}AJ-RU)#HtfCj-&K6fpP~0R1un1GwmRv|= z?k`PBCTU8!qTRIy;R4E}jv@zvc*2}ABBrMuwP5r3?JYH64j3fS6P+~Pkp@G^f zM_&vFXH-ndxbNX*99Ih_)3nD=glv$M*Kr{*+nl;6Ykao5f8-O4$Sg*li0lQcEPAFH zg4)ac-Ib9)00*2&<|*A&oU>0m)KBXQBBR4ph)ml>mwXur=G8dr9Y3L zzkU$m#IW{D02l^q`nurZ#zx{2Xg)YSsMgN)G>eqRwLA%P$apeI97RJ>3_)Kr<^@Om8ULi)(* z&x|-FBJ2c=D$mX!M}$4;R@!8t6=)CN6Ql!R&{uu9>h5B?5TH5PVqL;aByGG9jfgav z3{-e_H=gWQc-9jU#zJ~zdCH3V;5_Rt_V8|;hd>0QW;l|YKeYMAT(lEKeOM3*!!L-O 
zS=*5il28K2M7RZ_cM;0(r_@xapm~w&Kar?>Cj$_0YEm@Y$NKpRV1K8{r9UJ7?uq2f zw`BLCQ$ARApCfbjds{#b#xILpAR6GFROuA_gV;Eqo>b=+OXLL?$Ffy{`jxqF*YIYe z`O)mwQ!e?%op9zyRmy*ABpZaJn zV!@n1@Sc22*>cIyZOT*Pu`1 zXCoE_NBlx8F^ihI;aC>iWFPRfq(${#w6kl2xkw-Uh z_%^X`P{*S6cHs;-V(RDp-^2Zv6&s-%)BU4VbOZJ&GEvF*5iGk*Qk#@12e zHTQH~GiM!@GKIK-WTxU%lxAajrR23ZUc(}w+2F!ceh{fns`OlTG6P4IG&cEM;F?10p1h5&_4`hn60;pMsFlQFbcx= zl>ZZCstQL6*XD*Z&UUU*J-U#91)pddV#l>pBe+#jDw5(}zC@4Ebwv4JsfiUMaRV+d z#gG5i&ue=dUBUiHf}0?P@bYZkQ4*uFRY+mHY6RIxd%yRP455pG{6DS!byxbo0{)W` zi^2)%zZJ@T5z_wusQTFSRp@;b?l+=?BD2`?9oJ0WIuT=Vr5};oxI(1*@AgMkz-2Am*Go`@jl2 zG5vSEb(5MNzlaxiy}kI4q(5QRQD+aa#F11pQpxZsi_?thr<+|e zD$++>x%R6bE1xM4x=1qOvRxBiKwY@$ce@oy+>a}0gRLWE9iZuDy>*IBjqsg_U>+BVL>05=teW_UWlil8TT%h zW*lfbhHnjqq_x0=m?zfO&locZCg!!wE<{>H&$#>^=L@dvw;$xa4wxqz-MhuxeLi%fMNro1ztcpY>c;;%GK+aXRi3#nICrC+eX9x*aQdmWI39Y4q zF)4a%(M~=u1r2+-RtCqM8WsEhCIYI_MuOAo-u8LmbTrzDun<}DelC!kSD`UOBnbjX z%!BU09n9{wAcic7zYz#}$=|pCJf~b?KWTR(m)q%E@fO(XgBF1nAVvVr3$eYRDbZ=@ zSdxoQ;qfXMUww&~Z@NnGDHKuF$Q1bIWSHH@`;OpqYzsjd2=qqq#LLoxOUk8$1|qJr z|K!iKf8sw)b>btSOJHDEXVKXh|NXac2i0yp;O1cP4}?oT!TFsMx2`c{?m_zgj{N&R zjeI}F=0bACrqQaa`z>Bh>)q_m{+w82W;@3o3I5oUsO*v+rB1X@sA)dte+W#S{;0r* z-TT?FUnM7ck3$|{wp(IBjE5_`7(?84M>EIUn- zE%3)(+4G;meTVQaKTi536V-Vta>K{tX)u8*w0PtsE<^2^9}{_bdbwR%P}HTrWv=96Z-gU?Ytb9G~mL9qf$ z7M^^WP~mEZGKXeXw*?pY$;vmz{cE-0^FibXSn+x*dZXo(5M-3JJ5^11PKC@SG20g? z{R}MCc+Dc-7+d;sCTC+h&Et&-c{5ATk!(piQVUKx-QQNY8g4+aP`{N-%-pPI6rZ{z z+^%%kjtj|m6%VVEZ8KIX{Ccj1rvB~~X7my&kz(nn;Du9|4TZ3O(%V)Q$o?i0KOnD_z;|st3=+ z`IA4Rk-pOnLXEn)k<*@5FT6ghXg(`zmm4p{rzujh%v9QLE1#(ly3GH5@Z7=}(WLsH zt`!^`?zUsPPeYxHG8@%W4v}P>lY{9zK@2gW2HT3Es7asud%i|26mCo597IRXrN>86 zV@3+noQv`aQbckn+FSJZaiH1}fX$S&Xwlq6)@Y>31T6WYz^EL^GIEtOfeUtH%g7hl z1R(ax6r#9H15c1#Cg2hTh5j$qj4wVeh@FiXUgcZG*mwJC4)RMAF(omG&~43yG@B9! 
zsw6yJ7{V@l3&OKT=NRvNT@baG2$H4<{yoeUw+9ZU1e=ydcn zp0W#+>{HrovDVh#{emW0&39-q;s(rE`xbPCSCr{Z3h~FZzOC80O97CZ;T07bFkqSy zvELa^F;YhVixgA>Zx7Ibu(IsfLJ{%GRd8TcE`z%N959kasw&zuE@`GH{7_D#m%)zG zlV5*w@(_xl1DJ+fbpYAaKry1&zLvuOCLy6F7tL>e1FM#9b-}DHFMtgG14W}vn0J}s zm0C4h!5>k3BUDL`5ROvdEckj4Zaje_js1jpn)hh>R2~s0euQ~i+q_Ek>%dWlOvoQx zS@_WLF??}W!80EE1LolAj9rO@+nCi0M4Hl%D~gh1e>Jq4Wvc;(|5Rl@c@tgUv=-hj zz}C3I)Q|q}U*z}rLq_|HCXS$U36s!%LpYKig=ufKUMr-MSfQ}KYk4%&hz$N}jpil^ z+fhJy3jFxYJJe%8EoFi@Y+r%@c|rt;#8pc_97b>6m*ub%g8M=0BdI5kvDK6jVZIYc zA_+wM9__O?pjdfxb)J|Dae!tXzvR^K;T#->~)vejQ%pQ|*sjJ(4^4-Jw7&Cl!VB%pktb5-I zwmiCQi-?bRnIgjX5%=`6_{bar0;H=-`M}`P!Ghq&L>3-`9Fj@5f%P1@dckaVq{w9` zm&y@k!=Rr^ly8Z@f_5o)yV;wjea)Om_;8W0p=9iRev(L>yfg4gO6hXc35x6bwD|jr zD);>!|5{4I0erV8`*`2_PS8m%ito_(1L)GsvJ{#^$83mB;bW4Gj&0C*h&L}{%pMQo z3wq(TBa%!>PU7GUpA? ztb{T&^ge0+EK@2bj49cR&zIhU=(+V}OlSTjVFjV^aZB3&3R{foi^|Suo3DRAO(h`u zPqn1!toPZuxU<&|fe}%?;!07h+Q`NGSw%y0D7P9l=!K0`7Mi;v%H8p}DdT7|V{TPW z5>JDa;ppM>2hu%FqQ4r0F;O7Aq~`^#<2w1Bu}ATmg?ojBNI>$Ri#yjBh`a&(#&lqL z*jA6kJmShcbZ-7%wu=N)NqId#y|kSphUdOIZ;c+!Fwqk1k?YSG*|hYIc|?@@u(?zR zsWB@_HTk}vk^ZubHZPl=Z|Rc|&Mq@Cm6qZX*jZmCF)>URW)yy)kT2|#tB&A1tF&Lv z{W_Z9%Mt)k(QL_8D{E>dz~xLcF>?uGxzvDX3t$df~5N8+>m6= z5@M;V_p+!qTg0ud?Z*{*Zer`*K^Y9ai8ICF5AJjOqR)!&qa_h-hjc&E^mM$smcN%D z$__o0%)z4FXmo*-}$ommfwc7ZH8^;AZ<`P#H4I!|`R|?IcCRwXplrq>Ey>lk>%f=bZAXm4M1p z&@Hb5`J}Jt9*&M9?G|Rs&z8l@ixxKAe3_1Qy{(@7K#*nmu+3N4*XirH|9jOgvh$Zia$qX=mN1pUBl{_Y~J7k&d>OCy7$uV`oD*6yaOW2OYEN)yCnoH^cak5YDxX z0kvZhuk_OI;7JV6iqX~|Z!D)I0K2(2WI-3(Ai?H$G{-A(>ggBPJ4uvl&N3a3WMw7F z>OMOX&yJNhr}Vc3U2)CELC@AkiE1v&v(bJ2<(>1FW}+UW?b`3l5b0-GPE7}#Ec`AF z5jB8an7H6HqaOza611^yAZ}idrhgI5y$^P{$K6{sXE=8tu}0iH%H?v4Zoag?gaik_ zk=r>N>Fc4pfIwg(;~UND<0R11k^|wCf79-5&aK68;tR74!{`O}LdMVU1xdbHRI=eD zRZpG7r>J^4%)%E~t^Okggwx4#2e$!sXw9;_h`L28pN%b~7`AWt?7zJPtB;GH%X(p8 zgJxNA6sxY>m z)5*8QkwOWm%FLuCH{-}_f8iy0Ii-AUe?l63Pnz@VH72RCf3c>w|IMRo1v^?Ad_}pC zx6x!8eC=~+%YaZG{cpvZiw9diJ=1?h;l@HWEYJ#HPy}qsBlCo(RBO2v$UgDh+PM9A 
z=MU8X<)>^IRx1)7tV@loPJ2p1S&QauXYxsPT?iV(-hFM|-b&yO46rh;r`G z-(0@G+fX3Ofv8vCt!bUK=cOlYw%=9SKQh5_mA1HXNJx_IdTE`eP!gD{F7Zihd-1W& zeB6XqX9N9gDBAp#CnuAF9$oTlTM68@xdES4rAqi_hv;|Y1CmQz-O(pMXx*&Il!lg!E(?9#13z1F>7AIHO1|% zZ2W49^eY-3*uaqZo|016&@N~QxgErn2*nPXDA3bZ9qWcx*AB^8 z5My9gGSxNAy@r(KTJcFI7NgA<&&-wCY>gJjl<>idc16WoUs1)`0%=IjE#6(8Xa|8l zrSG`O8+Z%SQ2b%k;3M;yIdxhdG!ApqXt>x}g8WkVy0s@JheMU})vU6;$`h(0?trG^ z3Ql=)kRFM_tfivs9q?NWx3x*c%8xgNIMG{tgc zz)``BpJ_E_)YThYc2JYTD8!LDa)VVEmD#{e^x#S{X-syUIO%HKY0}9l;W^W^@`Q_7yl#5ge+SU5(QS7K9j1IZd zQKBZK2BV90);E*qtO|jGvs_PB$*UU#KD5R_5?d!ea8p^dIHHGZa`g+-DKUzv5$@Cw z7VKQ^24RD0LIbz7%YgM>%~GbJLEZ70z+K4ty61H(Q1JnqDKh3cd06 zB{7uY*p#$iZI<_jT!O4*s80+xLE`o_>sl}vWK(c^Aa8rme0gf&APv!*C2t9!;_xa5FHrWKG>;>3k8=oq3m8o&~ zyaBjKwRTS~Q~c(^i8qfqq9PI% zE{=iB9pZ$3nQD-Gn$&R^e&;SV`RjfeI1smq6Yh*r+QBhERE8~H&S`o;i?<-H{bhLi z77l9fo~7;k9Pu8^#dbk>|7R>7RsnHh;@78@_l!5ZTtHJSsbK$2^?`56C8e`gC`Q(+ z8VrEfl;Tew<9hR3gEN$9dp*<4*yAQRMmX0o9BPU1+%__tWfmohEI!#!8G94N@ZX{* zj6NjF4jMMd%`*SkSxoX9B`S7BiNMv;?>NZmADm?l*a5DPeuV@uZMQ}&@N*d#M{mXl zC;SdFlFW`7*WZ`fmWAadv}$OVE7sa|azLE}(!QM2KO+Bc`}~~_a&Cuk(&)<=g19Tt zMj9#bbWSq{9hIi1^)b~WqMuh(VhaWYB^Mou8$=d-5#_|D{1uuz2-I)l6ixENk@w+8 zLZ`%({k1Y?zdbE&M#dH$wLi0A0oglDoj^YULXBv>Kh67dlAGHQuiv)`rU?loy(`|+ z15gSs@g5;+Vei9^7=uwq8b>RN>L|*>{i;zm+t8o{SSwI$Ca|%M%lK5BkQd6U-9`FE zV<2KQ09MY^&^p?Z}(mVO&a+@>j*PDYAWik6mE3IH~f-jozmlI_T{ zJaqg^8z=7~)?aUyUb~P8lEq4GF;K2i)4p<>Y1a2q5t*|anK3y`XWdV?_VkpOi_~!a z(oqbjVPw&twJo(D91ciM@DDBTH48Nkn}v zGU799n=b}uj5+G*826Y8O}HaX-LZt*wJe%hFjZ#R3)~jsog(6WoetXcoS(7a0C13S;NLZ6b4@S4H3>L5ksFMzq z3sh;p;T|USn|{gti<}7mhY(w#IdmLte(m#Az6=l)bj{aGh1knFm^?*sC$KlfLIBlAnBz00pa-01DQ+HI`U{^`|xb953jjq((ifOh>qg3d0<0re6si{;?WXr9n3J<(A~W+=(^>jC4L3D z)Jn-kSAyX|n#K=PavnIlrqon1B4{QaL;L5UnmE>rhtp|3?E^I6cNxKe1xJD@vRh-Z z6Uujce76*Zuy`DYtd7LD+$%P9!YKXBA*7#iWK=4~(QGvN?9{b;HAFH3)74Y zNEV|s?%>|=h_VzmaH{-X`Q{g0L7t5Muhd0Q1XZyd)_5X}&SJg}+U=N=C^3e=fXPoE zRQB;I#M^jg1rcRcu$*Wt7*F(r5l45&;CEB4285|jjJ40K2~Sepbc+j+#!t~;II;IF 
zGoGX?b=paGFLWOM{$E1ewO)J97*&^7=XXz#?LSZ34{0bn@IqJJryEqN12b$B38H>6 zC8;=%TYp11v7Rf;=~)t*@Ab|#o+AM??9W^}Di8brSB?Ni70Y#Xmh(OZ&z{fHBs6Di zVNenzzHETRfVst8j?cEPKMjjQc*4BY@|{~jMe2ntxU5F754p@v3QrYdiOwI{1$FJv z5&Xg3zivq*IvDgI6uoRpI}|%vvLC}@gJqO7mwDvctmeT0O^LXiy||S~9B4YvpuSz$ z*J%k^E>aBn=9FXp-HU8Wc@snVC7ng=^vOG}$}lA}<>iLXpM~N}`&D}nBk->GTZVAF z&Zju>nlF<-Y_3K>KgX6DZd*X)X4i@)mQkcy*s3Q9PNJKS>jQ{t_$B&XQT*AqAS})B zFR6jv80~^xciva0>A5xvwOW`r+ zr&N@vvvXM3=i=>$V;!;g$vVjNcaT@F(9Jr{4lzw0_4@hfg5unN8g1o;Kl~lG@J;|2 z1#-QcMq~#X2^R(lI{Eajfz5ZgfNXEI*1pp8QC1r&`esQYhrSfFJ>U1_54~1z(5ePV ze(9Bs%InI^l!Wic|A{Lc=Qr&zSf#XO>j!)5QT#6vTwQuP6x!z|7@1strjV}Zzg>+G zT~AGurl0ww7%42#zkQgF%Aq{4NAja8)1|e-%RR24=sIQNCniZ?YxD=TDfQ54tkEyY zOl6^+Wm(>C&Vy)at6I1Mrt?Rqfl|~CnSDdGazE?BpAM8iAhQ3U{x(bNZXd?-e_3uq z)-^3==w^pC7JW4#7)AtF2cd&jvKY9a`y4HrsGQB1Uqq7Gze(AX9R3fU-Z?mu@B1I# z*tTuko@nD_W81cE+qO2`*tTukcAmVyzxq5?^G{Fpbl-dXO!YnIyfE_FHMkI{mZC5& zbBFQQ@pF$lGa7M@zl1iQFFRX&w#P!=O6Pczz*c@7XPQqTS1dGjEPF(vEOPxO@{OMm zD=$fHZRH?3chf*JV7fV$Py4>0^%17`{=A6EoKpNZ4x>fA7^SPD)kNFwp$UchT0qepGkWx}+ zxy(eH2*+dOiF~=4f)<%kplOHj(r7!U{B=5LEIzQuEk1j1;bj`BgZY27|^5&g)>FY4j#df6rc= zzTQSl&w8Frm$7S+y54Gb(P-A%&|Xy}9u8JiR24Oj%!nCXmsr~&nT@{tlO9JjbM4-Q za}R;J52lZR_NVc>YdI1UT~{ypk8M}u6qGB56J9aFC)E8stV`82>4HDooDkv)L>YDcc^VV(__e;$uvoG?a`EL=??hi7p+QZtG- z1r3J7cK^Wnzy@*__sBGg+;NIrzOl!h+mA$wP$q2Jc8hx3tx@{-g&d~MX9|U@z)@#4 zx+h8GijJ{yac`!oL(&EpOV@W~>o%7$KmHEJ_`u@1)rWif+@g|=Q;hrF4ieA$J8raL zpP861r}0V#61iN9?=yY(SJ|YHoao<$)t0)UwP0p0NrO8++k2%9^Md7b&L`MNEFvUx z^j!+9-T5mFNnjy~jFO^}NUTh&&@~6J;eWrb(X1~yN{=Ix{&osIa+W;V4RGQh4S`pm zAUl`$MUW_cxU?qz)VxaYC6j1Npp;=wO#Blp6B*QsjA6Vn`#3}N`~>i3@k^6r%G8fx zM$tEQ=cI|ngyOpnY& zG~y4JP=>2c>X+Y1lpZPd9bta?;LJMSeAaF}qpT3^(kM`))5CvA2Xjow6KS5`zcG{e zAxya#{UIivCP|x=yg*Jc-uKZzJocSJ{{-)9SxO?!Vp?0@b2}%}q~QM?0IC$u->LsI zH?tSFa(|n6aj!&?bG=}ynU?b;`*N|LyP0;;Fg;p-zg`wO94M4II|`KjfcLNly-d%c zzNto$oh`peT$MNMWGhAyeH#dA%h?0NZ({rGx9&+o{QD0M{h&v@u#R}7$$aD#fs{#>UyF5rm=yZ~`YGByWk(Z|Q*DBGTg;_br}M4SEmnXvKWFMb1JDQBrk3 
z402F=u@p%o1&K3vfTS>GrO|JsW_gq9bduaGo4Cm93G#{?2JpA2ffSg_J1xp{AsM2J$P57PJ}_ z7$FT1^*V`>S{AuLNvYcXUnlrKyp0#0Bv`9Nt9!cZ*V{4Z(C|YD!buRGNk89}b*_G^ zvC7|rS=ht*xO5=%HpSJg@TG5g zbM4m-&CZeP&*a}m$reI{__1bqtn43(g1?4h?HP3T0w(6oTLth|GfRvlg%kKn*HD=x zks0wkrH6YB7?rREIgQw}=g#G3#D^bZ{kKShrP!jh|t3zF3d zoZ_iQDqEDh@j2Zh)2|Y1Xh}nwtT@&-D!F!^h>X$4(&S9{?%>AZ6PYNn@6!E#>1V?F zi57<0%}dVM?pOs3<&aOUg{eMwhPUohgyFfLD)(rlf25f8-~Qp{;+|B|E$V#nP1_dgmf55sJlb2R9TfW(@_s;<%E zaiuqr*vqA~-HWKJC+nsIq6%MWofxJ$(CyS(ToBdxS%76JVF%;k_1D$UXT8JD(=&6u z*|wphL%LNnU;z;`{@*qV20;>E<<7>@Rxc-TC69m zCWO^?Ykz+>dV3*i@l0NeL-b@OM~l04G>kEbrT_tqt;WeAxCd`<6gX@M|Nd;+>rL8J zFS_RQ)ol?RNcm84^IylyHDovnB3>`RdEoQQ%Pv&x^pV>!5e*zjRzjqTZCuWVN!vS9 zx9^Ne4Z)6SoDsA+qJmb1zH;hlI~jD7B;Rp-5WZ>Yk4o*3k=<`g-^j~L5c~_zpkTHX z-!~X~c9^O&mXAV&6>qoDZGJO*-iY*4L0pjoBuVme1Eq>1Mklm=FLtasP% zjJ;QWdv;@9%z2wwNC#)q$-FB(BryKrhA7hCl7+v#%Oy}_plrK`pO%1Jd04y%g@~h! zS+z?#w1}-Pe*&CiW7Foo=$bAkaWqIMKxrgVcb(8ufb`U=!JGTwp-SA9*OU_<(L>wi zicin8eKOrA@UEESHPThJMPr(h2dX*|D7%m)fK~r$Iz0!UIcF$Kvhv`7NyfDAma;(L zi71Uoot?!sz20z!w(P__moUxVj@5uU8(GXSq|omN^VG=8f#=Y1G#8@%+p=sWMvKJ*xZh*xuo6 zonBIy;6lK>@lHpuvAVnU6{i2oK1+BfAC-nwHoo9FsO&TDSzyb!ozDWHG&goN}`jcq#^86KUwid!NvBkf<5bm95)?A^*n? z{Bv%WJ*!e2j%|a>XX(dcuJ-PcSbPCdVHtaZLhmAo!&{b+Z=Vyd3IFAs?PdLsIe4r~ zXx49AlfPtk(Nm{pIHR$9A+OQ zB*2P;zv$#X`@mFSz`-7oPq+n^ZS37qX-%>ev`IUAf-B)twBeLgXAZfOBd;B#%nj%} zN8RulABp>%hVk(X)X{!}&(W%bl zU{*mSMlu5Dy%M8Ir+u4bPh<_q;{&eH?SG@4VW?1=6S;^MFJLL1fSSp|wRa2zM;lkD zgwTBAQZLtS-(a}<8>3Ay7r!B6(70Q9;&HvhY*kOI`z-4|fXYiV{$^dGpK&6cI3iPI zJEFir-=7$=$EDTolUGXmhqH1x&gzJRnvzk4F703errbLEkKm_7;JkJpJwjV!?`{x$ z94))UIXhB4T=&S9IhG|r8hmw#cB#zfkJ{*|L_(B-vfVjz>b62%YN*x?t zI-a{U%E(MSTj<;%95x!xGmR#X{%k?bE_+SGlTvuM8SKi*)GZ3wV7Z|(dy7Mz*rrNt z7bCNru(Q_05Oc9|_E5)=#Z*BB9}ai7*Jgrx|6iTMF|hOiWJcVWG%{*`tHc^>d0m~| zu>`5nxarh#;&S%aAM0zQ$ZjW&)-^nqocEV->7??NL9*XcG*!A)^RyXi6(0$KA3t!k z$RzLaa>1|D+^=xomr&oGY}c*bv1FQpL8X`Zq`^4pbh)7i9ujh$0ovd3&M=!!Bdzo? 
z)`qL(ZN!D061%;${sY6$CQ)Ka#G~X)&`H|S8~5Z_UsH5HC9^`L`_nA*g2FWP;QGjN zYTfnbD^cS*@x8}%55}zXgRv_EF)YDR0|{PorP!~Le(I#!4c;U7*|%%qJsHXUxB;hl zqnQG#hH`P=llXM^KQwZyi?z|FTa9MFaVJjY6p2}@41lCmrvsG$*bvh1C?Al>m0`xG zsM5jHpZ95Eh_dPLOe6@>>%C4^PU*G++}zPb59!lSWP1UC|4 z0V8Y+%8+o)TKVa5jN;K(*1~kqgs1{)i7mr`Ta>TILT0|2A>D5RxQxf+#p?%u7X0s{ z<&SrPkM51riIo`X2g~ndOWW^&@2_!gj7a7!v)@%bMJn*Bfi?Wu=-y5wwlN&|Op&NZ z<+Aw~du`<0+ncCm0XLy1=BstSAH%)}KWufh9}bS=4Z+ycDKuCM99PHO+Kbsg#VLs!_3bLRq69JpSm(NBz}EB z2sAjjGcmOFxw=Lbi#nE9n3ZkMGkH=a1QlVIoi$+OgpuaVt#?m94I{YKDD`!{fZMkU zx-<6jN6ye^{K?>g-KPQGo_Em9U79AALaFD}ksd4lSxye?n$&n@Jnh9-d^s?om&E_F z#2-EZ_ABYBpH1`lY%f*kY+Qbl-Uom0Z|MuS=*dcYoZvy!>ggX~afp#5z)R;h?f8yw z!?TWVg7$Luk*oFq+_iVnbFzAuA21|C9O@#m=9!Iok@=+AQdP0fnR|A(VUA{l%5;W8 zrKeE_@hUPEcAQ-h-5<1ydTEc`-OhIVOI{;1NZ}xGJ&+M%tnCVq@DBU^fmQpduT96N z)%97YsICx`)srmVohTJ*@hDDhu|CIvFeDPOEMnw>lOdc4#F*hQHSmn;`D#~rVkE6o zdtxF1CJcgxGc)~DkhewZY?Iu4Y(aQxfEAd+T--nCVuz$!Ibx)zJ@nxCeH^AeRIKRk zpUXvI+DE*^8HJY@H?(NkWVMq~w_zxSGx6sb?oN!t2~F=xCB^ohiVP z-Mw8uzym7cuV@;cD!~v!wf_*Gw=(E-PseZ0msLbeACsU$Bf)}01qr9oi*+-5EAHWbXmx4IW&>8 zlc4WF7y~*4E{f9fjE6RO+o+%`A^=I~7cK-Y1P%}>8Y+W+cch>At7*kd)XPKGN-uK= ztOteTkzlhBq97`w&lJ2(TxZaSO~6(>9zGl{t+yW&?##J4EF2CD5GatsuXPu@ER;Ip zN_;JUMtspel5CfDUuC;rFiHe^@E%U@;NFvc=wtxNzG}!FPPLT4gFE8(&KF*0deOIQ ziT8ayERYHPOp`*9_EJM|hq5n3tX#i03F;b#81cLF^-t)|JA7B$EtC7aJ|DTEcv3GL zEJw}(b~w90)XW~fTj1l81YVv)etVanaCkAWqpi(wqUQO*>5dKw1x6>t9Jr8-BPVbT zxLAdZaQaJz(cUJHbJ0%^$73EkyieF!)oMJP{bWU5y+c09(R+wS7zqW0nj*OC24PW` z<03}krVIzKa}E|pq2j-W>#&wwD6eZA@1O7?{_$IHNEC!gIbg!mS&jG7Nd{3IOX_;( zv;zEsd(4|ZRKZYAeu(e#n=PY{o@iHB&X{c=ZXo;)P#kIKpHGOkF6kpLz^2E02YdWl zAI$svfD`zPb=K#t-C==_L7rxuGTkp0^uN5L944f5ceYJx?tq|D5Khc&=Kdid&Hsdr zFsRtxot!u#&V~*XZd*ucCB@Ghxz;vHRZj|Da!Iq1+|<%tfBq}E0#kXy-O5I;mYJP} z#1M<@eA)w-&-)Io@5NM6!Ug(?}(fU8epiAhU5Mb~X zblf6wo&2bS!385DDLu8?#oo1|lBbFi(JVaHF;;dJD}F-FP7?QKq1B%xPmyfc-VoYP zd-mm&34T%zaaG8dD$$RdL|-`(cD;nIYm?u}!oL6jR?>W7{d;nbZR#0FKnT9MU6uVD{SQB9E0{Q&L_Wn-SaU%wo5l4g9Z6|!9UGrIn 
z&3upjOt0f&a)C{Imixu+{XLXpcl{x`1)AI{Gi->3*sHtFZ^z7k8ffEm`kwrm8;bu0 z2`Q)AB%HguuaF)1!c}MQ5XbhBa{J&r=&T(P z6LL;2nu}-X(^00^w?Ow~yC?kA;r8L3;rK`8umdP6RP(O@mb`zCiI28h2hld!${(hAA;cUb|4?Sprh7Qd5j!b7u+1_IB?1 zy5X2d?%|AVDo696PddV>FZy~{DfE&slYG2Es!SW>u`#n&7t^{S7V}m#PuZ?5RO?5} zearhD-8Blf?*i+uj@Ch+R>~jN0ALADKLRq27`I*6!!1_ND@gqsZe*TZuzu`^*p|m1 zH;DJQ^^R>R{i%ujm0&XZ>Dc&iQF$_;SAV<>4g4v31zVF9d(rTi3%lQ(ev;jdGWTES z9glC;K2Dm#8d^Jjt(8?1a*WyG$j=g##5*f1ceMREgLerqbf_fP$&gWdJOW@8p0ok% zf>ju1&CV4jes0_{F2quldlV;E0j61D2wwfbIPb$37mFwS=tBdKQjlao`P(9tcr(2s zeB5%Z*rWuAcG(4&6H`EeD&dbRu-ecKy!eDCpL@nTB!s-yQAW5yrcry=X;!-oCu0QSskR1#E=xg%5H9x z{oj3~2ufZz9K%iazi^Ngc-=pDADbNO%fD}a1^WdW3bai3!}$sUdW3d*A7g$MgxwIT zedONdWh1OR&g~`cI4EFpg(1a4!n-^}-k-4PeD}(|JXwNe`QGwqc|VPN?Vpmq=H2B( ziT=`hDKIa=_Y4qojIy8*o}<|1d>Tyy+A+cnCthia-v6+w%85cdKG>d)hLasyVZfG) z`AO3gn?Iv`9X>;cFGFik5J7KE?*~Z21Lqm^F}4maQ+1BIy-d-Lbw94Nyq|DBrsgG& z_OD`BgnCh=8SPJ*xT7f#WwWH^gF`px`S>>g!#+Fb{2zPM@H|ii zRd>9cjZb$;Y|#JH1#o=!vfGB}I#=kkXl8WbC}*b~GspcW$> z4)W%WqTO&)JPbvuIPgav53X~$Xx?JZjD04F)om||0`R9@#?d#lZMXPrj3xakF?vKo zBif~NYRU&>J-3I0No39t2^GR=%vj?QJlX|e97Q}r2FcmtB}5s1oR7OHf`&_m8DEe0 z=xv158VFmXU8a2_uN2`V9X$iHA0>=22Yye2?@#PJ8|oVopp-V9G>aXajO z`++K#4TYj8a0FV09lW4C)bFFiYBcvdP^es}b*Y3Y>9kd`s1Uh9GP2Jk-9ovajY*0S zovb`y9N%xYMCW|g>Iv%vyX3-OFUM^}od1q-;^;tO_Jr*e?YaChB*_^H3Filc6MzCL zg5-dNq|E*+`b;5Sqb?*QXjeinysQqi%PS^LitquFH|1LiHc!s{9X|8>hHh({i3(72 zw>JhmogIyMKia(le}za|69?DouH%~c@KooaZLtXc*d%M1ha3hrDie%_k2gn?bRt?J z-zPcL56y25=@DwuDb+bE&H|2E02AO|kpHW-HsGOmz(n5y&m)-cEE~vJ1bA2s8Ejw& z^h-X=OKsqs87SJL4)9pk_v_Xlk8~rRvP&zbj2h*YEuzRF#QNfH{bI{KvD>qjZ7%i*i17T0e3p5pH6QXiy{DWL>yF+SUs<*I>``LVk%CH-pK< z5vblC0hv8zNz`)5%-NIGQv^IEV&^V#f&vPn^MzmHc3+P*_LQ9lm)**vEt=H78niYU zQYrn{>4{{QjnugwD?Akk8$1qr0%$>%9+kfEk9STw;h8!8wRezCq3Gcjd2|;9533OS zp&>6gJ=ToI%yyB6t>cR7t?|53=^W^vV1(}v-1UL0#S`cCq;>4hF8_RWHzmWDMDsmr zUM;~3c5c+`YbI23DA_0^C+@WO%8kv++5S-c3C8%yIJp=3D15PIk?N(qRSYv6>U6Cn zC-Nv@$bNb;$J{l+{5*=$AwHu5n_n0D2!-x|;@SN&^K4$h-^eL60RlUw)P;IW&t+A* 
z$D176yKB|fF-pvJgP8A+s6M%&Y1ff5w@wvGm4@bO)y~j7&99@H?^Z9lCH_O7Yz%S*F+_b*Aj7;}g> zaI@{A74MoG?VdsCIRNz=mcl=h*kCbDPgE|XZTi{i!IHqeus)&#(b0zRnDA?j8qVC8 z{F^B;HZHRdK#MM5vnc(D$D;!xqURO0prD1%(_QUok+dIAjIaZN?sWw#3iFbfdQA7JB47D;$g`VS{j;(AFm@|I8j#IC4q=IKK}dB^pyfOv#r*fK__jV~Gp$bKm`8Mt z^=-1om3#sSOd(cb^05kd(^wwq#hPVG?Q%N<$z5@%5)7Gmd-0>I*HN#n6>$=84>yDG zVCJu8&Dc*Lq&+}Dywwczcvzp`2Uye(ih1>w`)D-u7EYI;t+v*S);LbyMIbfs6s}n$ z-XPT#4ET{2ZS-ASxmK#{npXQzoJgR61Ail^nncO7Mpu1sL&@PtS8#vLuf)&iZ^JMczWgjqf4wmB4eN);KknR{CU;|8~9T@ zF6)2*a+5-#n#GdU%R+OIkPNYQHbFy=jZZg^x6+5 zV0w^T<-+wDELNA<-5(XZk|B1fDc*!FIn0~Z8a=(aOZ=Bk^El3N zU8RdkM$*FxSIb~T8O=B}FRD^!KH>8l5lmM~oM#T~9AkyIMMZfFyv?yRv&rdT?YYq^ zGApeXQJX^4@l`hlScr^hqjaL*hS-|giLN5}K}CsBoc#;)oySnGw+_py9d|$#PWSur zEMifj$RtHSX1EIK?v?&_{~LSWu8M!LNGvm&Wx)j>5_ci%`X%dZ@$Vl{_9PB&&GxR& zcraJ`@r6=kQ3L-Uei>{Kl$r9XuD+}4b**_7L?tC&SLcM$19FF3c;9=E=yP!#*zZzP zPTwJ^5i0u*pKR5y@z`3O8^oIfNY+@%Cl2$4D?88Oeu2Miw`zt13z+{5A`9Owt-9a6 zsc*SucA3A%iH?ppf(*!qGs&(x@7NdRfs`K%5BDK^If0(xe)I5ryof)qxb2xIt!|q# z5i*mXsa3Tglj-1XmeEq;x_spEj{*hullES6so3ek9Ye7X)llPSO3H)q- z{}`;KHiTjE`aEsmeC`rA^^L~agn4*JKDGk7J%Nut{|;}aj%~2e-9f+*IjE*Szup#7 zQl>qIUY%~{@O2NdF5Aj$X`ZUg6v>b>JYe9Taw8Vg2$1u#5qct|)G1oRzTC9d;b{|Y z(lFid$<6kx+|cGJ+Q6n*MNhBZE>`EO>)%Lb$@28dY3!7-bc|l?_KD7Zz=H1e9S?3y zMazXxcBT4TeXm+m{?d@>_4z!tx8ZvrrRAQR4+49MOflEe)MpmcY!iH)LuIu^w}@EUjz9N!G}1?LSMDSN42x8NzAkdQ zYq(Q65a*qGIyj{9Q(L2XdtZlcnoq`nye1JI`r>1s2DiyLVqUI+mwz&upJTS!a2^iyn(afdTrvDe6uNmyEKu(b@CV%>>MGixauv0L`tu?w{+fp{w_6M;XnK0oG}qY$M{NGia9fmmTq&sDUPr*FqcT^0^2a21ICT+{_NZjI`Qc5bT+PG z4sz0Z51h*{xfR-=*PKV=JtcBgd&HADDqDy5_wC&jw&}AE(K+>&15CY31+JE1gs(U3 z{tMDo0iTGE8~pq=^ma`sY>?1lbnT!{e&kicE<5~Gj^6Y`_rE%$Eb%TV4_>!ai?RxC z+-YxqZ&&bx*@n~_eomylcZx}XKQDhY$HmUm^dIpno39-2U9!KDg!z$4aHCONpi1c<#A{T4yYjum0W#XJGCHx`3%A%S}6<4nmw>f)$AFP_K79G%=mkw-A zKC5*$jBiGSnz0T@lZuhjBQDB(cpzQAggqaKp!#AP_Qn!N@9~Le!9V3itGCFNs~z}^ zi!vt=3Gj%r?)1M?{ZPGuo6P2@Qp+#@QF&4akqT@YWeG0n@#^h3F4je&{jJ_K@I}l& z)e(XQgbx)An9cm`y|T}f4j2ddqO&dxgqUi1#93oGTAew9u*MBRQG)i(z52nSx*-(M 
z;s7(6#%t%-b=uPo%bk5aXoQHU62*oOB>c38MQtO6VSE<`=*#7(v1*nhFTzwM=q6#o zz%Trh2ZUps0a2UOER;4%^ha??xR3b=TuCG>hhlIK@oVQBTbjlCFXfUnw4gs&jg?+5 zr&s9&nTxL}H)?XtP;rcv|3 zZj*D)Hqs}kB7QIUTBH(>MDJ$CgaH)1tf(MZK)PO;Pe?4fCwP< z^eezqD(A66|D;`D^{RldH3-P!@GoqQuQbYUl@R!AGz2476G*!D%*o=tYP&0zCrc#0 zU?OC%Xn<(c1aY*~hw@=BsZQ3#GUE~Y`B`Q{E9d|K{O_O0?)HktN&Np|Y&8s@3lu1~8z`srE>(tr31 z7R%}s{HBV6mXF2YKySr?2S9+MtKZ9pIjF7#CQ|k+v4y* z^xXjM9aHbkPpi-+Gt;fLXz7V=yWpKmK$)&fGjIaPLL~wA3BNOqSOB^=1rk4sU^)sS zH==bYNaW5hnySik|n= z?ypCrwu`AR^V_+{s}v2Syv$>Q!Fs5R8w1*w3V)v|U1-q(a1a>Q^)DglK%M^F&0z#s zy3Z5?9oWJPMTc(Y4@y~?=O_&ZR?z%j5d>@)z1OO}79tHHvu5%v+7fm4 zHL-cp@|)Wf^k|H4d@<)U_Q|IRs_vidHFn<91~-Sk!+nQn_~R|NRKQK;dvIZ439|O> z0J80#T9y^Yh(}y|>adZS2PWR!Ca;JheJJk?ZH(xOD0PcMdOAe! zU0$i_^q@y_*-N@D-(&jk=jSm{hUxo-T<`ScqTjOM;lFHFWwgxYFBVp|E^e%7{ZQ;{ zL(#?B!&NBdspafyKrj-I({gf^DW~n7|Se%A@BUw^KyGmSU zB5m!yhv>fU7NPqdFjK!S+^1qfu@Xg%J{pY>n2HDWYlenYLTMxkihAehqGWNZ)e4a1 z>Xw=y2U66+$XiKGm{1{G^KE%+Me+gV4Kl5XLNKQ3ZbwkKsW^0Hh+e4ucs^9PdHy9B zt27T>7jBG+ND+T91s^?#{#u)b23|x zUqGj=47UO~ql{S3zu+t62G130$A%jhK%hf}*4QDjL13$;E0y%`^JJWSVunknSoWRw zWkE6J+Ivm|*#_n^%>qFzlA-&ffgrfbi~pn zv^(Tz?O&g4ZSF!`lco9Gf_>d=e_N)2@SE^b<2Mh8@GbFmi9AjU94F%#cBrWD$$cO4 zPJ2^{&yf_uZX$*iWc;uPsFO8P3q5kV+;der`%Gknn+%ME=V_}*TM|<3BAr)O^K$B2 zOrFbCcuq1Rj8B|2K>&j?Ci+?Ov_VFvkdvN`5tbXginCr!DkUHTB=mZ}N@A*;F)c!q zD%~F@fgB^TrlQ4L4{i$;x`|n%&VYOxdAYUPH-8{;2_yCbVMV- z^?W^51T*IBlm!V;#gax862pO@8R(A&(X!)#2a`xr#+c)w;Z({Y8o6tPP*S}q~UNwHRfy>#%klSEDW%!nN$^a%O{E5pxJ*ByOl@%{dsql@XZ z_xnU25n;4vuosDQ=AB*Pc`fnAF!9jC&CQ%$-6EqwP0k6Z%b7XEte)|!Wqp7fq{kEz zpTjz*M4{@yr3NOby~hfGF!NtI9N*GKrZsU?Y>^6+Uu-;*#EGV~s6@mTEgH`7TUv*D z-mxOvz!=O8T2o-76?u@@D|}83KSW%nTEIp{0BJa3J5s_upx&51L^ofhrT@9>lYeX2 zz$m3Y=$0^&FmfllR8re5!KpmZ_uklMLv|PD&5b>s824r;8I<4psEYx5y>-EiBk*Be$&=gtl_ZzP#TD?7=QWo_hQbw=}y6O40OJI}R!bJC`B`=Jb>!KL#;@ z(=IY>Ed-`X>QGP7F|zyya{_GuCGn6c)L}$cbVr}iA-%xY*1H?QG?Nd#I3hy=WCWxI ztI*wsVUd(f!L#qgnMHsd^F+fa=V%^IEUnMojK4TOgcaQQ$^2zHEG*hAj+nD26r@+g z_PS(;e)DQk?LYAvj>rqC#HrGeXo|3bPPaz7hbI}GwZHb(;$$!Dh7wJEh)hZOX 
zd&ufly=E-Etr}zxx4v`#F3z>pooIr;N)Ne2Zo9dhvJ zv~)uI6Ogd9O7L%pmp8UHC>5w=4jkqZaj*~dNyiD657~6e>c^2+ zl&E21gn9H{;W&+nVprp5PwayZnQ{^!g}3(1(oVRbRL>Wag@nHg9~2O7wD;fQ)YLBd z1U#*%Psxa?NmC)Hu&`NG$~3U$Z30aL5OZ|_NMpEK-%tXdnFMx*zd}PGM@Z?v*->spq+VMYogIQV4Y3F_J)GWBZ=AC#M#p<^dJvr zj}cX!o{65%KY9Om*v$j)@h5NDr;Y!tULG6Qh2#TRvYYhazB}XY@lj^Xpfy zcGr7q4GV@x2+Z+tIE+^<+U0cAzVBP*M92Gb(eLG!rz`cw9!^tV&#W}n!TVL0P16YL z)e}6tLIh6!KdF&%2uX50i9C4H4HG`){j#D$xmiRRlMP9zP_6AJ(af65wsmh`{FAra zrtU7`$#K#>gOcI04Wo7|J1rD53V(=^5OiXJk;Bqi0H&F|Y)^bXcHj8hpP3w^uQ`Xe zG>HRXki4~lSrdDnccLa+-{kv?1a%&tu-P}Tk-B&k`#jI^w3w_ZSE;hPEX zeY=+mS`JL|4iVA~@&!Oz7|XVRnWpD62izA(k6S%k@wCEf(t;aZ$4Q0qz&8>ZELy2p z=&;W(V+?h8Av%PtzvmIZH$?Q<714G>I*;N`KLAVY;IG&`uoMqN5@+(dN_O{3sWach z$zP;hsStZCNjf({NsKhx!wQZHuepGYewz+8w~qx};0J@_Sf;SDQfjv%mgbf9@Bm7) zg+8g0ec4Yh%PDDP5D>IJ2~jQ)D*CX*y-pThpJV9GHQc9#u24Uc?HN+3m?DY7{WKa; zD!?P+q+xti_$n*@p|Q^ipJGfDL0s@-J*K0g{h~zFkq=!zZ<2S~=%JUEdqfYfahSH$ zOHW1-lbE%8rxn*4I_w_J?Qxy986a9tK+Km-5g+Z7$B*kBW?w71-_rNDUXYe;P^kfW z)eocuL@~_FBxbnlRvDV%@^%a1J16@eDMK~gotLaWHDS&}I~|g4Ial0k4!Pg<4wV zZVCWU7{1D**KKCE0FO*C9X~cK@RH4y5^mopa=w$sXO2z8HEzgaGt`H0iAac6&Te9a99)^ad6U{Ex}HhIEzI#qP+$L^e6?&z&$@e))P0o z3Lc@2FJ>vGtg&Z=9yxL#?V;_a)fAHAsD<A&?{@8F~13l04;*6JVeQ?F5~J)1r|oLg(OvweC+y7A116b*(k6Bp}9E zLg2Z*>OuFo_1$)ERp*$%r$g#s6f~U*DjvDoCi9=Sz?56sX*uJU9}FqMiwPD_9O6`q`-9 z;Bja27-}#QaA*i#%|0j`sxudemLRZaWMwrDxVBV_@hg0jHv(!lo2Nr7yCj%%$QBx% zs84ui!S~s5n1mn7lP#y8Spp%wvnLLRh&77wFLy4pzEq%cIxU`N;b9vt&H@x|=j$#L zVhYE|GQKZ3(q4XpIBvVAi|tlwtYrw~xiKvin5OZEK z_wS(>cZL3?7+-v4Wib@env`GGyz;J z;_uxtp!5a1mqG;*nUYM;!b{buUDDsLe2X=`0-Bp3Xd$dkyofJTLP*D7MCPUc((bb@ zyFUx0P;GVhsvL6Nfuy;7|MV;bANCauixPeI#JvY>!fi@Lg&ZYlfUjaH<%V(h(y+ai zh5$pA)Re*qy}WrU*LL44vFD>q4#y>upqnrF9kwfHGaCX03{`>#S%4mqIp@8LOMQ^S z_E8@N(;74iN9TyaRZ#uH=*7o5Corh!>bT*ZF`VmJZJPVLY7vurstF3@kwEwu{-S+o ze+JQm4@X)X{ClmV_6|k*o2yA+T^^Jk4?9 zn;=iw*HRj)Sgt^qMAZ!L1mNhZBq8o0+vSlFWV zBFwMF9F{Bcw^%6&f|!0$?r8#ADHM%)dC5HW$?fox+t4&HEB)!~FC9^)&tTssQg$;MI5;-+vD3t3lpr>;htG22FOgRY^$wotsF%F8_E 
z4||l+8RVhJTQc|i9%$VOW~>aQ5iq=ot3<1O@$3ci1jX%}a$_LyU!%z0pLSY;U?-kF zFA8byA0$zr_9ijcZ@euUh*d|K+Y1&LEq)yFWN?ZpA=Fz?UN;aOQpr8i_>Z`?HS%`b zs=0AqNlM3H7!)LvK;Q(z{inA$?K6<@w>%6{D21<<2#H+0r`kXrEn`FPSPq+%+6{Wr z+z2am&bHoKfq_I_Vux4JlzD2Y_~~Z8ZhKC6mm1;o6aMS&C-1+PYaP{mqqcm$DFUyl zFmAhs_gg;b5?`2S7bpjJWc1*F6*kxRo_JiTq9>UoL~rl(dkgi=WMZyu=9#tC=4nVp zp!ai#lJ~J8+UW)sK7}=)kjqIc)x{P*u=+zj96K zY8WY^n_XYldU{1UKLK|c1!-L%8}2{5*+bcsb5EKO^v{^Ffp2uFkS9en*coGU35Yn| zaNds+7&gB@ys_iKW#!t=kOZH=?dP-!dlZZW&v@bzT;6=)RCc7L7w1at9rn|hiqICy z*js0cEe~bs5TU~{U4eC~6m>QWO|lI!I);p<#n$CY5qldzNG9CA0Tx0ldt7ijeJ3uT z$Ez;Kr1>7nrnGp+8^wE9HVkY%VeT)$7PKO?4tUe+taEgeT2EQHY#y-SL!Ed;uCy2m z!2>3A(|fbiNe_4jwa&;iHu2Yb3W{&z$GE!B!UREc8K%bup`OkxJMNa8;ayGLE?>-Z zVx$24$c5KVQYVYnR~*EwVfD-y6#0$u70>1iY)zpJTGE4DJjdiG-(bvg%1GY)8}(ku zv5>Ii?M}mqa;2l80QG2h4-1xyb9P*b{TMG~2*5!iuWi2e3? z^|IX>drjZW4wSvYYB?V-pwj{zqZYk`_LbxE9Ce&X7^QC;JD7TCCrz7I_p8#~z za53T_MERQ?gyHmV&T+NQ0Ect9n8^F8CVEGz9*Ff*f<=rmh&O}eLdEPmI$#v0L(O-}StU+&#G_02-9)phGn`Twi~IEyHV%YZnN1Ig1gDaF#;zsA#gaWM?A=34(h0M$S$ze%2c zW|F%HDyxkynr>j~I&yV~cB{*_G|45G#Ee?NG~!hHKH?rXZjVA(`EY>INo7V^l0G;4 zoucp*5Z!))lb4t}*X5bZb=I5`+b{os>%DRO?Ia`lSXioY&_3=P$H@rSZ1F_y+fncLG%uBu<4j0YNv|X+~&A$QtbOVqZr6$ zoo2C5Yrnwpfrgb@B0o3DV`s9=rcY8#gyS(X6G=80VmHe)TQ(JYh|{lfSTQNu6XciA z5u90KI^iYi5)crOWD!aJg0m?UG5!q*&l|8cvKQ@7n0eb-)?Yp=MZJ+ zN55b(^~JXEHwQt$*86mJZ*jN*x5Tq7w5=~>OG|Kvkm4&$E${J_7uI>bU#7VFW3G=i z5qddtK^3=Sf<8pYVQ5$k+9r6sr~-%#z^+j)bvY;q_%7tg%;pKXm7fV_DS+Zhkjn;W z)WE1$>6BI49ucj5MD4glRm|cZkMP7}3oK8@2}(BA;wHy;-lyaW;@78f1#Ju|fEvmZ zkA?|GL{wLgUh|mk>#L~w35;xvlxTtAMGdEkx?-e!GC}3Ew+H`u1jUaOKg0Oq5idS} zpVymBHtzh0TjO;+#w7WW8xIy3CqiLp*$kRGw&Xz-1SG*mz@}5KQQAI6_g}=9nI#kS z5K|tknoE8pcY<^}NUoz{w(4}tMOq$_cE3ogazuqBQa;a>ON%VbrHHr$jw`zqR^OoH z@}rn8m_W}&$fzb>N5vIN5lP6T{BFX^7h6f? 
zi@}ZCpJe>}Rn9g9uI}w%R_^iBAH9My9cP&FpHy?QOop0;pm!1VAzd6K-$|)ayWhb$ z*reIDDF>E`CE_e5B>W#bo#YBf2~95$*F=J5nNF)gsUqOBdNhx!6b}XD%slhcOPrfY zGT}akjl1l1I{efQ<6rXN3xKWo@kb;wQkY1@gM@?CI;6FKm)lkVA!d*|<3+Yz$i6Vq zWSERUh}ZMcmG@J?8DeR5I>)OVtlwj`(4*jUP+bnT?V#xbqAcSM#z?HJkh%9RnciJW zoA-F*`U;VyBz~_4e`t(wOk*KjMA6%nigl8*4ucZiLV=p@LtTEJ`Q=3}&IL(v$g$C- zF_6(L37>0-(bp&+wdhoL*g5Xg2`-VGU1sI+NoK<`ZnH?W(V|_eG91=vwj=E9D_Ein zIjrJX;B<8)qlaY5R+G#yrlAP?GE32`5u4uz5jzB z?O)>d>LM#?m5`{@tX45S8Hg=%@!Sl{b2(x@1;5}RPCm}~M31kfuG6XS^WM!Jy#6M^ zZj;S*4LLo}SO5J5W)gnJWDU7=K)+gJ-^h~;WVtw@ko1dKeS_LzfoipdnV!U#%#jlW z{AdjA4&9DMyV_wW@6n%{pdMKy(z%IrutmAk=WrNcvQwsY+-9$T$o)Nw!`2iFPp>dD zmuD>O!LNvj$mHf`&|CA|xK*KX?*{ub(+uT0#^F7VnWnY;6i+Ybn9fBAtK#WySvUr0 z{~vqr5hdAm-g*9>HxVzqkIYD)Sr+AhDijD11ezo$i5#(8YEdnJ{pw@j_!9dE^wcGdU)r8Ubks-&vzr9>4CkqxK8wlA}{woSQeBR}*s$Hpc&QHT;V1q2*=)!USop68We2CdX1I@P089&mGb zhForv=@S;oaD~ps8he+Yr%~ExDOcdMxJ;_GO)W5kmYL??c>I66t7UU#qXr7ZN6Ab-zo?TgCgCX0VOl!ADsaV6GyU4=$49CYKq$~kN zbm-TXsjfWB>u!?4K7=Q2>{5rDgA>Hk(>(N0m~5=ZU~QAqwU;O=Syr-RoG{jy?ypkS z#*wDSIW->QXp{$*OZC@*9E=k^{asE8QT}7=FZtQ^OZ?*hxk2Of8LrP47>~3O`d!M! z9(rnl*z{S>98EDl5y4bs+>JJkwN>iF6z$2=#A6Ajb>79{8XTWO`TDn+DXvqx^(GtZ zadfdvrnkmcMdEh*Yn*swj`<^Fq=PEzZkuj>jrCgy*|NV#jk_I59&e9=NMv{leh28MZdrdih1Jz3>{B zDjoK{7J7A!S|h`D8}dO2TVeG1v!uq?Iimc8jmxj|Gd0JU`Z)2EQ6|UEQ<&)TOy&}e z+9q$jwSpO}Q|Z)Mx@9A!=lItDdYb7}kb(ws-v!?XO~dv2*wqd81~qQ5$I?oZju2($ znWveV%yKj-VMyRLeeAYPyR=Pr^Hpxk9)CNQ=E*ZM3-dDDia@j6rM0w5J2QqInWA6o zz}`)6EXx#)8WZXmR=GxLUBsHs@YwM=@+k{>u*aaZL%Xs~bs(`(>$6;xnTUwQ%rDNB z<%{*i6GQ|hKp;88#OY^vTw6fR?BE2Wh=CCCfQGKfgcIW^fdqOa&vY(EJfvfaAYp@FQ}f&OGAS~qL{MfFfjp?vtT(y6 zGC;K~CKs|8io^qz0nhhfP^Hyqu(&!vGJ;GT$r4cI2dkd=AhvP*2KzOzhe7g%6!C=h zFT?FBh#+Vf@kvxwTs0CLPd z_ASI@9p&5uN~xImAeuh&4o6FnCm%Abom{Y)mDrMv>FAoPOq;*ugkzRwL~e2nHiu_(ljR zm}E9)5|JKkjVBz8Lv$Q<;c?D?GlH2uPDza7>LF4Q9aEN&lX=og7&#DUtdJlRF$k)l zh(4eYnZL;VZ|KOWbI4;+QbFaN!;7FJ>nX<0J`Jf7`e2e^ID%;zsA7O%w1=3TBA7^! 
z3W^6~=o6Vd$)If+1Q zoNy{dE@5Hm2U^5X9%KGDxe$urJT%S2a|wdNyEk$vm`5LfjPp-UlAN=NOs0rMbfUsE zQdGwr3o|pBARjf*056b2nS6pHrpmvY%y7J55e+Ch(jYr=0&UzP6;QtTIV5OEdXnhOW1ReEhCiID(lra{`D2`&ixJU4 z6#>aY2+TA4@H70*e+{u0T40cw;Y2n_#4wPQadIav@QpvvsibD;#RH@R@5mQ=B1NJQ zC#s5Q;t=Ev@xnPC|J@*UC4x5=AZIGXq!6>l5MK!qpU#nr1+e7NA=lANV#l81gi_!K zr6@{bl9~AlrqT*YE6(X}exJx(lT)b^WM^`WM^vobJig!KVj@FBPLPjUXxbe5Ss06wwp3K{i(Bgi-}gV0CiN#j`3ZmaXMe`= zGUq|_|!v;5-gIFS(6 z#Yaf0GBLN0*SNvz;s$^Fk8MI@1-|ur7f1$Ff-oZW@JGI?0s?{v_&&bp9|$=Ilfoa- ze~dO<0pTv$?Lm3pKe+xuS)E4r@Gf(~!My_Dd-&cEuYR4)m34mjj~(<>o^Sp8^Q6ND z7WbMOgpqs4JumTnZ{)u&j9eiE;RD`xr(Orwd<4Wh?|)Z456a$iZS+nN0T2Wa{6%(d zzs6tuAODRdXB_Q6|L;6?GRODEKg+=W`taNVcB@3A-k?;o5sWxSEJwv+A4P(UeBrTQLXCRip7oI!BwOjOD85@g$E)e*Th0X1| zxP1rN7~(P-2SHXbEFDD<4#vU7^?htdM)LaTwCMI!x&s5tlF{`q?>KxWxLy2VlUk`p zz22jVg%%Ez%EbsM5}z`c%6s78I73=38($DHEDJ@F-luKy90$j?@f8tt8K-6A^j$D4 z6jepPXCHNJ8`pI}m+@SWVcW)+G%zd-`BQLke+kiRx9QtGFf3&4N9Jtj1Enb_&HcyL_|@+ zG)D(mN&H~9EkP8Kbsb+kD5D!FiVu?XzI{|w6a-lUQN$N~q<}QqBP5W%;%5d$(-0*Y zC)2StFT*kD$xCBMS&VrYyc&F>$Z#aZeFRQ4pjNvy2bOri3qy%&PAmhfzsc z23-f~0XY~vV1nmpgHVhRiV@<|x(Z5)f~4Fh1F3kRdBVpVwi)(|6pLk6cX~*3Cec(3 zMGqiL@+UMA2_VXdqeJF{3sHhl7BiJ&I_hl;w?5ds@$lhh~*OKp)Spps}!%@qB0I@ zGS3-(m%;unH*W1AOg>ET=r}AhEz>SuV`=;;GotwQnk4OaJU z%884N7Yy<-4OQyl4IAt(-e7yDfqg7SDyNfFh74MJw6IAAkR+dbTJbh|(v@=pstu!KyAH@nMUrIP#@GaCZS2 zMfwm8#Fq?$AR?;)WHo^KIg>pLf{37Kh$sone_j5vO^kI~u)qKI%07qIjZ10u@rd6x9AXXHMud?Cm(k}Q7$SEzer zfe-rmuIzD72}wGbYX|sHA!1Zg6hVD}(y=|rBF_Vek*TO(-=MYe95+_CczsvIYj;RH zO*)QF&yz78ct5dE0YQP_F_M!uzwxar@WLvqZ~T9}7;Iw;L4Nn~Jm=>kXpBteAfbgq zSlKkD7A0zG1kXOik+Vmb%0-BTz0v8oahulKZJysgN1{|`j4N#2y2|w%>y+ww5;G>p z=69*>+~(Q8x=!pnDP~Vz#1_G~8@L-UvGUpmfAwRBvqqBG6A%}`AGC1GZ?kk`ho8UF zBzfd0GiMe^4=zz(UF4_#a2X2cx?z)woJGp!aOEJH*urpcaQ&*o)op{XhkG=Q z7WJ#IbE7)J;y__$GR(|`gjRf$wZ(n@`Ax{q7dZCFDXjK&>dQ;~?XNCT4b3t!`80WP zWb){hO0;(OXa$a=C&tLB3em5)mf;YGIK&|i@zuhi;Q1NgBjXDJ;?pOYvkMsWIi?d5 zfdf;~S4QXr5F}9I1XBwfyZ994j>yu|%WSQ`!SnxQk+mMjFpqE~p%T(0KtPd4+74HU 
zAQ~~I=L^iurb)+i0=f%B1;0_kF0Qaw%;ShMRa-~2vZN*z)J%d{NFXRw*=m)!z0qNz zJH!ZpEV~R_b!xXRvwmxzYt>1n`XZ+2ArD)0D(kGg_7+#yJ8TYfg!M3ySe%%fKx|cr zh6Z@Lgd~V4GUysOLIiY^sksT}=h9?i2A1L@*j;MH3VUl6`WFCg)zRHs`kar=$ytZqA0{b_Pm zoOm)xm?4f|rdaG!9XQNsps61CL+ZN~%3D>uXbLlzC#K4nU#XAn5QjL#ArA3X$D!c) zsW8&$so`lXEy*|2-{fn357D&HOp~xFeQ!n(5gz)0TwHB+_h@Z~uM|QMVWmX#_eQT)Rxe4bjqqjE%*a9y3Pu^!xO? zZ92Um{h>%cX^~H<$dW+6xJBcwSJ`d_*bC%{#dWgbk^h_DAK&f`BN#H(L3t13^-e z%>;?rGo1X+zh~S11>MeNHkRJv?UxeVn!QLck|ZgETd&aFUZXCB=_MvfTP7J@L=i#2 z#&c`zmRf9|P?M8}5)+%)| zgq@foWd+D;;wWukvxZk*qvEL4%?PJr23f0*>^IonT<7iAmf0IVhM1lqWtt=;9|@0o ztwwQmlTLRAC72;ImL^|F5lforvWw@m*}ig}<<~B=DIF!0Kf+utN;ae+`vbb=61BZO zI>9N7#29H+CM2}*hC3AZ`)pTq!WY71@@LZZZyXnjaNzseF5~SlQLIVS#c>vnPq46%L|5)mefWS)zt>=EtxB`) zlbRkQlF$&nD#g8h);8MM?Fb<=$^4W_GVbAdP0GbO`+HqfD?~UJ#{5u95*O%EEmv7x zDk4}GiMawnQ@9HWHB7#%F?cU`2^-W&C(n1Qw zNQ|XW6&bNz!QZ(>si#oV3uIyjli>pbYLBj)z%=8`q#zs^Vmm$RjSh`Q4<&A32251p9X?gh8ICl! zl8hh>k!Vt_G+16~Bc3x!PR3Cb8Q*b1@@baqY%lK6Q@(+g9U~|>h>Z$+yZe;ueVVRL zI2|TG8AeuQoPHC<-=th^b8|O{_Gp2zlhdR^+B>vh{VHB#k?r!3%~pg{af^IPN0vMs zd!KIQ7RxL9EUvkj=Plyt5O@N^YL&s}B9);|EjUIhAdyiAxSoQ8fc%Ah!X9`7`aPR= zqr<=jN0g960a5Vr9T(5{agmTD5k&&maq&DKUoug3gK$Vgkv_k)u)gQu4Q;yZ7VSop zt{B2otWm(2@h`mG`5yQV!@f<=wz2yT{h>eN*;F(P(?Hi0bVWRLnfS88cfofU^zMwY z%fR(P99h8>Sv*LL;en)$I64gtRhE#1uSOuM@AUA7UD^Ygfu|4*s%VOIsMR{ehvG{p zcy{S`_E~=EZ7N*_PCQON9bq>5@srr?cc|Qcj;(5sMeRI=T#AcR=I4{$=)S-M26QX8 zC@;OlFaLR!H*VOZe)~Uha2(y#8(T)$048Kajy&O{=>co3w! 
zW8i{U#Zph!C*k&N}#A6EH)|E`Ydin(Z41WOATq;eX5NiiHJ+1-ehyLgEAK)nTa6P z*RlKi)D)kl6J}7)kvB!gVr`mTnf-p1w0oUuwa;3AhSS+NC+9S*z&*>a-GN$(P9NG* z5XF=+3nH!HYmodI_ZaV*J;My>#P+Gak>dhP6+-R{?bHI!e z3R$G1CjOw$px1|J0V`AB$dLm1ghfQWcisE)!f9^N-@C%A+X=P@DK4JPF_E!QIW(yr z;)C%e6g>Ml{VJuUTWmEo{PbD$P?*`s#~mMi+@Vc-XPKStHrIl4h(;KH()b)x9Nj0l z9mL)yTep{Z<0YFDk2e_;Mh4^ig8|T>)7)csbB)H88-&CVfn12J?xFb|N{id9F7**p z^GwE$F&PVx6gr5VEo${P>lKA?w~y#{7z#Q=!NRlILJZHhm>P+xO_UdJOv>LeLe1~C=(UH)A^(fjn7Dns+B%KhRc`Kd z=t>5`tcoJF@$7xpE?r@#A=Aj7WqvY2Hm;xw9Xh=h8;vZk*JKG!w~cp&s?mp0gV z^?8<$pTkRtw4E=ar$(82gkck@y~NtpbzZpEMm_p8Q=w^Q{SmSAA-=>A4pQk3YS6gF z;#-@%wc%kLd6ent2I2Y7A=yI!GDGjKWsIzyL_Z;FTS4(QjLw98HQ-43nZ z@L!_BzXl{gBM=CpK4u{Dq4gNkmBWD@e5+n#~5=odNcCooKO+IGIEZi3nn!%3huI+f|hD z7}7}$8d2K_;eVx`aC&wh#V=-0@dI!1o$6(Ykj-mpojli}LFN;+q7 zGGh{b=qQf8N9^xjW%rG3{w7>v$}%XZGHUK9v21|_Q6b>ii1l4s%{trN0o|QCq1_td zL;^J|B1k=)exGimNA>0cTTqW!B}{z-tf>&vW+7dUa;XLdFR+I0ra8r!R7;?qs! zg*3h}q*ZF-Rkv}BEmms<3O$pP8Ij~~YN`YwI=+^iUf`0C+HD0l|Y%sf*U9w*dQa3?JCmUy7;>@n=N*t@w!*AWrt z<_KFR33-%NN7gLj3l~^O_L0mSh1{3H#3ka{U7XT3U0K7 z;oNs)%%~CkV3<@w$5bR3j*&l_z!(?FpE}O?OpLfGBYRP>PIB~XLB6k^A@tBGa??r1 zEDckV5Ri$?USQ@Kk?*9>A?D+xE#*Ba#UxP2h~^*R$?t2-*$TtyS!U-76f#j_+67cS z#_t6b2G%%pvl+&t0SrY&9XrFM8{~Twsdy&l#02S}PR3ACVsU~I9YYb3{0Ib(Gk?+K zxBZiZ9y&#KCdGJ2Cm@LkgvcDbz$3pmAY{zaPRudCkfV?W%Y=uXi1K^VNrER&k(y3Z z2x)|kJk0+l7py3pp5g3l97}x91XB{o;p3cn`nOP1$JtTGkRw_0=?oJS3i-6lW8c;( zOgB+ZoFG3LBW216lPA!=@ke~KJHc^fic|A>GSR@OEnlF<%o9{>F@q70aV|Z_B2qIq3W>_uJ?26b@ zjPzKX!bFT%RKijmd@q1BcZ_zUM&Z(HtgeXs)6b70K0eRnV_7u#MCoB4L`b1Aw5caK zt0zbeQ%uB73=Ylghxl+jP=crL`y)r}doK#YC>5hH%9HNdmSxC4h!!6O%=ia_>F7Q0+%x(! 
z`gdfi7Z8L|*--@T$OI#Ns0YA1@A3VEHVAOXczW;qhRp`8kb6mBpk~^6UC8cEb}z!BTq5?q=X=dNYXo=5_0M+ zh2$yz;B=2(-=SrLVQ2({@6CXl0xiYEzj2Y1Pdn(kjQXx9Ese=Xo?+slC%7O7abyGQ zZaWJ+@>>@<^Ne$sMr(vwNX+sK@XUv`O^zX1Q6Bm3G-tj&pk;$ED;SoJB8td9jQ{RA zJl8|kWyCvCLkkzk&YWf(9am7%Wxl|GqIcklI<~n$I3VzECJF>J4cYe@R*KkLYizeA z8d{vAg%rueK@xE@h9N1OIGzG5bWM6^-rt#P{++oXzJLDsKK|XgbAW?2LJ$DQhV~Y< zMxUFtF)kWm7V;uN9em##m3!Bka{tk9wWzJHQ?)I+!D+_w5$5yKCkluJ-n}OJ{@oA$ z&VNUT=X=(q(Q0zHPki4y_-)kS57n^x{@v(*9}fEIo_`O1xw}5z$sql%e!ORF-c{~h zFB;u*clo^Y&V#(t?<;iY{yY6}=bazNn|$vYd(clGTBlzRy+(uT#wJzUWDppmFczSY zkr<^mml4DOk-76s3lbNfe~#wcm-*3;Um_OPc=E9-RkS;9lW?e=rm6rzxTI$>V5Z{9qo}E?2)539poW@ zkk4Ot+ri!bL>R48ch{o#jm`TW@~&_3gV&%t<8~)H;Q$}-><^Na?tI_y;0LWeAcCg@ z^l0pt+21VELt@~I;7TaLFoArIkTN9d)~VE6)LK27g8+Ik#Q1m|-K#-slk0E3!sR!v z^730l29YUFukDgBe0o`hcC|sP)}qq{LAS7`@5Rqh;fIg(=MX{wA^J$Z0NC7-r^%m>5c5uaWq~<2ELc#m&xILmwQWObM z6h^|85WtLOkwXylRCHZKmp<4zUY17!gY@fo&Px(fAb=nU$oE92-)Z%oXTN`DHTDH zP111#JO{;X&?q+8FV*QuCXOJZdldx5U}((~3mN1s01tOy(=BdLZ!{?lC0yS}!bP;k zF+yo_5eci`#@)U})9$gII)Q2h$@n$w?Fy^yF3kZ5W&|aYA{|nQoA+}G9MN_)s}+{6 zZqsnippVazKR7skyz$=yJi7HVom!Q8$EEN2C;^ET0jO1(zQ zGw3KO#u6GC%crrqO1aXZ=*0;q;>=9O&?WJm#ou?p>(H#!+25|v^&AEWpvEz*2-%E9 zK$lSP7&L3x^(ysNhpwkVWRh?wKqdr|VBo=#v8UNZC9z+TJ&&nr2r@~5~&dBWDo^--z;}J zc>NaDdXrkirr88F93eiLC#1;)Kexqm2=u9y$}C?i(h(oV9Ghc26d%KLLH+aN1o-MQKA!>~!C-l1CU(i`*`2qIW{!r?Iah=Stx@dq8Ml?v5{ zO>1ajgu>*}$r@GE&JwVF~QJ6^*3(IJ3mqxM3{#KE0Xc8fuWiqA^Hr~@2!zP1n zll}cVP20m311NqQ-K#RN=8;1=a@G*tY0;@xXf)c?TV161G}HoaDjW}{BM zUWdpGYB)_M(8Oz%*;(GECCBJRCYed<+v z0v?hmA%zN9p%}S{h@p&RrOM78m13D55{@Thu!Zm za9UJa6>Q(a@#n~kagw@^W7p{HFR^lKi_Maac{D&W4@z&J`ua_l-*}Uq@Hqxqjo|4d zk>JRjDELEg2ej)|nzb6$YKvyOM|^OCuQ)!MADFo2)F>@n#J& zGvip6d7mA(N0dpD1S5r>2%?5=Y3M-cV@gYsaE~VNvq2C=1Wo$@;acRLgWU%d6hKgr zL@++4lRmPZk^++Qfm&rj_%+)n%8249_51sO`hXo#7!AY&F-X0eMe4e&*WPP>S#$7tG1v5mOJyPwB_A*e0(b*Fr$?(+rBSU@t2JqMhjfNMLcfI9 zud|__MonnsqBh(I!cAr*fNWWLb9y&CAn@B2$vXCPp z_tE<`%InuyS>I>VnI)Rd6A1dqopm}}m$|xVQtBsggCeeMV%b$nYd2WGe3{MI5I<`Y 
zIvyruXb6Ds*m%PZ&1#KCtxmPxq}6xn2@3t%HvRoom=9u2XNedtO23J{zs%2*WDf>JCfS7pe4ITBe2At0Fo>dZ{4sT$ET4 zG_Qfv*_73XiukMi#x4Rb2% zu(bBdU&C2?mgg?#8Du2VUmM<48G5aC8auc7)h}1swGEQzA7>&r&U~oP;!l3W z?MqkqY2!bTJn=B!UoZ&vOE}fnxcuTKFJ9?$%E&M<2eg(dI2*U=?!C&TMS(TlU?!t7 zo)2IG(x49ZCRblvUme_jbGOzsbM{FO_anGFP;!py=2ks8FMw6@S zLA;|8rl!IKgNN~ghjg?y<9c=R5Pnl{S*F!Ht)%bGsa3YU~`5 z$uvITl^5wQKFf2zbXe}?_^op%h>abX4F_cgwRK+q=S9}{JtF5WkeyOFX_mP954U*p zpI&13Op5rKvUIt#kE^|{Nr_v?6G-HJ$8h= zyGnn1kzf4Pk61Jc6!qWdc-rHb)S-OiMQ*)*lh>lZL$)I_EwAAF+w3$qdHba*J9`@B zpll}y75C{bzD4EMb3BhuGZW-!W`Xg9h6Fgx4SL15c;S-3O2y>F#aZ(CAy)f3FT8!7 zzx&}8CjRIQN9G0kf{N(((Vb;BZ!Yq8KiuY|b&}I3U?76uujAC;=JuP5{LPQHIrRrg z=92;JjWTR6)82lAo0WZj-ZsgHPjPHEjKN18#=bZ>)eY<`KjqcyNnUT}`9aRZAjEpH zNAczgmCJw6RyTrc&oE}hDTGJN&7NJstu6A_>)Tvg>5zN)VUp1(`Msa>#&(MzE@d&F z9Op#bfc;n4y0OZiKc|wInB+IVcLHho7j)Kda;g0g?T*9=SHRxALH*L7^J?iNulLXJ zJ98l>#X3vZUci=Qs@^#=UvPhY5phBn z1r-6qx53$G=k^uee*P7%Y&6&&1dy#Lkwl7kGDpa(Arvoh?Zz5!F4k%FJcMBfrMtmi zX`kyGZA$w!>`sTFs3H0uX0J%AR%4@N)9$(W{t&!6wVh?IKl=hVSNB+VO#DEUXfi@7 zEh9M{cCKyHZrF$c9YK~3{M9ySG@C3nl88o_nK2#9yw4lvArA3b;IzQrpjfK0wl5$j z#z+*#iE1i$6ij|^pYHx5*DhV>?Q7fY4OP5gjCd+T(((vVX0O;`dvA!O_^1c=Xuq?E zUA)DOTkG7|Y|+&t81WRDRFXu*BxHah*>t;2*4J8e2O`<|6sqLVt5oTUCSo*ADw8IY z2$KjX7|Q)!{`_GVueL_DT;*0ZfDlSBHJc%98AyDX;OR1GRVd$hjm7IXczI($d1w%d zCWxgHgiRT5cZF7^L`ezKmozjyB+;hdtFyUWVrRWXt6iZv7}E9?WNCopHYpV=>{r^@ zo{t0g?ttFjO?DSA@#57Ii)DdeGDRX2B@t8bDyx(huX3x{WxFZh`y#T}MjRB`*{ic& zwDBYlRr5e_Xz#4C^VX{@ZdO=riTEC<^bx!&rInjpfBqG2Z8zBQ1IVEmv7|*hE#P*V z>|Ec$wjD%M14+bb?bBI%g_}1P`PGd!P2VJzN%8-)cV|tKocF!ozqwUr*1lI)Rafub z)7`UUu#tpFiXyd9adeKuAzhpsNBF`KzVsWAUV?6X!j7X0g{6q&6Jg61MIDmnB2pl@ zfY@dMre~(Rr}wq?Eo;fe7d-?75QGSD&=`>aPsGekb!KJNQ~T0Y z@xn<`nGD%{ieyB^-+PnewOhQg)nT_i25n6H;%Z=F5H zad%@#&+)LsXLo#5Z;U=TqEV~z=Jts0(8CU&-APj1A-Z!wqjtir?H&gwP5PY{qd-4< zglSmCY1P;~bZGWn{KwcPkBy>&8Y1X-aS!ftQa$8hv(IVG!RdOaAsf{n5mX=2Y&1CT zc{t;u;0NH13EF!!5AJj8tz9-w zMvUWmqS*q4T!u{CB5EmUDvVmEwC=yc=K3yo_FWt!PNI+_pHGrW*hGwfJ?7%c^+ar< 
z@A?e81KLNY96wm+q}ihrNl|+4X)b>5I!l=_xzGqjZ_{x+T8>S!TxMdngk^@%wGq12 zq3Hx1x1%JBMWz-eNX0Eo<1C~NJAL}i9%{NkygbE(5hCF=QJgj2_?I8?_5b^493+;A zKmSFpOr@ELg-{g>E39KhJn9EE4mWEI1_4?S5E~EhBN4i(0>hq)ISw&4_(!8yd1&MAm!{5%_p$0KFds{z+BqKNM|r& zaq@;jL^XAZ0)iQoBMmMdvy zA|9GLU_1^n9_5+H#h9stuq~BA?SRu)zsq`U%v+h?VX{==vqha`Xw29j(HRG*g(8`& zS1DxDWSly|_P2QRXD{-f|67+M^8C)9Tw*3&!>`@v)=8dYZ-z@V7L%FBci|rN+qkk(2$7tN`%xz(oH9HLn;azl)II?13*B%;&ET_IYS;+X=Z(iVL) zOt;}e5T*DG*gV+MuWRkfAL=C_lLAS|>5S`2d>2!kmghJHN&;y9N15`6aCtt=Hh0t0sIXBDV z#d)TpCfR5hyHFr(hp~7B$N=Dc$Dl! z63Y(3xC8CG+<*DU{O$kx9X5M0a$ouia}}M~;DFa}b~tL8tlpR+UrG=JXY)*t+bsr1 z_c=M)W3A)U&t0N$?T>kSsm#+85DkGlc5ylb8V5%lZ*6hX9WlxjnEAqISiP~zVpJpb zMBK;zU{6F_hhj-G%U8)KH42B{X8rD6zI(Sz(@YU96i9^(B5nsd+tiu?$Gsf6Y@F$m zg%wgzMje#a7WJ;iiB~3*jWbm+uuTlih>LDI9_{` z`#*k_?I6vtw8C5|Lpfn#C@`p3X&xTY4QEht74l|?#G~K3lA0o1UgkooOQv&|pML)i zHy?E9r)G#I;}pYV?C}x3<`G*}pM!^6^l$z>YgX4z0)x@obv8f==|8&4cC*@f&Y(y8Q+(-tMsLL|M8zN2QV`Wd($kD$bzFcGbcO zlFZM>$)rpa&xPg@!}=jrUtt(Ylg&+2$mYq%!)VEVus zjO`SmXp+38VRnw_@7`sv7ou)YGdq`It{leJ6h_S!&4WXFRssrBWNnM2*Fwy!)J178_6Smmj|$x{l$ zLwOpeKN`y~YbYZk{yHmgeJbaGCU_i7ph?IE8aF18Rm9!IKb^+%XFEnMibtxl$O!U~ zIF6nZ;)WE;$f{YNI|@I!#>ZkCnYq*ri=0}cmC&Z$qN1fK?%Z30ii;4va3@cS@6*EJ z0i$N}xA)DC18S=ZW{fCOr}=*)i*L^zGG69U zp0BozFVt7fM?v!iOLk2S3#EMQu+(q|QRo;^DrmbzwP#JsV(v+WxMlRkNhY_bFcrpScLz*D3jLSp>mOjPm$uhMHK;}6fu5XFx@rx1<@~lK7 zLsR9h+FB@GI8~{NpA$7HjN~R&C|b5{=x!0|ToQ{P8lRC@$^DkZP8gO$h0jL_BUR3z zgciN-8LRfE&L=+hg4|HSH_g!`>=+(TM=nq#-uN$AjYT-(cPx%HQz|ESH#TzNyI3-Y9IS;bsc0BD0;yG#_ZgDI=QURB6vAT( z{uI>!20}o<^c*VndZ#NHZOQd-JVV!kUSj;3eHNkt5?j>cmt33 za%>lb{KgutSmY1J6^zVT_43QxV~~}svztkcs5EI*)iq4{GvlN+=3F88&_-J1r@jOw z6LuhP8JFRgU>O_?*rp86uVy!F>LxE-h`nl}{p1##XpZ31NJ-XOUD@0(A-vx6l4Ucu zr5`%+3|}DrM1N9(+8FnX^C=j6$d+U%2GRG`-%&l(PP%)>Ig*W-WlJr{)8<=+$R|s) z2i2TeDTdEI+9@JeRXc&vYaO&xhU#}ocJ}lu^>~Eyd3?88Q1i48UeN0KrDPqtF!=*sX? 
z)eWEF?lM@02mi)v8MP0#Zds|SnVux#ut>b9PeH#$x=%of1YEu4k0|eJwK=+l|9w7g zeO~KlJp23p?UKPa)s|(t@T+dsgyRPmb)ZZQy8rpX=XP1Uj01FHU4TC@Wl7@4i{!2yG>-jPU^$Cr zP0j-$`emyiIACt{mm*vHHEt=K4%`o7K$lq4h2xbc9DTN_1=QXg)I|-1{ia~y znohp+A=!&)91IvJ6)G0s?(ZD_i!k}~K%!baiU5oXknNYA6Ry#r7#cYY7iqf(yD2j4 z(E8=t>UWmy_GT#&Ljl$?Z7G?KuvD(PF;OC46@45%jo)cN%89vQOH|QkuVb7LkVYQJ)ju z;~v$jWpwxbbdC_6>c`J+^j|zGM>>_0XiU~gk=oTm)EUw;G9Hn;6SX708#|`vT8I28 z`0*PJ+s`8`y4@=`XM))}W-bNnVS#EL-!4Wq=v^Lb5^w%OooAK5KzC_2812v}C`0D! zdZkJY(e0nA(R;EP7B}-$TV5F)s}1T(iqte(Ym%{k&@( zM+9&INqbh23BT;0bECq^nokj($lujJ$adSK&GxNtyF3&X)BDbxoZmou(>>hx8XOVx z(7N{aeNw&$oblr*j53##cp}t6WE^7qg{>{qa2d&VcprO4d6}xYRnO#w z0tGI%VR#oFMhyh{BBhJw>5;6m=bIU|u4@Fgc8uZtc$M{{rN}S2V&Ja zLPr z<;>|?Dbwmd@TwV~%Q<@BVq{jST1tQi=!8CZ&#czE^E_nG!g6gKr!M?^h8^n-BFNHk zXi%9%f16^6o?%j+7^%;J2>N(y?m$`}kJeW>(2nRBx)%TT;FT;PK`ei-M)JC4nKAiY#ZwB!czvnwEaj+ zy=T|GwXPL-3lKR2))96Mf5z{1IlWW?VY9y8Iem@@3Fa9`gKkGhC{{_8mF?&*$TJ!p zJpDbOQ~QxTcaiSpseW7lAtq$JVyo<3FR8}jBpXdZhNWW{6BWK-tK9uJptf>ft28o` z%4tRd5&Yc{{qG6b&z_M6K?N#v6`5G&MWSE!;hVuowYcOQ8l;#==#;{PqZiq;+rBx2 zaU+TC6I1#b^mkOKA1RVm<7;tYN#)G`7ifq2Nhy2zZY|a=gqKg2Fc~aZO=7l1B1PU4 zAJ7VTzQF@~3>ETd^w-9-rEbn+-FnEYbv(=*p}4VVL~*lSj30i{7d4CmQs&MqO6>N# zNF&d=e=lJzLZ8LkQSPl5&o+8Jq@!sHZVAxGVbG~g;FxWMzsCmuNvP3GGNV&8A0RV> zY#YQ=1jNSv0$ke1uoS5{qr+mbOA0`jX%0GJ@OuYMsLiOLTC4#2HOYrX8bxX42)@Ol^>?=<%p?6cv;-tech`1nBJfg^X(bzhPqKkBNF znyA80nl0e6c3FNZ`inE$s0wlK?CNFlw>9-oiv$~12IYKHna4E!M5$ZO@9~QdjQFLlyMQ4cfpD4Bsl-g%}rF4o;E%+-2jA7Wa(@xLVr{YBWOi*F2Tw00VcxNV&_}& zy8C6`i-(Tq%aped9`%Fq5k_lTr7q3*#4-84=xE+LIlrQCSSi=It*i-tFA}83uyQ>8{Ve<5x4D9r zVPS-*z1%N=NC7RUM_SNE_0nOX=CZ+Q&D>v|Le(pZ>F?>)&Bl^fDPo@0RfLLN-e8=} zHiNQGHFNKe#22}ln|rop&&M@})Fk!Cp_qJ@;B8pp`#$k%gT9F;pXQY}< zUq#M%>(QeIOU-tE>!gRXMWT%&+@b4eY@KQ|oe5f?P4thN;g2^Y-9q`pTgr)39NJc7 z>hi^fSW z4iLt|N>bnRa&RKKN{Z{~%cDf%#*C1^-@tAvrpA5*h?MsYuqmm~9*$d)$9xKIx(A1E z!s-hQ+n9)NJmLsU7%3$gFOJsP&D*%I5-6ZDDL7roMn)FLUf$ze+Rt7hm1Cwg6vE2h zGxL<3W!L+j%--x_U}=^!=$pEq;w|U;rr~ojHn5|T;)e+bs`SP$SnVMo#w=pM=e_Y) 
zO?qKvg92UqyMa~*80Xum3a}nE;`v$U^7E?l5B)0tmpg7?#k6WF*zft zicUy#no9BK);14!yoCy_4M*SS*{T^7ooUyoH`Ys63F?360vNgaQzubbWpeVj2iI4N zmz-9n4R&-CN>*0;Vb=&A32g{NO|(a&a8F26jnC&dK|z~Kc=!X@M}mSmqms0S6~U4; z#dp^q9Gp*f*`^^SI$whsrMwN2LNOAN32^C$c;E=GA1juXwDml26iUYAm6GA5q_0m% z*iOVgLO%P-Y9!|ddEHkeGzfVgAmXmJ51TL9=5JGy=w7Y^Bo~a;*GJdegFSBG*3Q`v zhv+hHN%K0dw>`h@Idxt#cFio*I4O<*dU{o=E0Fy82&(8&BeqGSA z-f;R&6%xy#9s(+JvVHN{QJqCU!iv7eab#_b3XeUF}kRawf zX?Ff4H!Cd-f3 zuZVLswQ%OW^A#n8`4@AAdW`gnIJI!U2|J!jO^`{*nAt$Dp-FV!UD)huks9Aof8+pj zy6Nixd!*UD(a#$kM=RUzsmt%%NBDKwj!jJi6T}~|%Xz*1mT)Ffq|=utXrdYl1172U zzMT1)`N2ZGM>SkZZ9k9HdTtmNmTOtDwQ#nr>pzZ2{SM^Juxr^eg%+ne!ADa&ax)&m@OE#b(BkbyW&uofCe(;^(SI7NS881~?KU85Ry}f28m2j7 z4|2l4G7h(|nKm?yY6At+O|irn+*A@&kV^9*-~4(gi-qv zjFxc(0C{QF8 zavPD^i z)gj_&#GimjPGe}g=YT5S0d|#Ex)E05VuZh~?YpMM=qK%{#b@oD*6{Zc_>DsjA8|XK z*2r_N%ll`o9T$T%r;1KEalM^7awYCzJ6SP$OTdx+7Xh#19fiSIR2a9j!B{^pplvr=q^U3&%BIKg=R7ba0kM6j=m?z%4L5(@?(P4= zQ+JGO#}*9Qu`dO0bRXR506F4LunR?1)y`cvHd|=gC z@vaWzsHyRw&3VYW3ToB{uh*$|`lp9`EU#2QRfKg=czb7`b7-@-_+K&^{2jOFH&3q7Db1)KaPQK}y%Y%*hkVOJWcyw-D zgHyI~r{f@WM&#n>4X@PV8=x|7+O;=a=nq?qs;8{*PA5VZN5@Yw-6EWvHPW-zEi-17 zp>a%_3_;E3IsWX=FOp@h-o9~Jg0c@lzlVTK;shdk@kK!#JVLP&Aw0G}h&M&NVMJs^ z8rJUdK=dUJnP0%w{RuJg>?P#lsEm6nQBoA$ zebd({bW+*5fW94u0v+5b&p_wffMBQ8;Lh*W$j%gZSQaqs8kX!WUi=Hh&^x>_nWKxB z+ZK19yje9YYP26~;6Rt;YTQp}2wotVHf}juKa#DDo)y5KBaJ9?h7Pg7Clo%&;Ydkx|#vmCrTHYX5wsp`4 zG+{9~su)LUM}dUEZuOJx)hVjbb>+`fDT+6bfz53Tga<3~OaJp5>B1z&_y(^ck{=U) z0P;Q@!X})fLxFJ95vPN(CO0T8#zcoUssx9lck=Xz(0yO$cQbcOK|uvwV3jGpKti z6%u#0FU`W^Q!MO5W(oIWpMxXM$AlSAI-8l2R(XlFqY zQ}@ipD}mw7bqLNapU~h8oy&OejSmh*Xs5@tPiV#(^)}`_QafwNCB~0mZ_Piz9xAGa zn!aMmN6_wcGJ*>5i?mLxIPutmPIhs3E3#!MGeEB5j`2MReV_XqVDte^(mpBQLI@KY#gT-7A6Aajo8 z@_%+S^gG02Y8A2jO}p&A$|KvvhH6rZLW-tXgrzxFVn}_wxm$DPms9ibjgwwYUst8g z(+lmKW8?x0T2CV>TA_Iu1Jb*>)vxat$)JfqkN_Krv+n+uF&KGwUwWP_Tk1MsQPB7b z=T8oIs%liEDC#{peR^4eTlhMYZ+PeT^_aiU@z16dzpm1^-w3tY=_oM%^tsV<1^>Bo@>QFxKKD8Ni z&TP9LW$1LsQ|3`t(O@+5+Po3{RLu3>|1Q4vX2#d4?yZt3nTDUryx{dvUHzwCZdRb* 
z319t<6Ce2vgUTSO39kjSZ_M&Q2hvbj)4T6OukNOGo7hxIP|WY;YtL(0-hR`Jct%{n zWnBL$GZBcwSLvkZlg^h<{<3{0ss6jX>pE~Bb#pEu95KD!B4)L49K@Mh;+5K6q}BfM zBvze@`>0t4G^jyYkztLR@p}M1217Te+t0=i3B$vHrv~B=T13Zq$EW|@h3>jXwk~X9 za@@bm+pqF}<-u~n+pR}h#oS}F%gyi##U}3RfBK9M_&B6})SNU+ri?;2$4WzAv3}bu zu@Z_R2@pxW>=)R2-jeHjUC6RU{^8gh__XHHwi?@C3TB^fUk2i(wjiL*KkOM;m|3LdhvdBV&aHUgYU9?qcRingPKUHuxkfvC>Y1ip(Z7hw zVu>6shZ=f;1ijrAK?}xHy zr->3zo_?I$p3k{yjTv-`Gv<-^>tt-D@f9we%~!O?e5|X#;XXdV(QcMy+uyuD@GmX5 zJYR1ArS5sU27jQ3m#08aku|5AoJ=7@mFM3W;VnC3x^2vo5^#!~;cixh-XJufM9rIQshL z=(XA>&K%QXz@^Ainl`NTcJHi%0{sC;GxR(fHN$G<7`-7e712l@**h0~-Hd!-GczWP zB)Kb_sO=$}x@KB?hq?tTKB+bBGX&-@X|;||{^+VneVy$5lsFU)40Q)cQhXur$occH zx$XQj?`B~>0|Tt;Q`+DhoC=It025w0IwFEgNy7il1)#E76>r2uJM=n|gPovFPAJWP zzFf`LyYWg|sOoaGOB#-(2?LK3qEg1#SV2gfXPpuFId!X_;mj(^fHBa>Fc9jM)Q+o} zZkb_}ZDG?P&yq@l9h;EXD}FOTSzD8mK}#i#BM67plWYFEM@OH|YwMN3F-N|P5Q zV>;~6wo~0)L`?Ini$l*hIuv@w|B*3sGqnV-uScUiEgreb+fiEhhcg#+VLQ**^pmWK z&4uBY1~7+_nTIFvl#)h%s{Ve8BMp0UN@)o8ja)P;ki$FDr(#Tgp2B9dLCxGk6I69v zyU@y8J1XjIczmk5-thu0`h~_zZ* zX;fZ8rR^F+E6*>itghP8tzl}zEYU|$NidH*Z!(!)UbUK`@pS`UzrA_r5H?ufK1jN8 zYGaRp4I3*XBg4E>J0D{~`iMGG(cox)7`~cOUNwehNFIL~Slrw^w1j^rUpg?PuKbSL zW6Jhh7Q-@KyW2hS@RVOo2mi?`_%#@mSSG)gNPAmiZ9ykd8cmz@ic;b$tR^?7f~KxY zYMK^Qc!B_pYFa_&qGSZHHjhkwp_P?MRv)5>U`oo;1yyEcY~)ufPKrXAV;^YX*1Q2< zjkMO(vdGW_npUb7qQI+3Q;ivTW=_nrE;Am`itBgfWT`7vomZltoYEdylx8p<(ol)5 z462Rfj>e^vTi{L|*HOVV6%aH1Hc)lme-&t$~@&nvYs4 zDk&%(l_=vF5l5C481pGEunhE~6qzJkY)Vdz(meJkh%Yp72vVE{o*Bb4f;01`k*DRN z+PGr*tOy{Kf$3)T7)zUmGLkOe3jh-Gn?4Cw>y(QYwoduM7_}|K<^U2k&D1xw@~I|P zuHkpvjMEDK3hA&m1LMm-m{?4V4ZNv^Kz0_)>qQKk>Vey(Xnp+*OD8fJMzn!z{)iM< zCBuW$$p!sP+3a6g_|F01*o-Dvnr+Tr8PU;d@}=a(10$nNLVgr*C_9}*fu<>0Q(CuXMWh2AVb`G1MjYJU55o;bhX%2Y-|c%En^`y z=xoW_r#u!|mO2JgiIF(O`luJn*B@7)D({Bgu|n^ILI?9Da=nkQDI_q0^Rep=es@{# zo7*3R*i428w}+D(ZBD_a4bP;%clMwKC#XGV7;A^mK&Ww|9jhAL# z;6u;~70Rtt8_Y#C=hUa=b`Fd!s5lAWbG#W9?5OEAXxhT}YKx1u@62y^<=wfvJpB9? 
zNDR8T4(` znYHruVK;&eZ3G5*{8=JGfPvQN6Nf9|Vk>eU(azj=o_k*Md-WLQgE{BV=kurh3{PDc z!t~EwD!uJbp;fAx-+%I40IdCYUybXP9{OjVSN%U`e7vdqm+yu4{og&bUW`5m!Flrq z=p=cFK+N^N?0hjqv4TG?7D%u4H$=h!Z`%7pIL^m$|P{%)xj! zhGEZ%aN+&=M?nw?2@Lq>9@A$~@8?guW?p|;~Y!&4b(!=LG?p`t7_ zA1MRo%Kl(W)ZyXbmDg3IT-p!fudkp0f4lXL<{QjMBBubRg_fgQwq&_O;__*!n9F6Nq z&DD)i?M2JZEE6X)%pzNWxuWNzH?CA*t=7YCWz{Hz0bc{YAZ$9$myW*@0Z>qhXIZmW zx?R`UBbc9i9a+Y4UjE}zBobYY+cS|drRi)PU499lVCW=Z#zUr{C8Zzit`h*lA> zvtxmtJk2X!hu*(e@=FM=^kJ2FkP~l8F2#6sQ8xKjc)VbVFf^4>pO1o1(;gOo@*$z% zSmgD@;H^(y%kJdf&4Q_gu6!52AR0h?qzibKSmj$^b@oS<7FhS%G6{TdLq6|Q=fKby zeE->l_*Ho}rSiNs(nw*?MW90v z^5Q_Wkou&rQFrk2^j^B|!1{t7bbdq#gnfb0%BG_9Te)~&ys!$BuGR|-%;wpm-s=F; zT(05;#FI0kNT>8U<&q*Rnm};f7&EW1@q&@AbP{xqzf11fga8c%N;9D}74c(56?Ke3 z@k3k2h0!ke>Amx}`!7N^;ZFj8E8#L8n?eQ+#ii2YB7bJ>Xa`IA0IEQ&3y|&*%O0@ptQigg`AOSYkx|} z+J1B_Q!B3dvT(M*M>uOe&D_c>RR}tm_l)77@~6cFl}W&Ezp+hgRV1+Yd$L5B)X8>^ zNmvdyOrCbi_T!!*e*fgJNtO5ULjLFwf{=p)g~ki6qtL^jRtfFlJ5p@j4_ zyRf2?k;Cn*?W*tClR|zB7<~g#N9Fpu{i|orKa8^xlilxBx&@026~(R>#Enx{a*N4zW2uJ+*w$YPcxEVeFtVz z=quw)sbSXRD`j6kSA`V3ugWP&hssj^?Qi$^Srn9uQ281eZbCY=PwCx^A__wq!i|l_ zANo&gs6FAI7YDVbL;raI)N*2^@PzxP72!f!wDxZ=LDsy!C!B}37)ij0;4`s1W)z%F zL;^0EZTM_TQzKk&Ne1AOX%1C z*%J|J8jQ=RXLMI3-jOHVBveq`@gB!DY|(?SIPD1dG2w!tk+U({Hk?NRoBNN*D8xG9 zb0-P0(0u2YNWf^Rh%-9g-ieFbFOH`iHUwP_l2U3gA6slg!a-&KzM>~_6xbt0L(uS_ zVU63+m2ensu-GB2$5$kK&;i=Fg!YI5j2$dQ`Az@NUnPEgoy^od8AX6n+@dU6w zKpiGuM)7~In|`%_q!P`B{~!7G^rf#c{+0At^nYadf2-!7|EbyU8(5p~$Vb4B?l2Ps zkSsQwnd}7V4e*bnEwM^a|Eq=!s9fk8Z-VW*yBdGp!6;o~GUN7p8-ch>E?SFS-Rmr0 zKHdL*en`p!fMY$8tOErRUr@03Ciq#`Z2RTeDow)2Jx;zsT2lqqzYG68GU-1HxJfLj z2cYKuwF&(m%o11|SeXAlF$oPR=vgaDV~fth9ux2MUC8=>>_(6kLV-=%jGRL7RN)2V zS-~$x_+M@Ci~L{4^Z%iO{{KEyf&b!HsUp-I#p4VAP5X4ix+m^dg?|<51OBgPAirQ{ zY6`yILOl6H$oZN8DV!Y-(dH-KPu*C_A`xx^KL&ite=SvzM+9Z(O>oXCrM;a86bb`$ zuB4E1ORbZnDaI_CD3WDB1%17}z2nV`{_Q?QeS9pCyS~0|>)^0(;r`#~2%(}BXt49A z>o%=8Sd zqx5|ui{|@)%`<81IocQxydAr~l*Sy-dFC+$&nk=rEzkF2Bx8O+P#K2c!;WRfnv{+o 
zt6D{c+*eO4ktIq~_{pL1qel*@MF-&qp6wuX-v5Be+$4=d-|h($n9Z35P$!p|Sn!HfEW zs#T^lQ(~TyEfJigcN82)33aKz#UW>M!?e*zH^!#Y?x;{&mesrAqN3~pPi}f53%>;Q zC6DeRUs*yriHAtXjO{B>qD2l}l~8z6p+%OVkIH8~SrPK@q(4cziY4Dq@Vb;>d@l{T zy2nM1Tk$_rSj%A)+a{;!7Vf&oYpGFDXonoMQzR1Hd#yp|MUSUV5PbS}G~F}BwX86o z`HFlcH4A+Ota;kn>u$Yl6vDi5p~sNoWy;Gs#?Sg~-4_kvsCSRl(snblZoB0B2!(aV zq*?Ke=H5Xc;7@UFaY$#$RDHXYek2{qB8$O_D<5g9%tjT(;M)PtmL15~*KbNy+@L<2 zl*E-N{hliu)hkYRb5KazAXNI(Kktpp!4bfukYKPoG0d>4nr5RpLKuLDXgx}v{fNpZ zMc7pW?mS|7P(cDCeTy+;?fkyO=)DQsjnnwbQ%V`Hg_Y+hu;U43l5YPIkkg60f(&V4 z9%%10CR!tpkbR^`-0LCy{RxX}%PSq4Fc~nd+>T*V3?8Qks<6pNK(HQRkr}O38DW@b zym7qKH;Rs6(-HQt*v}TUV##U6D(ELp{d0Ozw#k;dpA97X=l_V_g%s23>G?$Juz51e zXkhGhh{p6ytK!hrRYzEjL!M8mHl<9pJMz^-XjeLr@e5iK89?ipW-*TTwSCdAo4R5Q@^tKqQvJpZPhLp9)qn$YLuD`` z)wD9v;h4FiWvxYfB&Csxr1OQb({3GKJrXU-hX)s#{zG`yl_Vaxg#%sILFT5Ba5y=7F?0Q}FM+qs9tFEx1ArB~85A<8n~*N4a_8$#Xg5;KaHsrkIuZ z0!EuEVdN9|b$w4xEI5kxt_n6qP?P}eD&(9%Ze`HD&mn~~_!mM5$Od$>|Z*{2u zG(O`+%_4Vn3LXUzSO6e}A*)2XES{`)5eG1rUW7oZAnamvf#`>GlS{)2uvygehRfxi z9HWT^eB$+x$7zffR^4cBu7-C+2$ zl0VPtq&{F|W{5byF<&g;KFj-}jyKAzTeD`cXHw*UYUTyPz+}XR%S%B>0^%!((P^n@ zC&`j0jmOAzS+~{65}yev{+T-bJR&4m5|ARo2&Ex8Z|uMu7n%MWaCL=Zpz8P^)=DyR zYfjZe0vBu8B~}_B>yH0@3whqG3or(%xeOxiQRyT`^aoq$-X7jzW}oRCg(cirVfgVq z4rkmcx8R_TG-N~tffxgsxUE4S{u7SdpXM<1j;06_@IqWqLZ}=lYQ$ty^Uy+ERYxRl z8~%e>qps0in!>B^JFzz--_{-w!YFo+5GYihpLcNILM{U?i?Yr)Jk!tC1v2*&U|=0q z0P5Rifw8f$QHspCEZkPi_o7t&)~*=hUsYHd;LYJ0V=)y~O26^epl&)zljyEUL^qo3 zKI+LUt|4?M=%AoNNVY^6pw3cJ!+eN6#*N&Ul8N@n_vjY1D3>Js9>;|%{6fZpg8DP`ICSEDNs@Mu}y~2DC-_kih0fitj~^)1nvY53+`7 z-vd#0VwFxDK4P9aka;>ZEKJN}hf^_sYtgCSLZz7{vWipah^k2lGI~lD@$Lsdwawu- z;oqNn=r)Hj{!)o>_YQyG;AQG{Pi;EWI6}aTq{Ry-*yP*!9gKx8OjB!YQ15vdGvvHQ z;%KmO6SB%M^gtTNlk=DD9>|Ln+z&a3Bm>Jb!cKueu8v6odu%K%pVVQnRqk z?usEHr4DIoH`44ewHpl8gj3ul$LSXOXa2NVu960w4sOmWhKpeT5Mx%=sxhiEgnKW* z&^JEk&5oe2`X%KMEt)5oXMw_Q(sMdwt_Yo11Y#SHtPF8VHr9m)*C1o>$%DtsHQJ-j zU_ZB2c5vV2`X|eL5C2&IDl3;&-SuqF=rHZ%#t}Rj#rD_uyt98U< 
zggBcM-u0CHgyYa^{nfX0ngyC#K=v0db|X}2#z^I_#D6QB5rC?kJDyUplzo##Q#-jY(zU7pU2rx66 zg>-EJkPHS(u9&2N43eW6F;BCNoEV)#X2P{@w#_iDnAp${xQyp!I5&=*jrXDIPr31& zusTN#g(_quhvRKzD1agP)C5OMP$%RtH?@N6ct7LF)uoHID`NqRMUgJDhAL&qhXE%E zDkl9-RcdZhf;j&UAtIN6N`bgJ%d865`25M51)gM)U&;pLEdDhb&()Z3t6F8>q%w#5 z(^ql}q0Yc^v1%R(i*^X9QOKC#@LCNHewE!^O$19=83S~Fkw{TI`Ve1eQ)ou79m-#l z1XUo#{6}@NvUyGLAyTP#?!fUl;nGGpk1@Oxnt-t7gcq!_xKIZxii|vx)Cw<&tRRf- zb(6CRbZ;)1AK$2OzjCvqc&cu9yQ+EDBW|L}I5*nP3{RkF$*kqo2q98IpK2 zgFXkol&?$YVn2K^XFFvw+0VCCx?S0L+!G)gKXnk23pB4j9-qL|x!DJ=52-%Ic|nLO z$fL633AQ<)F~@AAf1< zuT$~V6Tg$kbdT%wL3d-75CB7{UKg4`TT~K7lX*LSR{#ed^!&U5mQMypSIIyk=?#Vrognua-&v0&?>} z0f9s^YD+8r$9wsa>&7e55N@cTa>clK-Ys=1lqivFq)~xk#7T2rIq_22k zq}?t4ahO4jb;h0S<#^57nN1Ee3O`GI%Q_LgvrQ%5uXp&PhLf^FOcR%E$#u#GOTs&| z+GA~51H}wOMcA*mlpUroXBP@dkr{9webJm$nL$#+Z8BO(pNp!J?&zsi|Y(dtQJ!|7dvT2 zg4Cvxw38uCkqhLqVq0k!S*S$Sojh?blr{Q9C06`EbEnl(9BS?bB0%Z%rSbG3Qk=|@Q3DXg$iQJmYNWR3ti z)DVw7^1OR$1SugvI7L~;Oi8SUI%@_(93j6Ru&>I-zC6I;5$pb8k0XtXB=QK9cD4k~ zH{J1oA&`U^1bb~VMN0sWZ4=gy^3_E*vu|?dfW((r9=OaPQhFFkJPi#NKO?Pf9s`mS zfYlg?{jy+Uz!_FeE1`=N!5t_Ap8O7;X<>TT{PSzs*$DJp%pAkOjpARrQig+f0_!4i z#b3&e=Ab$(dB0Y0QC#D3e;`5ZEN|^H^Is6-N%XeJgJhBBuP7e*#~ls*;&?)Q<-+RK zXW|!Iok3j$M{MKnd?;V`zqtUIsF1KasEL?~+9)b)xaTF9jV%(A+%-SFg~EMN3=46B zPP8B*1Z&-|x!%{@O-W)%VuW5rh+iqGK9!O9FTT^l44iIcfTKw5D8X8al`aV8q)GXo z05BP*Z1KJd_S99#{}HdL}6$1IuABTX}01(nR*h}Ih$ zmI7XM} z&V`yCWqP2nyPey=7;=2o7ve|E>wU>bokXfIAJ+vdNQDSax2MHJ-w`yAvK( zC)?0NsPc`)@VaM+2{GWc;p_3h=*vyeflHk*G)a(uO&MkodH7Mh<6}r3g zTwu|QkGhFdt1y$J0V7eJO6qWmH`Ii%YRU45>x3w}>*a^Iok?hWsJZ$&)i5|Ot6C(Gk%w`!AhEYzHn9 zX$)X@uzVg-yEwz!mQRW~fBg=2pWLm8h#UBV>YGK~ZV1w+>+Wv5Bv~lQl1aNC;ppQA zz7Iw~LyJ9j0I~xT~DDL}KYyLbZ>QY*8IUO|y8oOWp7B58oLb-M*L`uGcFAQIMz+;&c)4 z$bAH2Gv~&@cKWU(wyXk635+kfaN_mf<%tC15-AYh1^cf1FNi|`uJl}$!Ru7C$1^UaT^wA_u%rv#=7hFMQFB$|{XRRjA_`PNo53pjh z;Yr9n&we3<9b%BIoSG1gE0U&O;*%paGM?TkIu`tZN!lrh1l4jepnA$ec!L;e`Xcqq zv@W;kasInLzusWkkD*gG^G%yxa zLy;yyacuvVRrr?$^|3ZYoau0!XX|ZoddH5iNx{5Z_HuQ8i2Dl!EpRHxao)>g$s z$aO`Gpm?UdS 
z8TI-rJe!{J5`*g4nG#EAQbhSG%?qZ@ch>apV1QBT2=RM$Jc&f1Jse13NriOf1J>1-U!^AGhH2KX-*iXBidqYw<$Cc40Z`32T~dQj{s2N(_lC zOkoIxob+C}roB(DG+tqOg6bE=^jn|TSvOq>+A3aCNeOI4sb{&uFAeZ2@za+AMDOPY zNX3*Rs~n#@a+q|LN@v<5zsjrgw8qPX4sV5nf=H@>7UD4hB(h5G2thIBKww%N5kyeP zb48rcKLESlfA}6c1+2`gG71_bq>OkGdmgulIEwhecDu|A`?buTL&)fCuuhhU zOJ2CgRRcA5j5A}qe7K2X+1c?}#aGRFw3b`gNT#TWq5x*W94&OR!r9TRUoc{bBFc#e zWeBwcry_DuDGYX*AeX59U*~wp^4oQvHS0O&N^L7oq-m)@xc4hkO{katJ{@)qk4)yN8{?W6h_uk#rUA?QTtDff{ z^TBW499~^9H6y(3u~;;rm6{ia^X#cf=ZLD|B8yBSaP#PDuH; zb}xeY#{uh&pSs37C5>Hv+6+1BP(qnf&MX{u0$icN|gbbAkBL{`ae6(;$5Z_ zAoXPy?i<998Z@2dt}O&L+reckM3&jP?18(QiWG@%SPGi_zrW(mFQS{Z-F@-Hk;0;# zbwm2-kiw%XX{EjbQ;1h4x&L~)uaa?)8i!t&)XnCH_G;K0i2o#~Auzf?Qi5yWlh4+i!G!!Qa{6z}z&dVg0Fkc(XPt8x9J~83@ zo6;52SqIl?SE6wVYHO6kZ;UB$i7wzW~ZdgCqAs5`}Hn;|2nnIWqv9_y{E7XyIFvR1Yz1@_ihx6rC3Ao5!A z*jzb8=moM~4emd*V#~d~j8OJ~<++I+a#b49@%$N!(aMT_?Puicr0nK-V~LJ&UgNq$ z7U1)gd3u}rXnOD6{UHm$qGO=5 z`72#Fq-7Ud7XI-`VA1F9W5DC6Lz6x^jnLqBe}>@GRfgAI55Q`g1g`Vxod7_`xB?u-AR0X z+nfz7t7$Iu-i(0Tp`9s2k;u6yDUT=)zhv`>8e%XW`br3iyy9uK!L}&UB-i>Q_s&dRxa@&B zJgetTvVI{rD$;auan6Eu%j)3Q2LL7qC)>E|;WYjPn?bXvnyEW+qidECQk+>8~mKZxYC5hc@WgGFTqC|U94%27$=51v<*?udYG zqoBYC#j|)c^cW08mf&eZ8rGK`+&9=l#VHlbLj@Q##4D4-dv4|rF|OpmnN4q!R*63e zEA4|Je**e8o`Fz`4|9PsUbidHD}=_|l^J%L7L()|5-*j^Hapl!alU3;KsaC zFeL^}0WpH96c$pSGmOBJ2lXNZc_Btk^P%Af*jrQ;goY@NS3634r3F9Wkn0?z_6qk< zT;JBJ8&FbBPt4?29BZ?<+rT@jh;JJsjkmWXB_W=QHO6YoEUAY#1fCxI#g;Ux5AwfD z-{2^<8>wU>ox`e4OVosZ?|4ED@=lN8=p6Yn@rBXZM-uM|*Ammfj@RGqn^Hvu8{fiQA(d{OFTBVH47af`^50{+UR7k^4SZT`fLLSi@nTF4)- zodMT<57NMFI4%u#bS}!1zWs=u-u7Q{TYRnt!Y@Ng?5{aBQ_JX1W!%fJl+ee2PEp731 z+MDIPUnxvX!fRwj*0zfnTm|)h7%0^XG87DfjtF|+k;IldkduuxLx}kWn9X@Wcco`{ z+PvY`z;C6Xvszdhtg3}~R`gFZNF}L;xaMb9LPmL9qo}$?;v`iqH6GI5uQ{)b+l&U= zb5+B(DCqcK;bv_x2kgm8COP()*!QMxic%l_ys%V-nmxNtjqRsMF`Z2#YzoJ%KO>?7 z?=VD->HV$O3&EcZ^|vIb#$B*GKNZ=%IO7X=4zXK}=h(@AkmNUR9TKQ! 
z|Jv&qroh)JWyIrU!-yMBAY}OZITJOi@J-9!>!kDhZQh_h)I-K$*=JCBuZz zvyob)8u*-e-@kW3vf@P&@y%JnsF6ESJDd@c62RVLmgH18NtD$6?7@^cO#BthDe9)w zAesvU0}#U}1hzQl{-RK(6ecDAAWc2-bSk%xaBh9mi2LJs(dE#A_Pw2;x4#uPTfD@T zhz29;KH)_pm1eNudq4v4nnOEc zzz<$;?~{Tpt&3(zs44qI@7?Kl_*e3H*VW^BDhYI-vVe_!R^1fAkVqBJE+bW0Hq9)3 zUCPR1qFT(P7}7GYpxAHhCdY5BxUi%!H=MH`u>;;jTNE5ISG+OZl@#pF>YmMz%VMse z2hQgTOZM!@Ugu?&)W+x4>W+M8f4hbJOtu_Cys}1Z!P2Wf-Y0ivBT85+K$}ml?x?)v zU^i6v9s4iuGMP*W7j{^JJzvZVaT1vG(sXsxO;`XSN@teSEv(`4o>Uk$O61)F#JMe! z5eE9Bqls;SKbn}>kL>V@B7FQ~P(F zc`S4^$tyKeOe@}gG(^)inHlrxich=B$Zbr8<)2t9iny6Hbsv9W;mdBl+p7UT-X|&1 zmSx(!KfSzpsdUB0mZf^N_s~lZr*?)AnLx?ezLWy>X)+Y+6JJl2AR@l(Ay%AZQ!Ni> z?nP;v00B7ry+_&u`vF#>kl8p&q8Yx zO{nJt>vOGIs&KE5(e>Ohj+6%;MNAG+b1cHr09~2GKSs}d*+T8fpQe$#rMm(GWvMY& z*?TQ>LPF@xsNxuGGR4W{=d$*y)rVK?cO_R%Qq&-!JK7m0=ATZf_fkO>eq9p*Ljkgu zd#3qHGGAR*%@DLS*vxDZ8RfdG!bKM$mI`(7=^r z)w&3S0)l>E@biVQ*sf3ScKo%))eY4#?BnE0e>k=yNBV>%jIIyk6Ju*#qvDzRq~9V@ zoNCs;KQre<8(ElX(k^ZX4pqsN9U5~kDdEiU?g2-Lu9u0I&g}mX>eF!JWZktS6he5J zmaf~Q=0oW-A`Lty_|^QWviXzIO^uKK$y%VCmE|PCh`oWHiAAx33f#`Z`V7&=A?G2V zbyI7@J2*oD(6dWeJt27V4c)~L7(~Qz9eAFRlx_kfDQaUCC@S<(utT|Y#cBiexl&t4 zhbeEKJ6zJ~*ex7!StTk=BD@Mek)en9)_NhqM&P3lP11A!d(;?<5uAfpuR4>Z#l)CP~9jWbCS}9JEKz7sBa9?VO=l z(9Lt3a#yznXy*7ChJt`;Am>*R7vxQvcYcT|7~kw-PA$9C#3a(+(n zyF0%0iQTM9;?O*^@NxdZ@weiZre6MfRID5%v58d^6a|~PSS$}KpuJ#MNA($ze9G!axHVU&P8br_hkEHIJXJypIoi|9h7Iex#pTQTGxz zJCu)Bp62dnf10VI9IsvdA5V#PE(DsGq7qJZu3W&(#v`48Y;|3ybG$M>tz+Nr)QY{d zLldf2DvP(a3)1EmhN@u84N~tjl4(HPs5CaqDQ^7n^#?K!Ou#Cm3_shflZ1%q_UriH zSOii?B{-3X-wqkMC$-D5#&?HbRubh#E2M*~*Zu73>?QG6x{8ip5}kZ-uji8g!Nq=4 zKR(YWi&Z)@g34d&KJuM;l(iQ!k`4)uY57s^ij{tEub8~1?(qrw4aQv^oqB9(%nh zL&Uzz)Co1%E`S_&hpuiNE0LEE7?(mpL?+YJgLu+bHpS?gW+S31vlsvC| zyf{oNRzXQBcI9xo<8K)55`u1uZYfA7sGJ9e@-}$3ManC)u|4&9|M-|+Q5PlYO{s>J zj@S=T4I%`tCXrrGq!XhR(`^GOBcPd4xWwj`Ks=HAQ$w8e{t5Rn!Qyc7rNLD=eD1G^ z24S;o=jiMMN}mV-g_n{gn+!w5Sae1tcqG*vQ4L$o`SKaV`IwA+71i5wV|TQe`>>cB zd-LX45HoOWe2v@Def~a!E@x!*aJ-HO;PmW-$&lTvqu*QSz`Avf#*&MEiAg>M6c_j# 
z<*>p^lwc=`3#6iOI%s+a*vO{TRH!oGtRaV7y1~T_+`p;GdW2>r@ z^o?m!RLrjWvjff^(DF@k4)3uAz5$$$O(HXVgN}NHzl_2?l!|0qFH_pXHb&=bJ8`5m z+R*2~?PnINB~~AzIH(eY0U9OXBYJuW2i>X|FQNYl=)*nm`{VU)h_6!Wl)!#w0{JN2 zkq7C0Bj2=o@V*XfF_-(>XAF}&(UtRWmkVNxW?Oa}+b3U)3zqN6>w6snuD-^X6hcmU z=v%L(A0z#=MYgTwAT`6?>u)%C614O!)ij3vw+~|@X)<05fsl(cwt!tfe7j4yz{@!4 zy14VLb12(}cl8iQYYyJ)&|WMa{M!(Zgm!)3`&`LJ8j`=tF2AF7hqvDc4sVu1ImA8? zV8_R>7A<6gO0bA#HUc9~7og#P8XYHDjCMJU#=l~!T0^&GucNa3A$DapL*Ak=O!LCC zM3XIU+uuEP==)Y~{J5a>8B%AzC+zXt(EQYaJOJ!?Vc_(IeWsmY<(yyVet~!*H}I`7 zBDv}#T?<+4dd})!I>?&mep$yYTn70&snS~67psju!ly3}$2RdSUkr`tB)TkkiE3Up zi3DiWMe9TJxWvksvg2f8J)4LhNR@1@hKhK`hK;LAhQPh()=sN#4-X#@C!W2Nc&yZ$ z6Y2mI9#NRbv9e^@xlIzU%wF-7&7j`9^(_wQjK0C^2FU4h#Y)#PZ*r0Rs!%dRf}b{C z965p5^V`K^pnpzEO;<7W6j+;Y<5c1v@f3%X&`M?8LQ6eDbKU*nWQ`O8eWr|fxk5DZ zd-gxLRk7~4+3)v``tv7C z2Tp9=$b7QFd+J!tbH$!)uH_+R1-%IWk)tg$_eKv_?9c8U5_TM{_{3bFqhiqOkAbhEgV*?M)6>6dkF{TqKutjcT#9oYW%(D-D0_12rq02xu3Kipbj845 z+YI}vV*b8_A>@0KDry?*?2awr2x-Wo`jr&E&97B3(4(v*-p!<%DqcHwdfEy#l9$q* z&{)ni^JbHrPp>8MR7=Ne@{90kA2jDTTl}`Q{)D}7c6~p!PGVQhdU2#jQtz7+(ktna zhL}EvI?7bD`zG+jscxlY##x?&BRP*HY2TaGDPRM!D|M@;cS1WPpy7LyPgq>Pg;8ex z@Lw8?R-1u};BpHcE=soUrw|I7h2GP~i(R^&Bb*YGH{>8AH)8PuC z%p^j^GXVzBvAsBMQa#66N1%YN$0;2tBIAQP_r(drVE-2&tA3Q%gATnkf!^aH6fpQm zpN^3;gR!uO7xW{gQMyVq_EX>H7GL~X`#7fKy9S;8u?G@TOWgByK9b(=z1K)qN-@#< z1%3YUXC1vS3xQ)HUz1*2s_S%ftoqaCV#w7fd0a+zU4+sFG|a0wjLILL{k0j`f^XSR zw?aG0QuH>}^-=ye7N9DbX^#_eD@xXA@HNxPDM9w!5d0TUa_K@>{ft}an?8GK!!?`KAr?M5LFXeQTi9A67}6}Vs)L;wPP1w zfv*QdMW?6`!il=uZMymS2^UN6{1`<7Vxa-jxXU)Kmq!A9v1g7pZ~u(*8}@?zeJ^h# z36Z5j_vz9vLJ8W2`>`mx{r#@8fYaMUHz+Kf%NYMVQ%Jei%x1}08 zsrKpT(0Yl`+RV&2xd#yQGeTw>wl_0mH_A9S(p5#u#4KuCofw`g5FEo0>eXis<8bdB zlqyebxkg$c&G2$}4?-LDu80$U+_tC)L(M@l7emg+2_ob?#)uk6-`|%;T>^wHN@r0_ z+K^qw=$p|yNAIz`uR}cc>#~?M=O(V78A~ohN}8&co4C$;C-pS@U3QdvN;`YOcE52mfS5$_-J=|p_Y-N7F~12OxPrF{~Cw07c9OT zF@|IYCq3BKtlaUrfauA39VO)Kt-v|P;UY83Li+HRE5@kM?Qs}Q5E`N%o$P8xSt_%= zks)1mPEp(Uwxfa~p4h7eMuJKMW}AaU?jjb}bl#D}H%$3cEl&iV(6i19&|jQ{ZMNT4 
z__}lUkLMdD6xoZ%)#_}7y7Pe92hn$)>pPCnKY`m*eW44`_i=mD^BO3Y2UrNq_2yaX zEq%A);_Q&|$|Vk?)LZ!EmIw-Isc4CHUM}}j0TTIB%w?tZ*AKTIAKI(}fMRx4mmzE+ z3NlF4UH$G3!ekkO#Ys#AoV;XOGd~gsHDz~^$O>A<8BydPO2-OCmF?d181CxX%0OQY z$zLHW*O0*A+6N@kVpg>jDR{@#RcckcRM(L{=hLEIIh`AP&e|FGQ+G%9Y^6b11se9H z!zzQOdQ8sFLej-QlBHZ%>ow@}_(PsavZirZvXzYDqYQFpVA|QO-?`agKv~S(7I0e) zAd43wd+(glIeL48-}IZkRy%2`(|aQcOik{A;&DDr7LK0EETD~A#a24mU^RPU4fl%u z7n2-<8Ms2{*Eo{4Q2m+BLLQr%%W9GA>R#oYcfYAgeNdVTm9JsZ@Y}+m-W{8~>3u|Y zKPb*Yu;Ti;?dUaV~T_>nyAbsoTaXQBTMpx0($iIHAC~2gk@hC z4rXvwRxq9l@wU>mj^d<^9EyfcFkzr-(>w%OV1cqMB>Z!98=nmQKbZHIA6&N=Os|&M z-w7lL-GX!D*1uk+jXs0o6jyavOk(4SJE71TNb?{lj^b(%E$7+VusAxiZwHWYD!Hu|$Xmh46a<>S!^SyB}VoOic!*SGAUZ_iZ~7RxOQ3r-F`|%k6B|ptul^{}CJwwjgZJTqA!&7Qo2I8#gx?Me@J4k3 zw^XxZh5vXrdk(0cS1`74XH;{jPh=e=!Jl!R)|3As52N)7AMnX)DDGXQw?5^{mddrx zDEnOv8JQEl$qgbNPuh1t>SapEX7>5r_b3zJ@E5dcond@3ioS!M@yFFX?hWYjLj8Dm z!D#>8MrW6zzQr`HwGn2Z&&Z{Y6<&spuACZ2l+y*Z0$hf*Q&-K(-c%R$@LY?NdtO*S zbC_S0LAkh@d)OBvBNK0`h=G|9g*U^I!{$9_0aC2GRo}FRbWx_NWCH^;EM5DYb!0(l zOGf-d`lwN23!9JprJ~?8B3&WrNo887vd&D?N*X|ZE6YG9o|QgqmNqtYVrcN0WGZ5OyQro zdky<=r!Qh3F;o%y^R2TIH6fP^sX>5y&S@Op(Yk)_D$H!~@bgiT5Kj9sf@|;F_p0{& zIH$_EN@Hl4cH;Zi<=gQcj6n#FdPz^3MT!eQEjoRJ3Jem_b+!j?ZqI^$-cA#p)W%27 z^-bOniQ3wpE0?gjf^crKNikb)Q0nK5`LawTo6@+zXEYB?-?jw>GlF_njw88ObdIJ> zLSwVzBm97C-_a^fyyPijhVzc`?%_9)mcRn*yc z2{aJ1Z4~~Id_r))S1mc5;n|L-ax4xTvE^hYEvMrYiV@N5A;Bg|K zI!Nh zryiwfr>h~ufKZr?Vj)_#Bc0RWQ|Im3UhoJ zH82dWdRx0fbM(scG|2Y4RWl&{ad19aT$m0wzC`fd-&nZ3tw&5<9ldQCVi%Il_a+yL z*}t&G9Dmneo@*}#2qgsgDRs`3m{rv@){Ni85PCED2jOcB3`+5-?{Y6kw67KN1mALv zbAov5`z)o+xI!-Aq7C1jdqr$5eW$6Om@`eQBqc^&Dy8s9vzIY-l@^B{{OIWmDI4|@ z-chTMeO-ggNau4=!1Z(*>y8>9rj5(cFZ#?}tNXnQb!*JbWz}@|UTMLI9ySM1!Ig%H z>~5Hu@$@I7c5V4=uD?*FomC)!{`)3L$mz%Moye<%SN-&3X(M(q-7WU)Vb-ZcPFFK&tgf@>y3gX;#aQ8S_>Io`G#+M-*5(OfhN-)b0 zt&MvccHfJqbEvFGHs*t>*t*4*s&=!{Xn~XiOj&m70Y=3`dweO$Qv)!#dl%s?ryn~- zmF5as?5VV<14Myt1I{zWR$$%x_4o4}-zFz%)q4qUm&o(H$;JK*oEsYMPTL~sq7pYm zG{pN9jYuoxyhjZW;yvN=b4oc=GBIcy+a94aY6(cQJNzQYL_*F~r<)Q>@>9Wod52!W 
zvB@S(i@?!dLzgjyN_@aV@W=(4_bxY|&BWfI;4`bnmNdEz@U;Iar%Kr80gf~V@mALL zjN)|CVa`St|0+84rdEg*H+Ts-aMq+M{W2E4u|bvSam*g5Gd0XHCvP=Pq#6tqBj)t? ziaBHaTJu2j+pxLoZeBfg`g>7u77Wrx{Wa3&Z~x^MF`7wqn0>e&m$Ilx&^mUn{HM{; zsiC4WJbd19t8bpQAabYa>`6e;kj48WI{XbqC&P$Tp`R;cu1p76?c1C)=JN(~^F#}d$vHE@ z#3cf9#WS<&*fP7DD$8z|&sWM*PokPU83yePYYKB=c9TUqESz)m<#4H^7DfT1$9{+m&-NPkX zNp(uI0YJtS80%fZK6YW0a%V$Oew|MIW1@NH5K=n*KGv%(9c?}C$prbGidndA+D)8$ zQ+jljXx?w?tl*8(6|XJ{`duf{LVuAS-3bXsTujw0BqvM$Ards!U;Y^^D~{6}8#`hN z3;x}EUxHHR=0`oF! z12UDZn`B}maG#~ZA4$sXgX(ysZsN1vV+sMR(V1LL{&w%%zEw;@_W4L3p-k^X*3vqD zE0I5E5K60jfMFO9qE*kKPivLu#H@+XST!LOFrZRmQ>)O_H7ZjZwJs`#L>;ejcDuu~ zI*hFnyDYn8?ej&C@=Y)CD&TFtzq6tEC&S;jVb-Fen$6Tgso)UMKf9@@H*)XUA9Kgo zs4b^uy!}|roDsu+mggzB?%nq-k@Inpdk;J_;PqmL^amRD?!!}??>3iH0cQSBQj#Tg zg0)$o$j0E|AEC$LziuVm!>|+ED&x)`Pv+13Z=?}<;wt2ddZ^gfM(ndkQ`|m1ZeHsL1V0;2@*MXGpTW(4{RC%wlAsQ*q z(>TXpNOjULFF6Ur5Y{v-d&%2-P@R8|jkG0f#~(Jcza)*0p}^p(W@gps_osiM$0TpGi8C)cA+ca9$j{}vE6Keb5b4-3C6G(~?U=-| zCy!geT@>iil_M6A!NW1$(+R~iXz_?TCMbf%C=sWMXba1>o0}3JJsfBT_N|gnlY7VJ zAlwW*1fsn8Jd%5wuQ`+2=DWo#THQ*q>%t%j^u=8U{Q&>g5y^Yf3Q! z0J!lg$ez%w!HCkf6>VZ9K7};*j=KS2I~L-SUZ%6>He4nf3ZC;DYZm+hx9CQLcfNvq zQ)U#7`~x+n4UO(@EQ%FN)WZIKH8+_d&f!rV1C24#O(gWm2=(*d*LN^f0*GB-xJg^^ z(6ZzDnDU&v#5p9HOVt=dVTgwzK_TZ6eaTS;giPi$c=f^+)RG6mD;6So&ivQTYbZft zX1poLI4Me8F-g-=Awu)DNxI(t>fKr&k&pI z7UpYe?G^heu)*0A8U?)xbZ+U;+cRZTWUh(o{ciK)M;SAdU5;k?fX5Q86|+>oFro2A z$r5*Si*HW1ZAgehy0zO#+0s5I^eHt%;2tx_uB|Scj!sPeAla>hwxm2s;dqxhMw0-R z>P4UQ^(7zIxQ4^fqj`gG zKl-GE;=-bp)0&%$%>1`;JOB*MO))%V*j|S8x2e74_Ik{)IJ&=@1#VuST<+-;0z|i= zoC4*9Y}4xDj0%jLG9DPjEBLqV_)V7Z=%vHfrZa#+JUz*M7Jgf{2zxEHJ!N+LIU7$3 z$o@Em>x}4TryD`}uDbazJnfQC{24TbWx!V9unTl~gxZ*yjncpij@7?+-#uHg+59o> zV1CDk6tj|uCCDCpuTauCr|`;F72mG7U6gG=o|rzmu5eb$$0uUXK!OWkkp_VaG3rlQ zdUzSz3Em@T;e6Ls)#l|3u30RKus+PG6YQC$-mSt&oAXF*yMgPgP7D7%lIl*)9d+eF zO$|K$-?dH$^$)j(NSLOt`;Pne1l+9PnIaxy?j@0|s$h#;;9#p+gs#;m5r?gP0wJP# zPE?XPks_lu0&MQFD^Hb? 
z_2=ehxrx~yVy5fb5tj%zp`k1{GGiG9N`Ge-UMMRvKBE}@PJRv zROo?oQ?cCXS|L`tlTq4W zU}58*a?|KAkIH;g`tgk=cEt)SYsaWe?cjR*-NCIPePvGDb6x+jV^t!v82vBL%ml-e z+K_Z4N#~?ihOuIFSgPe-KXpdMNHw4gw=02<+PtpyeUz+SMLhwr<}_**N4?v9=4!jI z+nL8h;2gdFLdWR-2s2ALVAPv2-nVa&cX;aJ$H7=6eppM@Hm2c)UsXYT$-7DZOA4iwzBi8~vly-7w zn6va7{~<;3=b*)$?GNzr6dw!t(lZSof%mVs^rNIqO&$TmN6!Rh()ZlTOX)B!yLtZj4s)oGmF)U0- z#HnmcnOb8UGrA>-h3zT}g7QGYZI&BfBBK;1Y6wBdKE$>qe2A8HpnxX8z83)XKAa#Q7 zCl-t87k}2L_U2H|#ttizW;86U+}BMHC%Q+F&t8m}y$@y7GdG_ruVA)h#eQH`z*0|t zhhWlIn^Ih1o2mg~jl7_fTNPD1XTSUQao4I-(7eV(L#-R53k`G*3QJ^k%FJ~qx}k5@ zOaCewackK6@*ZQr#@u(Td1c!P%x=rO<7Sa{_6Er$O;KRp#=x-PtIg1{ui0; zS$%32hz-PRCgGY6QDQzX)TEUTs3AJ%YAQ0qJfMqBb=0 z>{hlx4SAHRBC}z9b&7<#!`Yge6j4kX0_G=2vm{!!Ieh9R;efEs7TRQqD!sm@gk7IZ z-4Fx!6p2Gw`*sL$>2VJh^_47m+URZ5C}Va5H_ zaOd&nxb5}vn3bJ9U}^8kO&%dB?&HDa(gxU#bH|(jhRvB>@sowQ)#OJpkGZ1O)g0nT zy27h`f|3XYZY-v?hls}tcfUfqM0a1LJ&t_;T!lH0%@HD*?&A=#|MsWpy#r^UufV(1 zKQNw(Uut}G(?=;*{TNcX@n@o&2bWxAP#8s+U~Ym)@!20^#W3c8YKnIsD}h|<`o+hCO67qo|_~@tk$z+7EALBkZ3kyGj)z~IJ#RWP}O(H_?P!6d{k9H zqRTY{ivE}p*%KdSn5;9mL))EUx=NRC(xlCkH)D##KJ82*GNo`_wpbS3tPwKrw7@;~ z=4Fu-ax{hcTTd90Q~{5RDO|rfuy>ov($ozqRct7lyu3R5+dfM&YnP`n&+=gN?ZE6+ z;D-p5xH!IaSOx+{$(gZ9*%>4k%)=&H?C7FDa_$X1KhdZ7JK=O;ON?fZWd)9?=eAW@5x4-uN zT4G|KC}fx^#3LqTjUWDtNeRAn6|au2VL|THut+%&xV&?iATI+uc)0e01Sz(vmo8h^ zOfx1gh&+env{JQMV$>JkK zG24W1!i0|%wMLd8b;!nfeWchQ3~xOWgigmYtrmvA5H9L5B`KDxrnAIFN)UR^7A*+x zI92yDDwvR3@c+@)`CU?rgqfuTAfJdaXHAh&vl1oQ_I)ewkOE6jp{b7;bbLd55r^e?ZGFFz!j z>iUipLQYP&rmtgtXN04)wjQ;U+US06PWj${52zn+_Q=z;Ncxo=T)c?&-plPcv+#xL z=m;sSA5pu)n|lO6LsZiV$qXqZkSvc(X0G`>oHV}2HG>$f8`4%xjm=5nesI3JbuqRz z^{MdlxZ3YMCawCi{LzDT8UWp>-z8k7qJ?&a5bNIY@wjSoD6RZ`Oy@ls4F2~uy7PIo za~A}C1_d6!bYylzaJRdtuISOj`BQY?$mDC%OO{)kSa(`>DFQz2Zh37+a340r-?DfP z2WldCc?E@@vgouvp6pQpLlFl<9*Gvk5C&+!E^wecj|XVCOctN&Lq+quv+U3FuDUn zhz8$|a&wMbT|3K?I|H`NzPr6X0@aH{9UCCR2i`D{yq{SlG77Rmn3fljLCE-7nCBKM z+nGZUIa(BSc-TsTUrHN3M59D2g3hbkAkL9Y8z1Ed3?*_)8^S~;!$e2Z72v}2YP~Q7 zAV}rE3s<0442}0(5Ba8NT@id~Qv8NGn6k%N!d$t(23=EB7ldbCv|)Q;hbP_V99zIR 
zbj3ib2^73_2FSs9Fzp9Qe{bKJpG4tqDuIWH(byac;1%$&eaIO{yw||T#!fqxsQDX_ zGg9(=pZ9R2`BanPA^8`v(N}8RUw@S2=n}t>5~}@gEI_KU zCvTb-tuj^dr{GXlMT$^K-Lxcp;zKmDI8a7G3LPijp@uJjR$5vf%#}E4e%QvKp8RPl z>1|$uBZK@8F`MpUtFb8NVbthvR5VF)I6&Vc&8^DfBe^;Wk}QM=`8*M*8eQU?RV-zy z+h=?wEh>3Y`wQS{+u8A>_uqTFPL{fG*2)7_Bi+ym$3lpQ!i+h07J_b)s5&Mn(6b}^ ziJ>W(7O&Uq5vcl#lM3lhr%!GMJ|of$b4S0LQ&zM7JV4DZ2&wx1eCZ*fuBG|#+>P*= zbx)*z_==&T-`h}KE6Yt!I$6}m-q)onl%u$5s4kXu6eK89A(t${j3`g-Zwnre*wdOq z&x);}Vghf#XF={{`C^TK-EDl@h}~pj`jLBA^(;dODS|{lJe;q+a9$4s8{^zw5g!_M zAcSbS6dVzK)h!Kd!*WO7=M`7OgJKVhxS-bpku_}zb>KZ1jSK;R)E}Xw=8V6Uwph@b-S@c$BB$JMBlvE@2U519EeRfMOhboisu@ zECTt&ERUm#Ifmmmkag*urzXZXE=Tcj_Xl#~gL1L!1#Mw239Vur0{Yd_Qu)Kb!|^9^ zw#Pyc|L4j_!xlmOw-gGNKv4a+gs|0u)vfVg1uu4~3%vi^svX$4Lr@h|uvB+$Iq4?i zCE;>dm2Fu$prpjq)_8)pIhYa|b{P|5i*!;)2~cI;T4Tb=sDv^@!@IdqlH$;P)JiiI zHNNz7r~O;AZBgptIoj)&@Ad7*4q>5i9wW#{X255kBmPCVsCqs$pkP^7-#TT!mEseQu5n#_*(BwMBiW-4Flbz5hhuB?^1=$dFuJc-F94 zI&>#E9Xat z+}-afEun>h6LB;d=CdVB5T~9*xkrT<=6~cLd|?o|`GBv646`R?KR}1ho(&(p(;99c zaLjR@q;AbhT#{zCwX{>Kn`{1)C@2UfA?(Hm37;K4c4pK^&7Z02lW8Dgk(5Zr%0!-M zO~PpVd3SSXjnIF`mqYNM@jW{7EMs5a?DU22Au`w97HBoim!eHpDkz%dL%eYf*ZyzL zZD>(@1xVA*NvSDw8+(fXH_-xQBhmh6uJpK41(*K&)PGye^l$qqMF^?AQ2(cGVaWOa zm3mw+_1}SD#$c?=f3=V|oQ%DM={D}NR7#cTaR+v|>X*=0nmWhoQ2x8aID}SSE*W98 z&YG7*C;g`QTn{S_>-|`bxQNFS#n3K@&<+WQ*sh5 z%7cKf32Z2~gm7RQ-!UgqFxgA?K2xP6_C!d$2&>S{(nV*9HOvo{4Fg6_k?ck?Vo|jy z``+JRa1axAhO;H&#$3*uoz;^n{$fDI2}gZm6tUzrA~jK$X2-B+jJX+WQ`;pR7(Z0R)8nPED63FCi1dsJ5(rUlK zcjxaX%2ASGy70@j#B{YtGH8iSMi}Zw|GQzqna!0g8(NEytkhI1{xiM6_WPr^Ugjj# zyr`@9|}LT#A`T zW9U%RW%SZ0G31jok<+A-)x=I}@L}WAfdrdpJ%7~fQ@lQ6IChK|Mlb*>j&-oxg)s4`zE$FXz3ANE zK1R#LmkW$1pwH)v`rzpL)c!Hxc9aLer1))zIZrbzV*x#S53)(@tx@6&9hnI6eZHP2 zE}W&Gz9MbQ4p18Yu=UG$-89{3XPV-b^f!A(i*{;aFtPI{_s=&9(Ycr2z9z7xJhVC( zrVt z=D9*V#audzb0v7tW;2*s8&S<0*v)V{CR+)j;lhIo7_H^J%7&7|f0vbArKA$YU09Z$ zoaJ6xdyd(&Cyf>{u`PU#$6Yl6em&cw$8B}Uk@r`azOWH(?w`o}=p#VDXl(FR2rb?E z=AS|5FF+v(bxeDc$(4_>Bz81I0nR$c{#lh=MiPx4jsAm@g+#j}xXn8(4F>9C1gyKf 
z7H*8CWh$$Pt{53g4!y3eJp}^I=MzV(UH9gw)mukHspq$mauf+h=Vwe+BXiklRxEZ0 z5Ng9|?aH!MzS+dc_zc{nf|B)0{kjYA{#dtr0-rEFn_Y!_7g2|#MTl!TVC#yXA#BYL zb;PG2;;Ll;|QBfXQz~Xyu0c>O!B*5#pG-K)AzSJkfl zJkQ>zz-OO7i#i)I<8vBxe6ExAqj$9@RoQ&EGt^CdRY;j%c{fAaUnBEsvkBe8D1kP- zAfU3NK@LW;OGR;uzB{bPXHokWkwdzMevkWerS!mbU&c^C2+ZD43bCmhHhU9wZ8Hj$h>AQ=*+by+dwX1MnW|DCbN#MeBi)Hp~QExy-y z`C^U-o5{%&91TsM#3XW!tB{mhiH73j5BwbVcGnlf`MOU}l{k8|7@!t!`*9KrkI)t` zBX9ws)gDRfwGx^!c)72+B$A7lLNO)`y}+>k*eoIv&j1U0U#ieCs`8a3l6PgYF%3yf z+Sq|YHZZYndRF~5h)4ZbP?7&FS;W9+;B*#KjWFKT$V6>CbRDoOmLg#xjM9r|$aq)y z`TkRVMLf+9yVt(EIiyiRqX{l_&o1TIJl3xFAaGGs#4R>U1ex~ zADOS6>d*-DTTH(i3 z4N1mGc9)>00*5npqPg!gDVR$pfb~Pm?5S}jwT++@owS2^;ASYOKoi6~IND2YMPYdW z;AqP95h;^Qi1B0P2c^Xg+Onvlf1-iI0rI#FHZ7P~r0y$u#-wl*_5obp0uDYo+%hRd z^08GYAeYls>zQN+thPv%NfJ`tb}(u1{GI`VOu#XsX`=~;wFH*muJmu|s7)S$`rA|u z?DChp?KBYwk}YGdQzpE~lSeI01`0_uG%)Z%PKdS+;W1bN)LwyOmj z&369+B}RS!815+qBsrpvWb7cVIkuq>%KD;HkV%3~(A0Yu_4=FT4f1_f4qZ2lOh2Z% z0^lf&DL~3(aY{_W*!VN%$eg(Dzn!V*s`71=sYjUp@C;!ir$q+ehtMaaNnyNlXb+D^ z)zBc?KR`$Q$u{Q`m4xCNqusu@_PYyPs(jVaw ze8xoo&lIZcm@C1SW#A@%l#AWN_^o;DxMyeaIJV%F>59Ix-s=GEXVan@b;rY3f4V4As(J~9#@wV0uJ{gU1R0Va8V1i|hJ4J|%hz98Evzk{pFYSD=P|JR)6@7mf zh#*| z+!9`O511WgVqlQ2$)H?+JHmH1XP*Wo57i`6!F z9Oko`4{1RT9c2;h7XX52v&ImP$<&!#VUOC$S)rsqRnjVoKwgKfqm-LIDq?A+Na?u5 zQ-dJ7%#%>-Il=Tl%U!*P3tUA(JjK?zAPUa%Fac?zfEWxjTAkD;gd+~hJlK&-hB)Ql zboOKN2vu-UTy^ZPeX0Y+LYCjs5&Ep+Y$-y*B;S5xG)QMVwn~>}QltQ8+*Dloe#&q! zPlTLb^6YM=1vh`-V^%ROxJOIi7PtvB-C{AM?u4Lv?+Y`P__Ph(Y-Fr=cJ^)3LadoB z*8QMaK>PenzBnkYfgc@h{49n_9#(dUIWhuWmn?*6ERsNBGU4lu;v1L8?{q^37s--t ztqV=sp_hPA(P^qK|d%&=`VbaZa%PnyP(U}%BGwflI6+9q{b%6;f;Pv`Dsf;@ zLI(I_uf#BWIn$iDb{yZa`A;MfvWRCqNBedNU|jADe^%S>6s5d;B5Ruz#1lV^_a}b? z6#lHislHux4t6?7+^0Ic+H&vzFbG-Z>$_0Ld($*7+62kQa$%QO z)Yur)K?8^->uwsNJ7!PAU*BtCFT|$(EolNYaxhd-Pb{h^WMIyy?+5e2;n|wM_zWD>ru+HtDXEans1m$wNDq$ ziKq-WMM>l;sSx{s=iVmWn3@+m6HE5B7nF?gTYB6Qvs0cT!>RxkeRo@w&#L|3ZV`{q zeE=&##|yz#_d{%QoEWpTwA;UZ!xT_Whr?x4F1AWbUEROIBt)8lz?zieM0ubY(oBO? 
zJNsb+YtPtyD#6w5p&Y?YsRLWC zuCgWw!x8@%F5$xs(CQ~M*qu@@V5!EQ;e*lWt+VZS-*v z*62?|igVf2Q_a3kFwHs`;7PI!+g)U>WE}jfW|+^WcSlgAl8|+o8J9EoTLI&iVx=LP zW{gS&y7TwHc_kK~aZ6W;lFol0bDSqo3Gv>a1XSVoCu8oC@OAmQ=0{U*izdIu;OmNW zspPgG;09`jX=xR;)phMyg(4ghEmX07NgcOhR0t-_JrZ-9Fmu)8#l;aP0Oe_pkKO&% zD{CDOey|~isH>XT6^x1j#4<7&FIir`R@$d1fR}xfcU$*qNe5P{s%s~gH_WMEVEI9@mS3`z3rMk z;w^>#?^gL+SlWnOzlgM>ijk4A^2b??s}6NO#}huv`0@ys!)+_4QD~65ZpSKO%(~ zgsjGe5``T>}I2Vvd4bFH>B!C$^t+zCCwB zU*Iy>Rd|VXG9Ol?q_j912k(xXF{ML>N9tJZT&{1I%9~h_Z5QZE;*_OEzIbQUnG^NT zg7n2P)k2+mJ&+VHpNb|;Bf3nYF$oz}u{l~qNkKw8WG2~}JS68= zJ64Ik+q_qN!D`+O9tje;S!w z3FtpfYCE)?7jxkT86Qam6PiC@%UHZtdo!9dKSPWb#xtJ3jUC3LKryl7O|=a}5^tJc zY+S1O3jKOdeMqm&xx8WQD^_kgHd1^>|DnC6e_efDlh+)y21Z@J*1D{E#&=O>8 zhLSe&lSM}8nMHWL{lU7NhIdx!E~oRTVPH3)A&ihZ&uzJ? zK`9Kw7JT8)@BK|>fvVHdTP%BGQcDWrAb@;uE|^r;-=sRak^IzR`(D< z)8cJnI-iv|N!`H10ZPcfn>$)=U7jX8S`#KM zrDGIVh~N#Ri(tP>U;1*`BE>{0MmlOLMfF?Yjq2gIC=rsF=>NVXn$5QYL}wSn%?*j0 z{sBD7p9u+Jo#gla# zs=phSNMclP={uc;E(8$B>VgW;SwIDa?#H_Oca`5F$|J7@x5l2<`uI7e;r|(yy)rE1 z^ZAJ%tE-Xb%)q`wb{CuZpcSHS@ykKU%f@E#kIn8Kab)@Khxq70qRKPNvqC6(4D*A8 z5BHbC{Y}Vjso*rPx$eygG+emHCrhR2$ilwohNAgn^+^mcD!9c{`?U~HFZ1Zmn*)yh zQNVj4T0W(RhpJd#ELurnT+Z_`jj79_^Y!D0ui`fO^HnMoZJduEkj+$>EeRe*inkq^ z{E88O*IdKF{zG9$xY^V>oJH=rgZme|-BMp%Zbqg6P?7=b;2$34%O}elLSWI#7_Y|u z^Q@hsVj<9Jy<<)@+?~!t4isTVWDyp{6xeSmR*E7WjMxuHgZlWW=hQ}H@ny`U&B6&2 zA&`;3juasfwCwh%N8;olfZuHynpgf;nl zA2UKZOrC!pXgUW``$pjvJ`ujRqSI<}?3P_3fh<}BEUq+^L(+- z^5=U0k-}=x^7nt}4tfych4di9qQJ^e8&C|`BQaYOO^fdbSSNL}*O4R)7l%^rnF`@& zpmuHTU)KPwn&JorMZSi z!o^nKMHNqO&a((BsqROj39$pa(I6@3roP%NpFb-C3E%kqUhitSP@rerHcE{FOr8FUmAH6hi1?+n?(mgUBDjl|-TwoiHArT0A+TC=R=a z6Jc_pLsub#%KKRoy1PY1 zx(SBxjsorvLaVF4W9@{#eN0xi%trn3)#hFgOKTE;0D~blr7~oD1o!C-*3h+9j)Hlz zQeLmLCo*_UGJA1n=b$#vNN_Fe02cOw$vT(*wAsvp2@1N~Av1{`9ej4&Z~@hSkHZ%z zNSF1EO(ILF#;b`E17y;Iy0bC>G6eA!>zL1BqvLegTBz_Kw4+0eI&+C*s6atqLh1B` zttl2gEEGsSJt7noypa5Bj^EIMdE&>fklAur@Q}Ux0Bsa+6rMgtvU{)pxUTv23_bDs zU;liA14o{+yXJx7k41$7xf%(u#gZcmqa1$+bKYjWY(62lm?qd7ikPZ1!Q^Cs?`Cs- 
zEx_q}#d&(W8&-*)D`PR96$r&1A6K5 zs+F?JgpobwRT^L;HUxQxZ`KQm7BQR|~f4 z)`)ZE(eV@}GAA%C`xAnVlBVW~jpE4`$81W~?CtRtQDbGs4|x1^buTLLmz!#54|4I;l+(J-JN0%8+jisAT))*EeAU( zJX}=Llsy{a0jIcYmRsv3tM~v4vK|w)wS()8pDpa!l$WxK)TxDz_{8$W30sHb4dF}9 zCo80FPn(7Yj4>>#dUoVvqlqGe@B28-(~Y$n?40IKZf2UrnasnKBEcC?W|oaurr$Bf zDBV1%2g3aQh^P`}`3Irod)#c29u89{H!3V&STCQcD<7OrEyVAs6#k?g>>o&F@-s^b zq5-}Ms!Wej4%0cStK)BL2qRSqH*LCsRk-CWD8-8~5+Mk%I6`s?%n7mw&pTUK7VQ=q z6Y;DcTzB#|Rqzf5y1Fh0K#Q|i*Yw85HwJAqTwnxaJ;<^XMMF$kJ7Wq#&NAZMxHR=a2^@~Dz_v|Yr)BbU zNtO(@fWN{+vay+5f^LUoa^6N^t@`naw(If&3cnp3_77=gs~ZH$NSpCJD`eifqE!EY zHgc&Ga`Fm_Zo}N=LfX7hNYmrM{zeiCA77b= z4Q)&!$dvVO+7LnAx2LyW93&U@8h{F6GP{oi9-l12x@*XcL&GLpGN5}t-AGYU^F2SZ zlp-_@dZj@~H>#!$AG&Gk*_AAs>MZn6yrB?jN-~y9d!&aKw6pI;=NA^DARu%|X16%3 zY^*fg*EeLrg4ukrv@y8h6)77{7?4oN>{e%%U@0lQyTtI#klh)p(Sij~D-S62?Qy8J zf|-IK#O947jgyX_I6e+%NyFfCgw>1?Z12-$G}aA*3=CE9C4>zOP1v?dFJuNH$cVjd z6A8i6p4;VE7l>7?6wKi4N&z4z$kJp{m!Md>BlbcNJQ3BCW-=B{)3dUyD0(^R`{ke`u$CPV4K+70WRxm?@+ooJglP5Th&d8=K98U_lXjFtHxi zt*Xz1YRCPo)cZ@$7pKpoiO(bV6?^#&2n~_v!%Id>%ZmYV$n;6tPWPfU4{ELyzEbw9@;BmzI-B z0ppu%q!Id7~?brj4GD=c02EKyKOQ7!xcf`4f&lkqjrf$Em@QBtekcW=t-{ zl>~B#Uo#O&MA(UuX>^KoviEd{Xb>t-{~uH?2nMpD{a(6i$R`SN5{!bh4Ks-?gh4FY#z9HHEDUCzD?7g&Gh^FrYGq^|$SynXy?; z<4`ZrPEs}`g_IXT;hQkLA<4}>W~$RifhU3ece70$i}B&=n{+*}fCo>Kk0Y#zkf`Rm zF1lwRbSa)g5h~#hF3ddh5BMvU$om!Kg)Kth{a@rt4GueNNIiu7A%ju`RPrdrOyor3 z%{eI}JmDgPC8YIvS-UVnV=LNfYr5iS@EY&GfYs66elJ>ju^gfg-8BrGWH>A(i5rHX zr@{SSHEWXYpZ|IC|D3&Ri}W)8SArV!;dr2eX6648q#nK!#oz&@{rvar|9`_x+R=@t z4|Nd{ENmZ&JVpUI^Z(vMK$7Fq^)}u+*JSlScCc8%y=96e!w2>`Yks2t+mcVl&1%ie zv@|_y>u6V=&(`xN=Kn%g2Cgz$^X0;mphglBq5Sn z%=*&g2Ny(1NpU4A$`dF6O_$DFn0jCmKxU&N4XQQmv;5ItKp ziIN~ke!IT*|2780|1L}@xBo;Bx%F^kNyq?uL+SIH`H26SUy0CHTItyXCFpzP2H9KP zm5|dwce~=rhxq72<@bFv)cPZKu$S%aA~xD2R)=$JUdc}qf#u|5=#Q&&&g2e#cdxEhCqb$t*2w2k-2;DIG&eXW*LF=~~74J9e_ps zJ;19>XzJhDg~HYHx((&DN=}8N?w3%ui32mz4}%IV+3dT*j&FTBS9&Hh_B6q zE!Uf6)z;DE1gn_CRQ{ya?uj~xK&R9W+LDKbRLi0XctmXv;r-(UBbSIO8awcfAr=-Y 
zO$5P^H@ZNX${39{385e6l*=c~&A`?j&#JPnxoh&&7KairHYYd5tQd*D=R&o&f$Fz# zVEcjD?!kKLEJgNe`(n zCjO3inZ%VasJ3_(`;OXB$&q%?KoIcY+pab=SKA*o=G@+vPrWiuy?g;i(Ensd(s)w2 zM&F1jN2}-@bXHirzdvF*ecEB>NZq`j;JqZiUbCgR3U_SMdtAMfGIZGne7#GCuUvVC zeBCE8mk!3d`ow&=e8#0{w@m7ErRyJRY#s9R+TkcGx;lDXa6x*fJU+Isx8AZbbaajx ztLj@Or!nAWrpBK%;_<{6s%9G>oC6yI#bnXqz61#Kd+>Mhv`jn-hGG_tTU>7-=N!xs zycMq-^g4!S2I={~Y-A);Chgty(!&u3Ar$`w`^NBFGp%}rJbAwxeBSB2$Id)`G1zbE z=y4Hy`}H!DA@Ka`G5*~>^?Ayw?PU_u_yLxzPxOm8_*~BohmYn!(>FV-ILAABtmIIg z>Ns>(SpD|>(~T61pvLE*`(||O>*wd+FS^0IEt2|Fa-B$ncltMHXvi$BCY>*C@6TJO zPxmis%pAAu*&z5@IrwZ9h;F-x3Y%y(i-8yS!F|J35f~Anf73(Om5OUEL%10hQ$*whY`SS?r4njXhO6|x<`B&JCa%CA}hh-du;)eO^8neF9@_WNtZwW+G=B!?Y-#Pf?`_?^uP|+Czj_vDK3BaEL~<=(U`GA%f9x?v zpz?hUlSAks+=*Uw#9^J^#Pj;O4pScZ;~U%vBjYc(-8Asv6w)+V`LrSAqW}mnm!L)Fd>+pd)yqxw5Oi0ZVJ+& zQ@RxBi3(sZu5ml}wAeC~lUK{l(o#fZ#Uo_;iS;`9*^i%hwZ{Ul&i3HS2AN#)JpBa8UoNYrrCcjxXce=5>}2&@qjPMO?9 zcIttn5#xk@QS2924Q+I)1A_#sRx&$+`|10GYh4{-d1!zjNfwDVFg7+gvO>dBr!ycs zqu?XFlrr3<*wPI55o|mn&b!);a$oivR+H7E zLc7^hqJ|~OFU>FK0tySN4(t)L32dXcK+v)m9+_gb37rNoQuq>)R^Wsc%1WmYPET%; z#M2TF7#Z@)q9hKN%+abij&GHfpDn>%U+_6U@o9i6#u^E~(3=xW%zmVBc6+7`F-$O3 z-9+5#Hb|PV8-6WR;5mv@*&%X%mLd)#!12J?EcXcx$}3`phu*k-paJ2rK^Xbz^`ZB0WIwo? 
z7jf1%=UzSuC+y7^iIkPFk2(L8q2@z91WS;V6_n3Hyl%W3g8tA9GX!Qah2w2?%=Rjt zEowT4#|oW;d^p9cEnbZb#>hDja?DB5WAaHCx!K1bAob%gHS2+!RRsx${cnZ59Y7STOj!9h?4Rh#`@)G1Z91MN`Ts zdAh!y)=+to+v{?DtLRfL2ry^riK_GWo6UTo!yS~f7?%t4mR?cje#OCvilAJ6_A^CE z1pU{U0rZGtCPD(v6p;D7-4bu5Lo@5>UMS8+i{ny)7(13@;yg(GQ@B?QSR`?R@@DT9 z%8zdWEB-IJY$EUOPpW@dg@Uss&`>-|* zf&9lj@|KtBkA}*AgA^RYSH1vrz$i9B?PnI4b+y>u>J%s0koN(1Chc+Nh^O`&Ap{~$ z6@i5Y_^4+5x)3IBu_yf-QUF_H0iSrI?|K5~D6_iZ3p>mAewyI}$f=K>gF4c+3~U(|diMy9ZI@Uu9cu&R4xC-;fqf z1)SPtT`)q@BKGgx4);*ruE|y6W7TwDl>9{ig7mm=2-QbmtidW-QC@L&)C05jevTB* z9T%Z_%157F!!5D;W@Qo-AL`naw{`^AmVj2P26v>+C_dx7{(q_ok=hn01b^)g)?M$cPJY{RvKxY+UF)+K5)l=Mh18T1NmN4PM2Gs` zd#S*$X7KdMrK-JBC>%Wc&x}l2p9}1-ksdd}-$VfwL?%0I(R94}xhwscK@Kr!;9S@* zVtr?U zuVd>xXl`TZu}WD|Qk?U)f!i^)gKV<7`iUTD5_K;w89eO*w}EP~W@Os_tPLwc6$bLC zLQN1v;JwDD9n-gLRm+YnG$&I8i1lONkcaq<(1W9&Ks}Ndth+v@#w(_vOQyg!-j$-SW2E}Z=Sdw3X~#BW z@lbiw{RDo=9O_hYN}kItx;i_jn9BcUW}ALM9__~=gJ)3U#WX{jVN^_0!y9CF1T&6- z2Ajbe#dLhAn13RDE*xJr>%TfnF#{OlWT9|!2?+y86OodQp&w46M-rz@a2KR7sO~6j zY6iF#h6XDHT~q~>%*Sm)(nvQ|fWNHzhAS<3fu@JkWI3+O1?-q7-0&yTk!sRe_x;ye zLN?>3mdtgU;DUm%P$D*wx!5REXy{gsJ_7Y^W1p+|)}6P|56^Iwl!T;ZM{2`ZLji2j zZ>L;Hlf(b&=xCQn=M|~ifTOX$`PT{A5I`OL*9uUB$J33dpOjSqzxIghC8U@HQyTMrwIqzfp6^%p;)2(3>2Tqml_ni_C0j-W>H2qS z#!tehl<6&UP)YvfcOE}>(y2Oc8^|9M7or>=nU$Q_j47);(%M+fTi>s6TEKd9_f*bj z{QK7}MfF7rTnYVkXO-}1n_Ty|i&xaBMc?QyX%f}LTDJrxX-FRE1*>!q7{_7);fWM= z**A#-q<~X;m<4EY-tWfraie7F$<3vNh^=i%4nHx~B_`saBYU9QMXyQ+7Z-`Wi2ca1 z`#X8bl>0{c2b8q;65;En2cMo+DqxhjfuIzfm-sL$cmbHHErYKQs{w>o7RU{)@;495 zOV%>xfVhmEFsUEKO8BTTy)&2-7jG=LDRH+BnUmkCkQcs%`S!S&ovW;@RnE-O$I?yI zXeLq*>M^NGAun0WU$sTPBdjQ-Insm?_-{XAxtyNcD4QV&L)+s_*yuuwtG$%W+}g<= zdFpG&=sCsonMVbnKv=C**n&C#JmZQOD? 
z8@z zm@NI*LZJ4m?e+b)3_*jGSMQ`#0lnpxkPerSEWMzcTeJ8RTITCk_PC%+tgmZ2q^pj9 zF!J#J61gW5-54E|{E2@Tid=pdfdaW!EVkcE zVoOvwbQFfiNP`=B%@?3O7LfjpbLuk#lJ7AUTw>aYOWzoig(ExV(u>y@j;4JBL%JaXC3d-73`>fCzY$AbY93OjIAEZA`Xje;|La!zi(BXuOZn#T>H>p_vr%g7qrKe#N1xwVE- zCVT`1f%BF_kZ9HMr7~yvQkfHstKtz*dcwh5QuyP#+6`+P85bd4+)|nQ{sMCt@$#bG z67k5DCdtt5&%bHDR!jIn{K``r?gi8{CuH^PaUPd)?l_Hl@(3m{fa~ncW8G_y zt64|eguOe8xqE1}Nx+h&aC>9tLRI_7gx@P6>)u6Pws5%=DUF7!P?GC5U>WBIDD=}G z;x)ia&VEKpBga zo1fY!3^YPJBAc}mYQ5;}Pqcj_Kc!O2SqA$yl33QX@!Oddqj~EZ1G%_` zh|F@>{t(tx?hMNd)>3Xj<-42gIL2`QIPf5PL;vcMptDx`{oU=B6H3b+%c8``1=;o5 z(HF^T>F6<61Yj3lPZ^I@@)cy!?9t(QhPlQJJe4bvm>ED!KaW9cunJNaHjvDbewWhS ziEf`?RN*v7ES2PRr0biH?9qL)I`w#RD{&$)Sk*lg5*SoMO(ctF6}x0bRY;jOk!8x5 z0wg6)KY&$%CT$0&tW{t_zQSQ4L+ow)wsNb>h1TEh#;Df|_@sFjhrCegRw}WW(?PAj zS{vd%V&u$OM(gbpn<_EvS3Qi@x|vj@QB5bTr*VB1>8!m`q1x3&yV#EnY0MCNPlbE^yJSV$>8C}Bc*0Jbw37qH(P3f zry7*0BULq_5}LPh4}1?BteYM;^smfJWkm#gn^vpsGMgO`lwECsT`m4c(Q*EKbVU-W znS1As=|PSN!ilJvI3KGro8Co|4Bq}L{B08PSVRRh1mXL$7em)cc$9LiVY|d{s#~|+if$Ml! 
zr^W9WKw~~UG`D(KM4j~}6pj+Ks{I_Op0RFL>ueiSQO-@FVqy&ENrO6gHZWjaJYIZS z;XuO=@>1);jcANaxJDpi&)uq;#lQ0G;tziPoiLP=zl=+ZXe6aVEr?$ zz|$y1lhC_;clo-z!wF&Ta#d&NkTBtm#LPUFkP<$gtlz!f@rmkBM1otZL+9GbcM0-X zIc1xOj!H@ug5xdjchcU9Ro7XP;MF0CBiwbgu6MIY`TG15l6FLnnSj$PkB`L8@y9D` zLN_|P=SuM>E&h_e`wv)6xaU>YGqvRB`xsnn`~IMTC2rF2 zOWxf(I8cuWRd<0M3(u-9{MGF46D=(&_shj-)MKVag zX*;{XN@{W!jWPyjMVEEFEkGOMq|2C-~N)oPBv}v8{Kwe8mX= zdfVWoWEk%kPyVzJLd>#*0>TG^Tp5kx=)N`CeX!bv)}qb}w#yk``_He$F_WX0LF#~q zh3Pr zqh8#we|_t~jI^;myJ&u%*|Kr4f5AOIg6#M$G%Ix9AZle1Wx?+iV@V-3NG-qCGWuv9 z^2?={qrLzBnx*y>k;{esc}P_sLpEnmhu*Q)RQwa9G7$slT@YIV2nN=zC6Q&R)_}6^Rf<= zmC<4Y2NBQknyeiT2CQ&eeUG}%YRD)+99W?=2ws-MwT@F~dz+_$q|L*_PzD-H2?^?M z4!>)jC7;NBXdGVbJpmeO-$Ks2Z%8+_aqk?yJb2zbgD7)!`Z<<$ySsb8?xtK$DCH9P z*`rv<^LG^5pD0|kJ;Qz`xP|j(QBJVM>(yRxR&KP64pw1?jBWL2@D06?k?jL5`gFgz zxY@T3bFS!wKFPiwK#5W9KqYcakHocDe|^EVY{F=``lL3Ar=jfA`ut3H4QUwuzg&QExkc7&wXoVm zf?a2uS~m-j#B8-!S28e`E2pq)ZfI-SFREFMkmWhgHo2&bW2-wc!||i0y=9@IvQzQk z%G=uE21aM^=14a7_Q{m+^7i%E4%*(&hW^wvJ)*KTd-oR>zpHWf_$&V9M%UmNL-Wk5ndH~Z5VN#Mo&ZHb z0>hB}Mns~l&i={))0}#O>oKW1YkIqj>l27Zn9~&axD+Lb**FaB9MI`N-OIMPeu&^S zmZWh%W?XKcyj;k+sr!%#P8)ka86knQfv~uM@4JCAPpVFkC`p~+%Nyjz3g{f1JVpE* z)7d!e_o2NT&&f2`u( zH?!EF#Ytg=Bsj;!+%0IJWU7WGCpT6hj$n2ho8W?pXW7JqXoeN9IG9_Bop+Px)ZXz8 zn%4{G>db_Z-<6%RLcYvALeL_M-RGU)v|gS56E%_yHVbugll#2WBY^Nj+5kB2APFfR z*yDtak2h=Kd}2Zc40`Uo1KnW~aB%Z%%Ldq%RTeIs&6kF%- z!x6$vvH0G5%gO)^r6-6*sjUzU&qge$shB%m7&!~bEGs<9TpdlJ&?4X&*K6AMan+FV zj^;^QpR}J>ZJuM5OcqvqhEmVBEWzdZJ7Dx7S6%&s9;&WZj8VMxqpM%f5~^pT zpI$_+bIzZ>UE5sUQp}xaUkz36R0RQDxIn3`!`*im&IC@u+ENGmcg&|v?V~K!3{w+X z`xlECfU0a98ds<`p6A{R*R zv8div+1l07UOl{E4J)IRtFdPi?`!vu>;mb*3M0v#BO}CO3ywQOsQZgp3s|rrr^x3PFZK0Fk|G8O zj-J#%fwg&n=6^Q}@Z*}O=XZ^)r!0{`CwVjA0rdPDj~`V|RV15bf)wh>eS|s$0(kPAb0xg{WcDV(j_@ql3B^zpn9D22Zf@Y@= zVAY%46QhdXdF=88_{#L(z>*L0jOsC%ONI><`kw>Ud;fj&#N9I2e_&s>nQv7N%6AJM z9c2OehKCIxDyipg9oyU5;IdPZ7$2HK{h+zw(5Zs8Jg02k`DKU>x4VK-PDA>faP_zx zCI2&-BW281{|`lRfMt7s6*L%0)-unEg5QB;rl<5jN4!G4q`Y7DrSuN@O1tZsKzjND 
zN#vt*jqHo*mGuMDY8mk~^7fX0OI=Mp)DF^{Gqp>|u?u8n;iK~*Dy*pfK=#I-#uJJX zDwBFz6XJqajsQN7=BxTnbptimbTzwhC@_VVmdnt9D$J<1iLa^6j892>Etu@?<&?-_ zuPIN4EtQAza8Z>w3~G62jV9XmsBFQ$s7o{CVn1tL-xm&_0KgXX-AE2JRu9DZw@d8% z9;}#+;Huz`;XhOnh5be0ja^<7UQ?0dYDdC+@uGDS@`+Tfk}q!x0$D#%*{^u*cGo?o zc!&NfODNxh21R7Zkt_%Q`Ly{&t;K=3A&I|rT&s7&!r!R_LcDxOS+_IVf5YjP7EtD@ zIU(^ecGP05caG5uQvsB#H<89jy)S_5i~+i8Vs$wEbOFmdoPz=`jZtovKQQmxDk%@+ z)SMp({H3Cgt2Aj{5G0W|43A?K@*;*4d@vubcjA^e3B{U;K!Jc#h z#Um0k2&Qv6LSFGCNmKF~Qs~VBKMol!0NEAyeehF-mMZmV>E}#lS@K^qF`Q>9Clt*< zGTPBa(P+^nK`fTEaUL%DNJzNKe)tS_Rdpb)DWo*v=g;V^PkNgz%u=bd_fMbtk)jJG zGpJO#RJm6UcW8N?Je(z}V^k8<1m~E?Ue0JY^wtK+#c5zTDmds;%407!^BskKo-;8z zN-Rp%4}DR;3JaeBUEBWV$yL0p=yjpV;tc!(mwghCkBB9NS7-}QQ308 zBz_NLlkK4ve14N-MOolTlTxL|xT{RIy1((t1f}L)J%)bSYaBfJ6pTKkuv3}q-XA>| zAUOycCAW%jrRgph#*MpkiDNa{W@2A3ie++$RZ3sH(xga5+~b2gfH%uIa1?Dl0r?d3 z8??!-GE&1OFkD>=h$l_Q$2Wz>l+~kt8L|+eTtj;qck*c;y6rAFm4|VtYY9dPk(fv@ z;>|oTJv*mO)BypgC@+fXPuhEz$X$JB6V<5Q_DMcZ|KK&Cz9+$ozzL$DWPyEl$SuL8 zz7oW<7_mW^RO;b^hb012H+y8WY}NVunf3zHrRnn&8z3-d4Axu3-x~RkLTrj&1SJN|2Od)S^*K*7byZ9}h%vcbyZ(>aSAW+J6H%s%y3Z zcm9MXmZK~|@B15EcHcm6eJ-v=cHJnK7tsLie7W)LuSCQ<;|~TWXj&V>&S69fP`xpj zrVa(9bVV?e?32|hCq!Za%27hB0Y8>d1F-7{t)j0w2DdzqP<9l8Q$CG;{Bn-o+AP`W z$13A&QdO|T!~~SgcuI3v1ERr$?Gfb0V`;VbULy_$3`g*p+MY@}UM}ghI}=EJI)|5Z zHg$RbqTYo_Qk&i0_9LOcZ1L@jCW+J^k21^-YI~DR9{*YOplx|`t3|d)tzH(J> z-~wI;uVN>Rhs6-1-;j`TDMm8fRQ?`Fq=QYqAc=oHFLijiXR#aW>=o%JWoX#xb7Z;` z1Q1ibU?lxID~#u-hKhzYrBVp3V#8{Xb9-|Ru$cOx_N&@{LpE1}sr-EA{E=gY(}aFP zz77Ch5SYf;+Qz1z$asCk2De6$5tCQW)WVF_km^cf^??>99p9K4Smw`RCoe|#E}@^Q zNty(#&t3q6k6MaNa~qw*vs_3O+N;wa{H!Y(8|?~-Ig&La$X4LT1w`@o9W+XLzPu=h zN6vWsKLGDQ5WgDzObbg(P$_5>;^O;plTqLk472exP5Rec+ zF|pU5p?un9+jxV5U~Cth1Spv6=-<0fs%O-=pV+)Z7m(r~6&BVhXcX z#nLsD56OCH#UzEbDwd(4@LoW)h9t)+u4gE8TTD*R>7F#0206UG$mC4MtjJhd8C8F} z-Y~b_-23Z);MloI@XGH} zs8-p~7AcA5qY=G_59!S-%xc@jtR!g`6OLdyz&*Z8x6|c3jB~SEVe8roDSL6schRc@ zqUex%$LF*iCz(l6Dp|;~2t-7YN9VlF`ALT;`_G84)W|6c77d+YIP9UPv!ttK 
zOhrZkh$7t9DdW=vnp&E9w#rIeAuG3uoLz3ceZcpAJVkls615kvku`M8i|ZKn@jEU0 zuEfMLu`Ll(?89(}MrX|Kz(llbq$@S9mrW}3KK{X7x`9DAy+S5!Q??xxslncZ`+Vm= z{fIN`cTlQ1rl+^q8HKc)E~0gr-}@i8dFD!qwQ!7g=er!-e!y$tvs}(z<@dH^)?y-& z?-4Z5nDhqJ1`G`RkB#>fQsW`Q*I3^Qj z^(MhH>sZ?_a&2}d^1tWVE8A>bjXj;$wJfS6A~M07x7c~@ z7WdvdBB=cusqJT2)fAFqND#QRkDD}4d&tE+@p1uM64AmDbGOb}Q>5NaQOLw8XLTe| zz&*K3_szfJjYEewhe^H^9V4E#`QH5|Qz6CttN($oJad)xRe_wm&+h&%-~8(~{c8!X zf8#ky6&n*5i54Sg#NN4yP)@SFnIvKVgq;}41oIlD$`QZ)qQbX&_xZ>F_?JAV{2I0V zRkAS|OP(PLeY*WFyGLUtD+%n(3R@dFYJ~~%?2u01=V5=9^+ba8RSDA&@VWz>{kwGM zDy_;D>|~Nv4Ga-{$6XEr*;Q0*B8uYqj=DFukGX)9=nBzl+ z^|yF)zs=o#jO25xWQ#duKn%Nh^D~ay(4NF8mThu*ZLvBTc5&xtoV1}mjZ-Yg$>+5X zP6!DyNF^+*z~!39R2y(-w@W``;HLtbfrS$Ztf(>xb$PJplXya-MIyk%na(*ooZ-!& zS~IcY%0e-;Jc(R|8`txcj1k?pzR$b2PI>bH6hos}&`}hTS@)dIyWi)a>vO1HrkKmJ zo!2oG5r5dg*?*1W&XirhgdLAjO-pF9KrrqzKYl>ZmFOlmNF^*PG0;SC2LpyX59#|N z*uC#Hv+nO+$}n2w{s>XTrg$h#5~% zUCm(X%3{?vY}4vY*gq9MkZH;z+fovuY>=+x$rh4G?H112F+HbEb0*T7Wid2^oGm;> zsgy#%XVh+Uet)07A44c?vA$8`>Q0N;OhAoV^cyF<{i6nb*PwLs zD!F`$gy^G$efq6)?!0wGdjhe|O=9slF+d_@J{r>6-J?GZaPyZbR!eM^;>30NgK?Pm zh=#k=`w?eOj!ed)l2cJ+0hkd*eVUCvr{^wOwm_y@!ct^Z9EdtJ8xzjXz)GjdSJG&T zv?$d_z^KuneR9S$mO&}3P&5>hZkwoeoBg8^_Zk+7l_G1GbJ&`Ma*_RE(rVE?X)rP} zi1`&twnD<~;+@~6G4eT=O2o?*YO6VlNdN9LsOf0WMi7pqsYBp^I?PfY|-K@mbMM!1eke=i=`?F;lbB%Zgyzx95W$~Qrw_c&QM7iD1w0C_3=iB?A~v&a~Ke-Rw=HPNGc*)Fv6dnvA5S| z_jrz;&XOu*&_oew-X$0wQ6H(a{UU|5NhvKNi9Un#BTjC;#mPir7+<5DjZ;mUXqru` zvPtdcOI%$qvSqudgL~9Zj`+uY9Y;&^%(Hp2X#>S~hGb^(hJv zMG(n^N{&=@gPUKw!D@VtaQpAsxp%_brvYvlp-EE&@04zT!cN^pw)3oRRHzi zU^W`j*gIwBi^MiB5sxRH?ut2nkNNbJgZhZ`iH23%B41shV8%$AapK7=mCKu~ZmyEk z)kk&}e0*<8>!d@sJ3}p1uybi_9NgYHjpIX3njxbgL3uSxpJJVIB$}{}# z*JReSpi3}nb~t?f7H#kM2-a_skEf_ei=@v^{e=EM{w@2>1Uu$B-$+&)iJO{KZKW@G9i1Z|k@visnW@4w!~ z|D8DH+9DQJjN0HIQSW;^Y^6yi64a_Dx+Xw02LEs&YomYd0c&itYnkpiB+V>5CSuL%`Url8%)F(h*j5Fi5nEv$E3*#9GLFX?VR)G zk%h9I=Eio4Y(j*{f%zfLvt7RX>N$-qiSo-e;z{fMaUTyy?B6?P;6F?3*;lBQi(HO@ z`RH@T9ip=bG=~xmq0Gx^ooa>)XMao-olopZE% 
z&NP1$>+-W~#tpLLHqOp1nsbd#dYz4}BG=X}3{7O*?Q?o}2S<7VtyUu)GfB?ch>hE{ zTV3867TC_Oa_z-yT(9YD#TBAJL{U^!0~B$?ZvDv5=)K`GiIx5hD0AT+jW)@}$vdh<~_Crx#ME%yuEgMpZ{w zUHaWF_ii6D2rpx8-C*_7O>W+(kdS<2MFB-eS41QN1iCm*ll`L^BPB!i%U@z;wLnSq zkw$f9-3~{yJTbk%wX#S)*2C+buy;J=uoWYD;}YvPo@eWYES4-F2_WkRiuzt|%*CXu z-{^DlpiP)qCwu7ywvsmGi=7pmI)3LaJEsu`{T!Fe394m%k(zDZ!0X@P_QMWu9|*+0 znk1WzBZ?vnTZFA$+MYpAFObjZ6ypJsIH%oc@$lX$j`n4ewVP~gJY1nw_O2k72S;yX&&ksjyyM<++Nr0k!n4*3(utc5!`;-EgKG0&3QtdT9}kOdHt5EPw!sR)3iJn96_ z@!TP2$2~e-pVae3(#06E(Bq`tV)vlWcxYi+1vXb>6m!br+CdYqe#p6JGcYQwXA~;- zMLg!9f!8?X>_WDdQ9qCYi&0Q9V-~9$Ijp-9t=%)aD+-gmj&EE-wDU;HlUn`l7sHd; zH<1VPQ^wN{4`x}$QTqK<2n$4HD}%NDBBe^6E7BoI3g_}FmKviVhbV%JGnvpj>)}Qc zYBGhXDrmk#G&rJLKjFb%hfZKaW(_M0h)sLAp+l$Ja3Lmf-s;rn&Agc27?~=ANoXQmbIIg$YyLz z@xs`<6X=~X8oRXoIBHBM6&H}@InJm_LAyiLd`NHNa~zehErH^DvWZU=z1NizQnHAV&QYXG)H&nrdwu#Iq^mY& zOnGE${wW}uKyXT{)8^jc3@?@?U8|9a8Cb$&UYNjxQH$AdNMmLaC*C5O)?d6rE}tYOe)uG$+wU>h-=#0an55RQ zLmz84WEzFcW__aGK8>D0eU>4UhJ13)$O)MA+C;5~G$s<~QIV|alheiw=K*7PN))wd zw#S^eAX%`nCrwrRI z+IAV=Opu?pkY+u4-jspkGxHUURE9)0M+vdK8vWPH&FvVb=069jXnp3At~Vmz5K zHS#EiO-2dPy)KjXA@|>Uz=Puv-OM_=X;Gc`kvx}42to9bAq)e;?g8V|yF6@7IPp_R z>FcboS9#@{f)dVXoR68$eZuJo_xK@Q2ToJhNf+{L=T&S?#2rp(?Hu8RU|ilrmj#qT z6W^=ToA%f}3GoD-waqw+95EhF2>c(z3$V^jKM#LD=cA?X3(K|b#ucZi5d2+!J%^A?1LZ{xM zcY4A=PvWNvg_SBVB-+?1n@MVmxMGndM8;)4pV9Bn z2qMr-1xXZOKE&(p(5j#C?wQ9x%wS||#O5w)zs1ZS)9m)yxqU_`h~(C?MDCnPr;Fo9lE8I9XKJP--gELXm?K_(af*y@JU>(SfWV<5+v z<<~G~Gt@zsQ4ruvdqmAWT0@yekSCk+$k-z~W0ygzO>}mj)=Z|Zl*njPGGdqRG+;O% z6LCtbGo#*C$rMa-1%=_rVLWz05D`=zSzzOst^fca07*naRF7fE68gnqw8*Iwbl%`W z9qvvpbK`|=zWz#uY)oL<>2q-Fh``K|d*%gdsT3v2MG-pm`vZ1%d}K|hT(xi}b2`m2 z{@gmEs>=jn$mIM1jXVFy zJ9|C$CpxLM1ZGmE*BUc(z6i(DKrwQxKYxks=dz^7_ZXe;@@CtoJ`D-Oh%kzXqL3)` z7>6l3v;0?}k zIvtKC1q7?a%hyb*IhiQ%aq6cG+f91u0$$7}(LN;@_2~owjvs+2qgobPOh*+XL;?ce zW!xJvXpfkC0bzKt3(}qNux=4A71_F0#EPjOd?@AGI%Q!4q}X*EJA)y4@kJ+haVRvfq;MC7W6;hiw^v z3x1u^WWdgO5+8+2)e%H9THS!T`?NWp(IU>ge#&TPm&RCSX5?5cn-tQYVXq?W;?3)v 
zv{ky31eKhC6%09l`xvL+M>GW{i2|OLA|JKU+%D&{h*%5=C9Co~lpTXYuFFlea_a^_le! z*!{r|c>RqN?w!<7YiS-f9o7;mh4=I589;?fWrMGM?Gj#Li<6Bt67d)kA_5wADoO21 z1!H%I_N)I7-&8hfTq~1FB@yQ)tA>L$I>PBa;K+HA(?Da5KAtn+u<6l`HF1*1jMp}B z%s7HLC3I&vLy!K>yPQVf=B?+8$h9gJV~<1CVd6WS)raVNeQK8yn5Ki&eaN6cW;eRT zRw~D{D-y{^_gal4LKcfFAH=K)B1ma8BS$HtFw+x^ zwl0%SXG!uD1-yqoctiR}_c?p}Z@BgL18z@4gl?N;>x|YQK{soX&_9xwP1Yf`hLt^F zE&c|ByWioj{#fO-kmPbkB$F^99S7f`civ_1L6eoMI$B&n8tgOacG(YVq!UGM)MTsI7{~lM&A80mETL zbCzYd6;T{W+^7oJstewT+1zC~beZp;ku0~E@J#hhx}6g`M@PK#j*O;H zm^aU<9}2`Tzr;6QEb;7CiK;q95gN1yLv9@!$mkT+u_Qe%9ph9bX9tT$kAf0tg$!l_p9)i3095uy^qsC}JFFQ@qLISnPN;O#sqJ8$j+O$`SE^%sPZhoe(5E)OBu4rD8xvYs$Bn#=ec$JoO`eS z-+W8iroOjAE^d=frbwi+WW*Q=-@~t;GHiF~Wj9FVYHTVZDG|aj!WlbEdu_tuF8y(w z`_m0Blc8+ey!2d|X!elrz4~W-uW=joxiZ;!f~1uol}nRO$B|?San^(GyYy#pu3hGp ze4Onfn2&a$l|#yHP>FwsgX1@N?JqJk?{0B7og}aO=;aKO(ZLz?+20?NEzP;S|_sduK@twQue%Im8q$}KBE0D8olBqPwbdHRm6W4Y0 z>Kd85_eeLs&4ceY`9HNC3U-Qw1Ck19ZGhN$K%EYK4{U63K+Zv^Avu@CcV1q_1z2PA5qaATwv?j%oKD`fi=6 z`+)Y^CE8Mo)o_C7x=dyfgJutBcbDn*3chI)1*bTZIrY&hx}M=u4U#tCdoXI_^jn-y zC(xV_?haYsOc2Y+$e&i3;0eSZxA6MM{OHHG`QED!IqR5A%mMY26Po21qj&~gT!gnh z7H>}EEFAfEy~peyz6)2ckS<>***hT9Y=bwZJ+-*kmPk|;uIFLk%$T%#MExUXTAl~p zELv8loQCdv#<L2511s?XY=sBHI3am%p167Ai8N(h?z+=6Xe4$++DEUKI#V<&MASk?j*t7H{+aVv(g%#Lkvj zdG1A4dsA}FUFydXcPxc9V}%v7NUc&~4V}8{f}t~$OjM>Qnn9FYCvKQX@*LgN5n>jS zG)FURtbC2Q5)gCF8FPuLZj(=RSt+K;Ztt5zlq*wYNkR4l%$SHKYDA)qol+^T z7|4-MIF+#s6*Djvva~{R#XvzLnn>7&O3ZvTjz$0sR6R+yQoxH>nQhc4q~ch=m=XU4 zvB>A=4o7qkI?O{8eSMom#w4%K34@TC5Alc(FGtEc#A~FgHEvw9x$6!%y>*9!{uj#hT=^zVhVCL1DeXp)(KyK`8ED-+U5SO+w8LfUX^S@MYRN0 zvL-4SBy)w0E1NWiKD*sr`ca>Ey)^4Dtdh!Iro4I&G>=Z;U_>s0WgsYD%*R@gb+p3s ztOyqWeBQ@={gA(W?JbT42VZcRCUg?Q7_WDalb*}DeuMJ0n{2-F3|sjG2}D3AUR-7K zm0#yl_ki8w9hxT!x0HbOYz9v>kc&w&vWgygOy(deHrcCJsch7!Dk5 zk_J02Gf|R=icKz-;QEcL%$yPL-ace};BhbJQA!sn=OoNx9?3S5WDuu5%%-S`Jn_q` zY^$wcRmOw8jkz|>e6ho_CrMxoc z*~~4R(IIzktMpfHuxcf^T&}VvIBZ2BrY_NsELKDr^Aj8wpNP3XXVz{Z2pav~oRMcT zHmqbb5j#(}cSP3LYz)(eEw)oUJG?6#O 
zA2(?Yed_Z#sbYpyIf)t05au)7kWZ}wMkpbPI>~&J_{hQUw(;DMsJuZ;D5CX}tdwO! z!NeCVKqsZbT#KU^IVwqoaa>>~ScIZM+5pE`$jV8Du`S>bBNTKp=0{Q^i7JtiC6*df zElF&yme^cP5tp9SZRB4pLKH=xYyJM4-~1+j@+W`7wQJXS?z!i9@x>P@l}bPF_I;di zjz1gIZ=BK@%xJn6b}CJ|oW)i}4CTYa9t_U#Iy;=o>kPFDYYBnW#Q_?B+{Eo2vVT0} ztm7hO^Tg6wGKnOX*Txv`vhO5lMH#B)9Qm|PLUS3Oo^pO~hpAm6NR=t&lB8`FQ+Ao2 z9@4#kmvbdY-z-qhWy!~7(#n8tqtEVc7bTY`mM@S`#YkvA^1Ml_Ib;7MAhTK^Tg?&^ z9(n1)IfN4$2M08c&S^(BftnzdNs>+~q?6FzZPPs&AeSpx`3(6?3^V8>kB+#17IM&$ zD3^2Oaw!rC9o6q4jgGm07I4s!D3x>MFJzh`eJ~@5JaCU`o}BW-@3$aTWbLajQnFRD zPty!M3L$ivkGhOoXViOB8Y2%SUcyS{DCH6)V=CH56F5gcgcF9X2Isqbv}Y2!=#61@JH=Dh|{B85^alTVwZVgkynNv|_u|0KjJWXP=*hzan)`L2cI=`{8jpFZHg z%P^92tdz6lk}+&uLUOx=(-wF4#x(j7scMl-E=e|FqBwP;!6A1}6&e$R;%c5;Hcr-3 zQA7vAF~@g~Xw^rA`6^~MMh{XipK zNE46S7>YqGrentqBz&UzgyG39jYgk?u80(ik<7BN!i@-LJ^WFf zld}%ztqGo)N3{}UEfveM&}|!25zz#npnpVvIAw2~#7-pGsM;u^fZ)5#+dH(oeRf7M zG%HP|m?IUFv6KL(bx60>;;=KvlVcdE9C5?I)^%bD6T{R{1rWUf%v&6t`_#J<#f<{# zTpWW(60nC8{P}?T&H;_)kfvax#gY_KX_9dtQ=M_VJ772#uqzeP**N*6hTC|^_-vP> zpukMau~NyAu?=ihAeuIr4;vhxc4-ZLd^3+}S!65~)3z~U7N#PgGADAHw9XqG>@*mu z7IQmIHl8M}fi*j1;Dj7Z(`55mD#Z+OUBrBh0z_fJyt%`$-{s6tA;=nOBf@iCX0C^p zE)z?nNNFHU+BoOCw1o^^t;$+PC1**1kJD+;KX^zhvgoOKO1Tudq>iOb7&luS-8mt! 
zQ^>{gB5qaeqn-UF-~Pcl|I42z__g1AiU06Fe3i5ScI1NFrB&~8|3Mcik;bl;$R|xw zF%5~Q%c%*yDdB8L^X!~ftIN<;!AuY@m&hh9a+ZN4d`jgXzt3#c;o$Z$ok@USD3Z+P zD5bL)vm?TBorevXsbG^W<;f;uWNnq%>0O4+6ArvAzMiI1NRzg8;-)3@CSeJGvD{gb3A`qLLUMb7llAjP^FJ6a^Q>!+&Od2LXb5K38d&F*NMDesx0(< z0@npaK~R*7_0tP{7u+e1XyC~jh6wNZ!eJN^1ObvHElzs=v;;0;;NiFsBB2^8vb2~9 z2W~**M~Iq;_e!A>EWfO#eD4a8G9nkKMIT)shr_x;A%%I8Q zQ)L8yX2JkMmzf(f^8_?QMNt9qvKcNSKhl@%e$dA=w*9{1Q2vwDk z-WM*$*mWg>KtxqR`pDx0*Ch-BP*p@pLgd2>x?vFDIu1c3fM%db;^&HA3O$d&^FUP* zWced;tQYNcTzoG;G7Ka|dCW=PMJgxX^_Y%aP!*($e5Z005xVXoHd$2>#b59W(S@BE z4EhX*W2S*bBy2%-o#kdOr7V`CJBKEd1} z5F`T0K#>HL&;0^N5V&~0&v*`!EMprgf)Ei!0j>*CB%rF&;F1QYpNX8LVELB2Z7(x65Z4Wnq(vVt`DCInBy=2tD8e^QWKq1xDzV5|5QY*_Bp^MyU=X-O zVQ`^~(uZ~Nv9iSiKokj!?_%gMAGYb99MBFEG>t7TN6Otuk>$sav7!hUvd7$7Q4k*ye)La7 zNkTIh8;c0y=RT<^$}++u*8d|(yB6)#BxE&0xR{iE{8<7ZA}JcNqyoYteJluw>ct_L zUq+p}3p-;abC~fAVI(ZtCyGd-_>-F?X^5hVAup2u{q#RUL=r&zgwwQwC?RRF#Y9LD ze#R~mKva=ch4_U+F7$5kuc|ImWQrd$elmg}p)M-NKc=FK0HTg4LQEDuy$>V>bO{3i z4FO5GIC3wkNC@bBhTnIgFGLZ7C?E+kx}|>Nd+>>f;^JP9=;8+=0YQ^-x*djR4XD-# zf`Hk<5YN@wcx9XNR_UTT*`wEqXg`a+_!UABMMT3u1Z441`?PrH-qXPkn?+?&0EEZL zq{qo3z$XkPLO($An~bMJ9*k1Nl4)+P8dzW4w#rk<)9-j*Sg?;8g%2T$2z>OT`YG3a zn)4_KAV{C+nEh{t;O;gQ(5P-2>kF-UGZs38}J5S};i$Vx;3S zY;BpJ?D1Uai}0Ab(4ROqKjppoWE~bh(V6(P@xN%-PiUJUetw7Zkq7;RJ`mn_)_=l{ z7JdBDrCWa1KK@x*^rIwy-j!DIgGuk_eLkOd{i1(A_{*OWK|nzG*b4e5>)U6r!xs~q z&*C(`XJbEPi{88bGyQ-M>%Jfe2%mRx@spfYAwUoudZ+i<`~D9ZUcSN1PN8@;giMyN zZl$=K(>`WTg`e48{A%&|iRxpV%J)SVlcdMr?R`CcpUElo@A3L=hdU=Bg^e7EY#KA2 zAeXX9=;C7Xv*a`LE8uu8`OHL9@ORmJ^KbZ1|MQtA2xzkVT0Ij%09sL#Q&&z5|EV1m5GrI&R6{r_|oH@kvd-C(O6CoVl^ zC2z@+B}t==PnG*AO4ok4f#xKet|FWC7p&frZam(_+bzB}<-iA|N0LBisPO zC)89f9nYtMi|PW)@AZ-=j|<(sa5{Z|>3A+#vSi7}SyTp#LKim(aC`{DUv`^5AIlie zC!A?(pZ?*GIT1JL8<)6V)+r~J!@A_3KEBgq-reKDt=qiy##;L*&Zf?l+>`gcfAU(q;#luWW-HL#3dcFUG5S+eA_6FME{&WO5W5lSYNw2rR+ypGTah(d%Y%!mYq zu~23u`Tw(bXH9Zs*MaB%zHi~N?~(h89aTW>B&$e~Qmfe#TQlk&y^f8I*_e&>chFbR z%e;)qY#v4r(oAco7BA8$Iw9rhTG#tg<&roKub>BLiU 
zA`u;3d~8X7;Nee)j7KB-!zq)A%WO77vNLFQg18R)_y1Y};4|s>84U)EW;4c~z|7Z) z*cOSjh2l-Y9ntR%7&tSWnTi>Wkj~mDf`I7H@CG%Uu}f38!{Qr7(1Y6bgZW; z9Wxyb7!C#uoe@L|s8)=G=HLyxG)^0gl{m9VmO@M=X+3sZdSil_L$@=aKXjRj8UiyE zzmKn{@QorVbBYmk=^j7i@ZL@CeE2qZW*cmq5e`=)tmIAfSr30QWZ;bP^aO&HBVmHA z`M{Xbz@b$cAVzHTObXL?kX(m;Z$RJi7)({{gheW6F|Je?_lArkd9;X4){;@AvraJ^ z_n3?ZbOvK4KA`FdV;6DYAm?*v$t1Q2@@UBPc$co@QcGXNj3${kzzAz?ghIb$&YpQr ztAW$&GMK@{m+(=E#G=I0F*Fg>$Da$q4E!mB-hg3$h~td$WCJ-_Bx;Bx)hW(kfHN2| z82ALLj+swmiet=ai%QL*GXy;d>7V?}=T39m&VbG*@a7?*AqT)m;EEO@+5iChS@uo~$yEGd^ z?vB^UW|E|129sW$PWh1i<0>uRgryfLMJHJDHhbGX4^CyyznCML6zCn6>70ycRw4cD zdGhfnMaF2u2Cb7aC&vTIWtZgD^W>u@+5QQmqg`%(TxA+xA$cv&M&|LrGhpU)7@a)e zs5;lw!4p;yu>tD;Bt0M#yjEU*5C2gw_fK@ z{_0bPi$m-$o}=8(bKH!O9F>`NPC2eLaTA*eb2-ju!PY#WPrqGd=VlpoZk}W!NzUz~ z4JuUk%N*6F9QUHkZ|1p_EzsJ(M{ECtX6Z!|a~d&SItx650M}{JYaFv%Zqb?m(Iir< zk6f-{zW4&MSdv8njNyRc{zJN>B(2hAtZ15&1mTzVQ0SM9N8r2kD+g5e_b88`JJWEn zDXlEB8jF%t1k}erxcO58w@0^D=H&2{YPH4GOrxJmu$)myM$g8^O1Z}I@eDtjCKs{E z$t{%LJs#ewvVG!{y!K`0SE5+TV}WPjf#)#pH>e(zshl>bRh#JNUM9YlLX|vXvjM~E zAt&V%_KvHt_&i2Y<-B~vtli|}Er>48b3UISC4d_E;Efn}Yg8ZZQ|(NtjuhgN5xNnu zKU*dePm!<`rYE=9ee=)w>%abl?|j_C_+o(zu~iP+GZImVXq-*b1RhMfj5{?>%T-P) z6ROpK`D+(hh{ee&j|HB=h(Yt1>cJ7^o=mT?hkU%nL9c|fyiO#MW}YWEfO}Hg{locP z#@#C0@7>_2Iia53B$ry`{3V6%_B&J#-{af07qLp``Nm5{ig5)6pTXfx>PI`=s>K*7 z30Bq?Dd_hQAAZ1hKb)}r(U||~Hx{|JrlGqHj_&Vs`@@I$g=N%2kz%@yT0Y>T|Nc6s z^Ot!T`A=L=Dy#`U(}VXZKWOsC>?JO1X`b;xlpLncDG%QME?WnEZkN(r)YDuIWE9gw z(z_ho`h>kkfca&Ut~RFK{FIybJsvhKq{UZR*JSka`yAeJ_~urI*D^)&*8mSZr_E&h zZSLI}^0yCCd_jR|x{pn-B>(^*07*naRL6sQm-bGb)7!68F)Fl1D}3X6j@6hAcuc#e zbavk4&hC)6_oFOr7TLHGLqC3(+Tl(9_Wyndxp;xguY7^4C7W~Uu;k58!k@O8wQsQX z;U53^R)e)~Bw4Z~W~UAO@(#W7hum>`-1=cZwZV<6b zF3zye!NXIA(>S^FOT>~k8ewtaQ0SM33k<2;{+NSz-(pKxqirtm>}s6s=!nLRZ*q6X z=1;$~$tz!Yj%!ypnKQxmyZD1+wzp6DZI zt*l~1TddV?@aBUUCzU9%ms2FNpv5P2ZtQd8o9|jD}e6Ehggb%*cr4=kNx4A+xoj~Hr-GcQg2|OQt`tBUl*uTxqci(5HZ!vx4 zGO?v1^UHDKr{7}S`+$$`X}Cq5=P#v5L|kOILuKa{TQ_fUV|opBAv8e)uUCtx`$5lB9h@)q^& 
zQ*Ikqhz|rrK=itd`g=V1=tDl*nQ-vs|ACw<5d;#lIzyCuob2p!e{W1`b&ZJ)ZmmPD z?$ICkI8zaKJjJNop}IHajRz{L?J24VQXmqHhfI$?;poAbH}0Jyx32SS9l9NvW@||Q z@KegN$8Dv}_2n2~K`>&}IpFYvZ}H)d#@~0o!tb6-^VO9C=HOd&Z{6T~-x)Jqnlf8? zmVCl|BFi}ZLd<&5f574PF0a4UMZ9`MbS=ZA;UHI=_?>OaWr^=~7WsEyoM#$%C^Ls~ z^N8x!hupre@cw>|uWckbHy0z-yiav_#Eo|kNF~}NH1H%D2@N}%4q@ zfz^mbLK-4Zn+)1V+BI$6I(SlmKN;Zc-Qe)nHgDWa^4!Wfz9>N;=^#i5tsOc? zRc;=pkOG|-Vu7!{rBJIuKFs2^b!}7CFZ07-rj9G$3Fk< z?gASf7gh2RgCV^^hrQi4jww>SGDkFF^GleC8VddD@*QwbDBt~nxBm1m*uC;UBVK!n zFBP-Qw@z^D@36n6^2euL)XY5do9krc3Cf^C>){QyZZ~-Qmcz68RiX<~v}uiTZNP1( zPBvwOVBt0P>280>&3l^!1&Nm`D!ysa=_|BP4`}b+;*RnEp_+vWd9gZH{ z;p0ynoJf&pFUGJ3GmP$iP6un;GCO?zN*qxN(2NoNc7?6)eM&q#PdXRJtu+``+O%5` z8}t~t4xRP^&f#5d-mK9SvuvEtF*oP478`Q+&klI-_D6K%{{agF4;f8UNyH@7XaPBH zvUzTWXD(zZS}LYE!XNdi?%ihR_Dw!&okL7pEMJbG_EE)M8lK zV(;!g@4t6S?zIvNm*&V>U6ksS(}M}aq{i~b9LZFaCwVo;QxbSiff2_a{3EyDev5a9 z&%yk&eB=Ba7m5~w0Gg>`SsGrWLN_?1GQCWpTR}g4op;`E@aF9VSATnz%P%ZbvNfX0 z3eu(D;nh@ul9S~^Nya^S!0kW(OKuO+RImItS2osoDQA(?MW)3oJpa2Qqj(W~N{?udU%DCzriOd|=H=~ro z+0LJU0LUml*t0Q$ZqUzIjK>P{Kwx2G6)W1oIaNs6CWe~DFbp(zK+wFw&dn|U{I8Bt z*Pr2q|NJtmOL0k?ZuApW*6i zpD)Qi6RU(at-^zE^WI&T|GIUB*M5D2-+ZaWeA+;&`#6I!o}NTmKTk5ABp0~oUX_zU zlZVGTq+8qC`qBq1$ zNQ|Q~T7v?`Y?O=ZG9TZG(c4zZq@%>sQ6yP}Ne%ity#MVtdH4N8c;&ZPcxIE0RG0cz zjqkqg;64{+^O-rK2?N1*kS0T1*QJF4jzP>#v-;c;^IuVUZ3uQ!Ag%3j`|Y>6_s$lL zOTSL~(lsuYZ04*cf-u5$4ZK;FV%BD%q@k%Ij7QKaGdAX!ES@8u&9fXeiOM2kbDQz; zKk)jSHU9CAz}2rWarttVY_P@cdW$!!7sweoo?U{ZQKxzEDewNjf5SsRPviOD;@tW? 
zm*Wyq-=*OOjAt1lcAAAvlWgkA4&9#Y);|)ATLjK74?fxAdmo(8dA`K*`UN(Naf-Hr z00e=DH=8n>jG0Zxxc-Pvb(iV8-{t=CI(y<3zE&!+F&`%;fhuL7be+vagu+B5Bkxc- z+T#894>>L5h`)N7;(V03hyqB+@i|tWv+#8TDIsFu;Lgq#1og)jxnh!R)4`Ds?F^a15{sKfELlXs2L{v|eePF>I9{6c z+6u{3k_dn>V>+AAY7H2U74&!i)&|z&;Ee%s7JZ% zGRzwk*9)XlI*QkzQteZ3`pC&8vZVz!(*`N?Nq*n@c@mgSri|)!##5Nuc~mPw+>Vpa z$B{<{!_f@QFvt{3L~IqDHqEUM*|~enN6jSHu9diQWsaf>+PKTKQ^y%i86_7n<5?Ds zvmnuPXAG(hM#Cx6{4&vefr6o;0^-DF+G^l9V}_}Dv_yt^BOo$qGHM@iZ@bLHx_~;j zNIaV%0Z2?3xC0usA?>z{q#0Pr9Es8f%W)`K9{4@#_bc4{;C&u6RymEmO1wDF`S}P2 zGe#qa=1G?+1>zfP#N%Gd6&tr2JjMk0+F&67+@%x8z(l61z?ILTC$T+TpI1V;5khWGwEpM1K< zySp=<{gQ~TOz9n$X>@%W(QA|zmME^rNkt@>j`3Tk4EqDBzC~P55>FK=E*ID+Mo|H; zbBF2CTkJnLs_qNe=)6Lk6mI_(awYM|H=;?j`b{vPjq^L^^Rj`iGgWHL$8(xW6_$pAe|A|9a> zchOomsO{b6&C?VOrNm2@qLeZkXR}%&ie?hc<<1_gf`I0?nNChQXsA>MDXjffRGdw) z1_~zx2o_{;f-|@e5-hk43=rJig9LXA!QI{62@>3Oa1Txh?(TXf-`@M`zxmf$XRWz- zXWpLfYOk)R`Y9B`a8Vg?oPpT}jHcI{&r$CYH{C*wB&j}rmKAkb#to9XExFburs`j5 zrryDKOv$_3WA1*Zg(k(-fFX57gE(>+4vQDs_1*(b>O%uYYeG6@o*Ga{-QZa#u4HkL zuzta+&(0t1a0GF`PpI8fn%sh1YSQUP0JDJwm6>gP<|D3)3(069u1tA&f=MWw19W3Y79sCRqSm~_E7P-onr`i* zuncYbnmlH@x^H@#K8;n|oqu|v(ickJka2)h;Vo@+x8od0?GG&stH>IOoj_z;D_+9m zJ;0SN%4fpCRp~EoZ|wc8|H`XsUu|^X{3c}b4YT+z`xUji%_;K_9e7Lb z?YCKTr4EH+z5Ju4lt9YEWIXUdMP0Ikfm5yQab)MZ-psDv0kzCMvHCP1iBh-|IXHWo z0D)LrRvcz4cJZTeztZ+bw@p5Wj~rbBHQ3~WB)0k<3|@6O|*!8gTcPtJ-Oil-KBm6RULvCju`CDQoYP%YY<8;A&6F5o7Pki<;*{F-Axgol{ylf#JzA;L)>QaBjw>j~VYXoP z{O9g&h?zB~hf}(06ngorHUk|vDG3%2@Ih1G@he1okhl@}V3d}KD$B%D6vANs!wS&yF|I~ZpyB|BO$O!+LV8MPu zdTCcYs@N})sZHBj+cT|0GzbNy%PfR{G(qLPXz)K#&+QbegjW8 zb^FyiON)Z*n7F&CxRZ~qQaYsgoT6Zxuy0<4^`{MD$v!3{*%0|tgA#`i)-oSiW1B4;C_kfts_v%=9z*T#K@3BY2LI1M3H;T^QeiIH}8t$7Ii zD$u(EXfTb9D}vVV-FOJN(NS#WF)&2!UlJcLE7lnvBOTr zPe;U|%@QbOl5k|-3CCt=9{IV7&fViaCNDrVNB6T+({9WsE7`ts@(>kFMxgDg}j99UcITgt~L*0W5KaVJ8d*p`H1T4!>Hi z4TZJV?|6iN3w5-b@u32Zj4gVx~mDPhVDp^lRxEj0uwW zZ#}K&FHI70lGP;QPLSozlPhbbSEU_%=tbzQ-~^kB(X^MnoHkP)djaQ%60m^iptqAI ze4n>-D5r0X5_SLuGc@L?rvs4U&%?nsOC;e}{gj~@p~Us2a|cJHPJ}o4;$fP<@tA8Z 
zx0#;v^t&s`?55cX_AlFGOYF zG>Lvm18bR{PfqO&DSA<UXf<*Bi}YK2rOh{-Y>x|2Er*Sikk*-*wlIMQt;oF-!;QcS%0sf6FjHh z&8n)=o54~+l}qZ>PwKBE0|~xJxxBZ(ap)XNlr0yiBc%A665<~~If2whSa%-L^;pc` zYYrmbbPBeZLh^s(&p=TM=Z0;THA2UeK0$fbMZ%VP@8tbaH7RtT&AQPdLoG$TDhEb` zw2HmbF_@_r;GzHoWyNXMhHg_{X`z$UDKPQoOdr+h{|;@}p>*{@ADO-LTC63sJx&p6 z6`^8R=$5Zzv!sj!%=Qy{+^6WZOHen+*Z%BD%n)CL^WB-^tC-=l$=|6qUf?4p;}|L? z=%d`L-{ZW!KkF3Q()$#olVWT;x06c!qclsjaTAEPy$>C`b3ZRyC!RUIT09xcr$K3Z zf&}QuP^7q=MPuwWwi5RzgUg9f+uyYrrowLQ80WDy$U(HlXNurE#15}w@>j*Q#IQBP z>*8G4`;lq6zNfhM6!?sW;ulxb@O+Cp*6sX>y5t3jv#V%5+x& zX-)=q(Z{P|9q#XDtG6_ln?_YfsK~;}X|RE>mXE}Ls8ZbBzDws2?VWR~^$jG6;Ci>E zhR-;kVSO>;7A^j;tj!rZ%~Hvp9A1a_UM@%^U+QwkTgNgHYDBuY&ryy>FfhpD(}jjG z6k#|pBkuG;83hv|d&sWu=VneHWeFBSDv?<6@QFxQAH?qDo(mK>kwnRd$}%`ZU)eDV ziK(Fi#W8K5d-BW$XAr9`>?0t_Vuf9TpF(9DS15blEqx9qaZhASgVw4U@NX_Z{vJcO!5#F%rQ4>(;j5`Hfm(==gh!*a2&R&G3pg%yaBbhA6a&>RApW8*^RA{va)65nJ0$jxi(6zha6 za@BRrjtp$c1B)kzBCs7fS*n&rNMPy5pS5rWRmrHb5h}pr4EA$NI4S1|rGWOFLsAB3 zwWw~=**ETq89x$2#9qw5@7AjtPufng??_wG#2S)yvk`>7ow-0>^+>mU;6*7k9jB&- z-GrULw#eqRRM!f@LQup`;cS6eqw<>N-Ps^!VmH{T^PGA4_uxR@u_sYjE{a3m>EdNC zv>0|GednjKAepkdU35)uilMuBDgvwh=t>2ZClNL*tBfJg_y??3Zbl+rNwHwV6}=?b zcZyVRDrHAl%v#NUwDBxSMQ8j0m(H|Wg3 z9GgU2M|X`x{9UFsdY6wbZ4g=?t3qPDB$5K$Z^n{LRMr}#MLU}RB5TDo$W@!oY=-G~ z`f#^~CXV4MP@rF$`=C3k7dfL9Zsyvf5=AKscT*5rJ+>Exm@uNN#heWF|IQ#*{abQQ zuAVNyYFR8-AN@N&xy7lSgVx{GciG{ev-8Le2e4s)gqe8nB&pm2JSZ>6Fomdvg*_uO z73~p?k2DU4+>KP;JR*lqqapN3CA0LpyAS-IA@@7}*HnXr0?Y7bH*n1`3 z0z^K3GXq^wTTh8AJ8e8Hnz6@=M0b=;W+6yV!w+Iv+~TUZhwV)ud13v*Kq?ZGga!FZ z3vjJf*tSbwjelGG1LwRX!btcVPd^*Z6)g|#ODU2- zMxPRwiX3^3ePrN30wdW<(e31}!t3<9gOU+5%a7Cnd9#%CA0_BWZ1yx)>2g7XpLmbm zydKBvbUH2U|F(_pF~>gIB(J@&iypw6U&A80K#N^1ZgZI`FCA#cCG~pTtTAfIk?bUu zGeqzw=x6rvn)TzGti=Yh9CS*RtZqV>SmQJ^Xeq)7?qJZG!PeyQY%2Wie$foE45LP# z91Bv6lrsE;nEWEd6{FPq@O6FS5A>{Z3+|BBJFt90XJ>AV=e~q!$Mq%PX((l?TMQO2 zy0XVT0ut)T*VkW{SxY0IeV!A<@2@2^@!ZgVzeU`yl)SwqwNlzKq3vsJ-_iu_6-^7| z&53zyA1+eYyXF#U#1LFVJov4_o*~$Yt=jbi!r-gEDv 
zw0c#XZYQM0C3&mjZJpo;+Ho~q3Ppg%j1;lzQU>u2`=vXm{IWnLa022+$-rRd^WE(# zz+}_=rc-_Qb$WPodnZ)^%ESOSlrWOQFt{q%IE=!2V2mOYoXb(DOTxz6CiSUMSx?_k zFDhn>o-jc)x7hEr+X=0Vn@Zb>=Vg6@x9QrZBU97#Z%>`T?LoPYu8cizxqr%Fgtgta z()XJ!zsl1mtZyV0J>4jl{GxoZq`yIqK6irD z&6^V(aX64E*=Og|?}CRp)MT{1?!&#VuL{wfi@Jucyi&taF+=#!QD)33F3xWu zvn#!pvpgwdhKmwex^Y7Tg8Yg3Z+Vq!c#`Mg*LXaV6<%g(9Xk46z6N>Ghvg4KHx$b= zrJ*3n5>gP}cf-K390_W_RoSa_~z0g+JNY zm_=+wK6b=(XoC97HL?F6^BR*n-#gf?!F%(zW+8>2M)CSFla#X13%LrB=(QWMrDo3Y z3lxsyq0HleVv`7UZ0cFZYWwE45y*ZV8-iI?I2nA184*{ukxdy(wAlf3iO0dYUzyR9TlbpCs$97xolcq=m1HO|PTcvOHWr)GNMABZR<=HEdn`@vNOpQ8 z4$eC+&+i)p0qWJOxur{NsKS#mkvw3}A9LhAm&L|Mg}fU1LOL!ME@;kjk(QJsX6DDfl6k(1nHB zSd|!|bu-mmj5<1br`yVk{WiPv!x;j|u{xYQ(1-eq6%!a!6d7nT)Lxh_6iLM8R29WRfbTla0v&MEMLQMcff=xee3(Uil=Ir zh5vf{nuuRrb#8<<%eHu_kK@Mj()5Gl#!}9iGD)7YEc06ETbPjTBuPugl`7K=ITi;y z4w*f-(PYUK3BPhXAv%xc@F*}-oI$it zg-sQ?n~dEVkrTrT&3wH`X#p!87R6>l%Wd=Rb=D|kw#FEXNo+s`fhH3#ekvq3P58Im zMo`Gp;-{!CD!bqa58b6X#8u(^Y6J6{;ZV;O}4>?hkp6d9sTdKUGCD~ zTeV$~bwFP)^nD_U-`al*;=q)Rr$76QyK*)X4RA9wlel!+d1n<--@If(TWgj}H!P*O!6)pD1KB1S!= zB8wTlmvVvTzEm)G&0{$v9*?zwjIEsb#q{lW!WqM8`(m&aTfPuh2yl}C3sp{O5{FP# z9rL`~G1ckNiQl`78N=xvhYjpnNDz>%>~|{UbDIVchY?mICyt`<125zSUPKrZ{^{lP zRnYe?*cVFn)J=Bg*?H^vy0a;W%aIkr5W+BJ5+J=|3`*U24_8)26}l0vfk{Pfi3Ma9 z?fS{N=_F{$*kONR`7Ooc`3*qfH2W`pV<(}|UjUz6Q@Ux~U z6*&t7c}Al61q?ljvUQx*&1V_A)XEbsHOc)O+d$LzAv-hu8%6!&-S4vWakWIh7;zcFUN*~6U1;LFPD zn*=Z|n74x!#}9ho$cgBRBwdDj@ZkmI!~p$u-Rg1x0^}Vd^G@u3vM2dy^T|!2qS07# ziW0@qfKW?X@I56uN)ct=hM_d+_YX zw~-6U8ZtOTxN_u?=`|sa;;!`*+KuMWe0ow$a&l>uubi?yG5}eHvzRj=iNsb-vdr85 zh};zR7x%I}KO^cd5lBfTm?7DjY;aB=!Hq*wv?gtY9R(C2pkecYDh>c0I~qa+nWmf8 z8z2tHccX!n=;o~=(zrq@Mss~Pc)9Nu86|X^;TQS4?Py=W7f=`!i<=N`O-LxClqCxP zG9tjnpSX^3B2n}?(!ms&W{X3ZwZP-&LeP}toG!N&QZIOXFHK#1lmrOc&!=$ynsP*m}`W0%0{^Oa?WAS=E(F6Q%l6Zf!( z=F{~pi*2=2tXDhNlvWW)_&lW+Eoqh*VdP+FNhY`do#KK{4gEPiQx_ThX8*kP3#N!t zEOm%L2S+J6d8yD*PdE}>iO`1jFY++yqR_t0aMy7k+Gp*K6={$(i0|yabtCvzSp+l2 zM@395can9*8dO-LUc~z)l{v-WRoSI6%BWySx24Bn-t3sHYzt?*1|UK%4T=N49%kF; 
zq2)ap|6n?wa(pxwgBRyimgFFNEhekqeh;Jh^pO>^JrZ~f%o}tEXCw#CBZFY8QoOtlGsgG}LcC<=$nWHfagWrVB@NhK`Y>tnnT*(IA~r;WdhSU(KB(I_{25LcZ8IYVvZQ6F>rM)#PO3>StCtme8sw+~q}73;pnmWJJAG?3SfbRTj zWWzGwJ$*(r0kLfRB(~>!RoC2quJegEB`bk<7N(YWUP#Z{0pHE{*#C9`V0q*YB8lGV zAU#6@^p(W^CHU@e)~v*T|-?W(8m{b;lJcQ zcs*SlI38a!RH;+sh$UCi$7hYVo*B`pzLKZ+Z6Y=l1~O{=%lV1}8!-!3@7{M0Xf0ef zwq$IQeV^pueWttE-@wsO5nxE{uvEwBk7FbFsC}IISNfj1Hi27^sKN()m}zrmxAN4> zpCCL|u+eW+p=GQTW3=Rylc!xf-`G2I$RSJhpA`Ru|M*Y%&s<&x{)hKY8Az!*Nr8dc zG-m>jDy*u(Je!FApUj^2O8%?rGZU;O=&tHxZ9*zywKe`O{R$V7Sa+vcJjwTYz!K#d z`>z$UYhcdVfmxFNetv#uC3}|&bv)8Ys1m{dBz&bcB$@c1R?D57kbsH%B6M?dITU-@ zb@}wkn`< z`=2MIi9n>R|9(U2ais7s{`VHi|4b)jo&LY4|JO+u%5^dI2mm?4JpSFrH=kOb?gmL_ z_d9(M>0|7?V9|fkneJj<8NwN1r97=+bt}UXn|C{c{BB?zT!9$q!xmn@*Hhn6{3sHC z)!3Yn{28=|hXpT!nMHynfbcgk_if$mgm=2{bM{CCAcTVkG$Z}6U&NgCotB-Do=I2V znDl2y4*(Tbmri~0Gn@eYAe4mC@#NBR{reVFs(&>jqw<)7VP*dRCcWz!p{E9m5)T$w%ESQ!45cyQCu*(#w=L(%<&>v65rk(r zP3{-#Rek=h6T-j!(f#JnZ7PH&BP(m;@r8pv;eT8E+8rP;!)fZ{lLeub91V<8J_v{aR#fEtu*x>M1uutkPL)Wf%ElD~4$Th% zg2ChH)L=jyNH;?f+Kt%X0VG&H;sg5v-+uZ*3*I(sAd+DQF z!ur;X3>U&j zVVW?JA~3@PBp{wSY$m?ps!c?x8*#23R`xgFiiNZ|Mf25bi4D|1zU~Yq?3N@tjwZ<^ zuwtXk;Kcb^Y;X`i1Ss}6breSctx}qai%boMy><+kVN>Z*_}}OfR0Q0^{}AJ=q-hO) zCs`_4U|ypN-HQQI4IB%$_XY#c38t~+cNo5&6E|0c6*WN~mWfH77kOQfzV|_mn01V+ z*azo=z-q5(Lrg@NJaP>uD@LBhLu+69wVH%KkJ`Ea-qO*0PyRs)xTze4j=r$ARLIL| zN0sS;t5V(j0wcGbLnyP_)htaJ#dRX5jxvH{6<5Mlk(Juvh!B$z>ro8uzM%=W34TWl z1b^V{Sq0m}4y$@=> zeeCNORJJGOlh-qr-eKnm!%$yXOd1`LcYr*Do}Y(#8i+}yxw6y6@Dn&8NUO>Z7(TPBDS_@)^c-y&R$sUz{!Q&WwtrL0EewaIID$XA`MH~>(RG05{!&LvV{=&+ z*9?TB$=v-#6{o*zR3pJwNZk7st0MuEHYP7m8-||q^-p8n1N&sj!zlB3D8R+V_Yf08 z6NFTpxD=iw#W$MCu{irj!W3&i2hZC-pu0p_JW>#FH;bk+d0NB^=0o!j(G;Wj(A4YT zW96qk`-T*wUCBxl@ILC4bB<xV6jV(t)Hu-zvGerCL=O>bZZDVe3W$_DosDNhTd8P34^CA zF$>bkm(R^8E)gap@QLk#@`YvfkMGha%3jf+n6WDq78nzPV%`X5EpOTxN5}Va_afD* z;niu-l#}EQ(`yX~tKle3(4u?6kx2%-F1p+Gu%3+8iU$2sSFTVXd8YqiZD?{aI-XdP zz_BOYNjk^?o0I~H|27vdoiqTyZrg7P-HCj%rS)KOL-DTqjXPBg*lNk$d-Nnw-QFNv 
zu*B&;VD*dMbaX?a>NQm6+9-R(ICFGVfP+zD=W^!6fdDH=>_NT@D(iEt0_IXQuln+B z^t^p5C08%_fvB81czuigw6nGQ@gvI^IJMba(+su}{x$w@Q7GN0kzKzcd{05;8}a*d zj-!^rCY=JaxzZ?+1ppd{5+-L@B(k$ajUJ?Gq5|1gmIN2ETY?NX+#yfMiVX2NIq7&U zRk@6S+JraAhDeImvEP_-6NpR(8B%q!92UD&l&4KZ_h2{w9w_SK0!0nGYLFbp*?F*_tAW{-Wk~{XnU%ZiU0iHqjHBQJ$ae znRqRds%47+yBVQuoAOsgM1^=)fMRF(3?W+>C5=7b z2WzPK*C;I(QAHTtg>nlmv&SY9s~~^iC-S{a2M;*ve29zLTgH7O4-B`h4&ivZawN--Vw8QhjWf@F!jp>_Ae4^GK(YY24zyLu7zNR#J>H&p--+4 z<0zK0@QME2HF%y^m(S=Iq+1%pm|VlfHo@{atLg5UNKz0q0;!`_O<@r#{b@uN z%s~r=o{b81r&wcu*-Hp6X18s z=JIg$;63m*0E{+8;LDjabK;*MHU$VvH9wab5RUMyRd~IN$=nod2g3z|ijs_EKezDP z+>kLW)P<{jtYezMho%w@EMCS#jh#G%cdruK^979;$(a+_^YDgCdQYZEqh~>_yW@4U zKQ+_kQqiIbPXx`%@h3^a!a>8$&Uj-e;T|#)VMv~>^%x3^r--mStv@YvTD$!Zfn>*b@}9l+L$_C5Cc>s_-2PjUMZ)#;^M+x;=j%Ezhy%pF^e&#bQXFz2t&h= z&iI$_+I3q#QVY^?YIJzS%g(bpUIz8+boWg`mAuGNI&q`bGNoNPtlS% z6JjhAU%zWHMu?CY*KGysl-bYGDNUl`5XYV4rJsIYAD0XlG|;=EFTzh9Vq`L>qFE!D zxJ)TPR#-ZVa}(Wz5#KuStfh#?$kyM`;x*D;+=|4oJs)YjIZt|xX+HdPf9XFIR+{?`Kw&>w ze7R@7;(jV9DQi>P3%mT&?42f|je0+BwVP@=8zLzM@A>VbyIs&V z3Xs!vVTH_EsWVt*Il6}uRu@@!FWA|{zivYQj+ccVVPHg^o-hX0qbXhJ?@J~2%V8_v z(D#bO*6S4HTo|~rvVODt?V_u+))1dA(P2f$7q2>*9Elb5ZyUVh0s9|OrnXE(GU{$D z>^^(rn**>1k?r|R1f+SELQ>JtdAwH)Y~2F`6S>C6Oe}DUwKWAR$2cS?WE?fN9d&d@ z6cd)9o4RW+_r@n{{M}%M{oAMo0jH8B!trH3NtS3`M&m;iNkA+Rvwvu@R)VjrNJ&%2 zpkcYl%_=(Sh@wBoq};@OUPCi;`$;J=qcqn_BB42aG>NP(Mb)AOxqbGHW027^F@BX^ zUL33P@K+PT5u*HTN@8LT%e9^E-uG?IT!L5+ z`3@w@K=*HA5O`_1o40MO(ldQ!>2{y!uwo9cS*gOrYoH)k@TtjdAI!arv*_F3gMToV6p>E2 z6Yo$k?-bj$iKK+HHI6Y4+uez-ts3T z8&aR!6ocEbgMWDh;$W{fz!1BIqA2PgPq7jC`4&X|l0Uv4rnKc6WqgQHsim%10-=t; zI&RrKdP3}XffkcuqX`prpvL=rnU0pO;Xtn8u!ynbL^$O5HLUhWq0Ols_EVDkzF(lk zX@L>d^yaWYaMYsA_ifRx-+%E2mjS1VziT3y=FPeCexqjwf3yF#nbKM*us;pzPeovv zha4$p)u`uMEaRvoXYFEol5VOL$Bo5QE@%V=^=zqEcFlO^P?W|ak8&D z@_rN@^Jd=~`_I8pU?o;Dashj3S}y}@Z5FCD3Poio_?$2*5EuS$KlQ&uYPPT>`{kBe z=LkYwl-k<*-~7Q9-+BqFgvr|pBN-x*BQ~CRe+Vep3jWw!oSm>AH^?&WOBV&g%*nM8QT_n| zhI=M9A5=|=?>BT5*W5{hLSAQ%h$1VPmF`8i4s!8dD&V&>voEfJZj zh^Sm;%0HoZR+ZoQ@1=3T(>x+xIM9{ 
zbza!9NWtu5FQ8v-_r9r=s#izbu_9`&eiZHC#sZ2x89)XRSWQ~~#xmv8U1)0WCNMG5 zg+&1oV#L=mkOw|L*rqoePLXy4)6m;h6ZX;blo7*V>rE}4IDM>j&R2$|;uEe{OOqU7 z8vc=oK+UCmLD%=X0-eEsh-!HGwk+}oT7O+u=(i3qR{Mr;=S38Hm&U`CB+vgj$+5^0 zW)g!Of&wC`BfP%X<{G9G9w`MzR4I*a!+0)c8(bDvR}4EjHtYznc-~1?;zbD=CVVyV z)!)r1*mn{z$pA>i05hCzWTh`S#RZYG;y9ll=7b86!9;+#bsU>GORXK#UHd~TcbSq@b*mX$4|xZDY2bcNOo7h=g?_T#CLH3Lx~BT$WyW$cR< zea?BVZY9ie;q&7^=Na#NN)a}f!REo1(4mD#i{q1~?JGaS;U;P?;zl)1*SHk$42AO}r0@<`s z``Bc>8_%l^fNRieC!d`OS&Y&g+IM*l#I>sG9u0IhbKpjx#3^F-JYPJ8%>~&Uw$vBi zze91!*}39m9vT^<+S07 z9pQRmA>R=DWk|ynG|+t0ZbRZ}=aXaNGH^vGsKq+8jE{F1qW!%=t5`4~6BduXpIwkb z(@Wq_uf1r`m)`o3+3E_8POqrE45@Fku~jS%f21Qw+Ug;izhjOpN9sgGBNQ+n&?Mht3sFo3*DH7slkvI5r)(Y30PSUnh11~S z^I3{9Je|z(pqbU|<#Y)NX!kHZCYnZPiH8zmlF{U6o3;S5pmc(*C))pvtUa_c`N)Rm z*&;?4>muKs1jLl;*qsq{6AgoN;r`DTAjXKS(_6;3QEYzZi@CanhS1IX9ih9whd z)1PjV2s>q@nSJZ~$$fU17jr~i$~lthtP$tM*>oM4(@=Qbg|PYELssMwA5XEa4r>?x zNLh`L*)(srGuMM&n7{%1(Uskglp)RWh(UCp(**g!0$r7%1;iH0+&lv_j)g*j-=X0< z@*z(&&oji)C)ySrfz-=5JpphEma=ln(lqcLvYi|SLK&Fxae(*%?16Xhi$C=+nnH2R zOLbcY9Au3uJ-h5TFnI@we;sIqLF7)RkI;EZG^Lw04AtFjeb;sC$dcV06q#G-JlYTXchN6CHG(Q{RR+JP00|EV_^RP;x zB_6@SJd)bbjEq`gM&ii-Mll?rX53G-Y#-nBR-vOQO&z0e&VkKelgd5}-=*Aiqhr{T z&CKnldLL8kxw~g3vznBqV7L9ImpWbDG&qbg>*It`3+HdjrEN25AG)`lg^a#D5@$D9 z46uPIh%xM3P91dA7n&^cZLBv1+a~OnbF(46mYfz?pM8>ls4kSuO>uwXX{zggnJk9A zFj`p4XSMQ*K?O_m`qs!(m&c1y3h|3=J~1Tb4=!ZB74waLj`Vvm7`>i{Me`jQF|j_} zugtWB7x`G+KJoq*ePQWT?xP)YsA%xc4*L6MhRRe`P$A!apwQNV9QMeU`~FqZ;?>rb zmdspNg4(iUyKjvnL4UeB9(0qc^mbc!M@ExXaR^~SJSE09=3@N|i=H-aC}>aU*K$WH zUzkZdr{(B1n8`RJzkTBPA}9atNy zC6y&$eq(?cm|gOsV5Xc+AaZ`2xbyMyipFRyK?&3_%+kDO7D*IiBL0gg0zHSA`&^My z#$Z}2w6KuUSzC-^U>?2BCac_EfcyC|FB*X5j1d=|#0knstWtw{1C*6O=TaL*V6w{6q=ALxmNSJ6J zEgVH@zMOJOYBtJAPENHk8TtrH`w9#5mDnF%k|tu^$m1+hm9hoF)yyjFP?N}>qOa67 zv$c#0j04MOS6$NY^G5jhtUSm{OTPUzU9k9({R)m!MQN^&VO+^(ltW0pP-{3&k=kS^ z=t$ra{~MqjAwewogoj#ua5Qmf8+BBbQiW!38%u=J*o;GCkpX6jOwIF8GSh60W!t2V z88P0iTS$w@SV-AMNczTPIE$C^S*X`&(Q81K4`$?`jGMkHr8*UwyX}1SetD>VO-uju 
zt+t~>kfe(xwEMIFnW(h#z%k8>g}6>x4grJO(ccg6(LBA0g$#+obXs^E$yECEhUt6_ zWjRnl7}HyNnBjkb-99c3s6g`*f?t_nN~ozAGja3!0yQ4T`vv()!rd-6tM9Mh6}TQo@Lw{U!M^{bQUywinh9DKkULE|5DVre&x ztqZ~n-gjKr((wU`MliEl5+z0U+;%N7V1PXeHH^tn8_*5ychz0dpix7sZ5ALLA`uZ1Ds7w9G}9p_)bkPTw=YT@4d<{gpee?fmJHk|EPO$ruK zGO3RUs4!P#iowB^n=jT~$;Qho*xxulFc+u<(o2W=SJ0J&`4?GOAg6=|*_IHrJPh$` zP*MyrgjM8}Fw_?0S`t!%sgj(TlqFsl4!!Wsa7o7EhK7jD(6AK7i^-`k7C~1G08rY~ z$K3>_iyO|6gC!?bB{>c@K&ZIdl-xTx#V@ApvAUQ66O#W7J6STnDx%del)w zj$(K#b1|v(_B`pB;=zpgzgQbHtN#Z_BlnX0{}9&n+DF)m2figw9?3m4D%SfS{O?-} zryMvAe&oSxS;s4ll|&$)4u(nL;p3O8l~3eB{tx`y1D|>QUObU~Kfx&%3k-$Oz<_Kw zJX65`;E5fQ3jW2Gg3EFa*+Y8Aalq#T+5f>D?{?z1&4qBKAuRu!GV4I|6N6wp-o!2g zdPqK|PzdrS4mc^c04H@{yqFWg#C-`)76pbr7UU4orA3tvmkif^OO=Zc;mN=(2IZRM zlmi~r?MVN+s-hGC?Zt@$0}LSq)CJPgwGkCiX}^S`0!z`ff`8*6=xd6FOc;#>goa)b zE-g2|1T;MFt=Y31Pg7tC!GRA@6elPwhzkJHLmk4HMB<>*wf=dQC1DXaqQ9|cUNP(m zu;k#BRHde!R0#E;Bi>=0Q6Xa4NQu(Z>?-?Kt@D;J&4A(`n=m^vpcLp`peL#%ZHlX_ zJgI^J_QyY0gIR0=gAgM^AcB~BjOl)0+wVWBf11__UCE~Sy}DlAI{R&D#cOOp9%0Py zo1%hu`wj3XAs@7nkp@Xmy2Lbn5_=AuUo6_YnuqI1s^75Lr#XG{9q%saYj^>D!mHD% z`Z)0<&~sXbbVk7Kx_|U}XKja_e^5+-iIZO{SB*S=AVtJ)w>jc7+58WPapb*v$DoJh zXqiu6<*ja%Cj(=(ZUMV`9`Fal*S>F27Q$@SwU=is(XQx=0_Q2xgifuHMfsnRIPAr4gucp zIO}QFYLp9}Ch9AZQxN~MQ{Z0os_%V%d_?=1vZoowfdG*ZwbuodOKRzC4EMXM;WL`x zPU<&h%OZJ3?|;H@+bw`t&)wbp(j4Y}S3@&#c^QeC6@NpzI(F;!6St0Iwsej%%psab zVC$qEkDnGRRhE^5<5O?hF}7>;{_!cB^nu22>Z&nXeUVb79>wgJ-;dUjAKMaj^OIoi z#82&@%Jq0VKcN-F&DF;H?PKhg#Y>f>WH)eNZ5-f1F#r5b@t@*fe?&eO!5>yDa|dOh z-Yhi#*?GwgY7ta0YY-Pf3%WOh!NZgN-_~}|#+iNYrf9_$9Og~wFa$FZ}aNb2mXogn(*72uWp6H5WwACrr;07pqT|@ zoPdct*l6Sa@N-DDT$+>Z)z;6~PUiNa)70|b`Q^z|c65QkAua}pEjea<<9o;I#GI+9 zC^|(`@V&R>PtiO(L4;Tv0U!~-{I~B^DMI}b(+F;$x3RcmHHLaefB)%=XBF$$wzQ9I z9%r{+XXBnv)vvoBSDIgniE&;~eOoNfwT>l-G-BUii+=0+xo9%$I{6$z?lEOFIs^HG zZ3I({Zwc2Y`vy_-t8GkX;Vn(Rg2n7DOUUAiuOqI07W7PtN# z!>Hcm_Ck4IsQSB{@8S3Rh)m@tMgMpldaeVeYG#mraK>B@M)k~ZB8>*{_!9$zD>Aco zUi}Tqp-3oQx^c(ZQ>)6fh5!7D*`7rGsH!#ph|vopv2r2}Cc0fPOY3_I4{q5_9UH!= 
z8%DnEQx@beipXs7EXMPu>#_vD+{;GFN9V0(9TfBuQ^4FmoIS*~8}@LxE+pUbYM5Pr zB{fwuEln%T5vV@5n~7mNLEp2^jKLX!%E~6F{FDek+@-SelRw~t|3T5qCdDf>a&XU^ zWae~WmNZ#y_rycP*CsWR5loUSi}0Z%Zaoza+iNhl)mhYFJ0>}q?kmcDHRo=bpPbJQ zad~y;xQQuYAziFzRi-dKCYyb*Q=@!}1>VI406{GOEYaKxnMyz3;cu%>OwyDCp_zle zdLt8rW)Ah4fnhv<@N+;nnN;JH>rOmMw;IhyAzY4LS_L?Yc99ZOZ3QQlCca zv?jg`c_*e)z|mZ}q@&3{NRN!9%z+B4EqVGs(UFP!MGLql`rBur?wUc9omDjRmI>{= zJmbf6L}K^{MX5$)NYFmiuLvw%+%{RWOhHRjKoc!GGYSc6&Y^`u&)KO8)Gxx*9YG>n z$o6a|XJ>=fnJsO~j|xnzrj`piUaE=KMBja-T~GbUo@ZiKp5R`;R*7rQb%KR7CVrR5 zEqRGA6VAk1k5`UPYp~>hCF?*&(V|f}zHkQB%G6ZE&k&XDqgB7Q2Al!`VO5kujiCfH z=WaiGKb*X*Dx~Q6AlsOQmeVZcz3w}P_lAu7#P3XDPPB;_{yzY$KvTb_!Sy+bngvp# zqSYGMvoq9WmtMca)~-YETn6|m-gOheWz^c?{@?!ucXk=IJjccSR~o{+Jq@c`XZgZ1MLUBo=}2mYe6hsxl8O0l3wQT1r{h1M15}yb&1WYCBF4w}X}$S> z^LFh&(r(Q0?^d5r@YEVFF*9?GzqmP_T%>98b6?&k8Xn>A|B$!$R=DM?^Pm1`?kf(S z6QIY@t#^3%#zVBf_zSAJGFME{zlM&MZ%{r^0E2wFh>=OLZ4*rnff4FN`gDf;#3l6l zbRCa@7l4sLic=KFMJ<+47ZxZ=KACt-ujepyd^{JlQW1NhPCoXL{4tK>;EY0?iGf|r zQ*RVWhsR8Mr|kC?Le*w=d5&V%BrAV`PhIf{qCQ6(_t?2}o40;(hn;bT_`BO=ix%yu zOi7Z-$N&jZ;Nv+xM(&W2>oV4=#8!=^ia|N6e&m>^O_yeSJWZ~W3K(QdIf{)Ea{qwf zc!PWI{G9h6obrp4Wfmoyl>-;0Zj;UbekPW2r$>8Zi}uLC&ox+F%CXeY&{Ty&DG4PB!GsO&xWu4GHV%XZ@(Vcg=_3kde ze9xt-HMqKe!k_>HGs1No#*xZctx+vzm@gYlKj?V4C&!Fq2`|4u&XlPc6I{o^bw)T& zOloAY7iO{LG1*CrgCm#D5HfQs6v{-RPH4UFgG4294^_J{NaE<<0&jVxwnipx&^ zXAI87pi(PPtz}92`?&o+`#l*oSE9Z&M^0DH>aNcx8aMF=2W;GVhdb|W@a9`X)Fp{4 zyC=-ZW&DbU-|OKGT%1t?nf&xU!Ea*>PdExoOtb>avsv;P4H<}hhtM0)amEZMF&G)- zAVzjv)LIRFu0atGJsi>NIk>|B&jqtq!Jey;i#!NMXYUCK&cvWtEif~iBk3P7>UBBj z%Sf35vrBX2O^xg`N<_)nCER;Vw>e@jY;dJgVRbo2HlrdD0|8;+F?1(Hx{a2rQqWbb ziHq;_=sQFD&In42=yrjc(jgjq?6)9OE>c^nVF5-OL+IjrBYKXD>&Cb)losj~YB{oT zAHTKB!Gk;8dg~5v{bGywJ3ADO1KM*%%45(*eMZiZp5sDs0WDWzM(GfZ#~if4Dix_M zf9QPC2+tkT={SsIg^6roBz;sVB(hg9vjysQNa_vo`h6UC%s2$OxI{LWqn7aqeHYhp zaGV%FRLCyWv27W9(xTUPI5=`KXP2>O=e}Sabk_4QIHj|5kB9HQ&3kYDoXzYX;Wmow zcgxJrUo-1_a|i182l=oG=P)a^(VZ3q^EULVa?~(U9(Oi;)*HK_*RPWLd|W 
zoh6f1uqVf~cJ8uq`yGDy_JD`GGFPvia(!-&aU)O0(w^}Li%<6r3!VzOf6l%&#@up)Tv;dT?$O`9&-)uGu0G3EyU2=ek}Ky90WiCjIs?gZ2@}@;bwef_b$}A*&*j zkobMV-XjjShdkK#FboMv(wO*Js>^e%&DdyzeMXzV;HQ885B&Jffc?e)K(Z8a+VrT~ zCj8z7&uRD3sNLn{!8V<7jlgbDud2-Ce8M1R*gC~OJ)*m)k<@Zzb0zd~7p=9;$@U(* zJ101mYnY97@)?bc;S)Khw0C#d+&Q3ExQbRgM^@Fyj1D<@_!jrOHV^$p{`ikCkga5~ zCD2JBjfn!Ez#r4=wCJ}_7_^UQO)?y6aCNm#qofgzoAmc@@!t3UAAa~=mp7a%+*}^B zcG|{E6NcJ|gU63K^(;Dtn_NDpPJ?F}5PChD8+T~=I?h^+rHV>Tbs4pGIo>>q#P#Xu{;lS$M9^&Ev>hC|h+eF++Q?DLKoUBP zn%f-hp0c?gqAi@Go-I%~-9|oo#CzjalHwxyygCi*6OVqUMZa^(pnXJpEOV$PtSm`1 zie*y2gL`_H+duek{L9a``N`l4D+QnB(=HR+VxlLs4$X*xZkxI>&4#PqDX$b@&0d z#;c67%hIz$NG&aJ;?G#?}$?`PZ@M zmqFUbYjt`1KGZI*A}`jc#1oWY%w*JMFzC_j_BcHna=JgF@ts$hvn>|W9`5m7Hh=Pe z@h@+*_}jPO+cQnRd;JtQG;k&fX7_;OoqaYQA0Vy2PIf^CX`4yA!`t_vyuQM<#VXY_ zL=8RsVTXRZ&Czk2L8vfRvdGRhQZy!7E>NA%QL;j$QJdl6CR=-Lj-8aur5ugL3>tzK-C}D$p*^-((li>fPBPfz;NcE$zq3!~`kzvhXV7OAvfAf;r=EtyiF-)%{ttNb zU*6<@`@6eX-^pkJK&&GpuW09rk3Yn9T=!3`PNop%ac`NOVd!O{#SZX}HJ!?Kk)z|NGzaMthkJ`382=;X=mcPY7>WJUi4{u zhq!m&;e(+|*HT$OXH&Migb&}~aYyBS_Z*j3<~e^pfrB6OXlIvyc*9^mDD&sv15^_U z6}^3z-u*59{%)O>q`>^2q$Fv~WHR9B)(^P#sK=Y?pL1;-vu5qlAGbMZ_jvTnyL6Kp z?fMlwtxTf%4*#fOb!Ol{zXz26j$l2Fl@)#PPx(^ZINPd)(vaZ}f?k94oKL z82LPMV}^`gqLekUzV#B9Z(d|&u1N9svVV5EoKmmV=JDMl94}8~1vrl%up=Kc3|lmh z;ONxirGK-?rTVlRUC}~J+oijAi+BFtpD_HxBBlH_Zo~>ROo(W+_uyTA`rm$r_qvIF zr9yGgraalEGuYzc?S!AczkyxPU|*VJT>(AuAn0*;=ROS zeE5TFT)1)r%dBAL=BODObLAqlOW$Gb$_;Lwn`JKBMd|Nz^wy8KKge_P$`x{T1xO)0 z#BJT@r~mXb_QMM0@+;(vIY=Qmx<~W9clqasGcdEj|Fjen%COlC*}eAxhr9cn1oNy< zimVsP*!r{@iii6QPWSlbdq?yli`vy|w?GC4!hBDwnhi*6hooO7P@#P3rGhkvCj{WGx|VUO!{1_lGe zPrMi-mXL9l6Q=Vflh!GZ{`O7Yc{XFU^IIIe{2Kq&p~_xDM3mM9#4h^ynD4$}5w1;n zExn?7`61)8OFmev2^C!qH!M~+J=$-5fR@|G&R5ut-w@Y3{PlM&v_pfvgBUS2ff_E8 z_Nx?QA@Zdc*t+wO=Qi_H(mwpk0^jU}S^b*twjWR!O)2n{#?wa}pWHB*7Rft#?qntW z^DD+D-{a9d!{xfd-R%u_b5m%3haa6y__uGUypr1Hw_X*Iya3)>M|^y%ct(HT=c?V|^o=Ljr7^wo^L&0&rI7vf%OI*T ze32?CH9#>EBwu*Q#@*+5xLKr{3=@zATya2q_KY9?(8fy)crB^Z?Vd3^dBO(^6E7{Z 
zw-Y7c-e8};!Lv!6W24I5?JB$3CA7Z7`xkxw?HelhLKVL9Ie-A7qo;2VP@l3#j}7r*cV+l3S{Wr-Iz znRKss_b-1)__tN!?gFj%h;hBoo6lg|lZn547pMCH{Mldd zEE?w6x4~D7F_Qi`xwu3$pU2*)vAes^=XUaJ#Dc_q3#7Qov|s1l|L{FRU-l96bhQ45 z%<4VfdPcwY%z3D8gl2z9=lGJ*qi^uUh$0#_ zvacp6hCgl2o&fmpsXk(Kp^en;zla-R3YRQS+9}AN>_)7cKs(caMB=lds;7(0%g*+9wxiJ6ouw1o=>b zXfR1haVV5)EV7^FaBr7`LV~zY`6thBh2L<+(_61_M|CE{DQB|?(RhZ)3k~V zV|P#@S@KyM`?|}?=z=GmEu0wy1yDp8JP}6MXst7zb|U!Z9sv<#JZ#5ca(zncxKBO( z0`Y{O;mF05*Q~LaHgD*zmN;XRiFqp`J~c!i$T4J5A|N~X3pJ9tI~>)D?4$!kWfT48 zlJUh89zSi-S%-;N{A48X&pXVAYwGPVGu^_$!NXwQzv1%xZ*X>JpK(6T7vhkM2n1Cd zpCF98mOF?||Db{SjO-G?P)3Jbn z>PPm6kv*AEC_v%x9<|+VwzDxp(uCPWV{lQYHA|vB1Vg-y13B{>W;Y-3{^Kj!ioi>r zji-npDmarCi^e0)ZjSKmFq8fWAsj;Y{Fu`o%hm~v_z1OTvbNn@uf4sbckzs?<71xG zMeIa{OelajF|m4=^xX$EER)sR`IRp7&x14XqjxXpPAtZDlvKn)VM?zx#h5l2wmV#p zC6?+2yLUdvi!a^frJ_Pu7QvM8Ib+sZgOj>T#1q)-C-Dr2wG_aocu3}m(OHkqXvDK; z9m0WKG8+*TO~adYX?GNSd7ZWEfloyaMvzem_#))@4yo<$vYk(o3<=0<6LH#N)qcih zGmGJ77?fdUiwL1GiXtO9E9}XXcK?`1?;dj#@S~-p1QU7kAqV@sjy|07c6*!61q3Bf z6amv(F}-+3cbMb4te`patxOhO*2bLC`4gI*I9KUcXpe%7Ck{FmhOINXykfb|;LIIn z>tAdgL`6jjMUVxBfD$6Nb3ko>pY1}DOvHyGuJJoloW%`i*ADj7W~Y}z)h$dZh$1UU z!Ww5*r#os=fA55*e8@N%!5`0)PYkiIZdeaHyw~0$qd`a%kWs*KVQ|5uHRQu?oZ7-8 zD0xVdi@DYqUp}K%m$*v5#8%d0GB#lQOk z8=ITlEhdNtRs3k6>MW*xPCvXLwS7dkl*hMpusj9T4?YaMMT_?NF{|@aPP%1$`7C!+ zMY770q}OHE9q{C-LDaAi{Q`n;yO$N?npShbqf$3t zj8alSw-t>)-e%!REmR*5RuS7 z@K+iJ?n+~=NGwD@Q+JEKy&TVdPR8_tSf+`u_cczRwfNVMi&Tyd_=Dftpc?n1x+6?| zjfIR@-XxkWP!t5hc({&(KGiTx2c=jep3PGdL_!j*d;;!TMOlj!19Jk=Af4y|+wnTZ z=!(bT1mks-VoV|x;uj;47DSL0^u>z#U`iyLBeu6kJ{>3ScaVe;$8Y@&@BOF0;Zf}m z@E^X)=Qm@-n@_m-_6ZZ~bEwZBkx0cz;2~q<*ao_x(QEd&dDdZ8O>$U?^YUks_(MT5 ziin~wnd@VwPMoDb&Wn{KTSY(dpn&>c5-ZFng|h(jivD=WldgpD>H%9Xe1`qK7`whE zj_WbZC8#I&iN)f?xJ@1r9SgzG&~-lpqsCD(!Cuu*I3OSiE~;$f5=66#WDJA2FvY*U zLsIgS>YVakQp8S12?qnDWE-C_WmtcYcmMQH`A|9Hc<1-|Z?^Kh5L+_(VS`qEjko_r zBGn>UKvoQf@`~O}V&TLo@0KZ6QbYn0lDo!nOy;XNkY%$NB9~Oa^Dr70OrL&8!&F%$ zvlP<`nW*p9WpBHP_KILa!iYv0FACI(QJ%je^Va(@#-~2gsVMPu6iJrA=)(M%H~;Dl 
zzW-K>z*ql!c3wJQH#MXAexEnq(6PS|<=|n7Nc@*x9r#OyIqWd6pVBi#rh-D%ykyzd z_|X_dQ9+ceY<~G34-#q8*#g;Agot$8p~`bTSn0Ut5;Guy8ewD~@WTCv?0s26cTC3h z6B=Ltzc_P?JgfZyzqeK4^{mb4y(W#50rtl0gtp3LB$0@0vhpn$Y9iBhl-x#{@DZ$g0b_t1*k)0Cox^($U-Da6Y6`&^;&1#%e(-e3pFZ8?vtQch_rFk~ z9Q7ejT~?h#L>DdG{33>Mys@f;i_LFI6pWG;Ez2*C%EEaK=Y0o$V*o+3so z3}RI_qi2N75%2gDpoXa?MB=|7*}SNj2&%?tDKOTfWU58Vn^|Jv+YUlF+};Vs0#g;y z{UOHAE-xLvz@0BC=uQY*AEWlZ#=EC8{^gSmo_}qhKm1aSQq+gC?4vJDEXj|wu}eIa zrN}KMk7e7e#&gz=h+5krk-q&*R=|=)979F5Y|5b-a&&`kY@5yK8F6pQdw~R&FHAWu zlK7=;bqg3U*ZMp=Uocw4h-4*IH~jo5>(w zF+lZNgvQ_G-@j4k?I&xF{$QKCFWsS>2oPDU!J1?FVmPrp#dwHn_@e@vwE^=nh8)IB zmMJ8|RHHIJDTLvQplkR;9)VDpg`LF{3fw!AN%oV>1%+fROfVlu_5UInk+%hBST@Z1 zSfYSdy-O@#VK*KkE`i%=(EaY8^TSh*e{*w>-#gsncONDQ*1yH+#RXS^P0Fb)_97xF zA9!nzwZ3LCnlWr$)3284yW2z$54e{~Vr=A)0}er!tQ?ERz>j<M|cSWKpj+kJGqiX6+3DikPG%aqDt zM0d(+*yn@qoN{`ip+#ROT-stMA0{l>coUs*cgk#P6Dk!+gsl$!lN+W>n~CxB zeV>97KnQ2aBo%5|i_uJ{)sqNqB}r~%2!~X})ivwkk2rpE%6Fd55lRkaS!dBWLGNi? zbBEOCK40D`Q%#4sEvxbqPQ<7k=6S!{q5t+bd9wFKvYBmiWr?_7!7qw9^D##6hKU%( zNNiAuhbe?4ROy#Zz44m?PoLxUPZa^j1fLgKkDCmpW3KfKb|%hewl}!D6~~VU<^+2=q_+&SlCspw z5%OsjMRH*=#-8^XxlxQjj!N1`CT!w4b6S^IJbQe@P<)Y4szNcSkWg$qZ^dM`pgYiU z{2@ZgEQxHMRMI0IonSjYG$TnW9i@<0Q5275yUy%`Z}8}>$9p4@mpubXAJBZ-qBoaW z?|hlvy*)}>VKU)gSF6Sa9A>>9t@9RRa!C0-_P5LImy=`w*%v|yL`mjyBoi^xAqC&x zm3ezOj>UX1WYV8t3RR-9EV)vFLN!h~1=jFKn9Fk>9S>;5lK3Bp*jk&>>4NJ;oJAtZ zi`!e=*-a6WMCA1hciCgG^fU9KY!ssuvnoCrL_ufm&KV3AbO$zWFh)3)CtoO1PWvc_ z9k8d2u3CKf_G3=_DfGf`6E9WR&Ia&fVH!&Yttl%bNn*Q3Je?rqIf&~K=3-1|8pMhx z+1ZMbN%|2y2kaS^F`+*J%?=StrAXy6WYZ~9_7iM-#mb*Yh-RoI9DK`b+Px0LrHi)y z1aeW0AcnIPBV7`77%qH_-5eYF80DNFzYj#w!?kU+;fP6hgy~iBMYCjz1q#&+a`)FIb#tLW9K%39$x)JiRG}%&_d?`yM ztKefn`-cP0zWFV#Zgy#Vd0cCZIi7Po>@gKH6u$5;*xTLUDDDzfG$y7-v*TjL!)zUt zNTtH4^6j~H%@xkDfi|1db;9H#AB|VeQOO1HVKIIBjQ;n&Nuyt;72MV|d-dkRPB_cgaMI zbd*eNMpqOV%u@tIaWX{(X?0EaY({HhVOXCaPANrD0#ORp6sm48Y4p&|B!Qh>;+YI7 z85E;Ur}d1tzWGCwsh-iE?*4f*p<^utd>&EEfiiWuA06$!;x3Jigg(W#FZ_H`03=Sb#L$O4GWv8)c)oh6;ALatOGTS(nn5ylXEeob>| 
z&{#zYrMAhHiaabSDD@$o_BBH>$s)NyAt?};pK$fP|HS)EohO5zPc$q>^2pU!dHF$% zT*{_5F}Ui)2t^Z=OHou+!0KM3z4ISD1 zr2m;JodDiqhJE#n$zaKiTVlId<>;_VIR|+pynu!06^JVd3h|FOsI#17)SofYeROgM zgcEV9AqWWIt!CKGW9HL7ZNM4WAHwnbZ^7XfUC1UbNP9qyRY%;YoF)&trSVPl?#H1D2lhep?}KT;NaK>{brkP zW60Wmj%239-Mc9=K?Tp8pr5}<^U+&;ryIiOSNO93iup;G?+#|n@-LITf5dWMzSE_i1$JTnuFN``aXn74Bpu5&^(-z-ln*9P^!T zH)$qw?EmpSDqC^FcnBCwS2J$T2WU==+{+bW={U0I!lH{Y>2ouVvW_QtSoV>Pi{M#c zbkV0x>I0ijAW0~{N2!$MeonwU>rfx`>8T}B$s)C+jd1<}CvW~OZ+C32CO<#TLP~8R zS6}7Jn>}7n*Xb{VOvD(krTvsrKfY{TTzAc|-JxCYVXW^{N|iV|N|8$kZs)#kGV4F% zjqf(OQuf&T<2zJ$vPAFzgM~h$c`;j`#UJxX;#fgoV6{kdI9qL_xbAo^g6pKg^Fq+2|b#t5f@EAW;D*uPLkM9$U|4x zFq-l8D;Mp-V z%2=fxBEbmpk0N>6WJdk?8eM*!(9QwLNR&8hL`IBeV;-H)8J7bDHcM2u@>FY}u1}fF zOpg0Swu1?_iY`LyUCu5a@z+l@2HGcR3gF))lH27=zyE*&EBa?WR(hPw-WIV`0@XDU zX2DVEb$tut9 z<=84mP-OvaIHX^{!3@<ec<0&!L@fx*Ugs4wM#KrSmTvtSjZV*2F z10EiIjrQf++?<{8{u@ALdDdWV?GoHNAQ?@N5|>b&>vZJ^en~QO%9IAWWp-yYK+r)%+U7Hi|+B+ScGcGz$f+? z3|qYSXo^|OP~3Zt+CiG5tdE_VjxSyyR7z2bDZkz+9Nq%l4*mXwtEP)oE|J;VA{U8} zP*wat_X~bZ!?dJ54-wxzqO`R^-X{|l9c*h!|FS{5KEMb+AYHk~{^mUrr7Z%6i5aUA zN@vMORRZ#@Tw9O&Xg60(d}+*Fja<-AM*c|gTn(^CS9B&46N+pf)!5xh6Y`130(iEC z-M?Vku5-G|K)J{Z8!Cl_h~S#I^HV16r@UPkX{FjwJuIWr}Np5G)B3Vb|w7PwBAW^YMjq!QlTr@XsOA>bn-3g9|egG*-3 zbIz9uR)rFut@$Y>B?QmLou4slKjz(QkA@njS_+d%_->WUWgm0Yr8y6?3S`*X@RLtT z;MrJ%7FxSO%S&LVYur1?Qp<;s1&?vB&!E-CitiA~7s&XOTl%ekB%Jjcz1?KgpVAYu z6w_JmZiUDtMSSYd8sH=OG+)p>X)rY-C4XLlvg#1bFl|_=_pBQhkpmlb`#0a1qyg<41 z5$CJ|g0legoL+yxTW0~pI|=r8)5IexlDC5OfL7z0AH30FR`!!UC=&_?P%wcpli8G$ zGaK&3sB9MqhXaWA0`x0plPTxJB*7P|RA1btl!*|sdU)M4W=oxV12w;-+$KCz-`bOuDBe8 z2*0*X?P!~9z)$F-N32nc#my;AGs8Slqn1}G##{tk){`ry&G&iS^0;!6d_JX;jf=Q| zs|~RRH*{BF=1PHD-bWz`uII5H^w6#^845-0{4R$(X}0oyMA2n77}KeDaKaHnr4mso zNIDtAOXSGg9`+~ctR~7JtH{%LklUwBG?$iG^;=#5yg#r z939rk_(2stJlDY!#9Pl7CXVBx8!D2oL|`M$gS%IFm+$hwT+}&np3?q8nOa#T=q(uy zCe*JcNcY2}H*@$^1;HWZVkO<$TWd*)@Yj4lA=mCLN^~=QhMf5R@PhAo5f1 zSuC_U^@}b`_khICHt|S=h~pxPYnJmlSH~SD>xZcOuTZHJ*iE^p-jwcQ!g<5QJP5FP 
zm?so-SgjUxt~v|`23}^9RP7FXTMx*n5K}zx+*{HoK}5jA^ITTFF0*zEExJn}UnC=o zL~-zNnY4%W>I2MhieR=tT(Dt0rgPP1Fx6QpDydqETqTJtNqEZ<-tZZNna98@^Wxqn zhld3cevk%ZjLRpCon5A>9g?8{c|}7Onha+H9$%O^`#CCy57;;;aHkd_EQ9E|xUPpF zig^4Q8#cCvx31G0Ot|Vw_#PH1Y?ev*RX$O~)SY6PZ5q8bon?&l;USgnDrreT#YVSQ z^c!7z?Kw`oOrm;+osIjHqcRB#iW1qdi)R~{qhkim6W(k4@uG1) zpI1mmL~Pq;*=aKA^%%uoAd)Fj4nRzHaV;C8+ho@6&{vD3i(5Rum!O>V;hIZE?E!=C z7$Kb|l+6+KRATuZ+-#MS4bCU2E0YwERm^w4#pv>qg<8Q*R!R6(5}u2Q2<{BWYSQc* zH0MzgU)ZOzw?SG_2y%O=TZ0Dk_BA(7hG3-3Zb2p&aq*lr>+u<*=2JdsD=Z2LKA%-c zMMWIfL74S0hh18F0wa-QyC{>5gYCE&-4^p!i-A%=DQ@#%H%T?C;<+m(odNy!5Ryqk z`5Z9>{Qqcq>fx9Mvu2xNZ_db9U?-L4ZdtlDE`QFuu)dW~r(I@tj_89|s1$Q-1#k1y zS!ysmWip;}y3FuGrp(@hJq{{ic47)L-YqS!>mrH*tk<}M8y3S6H%^XSe}cWVO;o(S zPX#PinnwM!%hWm~^59h}#XMWVj}#Si0gH16!wK)T5*!BN>~BPfhCDo5!&zR^yl(Ku zn^RT=KZVU4fq);+Ss>75I-PM`myuqKvayxIA5?IRCBpcU*+k=FQRIH9#&a)J$tS_L zTCi@OGh4WH(nt6s2{wWfiNFEA!Yft3gz?;2b-JR%|+OW+!{2V=izcIw*@>*bIp2f;}4ceZ5Vv%D|Z>{&-mK^ z@lCE@-r~{XHR6&-SYM%C*O@I97S)$g;|X$}gCCJ!r8ND`R6HFM#&wpR2G=7nDhX-_ zWumFbfBx)p43kF;C(v~_IIJW%*zyth32V<RdApq|uc!S!YR5pD`rN{ANh|Vt|-_jojv4Hp4-(l8(TP z;ZUR5bcsA4BA5#>9W9wD0pck?KA(W{iE@_(5JlF@Ig^VEW?qF=X&=>c2u_A9qX9Gz z))u(FAYvpzIOrp)f+XJN7P|8ZdLW7w3=(o0ET$_ST^5K&BiuVsNpDx#|J+@QJ(2Z% z!qTx=xC^rG4TJHTi&mWAt1{`d#e8WnRv{TxZiUQ08?1pLGp)n3vl%0Mm*Cc2lF=k7 z;Qu?KahvqNSWUQU3V0Pi`*$Kpio`e_{L-x=;m*da5z)g5R2hQk3|b>v^%22jm{>ZD zKAxfbP2{M=aHP>2n1pI+l5s!uj#)Z=#sLLwoy1!}Ddv)nEa^{Wr0I}I^O%tkWm+sz z&d1n^gDSydG^hWlj;$OKs^ySZ8g^G>nKaN{u#u5MF@o_lVZTa1@(=|FW3gg6UgAqe z@kK%?@GGgp{CRM-5pMSp-O-9GJ4HI1Azw=1^L;`QJMZFj&gjm9%;hA9+Yz?P%1 zWqy6eNXcLpkJv~BDThApl;ycD>%{_Nxniw`5Cb71f`Dqzu@@a0jV|XmD|9JC+8?4y z583wU*%rgbh(ULbn#mE3$MKEFxKf0bw5HqWGw3YvR}aWEkQ^`Xc=p1j9;xxiQ~S^H{)17X5~h-z!NdY_w{E@zDe zOHzaaVJgA^)mG`R0t9*;7Q-&1NC96gPsy6$(-nG>#Qb_duQ|n6`aHSq1GWP`QsNTN z9?GdG_LYR2c$FMgiNQgu#^t0#tGhtvFGp;YYw0ag^YL`^Jz_tvrpDUs*pvPi4 z;Jgl2F3I7`JEU{*+b}QOYF)Mst_TDOg+jz*A^eJrgoA+2V(Bs-Loyj4h{dAaW;)fF zXAd9{BiU==#*)lJDj|9-mVGYjuugt7UOxUY$E3A}*$8uKGA06kAK|czEbHjY0fUP( 
zx?PPqMbZ(KT0+OvB}P4s1lLR_2IqQ>y^zdCcEZ|@(C^O)H!hg2JX*z<_K!WM1YWA{wVEZj%hhe)?s`;hh#|S=4woF?naE-B?b92^Zq2(y-7V!B*wC)fw8AS{RK3>azV*Cavp0@xqL=mZ(I*6Frt|4^KOd!msEQdWbA&c${Am}E-O2hIw^!qa!4IOp6M0{tP zTrfaHSO8G1CE6%*(^+(mF1BXKdsW{v07-1uWp#@X^c<+$8vEOYn)IJIS?e8 zOA}Z)pmiAcqTDoX!is{=u@DUdPYfdZZ9Kh$*6;G+2O9BAfovg9(B~(V%aBOQ zNkVj%VB9dCuBZ?F#4HO{T46dq49EM$D4~8+;n_IW3}bder+Ln!YmIJDLfMER3No0^ zZCjpya7}ISrs(>d?f}fJMP=_wN_UrX*x{3abJ7VoH0d zk}_RF&IH#^Vc^4`NDvy%P|Oj_QHw@1L`0DZ2p}2;f=|T@cqsB3&zmzCOlaNAAm*U> z1XhNPu{H?fF`q9P4_0`ECGL7be>5lLH}C~L9LJBb44~KsvbSJmhnNn>tWVF-%3 z-ldR=Q3~D?+K*=ox>p_6z7=v3Onr^6Nhq-({y>OqE=bE+u;|^;Z%1jP;*$lOwaZ#} zh}iD0r?7HuJX=TWH5q>JA=jM=9otV@5ecXsjsUg?^3OTe_N2$Eb;ZDnp+`2^DTS$~ zq>p%gMFi1DFcBvl3*xh<=;Jmw%?PWIN=OoJ*H)780v;k3FlRiP(x^{y?%JrTfMMF` zrcD%^#bChj>V{S50AFU0Qangj{mF||06|6oWOs=%?bE!tLI}CUtPz9p3~gm#h#?$6 z&^25$j3L2fs4*RA$Z-Kl5-=8P*0CjmZ$+m)r8{tl>}*opsFL+bgajSk9Wxy342Cue zHj-d5)nyX4KtlR7Rm29MF`G=OUrn&@Sor+{re&j>4iV48o((aYr;LmsTByuMG0J9I z;-|XW3K9q)xl7D>pY~N9e?mZW=eKiT>KI}eRS<4FQJDs&;)m&k<#>Q5=LyL^#DxyJ z#yTL->CEVKZ9>~Mid!|Zeuc2Gz+%jJqSG7LNUn`6+RStteeM3|5xh@?+d{Xs)?v~; z#PZJJ?D28oV zj2YZCX&EYVSVU25OjBWLsYDzH$6Yh;^q3CDto%MKQNo?+xPBWuAmFWAEVThw!yrx} z!`=IN(rNkQ@jSp45d;@WS+kn;>0e*ch`Ow-0gKs`nc<+@Awj)tMO~jP2)1DWG!u7uyxz>cvNMdVN%eA{q=36g^}?N7rXGZzfEPFu|gLhljSZ2t>V4 z7K!=)3BQ)I{2#_`e^+yS!{FjQ-u>ne`OX_psB1d%azd$CO{pE<` zE@Ueb2L;d_i^Y6_z3i}bvs?`BP$;da<_A2QuQ>09h`ySkx}8S#%iLO#8k%X+n^=s_ zn~0ef=JpXzGz6cD+Tiy8(6uG~MxSM`j$`NP8&!sOoRk-&Tr3j((o6j48w0$z{!hLs zKcs%O$yPE(u23aW-eW@yQx$03^T$yEx(G1V+$Yer*Yr|@S zwHl(iGtAL~lM{pDeVJS}P4?BhjI4KweEn-Yu}=7(m-pFBMX6-6B+Gjg@^Ln0iJ-W` zb9#*C3!2>(vuKk*euU;Pu~t1~MmUDaayVu5`1>p!fwvOR@j^l3UbM}3r4^IaipKSf zmN2oYRF9E`QWcs^(4)-kLLy7udglrkCFVY~d8FMc-3 zzwW%ryZ@@kf4th|e5*({8l_OJQrOxd>Q_+t^($WU>?QWBPW|{1-~0MIJiFAHI|;OY zn^~vLq*5js3=oxmwr4>f4w>Avm<8`ce2-crNJ07Wd}S0hLhb%OyVD`@uYZdR?SlW+ ze86TVK`osmQQjwC%&;ZNgv1#FeJ0wRM%Q2xYZJ`$&>rM510f{AWYV2-eKx_DNm1C} zARW9-J^Jx)6eNj2rbH|iCo%hg?vGmhOSMg7w1sVgzDHqKG*jGr6iWRUhD%4=G2(lzbn1dl5wf=_0Xo 
znxyuG#=A}aBG93-xkI&3;!yI)Wj$PJNM|wONk=4^6*)+IEUYz4O~YBWna>NH4g}&E zjlKAUx2GnJahUYybChZ+WLW~%;AzZ_HT|K<==7Rsrpo%@4$h}0DgQAXJa@&k{v$5m z{(pG$X@|#61vA@ZG8=H-*V#*|gd;ygp|?#KUEbznI#u$~0?%*AWPb7|1yfLZh3ZC+ z->!X&_dobs{(XN)@2J2|B~LP%qg>5X+f3v0fip6h_Qp(FH#n0M=Bqq)`#BC=KXFG# z8eOp*4yfA|QjsDrY{+CoKXGb&VMuKgOC7TlJ*V@|U-N%!bLyochZ`!pdkKgJ!O$4C z#`vy=9A#suQ4g`R#I`ha=5%{=8kY+;kCGr7+|*kv>^-v2-6fsN5W_>p!FJZnmJ75| z3(I}P@bELVbN0UbJ#L=1 z`QQJyKKrE%yM;8#{04q(AwSKlzhik$&m{Ci1|E(^&H(4m{<{#g*2r~ilFDB$Pv`aeQG(-iQa-@AM2rF_u;N%4|79nuN0PL zOQLCtCkTMJG57TJF@4sx4;myv5(EJd91_(36M-k_?yAbJtnA-pWo70m+YlX(**^?O zf*FoqJjaRE1y0PCsAOC`^)()c_q3%EaX4ah*khPvNlPmnJA09{XHW9t$ypYsixho} zYaaBCBtXdEFkw9Osh&8)+=(?#t<;#C%8*M3j6ka9R9Q^4M%&(v0`4=v5`q(Th^*ja7 z;(pGtN4*2W#4xB_U}Coi(Hw#up18K#RKd7Cn9wTZ}(J@p+{*H^hP>r?bT*58RzF);yg z>``gVP?}qyF*C>P^fb#gOQBd{?9Q>cc7pYjYaFZPn9f?FP8*$>B3L=k>e@1EE3?${ zDLE6;Y()e_boU2@ zNtVi~7g#>A%IVbxGo=i9N0AI#L=p7M7ig@m^9N^YG>YDbzX_9n5sC7NVxMxoPI-QT znVETJryDF+4dqgakvqrSu@kJHI>E8&BC`dDbkHI)S;mD`mX96h%-Rz3HJ54*y3G_@ zSY-L7Z?V2qVAePvWzT@!ApK=BqR8mu^_6e~^9;_k(zKya5rkp_8jr?|V6;#S-t zxO;FCtVoJ$!VT~D_gmk(cdeU0XRn-n_C7P2J$vTKInTURFWq99ci^*Fwto3cGh8)-$ylsh;Zl|b zD%#y_PMT@7de#TU6Elwv@(NnKA=xSs(AXlXpE3*67)`l23S~V`(bLaWAJmv*gJwdq zjbO_`Ov)Uj1An-WjTI~7U+^ibei0mPB0en}QBiX_2ML#5*JdLl=Z(Ch5MYFNhOqaA zvpnh(yArK6e5APbWa!NuoJDlwY;QV4)HjRF$irVc|2T+n_q0Gxky1udO_sb*5}Oq? 
zIoDT>S^8cNJmZ-Hp||EUO(4&k+4RP=J?np?L=+du5PV45=BzjjfBrpN9?~<(AS}~7KgJs8*2!mXquvO;7@&vbz5q6omdRm<(5sOL?B|!aed~XZL46vD*`1*@R%zP5|DB1YfIW#d|1E|sBUL-8w|*2=~0M_wVDCVASM(HZmg3`*JIDA6Zpx?Cn-s=3$O#U(kGoH z;|4+Bvpe+;+L|frFUT)$>oSOjhE-4AW+_**=gm`bQ}zSJ{9w&MEeSqm?2tex%j{Si z{k!;XmL5*Ed%xUK;zhi6kaJX*6?1lk^cew~OI=*OBh(H2SY|^ffZxJJiFVNwjAxC5 zrk4tmgmIg6!>u*?`vge)ts+r(`>&&<8J!2XeiztGDTD7^*{lIXF}LWqS8sl7a1H(- z1e~<vala-c48$;=fn}MWtB)2Bh$N)B7190{rz_Ef+dKNkp`dq zxBgcKrC~U$h0u_HfTlzquxR#!6|Xg!VJR0o(-Acnjs$@{&AtbmI%1YBJjIEesGqG- z1VWkN?Z&P2`lg%!rY4y-3cI3duO~+~wk=-{t?uz0&?pblm^}wX%-WntItej{K_X2R z@;KUUxun5=KG36Qb<_JQAvJ6^h$XTW8KB7!2h4lV#cDqIEdIt3zW`l35LhJaq6Vd> z)iJ=8E{qI;VI>PkQPc@U27{*TX!ft4juqhho@|`3$?-8qzlNAdX0WQ;S4a1`Wy&nL z;|!*-%9BT{*MXcxtpY?Y42yd={H$h*m#BGhH113><}aNSAe7)rZK_a-II)N>U~as{ zu~4%3x}s0OOhLPpn$Ti)ePh>?i)4so4-L#HWK5M;7Elw!A-2A*|J)lCd`Qo~1FA(a z68|JgjQn^IR0jp|cxEfkWh?4rLl|KgFN0NYJ+#0JYO%o*c_<794vF}lZ-7?9KXA~y z*{oQIuyjvoKt*3OK~pPl$%pvj>(Km{Ftw0Xy4-1A_$kJ<0axlDd8#rk)>jxH0&i@j z$v=olRqG++b>p%Cvc|@WE&)s_aUn_`tNO4yra?H1xf8PrL{$Fe6q=$>!{@4p_5?Vsn%(4vZ}3u`ObJInes|~P204JP`@z(tNKCk zX>XbT{oSSU-=MrUC<6bLM3E!9Pi&OyvUO@5xzCC zj2jj3F!HOxL5i(Q`5$A~&_pL2m*24BfhNUhhKrXR%?&e_NwlmV+aqSzSqk`uES!1S zG>*(@zIgk(sglC19`|=0Yy5-PSkdP!viR2e5uvCmu)VvBNlCp7GznR6zG>&z00FYm z3~^Al{%ccnkMOLf_3J`4oQy$A1T8HWSIGon`aQX@QPM!a?>NkxhX6-!YopEjA_p4KW~3 zzilmfe04*6d;f(`&^fWE z!*|iWwL&2QFFNvx;(*MQAvuR;G>YK0IA8Rs-uZ;5D)d=w+)lE#FoF+8;N;^O?ir|5 zQUn*WKtT`}J%@}WdgSp0^4&?>)s1aOpV1ATUFc~7IAu&W89ilan@E@mN}4l**25{gddvk0;k`H<1G0 z8gDM|2aMvnP+MP|My}%mEcuLEPX#_Wx?N>As5sfU`EPk)GEujQKR+Hp>#i3Rj@g2~ zs_Ev!wiUTFe0QU5ICDT*>|n1kD-mqoXHq(B@=ekbzOlutIQ}Y>@PgdVftz(CA4ikY zQ(;c?Q}@p$iwTD*3P!wq5-O|B1+HDI@cP%Y*F^3!Ow?bk-MqhIvbAwZc>ekXf&@5y znn6q+y6>HV1Q=q}3qRR6qp84#bLz)4lQ27E!PR12-v&g)bfU0s1KaQ`TF)sF?apCe zkiQgkj5cSk-?F#&STV;u!()qm5rv7@{MIWZ1aRSnw=BTU+b7 z1l(n@!q!rIqa5$ywh}|JChZNqEQ557o{}I5c&5&7C17_+Rym2^u z(2l|!yFn zaB#7uJtDBw5v`XsFY*h&>0P0AcQn|Ua~LS^VSi&=3uoudZ=*U`X&ve?P?D3!&0y2g 
zsUgt4#2yl{<5}+L6%@Vya(9M61^L=#GB;=yw=bG4u=1u2=1)D%l$6|vH7f4wS|!D5 zNw0JNu;4WQf((Cg+Wf>ngxO*NS_D4CUv;E7qG1=Jj#~hLEwj!+Qro{M^cwDihCA-` zQQMBIn=T#7sF?Ao1N?)_RoCy${8m@@tOi%u$-57oZI)}8E4r@^=I7Y ztuGpGMcUF4C&QzsaQ>DVj~L`W!euW?;ebX_gM8d6k7WJG!3p-EW4nU4$9;>j*z&FI zAi+{4>Ko^p{FUzt;_pgoiX)p7AR9=q*M7~r+L8KK;c{Fx5!3fkLGPKj_p;~VE7Z9~ zcjaOB3D#r^Xw}VKKyY~(x^*Jjru433$N?tQQRy-=CM36r3%oXG7fWg?8&*UD5^LYN zn7Cza$!9&{GD+|1V%GLKWlj%qYII84>x97#;=iG;g;!VhYq`z=eKx`3-_zf8fCI9T zaLw>F@6-~qXx=q&#Dzn;Z=+#;f_A}Ec!X{d<*U{M?x96bPSh$T+1N}9Co7N2iyX8+ z`u_f3E2@KSm_-1Gm_WQU+Vi>sZgH?$O&h6z{F~h|=AITuSL7Ip*gJDeeiN?HP@$O5 z5Dy&`acv?A_bl9^%=azz&U zTkMQFM&9|}U>1M5k-*WwXHQ-7P$T@-BUQ+`zR`l_JGMYyE$;*OedTWWGTrpkbesfynTDu#Tnfo?~7UjbmUw| ze_%YpjTE>&Lv8q;LX>;Qd3}2ul9Z3N_dVIgc_diIzsqTOc87E&QiG5L zqdr5*A7nh;llo?@7z#k&*db=KGSQkX;jSrocp85l)Nh? z9BdTDsBe<%5$a&LHFR$1#fBFbl0>QsTqHux05m(4R+hJNEEl6JbbgWPT0J%UF58lh zcWJa$-cWHetngMxwj@=gO&>9}Oyd$?I|9%LIoKyH%KMcPaZP`g_f)2(ad_)&-z`nRbl4H#2qNg?iVN~4&Rl>|r3{P|ul-oG8C>VWSc-e6AGe!suA zR_5IHdm5({+@o)>bZ!M@OQNLsV~%V67)cEo#XsV&J#*1i9{1uKqMRZR7L0^8G}Q}k zA61DJ{gwtf1n*el8?0FP-g375RyN48Xs_zIg1$!`C$2fY`Df+IBuLYbNzgkEit7te zs5ktxSb9L)d{EOHk%;~q+UaeezV=@0#hM4wt5>vP|GPGItguI>FY?g-Ce<8Tj7sx8 zT?UZN|AGUaLH$H}rlzRwu4ysvuG;23&y~Bcx9(-Z4nN{O2cS)t8(6NgK^Me7A{Jz**X&mw|naDQEO&eDI*2!)>!MNBm^GEa~Ku z@(pkR%{mU%RdP<7gtI6+gd}{}RLa-3|3bY1KXF2M6+WNn7TN!!`EoSV-l6q^04v{h zx2bmIUFOEwG121UtGJw@pLM-@3^J3NyXa(%PE8~zQKJlezvk-xm!QS5t*daYU8Q~5 z7nE3Eu>myTa7ia}OsQth?voh2}hr<_)F%BB~N_EpCy=9Rl_RqHY?#K#0paRHi1@V)Tw zRiF6F{!+7=_ZFw&;mNU#*vL$LJ*saDqlS$7U(EPze*&?szmsK#YsU1!@07Mmfl2R6jrj(&jo^|y)YoIIXXwtP20kWg3u*|0MDVYc{Dx2(HHM+8-INo7p zd7p~be%s5w+G(haPnf%!J{PJtz{)hRyrqNB`Gkp=bRK;5t4?D~JkCUGrR)bYU)Y>l6H-t;0x+AOH_Ib#6_Q*9Rnw_f2sXc}vqtUkb{7HK#g^~65@?$`d5 zvH)5SpnUF4OW^4K^6CQ2W}0Z5Y;gM*obW=`bN4BYemc6YEMPt97YX~TR;_f#^r`X2 z19V;2TP?!b-UH)jC@`l((sp_2h zkiIqfqfB|q&R6HKdg9@_FRfjUOKfcp+0Gd+XlaZIIjaS^?0~LiR}z0LVzfFUBP15@ zdBz*iVSBL>I;@Z3cuMfm>UZO@PyAmy$yn}4$jbl|S&=9-!*#Xyxqz_atJ<&Oi!RX= 
z+1`X*jPbsD`Jcln-&61*T=swWEFkg-FBfdljEOP@(E(GzRf3EFyz2I%*#noV6|rQJ zXj}60{=j?@WiDgy(!P}_?$lW(i*3#LB?T3vYxhpIaO@@ZobrgIIwzNr>XzC2 z^$^(BKjaTG{EywOFVXQ5%`}I-xLd%{;wfGLKrg87+meB|C86QlNgYAy_u2IQaDG9Z zgfUh7 @$s+Fj?OLvFUFVdxDWi*x!?pb!GDfRXltD1T%l-dv$R%VfUzCf{osuhMX zhY80@Czh<@?i82izByEE`Y1qSt%SEjC#-DHa(#Bz*xz-A!5#HqbaY!JEPqCi79!j#XCYZ+p-;rM;Z`@adEF21%*!oi^NJoVfC{?ysGJXEf3h?lq!d%yPeFtUk(qnb;8h_=hSmB@D0rYL55+(?bJ z8t?JRX9)AlUGNTsuYal_APB7ez_ zTM-|&OtRQnyiPgtQwl&}-KXpHIGloPp1n*yj37Ny`iTI4jQbOuihxp$NM-VKnLv}s z6CU#ZuIZf5kF6yu7cCZOf535oKA#}})lIXi=C=c^l~bK@{1 zL-+)fRtMtVc|4`?M%?XriVEI+t1cz4)40qjQq9knf@gN!c2C z>Ib$_eOjhaAtDC!Yxb^j~X~6)Fu?!JdVHt!(y8S(U*|dOR?^G^b8l!Id3!hn^ zk&dYWBmlf=FP@FUg5>JV!A+xJem*si(KyHz<-aNX)Lh;nS9cr(*%rr1tA;s z)E$BkD?ExD^Y_9^d+$4+8-XGN)0+_<(yNTwS%jQlXy)+JvL`6seUQZoHJ?beE#oyqN}?eeOBA_mEky(ap%@u2)yKe>;vs0p3ClN*snLy6lhF)xLP z=?ZEstr~=e7y=@eNnfHRIr~@lJs3sSW#1zZ$a4-PBh!U2Ukudt;~tt?D*U*U%iAKz zn(m4yM&|*uBSqi#caTnwq-V3F=gpNimM0Z;wxN1(|%i2U+gI&QN8-dLNV$ky-q;*T)BRri7aQbH#-*7f5z3bPMg~M znv#~*eTgit8D$~JN|X$u0u|tFf z?4aB4I_`KzE#e&-Za))9|GOBzw_UZ5Pfbl}EGEfM^MTp*|6?AT^zhZj2T}ejqWS;P cK6>x58o?EALJP3jN9d27l(J-nxbZ*#2Q|@b$p8QV literal 0 HcmV?d00001 diff --git a/src/genbench/tasks/latent_feature_splits/roberta_closest_split/task.py b/src/genbench/tasks/latent_feature_splits/roberta_closest_split/task.py new file mode 100644 index 0000000..c6ec3fc --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/roberta_closest_split/task.py @@ -0,0 +1,99 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import datasets +import evaluate + +from genbench import Task +from genbench.api import TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class LatentFeatureSplitRobertaClosestSplit(Task): + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: datasets.Dataset = None, + ) -> OrderedDict[str, float]: + """Evaluate the predictions of the model against the gold 
data. + + Args: + predictions: A list of dictionaries, where each dictionary contains the predicted + values for an example. The keys are strings and the values can be any type. + gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. + + Returns: + A dictionary containing key-value pairs for the evaluation metric(s) computed on the predicted + values. The keys are strings representing the name of the evaluation metric and the values are + floating-point numbers. + + Raises: + ValueError: If a metric returns None. + """ + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. 
Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." + ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." 
+ ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items()} + + result.update(output) + + return result diff --git a/src/genbench/tasks/latent_feature_splits/test_hatespeech.py b/src/genbench/tasks/latent_feature_splits/test_hatespeech.py new file mode 100644 index 0000000..523cad1 --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/test_hatespeech.py @@ -0,0 +1,8 @@ +from genbench import load_task +from genbench.api import PreparationStrategy + + +task = load_task("latent_feature_splits:bert_closest_split") +ds = task.get_prepared_datasets(PreparationStrategy.FINETUNING) +print(ds) +print(ds["test"][0]) From 86e123db71b856fa1de3cb29782f50c501c915e4 Mon Sep 17 00:00:00 2001 From: Verna Date: Sun, 19 Nov 2023 16:28:53 +0000 Subject: [PATCH 47/57] Minimal example for data loading, training and evaluating with our latent feature-based splits --- .../latent_feature_splits/usage_example.py | 84 +++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 src/genbench/tasks/latent_feature_splits/usage_example.py diff --git a/src/genbench/tasks/latent_feature_splits/usage_example.py b/src/genbench/tasks/latent_feature_splits/usage_example.py new file mode 100644 index 0000000..4ab9ed1 --- /dev/null +++ b/src/genbench/tasks/latent_feature_splits/usage_example.py @@ -0,0 +1,84 @@ +from datasets import load_dataset, DatasetDict +from transformers import AutoTokenizer, DataCollatorWithPadding, \ + Trainer, TrainingArguments, AutoModelForSequenceClassification +import numpy as np +import evaluate +from genbench import load_task +from genbench.api import PreparationStrategy +import os + + +def tokenize_function(example): + return tokenizer( + example["input"]) + + +def compute_metrics(eval_preds): + metric = evaluate.load("f1") + logits, labels = eval_preds + predictions = np.argmax(logits, axis=-1) + return metric.compute( + predictions=predictions, + 
references=labels, + average="macro") + + +def main(split_name, num_labels, lr, epochs, checkpoint): + """ + Basic functionality to load data, train and evaluate the model. + Args: + - split_name: str (bert_closest_split | roberta_closest_split) + - num_labels (int) + - lr (float): learning rate + - epochs (int): number of epochs + - checkpoint (str): should be a valid HF model name + """ + # Convert GenBench format to HF dataset format, preview dataset + task = load_task(f"latent_feature_splits:{split_name}") + ds = task.get_prepared_datasets(PreparationStrategy.FINETUNING) + ds = DatasetDict(ds) + ds = ds.rename_column("target", "label") + print(ds) + + # Load and preprocess data + tokenizer = AutoTokenizer.from_pretrained(checkpoint) + tokenized_datasets = ds.map(tokenize_function, batched=True) + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + + # Load model and HF trainer, WITH evaluation during training + model = AutoModelForSequenceClassification.from_pretrained( + checkpoint, num_labels=num_labels) + training_args = TrainingArguments( + "test-trainer", + learning_rate=lr, + num_train_epochs=epochs, + evaluation_strategy="epoch") + trainer = Trainer( + model, + training_args, + train_dataset=tokenized_datasets["train"], + eval_dataset=tokenized_datasets["test"], + data_collator=data_collator, + tokenizer=tokenizer, + compute_metrics=compute_metrics, + ) + + # Evaluate for random performance level, train, evaluate again + predictions = trainer.predict(tokenized_datasets["test"]) + f1_pre = compute_metrics((predictions.predictions, predictions.label_ids)) + trainer.train() + predictions = trainer.predict(tokenized_datasets["test"]) + f1_post = compute_metrics((predictions.predictions, predictions.label_ids)) + print(f"Random f1: {f1_pre}, f1 post-training: {f1_post}") + + + +if __name__ == "__main__": + os.environ["WANDB_DISABLED"] = "true" + split_name = "bert_closest_split" + num_labels = 3 + lr = 3e-5 + epochs = 5 + checkpoint = 
"prajjwal1/bert-small" + + main(split_name, num_labels, lr, epochs, checkpoint) From ad31c991cc17f16d04381fcbfe552dc36a7f9534 Mon Sep 17 00:00:00 2001 From: Verna Date: Sun, 19 Nov 2023 16:39:53 +0000 Subject: [PATCH 48/57] Add batch size to usage_example and move tokenize_function into main --- .../latent_feature_splits/usage_example.py | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/genbench/tasks/latent_feature_splits/usage_example.py b/src/genbench/tasks/latent_feature_splits/usage_example.py index 4ab9ed1..6551f55 100644 --- a/src/genbench/tasks/latent_feature_splits/usage_example.py +++ b/src/genbench/tasks/latent_feature_splits/usage_example.py @@ -8,11 +8,6 @@ import os -def tokenize_function(example): - return tokenizer( - example["input"]) - - def compute_metrics(eval_preds): metric = evaluate.load("f1") logits, labels = eval_preds @@ -23,16 +18,22 @@ def compute_metrics(eval_preds): average="macro") -def main(split_name, num_labels, lr, epochs, checkpoint): +def main(split_name, num_labels, bsz, lr, epochs, checkpoint): """ Basic functionality to load data, train and evaluate the model. 
Args: - split_name: str (bert_closest_split | roberta_closest_split) - num_labels (int) + - bsz (int): batch size - lr (float): learning rate - epochs (int): number of epochs - checkpoint (str): should be a valid HF model name """ + + def tokenize_function(example): + return tokenizer( + example["input"]) + # Convert GenBench format to HF dataset format, preview dataset task = load_task(f"latent_feature_splits:{split_name}") ds = task.get_prepared_datasets(PreparationStrategy.FINETUNING) @@ -42,7 +43,8 @@ def main(split_name, num_labels, lr, epochs, checkpoint): # Load and preprocess data tokenizer = AutoTokenizer.from_pretrained(checkpoint) - tokenized_datasets = ds.map(tokenize_function, batched=True) + tokenized_datasets = ds.map( + tokenize_function, batch_size=bsz, batched=True) data_collator = DataCollatorWithPadding(tokenizer=tokenizer) # Load model and HF trainer, WITH evaluation during training @@ -52,6 +54,8 @@ def main(split_name, num_labels, lr, epochs, checkpoint): "test-trainer", learning_rate=lr, num_train_epochs=epochs, + per_device_train_batch_size=bsz, + per_device_eval_batch_size=bsz, evaluation_strategy="epoch") trainer = Trainer( model, @@ -77,8 +81,9 @@ def main(split_name, num_labels, lr, epochs, checkpoint): os.environ["WANDB_DISABLED"] = "true" split_name = "bert_closest_split" num_labels = 3 + batch_size = 16 lr = 3e-5 epochs = 5 checkpoint = "prajjwal1/bert-small" - main(split_name, num_labels, lr, epochs, checkpoint) + main(split_name, num_labels, batch_size, lr, epochs, checkpoint) From a6b6019f94dac6e7f46cb33a11cf566bcf2baa48 Mon Sep 17 00:00:00 2001 From: Anssi Moisio Date: Mon, 20 Nov 2023 07:53:42 +0200 Subject: [PATCH 49/57] Add usage_example.py and requirements-usage-example.txt for europarl_dbca_splits task --- .../requirements-usage-example.txt | 1 + .../europarl_dbca_splits/usage_example.py | 188 ++++++++++++++++++ 2 files changed, 189 insertions(+) create mode 100644 
src/genbench/tasks/europarl_dbca_splits/requirements-usage-example.txt create mode 100644 src/genbench/tasks/europarl_dbca_splits/usage_example.py diff --git a/src/genbench/tasks/europarl_dbca_splits/requirements-usage-example.txt b/src/genbench/tasks/europarl_dbca_splits/requirements-usage-example.txt new file mode 100644 index 0000000..765824a --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/requirements-usage-example.txt @@ -0,0 +1 @@ +transformers==4.35.2 diff --git a/src/genbench/tasks/europarl_dbca_splits/usage_example.py b/src/genbench/tasks/europarl_dbca_splits/usage_example.py new file mode 100644 index 0000000..efd52f9 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/usage_example.py @@ -0,0 +1,188 @@ +""" +Usage example for the Europarl DBCA splits task. + +Training of the NMT model is mostly based on the HuggingFace NLP course chapter on translation: +https://huggingface.co/learn/nlp-course/chapter7/4?fw=pt +""" +import argparse +from genbench import load_task +from genbench.api import PreparationStrategy +from datasets import DatasetDict +from transformers import FSMTConfig, FSMTTokenizer, FSMTForConditionalGeneration, pipeline +from transformers import DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer + + +def tokenize_corpus(dataset, save_to_file): + """ + Tokenizes the dataset and saves it to disk. + """ + def preprocess_function(examples): + inputs = examples["input"] + targets = examples["target"] + model_inputs = tokenizer( + inputs, text_target=targets, max_length=MAX_LENGTH, truncation=True + ) + return model_inputs + + dataset = DatasetDict(dataset) + tokenized = dataset.map( + preprocess_function, + batched=True, + ) + tokenized.save_to_disk(save_to_file) + return tokenized + + +def translate_sentences(model_name_or_path, eval_dataset): + """ + Translates the sentences in eval_dataset using the given model. 
+ """ + translator = pipeline( + "translation", + model=model_name_or_path, + device="cuda", + batch_size=BATCH_SIZE, + ) + return translator(eval_dataset, max_length=MAX_LENGTH) + + +def train_from_scratch(tokenized_corpus, output_dir_name): + """ + Trains an FSMT model from scratch. + Model architecture is similar to that in Vaswani et al. (2017). + """ + config = FSMTConfig( + activation_dropout=0.0, + activation_function="relu", + architectures=["FSMTForConditionalGeneration"], + attention_dropout=0.1, + bos_token_id=0, + d_model=512, + decoder={ + "bos_token_id": 2, + "model_type": "fsmt_decoder", + "vocab_size": 42024 + }, + decoder_attention_heads=8, + decoder_ffn_dim=2048, + decoder_layerdrop=0, + decoder_layers=6, + decoder_start_token_id=2, + dropout=0.1, + encoder_attention_heads=8, + encoder_ffn_dim=2048, + encoder_layerdrop=0, + encoder_layers=6, + eos_token_id=2, + forced_eos_token_id=2, + init_std=0.02, + is_encoder_decoder=True, + langs=["en", "de"], + length_penalty=1.15, + max_length=MAX_LENGTH, + max_position_embeddings=1024, + model_type="fsmt", + num_beams=5, + num_hidden_layers=6, + pad_token_id=1, + scale_embedding=True, + src_vocab_size=42024, + tgt_vocab_size=42024, + tie_word_embeddings=False, + transformers_version="4.35.2", + use_cache=True, + ) + model = FSMTForConditionalGeneration(config=config) + + data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model) + + training_args = Seq2SeqTrainingArguments( + output_dir=output_dir_name, + evaluation_strategy="steps", + eval_steps=5000, + save_strategy="steps", + save_steps=10000, + learning_rate=2e-5, + per_device_train_batch_size=BATCH_SIZE, + per_device_eval_batch_size=BATCH_SIZE, + weight_decay=0.01, + save_total_limit=10, + max_steps=100000, + fp16=True, + ) + + trainer = Seq2SeqTrainer( + model, + training_args, + train_dataset=tokenized_corpus["train"], + eval_dataset=tokenized_corpus["validation"], + data_collator=data_collator, + tokenizer=tokenizer, + ) + 
trainer.train() + + +if __name__ == "__main__": + argparser = argparse.ArgumentParser() + argparser.add_argument("--tokenize", action="store_true") + argparser.add_argument("--train", action="store_true") + argparser.add_argument("--eval", action="store_true") + args = argparser.parse_args() + + # Load the task + task = load_task('europarl_dbca_splits') + + # A pretrained multilingual tokenizer, used for both models and both languages + tokenizer = FSMTTokenizer.from_pretrained('stas/tiny-wmt19-en-de') + + MAX_LENGTH = 128 + BATCH_SIZE = 128 + + results = [] + # "comdiv0" is the easy non-compositional data split, with minimal compound divergence + # "comdiv1" is the difficult, compositional data split, with maximal compound divergence + # English-German corpus is used for this example. + # For other target languages, replace "de" with "fr", "el", or "fi" in the subtask name. + for comdiv in ["0", "1"]: + if comdiv == "0": + subtask = task.comdiv0_de + else: + subtask = task.comdiv1_de + + subtask_dataset = subtask.get_prepared_datasets(PreparationStrategy.FINETUNING) + + tokenized_dataset_dir = f'ds_de_comdiv{comdiv}_tokenized' + if args.tokenize: + tokenized_datasets = tokenize_corpus(subtask_dataset, tokenized_dataset_dir) + else: + tokenized_datasets = DatasetDict.load_from_disk(tokenized_dataset_dir) + + # Extract a validation set from training set + train_val_split = tokenized_datasets["train"].train_test_split(test_size=0.01) + tokenized_datasets["train"] = train_val_split["train"] + tokenized_datasets["validation"] = train_val_split["test"] + + nmt_model_dir = f'FSMT_en-de_comdiv{comdiv}' + if args.train: + train_from_scratch(tokenized_datasets, nmt_model_dir) + + if args.eval: + cp = 'checkpoint-100000' + print(f"Results for comdiv{comdiv}, checkpoint {cp}") + preds = translate_sentences(nmt_model_dir + '/' + cp, + tokenized_datasets["test"]["input"]) + + # re-map the keys to match the evaluation script + preds = [{'target': pred['translation_text']} for 
pred in preds] + + score = subtask.evaluate_predictions( + predictions=preds, + gold=tokenized_datasets["test"], + ) + print(score) + results.append(score) + + if args.eval: + print('Generalisation score (maximum compound divergence score divided by ' \ + + 'minimum compound divergence score):') + print(results[1]['hf_chrf__score'] / results[0]['hf_chrf__score']) From 94f8119b6a2098aca61a6b969b5c89073822e105 Mon Sep 17 00:00:00 2001 From: Anssi Moisio Date: Mon, 20 Nov 2023 07:56:09 +0200 Subject: [PATCH 50/57] Fix style --- .../europarl_dbca_splits/usage_example.py | 55 ++++++++++--------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/src/genbench/tasks/europarl_dbca_splits/usage_example.py b/src/genbench/tasks/europarl_dbca_splits/usage_example.py index efd52f9..c3c9b12 100644 --- a/src/genbench/tasks/europarl_dbca_splits/usage_example.py +++ b/src/genbench/tasks/europarl_dbca_splits/usage_example.py @@ -5,23 +5,31 @@ https://huggingface.co/learn/nlp-course/chapter7/4?fw=pt """ import argparse + +from datasets import DatasetDict +from transformers import ( + DataCollatorForSeq2Seq, + FSMTConfig, + FSMTForConditionalGeneration, + FSMTTokenizer, + Seq2SeqTrainer, + Seq2SeqTrainingArguments, + pipeline, +) + from genbench import load_task from genbench.api import PreparationStrategy -from datasets import DatasetDict -from transformers import FSMTConfig, FSMTTokenizer, FSMTForConditionalGeneration, pipeline -from transformers import DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer def tokenize_corpus(dataset, save_to_file): """ Tokenizes the dataset and saves it to disk. 
""" + def preprocess_function(examples): inputs = examples["input"] targets = examples["target"] - model_inputs = tokenizer( - inputs, text_target=targets, max_length=MAX_LENGTH, truncation=True - ) + model_inputs = tokenizer(inputs, text_target=targets, max_length=MAX_LENGTH, truncation=True) return model_inputs dataset = DatasetDict(dataset) @@ -58,11 +66,7 @@ def train_from_scratch(tokenized_corpus, output_dir_name): attention_dropout=0.1, bos_token_id=0, d_model=512, - decoder={ - "bos_token_id": 2, - "model_type": "fsmt_decoder", - "vocab_size": 42024 - }, + decoder={"bos_token_id": 2, "model_type": "fsmt_decoder", "vocab_size": 42024}, decoder_attention_heads=8, decoder_ffn_dim=2048, decoder_layerdrop=0, @@ -130,10 +134,10 @@ def train_from_scratch(tokenized_corpus, output_dir_name): args = argparser.parse_args() # Load the task - task = load_task('europarl_dbca_splits') + task = load_task("europarl_dbca_splits") # A pretrained multilingual tokenizer, used for both models and both languages - tokenizer = FSMTTokenizer.from_pretrained('stas/tiny-wmt19-en-de') + tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de") MAX_LENGTH = 128 BATCH_SIZE = 128 @@ -151,7 +155,7 @@ def train_from_scratch(tokenized_corpus, output_dir_name): subtask_dataset = subtask.get_prepared_datasets(PreparationStrategy.FINETUNING) - tokenized_dataset_dir = f'ds_de_comdiv{comdiv}_tokenized' + tokenized_dataset_dir = f"ds_de_comdiv{comdiv}_tokenized" if args.tokenize: tokenized_datasets = tokenize_corpus(subtask_dataset, tokenized_dataset_dir) else: @@ -162,27 +166,28 @@ def train_from_scratch(tokenized_corpus, output_dir_name): tokenized_datasets["train"] = train_val_split["train"] tokenized_datasets["validation"] = train_val_split["test"] - nmt_model_dir = f'FSMT_en-de_comdiv{comdiv}' + nmt_model_dir = f"FSMT_en-de_comdiv{comdiv}" if args.train: train_from_scratch(tokenized_datasets, nmt_model_dir) if args.eval: - cp = 'checkpoint-100000' + cp = "checkpoint-100000" 
print(f"Results for comdiv{comdiv}, checkpoint {cp}") - preds = translate_sentences(nmt_model_dir + '/' + cp, - tokenized_datasets["test"]["input"]) + preds = translate_sentences(nmt_model_dir + "/" + cp, tokenized_datasets["test"]["input"]) # re-map the keys to match the evaluation script - preds = [{'target': pred['translation_text']} for pred in preds] + preds = [{"target": pred["translation_text"]} for pred in preds] score = subtask.evaluate_predictions( - predictions=preds, - gold=tokenized_datasets["test"], - ) + predictions=preds, + gold=tokenized_datasets["test"], + ) print(score) results.append(score) if args.eval: - print('Generalisation score (maximum compound divergence score divided by ' \ - + 'minimum compound divergence score):') - print(results[1]['hf_chrf__score'] / results[0]['hf_chrf__score']) + print( + "Generalisation score (maximum compound divergence score divided by " + + "minimum compound divergence score):" + ) + print(results[1]["hf_chrf__score"] / results[0]["hf_chrf__score"]) From 32b5f34eb1236851683399a8f0cf2bae30dc3f8e Mon Sep 17 00:00:00 2001 From: Verna Date: Mon, 20 Nov 2023 16:43:12 +0000 Subject: [PATCH 51/57] Add validation split, replace checkpoint name with bert-base --- .../latent_feature_splits/usage_example.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/src/genbench/tasks/latent_feature_splits/usage_example.py b/src/genbench/tasks/latent_feature_splits/usage_example.py index 6551f55..b074a50 100644 --- a/src/genbench/tasks/latent_feature_splits/usage_example.py +++ b/src/genbench/tasks/latent_feature_splits/usage_example.py @@ -1,4 +1,4 @@ -from datasets import load_dataset, DatasetDict +from datasets import load_dataset, DatasetDict, Dataset from transformers import AutoTokenizer, DataCollatorWithPadding, \ Trainer, TrainingArguments, AutoModelForSequenceClassification import numpy as np @@ -6,7 +6,7 @@ from genbench import load_task from genbench.api import PreparationStrategy 
import os - +from sklearn.model_selection import train_test_split def compute_metrics(eval_preds): metric = evaluate.load("f1") @@ -34,10 +34,14 @@ def tokenize_function(example): return tokenizer( example["input"]) - # Convert GenBench format to HF dataset format, preview dataset + # Convert GenBench format to HF dataset format, get devset, preview dataset task = load_task(f"latent_feature_splits:{split_name}") ds = task.get_prepared_datasets(PreparationStrategy.FINETUNING) - ds = DatasetDict(ds) + ds_split = ds["train"].train_test_split(0.1) + ds = DatasetDict({ + "train": ds_split["train"], + "validation": ds_split["test"], + "test": ds["test"]}) ds = ds.rename_column("target", "label") print(ds) @@ -61,7 +65,7 @@ def tokenize_function(example): model, training_args, train_dataset=tokenized_datasets["train"], - eval_dataset=tokenized_datasets["test"], + eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, compute_metrics=compute_metrics, @@ -82,8 +86,8 @@ def tokenize_function(example): split_name = "bert_closest_split" num_labels = 3 batch_size = 16 - lr = 3e-5 + lr = 2e-5 epochs = 5 - checkpoint = "prajjwal1/bert-small" + checkpoint = "bert-base-uncased" main(split_name, num_labels, batch_size, lr, epochs, checkpoint) From bb2829e5c4bc89558770bbc347f3c2f7572f48d6 Mon Sep 17 00:00:00 2001 From: Verna Date: Mon, 20 Nov 2023 17:31:18 +0000 Subject: [PATCH 52/57] Fixed style errors in usage_example --- .../latent_feature_splits/usage_example.py | 42 +++++++++---------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/src/genbench/tasks/latent_feature_splits/usage_example.py b/src/genbench/tasks/latent_feature_splits/usage_example.py index b074a50..8aef633 100644 --- a/src/genbench/tasks/latent_feature_splits/usage_example.py +++ b/src/genbench/tasks/latent_feature_splits/usage_example.py @@ -1,21 +1,25 @@ -from datasets import load_dataset, DatasetDict, Dataset -from transformers import AutoTokenizer, 
DataCollatorWithPadding, \ - Trainer, TrainingArguments, AutoModelForSequenceClassification -import numpy as np +import os + import evaluate +import numpy as np +from datasets import DatasetDict +from transformers import ( + AutoModelForSequenceClassification, + AutoTokenizer, + DataCollatorWithPadding, + Trainer, + TrainingArguments, +) + from genbench import load_task from genbench.api import PreparationStrategy -import os -from sklearn.model_selection import train_test_split + def compute_metrics(eval_preds): metric = evaluate.load("f1") logits, labels = eval_preds predictions = np.argmax(logits, axis=-1) - return metric.compute( - predictions=predictions, - references=labels, - average="macro") + return metric.compute(predictions=predictions, references=labels, average="macro") def main(split_name, num_labels, bsz, lr, epochs, checkpoint): @@ -31,36 +35,31 @@ def main(split_name, num_labels, bsz, lr, epochs, checkpoint): """ def tokenize_function(example): - return tokenizer( - example["input"]) + return tokenizer(example["input"]) # Convert GenBench format to HF dataset format, get devset, preview dataset task = load_task(f"latent_feature_splits:{split_name}") ds = task.get_prepared_datasets(PreparationStrategy.FINETUNING) ds_split = ds["train"].train_test_split(0.1) - ds = DatasetDict({ - "train": ds_split["train"], - "validation": ds_split["test"], - "test": ds["test"]}) + ds = DatasetDict({"train": ds_split["train"], "validation": ds_split["test"], "test": ds["test"]}) ds = ds.rename_column("target", "label") print(ds) # Load and preprocess data tokenizer = AutoTokenizer.from_pretrained(checkpoint) - tokenized_datasets = ds.map( - tokenize_function, batch_size=bsz, batched=True) + tokenized_datasets = ds.map(tokenize_function, batch_size=bsz, batched=True) data_collator = DataCollatorWithPadding(tokenizer=tokenizer) # Load model and HF trainer, WITH evaluation during training - model = AutoModelForSequenceClassification.from_pretrained( - checkpoint, 
num_labels=num_labels) + model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=num_labels) training_args = TrainingArguments( "test-trainer", learning_rate=lr, num_train_epochs=epochs, per_device_train_batch_size=bsz, per_device_eval_batch_size=bsz, - evaluation_strategy="epoch") + evaluation_strategy="epoch", + ) trainer = Trainer( model, training_args, @@ -80,7 +79,6 @@ def tokenize_function(example): print(f"Random f1: {f1_pre}, f1 post-training: {f1_post}") - if __name__ == "__main__": os.environ["WANDB_DISABLED"] = "true" split_name = "bert_closest_split" From 340937ad3d5ba5a836012306b7b022cfd7d4bf71 Mon Sep 17 00:00:00 2001 From: drndr Date: Fri, 24 Nov 2023 16:17:45 +0100 Subject: [PATCH 53/57] update usage example --- .../codesearchnet_adv/task.py | 10 +- .../codesearchnet_adv/test_mrr_task.py | 36 ---- .../codesearchnet_go/task.py | 5 +- .../codesearchnet_java/task.py | 5 +- .../codesearchnet_javascript/task.py | 5 +- .../codesearchnet_php/task.py | 5 +- .../codesearchnet_ruby/task.py | 5 +- .../tasks/nl_codesearch_mrr/cosqa/task.py | 5 +- .../tasks/nl_codesearch_mrr/mrr_demo.py | 27 +++ .../nl_codesearch_mrr/statcodesearch/task.py | 5 +- .../statcodesearch/test_mrr_task.py | 36 ---- .../tasks/nl_codesearch_mrr/usage_example.py | 191 +++++------------- 12 files changed, 83 insertions(+), 252 deletions(-) delete mode 100644 src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/test_mrr_task.py create mode 100644 src/genbench/tasks/nl_codesearch_mrr/mrr_demo.py delete mode 100644 src/genbench/tasks/nl_codesearch_mrr/statcodesearch/test_mrr_task.py diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py index 269be7d..52535c5 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/task.py @@ -73,7 +73,6 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, 
datasets.Dataset]: new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} new_data.append(new_item) - # Convert list back to HuggingFace dataset output[split] = datasets.Dataset.from_dict({k: [dic[k] for dic in new_data] for k in new_data[0]}) # Create negative samples for training @@ -84,11 +83,11 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: new_dataset = new_dataset.add_item(item) other_items = [other_item for other_item in dataset if other_item != item] # Randomly select 49 other items - random_items = random.sample(other_items, 1) + random_item = random.sample(other_items, 1) # Split input into comment and code input_parts = item["input"].split("[CODESPLIT]") # Split random input into comment and code - random_input_parts = random_item["input"].split("[CODESPLIT]") + random_input_parts = random_item[0]["input"].split("[CODESPLIT]") # Combine the "input" fields of the original and random items new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} @@ -99,16 +98,13 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: output[split] = dataset return output - def evaluate_predictions( - self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors - ) -> Dict[str, float]: + def evaluate_predictions(self, predictions: List[Dict[str, float]], n_distractors) -> Dict[str, float]: """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors This function assumes that the predictions were made and passed onto this function unshuffled. The test data is ordered with each true pair followed by n number of distractors Args: predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. 
The keys are strings and the values are floats (logit scores or similarity values). - gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. n_distractors: Number of distractor comment-code pair for each true pair. Must be the same number as in the get_dataset_raw function diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/test_mrr_task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/test_mrr_task.py deleted file mode 100644 index 62f81d8..0000000 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_adv/test_mrr_task.py +++ /dev/null @@ -1,36 +0,0 @@ -import dataclass_factory -from task import NlCodesearchMrrCodesearchnetAdv - -from genbench.task_config import TaskConfig -from genbench.utils.file import load_jsonnet - - -def main(): - high_mrr_test_list = [] - for i in range(1, 11): - score_dict = dict.fromkeys(["score"]) - score_dict["score"] = 1 / i - high_mrr_test_list.append(score_dict) - - low_mrr_test_list = [] - for i in range(1, 11): - score_dict = dict.fromkeys(["score"]) - score_dict["score"] = 1 * i - low_mrr_test_list.append(score_dict) - - cfg_file = load_jsonnet("./config.jsonnet") - factory = dataclass_factory.Factory() - config: TaskConfig = factory.load(cfg_file, TaskConfig) - - task = NlCodesearchMrrCodesearchnetAdv(config, "nl_codesearch_mrr") - output_ds = task.get_dataset_raw(9) - - high_results = task.evaluate_predictions(high_mrr_test_list, output_ds, 9) - print(high_results) - - low_results = task.evaluate_predictions(low_mrr_test_list, output_ds, 9) - print(low_results) - - -if __name__ == "__main__": - main() diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py index 84eff2b..beff8ca 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_go/task.py @@ -80,16 +80,13 @@ def get_dataset_raw(self, n_distractors) 
-> Dict[str, datasets.Dataset]: output[split] = dataset return output - def evaluate_predictions( - self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors - ) -> Dict[str, float]: + def evaluate_predictions(self, predictions: List[Dict[str, float]], n_distractors) -> Dict[str, float]: """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors This function assumes that the predictions were made and passed onto this function unshuffled. The test data is ordered with each true pair followed by n number of distractors Args: predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. The keys are strings and the values are floats (logit scores or similarity values). - gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. n_distractors: Number of distractor comment-code pair for each true pair. Must be the same number as in the get_dataset_raw function diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py index 7f755ff..b5ec8e0 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_java/task.py @@ -80,16 +80,13 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: output[split] = dataset return output - def evaluate_predictions( - self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors - ) -> Dict[str, float]: + def evaluate_predictions(self, predictions: List[Dict[str, float]], n_distractors) -> Dict[str, float]: """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors This function assumes that the predictions were made and passed onto this function unshuffled. 
The test data is ordered with each true pair followed by n number of distractors Args: predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. The keys are strings and the values are floats (logit scores or similarity values). - gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. n_distractors: Number of distractor comment-code pair for each true pair. Must be the same number as in the get_dataset_raw function diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py index d88eea5..aeb2056 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_javascript/task.py @@ -80,16 +80,13 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: output[split] = dataset return output - def evaluate_predictions( - self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors - ) -> Dict[str, float]: + def evaluate_predictions(self, predictions: List[Dict[str, float]], n_distractors) -> Dict[str, float]: """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors This function assumes that the predictions were made and passed onto this function unshuffled. The test data is ordered with each true pair followed by n number of distractors Args: predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. The keys are strings and the values are floats (logit scores or similarity values). - gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. n_distractors: Number of distractor comment-code pair for each true pair. 
Must be the same number as in the get_dataset_raw function diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py index 78af97f..797855b 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_php/task.py @@ -80,16 +80,13 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: output[split] = dataset return output - def evaluate_predictions( - self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors - ) -> Dict[str, float]: + def evaluate_predictions(self, predictions: List[Dict[str, float]], n_distractors) -> Dict[str, float]: """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors This function assumes that the predictions were made and passed onto this function unshuffled. The test data is ordered with each true pair followed by n number of distractors Args: predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. The keys are strings and the values are floats (logit scores or similarity values). - gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. n_distractors: Number of distractor comment-code pair for each true pair. 
Must be the same number as in the get_dataset_raw function diff --git a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py index 687ce20..f2525c1 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/codesearchnet_ruby/task.py @@ -80,16 +80,13 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: output[split] = dataset return output - def evaluate_predictions( - self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors - ) -> Dict[str, float]: + def evaluate_predictions(self, predictions: List[Dict[str, float]], n_distractors) -> Dict[str, float]: """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors This function assumes that the predictions were made and passed onto this function unshuffled. The test data is ordered with each true pair followed by n number of distractors Args: predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. The keys are strings and the values are floats (logit scores or similarity values). - gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. n_distractors: Number of distractor comment-code pair for each true pair. 
Must be the same number as in the get_dataset_raw function diff --git a/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py b/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py index f86b0a6..64b959e 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/cosqa/task.py @@ -80,16 +80,13 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: output[split] = dataset return output - def evaluate_predictions( - self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors - ) -> Dict[str, float]: + def evaluate_predictions(self, predictions: List[Dict[str, float]], n_distractors) -> Dict[str, float]: """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors This function assumes that the predictions were made and passed onto this function unshuffled. The test data is ordered with each true pair followed by n number of distractors Args: predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. The keys are strings and the values are floats (logit scores or similarity values). - gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. n_distractors: Number of distractor comment-code pair for each true pair. 
Must be the same number as in the get_dataset_raw function diff --git a/src/genbench/tasks/nl_codesearch_mrr/mrr_demo.py b/src/genbench/tasks/nl_codesearch_mrr/mrr_demo.py new file mode 100644 index 0000000..6246a78 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_mrr/mrr_demo.py @@ -0,0 +1,27 @@ +from genbench import load_task + + +def main(): + high_mrr_test_list = [] + for i in range(1, 11): + score_dict = dict.fromkeys(["score"]) + score_dict["score"] = 1 / i + high_mrr_test_list.append(score_dict) + + low_mrr_test_list = [] + for i in range(1, 11): + score_dict = dict.fromkeys(["score"]) + score_dict["score"] = 1 * i + low_mrr_test_list.append(score_dict) + + task = load_task("nl_codesearch_mrr:statcodesearch") + + high_results = task.evaluate_predictions(high_mrr_test_list, 9) + print(high_results) + + low_results = task.evaluate_predictions(low_mrr_test_list, 9) + print(low_results) + + +if __name__ == "__main__": + main() diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py index bb6180e..2566044 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py +++ b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/task.py @@ -79,16 +79,13 @@ def get_dataset_raw(self, n_distractors) -> Dict[str, datasets.Dataset]: return output - def evaluate_predictions( - self, predictions: List[Dict[str, float]], gold: datasets.Dataset, n_distractors - ) -> Dict[str, float]: + def evaluate_predictions(self, predictions: List[Dict[str, float]], n_distractors) -> Dict[str, float]: """Calculate the MRR score in chunks. One chunk consist of a true comment-code pair and n number of distractors This function assumes that the predictions were made and passed onto this function unshuffled. The test data is ordered with each true pair followed by n number of distractors Args: predictions: A list of dictionaries, where each dictionary contains the predicted values for an example. 
The keys are strings and the values are floats (logit scores or similarity values). - gold: A HuggingFace `datasets.Dataset` object containing the ground truth data for the task. n_distractors: Number of distractor comment-code pair for each true pair. Must be the same number as in the get_dataset_raw function diff --git a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/test_mrr_task.py b/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/test_mrr_task.py deleted file mode 100644 index 90c0cb4..0000000 --- a/src/genbench/tasks/nl_codesearch_mrr/statcodesearch/test_mrr_task.py +++ /dev/null @@ -1,36 +0,0 @@ -import dataclass_factory -from task import NlCodesearchMrrStatcodesearch - -from genbench.task_config import TaskConfig -from genbench.utils.file import load_jsonnet - - -def main(): - high_mrr_test_list = [] - for i in range(1, 11): - score_dict = dict.fromkeys(["score"]) - score_dict["score"] = 1 / i - high_mrr_test_list.append(score_dict) - - low_mrr_test_list = [] - for i in range(1, 11): - score_dict = dict.fromkeys(["score"]) - score_dict["score"] = 1 * i - low_mrr_test_list.append(score_dict) - - cfg_file = load_jsonnet("./config.jsonnet") - factory = dataclass_factory.Factory() - config: TaskConfig = factory.load(cfg_file, TaskConfig) - - task = NlCodesearchMrrStatcodesearch(config, "nl_codesearch_mrr") - output_ds = task.get_dataset_raw(9) - - high_results = task.evaluate_predictions(high_mrr_test_list, output_ds, 9) - print(high_results) - - low_results = task.evaluate_predictions(low_mrr_test_list, output_ds, 9) - print(low_results) - - -if __name__ == "__main__": - main() diff --git a/src/genbench/tasks/nl_codesearch_mrr/usage_example.py b/src/genbench/tasks/nl_codesearch_mrr/usage_example.py index 7b108b1..8bb1455 100644 --- a/src/genbench/tasks/nl_codesearch_mrr/usage_example.py +++ b/src/genbench/tasks/nl_codesearch_mrr/usage_example.py @@ -3,13 +3,14 @@ import logging import random -import numpy as np import torch from torch.optim import 
AdamW from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSequenceClassification, AutoTokenizer, PreTrainedModel, get_scheduler +from genbench import load_task + ########################################################## # Data Loadig Utils @@ -100,23 +101,30 @@ def _convert_examples_to_features( return features -def load_data(tokenizer, batch_size, seq_len, train_file): +def load_data(tokenizer, batch_size, seq_len, train_file, is_train): # create dataset comments = [] codes = [] labels = [] skipped = 0 + if is_train: + do_shuffle = True + else: + do_shuffle = False is_sep_token_set = tokenizer.sep_token is not None is_cls_token_set = tokenizer.cls_token is not None is_pad_token_set = tokenizer.pad_token is not None is_eos_token_set = tokenizer.eos_token is not None - with open(train_file, "r", encoding="utf-8") as infile: - for line in infile: + for split, dataset in train_file.items(): + if is_train and split == "test": + continue + if not is_train and split == "train": + continue + for sample in dataset: try: - item = json.loads(line.strip()) - input = item["input"] + input = sample["input"] # split at [CODESPLIT] token input = input.split("[CODESPLIT]") if len(input) != 2: @@ -143,7 +151,8 @@ def load_data(tokenizer, batch_size, seq_len, train_file): continue comments.append(input[0]) codes.append(input[1]) - labels.append(item["target"]) + labels.append(sample["target"]) + except json.JSONDecodeError as e: print(f"Error: JSON decoding failed - {e}") continue @@ -165,7 +174,7 @@ def load_data(tokenizer, batch_size, seq_len, train_file): # Convert to Dataset features = Dataset(features) - return DataLoader(features, batch_size=batch_size, shuffle=True) + return DataLoader(features, batch_size=batch_size, shuffle=do_shuffle) ############################################################## @@ -215,132 +224,24 @@ def train(model: PreTrainedModel, dataloader: DataLoader, args: argparse.Namespa 
########################################################### -def load_data_for_mrr(tokenizer, file): - # create dataset - comments = [] - codes = [] - labels = [] - skipped = 0 - - is_sep_token_set = tokenizer.sep_token is not None - is_cls_token_set = tokenizer.cls_token is not None - is_pad_token_set = tokenizer.pad_token is not None - is_eos_token_set = tokenizer.eos_token is not None - - with open(file, "r", encoding="utf-8") as infile: - for line in infile: - try: - item = json.loads(line.strip()) - input = item["input"] - # split at [CODESPLIT] token - input = input.split("[CODESPLIT]") - if len(input) != 2: - # skip cases with more than one [SEP] token - logging.warning(f"Input contains more than one [CODESPLIT] token: {input}") - skipped += 1 - continue - # skip every sample that contains special tokens - if is_sep_token_set and (tokenizer.sep_token in input[0] or tokenizer.sep_token in input[1]): - logging.warning(f"Input contains special tokens: {input}") - skipped += 1 - continue - if is_cls_token_set and (tokenizer.cls_token in input[0] or tokenizer.cls_token in input[1]): - logging.warning(f"Input contains special tokens: {input}") - skipped += 1 - continue - if is_pad_token_set and (tokenizer.pad_token in input[0] or tokenizer.pad_token in input[1]): - logging.warning(f"Input contains special tokens: {input}") - skipped += 1 - continue - if is_eos_token_set and (tokenizer.eos_token in input[0] or tokenizer.eos_token in input[1]): - logging.warning(f"Input contains special tokens: {input}") - skipped += 1 - continue - comments.append(input[0]) - codes.append(input[1]) - labels.append(item["target"]) - except json.JSONDecodeError as e: - print(f"Error: JSON decoding failed - {e}") - continue - logging.info(f"Skipped {skipped} samples due to special tokens") - - return comments, codes - - -def mrr(model, tokenizer, file, args): +def get_scores(model, dataloader): random.seed(42) - - # load data - comments, codes = load_data_for_mrr(tokenizer, file) - - # 
create mrr chunks with (default 99) distractors - - chunks = [] - for i, sample in enumerate(zip(comments, codes)): - comment, code = sample - codes_without_sample = codes[:i] + codes[i + 1 :] - # select 99 random codes - distractors = random.sample(codes_without_sample, args.distractors) - # create samples - codes = [code] + distractors - comments = [comment] * len(codes) - labels = [1] + [0] * len(distractors) - # convert to features - features = _convert_examples_to_features( - comments, - codes, - labels, - tokenizer=tokenizer, - max_seq_length=args.seq_len, - cls_token=tokenizer.cls_token, - sep_token=tokenizer.sep_token, - cls_token_segment_id=tokenizer.cls_token_id, - pad_token_segment_id=tokenizer.pad_token_id, - eos_token=tokenizer.eos_token, - ) - - chunks.append(features) - # make predictions for all chunks device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print("Using device:", device) model.to(device) model.eval() - ranks = [] - for chunk in tqdm(chunks): - # calc correct sample (always the first one) - correct = chunk[0] - input_ids = correct["input_ids"].unsqueeze(0).to(device) - attention_mask = correct["attention_mask"].unsqueeze(0).to(device) - labels = correct["labels"].unsqueeze(0).to(device) + score_list = [] + for batch in tqdm(dataloader): + batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): - outputs = model(input_ids, attention_mask=attention_mask, labels=labels) - logits = outputs.logits - correct_score = logits[0][0].item() - - # calc scores for the rest of the samples - scores = [] - # add correct score to scores - scores.append(correct_score) - # create batches of size args.batch_size - batch_size = args.batch_size - for i in range(1, len(chunk), batch_size): - batch = chunk[i : i + batch_size] - input_ids = torch.stack([sample["input_ids"] for sample in batch]).to(device) - attention_mask = torch.stack([sample["attention_mask"] for sample in batch]).to(device) - labels = 
torch.stack([sample["labels"] for sample in batch]).to(device) - with torch.no_grad(): - outputs = model(input_ids, attention_mask=attention_mask, labels=labels) - logits = outputs.logits - scores.extend(logits[:, 1].cpu().numpy().tolist()) - - rank = np.sum(np.array(scores) >= correct_score) - ranks.append(rank) - - mean_mrr = np.mean(1.0 / np.array(ranks)) - - return mean_mrr + outputs = model(**batch) + score_dict = dict.fromkeys(["score"]) + score_dict["score"] = outputs.logits.cpu().numpy() + score_list.append(score_dict) + + return score_list ############################################################## @@ -361,12 +262,12 @@ def main(): parser.add_argument("--num_warmup_steps", type=int, default=0) parser.add_argument("--output_dir", type=str, default="models") parser.add_argument("--seq_len", type=int, default=512, help="maximum sequence length") - parser.add_argument("--distractors", type=int, default=99, help="number of distractors per true pair") + parser.add_argument("--distractors", type=int, default=2, help="number of distractors per true pair") parser.add_argument("--log_level", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], default="INFO") args = parser.parse_args() - TRAIN_FILE = "./codesearchnet_adv/train_adv_clf.jsonl" + TRAIN_FILE = load_task("nl_codesearch_mrr:codesearchnet_adv").get_dataset_raw(args.distractors) # logging logging.basicConfig(level=args.log_level) @@ -377,7 +278,7 @@ def main(): # load data logging.info("Loading data...") - dataloader = load_data(tokenizer, args.batch_size, args.seq_len, TRAIN_FILE) + dataloader = load_data(tokenizer, args.batch_size, args.seq_len, TRAIN_FILE, True) model = AutoModelForSequenceClassification.from_pretrained(args.model) @@ -391,25 +292,25 @@ def main(): # also soave tokenizer tokenizer.save_pretrained(f"{args.output_dir}/{args.model}") - DS_FOLDER = "./" - - FILES = [ - ["statcodesearch", "test_statcodesearch"], - ["codesearchnet_adv", "test_adv"], - ["codesearchnet_go", "test_go"], 
- ["codesearchnet_java", "test_java"], - ["codesearchnet_javascript", "test_javascript"], - ["codesearchnet_php", "test_php"], - ["codesearchnet_ruby", "test_ruby"], - ["cosqa", "test_cosqa"], + TEST_TASKS = [ + ["codesearchnetadv", load_task("nl_codesearch_mrr:codesearchnet_adv")], + ["codesearchnet_ruby", load_task("nl_codesearch_mrr:codesearchnet_ruby")], + ["codesearchnet_go", load_task("nl_codesearch_mrr:codesearchnet_go")], + ["codesearchnet_java", load_task("nl_codesearch_mrr:codesearchnet_java")], + ["codesearchnet_javascript", load_task("nl_codesearch_mrr:codesearchnet_javascript")], + ["codesearchnet_php", load_task("nl_codesearch_mrr:codesearchnet_php")], + ["cosqa", load_task("nl_codesearch_mrr:cosqa")], + ["statcodesearch", load_task("nl_codesearch_mrr:statcodesearch")], ] results = {} - for meta_data in FILES: - logging.info(f"Evaluating on {meta_data}...") - metrics = mrr(model, tokenizer, f"{DS_FOLDER}/mrr/{meta_data[0]}/{meta_data[1]}_mrr.jsonl", args) - results[meta_data[0]] = metrics - logging.info(f"Test results for {meta_data}: {metrics}") + for task in TEST_TASKS: + logging.info(f"Calculating Logits for MRR {task[0]}...") + dataloader = load_data(tokenizer, 1, args.seq_len, task[1].get_dataset_raw(args.distractors), False) + scores = get_scores(model, dataloader) + mrr_value = task[1].evaluate_predictions(scores, args.distractors) + logging.info(f"Test results for {task[0]}: {mrr_value}") + results[task[0]] = mrr_value logging.info(f"Test results: {results}") From ee8608976e994b3a9ca24807c730a1dd4fd61bcf Mon Sep 17 00:00:00 2001 From: Anssi Moisio Date: Fri, 15 Dec 2023 15:04:52 +0200 Subject: [PATCH 54/57] Remove repetitive Task classes from subtasks, inherit from _base_task.py instead. 
--- .../tasks/europarl_dbca_splits/_base_task.py | 116 +++++++++++++++++ .../europarl_dbca_splits/comdiv0_de/task.py | 117 +----------------- .../europarl_dbca_splits/comdiv0_el/task.py | 117 +----------------- .../europarl_dbca_splits/comdiv0_fi/task.py | 117 +----------------- .../europarl_dbca_splits/comdiv0_fr/task.py | 117 +----------------- .../europarl_dbca_splits/comdiv1_de/task.py | 117 +----------------- .../europarl_dbca_splits/comdiv1_el/task.py | 117 +----------------- .../europarl_dbca_splits/comdiv1_fi/task.py | 117 +----------------- .../europarl_dbca_splits/comdiv1_fr/task.py | 117 +----------------- 9 files changed, 140 insertions(+), 912 deletions(-) create mode 100644 src/genbench/tasks/europarl_dbca_splits/_base_task.py diff --git a/src/genbench/tasks/europarl_dbca_splits/_base_task.py b/src/genbench/tasks/europarl_dbca_splits/_base_task.py new file mode 100644 index 0000000..3e4be76 --- /dev/null +++ b/src/genbench/tasks/europarl_dbca_splits/_base_task.py @@ -0,0 +1,116 @@ +from collections import OrderedDict +from typing import Any, List, Mapping + +import evaluate +import numpy as np +from datasets import Dataset + +from genbench import Task +from genbench.api import EvaluationResult, TaskType +from genbench.utils.logging import get_logger + + +logger = get_logger(__name__) + + +class BaseDbcaTask(Task): + """This task evaluates how well an NMT model generalises to a shifted distribution of + dependency relations. In practice, this means that the test set includes novel + (, , ) tuples (=compounds) that were not seen in + the training set, while having similar relative frequencies of the lemmas and dependency + relation tags (= elements of the compound tuples = atoms). 
+ """ + + def evaluate_predictions( + self, + *, + predictions: List[Mapping[str, Any]] = None, + gold: Dataset = None, + ) -> EvaluationResult: + result = OrderedDict() + for metric_config in self.config.evaluation_metrics: + hf_id = metric_config.hf_id + if isinstance(hf_id, str): + hf_id = [hf_id] + + metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) + + refs_lst = [g["target"] for g in gold] + preds_lst = [pred["target"] for pred in predictions] + + ref_type = type(refs_lst[0]) + pred_type = type(preds_lst[0]) + if pred_type != ref_type: + if self.config.task_type != TaskType.MULTIPLE_CHOICE: + raise ValueError( + f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " + ) + # Convert predictions to the same type as the references + if pred_type == str and ref_type == int: + logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") + converted_preds = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + preds_lst = converted_preds + elif pred_type == int and ref_type == str: + logger.warning("Predictions are ints, but references are strings. Converting references to ints.") + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_refs.append(ref["target_options"].index(ref["target"])) + refs_lst = converted_refs + else: + if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: + # Convert both predictions and references to int + logger.warning( + "Predictions and references have the same type, but it is not int. Converting both to int." 
+ ) + converted_preds = [] + converted_refs = [] + for pred, ref in zip(preds_lst, gold): + assert "target_options" in ref + converted_preds.append(ref["target_options"].index(pred)) + converted_refs.append(ref["target_options"].index(ref["target"])) + preds_lst = converted_preds + refs_lst = converted_refs + + extra_kwargs = metric_config.compute_extra_kwargs or {} + output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) + + if output is None: + raise ValueError( + f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." + ) + + # Update output keys to include the metric id + metric_id = "_".join(hf_id) + output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} + + result.update(output) + + return result + + def chernoff_coef(self, vec1, vec2, alpha): + """ + The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) + = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) + distributions P and Q. The alpha parameter determines if we want to + measure whether Q includes elements that are not in P. + """ + if alpha < 0 or alpha > 1: + raise ValueError("alpha must be in [0,1]") + # use log to avoid underflow + return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) + + def normalize_vector(self, vector): + """Normalize a vector to have sum 1.""" + return np.nan_to_num(np.divide(vector, np.sum(vector))) + + def divergence(self, vec1, vec2, alpha): + """ + Calculate divergence between two vectors. + Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. + Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
+ """ + return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/task.py index ed51ebb..898b036 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/task.py +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_de/task.py @@ -1,116 +1,5 @@ -from collections import OrderedDict -from typing import Any, List, Mapping +from genbench.tasks.europarl_dbca_splits._base_task import BaseDbcaTask -import evaluate -import numpy as np -from datasets import Dataset -from genbench import Task -from genbench.api import EvaluationResult, TaskType -from genbench.utils.logging import get_logger - - -logger = get_logger(__name__) - - -class EuroparlDbcaSplitsComdiv0De(Task): - """This task evaluates how well an NMT model generalises to a shifted distribution of - dependency relations. In practice, this means that the test set includes novel - (, , ) tuples (=compounds) that were not seen in - the training set, while having similar relative frequencies of the lemmas and dependency - relation tags (= elements of the compound tuples = atoms). - """ - - def evaluate_predictions( - self, - *, - predictions: List[Mapping[str, Any]] = None, - gold: Dataset = None, - ) -> EvaluationResult: - result = OrderedDict() - for metric_config in self.config.evaluation_metrics: - hf_id = metric_config.hf_id - if isinstance(hf_id, str): - hf_id = [hf_id] - - metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) - - refs_lst = [g["target"] for g in gold] - preds_lst = [pred["target"] for pred in predictions] - - ref_type = type(refs_lst[0]) - pred_type = type(preds_lst[0]) - if pred_type != ref_type: - if self.config.task_type != TaskType.MULTIPLE_CHOICE: - raise ValueError( - f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. 
" - ) - # Convert predictions to the same type as the references - if pred_type == str and ref_type == int: - logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") - converted_preds = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - preds_lst = converted_preds - elif pred_type == int and ref_type == str: - logger.warning("Predictions are ints, but references are strings. Converting references to ints.") - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_refs.append(ref["target_options"].index(ref["target"])) - refs_lst = converted_refs - else: - if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: - # Convert both predictions and references to int - logger.warning( - "Predictions and references have the same type, but it is not int. Converting both to int." - ) - converted_preds = [] - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - converted_refs.append(ref["target_options"].index(ref["target"])) - preds_lst = converted_preds - refs_lst = converted_refs - - extra_kwargs = metric_config.compute_extra_kwargs or {} - output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) - - if output is None: - raise ValueError( - f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." 
- ) - - # Update output keys to include the metric id - metric_id = "_".join(hf_id) - output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} - - result.update(output) - - return result - - def chernoff_coef(self, vec1, vec2, alpha): - """ - The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) - = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) - distributions P and Q. The alpha parameter determines if we want to - measure whether Q includes elements that are not in P. - """ - if alpha < 0 or alpha > 1: - raise ValueError("alpha must be in [0,1]") - # use log to avoid underflow - return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) - - def normalize_vector(self, vector): - """Normalize a vector to have sum 1.""" - return np.nan_to_num(np.divide(vector, np.sum(vector))) - - def divergence(self, vec1, vec2, alpha): - """ - Calculate divergence between two vectors. - Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. - Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
- """ - return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) +class EuroparlDbcaSplitsComdiv0De(BaseDbcaTask): + pass diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/task.py index 1197055..1124f49 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/task.py +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_el/task.py @@ -1,116 +1,5 @@ -from collections import OrderedDict -from typing import Any, List, Mapping +from genbench.tasks.europarl_dbca_splits._base_task import BaseDbcaTask -import evaluate -import numpy as np -from datasets import Dataset -from genbench import Task -from genbench.api import EvaluationResult, TaskType -from genbench.utils.logging import get_logger - - -logger = get_logger(__name__) - - -class EuroparlDbcaSplitsComdiv0El(Task): - """This task evaluates how well an NMT model generalises to a shifted distribution of - dependency relations. In practice, this means that the test set includes novel - (, , ) tuples (=compounds) that were not seen in - the training set, while having similar relative frequencies of the lemmas and dependency - relation tags (= elements of the compound tuples = atoms). 
- """ - - def evaluate_predictions( - self, - *, - predictions: List[Mapping[str, Any]] = None, - gold: Dataset = None, - ) -> EvaluationResult: - result = OrderedDict() - for metric_config in self.config.evaluation_metrics: - hf_id = metric_config.hf_id - if isinstance(hf_id, str): - hf_id = [hf_id] - - metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) - - refs_lst = [g["target"] for g in gold] - preds_lst = [pred["target"] for pred in predictions] - - ref_type = type(refs_lst[0]) - pred_type = type(preds_lst[0]) - if pred_type != ref_type: - if self.config.task_type != TaskType.MULTIPLE_CHOICE: - raise ValueError( - f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " - ) - # Convert predictions to the same type as the references - if pred_type == str and ref_type == int: - logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") - converted_preds = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - preds_lst = converted_preds - elif pred_type == int and ref_type == str: - logger.warning("Predictions are ints, but references are strings. Converting references to ints.") - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_refs.append(ref["target_options"].index(ref["target"])) - refs_lst = converted_refs - else: - if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: - # Convert both predictions and references to int - logger.warning( - "Predictions and references have the same type, but it is not int. Converting both to int." 
- ) - converted_preds = [] - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - converted_refs.append(ref["target_options"].index(ref["target"])) - preds_lst = converted_preds - refs_lst = converted_refs - - extra_kwargs = metric_config.compute_extra_kwargs or {} - output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) - - if output is None: - raise ValueError( - f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." - ) - - # Update output keys to include the metric id - metric_id = "_".join(hf_id) - output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} - - result.update(output) - - return result - - def chernoff_coef(self, vec1, vec2, alpha): - """ - The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) - = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) - distributions P and Q. The alpha parameter determines if we want to - measure whether Q includes elements that are not in P. - """ - if alpha < 0 or alpha > 1: - raise ValueError("alpha must be in [0,1]") - # use log to avoid underflow - return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) - - def normalize_vector(self, vector): - """Normalize a vector to have sum 1.""" - return np.nan_to_num(np.divide(vector, np.sum(vector))) - - def divergence(self, vec1, vec2, alpha): - """ - Calculate divergence between two vectors. - Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. - Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
- """ - return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) +class EuroparlDbcaSplitsComdiv0El(BaseDbcaTask): + pass diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/task.py index 7e82a88..7bf9f32 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/task.py +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fi/task.py @@ -1,116 +1,5 @@ -from collections import OrderedDict -from typing import Any, List, Mapping +from genbench.tasks.europarl_dbca_splits._base_task import BaseDbcaTask -import evaluate -import numpy as np -from datasets import Dataset -from genbench import Task -from genbench.api import EvaluationResult, TaskType -from genbench.utils.logging import get_logger - - -logger = get_logger(__name__) - - -class EuroparlDbcaSplitsComdiv0Fi(Task): - """This task evaluates how well an NMT model generalises to a shifted distribution of - dependency relations. In practice, this means that the test set includes novel - (, , ) tuples (=compounds) that were not seen in - the training set, while having similar relative frequencies of the lemmas and dependency - relation tags (= elements of the compound tuples = atoms). 
- """ - - def evaluate_predictions( - self, - *, - predictions: List[Mapping[str, Any]] = None, - gold: Dataset = None, - ) -> EvaluationResult: - result = OrderedDict() - for metric_config in self.config.evaluation_metrics: - hf_id = metric_config.hf_id - if isinstance(hf_id, str): - hf_id = [hf_id] - - metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) - - refs_lst = [g["target"] for g in gold] - preds_lst = [pred["target"] for pred in predictions] - - ref_type = type(refs_lst[0]) - pred_type = type(preds_lst[0]) - if pred_type != ref_type: - if self.config.task_type != TaskType.MULTIPLE_CHOICE: - raise ValueError( - f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " - ) - # Convert predictions to the same type as the references - if pred_type == str and ref_type == int: - logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") - converted_preds = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - preds_lst = converted_preds - elif pred_type == int and ref_type == str: - logger.warning("Predictions are ints, but references are strings. Converting references to ints.") - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_refs.append(ref["target_options"].index(ref["target"])) - refs_lst = converted_refs - else: - if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: - # Convert both predictions and references to int - logger.warning( - "Predictions and references have the same type, but it is not int. Converting both to int." 
- ) - converted_preds = [] - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - converted_refs.append(ref["target_options"].index(ref["target"])) - preds_lst = converted_preds - refs_lst = converted_refs - - extra_kwargs = metric_config.compute_extra_kwargs or {} - output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) - - if output is None: - raise ValueError( - f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." - ) - - # Update output keys to include the metric id - metric_id = "_".join(hf_id) - output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} - - result.update(output) - - return result - - def chernoff_coef(self, vec1, vec2, alpha): - """ - The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) - = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) - distributions P and Q. The alpha parameter determines if we want to - measure whether Q includes elements that are not in P. - """ - if alpha < 0 or alpha > 1: - raise ValueError("alpha must be in [0,1]") - # use log to avoid underflow - return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) - - def normalize_vector(self, vector): - """Normalize a vector to have sum 1.""" - return np.nan_to_num(np.divide(vector, np.sum(vector))) - - def divergence(self, vec1, vec2, alpha): - """ - Calculate divergence between two vectors. - Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. - Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
- """ - return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) +class EuroparlDbcaSplitsComdiv0Fi(BaseDbcaTask): + pass diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/task.py index bfff4f1..943fe65 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/task.py +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv0_fr/task.py @@ -1,116 +1,5 @@ -from collections import OrderedDict -from typing import Any, List, Mapping +from genbench.tasks.europarl_dbca_splits._base_task import BaseDbcaTask -import evaluate -import numpy as np -from datasets import Dataset -from genbench import Task -from genbench.api import EvaluationResult, TaskType -from genbench.utils.logging import get_logger - - -logger = get_logger(__name__) - - -class EuroparlDbcaSplitsComdiv0Fr(Task): - """This task evaluates how well an NMT model generalises to a shifted distribution of - dependency relations. In practice, this means that the test set includes novel - (, , ) tuples (=compounds) that were not seen in - the training set, while having similar relative frequencies of the lemmas and dependency - relation tags (= elements of the compound tuples = atoms). 
- """ - - def evaluate_predictions( - self, - *, - predictions: List[Mapping[str, Any]] = None, - gold: Dataset = None, - ) -> EvaluationResult: - result = OrderedDict() - for metric_config in self.config.evaluation_metrics: - hf_id = metric_config.hf_id - if isinstance(hf_id, str): - hf_id = [hf_id] - - metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) - - refs_lst = [g["target"] for g in gold] - preds_lst = [pred["target"] for pred in predictions] - - ref_type = type(refs_lst[0]) - pred_type = type(preds_lst[0]) - if pred_type != ref_type: - if self.config.task_type != TaskType.MULTIPLE_CHOICE: - raise ValueError( - f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " - ) - # Convert predictions to the same type as the references - if pred_type == str and ref_type == int: - logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") - converted_preds = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - preds_lst = converted_preds - elif pred_type == int and ref_type == str: - logger.warning("Predictions are ints, but references are strings. Converting references to ints.") - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_refs.append(ref["target_options"].index(ref["target"])) - refs_lst = converted_refs - else: - if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: - # Convert both predictions and references to int - logger.warning( - "Predictions and references have the same type, but it is not int. Converting both to int." 
- ) - converted_preds = [] - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - converted_refs.append(ref["target_options"].index(ref["target"])) - preds_lst = converted_preds - refs_lst = converted_refs - - extra_kwargs = metric_config.compute_extra_kwargs or {} - output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) - - if output is None: - raise ValueError( - f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." - ) - - # Update output keys to include the metric id - metric_id = "_".join(hf_id) - output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} - - result.update(output) - - return result - - def chernoff_coef(self, vec1, vec2, alpha): - """ - The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) - = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) - distributions P and Q. The alpha parameter determines if we want to - measure whether Q includes elements that are not in P. - """ - if alpha < 0 or alpha > 1: - raise ValueError("alpha must be in [0,1]") - # use log to avoid underflow - return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) - - def normalize_vector(self, vector): - """Normalize a vector to have sum 1.""" - return np.nan_to_num(np.divide(vector, np.sum(vector))) - - def divergence(self, vec1, vec2, alpha): - """ - Calculate divergence between two vectors. - Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. - Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
- """ - return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) +class EuroparlDbcaSplitsComdiv0Fr(BaseDbcaTask): + pass diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/task.py index b89d8aa..3b9ec0a 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/task.py +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_de/task.py @@ -1,116 +1,5 @@ -from collections import OrderedDict -from typing import Any, List, Mapping +from genbench.tasks.europarl_dbca_splits._base_task import BaseDbcaTask -import evaluate -import numpy as np -from datasets import Dataset -from genbench import Task -from genbench.api import EvaluationResult, TaskType -from genbench.utils.logging import get_logger - - -logger = get_logger(__name__) - - -class EuroparlDbcaSplitsComdiv1De(Task): - """This task evaluates how well an NMT model generalises to a shifted distribution of - dependency relations. In practice, this means that the test set includes novel - (, , ) tuples (=compounds) that were not seen in - the training set, while having similar relative frequencies of the lemmas and dependency - relation tags (= elements of the compound tuples = atoms). 
- """ - - def evaluate_predictions( - self, - *, - predictions: List[Mapping[str, Any]] = None, - gold: Dataset = None, - ) -> EvaluationResult: - result = OrderedDict() - for metric_config in self.config.evaluation_metrics: - hf_id = metric_config.hf_id - if isinstance(hf_id, str): - hf_id = [hf_id] - - metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) - - refs_lst = [g["target"] for g in gold] - preds_lst = [pred["target"] for pred in predictions] - - ref_type = type(refs_lst[0]) - pred_type = type(preds_lst[0]) - if pred_type != ref_type: - if self.config.task_type != TaskType.MULTIPLE_CHOICE: - raise ValueError( - f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " - ) - # Convert predictions to the same type as the references - if pred_type == str and ref_type == int: - logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") - converted_preds = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - preds_lst = converted_preds - elif pred_type == int and ref_type == str: - logger.warning("Predictions are ints, but references are strings. Converting references to ints.") - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_refs.append(ref["target_options"].index(ref["target"])) - refs_lst = converted_refs - else: - if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: - # Convert both predictions and references to int - logger.warning( - "Predictions and references have the same type, but it is not int. Converting both to int." 
- ) - converted_preds = [] - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - converted_refs.append(ref["target_options"].index(ref["target"])) - preds_lst = converted_preds - refs_lst = converted_refs - - extra_kwargs = metric_config.compute_extra_kwargs or {} - output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) - - if output is None: - raise ValueError( - f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." - ) - - # Update output keys to include the metric id - metric_id = "_".join(hf_id) - output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} - - result.update(output) - - return result - - def chernoff_coef(self, vec1, vec2, alpha): - """ - The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) - = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) - distributions P and Q. The alpha parameter determines if we want to - measure whether Q includes elements that are not in P. - """ - if alpha < 0 or alpha > 1: - raise ValueError("alpha must be in [0,1]") - # use log to avoid underflow - return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) - - def normalize_vector(self, vector): - """Normalize a vector to have sum 1.""" - return np.nan_to_num(np.divide(vector, np.sum(vector))) - - def divergence(self, vec1, vec2, alpha): - """ - Calculate divergence between two vectors. - Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. - Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
- """ - return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) +class EuroparlDbcaSplitsComdiv1De(BaseDbcaTask): + pass diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/task.py index 1db49a0..7fcf724 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/task.py +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_el/task.py @@ -1,116 +1,5 @@ -from collections import OrderedDict -from typing import Any, List, Mapping +from genbench.tasks.europarl_dbca_splits._base_task import BaseDbcaTask -import evaluate -import numpy as np -from datasets import Dataset -from genbench import Task -from genbench.api import EvaluationResult, TaskType -from genbench.utils.logging import get_logger - - -logger = get_logger(__name__) - - -class EuroparlDbcaSplitsComdiv1El(Task): - """This task evaluates how well an NMT model generalises to a shifted distribution of - dependency relations. In practice, this means that the test set includes novel - (, , ) tuples (=compounds) that were not seen in - the training set, while having similar relative frequencies of the lemmas and dependency - relation tags (= elements of the compound tuples = atoms). 
- """ - - def evaluate_predictions( - self, - *, - predictions: List[Mapping[str, Any]] = None, - gold: Dataset = None, - ) -> EvaluationResult: - result = OrderedDict() - for metric_config in self.config.evaluation_metrics: - hf_id = metric_config.hf_id - if isinstance(hf_id, str): - hf_id = [hf_id] - - metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) - - refs_lst = [g["target"] for g in gold] - preds_lst = [pred["target"] for pred in predictions] - - ref_type = type(refs_lst[0]) - pred_type = type(preds_lst[0]) - if pred_type != ref_type: - if self.config.task_type != TaskType.MULTIPLE_CHOICE: - raise ValueError( - f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " - ) - # Convert predictions to the same type as the references - if pred_type == str and ref_type == int: - logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") - converted_preds = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - preds_lst = converted_preds - elif pred_type == int and ref_type == str: - logger.warning("Predictions are ints, but references are strings. Converting references to ints.") - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_refs.append(ref["target_options"].index(ref["target"])) - refs_lst = converted_refs - else: - if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: - # Convert both predictions and references to int - logger.warning( - "Predictions and references have the same type, but it is not int. Converting both to int." 
- ) - converted_preds = [] - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - converted_refs.append(ref["target_options"].index(ref["target"])) - preds_lst = converted_preds - refs_lst = converted_refs - - extra_kwargs = metric_config.compute_extra_kwargs or {} - output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) - - if output is None: - raise ValueError( - f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." - ) - - # Update output keys to include the metric id - metric_id = "_".join(hf_id) - output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} - - result.update(output) - - return result - - def chernoff_coef(self, vec1, vec2, alpha): - """ - The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) - = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) - distributions P and Q. The alpha parameter determines if we want to - measure whether Q includes elements that are not in P. - """ - if alpha < 0 or alpha > 1: - raise ValueError("alpha must be in [0,1]") - # use log to avoid underflow - return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) - - def normalize_vector(self, vector): - """Normalize a vector to have sum 1.""" - return np.nan_to_num(np.divide(vector, np.sum(vector))) - - def divergence(self, vec1, vec2, alpha): - """ - Calculate divergence between two vectors. - Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. - Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
- """ - return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) +class EuroparlDbcaSplitsComdiv1El(BaseDbcaTask): + pass diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/task.py index 3e7b7f0..8fc677b 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/task.py +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fi/task.py @@ -1,116 +1,5 @@ -from collections import OrderedDict -from typing import Any, List, Mapping +from genbench.tasks.europarl_dbca_splits._base_task import BaseDbcaTask -import evaluate -import numpy as np -from datasets import Dataset -from genbench import Task -from genbench.api import EvaluationResult, TaskType -from genbench.utils.logging import get_logger - - -logger = get_logger(__name__) - - -class EuroparlDbcaSplitsComdiv1Fi(Task): - """This task evaluates how well an NMT model generalises to a shifted distribution of - dependency relations. In practice, this means that the test set includes novel - (, , ) tuples (=compounds) that were not seen in - the training set, while having similar relative frequencies of the lemmas and dependency - relation tags (= elements of the compound tuples = atoms). 
- """ - - def evaluate_predictions( - self, - *, - predictions: List[Mapping[str, Any]] = None, - gold: Dataset = None, - ) -> EvaluationResult: - result = OrderedDict() - for metric_config in self.config.evaluation_metrics: - hf_id = metric_config.hf_id - if isinstance(hf_id, str): - hf_id = [hf_id] - - metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) - - refs_lst = [g["target"] for g in gold] - preds_lst = [pred["target"] for pred in predictions] - - ref_type = type(refs_lst[0]) - pred_type = type(preds_lst[0]) - if pred_type != ref_type: - if self.config.task_type != TaskType.MULTIPLE_CHOICE: - raise ValueError( - f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " - ) - # Convert predictions to the same type as the references - if pred_type == str and ref_type == int: - logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") - converted_preds = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - preds_lst = converted_preds - elif pred_type == int and ref_type == str: - logger.warning("Predictions are ints, but references are strings. Converting references to ints.") - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_refs.append(ref["target_options"].index(ref["target"])) - refs_lst = converted_refs - else: - if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: - # Convert both predictions and references to int - logger.warning( - "Predictions and references have the same type, but it is not int. Converting both to int." 
- ) - converted_preds = [] - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - converted_refs.append(ref["target_options"].index(ref["target"])) - preds_lst = converted_preds - refs_lst = converted_refs - - extra_kwargs = metric_config.compute_extra_kwargs or {} - output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) - - if output is None: - raise ValueError( - f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." - ) - - # Update output keys to include the metric id - metric_id = "_".join(hf_id) - output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} - - result.update(output) - - return result - - def chernoff_coef(self, vec1, vec2, alpha): - """ - The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) - = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) - distributions P and Q. The alpha parameter determines if we want to - measure whether Q includes elements that are not in P. - """ - if alpha < 0 or alpha > 1: - raise ValueError("alpha must be in [0,1]") - # use log to avoid underflow - return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) - - def normalize_vector(self, vector): - """Normalize a vector to have sum 1.""" - return np.nan_to_num(np.divide(vector, np.sum(vector))) - - def divergence(self, vec1, vec2, alpha): - """ - Calculate divergence between two vectors. - Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. - Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
- """ - return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) +class EuroparlDbcaSplitsComdiv1Fi(BaseDbcaTask): + pass diff --git a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/task.py b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/task.py index 9358e5f..8e27ac1 100644 --- a/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/task.py +++ b/src/genbench/tasks/europarl_dbca_splits/comdiv1_fr/task.py @@ -1,116 +1,5 @@ -from collections import OrderedDict -from typing import Any, List, Mapping +from genbench.tasks.europarl_dbca_splits._base_task import BaseDbcaTask -import evaluate -import numpy as np -from datasets import Dataset -from genbench import Task -from genbench.api import EvaluationResult, TaskType -from genbench.utils.logging import get_logger - - -logger = get_logger(__name__) - - -class EuroparlDbcaSplitsComdiv1Fr(Task): - """This task evaluates how well an NMT model generalises to a shifted distribution of - dependency relations. In practice, this means that the test set includes novel - (, , ) tuples (=compounds) that were not seen in - the training set, while having similar relative frequencies of the lemmas and dependency - relation tags (= elements of the compound tuples = atoms). 
- """ - - def evaluate_predictions( - self, - *, - predictions: List[Mapping[str, Any]] = None, - gold: Dataset = None, - ) -> EvaluationResult: - result = OrderedDict() - for metric_config in self.config.evaluation_metrics: - hf_id = metric_config.hf_id - if isinstance(hf_id, str): - hf_id = [hf_id] - - metric = evaluate.load(*hf_id, revision=metric_config.git_commit_sha) - - refs_lst = [g["target"] for g in gold] - preds_lst = [pred["target"] for pred in predictions] - - ref_type = type(refs_lst[0]) - pred_type = type(preds_lst[0]) - if pred_type != ref_type: - if self.config.task_type != TaskType.MULTIPLE_CHOICE: - raise ValueError( - f"Predictions and references have different types: preds: {pred_type} and refs: {ref_type}. " - ) - # Convert predictions to the same type as the references - if pred_type == str and ref_type == int: - logger.warning("Predictions are strings, but references are ints. Converting predictions to ints.") - converted_preds = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - preds_lst = converted_preds - elif pred_type == int and ref_type == str: - logger.warning("Predictions are ints, but references are strings. Converting references to ints.") - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_refs.append(ref["target_options"].index(ref["target"])) - refs_lst = converted_refs - else: - if self.config.task_type == TaskType.MULTIPLE_CHOICE and pred_type != int: - # Convert both predictions and references to int - logger.warning( - "Predictions and references have the same type, but it is not int. Converting both to int." 
- ) - converted_preds = [] - converted_refs = [] - for pred, ref in zip(preds_lst, gold): - assert "target_options" in ref - converted_preds.append(ref["target_options"].index(pred)) - converted_refs.append(ref["target_options"].index(ref["target"])) - preds_lst = converted_preds - refs_lst = converted_refs - - extra_kwargs = metric_config.compute_extra_kwargs or {} - output: dict = metric.compute(predictions=preds_lst, references=refs_lst, **extra_kwargs) - - if output is None: - raise ValueError( - f"Metric {metric_config.hf_id} returned None. " f"Please check the metric implementation." - ) - - # Update output keys to include the metric id - metric_id = "_".join(hf_id) - output = {f"hf_{metric_id}__{k}": v for k, v in output.items() if k == "score"} - - result.update(output) - - return result - - def chernoff_coef(self, vec1, vec2, alpha): - """ - The Chernoff coefficient c is a similarity measure C_{alpha}(P||Q) - = sum_k[p_k^alpha * q_k^(1-alpha)] e[0,1] between two (probability) - distributions P and Q. The alpha parameter determines if we want to - measure whether Q includes elements that are not in P. - """ - if alpha < 0 or alpha > 1: - raise ValueError("alpha must be in [0,1]") - # use log to avoid underflow - return np.sum(np.exp((np.log(vec1) * alpha) + (np.log(vec2) * (1 - alpha))), axis=1) - - def normalize_vector(self, vector): - """Normalize a vector to have sum 1.""" - return np.nan_to_num(np.divide(vector, np.sum(vector))) - - def divergence(self, vec1, vec2, alpha): - """ - Calculate divergence between two vectors. - Atom divergence is 1 - Chernoff coefficient, with alpha=0.5. - Compound divergence is 1 - Chernoff coefficient, with alpha=0.1. 
- """ - return float(1 - self.chernoff_coef(self.normalize_vector(vec1), self.normalize_vector(vec2), alpha)) +class EuroparlDbcaSplitsComdiv1Fr(BaseDbcaTask): + pass From 416d9e2bf6a99c1b61bb8bbd8c84a3734265a48c Mon Sep 17 00:00:00 2001 From: Amirhossein Kazemnejad <2122102+kazemnejad@users.noreply.github.com> Date: Sat, 30 Dec 2023 21:16:56 -0500 Subject: [PATCH 55/57] Add statsmodels --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 30daf88..1d0752d 100644 --- a/setup.py +++ b/setup.py @@ -19,6 +19,7 @@ # Numpy is needed for some of HF's metrics "numpy", "typing_extensions>=4.6", + "statsmodels>=0.14", ] From aed4787008fb5ef9d2bd5b44f34d9c264f9a1181 Mon Sep 17 00:00:00 2001 From: Amirhossein Kazemnejad <2122102+kazemnejad@users.noreply.github.com> Date: Sat, 30 Dec 2023 22:08:52 -0500 Subject: [PATCH 56/57] Manually copy files from https://github.com/drndr/genbench_cbt/tree/nl_codesearch. Credits go to @drndr --- .../GenBench Evaluation Card.pdf | Bin 0 -> 72032 bytes .../tasks/nl_codesearch_clf/__init__.py | 5 + .../codesearchnet_adv/__init__.py | 0 .../codesearchnet_adv/config.jsonnet | 58 +++ .../codesearchnet_adv/doc.md | 19 + .../codesearchnet_adv/task.py | 46 +++ .../codesearchnet_go/__init__.py | 0 .../codesearchnet_go/config.jsonnet | 56 +++ .../nl_codesearch_clf/codesearchnet_go/doc.md | 19 + .../codesearchnet_go/task.py | 46 +++ .../codesearchnet_java/__init__.py | 0 .../codesearchnet_java/config.jsonnet | 56 +++ .../codesearchnet_java/doc.md | 19 + .../codesearchnet_java/task.py | 46 +++ .../codesearchnet_javascript/__init__.py | 0 .../codesearchnet_javascript/config.jsonnet | 56 +++ .../codesearchnet_javascript/doc.md | 19 + .../codesearchnet_javascript/task.py | 46 +++ .../codesearchnet_php/__init__.py | 0 .../codesearchnet_php/config.jsonnet | 55 +++ .../codesearchnet_php/doc.md | 19 + .../codesearchnet_php/task.py | 46 +++ .../codesearchnet_ruby/__init__.py | 0 .../codesearchnet_ruby/config.jsonnet | 56 +++ 
.../codesearchnet_ruby/doc.md | 19 + .../codesearchnet_ruby/task.py | 46 +++ .../tasks/nl_codesearch_clf/config.jsonnet | 35 ++ .../tasks/nl_codesearch_clf/cosqa/__init__.py | 0 .../nl_codesearch_clf/cosqa/config.jsonnet | 57 +++ .../tasks/nl_codesearch_clf/cosqa/doc.md | 19 + .../tasks/nl_codesearch_clf/cosqa/task.py | 46 +++ src/genbench/tasks/nl_codesearch_clf/doc.md | 43 +++ .../requirements-usage-example.txt | 5 + .../statcodesearch/__init__.py | 0 .../statcodesearch/config.jsonnet | 57 +++ .../nl_codesearch_clf/statcodesearch/doc.md | 19 + .../nl_codesearch_clf/statcodesearch/task.py | 46 +++ .../tasks/nl_codesearch_clf/usage_example.py | 331 ++++++++++++++++++ 38 files changed, 1390 insertions(+) create mode 100644 src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf create mode 100644 src/genbench/tasks/nl_codesearch_clf/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py create mode 100644 
src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/cosqa/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/cosqa/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/cosqa/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/cosqa/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/requirements-usage-example.txt create mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet create mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md create mode 100644 src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py create mode 100644 src/genbench/tasks/nl_codesearch_clf/usage_example.py diff --git a/src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf b/src/genbench/tasks/nl_codesearch_clf/GenBench Evaluation Card.pdf new file 
mode 100644 index 0000000000000000000000000000000000000000..3d4e16e3e1eb452ad3de5bf0c25dca0cae7c8c2d GIT binary patch literal 72032 zcmc$_Q*>qDx-S~rc4lnbPAV1Kww;P?+p5^MZKq<}wr~B<+WWM%+d2>D;kJ9{Yy+KR z{xH7TAHGf|FDg#QM9&68*7KbI1;YYh1lSo`!tn9}7^E$1O$;0@JWPxMOn*xN4rWFc zW&pzvfEIw0jTOMe$^y^WU_&e_@!|nmL;TIN1RV;uhAnd;H zXyXLKZH-Ob{#LL6 zSh)UGHnscP!U#|VFeuvDIscvEf88tnB^1E=m&d=;x&AMrxc-A221!wX7B4%8DZ2@m z0UL{{2@|J*sevJ*p~+tf7#R%=n3xTX*!X!lxlD~1O&I@jZOqQW#lgg4YGi0^z{bSJ z!f3+A%E`vB^N$8j&WK>q?Eg06|7bHM z0Lxzo{+Aiq+5e|Z`v0h;k6@&)ua72c*7dV=QM9~VXlf`(sq1%=A zkodBYUKXVuV*TaTMZ7Q;N`gcv*k~*cACT8(ocDWRXEG%2#tkdUP~V!6fj17u2z-SY zjmjMgNk_KEED^J19wqiSl!z&!sQO4FGn7&KQ^X|30m-Mgl1Xg50&7{l41sq}S9WLm zqZf_#U{x1!DvQ^TzO32-tbl^(6UGcuR*Dq&2_jNp2+SXHQOwdf=80~4qM8fFx+eEW zhUi1$`$JRY8}#>ZPSUpm1K^)*H4(H7bIj~ZxE{D%l8Ke03-xfPJ*{F9PhX@;SPpPm zR41L@d)lrk`DUkS%n+#N*2-wGC39#GKqUpQoC8#Drj_pe(U-T?^J(mp2Ff6d#N6Fi z^SpEkDyq#Wkz^>kVkjpIYMsCt2#l=U9ZDgM3(*^pOpKma5XzyfKj_1DQ?PB;I&Im{ zYlQSNnPJ1h7bS19WGnJ>)LiP4^Lq)qs5BRt4{A@M9zNiABz4|NSY-CQuSHG&`265tQZ&qUQ6VT-Bz%QEOkoum_N zsqpQ`dEY<PHNltWb)^NMT6B;lf0yxN_n=EgwL#zo)yG&@cRilcZ()yl1q&8)tW7fk03{B4{9)YK*+k4f) zjJMG&aoDm!1Ar|@0e0WAVnx@#kuL0UU_E&Zs-#~acduU@2Yub(j#Cq zw(gwcqxNif-7JzwJHoAjH48r;g+8 zAEe$Nri=>Vr}I2IEK(x2NQd?gW2U<}VjlarExe_Vg`_E&vVgqa=@Go<#kge>^B>>o zt9-6f9{U_5c7dKuIKf>ff1gk5YmzNu-!zQkJpZZ1>{F$N+}Zt!AhC(^WO=(%|Dfdg zZKluD^(|jAq*Rc$AM1ls;44p>qRG_Yjo$B1SAHQNl9prepKx>@0s5 z0RM_y%m5}PMwY*eo_`hpSuy>0AZKD`{2My|{VzAV1za9?4TByI?N_Q{UqnYoM`Np$ zA5foUfG5?}m8=5|J)J70VwUjl)(&*4t&w=o>$c}_v#rcY(s!-yrLUzNXwWHK8&?Y} zOsTklNS3JV%s?qHKn1-~?-cmX-rnKgL&!{j7I*+p=sTf+<-{K#UmMvlDnM&Y1OQxU zkV6LX;g)xBf+}y|18;N!*#aZm3L(?e10iCj-t@rf0EmIO@*r1m@}`019ULHp=CF-* zw7ZD(wRl#Xa(wZDrBx?^Yz+*Anguq2=VbvtmDLC_Phw+=Q0X9&L- ze5|)r;q@ViBj_MMpcxQ}TJ%xuw~M_iYxn@5oCeUw(G8%a?Ta))oA-?BGk}VS=V*|*l79CC>x*nXT;G5#D0n7Lv)2G`PY6Vt*@=;pA7HF9;*Ed%>7u)o5 z@8aZ>@Lr%MP3g8NE5Duje4FlDi|TIx0>3di#ssRpzi|R?Z3BM+3YdZX{*Jkafcol) z@qMA@S#Jdj`lfz%m;I(*-M2$v{Okfu`hCX~?cPmlBl(>Qh;r=i9oKt&?S1(czvt3_ 
z`_g`sjDIJed~3xAbQJ98dKCA-eF=Evp{~rm+eMlkwGZU92yPy_V`hCXD-%8}pN+S2 zYJS(qVncXsfm@hqjrhhQ!>Isy`d4oI8K}DY&K)weT@gkeLAgS7Irs;ByJ-SlY-nqL z7kF;b(}W%$gT76U38>^9AHH5E-A${~FZo(y&)ma<5+UVryxo%tkofp9Jl}I`26lcK zGx;SBtb#vXfVkOC6YB4S2z~F1gSX{|--n(skV5S!e{-Phf!kYt<3bCZ`+W=b1IbqS zl3bbDItF|Jy8~shfBSg>$#(b(`T$Y95XwXKp1u&iOzOxU`}Q7>(trES#Or(m=aPtg z{d4*T1_#Qj{o8`D@wf8c;{)=Jm*VaV^=sL$YJ({5*z9Nq-7V*<=O)(<2kHR4URW>9 zsSd^4Z2RVFU#5$-jpF^O^#yLA*$0B9;~L~3@g1N2C$*E#^t#Gulk=H9;njRG??Un) z#50NQfhY2$n;7G%Y0nau1jWZnxT+uBI0tUqCa2vV0@j=l4~Y) zGhoJkZQsqOsoY+>2gKis2y8u4OqHL-&p~t&1$S2HW6mHbo|ETYk3Bbzhj5;0$e07^ zGAVN8zlAU-(X5d!&%>u?9xM8R+7X^oHYLLCRA~r2Dn8<6pSU)>kPRh9*pFPr@F?g+ zqFOs2LO}6u5Id;?m!K`iDcbC)p7@w|lIIXC&_ARuTJc49_ZPv>%N|>3#P3nYHZqP)pQ02MEGhDkqzD}ta`xvR#-PD4@K869mY4tY2 zj?yB<4Y!ya<0e8=fTnRx37*nA@*ARQdCFFYh0?0iuXSXrz%_#F;SbtdpG@SFL%l6y z(iIV~^SsDvhR1uDaUsr&_$e)NejMGClx|V!F27WgmraBS$xbct54++mcomu8MK72| zpJEG8{3qj+-|h0EpU}p8F&Sm9#itk)#i@m~e3)-{JE~YO$U|Piw~PJ(+7-3L*}BK? zT2zRkr3UJ_p}fJlaw;tf$edcNa{Hi1Rw|aVzj>I~SpHCLz>u912Ix2?pfswu54Dlj z7Bm{jDWsHLLP_{U*}{Q?(_(*C@m$H7)Qq1UTr) z8k%l$Ha_IvTd3+x4?AFt$nzh9YPT;4L&LW-SZJ=9_Z#Wj_nWjxPy;pip)nNuWjzz3 z9qvVAU#pLo^hO62rP7aT@BuPzmu?n}1&f}ds8A+(gudk-8n^;B`&ud0+*!8c1%!}> zk4*bzjt)%G*4dVr>`#?c;SwY|x$QmudW1)a$MYTzkDv7go(fH8wzQ7}zTS5JuJmBu zE1FyiIpERo`rBPb?JPTebFlZ85@h9qJTi`P*KE$T)6W%?KF2)bkBh52A9 z5lU*MwkRAE2CF>FR0|%cI6;X<^kYpgo1Jg!Shug^LWYQ0?{#*&P?#}sy%qdNv-O)@ zatWQR)@LH~NPcwp$~E{I=L?B%wRASY=quqxKiC2_9>gZc>-Hv2MPZX;n=2J79wQMg z>kdJ}+AIoc{Ru2lgUA*n0|SU8*6kR?`MF5bahPX_gfXh)g0SRxNL81?lJ8)->2}`t z<@q6)pIR9Ag=Tr!HH-Ig?$JY!&ycn-yBJ|cQ#NHJVwVmD2NPmbF13%RB<^w>K)FgD zQ*B03NmUm%ASU>+JfmaW7rCFu+UM97PW^Z|?HT$w-nO}B`qpWQ63Q_ILhfPS9WV*T zeBZCvBVvA0uDbFD8$@LW0qJ@3 z`aNPhVVW|8`H%DDbzgMtD!ti;+UU%mRH|4W%SPTr!>R8XTHW1zp3db<1m*NshBsyp zK&D2eXcn?<;1P*h8V5-aE+}CfTrniY#?3`3}T;2nbMMGd=C>Q@}o2ej%4=WF35=?zz;^N!^VjUMtfd1wn| z5K8L6WG#(vpP=w){oc{|+mL``C^{QO_gIp|=fv#@9N)oO2k3p&G z4@ACuh_46fH)Uz^vPSaJ_yLLm6&+_6ID%(4g!knY0vYiHLshlGDS>yL4 
z;3e&153$dZ;xwF(*Lrv_#&)X>O>x$Hz(FHO13=#@{hb)L;M&s*BO=W_k4-CkHLCaB zCR4kagRgr3(8sI^fv_ov8!VHdjIQ^*Z8_t3lnqu~&>x*X_hRZNB z%&rcNwI$qkmNS(j{}nRpm|p;Y{p_+5PlsF8eL0AI%^Us0Nk># zz7g)-C~N%XIzz(M78|)Z(YkAh(t2@CdnK*G9HabMH|fQzIp%A3zEk+A7QNrGWzDCj zkI{2zTs|{}q z$)jR%cKh~{gF7usKg9lkv`jJ^zQk^}0h84i6>(OXBaDkU!Wh_F+LCPB8@mR{`({cE z?xO-2gS0Tq`kUs36XNL*5brl0)`)NG9pI4bt8R$xM+RsyJ5 zxL7eu6lc(IJ%#OveMcY*r5MO0KVG^U#q1*5=FSF;rA1+jChX#*2_quoOL3}4ZGMFE zb#;9vLBV<>_B(@y4dHc2_{*IgC;h-qv)2)FqnI7KzPz{MJW?WjzaB9oLmMu5Unt;B zU>yj(bhyjC(IqC`Ts(q7Vmiio@%<>of3cw$`U+NpP~ykVUa643>g2n+-|es&n-dgx4M-QWgrIHIl4*6)_qC% zKC@}Dbua%82)}!pfP!+l8r_5nkD%i+wVi1nozBl9J)MX^7DRzjdYVaf>%y~;NcyOQ z&`iU~iu{#3?yV3+XBoT{X1`a!@#agpGts5!$#Z7L|31j1trK?u%&WezC5wsv#{|m- z$-AyBgo?z-0mO;&RwD6web@qlIb43e|BYF^BF^33+E0t{s^x&s;f$cRD%?!%aQ3pV zB$Qc65lF5vwx)cWDTl^cHwR^9L<$MgHhp zMY@LsGxQmbDzUKC1Di3!=pmp6$KG+?tK6T5e3z^}fG$I* zzj3>to-0k139hrI79!%mNL;ZPOhh+ekA#AQb5^b0UryElfFg7uJOmAf`2V#7|KDdmQ z5us|~F1;qR+6-dG|3gSn`fw!%rh^t#AtV41woc%P?k6LEtECikqyJ~O_;&xtPLzT# zvR1@q0&IEY_K`Gff?y~`cBWo;_MlCA`NP4kR^KPVn$eVnJ29sIr1q~!!wEIj(T?ZV z$`_cJ1r?IQSYV1|N1?_e5?XgiSbiNa{*XT|*Fg7vH_bEKN_>$}v8dCSm!G08@=Tba zCSq^Je9?2JSxM^7V+c#@=M;q3TqAaqJ%kpxK#^u@<-uU6nM8aO?XvU>=@g}~ykz#O zP^Vv$LT#BH{9{(c#)fW$KH0i--Ab)R;xzWBJh)JORio=`O3DMb1aq%MFG|urx6b=z zBXo)ag3VyT&Kct0zohfXMkRk{k} z?FO+9n8+lA_q=|@t3!&1GQhIPR+&f~!-&-&mZ80OoJ9uy?1sadljIk=+m2 zI9MmMM`~~SLp`^}4_-DcHTO<8db(^ZKBMi}J!dY;o+*83va8s&2D5eTtNk`Vej#w5 zuWF*Vnc|v}^DTR_(K*rl6JwlYop!W1BQusIlOmjZi;KTcyEYN0uO+}ev};fXa{G8* zA$e)*r*NUPjehd`(XyAIF%3ta8zK73{bkDPJ8-u|z5(!fI-05H_;QhepQxV?ASFHW z*{D)OXz_u@AYk!fl;+Q7A9YB+EfdpBw4(#$Skf(Kq}-F@qTL52UUfL=YXo{uOn=@N zEktbYv5M5?1T#aWwhSmJbMT-?Av>xEiO+wjOV+dLi{|bl*jAF(;cnr>ru9Gknp0ceB8?#w9L#YC!IQK>+)jUD zGbR~Rw_39EEU2HRihTc=GIQ%slWT3z9V^lN{^_gIgcm($xLjF|_j=Q)a$N&jY*R-J zI^PGT7_-55CRz09N~=q@7bes?3zPg((T!;bW|yFN+O0TbRoAR>TCJvVc2YUOa>%6A z!aG`#p~1k+`Z)XLH6L>%k6$|Jdq{DDam7{f@`F9sJ5{2m7?Z0=VTJAc;DnWKXV@qo z52H1OV4)6-W64`t7?%kAz~oATq1SI2X+sIwx={S1d74hWj<00xV1St6XS)Xvvv`Er 
zZa+{FG1gLo*N~v}-li0af<<0#b#;sUHG>wDs}$c8+j6sKP5%-4dfeE@#_3Q-8noHB z!mMGU5VAsap3!N8>3!EG-lo5CVZO+F^4E$znTgDx7;9#n!Kyii^^IDRyKIJ6IGA8thpr!K?{~ECta_UZEGE1s=gg+?Zw$uWYeXMY2-4jw$u{x z#d`nl(5%sE6s0QO9wvm7h2Y{#4aUy!GwNa~!$!f%{toz&^%UO~((IzE_SVF5lQ$KN zys52X(JmV&;qymOEO1Hjs}=6{vzi@*HC|G`kdMvdP!N8TG`AjiK`L^L8opUtuU(sg z6SlJAuE#6zPf>mH!trc}>nX#DbjLNlFOk|eL0ms72y)hD!k=|#zJH$Oy<^4M8`bws z^lyR@gjUIv8F5Iek3FF6T=5IEhb%>m(^N9cc?wa}q2s!bVQ96)3i)sXGP-}(Xl*b~ zIka+KE!ZmZJo0NY`PTQmX8eBa61#9ly>V*v#z601)m)THu!)@SIFM;;%h#L~Pntaz z9<8Yg5-Z7w6*2sPx%Xr{oi17gG6lw-ms6;-K6ka$9-M)%z|||n#Cq1C3M%aEVrxR& zl(>;qB8T(JxyyRObnEKY6Aw_aFtp0MnoE>inu~_rLsDEtgh;@QArw)Rcm#Q@>Pp3Y zRk26GDS5(6L1;rLv1M%J;l6B4RatK_OkWR+3n&ff-d5?3lD2WRZqN?RiC-3Y?I(<& zd}r6_1TpX6pYj>7_G1|TQOTxVN9RdIqBqD{(0(UvIH2ws`H7;fpav2abu|ZgK?*nJk3sVj0+r}#*)!{AdVk~_y>tFK{ zU@wWwHsvdnq(*CVt6!*b0l?3zvI=1&z`x$}2w#6#4@uGc$&FKGe!MXseFL z8zXk@!kg>?e=&lW`IKJ8Ro-Ud1gvJRd_-nbbb ze%2#}T|?JU#&%WnkSws31_r!1FO%naBC^likHl`Rd1}?u3{4KQG~4MVX&TN*uB~(d zts0$!VOnQu72|#6RQjZT??ykV)IgRbI|3Yn$exlNAAvBu+qVP~n#EVN#L6pfdQ&{Y z2{s~)FDM)|F~)hW)+jUhs##`w$JRRhG|I?NnICA(VhJFkFqg$u&I9c4`dq(Mx0~7- zgNEy0k$yZmgT1;m+Fz;B5YI0vk<6p z1mVSkyAi~0J-Z%wqU;xTArmrKZf-bWhz=$$zDwU<}Wk9pVqP_6sY9fbQ0@QI17bfTwJ+D{9YtaGQ*tP>U-)b1Otoo?bJMZc=v0lRXONG{b)^;zp49u3*h$&CD{!Jh*w#yV7Lh(YuDvU*kN3pt`jo;8SFak?lia{;xk`?>uk5F+G9=ArdSN1Q1* z^Hx#kJg*b^Z(d|%SOz;j5|6u&K~LksLrSe+JGu_3Rl6CwWY%aaC<>a=X$u_ffRqL% z^WC(Utm!(3Fi}i~W-?Y}%){-Fv+Ho;D?RvHN5#X{KL@Yj;YB(EiuHaX+3X74S94~_ zK2M9-@|hD=228)9`woW<37ud{F+c(WoJJvBw6CkAHLVdk9= z;vkV(L{-CL*6E`y1ntuM6NnH9mrNZ6a?N#nS&p12gxLdMD5)V?lo<}or^>JRFIB0N zih6C_q+Ul8l|tIkiZM^FG?uptU~tC;N!J@)kb_dzxOjG9GW(@L*=r|YZhsmlvn@So zvnSUd7qAHmVShYW6L!UYKTOw#wW`8d0yC3 zTmaC^#)Pcr;E0G=7<5~itY*+-e7!gwA z0+~NUy(VZy5Is{mk@{;+JjOT)qiKPD{4N%RFMz+p;Ho6-@njSy2Oxy=I&GurH{M(_ z61qj?n7+_6dYt0wbau07I_)tdylkp~4{&d9E&VbJ@5rC%Y(LaXhfNoNUnY(|tiDN# zY8ywNi_?XjVMg*)Fh#ZSDIPX~M*8*A+A^~t`QqtI@i_)T*<_FONR}ao6i`mTUHz64 
zt4rL$fLCMrKy&vyzL%feg*p+m*g{LQY4q814PnuwSVg&0B5Nlqm$E%WF_0oS=W+OVH4Z3p`V7n!HD}s^jcMe>Y~GLne`y#7mTLK3~y){$L~+var)g} zf(o~>w`9WewpirobVC)E5F%t6KyF@**pz>z+%cYc8F(42{f=0V*SF)4ERIh1Ayp+B z(N=&(fcK__jx)8U_ypK@f30*Tb^ETdGw2SF+Mgj~v||YOP6z_%CksB<0g~<=Am~OH zv^trD%l8hMlnh5jVO>?Jn2>YBXPIc4XGH<$$0!CDH-_Z0pd#{yE%mn&Y0-!)W)3MvTji8>^j`#Obf*^Cwz zj3jA|0U`s)*Q!N-ew(*A%Y=v7yatxf8TsT#=PSHo3xIxaE|WcCEeiyNX~q{|t%f5p z@VE+C;?!g=8=)>-QO#Z?yd{HyRgYw(C6xQn>;}%8Xkf6xSi9zRb(fcr7pe;WCNvg2 z$@6Hd=f&G|+Yc(^rpV5IQb%RUQ&v#HDPI4XFF8LtyzhoG9MkZ}YFSnQEa}qu#d}Ra z9yu}987kX`8iD{6vHT8U#4)Ju6u0>l3C`Phf5%skZ580uKqkEF`dRvWUCRE)d8eD1 z0tF8z=uihJE+6}^1V_lQjkN44Y`W&Tt<~$fsg6%x@qGJT-^d15k->9Mkl6E3M&~Fr z!a!fu_hvBCybW&l1WF%u4r_spWf5pcbBV&GuuM5;D_R? zhaYkK07_<7`g3p3C)o3nQWTvr?y!;NyIYV1Zlcy4mG>!3LNdDscT%lKaMISFFo(u( zVpv%}@50yK6=ivP@T|EL#dDmjdcs-SQ3k8sd2T5LLJrLDvD-N}A}t~%?SCEfRpxW5 zc8VvSNfQOr8SDb*m~Xv!uHS`d^`WIs?sWxx-ooufMs11Zldp+DSK$@Qj$3s^gB(313&O;-v)` zA~YP!f(-@gMZ68Xjz2NeAea$N)FawSqikJbPu+=jhjGDPkUV*q;~IoZc-*y_F0&|S zOIBP#kw-E@9z;OLnc(EwglsW%GR8gmVR`MnD4*JS(&x%qVo?lZ0;m7Ujc9@1!OdJR z@47P;TIY}LTH5VbBFUQqgOE6tCa|>nMKv9A{)IEJ zSKfuc(tcf4m!>Tawk(<==~tyyVeSl-2S3fIgP*gma0GWc_%gqfU2?2Q$KYF&koPAN zzGa0~c(X!IEwxP~Uw|;lovKfmbqaFXDT5ev+P%bCgY)phfGI-PM#{_HP3#X8kn9$| zGLKfm^Eu*HQE{vkO;4}8o}x$Axla>C!Ifj z8r&B?ZTJy&=X6?xkR5kypd&gDDZ)71CRP3}FB}xzowV<_ z)A_yRHkS+@|J`!)+)5!m7Sp5JvrcyMM7#QcNG;Sl$sM@*mF995FWIpH$0-0gBy7(2 zL4+qTihx$C%SW)*%Z;qG&BJDMFrX%tJ#0!vi~50inWv5$uwWWzcTIPS>N%Ac?V~>v zK8cm{xiNiIX(KeK-rQ&&<-}004}p==JihMP5O!Z{WY|$0-jR?6w(-m?HUdZH6eOhS zb6g-cZK~K=3-cQaDeAh)4p6RfhG)Qc27w{}{Q>SAL$0=*e(c{1B@2AODE1vvptfK| zB-fI}l#*?cG-T*6R*Gv?fMQv3WEB|4d;z_xc&O5^5gr@*Y%bt-kd71~#$5(6XO8N> z)Z=mCAyVndO@bmnm$;-CSMADL;~R0h@?(SZVQTb|Tv-u4fG6N$Ctkc&`%nlqf~@F6 zee1^Fi*HogZjG2B8Ey_L_|8?*34MOm>LGU32TD!zseX+!@M4e+n#uCjy ze0qbAOGhC0dxuSiz8FH&MO>heCJq0!Yi@UW5P&5j#C+Clh6jEwIMHnw87q<2z7#lM zQaohRv{v%25{QSqoYcsNC4(lwa_+MHNt|dp7G&JXBEN$Kqcbtr!VEIn;G9b*f%LrZ&pg6mm*&7+ILu-4b97Fspp 
zSlWqb1nBEBe_E}<23_w(Aes6@tYOHQ-q9!>t+yg6c=Ib=!i_eL3FxK3oA-g_lzOh>) z8CSg^b=VRN-9S?DXQ&EqmCwvBzjUPwuXD9APxTEK%NlJDB6q2Xm!)du_TbAsTvxo~ z@axH)Sqs{{p$Vmpl|5@ONXlb>xd9i9XPjnYi;=%K9x)L&U#5r}86nMSPf#^l!>G&& zZ6Nrf0B^q-k{kB94z8}m$2z%w9EL<_h6piU9)jjF5Q9v!M$^Ef;v@|RxBZcGi#>0k zWEkr&N1ljsmA@^$_&COP_rJ@Zha?V&O1I)#!-dF*Yi6{3>oT5$0LP4V^j~@TGjjc6 z%0=^#*ePKvmee6in&ILGVG~mO()ULWZU{{YjYl8=t{__>d5?XFD5t+l$g;M(kFFV#i0Kxj9sUsb2g99Ut@5YQEgp zBQ#CPE0LDhKYTQt8k)JQe{Ot^nt~ozys$FLwe(1xa+kWIZ)@L%_U1@$z?*zkE@8$t z2eMrYUnX`o;EgMyS~75t*t7NaE)LibR(Pco!1;q##0AHpuKAnn1%vG2It`w~xwz%> zzNUkYU`VA0tH^mp5IhHS<)oK;s#-No1?Haa6PG$L6Mi6e(TqC>%`T{0dPRNngH7WJ zZV1WeWC1K@aU0Z;Os2B$^6*j%U`$(R0uzF>d6gG%$d-_0&oX}hp(iQ~fv1$PdvKfg z;^-dxQ^XXYxo=f?GDVI^PwBivA)EdK%A)m}jn9xPj0o1j)QP_Q z(?KS+&a>fX>GXvPoA1Y!S{f#Mf>Y~Tis)&RpMKUkoz2qn;f(_sgsTe2p5WW>idkIY zY)fB!u7Pd5zga%}ZfjnIjxq@-E@Oiiof{enB;W{>%=eLHCO_8LLTH3hkdi2gtAt|I zQCj9O6(+aS;IY%hX_J{Dx&E+tj&eD0=^h<~v+TZ4(J%`TA$+oXg4;`4WhDKDIfgsT&QXTk8=iJ zlFQ1Dm!aUuy_{JzuLzWx9nFt!@o7o z4XeLg(G(I~FOCc8+DO$3`ZYXiARe}eq7kz+|EY2Wil((>b#?G4tuR$I>3@wH#dPY~ zxTm&Ny?R1?IuqpN^N~*ZEy+U`_M7$(ol*?(2|feJ zlzezIJhDVo{oCG~nAMY>N;xh6pQV1BZHuYPq>z1LAl+hf`A^DI8M{ZI(L4f{Sl6Vu z+4-BtoEeNZIQ+wmFXydcVO8-v_yircEq!-AS^{tR1QIy{Rkp^?Sbhx(-J)J_!mff9 zr(37UeBqg;&pD@dgeU)^*>t)1Be5t)4FL7b8?) 
z{`c3{Aw+qo%$xIh6Hhu7#hc;g5RyDK}; zw44QVn-`K(XlL`=e5CyLIO;7rT-_YjxBa-Z?Ri1_W(KpOnAnS4w#sALkV12ZJLxHXpXp}lzBP%7u#q~bmuurnL5BK3U_B+ngq_y!YLAkcm3vR=jOuc_kJ4bT ztMliRdDf0w_(BipCq3}&?eEIjQxshVB1#o#%?D(zc-4p^V@#5cu3GhzEn_e>I>D|4 zjd$NG|3vS&dY1I@X6HpmUX7z#Kgm$fX~DngOOSmOSR$v=m6Ocr3)}(J%kC3-wUq^~ zg{#@U@;+2X!arO_@0_Om+f9$%Z zXK)L5;p)F{Ok}bm56KknvTq+`j*VxR_C1<*Q!Z_myVKHeK9XGoju@}eifLy=s!l?x zoL#VVZYA7wK?Gmo3qb-a8Yxe7&QvMowa$pz^lMBpd@XNy1(qE+jvq&#)F6#vssyj z_7>!I+q>dU2?!S2AGF*oZRN_&xAyZud2q@-d_SB*9TMf)?!nMjjuo6(J6v-nE0p~( z$QrUM$v#jjv*YShAk;tf?mx5jnf}?I^naJG&%ycc>G~`zO#gec)&EG>XJuvO`j2$| zto3<%xIVJh^PQ`!D}oJ|+Ft#@j*uPyoj#)04v3bWE2ONQEBn>E?5>&TZGn2!2LJ>$aAHTw~ z%xG}IaBRRe5J)>fQIeC>gL_?FBXE2EV}`_UDGQ-@cw}VYrG!UtlHkPJ$QA~KGCiQm z|H%$}O6C&i4B?1fu=dzDI1jxMA`t&oLw#d&GhOCVCk;QVD(Dmo_=;WMsuwa3c6Jrs z9QdWcz^}fH@1|xt4Y>>mXNjHOcWe=#w(qKVTCg{>cX}8P%F+Ga9$W(`8`y3P_{=c^ z2wehj!eBJi1BV6dI|I=>Jo%Js@9X)EJPPqp(oojc7T8rlHM#&{3|`w<2MVHGh@Rd@ zpfB!6HM)l=qQrw4%HgZTp@hq$)W8uaK*It4BeD!wdK>4XgAdcm3|FESwjnZllaQ|d z1Nq2iToC{Z4e99#fVv2Hk?>AnpBB7tsq?J=aH%I0Lbw84_xKmVkM`filGAGAYp@Ej z|J^9SHP$1uB^vfUTIr_`gh@j~gGdMobOHwCk*3w~fvh{Qfc#RDaEW-U1=+n7gAfBW z(L(Om&7RWRLMUp7sl)*Qy10OOc>HSF>p>>Q#qI4M#)D%7)m-&M`VRjhhtT{A-bUD7 z9R|&tc^l?q2RbLr{pQ8)o}1_gHa>ohea$_d$S>8!(m z%KHpGfT+~2JhIcX{+?0(UO4=Y-SriH?TP>J#U#D7GBF_q z>9d2)JGg0v$osY_hySp$NcO{EuYUU0qJd=h)&gUSOWyPyHs2?<*#~ZvZvxKL`W{N@ zImqy}PS1viDm~jVydP--(f8oV_%8C)qPvf^QwyEICHqze-ey1UQTpjWiBJ2b&g4pO z>sQv$P)4!0PSit0@5iyePOk~X_eIL&7mR%XntB>ufLnu`k-z21IM<8}=o< z0fceui-hPWwnsP!sUQD_xMu)bJNgmm224BtE!1hp-QwCCpELJPP7I`hp}aaxB`TpJUqh$l_w@Q2m++BSqM zC8+?-^MaYg+f>9vo~kfdRL;-+*HFVI??EV0X`K8H{ux<*O@G4Q8AQILN+vvQclo79 zhFfjV<8FVRl4)aurY=h_RUK&ne96*GVpihVU>MW#hiN90d+`GS>0!I4Ow+vbX4Q7Z zO!dQhl7~HvfHwj$Cp5Qno7$0$8IP(Ii`ow7J*B<#U1Sj*qbO-b&&X?vvHfXJO!UeJ z1~*n%j5yOSGihA|VWTZGxthjTBxuWvGFM!U3;p$JS+Jm4q$eX=3S~?VLu{VJ3VpR^ z90|jK3NC(-@Mj*{A+>ee#&%kp#PF1tVoN<>mTQ+PM9_@(PQvpn)zFalblZUht*a26 z(3WY_YlFX zGhZt!Jvtfr;z&d(v{(6$nYs@j&BpO@Sx)Xto$d*<+q)P2J 
z0nf1ru(gXlIXOnouUBv6#Wl1*1A5VuVN9NbGpv1R0GEzBVeHg*`C=Ft$k1w;_*RRGzD1T zKP@YIb7N@7nUyJ^w!8@WPzVgA>{B4DYRTD8KR}DALzNLUpiq6;xZ&T1CxWmdcrD3T zCJpVPgFdi=cw4~&+D9@W*<(6z;)&(0^3C$^?OkPEQy6!d6N=oh#5}gfv$ZqMtt#K! zJUENw4z2P6Gj>+C)R|G{3&ZsS^&y~ft?CfH&MNfJ{XZ>q_CNbunuyLDnl9oV?*f5d z-rmZ~o#TnTP~)X?3Ptcp_v5XxJ_lo@bP-jAWSceiq=;@_vS|lV&OCvy{v46(nSrY{ zgT{=o%0NS5WwK^*YuLL_9W-7?QxJ@8MCNiqC-=24SojG^P0*LXl;EcOwBJ@~OW7`3 z*c_s|epkDb0-tj`oCYU+bSb9LTdn@6%;b3H-dg4g>>%jnIi@Z7&FYBY^;@g71{+7I z9cGdS#;qsxq#et_Bm})vW`OKr(1dv%Su%_-)k1+7&Ai`ZMMb@&DS@W-P??JAk^qrl zJR=Rop5uY%7QxJ~KqUSbDA>&l49WeU3uRTkDe5l%&GFcs66SX97pmmC=RNnTw&4;} z-;CX2Ijf@YukM<6@6!XC?by0}m-sE-;2O>&XE%WnJ74T94HN%~GO>sj!(q=F0-;)* z(5p-64x6M^jByAQnbTJzDQXU<{e1AX3xdM;PFdp!3?+1}AcRn#b!$ypg}^=G{6#gy z5#!2Ropvq+-RGo{XVex@VCAku0rJq=P2Z|dLk?%kQw>OEEP=>Tjc-wx%^Bntf%uT2 z!x}RYf4v{|ehdT;2i@>z+M1w9QaubXeM(>sWqPejm@N3RYAUAVZky=0pTnp)h2h)9 z`0loep7p9erJ46zs(nCtg|$Ld?ddaXMguEg z+S5QpzV#Dap_VffYjb)H-8T~rmSu~mhjIqw^2x+Y=&MKTY19Lp`hzuyCv z^NBeiyH&N)0&8bxZy0Iwxa2sxL4u2q}^AblbU+?ei0Q zA?@`ocT4iCZX2*$F@_@(G#A$8)ag zQ~uSYj#l?bVez*f0>hQ`t>k#;aS)aN9F+D!-Nu1vkB7GN-#O~bfC;wuX!cTn%it0` z#y^>Vpf@#(3&8TEH#1H`|mN@^uG&O;sIc$m!mMHERsg%=I znAoi`w%k3W&cp-bJO>498iQei*@P!b*zr(`?^qnC@sxU<%|S=k7dACioK0vlWKzWH z5?OE3rpac0S>9i~?ItTmL$z2B!6{C*oENFgsc-F+?s!>H%B zrq1$+6{nvE*`o#cs2xNjDaL6bU6oR=!<7>c4vLbMnmr0+5wz-wzCjz0UW30$;t;57 z6#WRx{p+2|IWy~!h{;0PuhCNX``AptM>ZqD|n_g@Ddt{Pcugl2st! 
zYErK?@#KSPC}{`IpMgCvHg6R3jLMn5CanRg(!c5zUhy<@*IlK}lQWOQQg(meCqm=y z&~@&bA%yuz^k|$@8@i&MeMv{aS}vwgr0IK|gQls6D{iAb2ugb^|FR%g$b)9^(8jNQ zc##dz$$ti*p;-`0elQ(!759(U0Y#-X-i{fKzdJUUxqP-l4E)}4L}>Y>6Q+~O)8*`a zfLxgv?n#Mf1Y6=BYuYu#zbG11kw#0P|0{2Ovz09b|58dFZb_1Jv~S+2e4Kg0y%6cT z_X#X!+@K=&@H_4<_L-~uZrL}VH~IA3w<4~fs5W7EWG6PfS&_GNm350Y&A6Vxzk7=D%#|EUc?BkqS=$-mMUDXL>vt3#VRLtMV4Gjt6n$X z&lbDGrUbU7iGE6!4tyF~u3)iD(L!-Iq&~ppEuO`_;6g9vgT2&!Gr+sz1oYXeL=TUL zCbC0!=BwbvD;{ z@bIkY#%-b%7r+!>^#Jc|VVWw_h0WqQYvG*zW45ZK!9SWWOyMztgG1uJNun^E*>jzT zY!QC)GC8~pwfJ&1WhRQhG_%?%(D!2>D}zG)1apuk^Tg^zkoDD_H^{inCWpGFrs?X$ zfn2+;Nb$p%b2FkVr;XIm`9)99?|CKvTu~!9(l$i5W(IemZd$mj$9)#RKS4qwPTb=# zbCA*U^MVyCE8(i)ARvxy8e5=BS4oMMES&_#o?^s&u^br~%a_lXDSyL|;FI~Yc;)_v zpC0C!goQyf2G-3TwH+wohq)H+r2^wO3DsqY?vP~>6z22x)p+Cis}u)UY=MVK|7+dL zW4QQ8&Q1L$!t_u0ohujD0z^ka&PAy2X};R6;^fU0?$8O`1QMcH0@K#G`t2(QaZxWQ zUTqCuHnL<@=4B{|=1!UCAF#1dQ-hE1kOkigB+XR(v_ZjzVWLyGo&&8e@6GN{A}c{2 z-AY)Gkrb8`58Ihy?-8{Fis@N`9jae%wVOczXM}luVGq@7zp9ZiAoP`*^6Oe z=wp&^{xS3MXd#?m{abZ@Q^i=EJvL|kP45C-r1JF-fvxbj!1nU(Sb+DK zq5t198zv?bc}RSM!yOM}Q&(oShE0pH76`rJ}&08$m5 zjKd8`KqTICb0VShLkVpii0pBBg00&1E7W}1%3_3VEy=1*fYAjFSz9Tuk}MWDmdIntZ{B(lI;HGj-Ci-Hien~7cA?6mnvd%*XQjRM;w>$Fn_v_M zW;qisPJXg22doOBFDO11EFnwpJp{`7zY5dq!M~|e?<6faNtyfuC{UskB5lY^rkLOs zaRRkU{cz4*W{0bXxyo9nghr|^1UE^wQ1PSVXNb#=4wQNc)) zqz}mW+P0fU$gsE_F7`vHBs46Ju4}Wu7|X_-&a&()izdHk>=W_$dz^9yN*_`b1TFWF16WpP3v_jP_3MpYdEZSTklkh&#?W zc^1qmLazP|=PtJIM)VC_s)C-WX4Fyslzw1Ci<(&UGDvlt1E#f~7+o^V-+R5Y9^HYA zf{R8^Fy)5KFZF zq%G^2smR)4P|a9yK0oWve(!YOpKDQM2K~8c0p`ly71|BXM)R`|;mENh|D>pDFCKCQ ztz9!edAntK%A-jWnj|>gtQptk}5OVoG*Fi|mHW2y+F;leFzM%oE{%Z6Q? zv!y&rXP*$Yy`dwe@G*FSyA^o~wv~cy*ETFkb9u@!GOv8?A!)2PuB zb#BywzAdfitGAOb^fTT{MO53F3?W7faAv#hbT3bG? 
zuNatI-lZ|4Ii4!mKPpAGCKwZS+y8Rq6V3!;;kq=-!sbtz-OZmitd^a)$Y+}5IXoFu z>p>My`yHFgifj+J0lhan`71NjW#MpL25vZJ3D%qDwx8c3nF?#cNksDnr&^Jx4<-1I zHUi$?AaVDII)1+{VP3Fvv2-mao&!cY7eO% zBYR{8Y7JGf z)_Qmu1VoucDqe!9iE{Tq_aHMToF`NORbf0FCCf!gY$7-_;gGoFI&fyN3Sw&m9 zUVawZ(6pE%9HOB-IOx|*8EN0J#Hpo*Wu18E1=!+O<-E&{UtTLoW3&jw zv^-XN76Quasj~80o5ordwA$}3yl&?!cj)#<>QaintK=OeBH$}-jq-yWh6rtXM)KD7 z_!CHXA3Bn{-)bs)fawsA7i=L}7Lg6~SQ{}Ei*4{=ULbRAqRf!;!yxkFKot-!+gai-|x2K z?yp?{8_kBj58@D=&oxoE7c%y36_GGl6#S zx6Y_$s_BS)4LPRo>@?PtP?^+f^+PoW*^G$ z=$yM3u4-y;vkxbiIk&`f{-m%9%+TTb5}oIlm>@cZVpqmk1{C^!Y z1Tt7^pBV!(LUTY0BB$Q->j{%-0g$w%4ajvtFDIOZ+d*Z6JQoE(Nxwf;ZGv8MRVEan z8j>=x4D|f#p@wg7K~^GGEW3tAZmi_iX-^>;>)hHDl4e~L!cdgdJB3{W8{xAvRWRho z$Zb-BtYp2j{k##>!2TiH;Kbmle4NA1SI4?w<+>I>775Xv%pDG1(E2H1CDcFf_B`t7>*tM3S@^BV!%o9SDWLOhfED_eIVA0gtx*?ptgs`mN3mW-_~ znGZhU@v}!CHN6jzWr!70D*bUue#!S%g#oo! zWh!+bFw}?4b+QA>7W&-At-O04x`(S{!bRmWcNC!V#Zb{LD;{R3tENG{3fV7RZ-#x) zcAb&fftIA$YQ3pAX??rIufz%v^t2_BSgE3SAG5N3u}m16jgs}Ar68BH-2}52yI7|w z3(VW7cdZJAKKrt&-_jc^Qpv ztZl>v@OpG;3fbExV(7k?;FMWSaJqq!-{1U4k=X4KDugTUk1SE^NO?CK5;OEJDf8;%&DaG+-Qpy1&HJ_B}91H$(k-*S>z;A39vfQF4^1JY=)GMj?w zaK*U8;ob0M?_5I3Q6P(RaMyW66U9bkwZ@b7<66K7NtW3c)g*7GmSG?b%49#152)6t zZV*#Ba107IoRs~lrz$=TvGz@qWP%6A!3ZK=XglBs8?JCj0-F^K~*%Q-{-MEU6;ac$Q-F1Gc`=^zUK2U*a9yp=Au=3 z=(tApZ?%==(KHxM?UH0SvAU2QHrk&W66#d@T~i=>z8}eV+ujZs=szW*P(GFTzYuwh zy|j=21opc7f=)LX38)|?hKESU4k3_wgMjTd*;}A?y|uIS2ZHPs+$Gwn8P_2l{C@>8 z$7JtLA1Y-^;{SR9E`vL&IHJA+0EmOWlrNPBt@KM!A0IXiEtT?a-=0TY>lsz}nIUQ6 zlKfVWXRa+b93o(y)0a@##1TJx{)kZ*9fX%hggqr^=3WG!i*so2C5*L%0Ui=lfAOeK zD_0AT3%w#!r^}$QA>=P1SA<8ZDYd(T`e)UWN^KvNF2xYhB$1wXQaHn3yLvN~BOpx| zwH{aMfzGnr7OpO(3Qm_8@#FC#2qFH}k_R|7rzEH~Kq!~Gs~Gfln816#v6l@8{Mc9R zEY7H0YOjIcK=XWvL{E1P+_@gRkIco)r6HtVDAMzfVO~qFt0$CCEynjRGrmffjvbop zFwvS_W?RErHf(*~6omO?Wz{Y<%oE(BKX{pz_LAKIZklN|Iw?F&#}X*K8PAGnh8K?+g+apBaT3Dsl$%s1YVK{N6U% zhuW2)qM8lN2T;e}whkl~6PnO4KZD-ow?CRy>wxaq*LsRU$Nzo42?!xR8U=X+{@qob zoM78k-9v*)+Cm=PdcTh8T5xHye5755?crtTnr;ekMczH>vPT>#CNX!gVp5nAtB0le 
z9;}VmA(JAJj9S%!?$Z2o_HalybJw}l+`A7Odee{-RyXEt7 z;aR6cc<0^x!h2ClLnc)<8l{w*w|z;`Thop_TnLqbBWesKS3~!CZGlkUejb||&dZD; zwm&=5vUa3=ymsR=(ns7}NI z`eJrZJD-DE?qY_d9$As*Y5aLv_1v&%iBj1)Ys($ttzm-wj7fB!vB$I`2e+I%SZbky z2}aFMfThCk42vy-u}e)`6Eckz>cqz+RH%W8ya+#{tXPcR zfn><2;2jBhznMN0Mc}~!`U_!okX~@MNEMC1q_#>u`fT$_O7VAMb$uTiK7V41RCekP zoAXrOf_ck)I=uwm-GniNfBe})4=6Ophs)wS_)Mvk0A`#ki{r_tt@-aWjT8c*d_)r>DokuNA*X+J@Bstv?g+u zcnPEB4c?%fp1TiXTmTMNoR!ZwD^1?D%BWOY5&!4{m|MO3KVopa<31F-$G6D!7Uq$N z|Ik(iMK+me`|s5PQY5rwNTyDIZGBL*dAZ9(6P;(mdfQugh|7B{Rhj0UB$lilEJT93 z>sHuxOqi4)VjJ|T(qeUGh|Gi)qhsQ+1S5hsYh%Sgj}Whb*-iog8aa^4bQF(5NHl0iIoJaAY!+d15jf!oCp7d2 z`$&{$dFiW2T&jNWC&`@*+z=r>q$QYXzd+Y`FJA}?0(NmE!T&h-l|+&CzF+gq_wf+@ zxiGa9hG{Es%h_gewTy|dL=wUt$42S8PBIXt-enE$PG}0MeBFwq)_h)~P+Ff%OmJB? zwS!sQg)f4Ym1u1meZOn8fXV|xA;Rd5t=RYlR6{srC4133Mu}iZLgLR4=O2)g=n;0O zB|V1i<-=FW_mnM;OkwGwy|Ec%?usGxMUTLVv%G5a8}Q@Le>~7z3O_Jz#iequsmgi= zkUOdbj{GnU=%M9Fgu~iDCvo`HEsd?1_AgyN;R*8av!&6CNS)I|&>)`GcINY13gKc`Dbj=w*AhJZ7sK7pRl;J&vd#jR zBPe{9_>OWfCbPTB+`czs`c7dK>jiBAPH!l>$7*qE|sXOX=LbcIxrKW#B=6z*GvD z1Alj-s>ZpodxA>t)Td#j%nr|QU^4iagsz|{QuZ*fK3dR6a)R>vzISyorI z0(?u0Fx5-wkh*(LzXJ8~tgOsXWiXgglnh2nn_&#|l4nI~dhg=fquRb9 zsQ(RSUHRF8?c#xKq>iu8dt=Oe-?6%3-m{{IWCz(N6BD5Y!)?AAd;OD4=6S1^PBtg&{v{izmA4~LaEiY}25L7hBSjsWl+JEXZBg{*WY0_9-^KvE_>8u_1 zlv;*O=+(4 zLRgWDci7h3M_W(#KQdgJR{d@uBt)Kyu(ABz;Q^(?{RvV+aX8liI=FKb3;(O9!w^wP z1_>}r`LIvGA6cYa^MC@+Pw(4v&{`(AnIaVEmQwAK3=Na`rs(9%SI(kv_i;s8bDV7LY;4MyO5J( zQqaP#6Tk?n%%d-{mZtDcvei~0VN>6t30FigvqO?K8zZNTCMa-a-#4jukpu@nH907W z3p6&ZG05=Fc%Z-{)XA=lB>VF5*?viz$si|O$McBkvDVogc5c+JFMhmbM$UE6R&JwIl?lR-C^X67eqiv_3K@^ z^*MnSapd$X9@{e)$K+H=kOMsCi~EV`Alz8Q_l2YKOzJ7hZA;!i0@>;^*N37-1nw%0 z01>=UBib`J>PBABSz4D(*G3kq_#E&hs9;vkL>C-_hB%Hz0ED zXJrfUyrD3?A~85nqyCes`)5NJckyhVMr=_93VgKeVON;B2ZcxY`Kyv%{Sh}olyO=e zQzj}UNwKlp%abFG$GC|!qBg~JCi_;^c;#NuR(RSh(t2A7>Sj{jspzO*j%fu>Y-Y`Wh=gQhMUokv^D45u$$ z_zu0gp^&fag+eA*I1}a#t$N(IoCenhHpb@SuVU(*3=)pssrj<6YdW7(6CGkEi7?)_ z(0Wc;%K`~JN>>R9?{;S8!oG*Fou8FaA*KHOWV;4Fs{z+&N}lCQ7qgEy>M`e#sW|Kt 
zgr(2?=MSpijYcRX-4_MxIK7{V;Q4Bo8^T;ZVQTHI(m?@)lJ@(R=I#sx;mSH+<(^kx zFUQoX-UP+ROtn2*QACYa(JdWyNhHX~JMA#W;OH%>lrS~x$qz_B0$&ptHo2WZa20U^ z!u=8Ilg?2;BZ^^83FqmAatG_?Wk1UB(SyOKJ1R9T>h>?3L6|=W^_)V)yHN1(Cz+A> z!6>(O>2+D6bu* zcL0Z*oSWv03x+MqJ|!sK=yA7TTbtzc#tH-_bR}aHsl486BiwE`al^dF!POz!g;JU}6Pc6%KRA`XQpt>)bjLQh+w_8{G|&AOu3fFlxL&*AgO!d^ zE6zo8>#qn7u0YvSKG&}Cw!aKRbi2My`+hT+9~?8_&_))8RbClS`m_|yVGVqHU2Pi5 z-u+^xs2=g}glG4WLY)MJ62&Y=!Y=p3++(rl0XQ#0y#=afp&9qSvyYE)(;QEBsA=La zGlG^8OcV=>F%VoLrRlo2uC%1mUVGSJC8rM9t#|kE*J(3Pf@Mv*r*Dq0;0(d7=t3$P zvByaonH=Eo;oFN?^llQfy0bhCecB+o(Xq?0h?kru4bY7nIAD(zh932HhpD0?^m8{j z3TC0>HdDueIMNmt7?&CCSBrDhrj^!SjAnDuG*w?o!q}yHVMd(fzc^KA3BHkA_CwYoRNg#`1 zwPVd18?02JK_RJ9XD6hMTJkD+&=FS)d7m9tR(29~EozNG9mONBIVjWdLHsI-X}r_f z*(2q9Fp0f`3q0=bB-(qK)?wwqqpDcHAsRJ8T@J z&DkWOg`&qs=T$z1AeL-Cvh@{c0$Y2e zm=S+)TM8&mz809 z3*)qirgX8*ytY85_$KijMAij*sh|zv1sb8nJ9K2_n{thW=Jiwl=X8lZt$0od>l9*m598C?8xtdiwq;= z)KQYOU(s-Xad(paWB_@_Ihb%R4lwn{_C8bvBXQH_8YNZlZ z=Ah9JLacVrs>Zx9|7hM6Nq-|D{KnA9{ew#pw^HILMy;4AWX$!VVsSzgJYAAA<}!P| zM!@YPDxZz}a|n5Xu6EMe$z}6uD2;AvRhzBF5T9K?P*=Y0BYPq zIG2LniJiY?m2Dx*X?d1W(N6Ar<5d_4#4|9MN)R=TTX2g3VQ7I#aMNfo{%9mcg(!zUwxWI=Hu3b! z24|XNk^`*GA{&Ut@i9b;VXTLM(%=G{8U@ zX^l_nJiXeE9XUAZ;IT3x!_d#oB1s8gtBewQ4~4H{*{PPs%VeKDPMhAKJ#Sep!aKIY zw5~NP{*}F_8OK~IQth;igv*Lr9Drvo8G=-{;B; zz*wl&Xp)GQSttyM1hc+a5s*ehf#{KBd##T@Z){sh~F)DL1fr=eOejob*7? 
zU2Pho(bpmLryV4|R@tcV=W2}Fn%fol!Pd<;RNZl?eWZ`h?A>>liPbL#&{`;)>CaY7 zg_oPErL?_dK}Rwb`+t*BAN!jgd*2vHX`EW21y(i*ch!{LauV^O^vWz~Z+|8$P;Eu>Ffd?OqZBdR3-N?nb%bB|Jq#cC_}b?$*wrZvv!&s;$+1Mzg6sl&ZiKub z;hvF4VxEUSU7;u1wHS0dDC2h4cZlJArh(ItqR&Q_F#=mJGYidfMBBe{HIjA-#v4I+ zw$$2!B%gB-fa~bBJc!$(&dYA|Yqhk=qtbkZTA}jwz^@mNuS9?J6%gH+UPYuQs_oxW3*NqT zYZ=%1q_8zpj#bxL=B`|^&580d_~L`IQsr}FUJj#NS*QdZprK42Omg4)$NQH8@xLEG z>mUjof8Zz?oS$H{Rp8!Al5Lhz2-V!e#eMn~U}Mk>?s#qg6l1=v1E3O=+#I9*l{&JK zLvc-IfzdLBlcg-9*kFiiiG2?QmqJJtxt=kXHO`YNc5oAAUB!5^F*|bUCtzS0i&-kU zw=DMcI|O+DX7v7suAc7RS21h1i78QsmCnnQZZ-#C`&Z(qfWlWW9(=bV)I49`5f*9z zZ9y6-CrhNv-#MY(R>OJ8x@&5krd;INPApDT$kPYDGPk=1@3=BYVDu}nI zM{o|#f`ymgKM?x>4nQCyBO~sz?OqK)Ob&5K zA4a$hcme7R4lEBw0Rj?AC{KvT;8o8Bpn|JkUJE+!0zC8!Wc35O5BR--<4=&c-?#V^ z@tXnx^o0up60l!u%byMpatdMZ&p!a5gG$R=%$^7i5WMjR1XKX=j|%S`GMGQWm7@P{ zH2?tB!v_F?^&sy9fxs;s8)+cSpWkn0{Er&eGAqU4HPT*9jR7X2|7#@=X&4tq@TLp? zFK@^tV4z3e*RKwTfWa$F&uHDN{Q-N3K#sfrQqqrs0(rO3CRU$>UzlH8Sy>nXz!wO> zD=7Qj50&B30r=1Lm#xSY;@&B!3lK-%$RC&hPJbCZ7a!;x5{zJnrytPw@8bOoN=_a@ zA0Chp&{bay0_JXSL&3CuNY85dqf4k8Fcpa66A}o-x99i8IK(iW2M6rsgZ=vr@ky)v z3M1p(@elJuPZkukoxML!O&q76nvMzp0R;sGG&~{#0O&VE3?AZRY0Pi9atLQ1;Gu6* z5R`U}gT8$!I=zZ(@XAO*|#Fpffu z8S3|Vx%~q@R|rASL)g}}AOZndzlKx#t_S)a0YZj)JRHCem!bSZ5Rl)gFeJ=zP?snI z`Q`5$pdyy%xz0%jgzew7^=uaBFlRgetGV% z?jIUA#x*S10qyjFK;pq{-5?kJ)$v8QdtbCcO||P7Nd~-d6u|~B_nXt=;H+wnZ=t@G zo)FKcv|TTeqvdQ~?A5;G_SZw!cKiuU3lK@>V}Iu$qTRv$+Y)=MzNpu~nxo+LrgWFY zA!8pfA}uLtsaNHtjw^@;-GiAly(qg*#4NPKE&w(D3l2slFkB{G-aaq%p9&7-DJsX| zZ$<~8iY}AVu!PQsVc1V4@xc&5Gn^T>JeR59>IAvG-BUO?Ze(7{#p^26nh262!nYOu zNqia1!UFNx6?1|72FmbkYSm(L#--$T(MDRM^~nEV0wvb9I3Cigx#O||Oh*~^d+kOM zARQBV&R&_^Ck+LvucSQX-=N%6yM=6%Q~y zbsxi;pq_Q?5F8A0>o2E-j|FyK4h@BI3LUHu%kxJ_U^!m$g3a=P^MfW8F2CrMWNBG! 
z!O(H{CF7@m*xgnXpm@s0kMjD4>ebe4!frY7zbQcA5ohs44YgO>KUv49&hZZ~BxE`h z1>>GmaQ8S^Geh4)>39JF-cnt$w#wp=?DuaHr@{yZg#yG0aN-)7W@%h)rwE9TeN00^ zrnDfa(6p$P!n<6luvW=3J}5K@?M&{@qT;?ZaqT`|xxTkAohuTqH)3Bx9IKf?TWInv zCR|t8;~^69C)>rB)W8qt@r3nl$ACe(BLeKk$Nn);xCcI@UYlH2T(tBFc3~}kqeais zpj8{)Zlm)?4C(wiZ=Q~|wUON3i<(tumsk&BQ}#kC#$g;?pc+eA6brR)7I8!)YYvFz zOKz(XhdZCMdGA+Tz4=GM#{<#utQXa!*@CUONpdJj07atL4+w{Y7pxn7HU~cGZ}TJq z(m=a7aY+9pnF2v423{8OFw<_>^6FC}lZ9H+sijr*#87ILwxTEAPdVe8m*;ORq27Gx z=tTJxF+T2>U_Red_9axGD`NP&3O9(=qI{6ZG*$n_#1dV&Y zs*C!lvII)Y(#Mk?R;xr<=_s3!I_RPL>(H$`JF?8RHD(vwxGIN#(0 zNPKPjs!QK#?Ux=7o-E5r{lD6>0K-pjNYmQhL}WbJgXnBnxnG16b2^1IXiAgr9AIZ9 z-5SbLY^_jI0U}r*{(;wmpM%RDFbN0jPayCehj9% zTN>?i|NT>uISe+|-;8k^RiRbc7Hr$5YN{JkRV+8+c?FI}EtC+<9c$zYq>7r7A2Hc% zG==JHtSGbBcGxq@4ZpdelmA3S3e(Zj~!Y!U-D^L~~n7}#`X zAH;yH02wmtm1#f{+YhNcMAa$6cQhCv8$ZPEBemiR15NK(~AH zw8zIGIO?l$zi8-IHDP*kJSomX4Zl;g;E7Vd2J=LpUg6H1Uxm?iE-HWVRUMy315%#g z7jWl2?@!95RrGe|_sty=p$vHh8gz^Yx41d1F!A=WK4I)4TK)u-gwWnjT|}Tx5Sx1* zib5C0&~ksn`Ymo|vYN3aEu{VG6AVy(TD}Yn8V^yKh^G-c6&N3?ezt_MW*Wfcc8^P> zmIty-C?=3wQ%;%VmXT36_!cH{7+w|zPDkQfMYB`%(|cOltJmt21$|Naum#`G%78-r zy9LA6)wIk0;J#9m$X7y)bUh!AeAn&pNrvaQ`l_!~lE&k zSkzBwSYd8I;FzPh4ZIUdU9@ZlX`;qI57uuvPh?9Gg{BQAo{T zUB>^e7>=^2;x&?H@ogUFw~IQllFAykS@W7bs0BFzf6^j-y{l0)5SYc~X2QO4b6{`nc7$CADkf8BcfL03Kz>O&Llqx- zJTN5k+K_uOMM*ctF*r3q{6VMVER{K}ds8_rX+;6TVrtdFO8u-?gL&mZGb67KIn{^& zoj<9wd(|G|nm-(kXf61yHMq}c^FoEUcn>KpWr!m&BV9MmxjZ~$n3yTW9;+)S+`ljPbU%!#|TQGS&ru$*fvM7x$Ke-^Z zlctOBA$~OV*HGXw;HFtMN3`8XD|!KUv@fJvcvl59`DHuP(2N$UR#WyURHd-;Zvoa$ z&Eiz+gne;y*O{7C5y6j-sh-D!fcZbPksV<5m*Kvh|216w zxg^-`RP)e#%8}9LWWa?_2fw;b+Loigvt4EM&HI8=p14}j3mR%aF4BZY597P)u{c7< zYO&iA2zt7MeB4J8O^pEkpT~seaX-y_*^ZGLe_&RkPC4|5ECjl?mSDk@c?H^yYjaxb zbnT^^gHQNnL~6u(pClFmoT>X6n|HY5*a1>A0U`8pYFP9%>g9Iz&?0RJG`G8}tJjGX zFBEVDqFR0IeNY7l|(yktbr5!bhf zj){6(l#;szZHb1`p{&vqs5TByIFBJj`Kb@g_sU`)W5C^Kv?MA4_Z1(4HSZs_YzD&H%XdpOho>yMi->@PVT& z-DKIn*A)xLo8WYd*FyRXmZmQ{_3YjxGxg))5Zr9JZL4(FOKIYZDM`&mEIq>Ibsp}G 
zR@K1dJ@r&P34-EB-d3m=Cfp=?Y%!gWvwRco{!cnXq~0qA6P&ImVS1ANL(fgUPc}O2 z7q3z^;WfQFcq`}xT3_wabcFf`(iG^pfUN}?-t@BAvzX>Sr<8bzh(r5w_1QL!@8qHA z%~G?c=V1Zwt#7ImXXhcCA|Me=@u2OMS9NDzQ%`z^USwJs_yXN~r~A?Ds;!9tQIu|l z+FD;);^Wm*b1`2|@pfyOJj4HblNOF?;!ni2JIZsICN!|_DpF|jEve_`u54>y6T$-JwJ=cSAr!Z8qSv&~d73Tszd2C-O z=dus@1I{-*&vZV|ZbwW~C-QdPH+@I%tSkQztvGTzR6pjWIPbtvd&ugJVt@4BiLB*psA@HQfsoO zI0FnQGJdc~!bF=zeP&jfuIXEtc;&yZ3$q*|UHf96UCjy-xnPCWk+*47FwTc=JHz59 znRek2-HaM9MCq6Aw7**ibSsmJHr?Aj*_A>OeDb}oTbfjeDX*2+rA3S8)m#NE_|!Ge zm=0yraHIVRckdumayY_Zo(73)Ysz+pjOb2y2h;9b)}@-`{P+J;$6j;;g-PpLTBWKE z^}p#lNg{Q=&U_~+QHV)>1gQaXjI|dtSA}c_~B|o z)LMpdBhyPU9m$Kz_=2Bi^E!6IF4uVFR{klWbEe#M0UL z%uXZ}J@vbcKkpjsqO@6&zt_D)uQTra__6xwV3EUR}~Y}8C@=$O`o9a3st+f z<#1S2DLbgmb@K5j>vA*;%hGaQS}u-V_33vQdXUP(txL8ivIW_j zrTU`9q)2h`$MnXrS#^=+`Ms@fjt0`1LN7qv=XC3QR{`c%4fCG zAi(O5e&+s8q(scQL3T;={E+MLc(SB|ipk@xQu{FVt{B_?AC#R_j3`mtrQ5b`+wRl0 z&C@nc+qP}nwr$(CZBNf6|0Lh!pJe8$kgA)ysqFo{Ydv|Kc$i~aP)qm4 z`9}O!7v_vDStYsJ^&)x*j~LWWWWod1yFZ-?%(P5-caC~wU>FrTQEEDPHFHIu956i5N>v?V+CDs zErT9ha-l}=3hLv#TwW4ZFCG z7mM`F>C+_^Z~J#K#wx)^0pfCBPJTa)#iSZJK|@%O_wbj3k`I}-Z?KzDv4x~N`yobM zS10~4>`JZXm;Bh{^o6GK3$kwS!l&NhwOK60m_ppxx|s;D3#){Ll+)COC0N}mn4z%Q z^F&*?K+Y6B@mJZZD5zaH(MtJswB@k+*C5f2I#NOQP~F=KDcjTFK1i{TV%27qB^X$~OY#&y=TmMQt;7eFE5uPRZk>4=~$#GxUQ~e2I+=UUE20SKf z0d4^UHCS`F2Q^#R=;6M7RQUox!)337$+CcqkCxYz?~I7eiX2ZOk%N*U1hDLyzs);- z*u&Ojl9Sc?oI90oYe1q$tMOfkh-AT9?!KvswL4P+!w9psQ<4ninvT)BCsJ}=-8F|< z?5u|P+*19aLsE6BbZEp`l9Ysk2>N@e2Cc_x1#qz9xHeI^4&_0;8opyPo_XI@Rtf?> zL{M0P(QO0X>>Qq{G~wFR{1xS3Thh>Zj3ZmEZT;l|-W#lX)?i!n(Vf`bCN>Y*^v!E5 zzk-3>g~Y=m4lmLK&%x)37kw-C+L|+(QnG!IPIhWIO{RPPjz)~Q=T%`a=Sh@dxuBKK z=4qp2(p){5(pi4GJIAtws4=iu zNiY_mUMTx!>&Z&a^>V_aBRb@%zootVvO;Ll9x-CcHNCX{VCJLkb?N3O(--AS)ORDY z`nXu_lwIm%1f7yNfuvK<+$Saw)|7#o6A=!#g0Xr?Z4HGwz731fXVJUC@|zV2!%P%a zkA`oHwyNb!8AaCf(dVG9D)S`qfM_hJoGk`qchrNh(?EH^Ha$ zHE@Id=M2+SaxfRTMrxGxgFXiF2zk3qNqH`x5-53TJgr7`G1r(&RJ&^rwml25%~|2A zp1vpP&YkM%ES@FfTE!$16wot{WfFzpLJBfdl7z}6md$F8WaW4NW#*2*Sc_T`x#`bX 
zZ050U;mn+AzMJH6XTQ33tLa8+rqqanqWy{6;N_TUGfX?UV2)#&@P|aJ)Ui)Pbu+Oc z=rNsOL?Ap#A@N!nQ7%5Aa2GnJYI9x?yT0hP&bT{AoxbTod0!+6;rGTAQ~U$vY=A0H z#&$FKT+m^Us&E6pVCQ-T&6v{8!{&@v@D!crV>}JhuJ^)$aaW=juYvSUIEx{-wDQ*P z{%nlJdSZoEqZ{$hKr8EdGYO$iz|Dp@?ziXaZ|29`J1~c-4pel)m}M4wFgxxW=OjK= z9;dU|Nq4;UQ`-5ThL|M2O)utl-?x4q7c#*XcLbwZ4O@Kb#|lxmd@ApnQ?^r znYN)D69&h&&fE6*G|kd6V0tCYOd|WStseOhiUtn-k|Dwx8|shbH@5{k%^%dOHH9p~ z#KvB?U7T29p^Jzsi2%_~oKCdSak1jF5A~2v1~$(J_b-1%H}DKoTq`0L8KNo~Q1Szk z6WBQb*fdYN#3Hq!w~KEZoc(5{;HDc+(s&g z=8L45hX-V%Fc0qe4ueAT<3)|AlN6g6XV*g!J>qj}s=Bk!w)&MDg9cT|N?(X>QQYb& zHPI_rU&_0}jXDzzbK+t1^_fYnYHEOCixKXbEowQQE50*V;tzI?OxMKZ2m4VE>v4}3 zp1Wbs=(90ZX6v>VbZO(g)aUp<75cM`wfnhk)v=SZm$mlTMcwQ^&(2N*E)n%W2X*DDjvU{rtD< zvi!1tTTXi{?FkK~SLltCpX|6Vj>}BG2z_%deb_%zCY)RZ(CCd={y))A#RMyzv~qKaGTco8^`g$K0t}jzsX<({8Q8t zBM7SUjd7aW&=Xz2bNd^9FV{jUSQr6Y&JpZ?+zH?8FSAwr15T|{)c4;IKc@c=;>YkG z#E*6o%cQ>6esI}TnH zq|&Qv7!*X{aPjeQ@i8c9V3D98<8M&mJYzo#1b7gbQg9%Pf>>J^UnSI|^C;0(Zauq= z&kN*H;3LrR(NS^8Pq^RC>E1aA3J}P}06pAYx>npg1k+qbsBmAyuAex6v2DNzrzBLQ z`^QHR0c{2Xly#jrHGtEwUM_&BT?~$OU^w7!3k+BIeW0HOOo(1c7CU~u@6{*;XLUIU zEVx0;OhkDOq<{!pvHo2%f`1OVb;)${JE+m0z{_922ms%_*trnmhdXEAqCb*>{NHfF z{CO1UY7q0-evRPkguhG8ua0(tN_15K2!VcYK*DJZ6gs?pKmiURD?8tvBq3jyBv?NL z?Es&R0sc7%H5#x$0gfLFg;O=GP09*l{c;L)bPl8lz|V3ySifK)yOC?!N7H&)@kH|Z zmz#b#zreLG3!vj`Dx+}W*0zDB#cx4dvR)snXImP1L3L3@MQtz$7vTP`TsXm=`3%>N zejjcT?=(By*N^smZ9nH$vfXb8ZjNiv+aZ3QU4EK%-0Pc<9^4;oKw)8D1`rhJ#-I)2 zSl=I`k+eN$pXJuQ0z@7DT83y3BK|$ynVz0XyGQ@L*y-&bzn|=y*x>TKuxO^Aoamn` zB?SRDKrhgtKtF#bBqV-BM03d8#6+0cpB!@-g3umgh#zuQX!8)D@y{|e7wMn!)dPA! 
z*YEHE^w$~f7kQ55em|Zc!nUN^Kn9c-;`<+&ryar{_TZoK`yZk=A9Tl+t+lt7jFXn1 zA6p@9gz0S`K#uwLUjgu}(4LX+r%fs6XN=j?E<<3)gpWs4EkY(jV%#$a6LffJK;Y2N zDq?B}1U<(jfL{>TXWk@!#~FPYCvmJTFbde+#lJogNa&vw_!9b3O=UDvk_(2=o| zoR4ziJc?ENtT0J&AVM>iL2QD^f&_|x+ucMQ0{y!>J!vo~K!ghF+7JjU1$?t`L13R) z)Yovx{zS9#rW}tegS9=KJ-A5u{De4&ggw~r#-Ah2@-8e`+nfpc$xpA>#ORu(heJ>n!JzzABq7Yno&v!odn0^}EQO=2xT#zB;hF z0)xRz2_N*cFqb647Rr<2Sm6OI>~H|1S$K7Zh1MVK#%g(Uz4ov{EQ{Xd&f@7Ut!d#+taEe;Q4Uv- zE!q|n5Tzq?6)j3T{#+|gD)8*fw1)nLnzP@KpX!y!4bR(Y@fg zV9&_vhX7p?jS(7O1aw(i7aYlH*%f@%g+d9nMfA25 z!E|TfeQ>YGQ`=L1mY4_X@eM1zWNq&@TggAj(y)WPl!qXlVwF&#QAVzCe5G^L8&QzcBz%&}&9Qk(1M?T4at0gw5 zZ4|cw5!~7n^X_*B_rdlP255z(ngljGhi+~{agAAcR}WnkJLph-2!FQS3_z0~v4WQb<%)DR{Lg8V|;swB{38N;XUVR0Wm~slF)qIlR ztF51K15N^T%V61F2Cgal109m4b}6~lFhx3yB4=NmJb(~t_&2OXKlYdit~jt4|3MJH zD}y?Qra2HM9-P;J*-!AM)IvY`+xD?}7qY0U4Wt9cCFKUa;jripAHTqI1&qu6qI7Rp z3>6V`XBy_)rouyeTXZ!fUS*9Z8w*#Y0M>0tVz<;eu!pPIVPl4{N;+)d115A5_$|%S zN=uzq=90GNtU`7fV3C3;<$G>A;p?g&3;bSOkPN~)u0eXMVS!H@M<4r1@G6lqA$%OK zi|6H5xTu+fJ-C7mhL2xo$FZ2eA#9vTyrRKqWa5&r4i2U7;-&_PHoV}cdIt5)j& z(8#B_JnB&mVohvZ|AzAf#2BdehRtr{tf6JN!te&t7JNO@E!XrlksZr3UPP7SHm+H} z|8794Tob9@|Jc<5kZpjH{{Xf6XHc0%5|MSG z?3*;IdRorhHIIw)!3v2^vZJ}ui;kirQhjnz`Q7-=TB)bYrfc*xfMaGEibitjY_AU7 zoKm@OntPiq^J{rrbVZQ}#Crs^P+(GPNUj9wuMu{gy>>PSt|SCvSh~oJr=&+i`t9JsPE9?PRgt*kB7wxaAYW8M zPQo$%^Os8Rv0Bu)GkFRU+aM~m$Vf>GWL%-6_U6*W+4g|dIJE-kw5pR)yG1k;?2C2+ zD3R3}-S89}mk$@5_Dfq6LXMdW!l0~t)BUP`DCzdzIOZuPl&H{6{B}ym>{~GpMC}1q zqWNK>Y{U}WLMaKnq+cXtp@0=_W2ThDz4*hF+QE786ouC`a@yY=n3kBp#@?)3!iBaO zTTjx&*e%9jExSUhJH^Q7ce!P-^La`UGT@_nFAY7QEO6x_cT!Iramx0KX*7w zxc|*^^cJlL2Q;(K&CXt4aGIC~_=ZDXbaa=9`i?vJBuOnlR442-hux5%sj*xWne4vt z`hmplv~Xoqa}=gm&3M)#NigZx?(hY2!s2|Nd^kp)=8^!px0b#Kvk4xI!j5ZP$;Go( zoXwk2;K^oR>$nT{A`NAz6KrHPT<&X9`j5Uo?jFm@KD2X61LX(lsgkL(mPw}hp!?%( zbw1+!sHMj1{HC69hOKT6)jlRY4AR?01OvYr63Qr28^UTuGX#veNVNMzgG3qSj-hpm z^jy0ar|UqHyj;XZ>V!uv$YWWFxG;qAx9FTpgZlT2lIeqZwktGOT)g_3It^gwtR|!B zq*=@|p3e_7ywy|6z-@mnVo_&xO`S;Dll4xJ8x&(!go_Elfb_#7fncq^hUBq3>M}au5lJpPwuNA 
z2BG~qGM6rVsJPkoRLEZ$){U|N_wo-JNcRyhkDh!WQPIT9_MA(?^vM+-)7vY4E!V3E-9l1jE82kFyr|J?_5@lz&Iy1wDxDg0+1wqHN_{EiG?Hx)qEP2?= zfP;~d!QpN5@2kUAZ54q>M7`nr0+Fr4X3tbwaYHB_RR%!6-%oq#c3|Q%{NC2d4Culf zOXTfjBpxi0&jfCJ;k-`mSd!U<2aC-{Y^ABI=uf>%iG!L-h!dRJztr1Eit>^iUy>AW z4(+It4D<}@6_L7tHf(dW4eJ9W6xA9G>f|xp$7aPv0zfnwQ6m@#JHV zu{P$9@CKrU1pke1se%i{?Er~s=*iO5{zBom8b(-JS54@*1}Mzn(!9J-MLzYgcZm_X z>s68NLVL@P5a4gg8Q8!uB%ldt!Rq*87rnT9#;q(y#nAS;HNDi#k@OI#6^ZkZYR(+Qr5PST<}U;1FOlGqmY zF@H`G82JDsk57a9P4mf+Mq$V7RXpE09f_s3K3Sy;7B11_yi9%RVM{7La+YM4kJ!O^ zWKor~2}@pPhrX=S?wr&EapKk84X7SYFE78_Wx~i=Nyu&_4eJU8d zt$v^uPRg#Y>v_45QCIGA0Okl#hBw3|{RN`z9d*TY-wWd%{?3dyn|e3B8M;*Pn+za& zK57vtz3S z!)qa>Q08-N`WqlG_x|DLfZ)-xSKC<53yuR%dzQHIfHayZIMl6;_PodhmgdvjR6MPy zHw!Fc1=b|9u=X7>wC-&5q?Yo^c~!{n%mzDBV3eTi21OZ%55yInk5Y$%>xj>4{Hf-a zpMguc$92H2$=J#>&+qCRcS-~V)nh9Zn##V56&?fN=tp4gYh&St8>)lY+T@%oqa+>iH&4gA43yo z`8GK>d*E;E%Tl*Yj36hn>R^{AJkj3*qYF+iuuH%)&ndl|k>tfH0x}DZmk!W@wmDQH zU>>q{(L=E!17AZ21Pk2X-zvbHZMl*fP??0hvoHYVd~Mo3A-)(G{OcFXO>H|~pLT~v zQ2l1~O(7ll{#dX25Lh_cvE3rfu~36+bD*l8*YfQn9$#RE8PRB3PEhlN!SV9d_NPW{ z1g3T;WVOc;h$rOX07SvvRLXwck&30=A-2Kk&+`%)zQ%rIB_5kIq{>lx(H3U9^z}>+ z46{Fw5fb!t6wscS4+xExu0XXB;rv57_M$#&#Yf!?mr4T7+lEPBUGr{Jmkn}> zDKbJbY5Ma%c}|=D#c?AmszZ-m*LSZ({&>a%J*f_i2*-;wF(W%75h&J18qYFmCL?#% zV4TaarwPqXXgO1_E1F%otb)@qlAiQ|y~btJp{x7EdJ9m1JW~dnpzrK(D7lD~z^P$1 zrV?BB&N!q{U<|hx%6lhzD47t%lltD}pBEKT_wmg=gr)>r^_-$q3xFf(fZnW{p{&EY zWcP?d(ctyvrBpqsFM zgIwr-u~+W1bQ=q+2YTmyXJ^*6;{FsSIi#1qxmD@_mNXP_zRV6k$tQdgwqg63+M_Y9 z+K{c1H(zNm$OT7QAZO|eRx@eV3VVp>ei$;V+m!99yzwNQU?UF}yKb^2Rf)C4n?|1c zeD*9{HWy3$Iv`U*P$JfNR5>PSC2WZ-A*Lq$EA|vOG@NZQ+$m zFWTA2M_M^n@|=2TdAH3SOVS3<%{UxqW0ag!Vl&x8#8#X3x8ZG(_>b$#2w!)2s0B}t z=7V|o+DBJJFKVYDk=09;O92pps`ts6Rl!`h96y5Os4y3UHEsXxJxcW{ut}5qJ%g9R z`j~JN1?6qYRDqzpnuGOq-^_pkODPssUtKy9CJ zrc~SeXAVylCDv(~X?2HM5+8jT@;LA)HRzsJ^2<~ebp?#gwwEq@XkR@_B?bIj!t9fm zjZ89SEN?n_N8nV7O4hFMg5AGn=BBbkN~kN4s4k`jMw9#ePCg?|B_C^$+zrRGHCCaI zwIA3(6W#~h^MPX7ckYmuM=>DsDCBlBK5^2T^-eXfg`SZHAoz=^TH}#ogw733>aKyS 
zc=SQo5iaDi2sh`Jv*Ho)u%KSY4adyZEo2w+yPkP>D|X5F#LKKxP2DnLap!n9$B?O> zw1hHEDBGfQeo;)K=n)2Y;aEQ0%bO}&i)#7q-D%smtH4+1ZIq)sf{riAXBu9$j1ML0 zupl%traWQ-S)eJy&OSO2);5{Sc6G*&aN|~8eckpi$)@jsQ5NW$SUY(4Fr#RL`7TG_ z4*SB{we)>G2^Uh3sN8_n(`l{at&N5JQZ<_9GfH@Esgsz(RszTx!Yy=ydurFbQK|aP zKSq|zcg5Eo$^TM1hUM0xNo?e^TZ#De?)}?8epTU2*>Yc^4cE#2pc0S4rcCC!eVfPY zDq2U!Tki%2osk!kgfCjF^4+XX^gG$lp_tgI^wKvIshrM}3cl!zFR2TbW7NO|^ER}y zTsebLI`r+7GD52exwJi38+P^$Vl*$${$rdOgER6JLyyV#E*2fw$4`IN7{RvPB!;e< z9TJJ(4yc|x4<{wKgmU#Xs*)5e2UaCHuVZae8Jj(y&m~J}YtZj&vR7tg&-h+n{cNTW zP%Hm=y=@JDZ2LFm)^uSjz>< z84p>*r;bMO_*AWc?UCqsd$~Q2sHddhT1IVki5BNb?=1H`0i4C7sNl}a9!jd0^R$_V z^z;ZV&*L*jz3a#g_f<24cNF{IU}5VjYyEIQNNsVs5TiKQ5szG%K}iM^bc{3PWS8s{ ziIFn~C0gXR`YfJ~U?hc~r)y2xa;*iRwN)t0VsYuVHSP2pqSYg-^WpS{J)0YL*xXma) zoR=iMxtqK|nsV(oyk_B#)id(~yGZP59ib4cV?bz=;C?~_r)-0JRCO79O(CA%>WS^x zge$kO05kmrRq?zrS8Zz*iLJZUHlM82CMettRn0|5?zH38_c>}(>FX54v)4eg#=F-A zEhZICo2TWq67)1*QmM_z0}^hD!m=BL7$9&k-Z2F(f~9vv^3_aqwBJ z{83}c6^>>iRc!K=4b;B7Z z$z%UgMziG)yF{cikGtl*{-LW_s_M#Q zv!mWGa_f#Y*yU{o8z_wGP)T0w1b;PBk$_#{ceogZQjg<2piim=1dR@IzMuWlcgb=8 zIbYj|>0z;}1Z;#*?y!Gd=1vEhnglc&gjyTyBzw)<_6ucgYiJ+Y1)G$r#J)e}Y9&!* zSc^jT>n%V|_mR>z*qv6=hIy$ZVn0)wAMm|QQ;8;>JII#!iaSJLXRTZC8*Z+_GR1$~ zE%N$Y7Y4U|7bS;(n^$3P#V`?Ry6TV+6+kp;7EY0hZ6m+6T2^!1RWLNdQDOe@dqcFw zcFT2CQ)Te&z&B^|a8}^Cjr9THpDblq1q^zrN&XL@Qbv*de}nw~C$IZ|3;A)dviuk1 zV2K9CpV4dP5sdY+n41>PGL_A44cTv%IC!(II)1%I{>J)duX`3ZKwxY z)zEnR9+}+^!za++2V(-KU;;k8?Bh2Dp|bM;mm`lYN}AYU{CR-PWi0@%y}P?(_&$R} zXaeT)o0RSa5J5(#&fVGwGa+jRVD(@|m>qiP5uAn6Y-{DXZK!SS=wQxRY%9$cFGkb7@BiE8c zSV7hTtN{T^Dnd=t;n76`AlJU(2a`2VKihnh{UamLLSFb488Wj2iied0NN+=ZcJP=o zlg9=`FgGxdKF1(wd_z1e8%xx-wRUCUB3$lA-imo65s>GuX>IUoe79?0*LHEQe%RCk z_)XD%Q}k~x`H29-*gJxbOMY74lL@|xnLs)M+Sb+A-`6_;`$hxghNPiR;O{N*;`;hf zt$ic(sO(-`>D>U-x>5t3Le~Rye+k_>v)KUyX=iHtb#MQuezFtR)&Wimn9uP7gup86Fsd-gbJue!E~8BhT)b*M1*RLpRrg-+m=sX=Qwq8@BX- z`##?sd&8t4fvA_$;b-LTBa z*yPOAC%X2r4Jf0)X4NqEj1BLWM&I$4pLG_^1PtdsN%QZ$#mjGj 
z=9b0Y|8@HNwmHWq^v<-?J9>B4#V5U|W&OnOaR-<}NIt%r0uijV6k5M27lGVLb0%Nqj02}5uGwvJCb`>sR`wqR zz43AI{OY7w|2kMX`H+buevV%u!?wmz#U6moX`K1wkb1geKr80M^eScu#EZ^E)n4-% zb8$Mua`2s^wVO^?3E8X>HaWA!#HDvWVS+CNEz6*`>CwpQn`6OaCKiwt_6Z(1I6C6@ zhdD!-x-3PhK+O7@qm(-Hq-3(Y4^lth{Zr+Wm}Tjq*A|Hg)R-jd>Kb}U;FIL`O6b&+ zlEJ_23dJDRv_Kv9f+2?OxP*r!Zh6jAOtxBCjx|wvJG9ezHM?bl?m`KdcOoo2M>D}* zrh?j*F7HTW?j;a>qpYHCr*}*L=havkw)@)NM zl451^Ym2{7HTx(oSF{9Y@kCmna-yROdl?qi-i614@$*gqnE51QwL%9Pb47XQV1<8F z&_MPIn$rQ}yF7=` z+DiLM#Kxe>J*mwDg>w)t3iBYl`Bxt@egtS_3|(p(Z>f;-47!XFPCf5xnW+;FBC zjj4xj(?kHtO5Xj+telhF;P@R2BsS0|s?!472VaCF%DJ>y`mf`#358Wxgj`K@!ERL)5Pi?V_fogDkHB zL&(p$Ms7@UsRQ&IX$(P&iMLbOPzlYKpZ|VEYl_11FJ~oE&Gxl!Vg~4rY(~VE-T*Dr zD$(Zq7K0V^x`(^F?NyTjwO&zGJ7nXHc*O)*6Zk)aJliYIVR&pYt-P6lmNuiFg-F7@u@X|6WTN`{Qu4QJ89ZF*G zFqvNW0bX$@8wqF<%E1mODWEvG&RgoIdtVRUVYkP>k^#e}9g%c!c}_Bqu-5te0)yK{ ze5A~bz9H+<*zt8PlP7Twx~v^nd7K1!tFGK{b501*40aWhbal&_lG8fUkmSR0#ft zv8E#?-V>~nOE9KTj%89+A9c3(;UUfHH+S|2QFrl;8*&J>@Q7$$3C1b2D`N5MlylxBRgmtlHwn4(nZ zMRB1f`-h@R-h?X6#2~H9FpI$&`@##I!uv*Hcu(wJ=x(~E(INPq5NC{VK;-*cg%xv? zJ%GmhQh!@x3Gur3P`}6X%kE%s=G^bDU~9JfT=71bzXK7jXS+Gk#TqPNk(ABNcaxUJ z6C=5m0aFFPdsgQAU;-$9U-ugn(*gviOHXD#A(-OY4hc)o^1M2&Q3A5Ue&Dk#evgCK zEH#sYRwuz__v30CIlvV*U8M{o4o z!;k}Yr;y}lW#<_wR;g^gkQtrPngj+CV<=W4R^7%(Le(*7{E9;ac$S3O5rx$B7jz|_ zSeZ)SE8&DRvvfk|Fq1?%e)K+jKZ3Pt-=w>D&;%sTmn-Dv8{^BrOx@tiHObHt9?P8| z%6EBV|NfOkiBl51l2gh+*T>%!jhlij)F8uttBikgVQZyOspp z8$F^_b#EU77)0F88T89)Zd;50l|@87$5Snr){bc;j)wDKgy-wT9gpG-D=nXn zRA*P?1SKQ5fDizr<^CfVf=Z7c_W-+5_DQLc397TIjpE}SsGkvgxpy)`~N_VN0H||gu;2x@+70y6|YGDWLX+JZE9~Ryjyc7CU|nBSMBOIS31+P?C4wcICN)^}Sq{hEL}hrn zA3yC{ZophRp`XBvL4w@cZWl`6CAKgMs&M%mwVRNNAzp0CGpw0hnwN>~kL#CUmblG; zbTK>rwMha54UPL=QxU#nbLv73P83cU$JgneM)Ocq8>zAA${E&ma{YaW`b(*L0k4!N zS-t#fI9oVE|NXK{=({xC-=+0#VJd_^Iu9`Yox!%XVlc(m;l+!;DNl;V7y<1{S(U0& zVOZf_eH~*RjbL0x7Hi;soPi80RjoDDeNb)C7!MaKx6(aC*#=`Cy85ruCM-pbVs2(! 
zKBv~+Z~aGuS{*E$g^voQ{OJLD%_?gwOT}sak0L19NzzPEVgaBt#5j^Y3CAUmzBJ^I zg6BwbCL~-UT6o}FM&=0c+!nhRDVclqCFR!}#Z0bbcDuU>9nTFukNmy)s_}1xZ0D8i z4Hvz*$l?6?8bW1fUTcVg(2|{HWi|`iL`)-qnh#R_{BpIX>mJh)R8xuiF6?9v*GBtX zqa|ImBsfYGd&cbw#|Nvk-kLnbhfTNYX*%x-%GbPc z37jbFTgJQ>^{mcVhSjY1G-2GPG{ezHd?va+bxMM6y}J0_YV@qC8X_pjD} zwJm{2V-}^$Lbo|_uPHp1wbgqCV`} z@Z9%D|CXR8>cvDgLnko@89KS-ur`>N?O4#Lr7q8={uL}7N0(MOKowc)F71Q-HknA1 zJVrswP}*K3W7xL9pTuzKo~tmKOR|=Ed{7d34l!W!7jqvJBW#h(zP!0%J3lmN|A|YE zam+dDmtRR@fQVi^SVPLqSODhNU>tT#ulsI)Aw^7~9wKQSBfj{P*8KztIZi;5W>kDN zwvij6{y4Wk6)yrOA6Ug@&K+&0J3SDe=tskxd_X}T$_P1b~5Uyo=wGN0^kPxmDeD3zX6wbyHJj2jPR!(WdP z3>pq3o!6j-rtiK7P+qSlL%CAYwmq?^fVV1Z7tz1BLkxkj8ZG%oPy8&+&BRvNxis+j z;=PSSUxS)O9RycK?c`b^NK6$$`k@8!>8s_f85BasBbnPZgWp58vW++eEEYiQ71onS zeMM98kc+LJxn-=aQi96_Qz;rSB;4CJ!m@E#B$0x2_5OOCFB7Fmo5AW1=u!iuFJl)d zy|FDdS;aG^cG!*N^2qU}@lWbp2QN+!PPIUEPOsI4%{bl<2k8(SWXdZdUQOntV;rUT z4yF2)NiWQd21c3Y1}BAT=o!I_sWi*m22b=vtAx-+{+M|8ODS z{;=g*Uxo{O-trHqU7y6r?cgcK!EIxXKf;$LTmk%<%u7I4n!QLgckmo( z_}L_%500@^pJi>rQwVKkPEB9;_U=n5Ld4##x*n^kfUo*Ad`TnvAxPGIaTZU>GV)u0 z*8%J1V~tSO!QVy}nF94aiD$#|kTdN%~X5*TP;-TxTzAtvY7^N@yV`Rs7MGshK~E zmnrYL5QK!Y>(#i(>?ummDt$bcJF5FM{wjY}#9FLK?^^mtS&Tni?CIWj&AnJZB9aKG zClzRjss1_XMN&^=O-MO1!9L8^S$Ql!bGomi=-+D|8$P>ftbi@vcj((-^c!t2%xLmQ z)B08le*ZZyNLt&T?oWdVZ2JmZ+FVENtoG~vV{-tVfhQJKb zHwvu-O3AHZU?^!}b@AGfNq@S#3HCHnC+E!1?BwGXy=pRrooipX@ev2~{D4s^4U&p2 zm;g?nwe13j_un1Yrnk1Ln{>vQIYWzQ#YS|LRWEO{p5_S7^5yo<(L-06mZ-?5IE2dw zcVB32+_=908`Z|kDpUCvc1)ZKa~sqp_nE9p2uZhVT%?W2<(L!d5%dZh2lXsdgA~Ms?s<<$ zgu=7a4fqPZSlr1;>H^l8(zTIfq6t5Z&|B|;NlRE4y(CjuNcORZfcjRGOc@OcHEkG@ z0R-=m2m96DY-l`r%^`oCXPw4plb&x=yw_k_T-ZGFRO0ZHl+_}*VvJ*F2CwOx`kaId z-t74`s@0-}CurU%6DCoO#$#zwox((h8CY5ojG6*rQ+DUGZ2C8wqgPX`VlOt`@jJsk z7L?u?4%BgBv?&Hs*ZJfk#4gK?Xx~%P-ej}kul$}AVUj8Wh9WM^OV966YJy7>3;*;$ zSmg80X0NG^e@C*oqG#a}DQh!WiD=OQ;^ax+-Zm+p*$JCf(mV%^Wwedf-6CDe!+3zgIW{dRL7T^+fKh z_Thk34IcA;onXlY1AhE5(=K$PfD$IS9FEcCe1gkFqY%ZjUY>%Gq|qXr5Dj5u={*SZiZZ`^QJjgS=3aB?}xmi>iDJ_ERkke4%v9&a?{>`uc9zlzK 
zO(c189wk!Wf%){L5GfKG?J&6Uk9JL;ssTfsE9X$=-GM88#50wG6W~=WXRP(i9ZWdf zOlE!lcwIN^+TKB0H-IRWJY8R3>`}ix>N~g;t@0M(4gD6cv_jgRX{lvV&~kiWdfI7a zO(Ly(AkB$nk>cHWyb%v>qU>!$E9AVfcaCxhj>Y=XTJ~F%ZoJ0O>tTJ#Kfa|btO$Em zZqqo*!ZC*UjBp9Hr-79t$oHawy~d?(>Opa4<+ZwogTP<<{av`@ZNzQX7%Uh=j>vc2 z^T6TUxW23k1MVf)=_6&25t_(q2Gf1>hIg#^H(X#EY$kYZ5z~gjiU&??CEmQiDqKGc z!w4*DTGs~D6Ia6S4>MDP|JF5b-*rE`pn2T2s$(}ZV2p&rM^YW(rp3~2kD^>D1*Wnj z%vc?dQ1f$5-&&;BwGe!`fxaaf>V}=%2^kWG`W`)dmAt2F#Zyn>Z+tGXiJZ^WBvO>Q z32^B!!OCdJ97%5eK_FTIp_MBR&D&ezrcEb?!<6UKVDMvezao+c89(GHOz?IZ?5z?W z@-6zj>|<3qn8OIH|F9^BPS6)%xS-aSz_s=G6L{GC@6`ZO>%aQno_HP?IGYld9y^qz z{?M+-g7%vNtDR{#i(UE(9#{IVB=vL{2I()dRNulqYzj?aTi(f@=+zgkK@Ga9cDg6?NCr$_`-p$>m;26<<-u^@6 zI9Io}icS%Xhq%Q5MBJw#@SD08?bHh}t+XQ89IF;A-u zwIJ-CvoRN}ymhce{+O%trDOaK?+tABb0h0!RfL3T=g!O%LQ|$0!{Dy$dB?g%t4BHK z4kmJiVkq+e`Jif>54C?t@Phha3$W+D`$ERHGV%?BeXZk}JU}qNxrlTeI^yb;K~vpB z+UXZtIuW&csppr__RJnn$nEk1Bwqzf9xZ|-Uxpy-&wxwXT9$i3G31P9s~lJ`7DVHl zml`m<^2QhU2QuD_!tXqjXo&bXGTnc3SSw1vx3sDdA+PKz!T-+vEP(N74cPNYrpt87 z)g4iNIz4~zZ(4n?i0sTE_!>nVNSASTIGyB(X@-$LCDVa>ptMu*FRMiQ12t@w(M6!8 z0cmXl;Coy&bAMf7ycG4_NuF7AxY=3M!S1`s#CwqqL4F*#wZ}(#f=3cJ&RDqDu&sw? 
zWHEZJy8#$w&Lk_BbEg`~(EeY_`7?k{eD9&%T3Hwo_X+3k)Wgb{<{P;KVA{J=;#TaU zXt7^8>YsTg2Y@^QZ6GarC~J+4X09y{i8FfaJRRC%BWNeJOd3PCIS*W-82EMpnwJf6*bN5y5tb zLsdC-iNzNy2;B0J62a_2iY=Tf`DA+TQGBd7Jtdq)`Y>=DkbPoc3 zzGg{G@UH9E22pvdD5eJK35kLjah2m8a)`iQ|JKadwEqw8-XTboXkE80+qP{Rt8Cl0 zZQHhO+qP?!ZQHK9PUpVZFZPQ#?bpoAMt0_ioRR ziE;%I#jSrYsYfd)?hLa$p&*>&?_&SFx%6Mmk@X2 z6aS>^Hn}Bdm12EeVI_1~71!&A^EzLn`1UNvZ&8)6CtIE?>2~jMEv3H)5t#o#EdwY5 ze5@j^yCc*OPK5gU#0j`iR%!c;!6o-b43RNaI)itfDq_WrKuDWOifu9aH~ru-cB{eI zr6#u*n*zr8u!W*}V%!j%g%A2}%{bNvq4%0t73G&ng6a6=@ZJ}8Gq@p)zh}{><7P4Z z_Y-;f!LZ(jq*#P{iN7&U&>|vP7Cm~v;m;LKoG3fDNALXZkcJ6EXQN<-c@`0=`P|ku zNYLTtAU*>V5lk)U*pB2B9$+=pz^c|BIU@qo`mvwd_8)Ege1rvE!dkW?K-~1izD{Wy+4)A&$>vj#!b^P9W?$LdR(wy zdzWRp(+j)O=+Y6gX^DmJ>bamHsK9QzN|BUX&6PROOvl;xp5rCv_xo_;D>N#l1j|9T zM&7hu)&XV47K^BLAv1YAf?t4D-Z-V_V&Oe2V7~XY&PGiTQi1lih+BOsXnuEr>Q40t zCTJ3z0V7EegMyO8$}u6}urKQLv!_z{k;Po|C{3ekA2z~wKC{=ZfN|in;Ypr-IE+I6 zDH))o!c^?pxh=&7j|$|RPzBHX`0d`=1&G_C(ZKe$%}Ei8?Jb@9qz!W=Bx z;EPQ&Wz2oXZ})qw)HQg2#1heN@qDPRW+~u}>wazEw6xkycGhG6#z{s`Np9`0F*}exaNb zsk9z@%OH`=lumhZ1tCnrLAMM$Q96Khf7S3T-oLw#A|cfC zv>SKkVi9;4@f;Z=FX^3Fmcj`cXgot6Dq%z*@GB6T_LJluUZ0nU8#^i@hr|qFE1;U& zgqHiJ$EmN?u=rcdG=c(f6Cd?(cvnpj$Li%`wIxBPziEB#Wx-k)Vz1V)Fm1?lpJX=g z?J(7E#8mj-U|v-NSgxNnP1(PB4OD3nA&p>tOc&!wHr9AS4vwUcJl0bJ`D%V{{vzK+|_%^gv{Xo=#=kk1^fG zO}?Z3bgYL-Q)W1mN1BZ??)Pbu(7-<04KG&5KR|sKpj2I<0JDiE$oPkDHSAT=xLm>> z+o(d!2EY!cXcGE(5K27Xrh>+{zYa0;$qQVNhdlh|Z5f8g-wjxT4PO)}tD(#yb0{P* z&{jBDIY;Hsw~G%NFBnqe-S_C9=VDb|Ma1>%2>>n2T`;&%vmh(mY4 zY#nhCmi`jMHhC^pcManJm@eSg^*5G9247RBh0I-4i9D}>zqB~x{7Jc<{r zE`e7LELZdb^m$z?xQey5w7SL*3=thK3#%_Yoqkf}^~C@64{1&Eahs5?lg%}}AmZ9s zuTYs`j=uiq$xHMWmro47I6|kiA9Bt^dzC95&F!)qtxGi~x{RSv65Vrpn}~3b6avo5$84;NEJ9kyK%B62=H)VWu*?wh0MtILUX* z+pD;MO8r?Bl*l0XueLh zMxvFEzt}mqW0D**S%JlA$z+hlj|q&-C*`<8qIt%e1;S-ZA=q<9M@-SJd4Nxm*;vn- zR;NXTELyCqjN+)0FAi!)d*$4A2z`3eyz&RJ`!Asbz#JP)go2#-`JaZnrU`?*sl9@j z?@v?u^RSCzdE}Z8I0}hE7j8ZDWMwXLv*t3&6Z2?Ga{w7`r&wPp3gSHCdH;)*fMG1; 
z)e+u*jRXhj;1M0_f#+5`PnYPOM|`m%comeV_39PZvqjdOcvivIiO87`di9dZDw$xo z_V_t`z&Y)RtgSK(IQQ^$Y`m)jmZS{a1{kYz{@9q`>5!Y{^B z010=XfD@zA=1wv#YuvH+9&{IY5$fV>`2rfuZr08May#W~n9H{$M6%H6lwkdD}E!5ZUM?l}mz`3CA)X`u3L&X61T^I{s7>=yL0xK02`vFvoQjdBbqj z@F7p4Rf|kZ)ksf>);iX#>PBo6ynku19In$EZ(|G06_HR(haB+nX<1>kJU~8l+`^<^ z2i3*Fc0-uLs-9aYys8^xVY-`jl+M=TuDHcVB&+>TX{yeKKi)vRyHBgR1(krhBO zDd$y9c@Gyd_JJLYxYSjPRU9AX?-|xn*OTm({_PErj z_bw!fnq6mcnCEYlMbJ;bVHT z&I|*)@_cx|Y$uc*N{!q(_jjh)s)F6*IDuM`oy&Wduf7Z`!9I)Pg_Ux6+=Qcn28Pv- zzBgt{lI&fC%bVzZI4ql=3PkPB{}yRX`PEi3HLQ3J0+j5zOZPe-QUCtx=Tq*Y7Qln5 zA~;qI9r<-eJYTad$z6=2&Nd-qaGG3w?ccun(;#d}Yi|vgyvv}A5^Wj_0A4H>$ zkfx=EL@HhUemCReq9r1&m8!iLIi}2&o_%PTzE+E$%6P`xI0T-KhQ5MzQPW)(S>Hf(Q;!?(I_UUe$6{FBUJ58{jGCwZXW7R%uT`}rckJ5Y zY|eCJi4+qS`sN<6+YYJr zk8(pfmFyCh*+KCA$av9qv#{d}sg^MkDEDmnk_OW!Qka! z!5ayzYmz4f4!&dzuX|E!5=kBxK$XebfN(vVQMbfJTS*9kI}6ODC@3PJAn6Ih z&r6bmyw3WG;}!|J!1Q_{ORP+EAlipag=YG#u!twfqNvfbGNm1zYQkaU*nR{$_WB*! zWjOUj@RrCnilIV%T?&vVdBKo@AO*Sry_Ph&Mt#g-`1uQ=?-pH3?a~0>tNPuMgVk!= z8XYSYI_>$_sbHi`3qU3@SZ>ogOH(5%$`3l>Pezt)wP%FTq5i}J1eX)=1{{odhbU<=;OR`*0EOXTs+OBYcEr`Qz3g9QQq zd*p;!7&9AE9gKe_fJ)3#vQco1UPKaZmRph)1!w;`1Vqst4t?Ijk^9^ot9Xywe$VP0 zi|^2W(DEgfVd)V?{ZUsrbcmoTDt}U>I#;Ca?`khEh8(g%l|{vhHJuYagcmt04{uzL z#P+V!4iL@^TY6h78CyZjbA!0rs+!Jp)=@U=5W0;WzH;r{KNQFl#^?!ts3 zofCg!h7%Zv4XF)F2V-AOC=uA!9}IgY{k3(aCQpqtiqi&Hqt)0;&WhqO(Kp_DHE7*v z9T*0wZknXwtc;^Etnk?+$DFZ-LN^w#3?Z+zZvY|S%a2P4=uD<}yzlR4IHa*^ed0VY zQboO}8ud0(i0Yn40p7&<1pCTQ!eHk8s7||t6vYBBBz^3~IhKBeGhS6K$qx*>arWNx z^L>nl+)2Vq)QMaiO3r{TQkvz$ij~Kwp|0F%($g=qKVr0V1j~Q{g_ykLo!Cz5IqB4M zj@^FPiC4N?9wTQ}`EjuWW8%McPueGJ!P$a&zmo&o_B$PpT-Atgit%B9zp_-SP z3Na&Ku=17xh1-fj`}Y5}>pEH!fp}~+{S6eM$Tl(Sxb!$bEPJ0i3A!2`KNknhS>0gW zG77#;l+F1LK#xolR*Ej?6xu^DGiaFk zfE~rtzzDAF&D~Vy*DJ~Iq9A?fJt>30iYe8xgz1PXEo>cpq9;?@Ft&C%b>-%gwe~Gw zIziP{-wu`;mUxX}k;57Q3-$U_ZYECmu0od`Y->duUV`LsxD0bgA*~?WM9V6?7XLVG z3@&HsqO-^VgA_5MPvkI3$ zUsR%b`o#-Zg( z-q$SF6TbPgrAN#b)&D%ELh~rJ5*$%y3=|6+hLK(HX@fHK!;= 
zCJfQ@#`?ubkLeyrCS`kSuP|D6StQg^$f%^R6$DHeOf!BhgM`K<@Adb%Ra>brSYc}Q zz93KT+)peO%Mll?XTu4PtDblYv2ttFUxqCoXGKbd>$4%%+jn2!f=Yts&9snDk{gCJ+D3mo^eSalS6 z2`@EW2RZqKge9lLa92ZnhrK@^i=RGz_AQV!tfk=rB}y_qFN=Q!wc4RTg?J1)GTF!2 z0*0tW%UVL*_GB6xpKEf$uK%!M&4#&lel=eYOI2V@;x2#i9q^H;Gh&ZTRuP(=aeS-f z+K*o`yWVukMKmg2ws;lP;b2Vl&WK?u1ycHe2N;<4Y1VG@IYLu)u%pW07wPhP$DK3O z^!`1_d0{JmU@|5MF-OTr0%CM~B`~}Fsa3)ErOR&vW^HifW<{D*U-yD!%gbbHjv(J|Ceh-isQpak@3USRzAZ>k}~_54xSJ6yj9dz z#iyvtd0h9GE?g1tZ1-TJrMYqSM>7^TbaMcYj4dUK#nlot23T?%YTzG1lib5qWNnSn z0`qdRO{JYM;HJxhlp7VdHncG^aFEsu@16QVpbOW@K+dX}>4)d&e9UZfZ{Y0)s7BW- zGYw5?NmX>b&$YnR0Xo7_Mm9wrtp~E z0DCv;Giv(|$(bb=8w}6o#YKgS{0vLJ!!j4CbY+8R5(UEqc74j;T_?{yB_M84c8xJI zhC|$z)W`L#Wy!6Hq$N$Bd(W?|%A9xvl-lFjCslJ@#NPf^v^Vm1C{ZtRn1gBy_#(#S zx=hNbMpv06fx1)S8Ts~zm+|{`cheWJqhyRp5@tLENnLa6iy6jYX_uDDLP~Sh%vv&* zt7+M)yt#i1jUZl2r&c@AZxZb1wC>M3lCBPNMcqhPZyjhdki9>uyefVm9 z^hVMnBEGlc~0A$q@QzWGVxk$7Dfq zxD#KzzH$iCGKl%DEHCQSjBxh8WkOQT9Q*OPl>$rRJ_6$Sh^*Ma#4+E==pQQ9YyBoj zQe2pUB-D&I@JG1}!vwq1S*SMtO24Qc*0jN_6kgm+Z%gSDhx?uNXm!AFrV0W77QU)F z5ATm>BmP0ap(*3)Mk^9QK=Lt-M)i2-GEwlIIa(J4qY9goa1fhHLvm$ zpj&?PO|iPY!iw!ZI?q>`3#t~Wq`UY-W{gLzy|9vF$oG_<$*w9Nd?$=i#N5V6#QpBAk{3Q|Z~oFyyF^K>%g zFe42)_bL;ne3S#P+*PPzQ+luk*RXG?R~_)wG=Q{G83Ec0XD?%)H4GV;%A!&j1m=H8En*QaO3D>B||0dO7#w#a&< zRCWYYV|7|yw{IFmZN>!ZNXkbGKR*VgOKLs95e8}9Yh~E|$H7NigsH(p5P5}$$94W3 zDLPBHCmiE7{Zn*y@sO%?0uHjb;_DR}?j$!Xxos-!gSlk~?v+(5?Zf&j$n`0(pTIq0(nb?g{6y z9X)f+ILKQg(a*H(3W4`~H7_R(mgKfx;yAfqLl&h1il)z4IoljL(`1=w%z3JwqD}h& zrY&!`ji(|Ptif79F5kjS^oN-%S{cNqpfXK!jb+IvoO`v8-PtZX9(*||$PO)RV^D?~ z1X-Od^D@G?1GW*Y_m>i~;BK|zYl$O{5&}58MjE{j!G0TSbSuArgkJYy|0SBn^1qIz zF*E$%{>gv-Ii~;T>Oa~j85tS=GnnT3&p&s)!bZ4FNXY68FBBHOC=O3384k?ckHE~p z%mPu~KM#wPh_tvUP6e^JO&~569!Nlm|M8LQwDb6D<+GdFVw(H8vAXjbYsGKn&IubM zl2Ra!oeqW+fQSknhyvg_I_Y^p002P(2@*ge?%^S`hhet1%Rza|iva~IMuhScR1g^` zz@V0n1@agbE)3ATodbA~2;hJT>YxFN06_oID-amiJ_VdUU z9UK5qY@-0z`mgpO7X8Hf0u5aKa_Uav0=)of{-`}5gZumX{1ec*lrT^r?O%P{ecU2O zWt>@TGk24Jl^ywEswl_n0_bu2mnToBXp@qT03N7yg1!ARM-zhlkjMPm_e9kT0U7^J 
zzEsNinO)tB0Rj2F2qE0fseg=9?+*e1{jPM7ienjjJ(K?Useam}{pOAPiM;E%{On>{ zv~zR+wnBf2y!l-rptm=@>cP0UnPFV?L7LY0y{N#0g>-d&wLQ~;Iko^zLU^6}wZO-& z_czLS5{`ma##4x0z zPp3MYB3XDnW*99uog7MWm4&}}!Em6fVJe;#!F#80r^d4@#8t$W;;nX{BNWoom7%2YcP)vC1J(5P8B|+weC?Z2Tb2Gy?aUx^?5Ze4g zN8JiOjJ;mvv!xDNRxrVLxN9H^a(VBq>|2GZ5tr9T=As5 zpT0>IqNZ76T7ug>W?5^9Cgh4%0xQ*;dOqWu2VoJGcR)oSE8Q6Qb2Z5?2pXJ47`2PLn0@5#ECU@V z-S*B|JFpskBZag4vX6c0MbMP<%&tKmjBKL1MQc(v3!<8tj+%uf8GCyScP-kWhH?7uoNL5-|Kz zZ0AOxBjDkw;L6St9iS0bkCeiR(T1JH<*jN)nI_EV0p6FqBKuwz`mh0>x40|=;iQ9d z1Wz(*{Psy{V@vw=DA{pdx9v$+Ge1U~^bO=eHCcy>8N!UO<;|N29kR-S(rUM~E@B|% zq*o>3PPyM_$2uKy40wuF_9zW?FAcb3Lv|BMXuuIrf@~+l;x7A~O(Ih>WKt)Yn?nsj zERvj>Wrr?Rhac#A)`Y~k0CD#KoC$^Gg}cxgZdk?Cov8M-I4|{5drqSvxmb5lhv?h+ zwP=@r1mm)JVH~ZP>_vnFtX$u>fR4dV>1>IJcIOUK-oOm&RV0I7f6mE-PpH(nc>xA1 zBoN~gK}2laT^poT>?NUGUtGtIJwb&}`s=uX>E0=N0hH?`eBRT7lTf>kgj>~mL)&dl z*Dc=wUoYW2ZgKWq=HbzR;EHt)n4lOuyW@1TuU8r>Ii$_NOZiS zTXSb@j06l%-MmcrtRRsFEGK$dE-GPe$HB3=HBTUIuEgY1MvGJNF_r3*9TZowtwdBm zBhEI_@(}nQo#7kIob3*36Z8hq=K~a~Ly-}w5ZpTkK*z>XfgH(LJTdVqmykzrvGLpo zgeeQ}5qnY15V0yRTb1H$(=qvz=)T@@$w9Ufyb<~B(_~yFnz)FJ4F53fVx#lxny{AW zCW#B~GLHtb#EV(d2%-CCn@r$1%MLCK_%Lt0HTZy~GiInV{DA;To54s>K4^rY|+xBDL6~wqC zDvl99X6+6TiejRpjGQV&yT`(Q3aeDJn^PTcfDPeOTq#ZlEHsxlt&SCcGBxd%N_nzP z58Ud~SRFre;$nqL;wzi&qBw4^WXtmZETJ^z1&Pblj3=NVE*xcTe)4(h0GCmOwm7*K zgzLA>WDRfZ0p_5zcSpV#Lmu+ZwIxzl2Jf^%aMfd|>OSl{$gpomQL5AXO9Gwz9kM^R}&6njgR>o5=am-Rbq`JeU^{Z!opr_l&brM;xlN1o%6y?zuomtjqprGQhXT1*PF6-qkv zT9m}aW29?vcTQSdAbj2ikrM}-Jfj{A&*l&7Dzu=ygeW>azBuY<8LHc1fr}ki>>oy1 zl~Dt^##{PW|kAKc0C8c}4ol%LS zj7E7ng}7wIXv$h0GW)&|N*IXl>ydO|e+`?*CDVJPbO=we@;SI^IklYbx{cNI=|*+P zIT7)WItRBsOFzCY>;9}kX!04kDi7oP_;;ddqz%GRuN074@GX??GZR1x+VwO_%|TuZ z8;Yg*BMCu1vB$_J2s_&Kg?)*uOWdjVO)ximtl_SDW#>61qS1T%pQEz%s#5v;BsOFMQj?lhV zqI3PdH-Jn7tpEchQ?c8@yjDjJ;Y(^FZ+*kpX9yBmp}hE$N^^pR)pfq|XJ;z#%cw+) z5*az1#InD@9V@)5CvC!c$-Yr6T{n+f>PN zC`a@j-cVEwSfMPb)ph1^BXjIZ&CjV~;yrZslrLC^o6aCaA14J~X|j(fHnF zeb5Zmh{P4S29qBq@}s}bS?xiYY)e}5pYt?`@s8~0$wTHGdm4(}g%zaT2Z=)J7ud!^ 
z1unM_+{FZ%CYTf%u%50T9-GGoIlDdK7uG5dLq(Xvr}ZOr>rM{CK(UHW?|_o5>U}K+ zGKav%we~F+vZ#_FPCi@!09=**Q)E1_MCItr8cBDl=0BrFYeuU2pG>jxk`*XNUUL}Q4Isr;3sB6$&Pc)a zYW+I9epGvXRijoMADRQ}89Ov*BNLc}Vk;06<$Q0xD8deqHs7s*50xvzp$K@sAM_u> zt2jz`gr2+HJjymQt;-8SHefZkp2Hm%7h+S%V0oxMVP0Y&p!QQT?*`V~YC#pVf^N?9 zauM?+yj7GS8>X7EPU+8vIgneHf*bKEU?Og%5o;+zFanfCZN7KhjBWZ*xI8mD{WA0A zMV5y|zHARn(?yTZ$rBfvYwoxdD}S6D7--HvyplZB2nD;?x+`H<<@tSzKBW=yBU+9z z?UIZns(U5%V|LUm81}QC;L~dSHYpAfeI5HJ6i$X4q{Q%071eqWtFZaqr#{magqIY6 z?6wL*{Ks7iT|uagD&xtk5F4L!^f*yFbDY)r`I@txp~4uhSxvI!N)A{q{2t^0VK=;` z--Ye=Gu|8CbboA<&`!METTCdH(W!N7ny>0>Vr%|Y=wr&g*Z80=?wYU;{{^wF*pY6= zcY?Y(0;!*yEuP{U8CZ{L9a^cS_btPN+@f*AnRmIUy~MiV;-7~-cMMWiPA`vAavTkr-?ou>XLmSlZrO=EO^En z?y2@P^x$SBc9Nd-GF^5U_{lVjKO{eR@hb4VFb(Un&yBcCW_;+DMjj3nEHQVIGQhV& z*ih0kF#F0*Y4t7FWa73(&ZU|%isH?+|1sashJjYbiQR$J>Pv5Vb*l9tDu0cZHX6K} zRVKBXkJk3ES)*Rb#I&(D()m-SE*#0md=Hg;L4ABZNzhb+v=bZ&&nQS$x_D^0qx%$9 zay%Pk2R9biV-B|+f^pIXs!vF14n@_WqhgT>1?vUch_||$1H*4;#Y;=M7{j|2pDd|Y z&9>H8x51Ye)iGL*#>OH)!aspwT-?#E>qMLTatsZ$r3K}hy#xQ}jtQFQ7!GLVr>;fS zh}CwqJ!RQ=)B*w@GtqgViN1v`{|AWrx3&#=E*Ay78j?h~RnjgPi>idm{OqVnh@wTK zXMD1OLzeO{wr_=V@RH9rZ*+i%$k19vPh@XC`8#!x^K+)9P(qb*$kmn=qM<L~Z_G zopaD1)IMMF>=1nTIk|y>tb|1C-lm~ew@}fq>b+j%2Vn?d%y^wPVNO6Z?~+WAP!scP z`t+;s`p0vn<~#ya#N<~pp=T8?5O3Hj6;o0$`%L9#6Jk$*%10xN_0Q$n(^Oa5I$hb` z#n0ok>Eg^DFXBB*Zr@y>knfZ;#avnurG>$Gn^t;=DE1IR-`2N#E=g-fj zoIeU3!%L`)i46j=`5^XFxoL!AfTWMkuz@0}`e9-kg=U0RMlO`I3CiCeS>QQhg+>v2 z)n}!BDQ##@9mB_k9z5Ge|43v#nWXBvIGsP}~ z*%y>HU^HLY?g0FMC6Oci$Y(x|_uL^EzsCs{{Egh={$8FW0;%M*dJy7-5+yqclE0qD z^3`1Ml_gx3t*rW%Dq8tbw`7oe63=0A+6rE>8MCVOqz;PDi!Ik#4UP={W;@mzy4rqC zygm?!oiG#}Qlw=?ksHUyL@ERhI#9ZG7Jt?NeYQ3@irfM~B$Vl~otD||uU9>CL5)JtJB>$JR+rN`!^RFqR{j#+l+M`d zrrDdJ{C=XA+8$0?OATdHcl~@Da`e7&)G^Wyq30CUvfv^mb0p?3J9Y%H!r={_RBOU==aHje}wG1(66{bd~xU&7X&@a{BfDjBCT+$BS!ontj6|3ah&tAzDN*~&I*e# zS%@9`0}eM}8J}%tStXkX#Mf%fCbiIsNf^fd-H~jmY^fMyIn_(^jb`SqDxgL796|jz zsHGwZSCHH-!gcCJmAiu=(lZ+!8;l1&GhDRu5>QtH|3OIS9Hdc+{I0=HR{dkS6A9X; 
zjo%Rayn5Lq?)4h+=z2ufw)LTPx?)w!Vf)(*y?2yh2jigiypby_>7o1NV`o@sPrjjX z><@Dkj-qiKS2T8akX5fpC!K#;D4d&B;@pM(tCulNH4ekKG&2b+MybG>yB7tuqUu5X z@rSO2=IMC)=p6frnK7W=2u+tOoMMU}NHof-eQAl`orU`DQ$Y&Kp{ohjxfr?Zv10kp zo2bn?J#Geic_=+bW2s|Xa=a`MbK89{Qw`g=3OVyiksWrH$y_E?NJD#Z-Lrs%SA&?0 z@qy_lB6oaMo0>_ws4Za!d;=t(ZZf@n#$iCu#aeEjAQDrT3%_8NUQtIPP-ZR?OE4*@ zJqRBxTHbCPbNFKn)08xp&v>1Y(&N&rH=?+#25;KSpCeFo0hfKr8Y5G_gD@RdHnLru zn$uQujhh0?{1u0X z-~ryUxbSM|I^I@BzoE;MnAPJ1`|?Yc8;nWry|9=QB8#X*lJ)wDs}=isv!K($*g($? zj6_Da+s&@z#%0r30Ag$nHk({N{e>r|$Ezv>tDNy!zmRq@JnFmsbi0uDWg(RokUwh9 zyUzEaCiPBoae#6S@a zr}T%OpDkZ4AL{frjf^3FwBOs%!&yElHV0yYbOe-NtS95OvY{-G@G?aE?;|$0a^(lv zp(RAcX$}K3T|CiRiF-HNLrd$@y4?kdghnsylDK54=?Ey^-5&chpS`lDd98o-iW%W# z&W8+wu17glS`)nrPeugkk;a)Smz;ujs%kzLD-oT*?SMaS4x4&?jnM27&=da1T@~-Z z!CXwa`SQ`hz2H)8#y0sO%pjRnOheD9!_+4?MUCwiSrk)irk=R>EciUHaJy37?toSr z0l`ZOll1M(#8sTQB4Heush6|m(s?srckQN=Y(kHnA{Su9FVt2Fo%t6t33753@LBac zGHSTO{lK{!R{Yry&DVTfox8@OMFrJI|o0%5CSb1P|_X2(&|{Q*a7rnUAZ`m@d58)-3;3dO zc(V1~mZx!3esck#9UTQE2s4QxU~i1p{^6%MkEN}S9(_Q`*Jn{mb)_s8H(kSDxf z@0hQu0wjG<+Bg$KwUp=!wiC+-2&<62=v`VS4i)s~>{){9gj2SsC_?RbFDn`wYm&z` zJ)uA{6pplJ2NHVeau&+x^!XLd2DOF1 z*_iHM_|4b_8$qeal#fk6tM)Log0uwe=(D6zjjVjLNqg;yS#Uo}LqP2HhI+iEL^nlE zX8ec~vH{-}=_vRitLf}xVp`&}C@&DyBNg^?;6SEe>E$X^viyJ7iB_s83rIuW5RLMzBG88r`D&k zgMM}x`~?^p$Oru|V?);e-Pn+To#j8}bT%pjwg(I--KW&uVUpJYhTJ3pP=xD{2^`j1 zsLdPHB&FDpNGO5L-=EKKr0v#T05N%EbIdRLH@@AqVwKjerYL-lFQ&6ICA8BN-yOW5 zZf+V%+OefKxiOS7oYT2^YgU^mR(~5lol|NqD!uBDRCI9YN-))TlbSv1Eae9Hu6&Jr z<021l>h_AZOSqg>cn=8dDsS{9c4wKjjN3#`(7u_jJ1j5A$?!N&TkET*nmGqYIWb-p z*W3}>>Mk0upX+g#Km{8csXBd+dZUVX;c0aJvbmkKmp{qU=hok5WOKV-hAg}xM(J%V zHY$D1UfUM0mOdQ0aAAxY@MzGgS-dtEKKyp@V6pu!?x;Qi?&9mg zauIZa#F~M@V|m_f*knHT4gpAq{rgoh*gBT|ph zu?n-j3(Te?;9(_1UG9(%%+8YHW!=&ulVr2YkoedoKeo!}PeoU*#6hJ{O9!i{}Y$1 zR;p#oy7+Wk67fP3oR8Pbd*c7Z#hSd*`o$FKA1;<3CtIkdi9cH{1KT<)D5pskVS1y< zr8s7vV1Zhxm&)@1$_IQ7Y_s>ys04-=nmp{=-Nu#vl;1)Yn4XT?s!spz<(Gg`x2 z1jJWYNA5E@?v700fuY&)e+*=^Jr_WxO0oI0Bz@2OaD>1QJatgZs4d@K{fK7dtmkgN 
znCA!H>$ez|aIS@}eAaDG)StV7)g`yZtuUOA1}dY({)apCbOEY8? zn-7C}Qas5=UDBzpmJ?*eMMm~@-4y18LDL+X(~!mmkS-VahXu3VMc-j@6ykE&ORqh5 zTwSdQmZ2;@lOQ`p!;eK}3 z51s$v=98yn>y#3ePp;z0L(*)O+o{j{RcdTA*zM`#0`;`E3BJ(FdRr)rytZ=Ynb$T8 zt!itG_vmmJOmxoU4*veJ{J*s&v;Qw);bi!K!y;q1$%W8;sa78ba6Z7wPohW%16*tj z%=rW?b9Yx8JA4EgZX(nQ|Na`+WR+!;AQZi9-@(m|6Y7@d%Qx%Y=NHTTC=#i6-_9?m zlg+x@8g_6~yX|(WgCnf3pTIH#{hLHkj_t;<>sNx7mNyqZtPKK75UncZ>`YN-DP z5Cxzz$QqJA=pKN3AK)G!$W9?(ey*iQ6nH&YJz+oK{1QM)izWuZQa{rkfIKfzsirZ; z5`*?okXVJHc0pS*C0!~i6V>c|xdCCYN;l{*O=#27rubcbzCJaWCBXa;9Y8xP0J!XI z!=JN*`m=deDO%(`K=(Y-r}Qmm_)OA#fqIR<{+dR{C9t7n@SJOU+wp($*bzQumn;)RSg4lG6uty+Gh%njXN9ol&ZHaYi*P9PNp> zAl*l6YR>OEGhFh%`f>ZwIQI>yj+(I!bha~$I%BY7drH42nDiBJyYeaDw=OSz?CZb1 z(?72&FMVz6zdI(spIo2(<5$1=jlV&&Kk#Iq{K@OTgvP&NWS@PE9;v0>GvaO8wyZxL zhm@T?*xMeNi%EjFs68m|#G#Bkxy?gA{yI-$cKgrwBdc*T_x!c$7kK~W%g6Em2bf`F80b0cYXw#+9{uVd%Y#I8vVUEgsyn^dJw-jeLzQjagvm zjeO%zFHRo5+q*~U|DVRL1FDH_U4uXnq$)*FB!KjmO0OclDJ^tJAOWN#gwPRC5v3}< z69kkl2vQUR0s@K%0;1Fdf*@6jv`BmLo_pRp@A&Roch=0BJ+uE^)}Hl$-}ld&JzK!_ znQe?(Yw>N5-Sr6vjvo&IEss@5{DmL3t1>Mc*XK5C)@GmcIk`Mb#S5qByf44+a`HJl zpkI^AI71AxW>Gc*MYBu^yj)kTyX-Xj zPzr{m1X|WJj18y)0F~CM}f^LUfLWu zb1C<7rH4ks48@J%9ZD3Bu1>NU2Kd2$KJidmT`Bvt9MU@#F|mX?UB$Vqr+u4lBRP9E z9i@iX_SN5yMyZhjQjh0J4>fBqGwKR0m7*sEMfyj0U3!@-uI4+l2;({UW3>6YBr*Y; zxBUv5(^u)lDmi;fi{L$@6uhURi*GmsW=UsIsi4mK~;*1!{n-aR-@M+MBQSVaAfb#<=oGx zjL|gdBJ*06?jPUiGuy1MXdl}OR@``==z^=8{QRhu^ik9J-OabJ(+?Urs>Rz1!#R;9W=Zpk2*iMrm4KjI;tJ9 zeb#Z>A?#?BpZ<2z7Cp}h9X?#;R}t24O3-4Isc0Kp4NDkSE_1sit{=J)oi}< zLdaVN3UoFUa520sf4M4~b&^gW$TQkz@0H}&wK?~!?H52RwB6XSulcyoaLa^y zKl%3iI28NXlb=}W$9xl0RK|R%rkmH59IyKEG+T>^;g-%=TSPOOBd*{ z9J-=nZpCB}SZBDni@8w$0J#@!2dG?;gd4S$q0DiQ={dSilUyMHU>so<{dxr;G??)Ff zNp})T2E$r-67varp9gxf_;&|Nt1z z$VDY>Z%u0te9d=N062&Mj3 zvbH3xcr&EUsC%pES#d$cEMf9{5BBk)M3i3ih{E#&zVTn8HVq#xyjRP2Z>ggs4A6X~ z96!2S@{aRpuNlTltKQ0Q>bnPOU5)E1%snxXe^9cNrw}f_p#DUFB2nvGPV}UwLR9!! 
zurxr>Y))p~A5Zq)%cTD>4Q(M)m=IOAXbY(hSARW+9PaUewsxZgUf0@JetsvI z*m?3@yj@^G7BMYDjhYKyc63q)hmDomE zM@?wx+p*Q1Zv2>AE4HH1$NtQq0R+6~IZCz6LW8$&(A3k=dl-z8##Q*7Gc<2YM8rCW zm)%canY~Cm(D7LzxZN$@Hn~ZER^&xJi>`DqtJ2quY(KL~WIVP0yP^W?o`9|DvBa0P zNM!}Xg(U{4sp+xwL4ocV7(m~p(#(<>9MX|}aHTDFzgUyXuKP{(MeKQed3MH`L(UOP z02HCZuNAD6s4W|t@#2~bfTt@nPQDIG8L`VD^gd}W{I%u|*RM;-Hc>Y&cTkfh z7hXN^m4PW0Sx7f9KhMn zx5qOst0NMz)J37xXhRr5=q_!s-hP3T>DB z$dWj}jKe4ot3+7uW)@@a-MN>y2=@+dfQUcU4_4`X3#=)h=LV(KcOYyeTl#Ik7=L11 zueI7&^1O?!wF)BEcLdr<9xgxOyo@fkuRRuX4bRiQTq-5_s8{eTfh)4UVHl^SPhicPjW-W?83LV`SN z`>z#)fM6(vqGfeFya;GK50nS1hI}Myo75#hzMe z4s#&TY9j{u<{$b?ib)SSkx+-ON<8@^e!D!&$JCU6+^^&^e@N4rtit&7rt!BUA8h)u8nDK(!!BAP0|w3q)lNSW>~ z`tr3FQ>(F3@?GVRp27LU5FG-#SQ6JCK?*ur;=ujXeH7C@28TK0ITOOq8c!y6d+7|X zlN8kzBL3eoz6?;v-zrHKJWdE@N_PY zNq``D0oZt#T|dvZ?s)=LndwTR%uDZA2cMkas8XH6?utZ-TDHiw4{?##TMVj9Xy|wM z5<4-4Zv@Xfiz=^)IZ3`13{_q&zy5N_X*?Ml^OP^x+E7|iZOmrYK$Qi{9VK(lVT6~X z*tt71(PPQ>Ve4}-+6Alnc=of8$1omw(u7n!?H&$UQ@tX2(T6)y^kqhQ%hng;lRHFs zQnex@WDOU$M*kST*gQ3i_ij`?hs>WpXTk_8)ixJTy)0zin1C{|CAgxQIq#pz90Uq- z(vLzm&N(}AND7@J=n8Y;fXW@=Bq!H;;<`Z{r+Ps(McCWqdE%}{;ImW zo7oC=`n;@wY?7Ghcu8{pJTMP3HCEFNo-1sRF`pup=^{P{F|$-F^euMmLDj#ihIdP< zeTARgar(jD_Qgf*OJG>9TUh@(dHV1RclVM-vAc3|2j8Rx0*cuZXlzy{$7*@?C^Zzh zBT||ehr{LG9MKo$WT)8yNc@P#opFaPcgLyt=&4Okugws-YV5%9TC;DqoCzlU`cQjo zqk?7Qd_@j9fGjgTSDmy6K8fr*I25X__E}$Es^oRrl66KYyvT|dz+xrC8649qjk@7M z!CaC}!5FKk`<|70&kPI?YM&=@a<}O-%BXvrxzDT>GwBY0F;>l;R5kh*rp9Lpo7sDO zPnMXs=sqNz7ub-OZ*N$A^WerpqgF~Dl-BKp$n?x+{e$j>UHa8hwFan#r~lN;5Uk+` zRrPl4$j+tdZ}g=rLdPuiH#Zb#31(Jp9tT76_-T$~x&ZRtQNWYhHafmt8V=YRYMDW1+T%|xRQ0%G_1O$PC0gs)Lecfk)zSXjcktAiez1zozy5I}n8YMnu+TXw3i85Cr-kNa#;8x`xDi z`vFx{C}V>4^7r=f1HylG1MTYJLZA!~_?HF-$|8VvKrj$WIUInp<{lwvN?KO`D$q_v zP97tNRz$*~7&KS`i9w=3C^Q5F1%Z%AFa+fSS5r|?#JGUaAbFUAtDL-|JQ#{`LAfH~ zU^o4kZ{R;RMn3`)PYA-JF^mwf N9HX$Xwy6%|{{U$S7{&kq literal 0 HcmV?d00001 diff --git a/src/genbench/tasks/nl_codesearch_clf/__init__.py b/src/genbench/tasks/nl_codesearch_clf/__init__.py new file mode 100644 index 
class NlCodesearchClf(TaskDict):
    """Container grouping the nl_codesearch_clf subtasks under one TaskDict.

    All behavior is inherited from `genbench.TaskDict`; this class exists
    only to give the task family its registered name.
    """
Respond with True if the code matches the comment and False if it does not. The input format is defined as comment [CODESPLIT] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md new file mode 100644 index 0000000..8193db4 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_adv/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_adv) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_adv).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_adv).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_adv).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_adv) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
class NlCodesearchClfCodesearchnetAdv(Task):
    def get_dataset_raw(self) -> Dict[str, datasets.Dataset]:
        """Create the dataset, adding one negative sample per code comment/query.

        For each example in the train and test splits, the original
        comment-code pair (the positive example) is kept and immediately
        followed by a synthetic negative example: the same comment paired
        with the code of a randomly chosen *different* example, labelled
        with ``target`` 0. Other splits are passed through unchanged.

        Returns:
            A dictionary mapping split names (e.g. "train", "test") to
            HuggingFace `datasets.Dataset` objects; train/test contain the
            interleaved positive/negative pairs.
        """
        raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source()
        output: Dict[str, datasets.Dataset] = {}

        # Fixed seed so the sampled negatives are reproducible across runs.
        random.seed(42)

        for split, dataset in raw_datasets.items():
            if split in ("test", "train"):
                # Materialize the split once; repeatedly iterating a
                # `datasets.Dataset` row by row is expensive.
                rows = list(dataset)
                examples = []
                for row in rows:
                    # Keep the original comment-code pair (positive sample).
                    examples.append(row)
                    distractor = self._pick_distractor(rows, row)
                    # Keep the comment, swap in the distractor's code.
                    # Assumes inputs follow the "comment [CODESPLIT] code"
                    # format declared in the task config.
                    comment = row["input"].split("[CODESPLIT]")[0]
                    distractor_code = distractor["input"].split("[CODESPLIT]")[1]
                    examples.append(
                        {
                            "input": comment + "[CODESPLIT]" + distractor_code,
                            "target": 0,  # negative: comment does not match code
                            "target_options": row["target_options"],
                        }
                    )
                # Build the Dataset in one shot instead of calling add_item
                # per row (each add_item copies the dataset, i.e. O(n^2)).
                output[split] = datasets.Dataset.from_list(examples)
            else:
                output[split] = dataset
        return output

    @staticmethod
    def _pick_distractor(rows, row):
        """Return a random element of `rows` that differs from `row`.

        Uses rejection sampling — uniform over the non-equal elements, the
        same distribution as drawing from a pre-filtered pool — so we avoid
        rebuilding an O(n) candidate list for every row. Falls back to an
        explicit filtered scan in the pathological case where (nearly) all
        rows are identical.

        Raises:
            ValueError: if every element equals `row`, so no negative
                sample can be drawn (the previous implementation raised
                from `random.sample` on an empty pool in this case).
        """
        for _ in range(100):
            candidate = random.choice(rows)
            if candidate != row:
                return candidate
        pool = [other for other in rows if other != row]
        if not pool:
            raise ValueError("cannot draw a negative sample: all rows are identical")
        return random.choice(pool)
The input format is defined as comment [CODESPLIT] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md new file mode 100644 index 0000000..aa3720e --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_go) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_go).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_go).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_go).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_go) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py new file mode 100644 index 0000000..9b880ec --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_go/task.py @@ -0,0 +1,46 @@ +import random +from typing import Dict + +import datasets + +from genbench import Task + + +class NlCodesearchClfCodesearchnetGo(Task): + def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: + """Create the dataset adding a negative sample for each code comment/query + + Returns: + A dictionary containing key-value pairs for the raw datasets. 
+ The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. + """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select other item + random_item = random.sample(other_items, 1) + # Split input into comment and code + input_parts = item["input"].split("[CODESPLIT]") + # Split random input into comment and code + random_input_parts = random_item[0]["input"].split("[CODESPLIT]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add negative sample comment-code pair to new dataset + new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet new file mode 100644 index 0000000..1ea6599 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/config.jsonnet @@ -0,0 +1,56 @@ +{ + name: 'Natural 
Language Codesearch Classification (codesearchnet_java)', + + description: 'Natural Language Codesearch Classification (codesearchnet_java) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', + + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'java', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', + 'Florian Sihler', + 'Ansgar Scherp', + ], + + data_source: { + type: 'manual', + test: 'https://zenodo.org/record/8310891/files/test_java.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, + ], + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Given a code comment and a Java programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as comment [CODESPLIT] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md new file mode 100644 index 0000000..16abaa2 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_java) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_java).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_java).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_java).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_java) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py new file mode 100644 index 0000000..292e74c --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_java/task.py @@ -0,0 +1,46 @@ +import random +from typing import Dict + +import datasets + +from genbench import Task + + +class NlCodesearchClfCodesearchnetJava(Task): + def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: + """Create the dataset adding a negative sample for each code comment/query + + Returns: + A dictionary containing key-value pairs for the raw datasets. + The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. + """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select other item + random_item = random.sample(other_items, 1) + # Split input into comment and code + input_parts = item["input"].split("[CODESPLIT]") + # Split random input into comment and code + random_input_parts = random_item[0]["input"].split("[CODESPLIT]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add negative sample comment-code pair to new dataset + 
new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet new file mode 100644 index 0000000..f61ade9 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/config.jsonnet @@ -0,0 +1,56 @@ +{ + name: 'Natural Language Codesearch Classification (codesearchnet_javascript)', + + description: 'Natural Language Codesearch Classification (codesearchnet_javascript) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', + + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'javascript', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', + 'Florian Sihler', + 'Ansgar Scherp', + ], + + data_source: { + type: 'manual', + test: 'https://zenodo.org/record/8310891/files/test_javascript.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, + ], + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Given a code comment and an Javascript programming language code snippet, determine if the comment accurately represents the function of the code. 
Respond with True if the code matches the comment and False if it does not. The input format is defined as comment [CODESPLIT] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md new file mode 100644 index 0000000..86806bc --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_javascript) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_javascript).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_javascript).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_javascript).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_javascript) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py new file mode 100644 index 0000000..5e201a4 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_javascript/task.py @@ -0,0 +1,46 @@ +import random +from typing import Dict + +import datasets + +from genbench import Task + + +class NlCodesearchClfCodesearchnetJavascript(Task): + def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: + """Create the dataset adding a negative sample for each code comment/query + + Returns: + A dictionary containing key-value pairs for the raw datasets. + The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. + """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select other item + random_item = random.sample(other_items, 1) + # Split input into comment and code + input_parts = item["input"].split("[CODESPLIT]") + # Split random input into comment and code + random_input_parts = random_item[0]["input"].split("[CODESPLIT]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add negative sample comment-code pair to 
new dataset + new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet new file mode 100644 index 0000000..c4f0b9d --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/config.jsonnet @@ -0,0 +1,55 @@ +{ + name: 'Natural Language Codesearch Classification (codesearchnet_php)', + + description: 'Natural Language Codesearch Classification (codesearchnet_php) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', + + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'php', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', + 'Florian Sihler', + 'Ansgar Scherp', + ], + + data_source: { + type: 'manual', + test: 'https://zenodo.org/record/8310891/files/test_php.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', + }, + has_validation_set: false, + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, + ], + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Given a code comment and a PHP programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as comment [CODESPLIT] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md new file mode 100644 index 0000000..024058f --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_php) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_php).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_php).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_php).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_php) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py new file mode 100644 index 0000000..1378ff0 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_php/task.py @@ -0,0 +1,46 @@ +import random +from typing import Dict + +import datasets + +from genbench import Task + + +class NlCodesearchClfCodesearchnetPhp(Task): + def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: + """Create the dataset adding a negative sample for each code comment/query + + Returns: + A dictionary containing key-value pairs for the raw datasets. + The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. + """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select other item + random_item = random.sample(other_items, 1) + # Split input into comment and code + input_parts = item["input"].split("[CODESPLIT]") + # Split random input into comment and code + random_input_parts = random_item[0]["input"].split("[CODESPLIT]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add negative sample comment-code pair to new dataset + new_dataset = 
new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet new file mode 100644 index 0000000..98d7a1e --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/config.jsonnet @@ -0,0 +1,56 @@ +{ + name: 'Natural Language Codesearch Classification (codesearchnet_ruby)', + + description: 'Natural Language Codesearch Classification (codesearchnet_ruby) aims to measure the generalization capabilites of language models in code understanding. This subtasks measures cross-lingual generalization', + + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'ruby', + 'cross-lingual' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', + 'Florian Sihler', + 'Ansgar Scherp', + ], + + data_source: { + type: 'manual', + test: 'https://zenodo.org/record/8310891/files/test_ruby.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, + ], + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Given a code comment and a Ruby programming language code snippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as comment [CODESPLIT] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md new file mode 100644 index 0000000..012e885 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (codesearchnet_ruby) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (codesearchnet_ruby).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (codesearchnet_ruby).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (codesearchnet_ruby).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (codesearchnet_ruby) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. 
diff --git a/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py new file mode 100644 index 0000000..7f4db9b --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/codesearchnet_ruby/task.py @@ -0,0 +1,46 @@ +import random +from typing import Dict + +import datasets + +from genbench import Task + + +class NlCodesearchClfCodesearchnetRuby(Task): + def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: + """Create the dataset adding a negative sample for each code comment/query + + Returns: + A dictionary containing key-value pairs for the raw datasets. + The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. + """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select other item + random_item = random.sample(other_items, 1) + # Split input into comment and code + input_parts = item["input"].split("[CODESPLIT]") + # Split random input into comment and code + random_input_parts = random_item[0]["input"].split("[CODESPLIT]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add negative sample comment-code pair to new dataset + 
new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output diff --git a/src/genbench/tasks/nl_codesearch_clf/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/config.jsonnet new file mode 100644 index 0000000..3881142 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/config.jsonnet @@ -0,0 +1,35 @@ +{ + name: 'Natural Language Codesearch Classification', + + // @TODO: Add a description of the task + description: 'Natural Language Codesearch Classification aims to measure the generalization capabilites of language models in code understanding using binary classification as an evaluation task. It includes multiple subtasks to measure three different types of generalizations', + + // @TODO: Add a list of keywords that describe the task + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', + 'Florian Sihler', + 'Ansgar Scherp', + + ], + + subtasks_order: [ + 'codesearchnet_adv', + 'cosqa', + 'codesearchnet_ruby', + 'codesearchnet_go', + 'codesearchnet_java', + 'codesearchnet_javascript', + 'codesearchnet_php', + 'statcodesearch', + + ], +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/cosqa/__init__.py b/src/genbench/tasks/nl_codesearch_clf/cosqa/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/cosqa/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/cosqa/config.jsonnet new file mode 100644 index 0000000..5e20f63 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/cosqa/config.jsonnet @@ -0,0 +1,57 @@ +{ + name: 'Natural Language Codesearch Classification (cosqa)', + + description: 'Natural Language Codesearch Classification (cosqa) aims to measure the generalization capabilites of language models in code understanding. 
This subtasks measures robustness against covariate shifts', + + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'python', + 'robustness', + 'covariate shift' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', + 'Florian Sihler', + 'Ansgar Scherp', + ], + + data_source: { + type: 'manual', + test: 'https://zenodo.org/record/8310891/files/test_cosqa.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, + ], + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Given a search query and a Python programming language code snippet, determine if the query accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as comment [CODESPLIT] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/cosqa/doc.md b/src/genbench/tasks/nl_codesearch_clf/cosqa/doc.md new file mode 100644 index 0000000..8973fdb --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/cosqa/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (webquery) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (webquery).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (webquery).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (webquery).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (webquery) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. diff --git a/src/genbench/tasks/nl_codesearch_clf/cosqa/task.py b/src/genbench/tasks/nl_codesearch_clf/cosqa/task.py new file mode 100644 index 0000000..7d1c292 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/cosqa/task.py @@ -0,0 +1,46 @@ +import random +from typing import Dict + +import datasets + +from genbench import Task + + +class NlCodesearchClfCosqa(Task): + def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: + """Create the dataset adding a negative sample for each code comment/query + + Returns: + A dictionary containing key-value pairs for the raw datasets. 
+ The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. + """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select other item + random_item = random.sample(other_items, 1) + # Split input into comment and code + input_parts = item["input"].split("[CODESPLIT]") + # Split random input into comment and code + random_input_parts = random_item[0]["input"].split("[CODESPLIT]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add negative sample comment-code pair to new dataset + new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output diff --git a/src/genbench/tasks/nl_codesearch_clf/doc.md b/src/genbench/tasks/nl_codesearch_clf/doc.md new file mode 100644 index 0000000..18fc30b --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/doc.md @@ -0,0 +1,43 @@ +## Motivation +Language models can serve as a valuable tool for software developers to increase productivity. Large generative models can be used for code generation and code completion, while smaller encoder-only models are capable of performing code search tasks using natural language queries. 
These capabilities are heavily influenced by the quality and diversity of the available training data. Source code datasets used for training usually focus on the most popular languages and testing is mostly conducted on the same distributions, often overlooking low resource programming languages. Motivated by the NLP generalisation taxonomy proposed by Hupkes et. al., we propose a new benchmark dataset called [placeholder] which builds upon existing natural language code search datasets to systemically study the code understanding generalization capabilities of language models. For evaluation and comparison, we collect several baseline results using fine-tuned BERT-style models and GPT-style large language models in a zero-shot setting. + +## Examples +Given a natural language comment or search query, determine if a given code snippet matches the function of the code. + +**match**: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def set_sampled_topics ( self , sampled_topics ) : assert sampled_topics . dtype == np . int and len ( sampled_topics . shape ) <= 2 if len ( sampled_topics . shape ) == 1 : self . sampled_topics = sampled_topics . reshape ( 1 , sampled_topics . shape [ 0 ] ) else : self . sampled_topics = sampled_topics self . samples = self . sampled_topics . shape [ 0 ] self . tt = self . tt_comp ( self . sampled_topics ) self . dt = self . dt_comp ( self . sampled_topics )", "target": 1, "target_options": ["no_match", "match"]} \ +**no_match**: {"input": "Allocate sampled topics to the documents rather than estimate them . Automatically generate term - topic and document - topic matrices . [SEP] def _resolve_entity ( mo ) : ent = mo . group ( \"entity\" ) s = mo . group ( ) if s . 
startswith ( '&#' ) : if s [ 2 ] in 'xX' : radix = 16 else : radix = 10 try : num = int ( ent , radix ) except ( ValueError , OverflowError ) : return u'' else : num = name2codepoint . get ( ent ) if num is None or num < 0 : # unknown entity -> ignore return u'' try : return unichr ( num ) except ValueError : return u''", "target": 0, "target_options": ["no_match", "match"]} + +## Data Source +**CodeSearchNet** : original dataset first published in https://arxiv.org/pdf/1909.09436.pdf , Java, Javascript, Go, Ruby, PHP subsets collected from huggingface-hub \ +**CodeSearchNet Adv** : a processed version of the CodeSearchNet Python dataset, introduced in the CodeXGLUE benchmark suite https://github.com/microsoft/CodeXGLUE \ +**CoSQA** : Python code snippets from the CodeSearchNet dataset paired with real-world user search engine queries, introduced in https://arxiv.org/pdf/2105.13239.pdf \ +**StatCodeSearch** : R code-comment pair snippets, scraped and extracted from public projects on the Open Science Framework (OSF) by the submission authors + +For each comment in each subset we randomly sampled another code snippet from the given subset, to create a fully balanced binary classification dataset. \ +For the dataset statistics we only consider the positive (matching) pairs. \ + +**Dataset Size**:\ +*Finetuning set:* \ + -CodeSearchNet Adv train set 251820 \ +*Test sets:* \ + -CodeSearchNet Adv test set 19210 \ + -CoSQA 10293\ + -CodeSearchNet Ruby 2279\ + -CodeSearchNet Go 14291\ + -CodeSearchNet Java 26909\ + -CodeSearchNet Javascript 6483\ + -CodeSearchNet PHP 29391\ + -StatCodeSearch 1070 \ + -Combined test set 109926 +## Limitations and Bias +TBD + +## Citation +TBD + +## Further References +Husain, H., Wu, H. H., Gazit, T., Allamanis, M., & Brockschmidt, M. (2019). Codesearchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436. + +Lu, S., Guo, D., Ren, S., Huang, J., Svyatkovskiy, A., Blanco, A., Shujie, L. I. U. (2021, June). 
CodeXGLUE: A Machine Learning Benchmark Dataset for Code Understanding and Generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1). + +Huang J., Tang D., Shou L., Gong M., Xu K., Jiang D., Zhou M., Duan N. (2021) CoSQA: 20,000+ web queries for code search and question answering. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing. \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/requirements-usage-example.txt b/src/genbench/tasks/nl_codesearch_clf/requirements-usage-example.txt new file mode 100644 index 0000000..b9e2d8a --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/requirements-usage-example.txt @@ -0,0 +1,5 @@ +torch v. 2.1.0 +numpy v. 1.25.1 +tqdm v. 4.65.0 +transformers v. 4.32.0 +scikit-learn v. 1.3.0 \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet new file mode 100644 index 0000000..bd6eb74 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/config.jsonnet @@ -0,0 +1,57 @@ +{ + name: 'Natural Language Codesearch Classification (statcodesearch)', + + description: 'Natural Language Codesearch Classification (statcodesearch) aims to measure the generalization capabilities of language models in code understanding. 
This subtask measures cross-lingual and domain generalization', + + keywords: [ + 'codesearch', + 'natural language query', + 'binary classification', + 'r', + 'cross-lingual', + 'domain-shift' + ], + + authors: [ + 'Andor Diera', + 'Abdelhalim Dahou', + 'Lukas Galke', + 'Fabian Karl', + 'Florian Sihler', + 'Ansgar Scherp', + ], + + data_source: { + type: 'manual', + test: 'https://zenodo.org/record/8310891/files/test_statcodesearch.jsonl', + train:'https://zenodo.org/record/8310891/files/train_adv.jsonl', + }, + + has_validation_set: false, + has_train_set: true, + + task_type: 'multiple_choice', + + evaluation_metrics: [ + { + hf_id: 'accuracy', + git_commit_sha: '34d6add55811828baef83e0d7c6826e2193f7b6a', + best_score: 1.0, + }, + ], + + preparation_strategies: { + finetuning: { + objective: 'maximum_likelihood', + }, + prompt_based_testing: { + prompt_builder: { + instruction_zero_shot: 'Given a code comment and a R programming language codesnippet, determine if the comment accurately represents the function of the code. Respond with True if the code matches the comment and False if it does not. 
The input format is defined as comment [CODESPLIT] code', + input_prefix: '', + output_prefix: '', + choices_prefix: '', + append_choices_to_input: false, + } + }, + }, +} \ No newline at end of file diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md new file mode 100644 index 0000000..0826a5c --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/doc.md @@ -0,0 +1,19 @@ +# Natural Language Codesearch Classification (statcodesearch) + +## Abstract +*Copy the abstract of your accompanying paper for this task here Natural Language Codesearch Classification (statcodesearch).* + +## Examples +*Give some examples of the Natural Language Codesearch Classification (statcodesearch).* + +## Usage +*Describe how to load your task and what is required for evaluation, if anything.* + +## Data Source +*Describe the data source for this Natural Language Codesearch Classification (statcodesearch).* + +## Limitations and Bias +*Note any known limitations or biases that the Natural Language Codesearch Classification (statcodesearch) has, with links and references if possible.* + +## GenBench Eval card +*Describe what kind of generalisation your task is evaluating, and include a [genbench eval card](https://genbench.org/eval_cards/) for your task*. diff --git a/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py new file mode 100644 index 0000000..5134760 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/statcodesearch/task.py @@ -0,0 +1,46 @@ +import random +from typing import Dict + +import datasets + +from genbench import Task + + +class NlCodesearchClfStatcodesearch(Task): + def get_dataset_raw(self) -> Dict[str, datasets.Dataset]: + """Create the dataset adding a negative sample for each code comment/query + + Returns: + A dictionary containing key-value pairs for the raw datasets. 
+ The keys are strings representing the name of the dataset split + (e.g., "train", "validation", "test") and the values are + HuggingFace `datasets.Dataset` objects containing the original pair and the distractors for the test split. + The train split only contains the original dataset. + """ + # Load the raw datasets + raw_datasets: Dict[str, datasets.Dataset] = self._load_data_source() + output: Dict[str, datasets.Dataset] = {} + # Set random seed for consistency + random.seed(42) + for split, dataset in raw_datasets.items(): + if split == "test": + new_dataset = datasets.Dataset.from_dict({}) + for item in dataset: + # Add comment-code pair to new dataset + new_dataset = new_dataset.add_item(item) + other_items = [other_item for other_item in dataset if other_item != item] + # Randomly select other item + random_item = random.sample(other_items, 1) + # Split input into comment and code + input_parts = item["input"].split("[CODESPLIT]") + # Split random input into comment and code + random_input_parts = random_item[0]["input"].split("[CODESPLIT]") + # Combine the "input" fields of the original and random items + new_input = input_parts[0] + "[CODESPLIT]" + random_input_parts[1] + new_item = {"input": new_input, "target": 0, "target_options": item["target_options"]} + # Add negative sample comment-code pair to new dataset + new_dataset = new_dataset.add_item(new_item) + output[split] = new_dataset + else: + output[split] = dataset + return output diff --git a/src/genbench/tasks/nl_codesearch_clf/usage_example.py b/src/genbench/tasks/nl_codesearch_clf/usage_example.py new file mode 100644 index 0000000..9641473 --- /dev/null +++ b/src/genbench/tasks/nl_codesearch_clf/usage_example.py @@ -0,0 +1,331 @@ +import argparse +import json +import logging + +import torch +from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score +from torch.optim import AdamW +from torch.utils.data import DataLoader +from tqdm import tqdm +from transformers import 
AutoModelForSequenceClassification, AutoTokenizer, PreTrainedModel, get_scheduler + +from genbench import load_task + + +########################################################## +# Data Loadig Utils +########################################################## +class Dataset(torch.utils.data.Dataset): + def __init__(self, features): + self.features = features + + def __getitem__(self, index): + return self.features[index] + + def __len__(self): + return len(self.features) + + +def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + +def _convert_examples_to_features( + comments, + codes, + labels, + max_seq_length, + tokenizer, + cls_token="[CLS]", + sep_token="[SEP]", + pad_token=0, + eos_token="", + sequence_a_segment_id=0, + sequence_b_segment_id=1, + cls_token_segment_id=1, + pad_token_segment_id=0, + mask_padding_with_zero=True, +): + features = [] + for ex_index, (comment, code, label) in enumerate(zip(comments, codes, labels)): + # As was done in CodeBERT + tokens_comment = tokenizer.tokenize(comment)[:50] + tokens_code = tokenizer.tokenize(code) + + # update max_seq_length to account for [CLS], [SEP], [SEP] tokens (-3) + n_special_tokens = 3 + if cls_token is None: + n_special_tokens -= 1 + s_max_seq_length = max_seq_length - n_special_tokens + _truncate_seq_pair(tokens_comment, tokens_code, s_max_seq_length) + + # change sep for eos if no sep_token + if sep_token is None: + sep_token = eos_token + + # [SEP] inbetween and at the end + tokens = tokens_comment + [sep_token] + tokens_code + [sep_token] + # CLS at the beginning + if cls_token is not None: + tokens = [cls_token] + tokens + + input_ids = tokenizer.convert_tokens_to_ids(tokens) + + # 1 for tokens, 0 for padding + input_mask = [1 if 
mask_padding_with_zero else 0] * len(input_ids) + + # padding with 0 up to max_seq_length + padding_length = max_seq_length - len(input_ids) + input_ids = input_ids + ([pad_token] * padding_length) + input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + + # check + assert len(input_ids) == max_seq_length + assert len(input_mask) == max_seq_length + + # convert to tensors + input_ids = torch.tensor(input_ids, dtype=torch.long) + input_mask = torch.tensor(input_mask, dtype=torch.long) + label = torch.tensor(label, dtype=torch.long) + + features.append({"input_ids": input_ids, "attention_mask": input_mask, "labels": label}) + return features + + +def load_data(tokenizer, batch_size, seq_len, train_file, is_train): + # create dataset + comments = [] + codes = [] + labels = [] + skipped = 0 + + is_sep_token_set = tokenizer.sep_token is not None + is_cls_token_set = tokenizer.cls_token is not None + is_pad_token_set = tokenizer.pad_token is not None + is_eos_token_set = tokenizer.eos_token is not None + + for split, dataset in train_file.items(): + if is_train and split == "test": + continue + if not is_train and split == "train": + continue + for sample in dataset: + try: + input = sample["input"] + # split at [CODESPLIT] token + input = input.split("[CODESPLIT]") + if len(input) != 2: + # skip cases with more than one [SEP] token + logging.warning(f"Input contains more than one [CODESPLIT] token: {input}") + skipped += 1 + continue + # skip every sample that contains special tokens + if is_sep_token_set and (tokenizer.sep_token in input[0] or tokenizer.sep_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_cls_token_set and (tokenizer.cls_token in input[0] or tokenizer.cls_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_pad_token_set and (tokenizer.pad_token in input[0] or tokenizer.pad_token in input[1]): + 
logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + if is_eos_token_set and (tokenizer.eos_token in input[0] or tokenizer.eos_token in input[1]): + logging.warning(f"Input contains special tokens: {input}") + skipped += 1 + continue + comments.append(input[0]) + codes.append(input[1]) + labels.append(sample["target"]) + except json.JSONDecodeError as e: + print(f"Error: JSON decoding failed - {e}") + continue + logging.info(f"Skipped {skipped} samples due to special tokens") + # tokenize + features = _convert_examples_to_features( + comments, + codes, + labels, + max_seq_length=seq_len, + tokenizer=tokenizer, + cls_token=tokenizer.cls_token, + sep_token=tokenizer.sep_token, + cls_token_segment_id=tokenizer.cls_token_id, + pad_token_segment_id=tokenizer.pad_token_id, + eos_token=tokenizer.eos_token, + ) + + # Convert to Dataset + features = Dataset(features) + + return DataLoader(features, batch_size=batch_size, shuffle=True) + + +############################################################## +# Fine-tune Model +############################################################## + + +def train(model: PreTrainedModel, dataloader: DataLoader, args: argparse.Namespace): + """ + Fine-tune the model. 
+ :param model: the pretrained model to be fine-tuned + :param dataloader: an iterable data loader + :param args: training arguments (and also some other arguments) + :return: the fine-tuned model + """ + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + device = "cpu" + model.to(device) + model.train() + + num_training_steps = args.epochs * len(dataloader) + progress_bar = tqdm(range(num_training_steps)) + + optimizer = AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay) + lr_scheduler = get_scheduler( + name="linear", + optimizer=optimizer, + num_warmup_steps=args.num_warmup_steps, + num_training_steps=num_training_steps, + ) + + for epoch in range(args.epochs): + for batch in dataloader: + batch = {k: v.to(device) for k, v in batch.items()} + outputs = model(**batch) + loss = outputs.loss + loss.backward() + + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + progress_bar.update(1) + + +########################################################### +# Evaluate Model +########################################################### + + +def clf(model, dataloader, args): + """Predict on test set.""" + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + model.to(device) + model.eval() + predictions = [] + labels = [] + logging.info("Evaluating...") + for batch in tqdm(dataloader): + batch = {k: v.to(device) for k, v in batch.items()} + with torch.no_grad(): + outputs = model(**batch) + predictions.extend(outputs.logits.argmax(-1).cpu().numpy().tolist()) + labels.extend(batch["labels"].cpu().numpy().tolist()) + + metrics = {} + # calc metrics + + # calc accuracy + accuracy = accuracy_score(labels, predictions) + metrics["accuracy"] = accuracy + + # calc precision + precision = precision_score(labels, predictions) + metrics["precision"] = precision + + # calc recall + recall = recall_score(labels, predictions) + metrics["recall"] = recall + + # calc f1 + f1 = f1_score(labels, predictions) 
+ metrics["f1"] = f1 + + return metrics + + +############################################################## +# Run example +############################################################## + + +def main(): + """Main function.""" + # args + parser = argparse.ArgumentParser() + # parser.add_argument('--dataset', type=str, default='./codesearchnet_adv') + parser.add_argument("--model", default="roberta-base") + parser.add_argument("--epochs", type=int, default=5) + parser.add_argument("--batch_size", type=int, default=32) + parser.add_argument("--learning_rate", type=float, default=2e-5) + parser.add_argument("--weight_decay", type=float, default=0.01) + parser.add_argument("--num_warmup_steps", type=int, default=0) + parser.add_argument("--output_dir", type=str, default="models") + parser.add_argument("--seq_len", type=int, default=512, help="maximum sequence length") + # parser.add_argument("--distractors", type=int, default=99, help="number of distractors per true pair") + parser.add_argument("--log_level", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], default="INFO") + + args = parser.parse_args() + + TRAIN_FILE = load_task("nl_codesearch_clf:codesearchnet_adv").get_dataset_raw() + + # logging + logging.basicConfig(level=args.log_level) + + # load tokenizer + logging.info("Loading model...") + tokenizer = AutoTokenizer.from_pretrained(args.model) + + # load data + logging.info("Loading data...") + dataloader = load_data(tokenizer, args.batch_size, args.seq_len, TRAIN_FILE, True) + + model = AutoModelForSequenceClassification.from_pretrained(args.model) + + # train + logging.info("Training...") + # train(model, dataloader, args) + + # save model + logging.info("Saving model...") + model.save_pretrained(f"{args.output_dir}/{args.model}") + # also soave tokenizer + tokenizer.save_pretrained(f"{args.output_dir}/{args.model}") + + TEST_FILES = [ + ["codesearchnetadv", load_task("nl_codesearch_clf:codesearchnet_adv").get_dataset_raw()], + 
["codesearchnet_ruby", load_task("nl_codesearch_clf:codesearchnet_ruby").get_dataset_raw()], + ["codesearchnet_go", load_task("nl_codesearch_clf:codesearchnet_go").get_dataset_raw()], + ["codesearchnet_java", load_task("nl_codesearch_clf:codesearchnet_java").get_dataset_raw()], + ["codesearchnet_javascript", load_task("nl_codesearch_clf:codesearchnet_javascript").get_dataset_raw()], + ["codesearchnet_php", load_task("nl_codesearch_clf:codesearchnet_php").get_dataset_raw()], + ["cosqa", load_task("nl_codesearch_clf:cosqa").get_dataset_raw()], + ["statcodesearch", load_task("nl_codesearch_clf:statcodesearch").get_dataset_raw()], + ] + + results = {} + for file in TEST_FILES: + logging.info(f"Evaluating on {file[0]}...") + dataloader = load_data(tokenizer, args.batch_size, args.seq_len, file[1], False) + metrics = clf(model, dataloader, args) + results[file[0]] = metrics + logging.info(f"Test results for {file[0]}: {metrics}") + + logging.info(f"Test results: {results}") + + +if __name__ == "__main__": + main() From 16c3f8046d102016ed1b5cfab635b04168e5acd2 Mon Sep 17 00:00:00 2001 From: Amirhossein Kazemnejad <2122102+kazemnejad@users.noreply.github.com> Date: Sun, 31 Dec 2023 14:02:20 -0500 Subject: [PATCH 57/57] Allow the test to pass if custom implementation is used --- tests/test_task.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_task.py b/tests/test_task.py index 3c1872b..e8ce065 100644 --- a/tests/test_task.py +++ b/tests/test_task.py @@ -68,6 +68,10 @@ def test_split_file(task_obj: Task): def test_task_config_matches_provided_sets(task_obj: Task): """Test case to verify if the task config matches the provided sets""" + class_dict = task_obj.__class__.__dict__ + if "get_datasets_raw" in class_dict and "get_prepared_datasets" in class_dict: + pytest.skip("Task has custom get_datasets_raw and prepared_dataset builder.") + datasets_raw = task_obj.get_datasets_raw() task_sets = [DatasetSplit.TEST.value]