From 33dd649de7b3081a62f174b6f1f4688f5d12ecfd Mon Sep 17 00:00:00 2001
From: Sylvan Ridderinkhof
Date: Wed, 23 Aug 2023 12:38:49 +0200
Subject: [PATCH 01/49] chore: update update script

---
 direct_indexing/solr/update_solr_cores.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/direct_indexing/solr/update_solr_cores.sh b/direct_indexing/solr/update_solr_cores.sh
index 21ab681bf..81664874e 100644
--- a/direct_indexing/solr/update_solr_cores.sh
+++ b/direct_indexing/solr/update_solr_cores.sh
@@ -31,4 +31,8 @@ docker cp ./direct_indexing/solr/cores/result/managed-schema $solr_container_id:
 docker cp ./direct_indexing/solr/cores/transaction/managed-schema $solr_container_id:/bitnami/solr/server/solr/transaction/conf/managed-schema.xml
 docker cp ./direct_indexing/solr/cores/activity/xslt $solr_container_id:/bitnami/solr/server/solr/activity/conf/
 
+# Ask the user if this is mounted locally, default to no. If it is, chown the files to 1001:root
+read -p "Is this mounted locally? (y/N) " -n 1 -r
+
+
 echo "Done!"

From 1c2034a53d249eb520b180912d0792a85d0185d6 Mon Sep 17 00:00:00 2001
From: Sylvan Ridderinkhof
Date: Thu, 2 Nov 2023 15:30:44 +0100
Subject: [PATCH 02/49] feat: added tests framework with pytest

---
 requirements.txt | 6 ++
 setup.cfg | 4 +
 tests/README.md | 8 ++
 tests/__init__.py | 0
 tests/direct_indexing/__init__.py | 0
 tests/direct_indexing/cleaning/__init__.py | 0
 .../direct_indexing/cleaning/test_dataset.py | 25 +++++
 .../direct_indexing/cleaning/test_metadata.py | 13 +++
 .../direct_indexing/custom_fields/__init__.py | 0
 .../custom_fields/models/__init__.py | 0
 .../custom_fields/models/test_codelists.py | 21 ++++
 .../custom_fields/models/test_currencies.py | 17 +++
 .../custom_fields/test_activity_dates.py | 9 ++
 .../test_add_default_hierarchy.py | 5 +
 .../custom_fields/test_codelists.py | 21 ++++
 .../test_currency_aggregation.py | 77 +++++++++++++
 .../custom_fields/test_currency_conversion.py | 25 +++++
 .../custom_fields/test_custom_fields.py | 13 +++
 .../custom_fields/test_dataset_metadata.py | 9 ++
 .../custom_fields/test_date_quarters.py | 13 +++
 .../test_document_link_category_combined.py | 5 +
 .../test_indexing_manytomany_relations.py | 25 +++++
 .../custom_fields/test_json_dumps.py | 5 +
 .../test_organisation_custom_fields.py | 13 +++
 .../test_policy_marker_combined.py | 5 +
 .../test_raise_h2_budget_data_to_h1.py | 9 ++
 .../custom_fields/test_title_narrative.py | 5 +
 tests/direct_indexing/metadata/__init__.py | 0
 .../direct_indexing/metadata/test_dataset.py | 25 +++++
 .../metadata/test_publisher.py | 9 ++
 tests/direct_indexing/metadata/test_util.py | 13 +++
 tests/direct_indexing/processing/__init__.py | 0
 .../processing/test_activity_subtypes.py | 13 +++
 .../processing/test_dataset.py | 25 +++++
 tests/direct_indexing/processing/test_util.py | 17 +++
 tests/direct_indexing/test_direct_indexing.py | 25 +++++
 tests/direct_indexing/test_tasks.py | 29 +++++
 tests/direct_indexing/test_util.py | 101 ++++++++++++++++++
 38 files changed, 590 insertions(+)
 create mode 100644 tests/README.md
 create mode 100644 tests/__init__.py
 create mode 100644 tests/direct_indexing/__init__.py
 create mode 100644 tests/direct_indexing/cleaning/__init__.py
 create mode 100644 tests/direct_indexing/cleaning/test_dataset.py
 create mode 100644 tests/direct_indexing/cleaning/test_metadata.py
 create mode 100644 tests/direct_indexing/custom_fields/__init__.py
 create mode 100644 tests/direct_indexing/custom_fields/models/__init__.py
 create mode 100644 tests/direct_indexing/custom_fields/models/test_codelists.py
 create mode 100644 tests/direct_indexing/custom_fields/models/test_currencies.py
 create mode 100644 tests/direct_indexing/custom_fields/test_activity_dates.py
 create mode 100644 tests/direct_indexing/custom_fields/test_add_default_hierarchy.py
 create mode 100644 tests/direct_indexing/custom_fields/test_codelists.py
 create mode 100644 tests/direct_indexing/custom_fields/test_currency_aggregation.py
 create mode 100644 tests/direct_indexing/custom_fields/test_currency_conversion.py
 create mode 100644 tests/direct_indexing/custom_fields/test_custom_fields.py
 create mode 100644 tests/direct_indexing/custom_fields/test_dataset_metadata.py
 create mode 100644 tests/direct_indexing/custom_fields/test_date_quarters.py
 create mode 100644 tests/direct_indexing/custom_fields/test_document_link_category_combined.py
 create mode 100644 tests/direct_indexing/custom_fields/test_indexing_manytomany_relations.py
 create mode 100644 tests/direct_indexing/custom_fields/test_json_dumps.py
 create mode 100644 tests/direct_indexing/custom_fields/test_organisation_custom_fields.py
 create mode 100644 tests/direct_indexing/custom_fields/test_policy_marker_combined.py
 create mode 100644 tests/direct_indexing/custom_fields/test_raise_h2_budget_data_to_h1.py
 create mode 100644 tests/direct_indexing/custom_fields/test_title_narrative.py
 create mode 100644 tests/direct_indexing/metadata/__init__.py
 create mode 100644 tests/direct_indexing/metadata/test_dataset.py
 create mode 100644 tests/direct_indexing/metadata/test_publisher.py
 create mode 100644 tests/direct_indexing/metadata/test_util.py
 create mode 100644 tests/direct_indexing/processing/__init__.py
 create mode 100644 tests/direct_indexing/processing/test_activity_subtypes.py
 create mode 100644 tests/direct_indexing/processing/test_dataset.py
 create mode 100644 tests/direct_indexing/processing/test_util.py
 create mode 100644 tests/direct_indexing/test_direct_indexing.py
 create mode 100644 tests/direct_indexing/test_tasks.py
 create mode 100644 tests/direct_indexing/test_util.py

diff --git a/requirements.txt b/requirements.txt
index f36443e5c..bf83c6e49 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -54,6 +54,12 @@ platformdirs==3.0.0
 PyYAML==6.0
 virtualenv==20.19.0
 
+# Testing
+pytest==7.4.3
+pytest-celery==0.0.0
+pytest-django==4.6.0
+pytest-mock==3.12.0
+
 # Working with XML Documents (legacy currency convert)
 lxml==4.9.3
 MechanicalSoup==1.3.0

diff --git a/setup.cfg b/setup.cfg
index 4a0c3d632..c55b33f29 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -9,3 +9,7 @@ exclude = .env,
 env,
 max-line-length = 120
 
+
+[tool:pytest]
+DJANGO_SETTINGS_MODULE = iaticloud.settings
+addopts = -p no:warnings

diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 000000000..de7e1ec9e
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,8 @@
+# Testing
+We use pytest to test our project.
+ +## Installed dependencies +- pytest +- pytest-mock +- pytest-django +- pytest-celery diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/direct_indexing/__init__.py b/tests/direct_indexing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/direct_indexing/cleaning/__init__.py b/tests/direct_indexing/cleaning/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/direct_indexing/cleaning/test_dataset.py b/tests/direct_indexing/cleaning/test_dataset.py new file mode 100644 index 000000000..a1e85e4c6 --- /dev/null +++ b/tests/direct_indexing/cleaning/test_dataset.py @@ -0,0 +1,25 @@ +# TODO + + +def test_recursive_attribute_cleaning(): + assert True + + +def test_extract_key_value_fields(): + assert True + + +def test_extract_literal_values(): + assert True + + +def test_extract_list_values(): + assert True + + +def test_list_values(): + assert True + + +def test_extract_single_values(): + assert True diff --git a/tests/direct_indexing/cleaning/test_metadata.py b/tests/direct_indexing/cleaning/test_metadata.py new file mode 100644 index 000000000..7b0572397 --- /dev/null +++ b/tests/direct_indexing/cleaning/test_metadata.py @@ -0,0 +1,13 @@ +# TODO + + +def test_clean_dataset_metadata(): + assert True + + +def test_clean_resources(): + assert True + + +def test_clean_extras(): + assert True diff --git a/tests/direct_indexing/custom_fields/__init__.py b/tests/direct_indexing/custom_fields/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/direct_indexing/custom_fields/models/__init__.py b/tests/direct_indexing/custom_fields/models/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/direct_indexing/custom_fields/models/test_codelists.py b/tests/direct_indexing/custom_fields/models/test_codelists.py new file mode 100644 index 000000000..a14d2e1c2 --- /dev/null +++ b/tests/direct_indexing/custom_fields/models/test_codelists.py @@ -0,0 +1,21 @@ +# TODO + + +def test_codelist(): + assert True + + +def test_read_codelists(): + assert True + + +def test_get_value(): + assert True + + +def test_get_codelists(): + assert True + + +def test_download_codelists(): + assert True diff --git a/tests/direct_indexing/custom_fields/models/test_currencies.py b/tests/direct_indexing/custom_fields/models/test_currencies.py new file mode 100644 index 000000000..ab7e4ce56 --- /dev/null +++ b/tests/direct_indexing/custom_fields/models/test_currencies.py @@ -0,0 +1,17 @@ +# TODO + + +def test_currencies(): + assert True + + +def test_read_currencies(): + assert True + + +def test_get_currency(): + assert True + + +def test_convert_currency(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_activity_dates.py b/tests/direct_indexing/custom_fields/test_activity_dates.py new file mode 100644 index 000000000..1f75c50a8 --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_activity_dates.py @@ -0,0 +1,9 @@ +# TODO + + +def test_activity_dates(): + assert True + + +def test_extract_activity_dates(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_add_default_hierarchy.py b/tests/direct_indexing/custom_fields/test_add_default_hierarchy.py new file mode 100644 index 000000000..36de6c573 --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_add_default_hierarchy.py @@ -0,0 +1,5 @@ +# TODO + + +def test_add_default_hierarchy(): + pass diff --git a/tests/direct_indexing/custom_fields/test_codelists.py 
b/tests/direct_indexing/custom_fields/test_codelists.py new file mode 100644 index 000000000..b0e30f4d5 --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_codelists.py @@ -0,0 +1,21 @@ +# TODO + + +def test_add_codelist_fields(): + assert True + + +def test_extract_single_field(): + assert True + + +def test_extract_list_field(): + assert True + + +def test_extract_nested_list_field(): + assert True + + +def test_check_and_get(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_currency_aggregation.py b/tests/direct_indexing/custom_fields/test_currency_aggregation.py new file mode 100644 index 000000000..06159a0a1 --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_currency_aggregation.py @@ -0,0 +1,77 @@ +# TODO + + +def test_currency_aggregation(): + assert True + + +def test_prepare_data(): + assert True + + +def test_connect_to_mongo(): + assert True + + +def test_get_aggregations(): + assert True + + +def test_aggregate_converted_types(): + assert True + + +def test_get_aggregation_fields(): + assert True + + +def test_index_activity_data(): + assert True + + +def test_process_activity_aggregations(): + assert True + + +def test_refresh_mongo_data(): + assert True + + +def test_get_child_aggregations(): + assert True + + +def test_process_child_aggregations(): + assert True + + +def test_process_child_agg_currencies(): + assert True + + +def test_get_currency(): + assert True + + +def test_clean_aggregation_result(): + assert True + + +def test_revert_activity_tvu(): + assert True + + +def test_process_budget_agg(): + assert True + + +def test_process_planned_disbursement_agg(): + assert True + + +def test_process_transaction_agg(): + assert True + + +def test_process_transaction_currency_agg(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_currency_conversion.py b/tests/direct_indexing/custom_fields/test_currency_conversion.py new file mode 100644 index 000000000..7ed5729ca --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_currency_conversion.py @@ -0,0 +1,25 @@ +# TODO + + +def test_currency_conversion(): + assert True + + +def test_convert_currencies_from_list(): + assert True + + +def test_convert_currencies_from_dict(): + assert True + + +def test_convert(): + assert True + + +def test_get_ym(): + assert True + + +def test_save_converted_value_to_data(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_custom_fields.py b/tests/direct_indexing/custom_fields/test_custom_fields.py new file mode 100644 index 000000000..bcd6a8e07 --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_custom_fields.py @@ -0,0 +1,13 @@ +# TODO + + +def test_add_all(): + assert True + + +def test_process_activity(): + assert True + + +def test_get_custom_metadata(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_dataset_metadata.py b/tests/direct_indexing/custom_fields/test_dataset_metadata.py new file mode 100644 index 000000000..0dccff4ed --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_dataset_metadata.py @@ -0,0 +1,9 @@ +# TODO + + +def test_dataset_metadata(): + assert True + + +def test_add_meta_to_activity(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_date_quarters.py b/tests/direct_indexing/custom_fields/test_date_quarters.py new file mode 100644 index 000000000..e55f6bd1e --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_date_quarters.py @@ -0,0 +1,13 @@ +# TODO + + +def test_add_date_quarter_fields(): + assert True + + +def 
test_recursive_date_fields(): + assert True + + +def test_retrieve_date_quarter(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_document_link_category_combined.py b/tests/direct_indexing/custom_fields/test_document_link_category_combined.py new file mode 100644 index 000000000..2739075b5 --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_document_link_category_combined.py @@ -0,0 +1,5 @@ +# TODO + + +def test_document_link_category_combined(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_indexing_manytomany_relations.py b/tests/direct_indexing/custom_fields/test_indexing_manytomany_relations.py new file mode 100644 index 000000000..835c50775 --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_indexing_manytomany_relations.py @@ -0,0 +1,25 @@ +# TODO + + +def test_index_many_to_many_relations(): + assert True + + +def test_add_participating_org_child_indexes(): + assert True + + +def test_add_result_child_indexes(): + assert True + + +def test_add_field_child_field_indexes(): + assert True + + +def test_add_field_child_field_children_indexes(): + assert True + + +def test_iterate_third_level_children(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_json_dumps.py b/tests/direct_indexing/custom_fields/test_json_dumps.py new file mode 100644 index 000000000..08a3866bd --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_json_dumps.py @@ -0,0 +1,5 @@ +# TODO + + +def test_add_json_dumps(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_organisation_custom_fields.py b/tests/direct_indexing/custom_fields/test_organisation_custom_fields.py new file mode 100644 index 000000000..b548f3fdd --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_organisation_custom_fields.py @@ -0,0 +1,13 @@ +# TODO + + +def test_add_all(): + assert True + + +def test_index_many_to_many_relations(): + assert True + + +def test_index_total_expenditure(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_policy_marker_combined.py b/tests/direct_indexing/custom_fields/test_policy_marker_combined.py new file mode 100644 index 000000000..8de537b2c --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_policy_marker_combined.py @@ -0,0 +1,5 @@ +# TODO + + +def test_policy_marker_combined(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_raise_h2_budget_data_to_h1.py b/tests/direct_indexing/custom_fields/test_raise_h2_budget_data_to_h1.py new file mode 100644 index 000000000..ebe8195ec --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_raise_h2_budget_data_to_h1.py @@ -0,0 +1,9 @@ +# TODO + + +def test_raise_h2_budget_data_to_h1(): + assert True + + +def test_pull_related_data_to_h1(): + assert True diff --git a/tests/direct_indexing/custom_fields/test_title_narrative.py b/tests/direct_indexing/custom_fields/test_title_narrative.py new file mode 100644 index 000000000..8344e0a8a --- /dev/null +++ b/tests/direct_indexing/custom_fields/test_title_narrative.py @@ -0,0 +1,5 @@ +# TODO + + +def test_title_narrative_first(): + assert True diff --git a/tests/direct_indexing/metadata/__init__.py b/tests/direct_indexing/metadata/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/direct_indexing/metadata/test_dataset.py b/tests/direct_indexing/metadata/test_dataset.py new file mode 100644 index 000000000..c03f558d5 --- /dev/null +++ b/tests/direct_indexing/metadata/test_dataset.py @@ -0,0 +1,25 @@ +# TODO + + +def 
test_dataset_exception(): + assert True + + +def test_subtask_process_dataset(): + assert True + + +def test_index_datasets_and_dataset_metadata(): + assert True + + +def test_load_codelists(): + assert True + + +def test__get_existing_datasets(): + assert True + + +def test_prepare_update(): + assert True diff --git a/tests/direct_indexing/metadata/test_publisher.py b/tests/direct_indexing/metadata/test_publisher.py new file mode 100644 index 000000000..5077f0989 --- /dev/null +++ b/tests/direct_indexing/metadata/test_publisher.py @@ -0,0 +1,9 @@ +# TODO + + +def test_index_publisher_metadata(): + assert True + + +def test__preprocess_publisher_metadata(): + assert True diff --git a/tests/direct_indexing/metadata/test_util.py b/tests/direct_indexing/metadata/test_util.py new file mode 100644 index 000000000..860c5d81e --- /dev/null +++ b/tests/direct_indexing/metadata/test_util.py @@ -0,0 +1,13 @@ +# TODO + + +def test_retrieve(): + assert True + + +def test_index(): + assert True + + +def test_download_dataset(): + assert True diff --git a/tests/direct_indexing/processing/__init__.py b/tests/direct_indexing/processing/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/direct_indexing/processing/test_activity_subtypes.py b/tests/direct_indexing/processing/test_activity_subtypes.py new file mode 100644 index 000000000..3b5eafbcf --- /dev/null +++ b/tests/direct_indexing/processing/test_activity_subtypes.py @@ -0,0 +1,13 @@ +# TODO + + +def test_extract_subtype(): + assert True + + +def test_process_subtype_dict(): + assert True + + +def test_extract_all_subtypes(): + assert True diff --git a/tests/direct_indexing/processing/test_dataset.py b/tests/direct_indexing/processing/test_dataset.py new file mode 100644 index 000000000..46af4ea1c --- /dev/null +++ b/tests/direct_indexing/processing/test_dataset.py @@ -0,0 +1,25 @@ +# TODO + + +def test_fun(): + assert True + + +def test_index_dataset(): + assert True + + +def test_convert_and_save_xml_to_processed_json(): + assert True + + +def test_json_filepath(): + assert True + + +def test_dataset_subtypes(): + assert True + + +def test_index_subtypes(): + assert True diff --git a/tests/direct_indexing/processing/test_util.py b/tests/direct_indexing/processing/test_util.py new file mode 100644 index 000000000..3b94186b5 --- /dev/null +++ b/tests/direct_indexing/processing/test_util.py @@ -0,0 +1,17 @@ +# TODO + + +def test_get_dataset_filepath(): + assert True + + +def test_get_dataset_version_validity(): + assert True + + +def test_get_dataset_filetype(): + assert True + + +def test_valid_version_from_file(): + assert True diff --git a/tests/direct_indexing/test_direct_indexing.py b/tests/direct_indexing/test_direct_indexing.py new file mode 100644 index 000000000..eae1c5ad8 --- /dev/null +++ b/tests/direct_indexing/test_direct_indexing.py @@ -0,0 +1,25 @@ +# TODO + + +def test_run(): + assert True + + +def test_clear_indices(): + assert True + + +def test_clear_indices_for_core(): + assert True + + +def test_run_publisher_metadata(): + assert True + + +def test_run_dataset_metadata(): + assert True + + +def test_drop_removed_data(): + assert True diff --git a/tests/direct_indexing/test_tasks.py b/tests/direct_indexing/test_tasks.py new file mode 100644 index 000000000..3ec026b68 --- /dev/null +++ b/tests/direct_indexing/test_tasks.py @@ -0,0 +1,29 @@ +# TODO + + +def test_clear_all_cores(): + assert True + + +def test_clear_cores_with_name(): + assert True + + +def test_start(): + assert True + + +def 
test_subtask_publisher_metadata(): + assert True + + +def test_subtask_dataset_metadata(): + assert True + + +def test_fcdo_replace_partial_url(): + assert True + + +def test_revoke_all_tasks(): + assert True diff --git a/tests/direct_indexing/test_util.py b/tests/direct_indexing/test_util.py new file mode 100644 index 000000000..5b617f173 --- /dev/null +++ b/tests/direct_indexing/test_util.py @@ -0,0 +1,101 @@ +import os +import subprocess +import urllib.request +from direct_indexing import util +from iaticloud import settings + + +# Test clear_core function +def test_clear_core(mocker): + # Define the core URL + core_url = "http://example.com/solr/core" + + # Mock the pysolr.Solr instance + mock_solr = mocker.patch('pysolr.Solr') + mock_solr_instance = mock_solr.return_value + + # Mock the delete method + mock_delete = mocker.patch.object(mock_solr_instance, 'delete') + + # Test the clear_core function + util.clear_core(core_url) + + # Assert that pysolr.Solr was called with the correct core URL and always_commit + mock_solr.assert_called_with(core_url, always_commit=True) + + # Assert that the delete method was called with '*:*' + mock_delete.assert_called_with(q='*:*') + + +# Test index_to_core function +def test_index_to_core(tmp_path, mocker): + """ + Index to core: + - Runs solr post tool with the correct arguments URL and JSON path (which file to submit to solr) + - if the solr post tool fails, returns the error message + - if the solr post tool succeeds, returns 'Successfully indexed' + - Removes the JSON file if remove is True + - If subprocess errors, return that error. + """ + # SETUP + test_dir = tmp_path / "test_data" + test_dir.mkdir() + json_path = test_dir / "test_data.json" + with open(json_path, 'w') as file: + file.write('{"key": "value"}') + + url = "http://example.com/solr/core" + OP = "subprocess.check_output" + # SUCCESSFUL INDEX: + # Mock subprocess.check_output to simulate success + mocker.patch(OP, return_value=b"Successfully indexed") + # Test a successful indexing + result = util.index_to_core(url, str(json_path), remove=False) + # Assert that subprocess.check_output was called with the correct arguments + subprocess.check_output.assert_called_with([ + settings.SOLR_POST_TOOL, + '-url', + url, + str(json_path) + ], stderr=subprocess.STDOUT) + # Assert that the function returns 'Successfully indexed' for a successful operation + assert result == "Successfully indexed" + # Assert that the file at json_path was not removed, as the remove argument was False + assert os.path.exists(json_path) is True + + # FAILED INDEX: + # Mock subprocess.check_output to simulate failure + mocker.patch(OP, return_value=b"msg: Failed to index\nERROR...") + # Test a successful indexing + result = util.index_to_core(url, str(json_path), remove=True) + # Assert that subprocess.check_output was called with the correct arguments + subprocess.check_output.assert_called_with([settings.SOLR_POST_TOOL, '-url', url, str(json_path)], + stderr=subprocess.STDOUT) + # Assert that the function does not return 'Successfully indexed + assert result != "Successfully indexed" + # Assert that the file at json_path was removed, as the remove argument was True + assert os.path.exists(json_path) is False + + # SUBPROCESS ERROR INDEX: + # Mock subprocess.check_output to simulate failure + mocker.patch(OP, side_effect=subprocess.CalledProcessError(returncode=1, cmd="solr_post_tool")) + # Test a failed indexing + result = util.index_to_core(url, str(json_path), remove=True) + # Assert that 
subprocess.check_output was called with the correct arguments + subprocess.check_output.assert_called_with([settings.SOLR_POST_TOOL, '-url', url, str(json_path)], + stderr=subprocess.STDOUT) + # Assert that the function returns an error message for a failed operation + assert "Failed to index due to:" in result + + +# Test datadump_success function +def test_datadump_success(mocker): + # Mock urllib.request.urlopen to return data with "passing" (success) + mocker.patch('urllib.request.urlopen') + urllib.request.urlopen.return_value.read.return_value = b' Date: Thu, 2 Nov 2023 17:20:15 +0100 Subject: [PATCH 03/49] feat: added pytest coverage --- .gitignore | 4 +++- requirements.txt | 1 + tests/README.md | 14 +++++++++++++- 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 7238fb6e8..470f2d92a 100644 --- a/.gitignore +++ b/.gitignore @@ -31,4 +31,6 @@ session.vim *.log # Python cache -__pycache__ \ No newline at end of file +__pycache__ + +.coverage diff --git a/requirements.txt b/requirements.txt index bf83c6e49..d428561fb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -59,6 +59,7 @@ pytest==7.4.3 pytest-celery==0.0.0 pytest-django==4.6.0 pytest-mock==3.12.0 +pytest-cov==4.1.0 # Working with XML Documents (legacy currency convert) lxml==4.9.3 diff --git a/tests/README.md b/tests/README.md index de7e1ec9e..2dc7a0af3 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,8 +1,20 @@ # Testing -We use pytest to test our project. +We use pytest to test our project. The tests follow the project structure, with unit and integration tests for the respective functions. + +We can do a simple test run with +``` +pytest tests +``` + +or we can generate a coverage report with: +``` +pytest --cov=direct_indexing --cov-report html:coverage_report tests +``` +This coverage report can be opened in the browser, from [the coverage_report directory in the root folder](../coverage_report/index.html). (Tip: With the VSCode extension "Live Preview", you can view the report from VSCode, and the report will update automatically when re-running tests.) 
## Installed dependencies - pytest +- pytest-cov - pytest-mock - pytest-django - pytest-celery From fd948725e3e172b7ceb43f08d09777e650152b36 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Thu, 2 Nov 2023 17:20:47 +0100 Subject: [PATCH 04/49] feat: update test_util to 100% --- tests/direct_indexing/test_util.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tests/direct_indexing/test_util.py b/tests/direct_indexing/test_util.py index 5b617f173..400839bc9 100644 --- a/tests/direct_indexing/test_util.py +++ b/tests/direct_indexing/test_util.py @@ -1,5 +1,7 @@ import os import subprocess +import pysolr +import pytest import urllib.request from direct_indexing import util from iaticloud import settings @@ -26,6 +28,11 @@ def test_clear_core(mocker): # Assert that the delete method was called with '*:*' mock_delete.assert_called_with(q='*:*') + # Assert that clear_core raises its error when encountering a pysolr.SolrError + mock_delete.side_effect = pysolr.SolrError + with pytest.raises(pysolr.SolrError): + util.clear_core(core_url) + # Test index_to_core function def test_index_to_core(tmp_path, mocker): @@ -50,7 +57,7 @@ def test_index_to_core(tmp_path, mocker): # Mock subprocess.check_output to simulate success mocker.patch(OP, return_value=b"Successfully indexed") # Test a successful indexing - result = util.index_to_core(url, str(json_path), remove=False) + result = util.index_to_core(url, str(json_path), remove=True) # Assert that subprocess.check_output was called with the correct arguments subprocess.check_output.assert_called_with([ settings.SOLR_POST_TOOL, @@ -61,9 +68,13 @@ def test_index_to_core(tmp_path, mocker): # Assert that the function returns 'Successfully indexed' for a successful operation assert result == "Successfully indexed" # Assert that the file at json_path was not removed, as the remove argument was False - assert os.path.exists(json_path) is True + assert os.path.exists(json_path) is False # FAILED INDEX: + # re-add the file + with open(json_path, 'w') as file: + file.write('{"key": "value"}') + # Mock subprocess.check_output to simulate failure mocker.patch(OP, return_value=b"msg: Failed to index\nERROR...") # Test a successful indexing From 2e78362b1510925c9c3f8a9f9f75ed2f8d0381c3 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Thu, 2 Nov 2023 17:36:15 +0100 Subject: [PATCH 05/49] feat: test clear_indices and clear_cores_with_name --- tests/direct_indexing/test_direct_indexing.py | 52 +++++++++++++++++-- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/tests/direct_indexing/test_direct_indexing.py b/tests/direct_indexing/test_direct_indexing.py index eae1c5ad8..660cb5317 100644 --- a/tests/direct_indexing/test_direct_indexing.py +++ b/tests/direct_indexing/test_direct_indexing.py @@ -1,12 +1,56 @@ # TODO +from direct_indexing.direct_indexing import clear_indices, clear_indices_for_core +import pytest +import pysolr +from django.conf import settings - -def test_run(): +# Test group: test_run +def test_run_clear_indices_success(mocker): + # INTEGRATION TEST assert True -def test_clear_indices(): - assert True +# Test group: test_clear_indices +def test_clear_indices_clears_all_indices(mocker): + # UNIT TEST + solr_instance_mock = mocker.MagicMock() + mocker.patch('pysolr.Solr', return_value=solr_instance_mock) + + result = clear_indices() + # Check that the Solr delete method was called + solr_instance_mock.delete.assert_called_with(q='*:*') + # Check that `delete` was called 7 times (once for each 
core) + assert solr_instance_mock.delete.call_count == 7 + # Check that the output is "success" + assert result == 'Success' + + +def test_clear_indices_raises_error(mocker): + # UNIT TEST + mocker.patch('pysolr.Solr', side_effect=pysolr.SolrError) + with pytest.raises(pysolr.SolrError): + clear_indices() + + +def test_clear_indices_for_core_clears_all_indices(mocker): + # UNIT TEST + solr_instance_mock = mocker.MagicMock() + mocker.patch('pysolr.Solr', return_value=solr_instance_mock) + + result = clear_indices_for_core("dataset") + # Check that the Solr delete method was called + solr_instance_mock.delete.assert_called_with(q='*:*') + # Check that `delete` was called once + assert solr_instance_mock.delete.call_count == 1 + # Check that the output is "success" + assert result == 'Success' + + +def test_clear_indices_for_core_raises_error(mocker): + # UNIT TEST + mocker.patch('pysolr.Solr', side_effect=pysolr.SolrError) + with pytest.raises(pysolr.SolrError): + clear_indices_for_core("dataset") def test_clear_indices_for_core(): From 030d64eb21fdc58cdb8f4a7fcb978216d2636a7c Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 8 Nov 2023 13:21:56 +0100 Subject: [PATCH 06/49] feat: added tests for metadata util --- direct_indexing/metadata/util.py | 3 - requirements.txt | 1 + tests/direct_indexing/metadata/test_util.py | 112 ++++++++++++++++++-- 3 files changed, 107 insertions(+), 9 deletions(-) diff --git a/direct_indexing/metadata/util.py b/direct_indexing/metadata/util.py index 62bf47e15..171b46782 100644 --- a/direct_indexing/metadata/util.py +++ b/direct_indexing/metadata/util.py @@ -86,6 +86,3 @@ def download_dataset(): except urllib.error.URLError as e: logging.error(f'download_dataset:: Error downloading dataset, due to {e}') raise - except urllib.error.HTTPError as e: - logging.error(f'download_dataset:: Error downloading dataset, due to {e}') - raise diff --git a/requirements.txt b/requirements.txt index d428561fb..ebae3cd8a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -60,6 +60,7 @@ pytest-celery==0.0.0 pytest-django==4.6.0 pytest-mock==3.12.0 pytest-cov==4.1.0 +requests-mock==1.11.0 # Working with XML Documents (legacy currency convert) lxml==4.9.3 diff --git a/tests/direct_indexing/metadata/test_util.py b/tests/direct_indexing/metadata/test_util.py index 860c5d81e..281636cd8 100644 --- a/tests/direct_indexing/metadata/test_util.py +++ b/tests/direct_indexing/metadata/test_util.py @@ -1,13 +1,113 @@ # TODO +import json +import pytest +import requests +import urllib +import zipfile +import tempfile -def test_retrieve(): - assert True +from direct_indexing.metadata.util import download_dataset, index, retrieve +# consts +SETTINGS_FRESH = 'direct_indexing.metadata.util.settings.FRESH' +SETTINGS_DATASET_PARENT_PATH = 'direct_indexing.metadata.util.settings.DATASET_PARENT_PATH' +TEST_URL = 'http://test.com' -def test_index(): - assert True +def test_retrieve(mocker, tmp_path, sample_data, requests_mock): + # Create test file + test_dir = tmp_path / 'test' + test_dir.mkdir() + with open(test_dir / 'test.json', 'w') as file: + json.dump(sample_data, file) + mocker.patch(SETTINGS_DATASET_PARENT_PATH, test_dir) + + # Test succesfully loading data when force_update = False, settings.FRESH = False + mocker.patch(SETTINGS_FRESH, False) + data = retrieve(TEST_URL, 'test', False) + assert data == sample_data + # Test succesfully loading data when force_update = True, settings.FRESH = False + mocker.patch(SETTINGS_FRESH, False) + data = retrieve(TEST_URL, 'test', True) + 
assert data == sample_data -def test_download_dataset(): - assert True + # Test requests.get with provided url succesfully creates a file @ test_dir/test2.json and returns 'result' + mocker.patch(SETTINGS_FRESH, True) + # Mock the json return from requests.get for the test url + requests_mock.get(TEST_URL, json=sample_data) + data = retrieve(TEST_URL, 'test2', False) + assert data == sample_data['result'] + # Assert there is a file called test2.json at test_dir + assert (test_dir / 'test2.json').exists() + + # Test `retrieve` function raises an requests.exceptions.RequestException + mocker.patch('requests.get', side_effect=requests.exceptions.RequestException) + mocker.patch(SETTINGS_FRESH, True) + with pytest.raises(requests.exceptions.RequestException): + retrieve(TEST_URL, 'test', False) + + +def test_index(mocker, tmp_path, sample_data): + # Create test dir + test_dir = tmp_path / 'test' + test_dir.mkdir() + + # Mock settings.DATASET_PARENT_PATH to be test_dir + mocker.patch(SETTINGS_DATASET_PARENT_PATH, test_dir) + # Mock the index_to_core function to return 'Successfully indexed' + mocker.patch('direct_indexing.metadata.util.index_to_core', return_value='Successfully indexed') + index_res = index('test', sample_data, TEST_URL) + assert index_res == 'Successfully indexed' + # Assert there is a file called test.json at test_dir + assert (test_dir / 'test.json').exists() + + +def test_download_dataset(mocker, tmp_path): + # Set up test path + test_dir = tmp_path / 'test' + test_dir.mkdir() + idm_dir = test_dir / 'iati-data-main' + idm_dir.mkdir() + mocker.patch(SETTINGS_DATASET_PARENT_PATH, test_dir) + + # Test that if not settings.FRESH, we return None and no further behaviour occurs + mocker.patch("urllib.request.URLopener") + mocker.patch(SETTINGS_FRESH, False) + ret_val = download_dataset() + assert ret_val == None + assert urllib.request.URLopener.call_count == 0 + + # mocks and instances + mocker.patch(SETTINGS_FRESH, True) + mocker.patch("urllib.request.URLopener") + mocker.patch('zipfile.ZipFile') + download_dataset() + + # Assert idm_dir was removed + assert not idm_dir.exists() + # Assert urllib.request.URLopener was called once + urllib.request.URLopener.assert_called_once() + # Assert zipfile.ZipFile was called once + zipfile.ZipFile.assert_called_once() + + # Assert any urllib errors are raised + mocker.patch('urllib.request.URLopener', side_effect=urllib.error.URLError("Test")) + with pytest.raises(urllib.error.URLError): + download_dataset() + + +@pytest.fixture +def sample_data(): + return { + 'result': [ + { + 'id': '1', + 'name': 'test', + }, + { + 'id': '2', + 'name': 'test2', + } + ] + } From 1d82dd2646e7ae9eb76999eea566acc9591c199a Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 8 Nov 2023 13:47:41 +0100 Subject: [PATCH 07/49] feat: added tests for metadata publisher --- direct_indexing/metadata/publisher.py | 4 +- .../metadata/test_publisher.py | 50 +++++++++++++++++-- tests/direct_indexing/metadata/test_util.py | 7 ++- 3 files changed, 51 insertions(+), 10 deletions(-) diff --git a/direct_indexing/metadata/publisher.py b/direct_indexing/metadata/publisher.py index 1a3538e71..48f01fa08 100644 --- a/direct_indexing/metadata/publisher.py +++ b/direct_indexing/metadata/publisher.py @@ -40,9 +40,11 @@ def _preprocess_publisher_metadata(publishers_metadata): """ for publisher in publishers_metadata: if 'publisher_first_publish_date' in publisher: - # regex to detect dates in the format dd.mm.yyyy if re.match(r'\d{2}\.\d{2}\.\d{4}', 
publisher['publisher_first_publish_date']): + # get the substring of the date that matches the regex + substr = re.search(r'\d{2}\.\d{2}\.\d{4}', publisher['publisher_first_publish_date']).group() + publisher['publisher_first_publish_date'] = substr # convert the dd.mm.yyyy to yyyy-mm-ddT00:00:00.000000 publisher['publisher_first_publish_date'] = datetime.datetime.strptime( publisher['publisher_first_publish_date'], '%d.%m.%Y').strftime('%Y-%m-%dT%H:%M:%S.%f') diff --git a/tests/direct_indexing/metadata/test_publisher.py b/tests/direct_indexing/metadata/test_publisher.py index 5077f0989..8da283e6e 100644 --- a/tests/direct_indexing/metadata/test_publisher.py +++ b/tests/direct_indexing/metadata/test_publisher.py @@ -1,9 +1,49 @@ -# TODO +import pytest +from direct_indexing.metadata.publisher import _preprocess_publisher_metadata, index_publisher_metadata -def test_index_publisher_metadata(): - assert True +TEST_VAL_1 = "01.01.2019" +TEST_VAL_2 = "01-01-2022T10:00:00.000000" +TEST_VAL_3 = "01.01.2019T10:00:00.000000" +TEST_VAL_4 = "test" +TEST_VAL_1_NEW = "2019-01-01T00:00:00.000000" -def test__preprocess_publisher_metadata(): - assert True +def test_index_publisher_metadata(mocker): + # mock the retrieve function + mocker.patch('direct_indexing.metadata.publisher.retrieve', return_value='test') + mocker.patch('direct_indexing.metadata.publisher._preprocess_publisher_metadata', return_value='test') + mocker.patch('direct_indexing.metadata.publisher.index', return_value='test') + # assert index_publisher_metadata returns True + assert index_publisher_metadata() == "test" + + +def test__preprocess_publisher_metadata(publisher_metadata_sample): + processed = _preprocess_publisher_metadata(publisher_metadata_sample) + # Assert a date with periods is converted + assert processed[0]['publisher_first_publish_date'] == TEST_VAL_1_NEW + # Assert a date with dashes is not converted + assert processed[1]['publisher_first_publish_date'] == TEST_VAL_2 + # Assert a date with periods and time is stripped from the time and updated to date with dashes + assert processed[2]['publisher_first_publish_date'] == TEST_VAL_1_NEW + # Assert other fields are not changed + assert processed[0]['other'] == TEST_VAL_4 + print(processed) + + +@pytest.fixture +def publisher_metadata_sample(): + return [ + { + "publisher_first_publish_date": TEST_VAL_1, + "other": TEST_VAL_4 + }, + { + "publisher_first_publish_date": TEST_VAL_2, + "other": TEST_VAL_4 + }, + { + "publisher_first_publish_date": TEST_VAL_3, + "other": TEST_VAL_4 + } + ] diff --git a/tests/direct_indexing/metadata/test_util.py b/tests/direct_indexing/metadata/test_util.py index 281636cd8..372daffb7 100644 --- a/tests/direct_indexing/metadata/test_util.py +++ b/tests/direct_indexing/metadata/test_util.py @@ -1,11 +1,9 @@ -# TODO import json import pytest import requests import urllib import zipfile -import tempfile from direct_indexing.metadata.util import download_dataset, index, retrieve @@ -14,6 +12,7 @@ SETTINGS_DATASET_PARENT_PATH = 'direct_indexing.metadata.util.settings.DATASET_PARENT_PATH' TEST_URL = 'http://test.com' + def test_retrieve(mocker, tmp_path, sample_data, requests_mock): # Create test file test_dir = tmp_path / 'test' @@ -21,7 +20,7 @@ def test_retrieve(mocker, tmp_path, sample_data, requests_mock): with open(test_dir / 'test.json', 'w') as file: json.dump(sample_data, file) mocker.patch(SETTINGS_DATASET_PARENT_PATH, test_dir) - + # Test succesfully loading data when force_update = False, settings.FRESH = False mocker.patch(SETTINGS_FRESH, 
False) data = retrieve(TEST_URL, 'test', False) @@ -75,7 +74,7 @@ def test_download_dataset(mocker, tmp_path): mocker.patch("urllib.request.URLopener") mocker.patch(SETTINGS_FRESH, False) ret_val = download_dataset() - assert ret_val == None + assert ret_val == None # NOQA: E711 assert urllib.request.URLopener.call_count == 0 # mocks and instances From 931b0e53c6ab27eb0aba1e0e8c17c3acc47d4bf5 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 8 Nov 2023 15:33:42 +0100 Subject: [PATCH 08/49] feat: added tests for dataset metadata --- .../direct_indexing/metadata/test_dataset.py | 189 ++++++++++++++++-- 1 file changed, 177 insertions(+), 12 deletions(-) diff --git a/tests/direct_indexing/metadata/test_dataset.py b/tests/direct_indexing/metadata/test_dataset.py index c03f558d5..4da5a1e7c 100644 --- a/tests/direct_indexing/metadata/test_dataset.py +++ b/tests/direct_indexing/metadata/test_dataset.py @@ -1,25 +1,190 @@ -# TODO +import pytest +import requests + +from direct_indexing.metadata.dataset import ( + DatasetException, _get_existing_datasets, index_datasets_and_dataset_metadata, load_codelists, prepare_update, + subtask_process_dataset +) def test_dataset_exception(): - assert True + # UNIT + message = "Test message" + with pytest.raises(DatasetException) as excinfo: + raise DatasetException(message) + assert str(excinfo.value) == message + + +def test_subtask_process_dataset(mocker, fixture_dataset): + # INTEGRATION + # Test successful indexing + res_str = 'Successfully indexed' + fun_path = "direct_indexing.metadata.dataset.dataset_processing.fun" + mocker.patch(fun_path, return_value=(res_str, res_str)) + res = subtask_process_dataset(None, False) + assert res == res_str + + # Test Dataset invalid + res_str_dataset = 'Dataset invalid' + mocker.patch(fun_path, return_value=(res_str_dataset, res_str)) + res = subtask_process_dataset(None, False) + assert res == res_str_dataset + + # Test DatasetException + res_str_err = 'Error processing dataset' + mocker.patch(fun_path, return_value=(res_str_err, res_str)) + with pytest.raises(DatasetException) as excinfo: + subtask_process_dataset(fixture_dataset, False) + assert str(excinfo.value) == f'Error indexing dataset {fixture_dataset["id"]}\nDataset metadata:\n{res_str}\nDataset indexing:\n{str(res_str_err)}' # NOQA + + +def test_index_datasets_and_dataset_metadata(mocker, fixture_datasets): + # Integration + # Test with update = False + # patch subfunctions + subtask_path = 'direct_indexing.metadata.dataset.subtask_process_dataset.delay' + mock_download = mocker.patch('direct_indexing.metadata.dataset.download_dataset') + mock_retrieve = mocker.patch('direct_indexing.metadata.dataset.retrieve', return_value=fixture_datasets) + mock_load_cl = mocker.patch('direct_indexing.metadata.dataset.load_codelists') + mock_subtask = mocker.patch(subtask_path) + mock_prep = mocker.patch('direct_indexing.metadata.dataset.prepare_update', return_value=(fixture_datasets, [True, False, True])) # NOQA + + # run index_datasets_and_dataset_metadata + res = index_datasets_and_dataset_metadata(False, False) + # Assert the result is correct and all subfunctions are called except for prepare_update + assert res == '- All Indexing substasks started' + mock_download.assert_called_once() + mock_retrieve.assert_called_once() + mock_load_cl.assert_called_once() + assert mock_subtask.call_count == len(fixture_datasets) + mock_prep.assert_not_called() + + # Test with update = True, and only the first dataset is to be updated + # Reset subtask mock + 
mock_subtask = mocker.patch(subtask_path) + index_datasets_and_dataset_metadata(True, False) + mock_prep.assert_called_once() + # Assert the subtask was triggered once with update True and once with update False + mock_subtask.assert_any_call(dataset=fixture_datasets[0], update=True) + mock_subtask.assert_any_call(dataset=fixture_datasets[1], update=False) + + # Test throttle dataset + # Mock settings.THROTTLE_DATASET to True + mocker.patch('direct_indexing.metadata.dataset.settings.THROTTLE_DATASET', True) + # Reset subtask mock + mock_subtask = mocker.patch(subtask_path) + index_datasets_and_dataset_metadata(False, False) + # Assert the subtask was called once times + mock_subtask.assert_called_once() + + +def test_load_codelists(mocker): + # Integration + cl_path = 'direct_indexing.metadata.dataset.codelists.Codelists' + mock_cl = mocker.patch(cl_path) + load_codelists() + mock_cl.assert_called_once() + + # Test exception being raised + mocker.patch(cl_path, side_effect=requests.exceptions.RequestException) + with pytest.raises(requests.exceptions.RequestException): + load_codelists() + + +def test__get_existing_datasets(mocker, requests_mock, fixture_solr_dataset, fixture_existing_datasets): + mocker.patch('direct_indexing.metadata.dataset.settings.SOLR_DATASET', "http://test.com") + requests_mock.get("http://test.com" + ( + '/select?q=resources.hash:* AND extras.filetype:*' + ' AND id:*&rows=100000&wt=json&fl=resources.hash,id,extras.filetype' + ), json=fixture_solr_dataset) + res = _get_existing_datasets() + # Expected res is a dict with the id as key and a dict with hash and filetype as value + assert res == fixture_existing_datasets + +def test_prepare_update(mocker, fixture_existing_datasets, fixture_datasets): + # add a changed dataset to fixture_existing_datasets + fixture_existing_datasets["id_test_2"] = { + "hash": "changed", + "filetype": "activity" + } + mocker.patch('direct_indexing.metadata.dataset._get_existing_datasets', return_value=fixture_existing_datasets) + # run prepare_update + ds, bools = prepare_update(fixture_datasets) -def test_subtask_process_dataset(): - assert True + # we provide 3 datasets, one new, one existing with a matching hash, and one existing with a changed hash + # therefore, we expect 2 datasets to be returned, one with update=False and a second with update=True + assert len(ds) == 2 + assert bools == [False, True] + # We expect id id_test_1 and id_test_2 to be in the returned datasets, in order as found in fixture_datasets + assert ds[0]["id"] == "id_test_1" + assert ds[1]["id"] == "id_test_2" -def test_index_datasets_and_dataset_metadata(): - assert True +@pytest.fixture +def fixture_dataset(): + return { + "id": "id_test", + } -def test_load_codelists(): - assert True +@pytest.fixture +def fixture_datasets(): + return [ + { + "id": "id_test_1", + "resources": [ + {"hash": "cc612755d0b822bb9af82f43e121428634be255a"}, + ] + }, + { + "id": "f783cb92-7039-44a8-b0ad-f6438566a6fa", + "resources": [ + {"hash": "cc612755d0b822bb9af82f43e121428634be255a"}, + ] + }, + { + "id": "id_test_2", + "resources": [ + {"hash": "cc612755d0b822bb9af82f43e121428634be255q"}, + ] + }, + ] -def test__get_existing_datasets(): - assert True +@pytest.fixture +def fixture_solr_dataset(): + return { + "responseHeader": { + "status": 0, + "QTime": 1, + "params": { + "q": "*:*", + "rows": "1" + } + }, + "response": { + "numFound": 10740, + "start": 0, + "numFoundExact": True, + "docs": [ + { + "id": "f783cb92-7039-44a8-b0ad-f6438566a6fa", + "resources.hash": [ + 
"cc612755d0b822bb9af82f43e121428634be255a" + ], + "extras.filetype": "activity", + } + ] + } + } -def test_prepare_update(): - assert True +@pytest.fixture +def fixture_existing_datasets(): + return { + "f783cb92-7039-44a8-b0ad-f6438566a6fa": { + "hash": "cc612755d0b822bb9af82f43e121428634be255a", + "filetype": "activity" + }, + } From 83f1a431066f9ab516e8a8103bbee8c38cfbe409 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 8 Nov 2023 15:37:23 +0100 Subject: [PATCH 09/49] chore: added requests mock to readme --- tests/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/README.md b/tests/README.md index 2dc7a0af3..18dfab6f1 100644 --- a/tests/README.md +++ b/tests/README.md @@ -8,7 +8,7 @@ pytest tests or we can generate a coverage report with: ``` -pytest --cov=direct_indexing --cov-report html:coverage_report tests +pytest --cov=direct_indexing --cov-report html:./coverage_report tests ``` This coverage report can be opened in the browser, from [the coverage_report directory in the root folder](../coverage_report/index.html). (Tip: With the VSCode extension "Live Preview", you can view the report from VSCode, and the report will update automatically when re-running tests.) @@ -18,3 +18,4 @@ This coverage report can be opened in the browser, from [the coverage_report dir - pytest-mock - pytest-django - pytest-celery +- requests-mock \ No newline at end of file From 1df7ba69fc178d6395f1090acd68695c420fca17 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 8 Nov 2023 16:21:32 +0100 Subject: [PATCH 10/49] feat: added tests for codelists model --- .../custom_fields/models/test_codelists.py | 82 ++++++++++++++++--- 1 file changed, 71 insertions(+), 11 deletions(-) diff --git a/tests/direct_indexing/custom_fields/models/test_codelists.py b/tests/direct_indexing/custom_fields/models/test_codelists.py index a14d2e1c2..ba818c7be 100644 --- a/tests/direct_indexing/custom_fields/models/test_codelists.py +++ b/tests/direct_indexing/custom_fields/models/test_codelists.py @@ -1,21 +1,81 @@ -# TODO +import json +import pytest -def test_codelist(): - assert True +from direct_indexing.custom_fields.models.codelists import SOURCES, Codelists -def test_read_codelists(): - assert True +def test_codelist(mocker, tmp_path, requests_mock, fixture_codelists): + # INTEGRATION + # Test loading the existing codelists + # write fixture_codelists to tmp_path/codelists.json + with open(tmp_path / 'codelists.json', 'w') as file: + json.dump(fixture_codelists, file) + # mock settings.CODELISTS_JSON to tmp_path/codelists.json + mocker.patch('direct_indexing.custom_fields.models.codelists.settings.CODELISTS_JSON', tmp_path / 'codelists.json') + cl = Codelists(download=False) + # Assert the codelists_dict is the same as fixture_codelists + assert cl.get_codelists() == fixture_codelists + # Test downloading the codelists + # Remove the codelists.json file + (tmp_path / 'codelists.json').unlink() + # mock the requests.get function to return fixture_codelists + for k, v in SOURCES.items(): + res = [] if k not in fixture_codelists else fixture_codelists[k] + requests_mock.get(v, json={'data': res}) + cl = Codelists(download=True) + # Assert the codelists_dict is the same as fixture_codelists + for key in fixture_codelists: + assert cl.codelists_dict[key] == fixture_codelists[key] + # assert the file was re-created @ tmp_path/codelists.json + assert (tmp_path / 'codelists.json').exists() -def test_get_value(): - assert True +def test_get_value(mocker, tmp_path, 
fixture_codelists): + with open(tmp_path / 'codelists.json', 'w') as file: + json.dump(fixture_codelists, file) + # mock settings.CODELISTS_JSON to tmp_path/codelists.json + mocker.patch('direct_indexing.custom_fields.models.codelists.settings.CODELISTS_JSON', tmp_path / 'codelists.json') + cl = Codelists(download=False) -def test_get_codelists(): - assert True + # Test getting a non-existant codelist results in an empty list + assert cl.get_value('test', 'test') == [] + # Test getting an existing codelist value + assert cl.get_value('BudgetStatus', '1') == 'Indicative' + # Test getting an existing codelist value list + assert cl.get_value('BudgetStatus', ['1', '2']) == ['Indicative', "Committed"] -def test_download_codelists(): - assert True +@pytest.fixture +def fixture_codelists(): + return { + "BudgetStatus": [ + { + "code": "1", + "name": "Indicative", + "description": "A non-binding estimate for the described budget.", + "status": "active" + }, + { + "code": "2", + "name": "Committed", + "description": "A binding agreement for the described budget.", + "status": "active" + } + ], + "BudgetType": [ + { + "code": "1", + "name": "Original", + "description": "The original budget allocated to the activity", + "status": "active" + }, + { + "code": "2", + "name": "Revised", + "description": "The updated budget for an activity", + "status": "active" + } + ], + } From 7c82e8c8fcdc4d5ab5781d450c74744e114138c1 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 8 Nov 2023 16:51:49 +0100 Subject: [PATCH 11/49] feat: added tests for currencies model --- .../custom_fields/models/currencies.py | 33 +++---- .../custom_fields/models/test_currencies.py | 86 +++++++++++++++++-- 2 files changed, 95 insertions(+), 24 deletions(-) diff --git a/direct_indexing/custom_fields/models/currencies.py b/direct_indexing/custom_fields/models/currencies.py index 4fcf956da..7ee1472ef 100644 --- a/direct_indexing/custom_fields/models/currencies.py +++ b/direct_indexing/custom_fields/models/currencies.py @@ -66,23 +66,26 @@ def convert_currency(self, source, target, value, month, year): :param year: int: the year :return: the converted value and the exchange rate from source to target """ - if None in (source, target, value, month, year): - return None, None + try: + if None in (source, target, value, month, year): + return None, None - if source == target: - return value, 1 # 1 on 1 relation + if source == target: + return value, 1 # 1 on 1 relation - source_conversion = self.get_currency(month, year, source) - target_conversion = self.get_currency(month, year, target) - if not source_conversion or not target_conversion: - return None, None - source_to_xdr_rate = source_conversion['value'] - xdr_to_target_rate = target_conversion['value'] + source_conversion = self.get_currency(month, year, source) + target_conversion = self.get_currency(month, year, target) + if not source_conversion or not target_conversion: + return None, None + source_to_xdr_rate = source_conversion['value'] + xdr_to_target_rate = target_conversion['value'] - converted_value = value * source_to_xdr_rate - if target == 'XDR': - return converted_value, source_to_xdr_rate + converted_value = value * source_to_xdr_rate + if target == 'XDR': + return converted_value, source_to_xdr_rate - exchange_rate = xdr_to_target_rate / source_to_xdr_rate + exchange_rate = xdr_to_target_rate / source_to_xdr_rate - return converted_value / xdr_to_target_rate, exchange_rate + return converted_value / xdr_to_target_rate, exchange_rate + except TypeError: + 
return None, None diff --git a/tests/direct_indexing/custom_fields/models/test_currencies.py b/tests/direct_indexing/custom_fields/models/test_currencies.py index ab7e4ce56..91074b652 100644 --- a/tests/direct_indexing/custom_fields/models/test_currencies.py +++ b/tests/direct_indexing/custom_fields/models/test_currencies.py @@ -1,17 +1,85 @@ -# TODO +import json +import pytest -def test_currencies(): - assert True +from direct_indexing.custom_fields.models.currencies import Currencies +MOCK_PATH = 'direct_indexing.custom_fields.models.currencies.settings.CURRENCIES_JSON' +FILE_NAME = 'currencies.json' -def test_read_currencies(): - assert True +def test_currencies(mocker, tmp_path, fixture_currencies): + # INTEGRATION + with open(tmp_path / FILE_NAME, 'w') as file: + json.dump(fixture_currencies, file) + # mock settings.CODELISTS_JSON to tmp_path/codelists.json + mocker.patch(MOCK_PATH, tmp_path / FILE_NAME) + cu = Currencies() + assert cu.currencies_list == fixture_currencies -def test_get_currency(): - assert True +def test_get_currency(mocker, tmp_path, fixture_currencies): + with open(tmp_path / FILE_NAME, 'w') as file: + json.dump(fixture_currencies, file) + # mock settings.CODELISTS_JSON to tmp_path/codelists.json + mocker.patch(MOCK_PATH, tmp_path / FILE_NAME) + cu = Currencies() + # Assert get currencies function returns the correct object, and none if the currency, year or month does not exist + assert cu.get_currency(3, 2023, 'USD') == fixture_currencies[0] + assert cu.get_currency(3, 2023, 'EUR') == fixture_currencies[1] + assert cu.get_currency(3, 2023, 'AUD') is None + assert cu.get_currency(3, 2050, 'EUR') is None + assert cu.get_currency(13, 2023, 'EUR') is None + # Assert None is returned if any of the arguments are None + assert cu.get_currency(None, 2023, 'USD') is None + assert cu.get_currency(3, None, 'USD') is None + assert cu.get_currency(3, 2023, None) is None -def test_convert_currency(): - assert True + +def test_convert_currency(mocker, tmp_path, fixture_currencies): + with open(tmp_path / FILE_NAME, 'w') as file: + json.dump(fixture_currencies, file) + # mock settings.CODELISTS_JSON to tmp_path/codelists.json + mocker.patch(MOCK_PATH, tmp_path / FILE_NAME) + cu = Currencies() + # Assert the convert_currency function returns None, None if any of the arguments are None + assert cu.convert_currency(None, "EUR", 42, 3, 2023) == (None, None) + assert cu.convert_currency("USD", None, 42, 3, 2023) == (None, None) + assert cu.convert_currency("USD", "EUR", None, 3, 2023) == (None, None) + assert cu.convert_currency("USD", "EUR", 42, None, 2023) == (None, None) + assert cu.convert_currency("USD", "EUR", 42, 3, None) == (None, None) + # Assert the converted value matches the expected value as per the fixture + assert cu.convert_currency("USD", "EUR", 42, 3, 2023) == (39.233001205290655, 1.0705273292815591) + # Assert any type errors resulting in (None, None). 
Trigger a typeerror by providing a string to the value argument + assert cu.convert_currency("USD", "EUR", "42", 3, 2023) == (None, None) + # Assert the return value is the same as the provided value if the source and target are the same + assert cu.convert_currency("EUR", "EUR", 42, 3, 2023) == (42, 1) + # Assert the return value is None, None if the source or target are not in the currencies list + assert cu.convert_currency("AUD", "EUR", 42, 3, 2023) == (None, None) + assert cu.convert_currency("EUR", "AUD", 42, 3, 2023) == (None, None) + # Assert the value is returned when XDR is the target + assert cu.convert_currency("EUR", "XDR", 42, 3, 2023) == (33.6444597384, 0.8010585652) + + +@pytest.fixture +def fixture_currencies(): + return [ + { + "year": 2023, + "month": 3, + "currency_id": "USD", + "value": 0.748284087 + }, + { + "year": 2023, + "month": 3, + "currency_id": "EUR", + "value": 0.8010585652 + }, + { + "year": 2023, + "month": 3, + "currency_id": "XDR", + "value": 1 + }, + ] From 6615c5a82e59742ec454124b818350a8ef4eeddc Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 8 Nov 2023 16:52:55 +0100 Subject: [PATCH 12/49] refactor: extract file name in codelists test --- .../custom_fields/models/test_codelists.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/direct_indexing/custom_fields/models/test_codelists.py b/tests/direct_indexing/custom_fields/models/test_codelists.py index ba818c7be..6a5a161b8 100644 --- a/tests/direct_indexing/custom_fields/models/test_codelists.py +++ b/tests/direct_indexing/custom_fields/models/test_codelists.py @@ -4,22 +4,24 @@ from direct_indexing.custom_fields.models.codelists import SOURCES, Codelists +FILE_NAME = 'codelists.json' + def test_codelist(mocker, tmp_path, requests_mock, fixture_codelists): # INTEGRATION # Test loading the existing codelists # write fixture_codelists to tmp_path/codelists.json - with open(tmp_path / 'codelists.json', 'w') as file: + with open(tmp_path / FILE_NAME, 'w') as file: json.dump(fixture_codelists, file) # mock settings.CODELISTS_JSON to tmp_path/codelists.json - mocker.patch('direct_indexing.custom_fields.models.codelists.settings.CODELISTS_JSON', tmp_path / 'codelists.json') + mocker.patch('direct_indexing.custom_fields.models.codelists.settings.CODELISTS_JSON', tmp_path / FILE_NAME) cl = Codelists(download=False) # Assert the codelists_dict is the same as fixture_codelists assert cl.get_codelists() == fixture_codelists # Test downloading the codelists # Remove the codelists.json file - (tmp_path / 'codelists.json').unlink() + (tmp_path / FILE_NAME).unlink() # mock the requests.get function to return fixture_codelists for k, v in SOURCES.items(): res = [] if k not in fixture_codelists else fixture_codelists[k] @@ -29,14 +31,14 @@ def test_codelist(mocker, tmp_path, requests_mock, fixture_codelists): for key in fixture_codelists: assert cl.codelists_dict[key] == fixture_codelists[key] # assert the file was re-created @ tmp_path/codelists.json - assert (tmp_path / 'codelists.json').exists() + assert (tmp_path / FILE_NAME).exists() def test_get_value(mocker, tmp_path, fixture_codelists): - with open(tmp_path / 'codelists.json', 'w') as file: + with open(tmp_path / FILE_NAME, 'w') as file: json.dump(fixture_codelists, file) # mock settings.CODELISTS_JSON to tmp_path/codelists.json - mocker.patch('direct_indexing.custom_fields.models.codelists.settings.CODELISTS_JSON', tmp_path / 'codelists.json') + 
mocker.patch('direct_indexing.custom_fields.models.codelists.settings.CODELISTS_JSON', tmp_path / FILE_NAME) cl = Codelists(download=False) # Test getting a non-existant codelist results in an empty list From db743f3862f679f6085e5a3a2c101b30ad888ffd Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 8 Nov 2023 17:25:59 +0100 Subject: [PATCH 13/49] feat: added tests for metadata cleaning --- .../direct_indexing/cleaning/test_metadata.py | 145 +++++++++++++++++- 1 file changed, 138 insertions(+), 7 deletions(-) diff --git a/tests/direct_indexing/cleaning/test_metadata.py b/tests/direct_indexing/cleaning/test_metadata.py index 7b0572397..c8aa8de67 100644 --- a/tests/direct_indexing/cleaning/test_metadata.py +++ b/tests/direct_indexing/cleaning/test_metadata.py @@ -1,13 +1,144 @@ -# TODO +import pytest +from direct_indexing.cleaning.metadata import clean_resources, clean_extras, clean_dataset_metadata -def test_clean_dataset_metadata(): - assert True +def test_clean_dataset_metadata(mocker, fixture_dataset_metadata): + # mock clean_resources and clean_extras + mock_cr = mocker.patch('direct_indexing.cleaning.metadata.clean_resources') + mock_ce = mocker.patch('direct_indexing.cleaning.metadata.clean_extras') + clean_dataset_metadata(fixture_dataset_metadata) + # Assert mock_cr and mock_ce are called once + mock_cr.assert_called_once() + mock_ce.assert_called_once() -def test_clean_resources(): - assert True +def test_clean_resources(fixture_dataset_metadata): + clean_resources(fixture_dataset_metadata) + presumed_removed_resources = [ + "mimetype", "cache_url", "description", "cache_last_updated", "mimetype_inner", "last_modified", "url_type", "resource_type", "name" + ] + presumed_kept_resources = [ + "hash", "metadata_modified", "url", "format", "state", "created", "package_id", "position", "size", "id" + ] + # Assert empty values are removed and non-empty values are kept + for key in presumed_removed_resources: + assert key not in fixture_dataset_metadata["resources"][0] + for key in presumed_kept_resources: + assert key in fixture_dataset_metadata["resources"][0] -def test_clean_extras(): - assert True + +def test_clean_extras(fixture_dataset_metadata): + clean_extras(fixture_dataset_metadata) + # Check if all the items with empty values are removed from fixture_dataset_metadata["extras"] + # Assert secondary_publisher not in fixture_dataset_metadata["extras"] + keys = [item["key"] for item in fixture_dataset_metadata["extras"]] + assert "secondary_publisher" not in keys + assert "country" not in keys + # Check the 6 remaining items are still in the extras + assert len(keys) == 6 + for key in keys: + # Assert there is a f'extras.{key}' field in fixture_dataset_metadata + assert f'extras.{key}' in fixture_dataset_metadata + + +@pytest.fixture +def fixture_dataset_metadata(): + return { + # REMOVE + "owner_org": "5e04afab-3aee-4871-ae8d-c175896c5112", + "maintainer": None, + "relationships_as_object": [], + "private": False, + "maintainer_email": None, + "num_tags": 0, + "id": "9d7eb44f-0b13-422d-9542-0793e785d4fa", + "metadata_created": "2014-01-15T07:49:08.717792", + "metadata_modified": "2023-03-10T05:01:33.194172", + "author": None, + "author_email": "jasper.hakala@formin.fi", + "state": "active", + "version": None, + "license_id": "other-at", + "type": "dataset", + "resources": [ + { + "mimetype": "", + "cache_url": "", + "hash": "asdcd03416fe22532a19d40f625f1e55b2e3fba738f", + "description": "", + "metadata_modified": "2022-05-05T23:56:00.301819", + "cache_last_updated": "", 
+ "url": "http://formin.finland.fi/opendata/IATI/Finland_total_2012.xml", + "format": "iati-xml", + "state": "active", + "created": "2022-04-20T07:16:11.709336", + "package_id": "9d7eb44f-0b13-422d-9542-0793e785d4fa", + "mimetype_inner": "", + "last_modified": "", + "position": 0, + "size": 6845840, + "url_type": "", + "id": "47c2ae77-005a-4df9-953d-8a29b0f544ac", + "resource_type": "", + "name": "" + } + ], + "num_resources": 1, + "tags": [], + "title": "Finland Activity File 2012", + "groups": [], + "creator_user_id": "b9a93bc8-247c-4ad9-909f-8531fed1983a", + "relationships_as_subject": [], + "name": "finland_mfa-001", + "isopen": True, + "url": None, + "notes": "", + "license_title": "Other (Attribution)", + "extras": [ + { + "value": "2073", + "key": "activity_count" + }, + { + "value": "", + "key": "country" + }, + { + "value": "2019-06-27 12:59:51", + "key": "data_updated" + }, + { + "value": "activity", + "key": "filetype" + }, + { + "value": "2.02", + "key": "iati_version" + }, + { + "value": "en", + "key": "language" + }, + { + "value": "", + "key": "secondary_publisher" + }, + { + "key": "validation_status", + "value": "Error" + } + ], + "organization": { + "description": "", + "title": "Finland - Ministry for Foreign Affairs", + "created": "2011-11-25T13:21:36.013765", + "approval_status": "approved", + "is_organization": True, + "state": "active", + "image_url": "", + "type": "organization", + "id": "5e04afab-3aee-4871-ae8d-c175896c5112", + "name": "finland_mfa" + } + } \ No newline at end of file From 00b68e7bcc02b750193e9f0b5ab282a30283a4c3 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Thu, 9 Nov 2023 11:10:09 +0100 Subject: [PATCH 14/49] feat: added tests and safety for processing util --- direct_indexing/processing/util.py | 64 ++-- tests/direct_indexing/processing/test_util.py | 290 +++++++++++++++++- 2 files changed, 317 insertions(+), 37 deletions(-) diff --git a/direct_indexing/processing/util.py b/direct_indexing/processing/util.py index 82c0de1e5..67fe4eab4 100644 --- a/direct_indexing/processing/util.py +++ b/direct_indexing/processing/util.py @@ -15,17 +15,20 @@ def get_dataset_filepath(dataset): :param dataset: the dataset for which to build a filepath. :return: the filepath of the dataset, None if not found. """ - org_name = None - path_string = None - if 'organization' in dataset: - if dataset['organization'] is None: - return None - if 'name' in dataset['organization']: - org_name = dataset['organization']['name'] - if org_name: - dataset_name = dataset['name'] # Name is a required field - path_string = settings.DATA_EXTRACTED_PATH + '/' + org_name + '/' + dataset_name + '.xml' - return path_string + try: + org_name = None + path_string = None + if 'organization' in dataset: + if dataset['organization'] is None: + return None + if 'name' in dataset['organization']: + org_name = dataset['organization']['name'] + if org_name: + dataset_name = dataset['name'] # Name is a required field + path_string = settings.DATA_EXTRACTED_PATH + '/' + org_name + '/' + dataset_name + '.xml' + return path_string + except Exception: + return None def get_dataset_version_validity(dataset, dataset_filepath): @@ -48,21 +51,24 @@ def get_dataset_version_validity(dataset, dataset_filepath): :param dataset_filepath: The path to the dataset :return: A boolean indicating the Validation of the dataset """ - if not dataset_filepath or not os.path.isfile(dataset_filepath): - # If we cannot find a file, we cannot index it and it is not valid. 
- return False - - version = 'extras.iati_version' - if version in dataset: - if dataset[version] in VALID_VERSIONS: - return True - elif dataset[version] in INVALID_VERSIONS: + try: + if not dataset_filepath or not os.path.isfile(dataset_filepath): + # If we cannot find a file, we cannot index it and it is not valid. return False - else: # Retrieve version from dataset file as the version is not reported in metadata - return valid_version_from_file(dataset_filepath) - else: - return valid_version_from_file(dataset_filepath) + version = 'extras.iati_version' + if version in dataset: + if dataset[version] in VALID_VERSIONS: + return True + elif dataset[version] in INVALID_VERSIONS: + return False + else: # Retrieve version from dataset file as the version is not reported in metadata + return valid_version_from_file(dataset_filepath) + + else: + return valid_version_from_file(dataset_filepath) + except Exception: + return False def get_dataset_filetype(dataset): @@ -72,11 +78,13 @@ def get_dataset_filetype(dataset): :param dataset: The dataset to check. :return: Nonoe or the filetype, activity or organisation. """ - if 'extras.filetype' not in dataset: + try: + if 'extras.filetype' not in dataset: + return 'None' + else: + return dataset['extras.filetype'] + except Exception: return 'None' - else: - return dataset['extras.filetype'] - def valid_version_from_file(filepath): """ diff --git a/tests/direct_indexing/processing/test_util.py b/tests/direct_indexing/processing/test_util.py index 3b94186b5..9367b2d8c 100644 --- a/tests/direct_indexing/processing/test_util.py +++ b/tests/direct_indexing/processing/test_util.py @@ -1,17 +1,289 @@ -# TODO +import pytest +from direct_indexing.processing.util import ( + get_dataset_filepath, get_dataset_filetype, get_dataset_version_validity, valid_version_from_file +) -def test_get_dataset_filepath(): - assert True +PATCH_FN = 'direct_indexing.processing.util.valid_version_from_file' -def test_get_dataset_version_validity(): - assert True +def test_get_dataset_filepath(mocker, fixture_dataset_activity, fixture_dataset_organisation): + # Test getting a filepath for an activity dataset + # mock DATA_EXTRACTED_PATH to /iati-data-main/data + mocker.patch('direct_indexing.processing.util.settings.DATA_EXTRACTED_PATH', '/iati-data-main/data') + assert get_dataset_filepath(fixture_dataset_activity) == "/iati-data-main/data/fcdo/fcdo-set-1.xml" + assert get_dataset_filepath(fixture_dataset_organisation) == "/iati-data-main/data/fcdo/fcdo-org.xml" + # Test a dataset with an organisation field but no name + assert get_dataset_filepath({"organization": {"Test": "Test"}}) is None + # Test a dataset with an organisation field which is none + assert get_dataset_filepath({"organization": None}) is None + # Test a dataset with no organisation field + assert get_dataset_filepath({}) is None -def test_get_dataset_filetype(): - assert True + # Test the function with None as its argument returns None + assert get_dataset_filepath(None) is None -def test_valid_version_from_file(): - assert True +def test_get_dataset_version_validity(mocker, tmp_path): + field_name = 'extras.iati_version' + file_path = tmp_path / "fcdo-set-1.xml" + # Test the function returns False if the dataset_filepath is None or the file does not exist at that path + assert not get_dataset_version_validity({}, None) + assert not get_dataset_version_validity({}, file_path) + # Mock the valid_version_from_file function to return True + mocker.patch(PATCH_FN, return_value=True) + + # Create test file + with 
open(file_path, 'w') as file: + file.write("test") + + assert get_dataset_version_validity({field_name: '2.03'}, file_path) + assert not get_dataset_version_validity({field_name: '1.01'}, file_path) + assert get_dataset_version_validity({field_name: '0.0'}, file_path) + assert get_dataset_version_validity({}, file_path) + + # Test that if the function raises an exception, it returns False + mocker.patch(PATCH_FN, side_effect=Exception) + assert not get_dataset_version_validity({}, file_path) + + +def test_get_dataset_filetype(mocker): + # Test the function returns None if the dataset is None + assert get_dataset_filetype(None) == "None" + # Test the function returns None if the dataset has no extras.filetype field + assert get_dataset_filetype({}) == "None" + # Test the function returns the correct filetype if the dataset has an extras.filetype field + assert get_dataset_filetype({'extras.filetype': 'activity'}) == "activity" + assert get_dataset_filetype({'extras.filetype': 'organisation'}) == "organisation" + # Test that if the function raises an exception, it returns False + mocker.patch('direct_indexing.processing.util.get_dataset_filetype', side_effect=Exception) + assert get_dataset_filetype({}) == "None" + + +def test_valid_version_from_file(tmp_path): + file_path = tmp_path / "fcdo-set-1.xml" + + # Test with an XML file not containing the version + with open(file_path, 'w') as file: + file.write("test") + assert not valid_version_from_file(file_path) + + # Test with an XML file containing the version + with open(file_path, 'w') as file: + file.write("test") + assert valid_version_from_file(file_path) + + # Test with an XML file without a rood node resulting in a ParseError + with open(file_path, 'w') as file: + file.write("") + assert not valid_version_from_file(file_path) + + +@pytest.fixture +def fixture_dataset_activity(): + return { + "owner_org": "4da32e41-a060-4d75-86c1-4b627eb22647", + "maintainer": None, + "relationships_as_object": [], + "private": False, + "maintainer_email": None, + "num_tags": 0, + "publisher_country": "GB", + "id": "e2fcee3e-a445-4093-a74c-34eeed942221", + "metadata_created": "2023-02-02T09:53:55.415185", + "metadata_modified": "2023-10-29T05:49:15.331301", + "author": None, + "author_email": "iati-feedback@fcdo.gov.uk", + "state": "active", + "version": None, + "license_id": "uk-ogl", + "type": "dataset", + "resources": [ + { + "mimetype": "", + "cache_url": None, + "hash": "da84a2a1186334c6edeeef2e608085d3fa43e1f1", + "description": None, + "metadata_modified": "2023-10-29T05:49:15.354998", + "cache_last_updated": None, + "url": "http://iati.fcdo.gov.uk/iati_files/solr/FCDO-set-1.xml", + "format": "IATI-XML", + "state": "active", + "created": "2023-10-29T05:49:12.216564", + "package_id": "e2fcee3e-a445-4093-a74c-34eeed942221", + "mimetype_inner": None, + "last_modified": None, + "position": 0, + "size": 7743257, + "url_type": None, + "id": "c3bddc8a-8a13-4706-a2eb-15e9b207fe98", + "resource_type": None, + "name": None + } + ], + "num_resources": 1, + "publisher_organization_type": "10", + "tags": [], + "title": "FCDO Activity File 1", + "groups": [], + "creator_user_id": "ffe50b5a-bfa2-4522-93b6-c2adfc7bee99", + "publisher_source_type": "primary_source", + "relationships_as_subject": [], + "publisher_iati_id": "GB-GOV-1", + "name": "fcdo-set-1", + "isopen": True, + "url": None, + "notes": "", + "license_title": "UK Open Government Licence (OGL)", + "extras": [ + { + "value": "806", + "key": "activity_count" + }, + { + "value": "", + "key": "country" 
+ }, + { + "value": "2021-06-07 00:00:00", + "key": "data_updated" + }, + { + "value": "activity", + "key": "filetype" + }, + { + "value": "2.03", + "key": "iati_version" + }, + { + "value": "", + "key": "language" + }, + { + "value": "", + "key": "secondary_publisher" + }, + { + "key": "validation_status", + "value": "Not Found" + } + ], + "license_url": "http://reference.data.gov.uk/id/open-government-licence", + "organization": { + "description": "", + "title": "UK - Foreign, Commonwealth and Development Office", + "created": "2020-08-19T13:55:48.059928", + "approval_status": "approved", + "is_organization": True, + "state": "active", + "image_url": "http://iati.fcdo.gov.uk/iati_files/FCDO_logo.png", + "type": "organization", + "id": "4da32e41-a060-4d75-86c1-4b627eb22647", + "name": "fcdo" + } + } + + +@pytest.fixture +def fixture_dataset_organisation(): + return { + "owner_org": "4da32e41-a060-4d75-86c1-4b627eb22647", + "maintainer": None, + "relationships_as_object": [], + "private": False, + "maintainer_email": None, + "num_tags": 0, + "publisher_country": "GB", + "id": "3faa890f-b58c-497c-86af-2f6adda0ba1a", + "metadata_created": "2020-08-21T12:48:04.440044", + "metadata_modified": "2023-11-03T04:58:51.131135", + "author": None, + "author_email": "enquiry@fcdo.gov.uk", + "state": "active", + "version": None, + "license_id": "uk-ogl", + "type": "dataset", + "resources": [ + { + "mimetype": "", + "cache_url": None, + "hash": "9241767ac307d58276e11c53f319dfa47b6ea112", + "description": "", + "metadata_modified": "2023-11-03T04:58:51.148529", + "cache_last_updated": None, + "url": "http://iati.fcdo.gov.uk/iati_files/organisation.xml", + "format": "IATI-XML", + "state": "active", + "created": "2020-09-01T11:35:05.318259", + "package_id": "3faa890f-b58c-497c-86af-2f6adda0ba1a", + "mimetype_inner": None, + "last_modified": None, + "position": 0, + "revision_id": "125a6887-14ea-45e7-b12e-4debdc3f64f1", + "size": 77601, + "url_type": None, + "id": "59866027-c13c-4397-8d34-b8890e9b1024", + "resource_type": None, + "name": None + } + ], + "num_resources": 1, + "publisher_organization_type": "10", + "tags": [], + "title": "FCDO Organisation File", + "groups": [], + "creator_user_id": "ffe50b5a-bfa2-4522-93b6-c2adfc7bee99", + "publisher_source_type": "primary_source", + "relationships_as_subject": [], + "publisher_iati_id": "GB-GOV-1", + "name": "fcdo-org", + "isopen": True, + "url": None, + "notes": "", + "license_title": "UK Open Government Licence (OGL)", + "extras": [ + { + "value": "", + "key": "country" + }, + { + "value": "2023-11-01 17:10:36", + "key": "data_updated" + }, + { + "value": "organisation", + "key": "filetype" + }, + { + "value": "2.03", + "key": "iati_version" + }, + { + "value": "en", + "key": "language" + }, + { + "value": "", + "key": "secondary_publisher" + }, + { + "key": "validation_status", + "value": "Success" + } + ], + "license_url": "http://reference.data.gov.uk/id/open-government-licence", + "organization": { + "description": "", + "title": "UK - Foreign, Commonwealth and Development Office", + "created": "2020-08-19T13:55:48.059928", + "approval_status": "approved", + "is_organization": True, + "state": "active", + "image_url": "http://iati.fcdo.gov.uk/iati_files/FCDO_logo.png", + "type": "organization", + "id": "4da32e41-a060-4d75-86c1-4b627eb22647", + "name": "fcdo" + } + } From 25bd90dcb97e256348cf18f044c5786bbdbae0c8 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Thu, 9 Nov 2023 13:22:12 +0100 Subject: [PATCH 15/49] refactor: test cleanup --- 
tests/direct_indexing/cleaning/test_metadata.py | 5 +++-- tests/direct_indexing/test_direct_indexing.py | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/direct_indexing/cleaning/test_metadata.py b/tests/direct_indexing/cleaning/test_metadata.py index c8aa8de67..7fbbd04d6 100644 --- a/tests/direct_indexing/cleaning/test_metadata.py +++ b/tests/direct_indexing/cleaning/test_metadata.py @@ -16,7 +16,8 @@ def test_clean_dataset_metadata(mocker, fixture_dataset_metadata): def test_clean_resources(fixture_dataset_metadata): clean_resources(fixture_dataset_metadata) presumed_removed_resources = [ - "mimetype", "cache_url", "description", "cache_last_updated", "mimetype_inner", "last_modified", "url_type", "resource_type", "name" + "mimetype", "cache_url", "description", "cache_last_updated", "mimetype_inner", + "last_modified", "url_type", "resource_type", "name" ] presumed_kept_resources = [ "hash", "metadata_modified", "url", "format", "state", "created", "package_id", "position", "size", "id" @@ -141,4 +142,4 @@ def fixture_dataset_metadata(): "id": "5e04afab-3aee-4871-ae8d-c175896c5112", "name": "finland_mfa" } - } \ No newline at end of file + } diff --git a/tests/direct_indexing/test_direct_indexing.py b/tests/direct_indexing/test_direct_indexing.py index 660cb5317..ce4532c31 100644 --- a/tests/direct_indexing/test_direct_indexing.py +++ b/tests/direct_indexing/test_direct_indexing.py @@ -2,7 +2,7 @@ from direct_indexing.direct_indexing import clear_indices, clear_indices_for_core import pytest import pysolr -from django.conf import settings + # Test group: test_run def test_run_clear_indices_success(mocker): From d431b453ae170873ab7e029c1d592a0fc044d352 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Thu, 9 Nov 2023 15:32:35 +0100 Subject: [PATCH 16/49] feat: added tests for and updated dataset cleaning --- direct_indexing/cleaning/dataset.py | 8 +- .../direct_indexing/cleaning/test_dataset.py | 205 +++++++++++++++++- 2 files changed, 198 insertions(+), 15 deletions(-) diff --git a/direct_indexing/cleaning/dataset.py b/direct_indexing/cleaning/dataset.py index 377842228..7b7e09f45 100644 --- a/direct_indexing/cleaning/dataset.py +++ b/direct_indexing/cleaning/dataset.py @@ -55,9 +55,8 @@ def extract_key_value_fields(data, add_fields, key, value): add_fields = extract_single_values(add_fields, value, key, data) # If the fields are not yet at the lowest level of key-value pair, # process the underlying field. 
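# --- Editor's note: illustrative check, not part of the patch -----------------
# The hunks below make three behavioural fixes to the cleaning helpers: plain
# dict values are now recursed into (the branch previously matched list instead
# of dict, as the inline comment notes), the currency/value-date/year add_fields
# are keyed by the field name rather than the element, and booleans are indexed
# as 1/0 instead of always 0. A minimal sketch of the new boolean behaviour; the
# key "humanitarian" is a hypothetical example and the call signature follows
# the tests added further down in this patch.
from direct_indexing.cleaning.dataset import extract_single_values

example = {"humanitarian": True}
extract_single_values({}, example["humanitarian"], "humanitarian", example)
assert example["humanitarian"] == 1  # any bool was flattened to 0 before this fix
# ------------------------------------------------------------------------------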
- elif type(value) in [OrderedDict, list]: + elif type(value) in [OrderedDict, dict]: # was list instead of dict data[key] = recursive_attribute_cleaning(value) - return add_fields @@ -130,7 +129,7 @@ def list_values(element, data, key, add_fields): data[key].append(' ') for string in ['@currency', '@value-date', '@year']: if string in element: - add_fields[f'{element}.{string[1:]}'].append(element[string]) + add_fields[f'{key}.{string[1:]}'].append(element[string]) if XML_LANG_STR in element: add_fields[f'{key}.{LANG_STR}'].append( element[XML_LANG_STR]) @@ -165,7 +164,7 @@ def extract_single_values(add_fields, value, key, data): data[key] = value return add_fields if type(value) is bool: - data[key] = 0 + data[key] = 1 if value else 0 return add_fields if '$' in value: data[key] = value['$'] @@ -179,6 +178,7 @@ def extract_single_values(add_fields, value, key, data): if XML_LANG_STR in value: add_fields[f'{key}.{LANG_STR}'] = value[ XML_LANG_STR] + # assume the language is not provided else: if key != 'value': add_fields[f'{key}.{LANG_STR}'] = ' ' diff --git a/tests/direct_indexing/cleaning/test_dataset.py b/tests/direct_indexing/cleaning/test_dataset.py index a1e85e4c6..8b5c78279 100644 --- a/tests/direct_indexing/cleaning/test_dataset.py +++ b/tests/direct_indexing/cleaning/test_dataset.py @@ -1,25 +1,208 @@ -# TODO +from direct_indexing.cleaning.dataset import ( + extract_key_value_fields, extract_list_values, extract_literal_values, extract_single_values, list_values, + recursive_attribute_cleaning, XML_LANG_STR +) -def test_recursive_attribute_cleaning(): - assert True +def test_recursive_attribute_cleaning(mocker): + # Test any key with a @ is replaced with the value of the key without the @ + assert recursive_attribute_cleaning({"@id": 1}) == {"id": 1} + assert recursive_attribute_cleaning({"id": 1, "@test": 2}) == {"id": 1, "test": 2} + # Test any key with xml:lang is replaced with lang + assert recursive_attribute_cleaning({XML_LANG_STR: "en"}) == {"lang": "en"} + # Test extraction of fields that need to be appended to the dataset + # This is an integration test and we mock the result of extract_key_value_fields + key = "value" + value = [{"$": 1}, {"$": 2}] + data = {key: value} + mocker.patch("direct_indexing.cleaning.dataset.extract_key_value_fields", return_value={'value': [1, 2]}) + data = recursive_attribute_cleaning(data) + assert data == {'value': [1, 2]} + mocker.stopall() # Reset mocks -def test_extract_key_value_fields(): - assert True + # Test where data is a list of dicts + mock_recursive = mocker.patch( + 'direct_indexing.cleaning.dataset.recursive_attribute_cleaning', + side_effect=recursive_attribute_cleaning, + autospec=True + ) + # Call the function under test + recursive_attribute_cleaning([[{"id": 1}, {"id": 2}], {"id": 3}]) + # Assert that mock_recursive was called for each dictionary in the structure + assert mock_recursive.call_count == 4 # 2 dictionaries in the list + 1 dictionary outside the list + # Assert mock recursive was called once with the child list, then once for each dict + mock_recursive.assert_any_call({"id": 3}) + mock_recursive.assert_any_call({"id": 2}) + mock_recursive.assert_any_call({"id": 1}) + mock_recursive.assert_any_call([{"id": 1}, {"id": 2}]) -def test_extract_literal_values(): - assert True +def test_extract_key_value_fields(mocker): + # mock extract_literal_values, extract_list_values, extract_single_values, recursive_attribute_cleaning + mock_elv = mocker.patch('direct_indexing.cleaning.dataset.extract_literal_values', ) + 
mock_elistv = mocker.patch('direct_indexing.cleaning.dataset.extract_list_values') + mock_esv = mocker.patch('direct_indexing.cleaning.dataset.extract_single_values') + mock_rac = mocker.patch('direct_indexing.cleaning.dataset.recursive_attribute_cleaning') + # Test if the key is a single literal value + key = "iati-identifier" + value = {"$": "test"} + data = {key: value} + extract_key_value_fields(data, {}, key, value) + mock_elv.assert_called_once() -def test_extract_list_values(): - assert True + # Test for a list of values with key "value" + key = "value" + value = [{"$": 1}, {"$": 2}] + data = {key: value} + extract_key_value_fields(data, {}, key, value) + mock_elistv.assert_called_once() + + # Test for a single value with key "value" + key = "value" + value = {"$": 1} + data = {key: value} + extract_key_value_fields(data, {}, key, value) + mock_esv.assert_called_once() + + # Test for a value dict that recursive attribute_cleaning is called + key = "not_value" + value = {"test": 1} + data = {key: value} + extract_key_value_fields(data, {}, key, value) + mock_rac.assert_called_once() + + +def test_extract_literal_values(mocker): + key = "test" + # Assert if the value is a list of dicts, the data obj is updated with a list for all the values + value = [{"$": "value1"}, {"$": "value2"}] + data = {key: value} + extract_literal_values(value, key, data) + assert data[key] == ["value1", "value2"] + + # Assert if the value is a single value, the data obj is updated with the value + value = {"$": "value"} + data = {key: value} + extract_literal_values(value, key, data) + assert data[key] == "value" + + +def test_extract_list_values(mocker): + add_fields = {} + value = [{"$": 1}, {"$": 2}] + key = "value" + data = {key: value} + # mocker patch list_values to do nothing + mock_lv = mocker.patch('direct_indexing.cleaning.dataset.list_values') + add_fields = extract_list_values(add_fields, value, key, data) + + # Assert the expected behavior + assert data[key] == [] # Check if data[key] is initialized as an empty list + + # Check if add_fields is updated correctly + expected_add_fields = { + f'{key}.currency': [], + f'{key}.value_date': [], + f'{key}.year': [], + f'{key}.lang': [] + } + assert add_fields == expected_add_fields + # Assert mock_lv was called as many times as there are elements in value + assert mock_lv.call_count == len(value) def test_list_values(): - assert True + # List values assumes the following: + # It gets an element, which can be a dict + # We expect the parent to be a value field + test_key = "value" + # Assert if the element is an empty list, the data object is not updated and the add_fields remain unchanged + test_elem = {} + test_data = {test_key: []} + test_data, res = list_values(test_elem, test_data, test_key, {}) + assert res == {} + assert test_data == {test_key: []} + + # Assert if the element contains a $ key, its value is stored in the provided key array + test_elem = {"$": 1} + test_data = {test_key: []} + test_data, res = list_values(test_elem, test_data, test_key, {}) + assert res == {} + assert test_data == {test_key: [1]} + + # Assert if the element is missing a $ key, an empty string is stored + test_elem = {"other": 1} + test_data = {test_key: []} + test_data, res = list_values(test_elem, test_data, test_key, {}) + assert res == {} + assert test_data == {test_key: [" "]} + + # Assert if @currency, @value-date or @year is in the keys, the value is appended to add_fields + test_elem = {"$": 1, "@currency": "USD"} + test_data = {test_key: []} + test_data, 
res = list_values(test_elem, test_data, test_key, {f"{test_key}.currency": []}) + assert res == {"value.currency": ["USD"]} + assert test_data == {test_key: [1]} + + # Assert if '@{http://www.w3.org/XML/1998/namespace}lang' is in the value keys + test_elem = {"$": 1, XML_LANG_STR: "en"} + test_data = {test_key: []} + test_data, res = list_values(test_elem, test_data, test_key, {f"{test_key}.lang": []}) + assert res == {"value.lang": ["en"]} + + # Assert if the key is not 'value', an empty field is added to key.lang + test_elem = {"$": 1} + test_data = {"test": []} + test_lang = "test.lang" + test_data, res = list_values(test_elem, test_data, "test", {test_lang: []}) + assert res == {test_lang: [" "]} def test_extract_single_values(): - assert True + # Assert that if the value is an empty list, we skip + assert extract_single_values({}, [], "key", {}) == {} + # Assert if the type of the value is in [int, str, float] we update the data object, + # and return an unchanged add_fields + base_key = "value" + data_obj = {base_key: 1} + res = extract_single_values({}, data_obj[base_key], base_key, data_obj) + assert res == {} + assert data_obj == {base_key: 1} + + # Assert if the type of value is boolean, the bool is converted to 1 or 0 + data_obj = {base_key: False} + res = extract_single_values({}, data_obj[base_key], base_key, data_obj) + assert res == {} + assert data_obj == {base_key: 0} + + # Assert if the type of value is a dict, and $ is in the keys, the main key is set to the value of $ + # $ is only provided in a value dict + data_obj = {base_key: {"$": 42}} + res = extract_single_values({}, data_obj[base_key], base_key, data_obj) + assert res == {} + assert data_obj == {base_key: 42} + + # Assert if the type of value is an Empty dict the key should be set to " " + data_obj = {base_key: {}} + res = extract_single_values({}, data_obj[base_key], base_key, data_obj) + assert res == {} + assert data_obj == {base_key: " "} + + # Assert if the value is a dict, and @currency, @value-date or @year is in the keys, + # the value is appended to add_fields + data_obj = {base_key: {"@currency": "USD"}} + res = extract_single_values({}, data_obj[base_key], base_key, data_obj) + assert res == {"value.currency": "USD"} + + # Assert if '@{http://www.w3.org/XML/1998/namespace}lang' is in the value keys + data_obj = {base_key: {XML_LANG_STR: "en"}} + res = extract_single_values({}, data_obj[base_key], base_key, data_obj) + assert res == {"value.lang": "en"} + + # Assert if the key is not 'value', an empty field is added + data_obj = {"test": {"$": "bar"}} + res = extract_single_values({}, data_obj["test"], "test", data_obj) + test_lang = "test.lang" + assert res == {test_lang: " "} From ac9461c52216a3907a78161e9644c8cffbed8d21 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Thu, 9 Nov 2023 16:01:09 +0100 Subject: [PATCH 17/49] feat: added tests for extract activity dates --- .../custom_fields/test_activity_dates.py | 42 +++++++++++++++++-- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_activity_dates.py b/tests/direct_indexing/custom_fields/test_activity_dates.py index 1f75c50a8..e8d5996f2 100644 --- a/tests/direct_indexing/custom_fields/test_activity_dates.py +++ b/tests/direct_indexing/custom_fields/test_activity_dates.py @@ -1,9 +1,43 @@ -# TODO +from direct_indexing.custom_fields.activity_dates import FIELDS, activity_dates, extract_activity_dates -def test_activity_dates(): - assert True +def test_fields(): + assert "start-planned" in 
FIELDS + assert "start-actual" in FIELDS + assert "end-planned" in FIELDS + assert "end-actual" in FIELDS + + +def test_activity_dates(mocker): + # mock extract_activity_dates + mock_extract = mocker.patch('direct_indexing.custom_fields.activity_dates.extract_activity_dates') + + # Test skip if no activity-date + data = {} + activity_dates(data) + mock_extract.assert_not_called() + + # Test conversion to list + data = {'activity-date': {"type": 1, "iso-date": "test"}} + activity_dates(data) + mock_extract.assert_called_once() + # Check that data['activity-date'] is a list + assert type(data['activity-date']) is list + + # Test list of dicts calls len() times + data = {'activity-date': [{"type": 1, "iso-date": "test"}]} + activity_dates(data) + assert mock_extract.call_count == len(data['activity-date']) + 1 # +1 for the previous test def test_extract_activity_dates(): - assert True + assert extract_activity_dates({}, {}) == {} + date = {"type": 1, "iso-date": "test"} + data = {'activity-date': date} + ex_res = {'activity-date': date, 'activity-date.start-planned': 'test', 'activity-date.common.start': 'test'} + assert extract_activity_dates(date, data) == ex_res + + date = {"type": 3, "iso-date": "test"} + data = {'activity-date': date} + ex_res = {'activity-date': date, 'activity-date.end-planned': 'test', 'activity-date.common.end': 'test'} + assert extract_activity_dates(date, data) == ex_res From 82b01bbe01f83872c489531e7be0dc0d503201a9 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Thu, 9 Nov 2023 16:01:44 +0100 Subject: [PATCH 18/49] refactor: update import order --- tests/direct_indexing/cleaning/test_dataset.py | 4 ++-- tests/direct_indexing/cleaning/test_metadata.py | 2 +- tests/direct_indexing/metadata/test_util.py | 4 ++-- tests/direct_indexing/test_direct_indexing.py | 5 +++-- tests/direct_indexing/test_util.py | 4 +++- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/tests/direct_indexing/cleaning/test_dataset.py b/tests/direct_indexing/cleaning/test_dataset.py index 8b5c78279..092e0b2ce 100644 --- a/tests/direct_indexing/cleaning/test_dataset.py +++ b/tests/direct_indexing/cleaning/test_dataset.py @@ -1,6 +1,6 @@ from direct_indexing.cleaning.dataset import ( - extract_key_value_fields, extract_list_values, extract_literal_values, extract_single_values, list_values, - recursive_attribute_cleaning, XML_LANG_STR + XML_LANG_STR, extract_key_value_fields, extract_list_values, extract_literal_values, extract_single_values, + list_values, recursive_attribute_cleaning ) diff --git a/tests/direct_indexing/cleaning/test_metadata.py b/tests/direct_indexing/cleaning/test_metadata.py index 7fbbd04d6..6049619bc 100644 --- a/tests/direct_indexing/cleaning/test_metadata.py +++ b/tests/direct_indexing/cleaning/test_metadata.py @@ -1,6 +1,6 @@ import pytest -from direct_indexing.cleaning.metadata import clean_resources, clean_extras, clean_dataset_metadata +from direct_indexing.cleaning.metadata import clean_dataset_metadata, clean_extras, clean_resources def test_clean_dataset_metadata(mocker, fixture_dataset_metadata): diff --git a/tests/direct_indexing/metadata/test_util.py b/tests/direct_indexing/metadata/test_util.py index 372daffb7..e5f567610 100644 --- a/tests/direct_indexing/metadata/test_util.py +++ b/tests/direct_indexing/metadata/test_util.py @@ -1,9 +1,9 @@ import json +import urllib +import zipfile import pytest import requests -import urllib -import zipfile from direct_indexing.metadata.util import download_dataset, index, retrieve diff --git 
a/tests/direct_indexing/test_direct_indexing.py b/tests/direct_indexing/test_direct_indexing.py index ce4532c31..ae4b46306 100644 --- a/tests/direct_indexing/test_direct_indexing.py +++ b/tests/direct_indexing/test_direct_indexing.py @@ -1,7 +1,8 @@ # TODO -from direct_indexing.direct_indexing import clear_indices, clear_indices_for_core -import pytest import pysolr +import pytest + +from direct_indexing.direct_indexing import clear_indices, clear_indices_for_core # Test group: test_run diff --git a/tests/direct_indexing/test_util.py b/tests/direct_indexing/test_util.py index 400839bc9..bf51c4dc7 100644 --- a/tests/direct_indexing/test_util.py +++ b/tests/direct_indexing/test_util.py @@ -1,8 +1,10 @@ import os import subprocess +import urllib.request + import pysolr import pytest -import urllib.request + from direct_indexing import util from iaticloud import settings From 45906f214c1bc8df9faee466be4073bd20b573cb Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Thu, 9 Nov 2023 16:04:18 +0100 Subject: [PATCH 19/49] feat: added tests for hierarchy --- .../custom_fields/test_add_default_hierarchy.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_add_default_hierarchy.py b/tests/direct_indexing/custom_fields/test_add_default_hierarchy.py index 36de6c573..4fcbf3cde 100644 --- a/tests/direct_indexing/custom_fields/test_add_default_hierarchy.py +++ b/tests/direct_indexing/custom_fields/test_add_default_hierarchy.py @@ -1,5 +1,12 @@ -# TODO +from direct_indexing.custom_fields.add_default_hierarchy import add_default_hierarchy def test_add_default_hierarchy(): - pass + default_res = {'hierarchy': 1} + data = {} + add_default_hierarchy(data) + assert data == default_res + + data = {'hierarchy': 2} + add_default_hierarchy(data) + assert data != default_res From 6c3678deab0f6a18d628d2c8b9939ce04ba877a5 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Fri, 10 Nov 2023 11:22:56 +0100 Subject: [PATCH 20/49] feat: added tests for custom codelist fields --- .../custom_fields/test_codelists.py | 2264 ++++++++++++++++- 1 file changed, 2253 insertions(+), 11 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_codelists.py b/tests/direct_indexing/custom_fields/test_codelists.py index b0e30f4d5..ef33d26cd 100644 --- a/tests/direct_indexing/custom_fields/test_codelists.py +++ b/tests/direct_indexing/custom_fields/test_codelists.py @@ -1,21 +1,2263 @@ -# TODO +import pytest +from direct_indexing.custom_fields.codelists import ( + CODELIST_POSTFIX, add_codelist_fields, check_and_get, extract_list_field, extract_nested_list_field, + extract_single_field +) +from direct_indexing.custom_fields.models import codelists -def test_add_codelist_fields(): - assert True +def test_add_codelist_fields(mocker): + # mock extract_single_field, extract_list_field, extract_nested_list_field + mock_extract_single = mocker.patch('direct_indexing.custom_fields.codelists.extract_single_field') + mock_extract_list = mocker.patch('direct_indexing.custom_fields.codelists.extract_list_field') + mock_extract_nested_list = mocker.patch('direct_indexing.custom_fields.codelists.extract_nested_list_field') -def test_extract_single_field(): - assert True + add_codelist_fields({}, {}) + mock_extract_single.assert_called_once() + assert mock_extract_list.call_count == 10 + mock_extract_nested_list.assert_called_once() -def test_extract_list_field(): - assert True +def test_extract_single_field(fixture_cl): + field_name = 'reporting-org' + 
field_type = 'type' + cl_name = 'OrganisationType' + # Test data is not changed if field_name not in data + data = {} + data = extract_single_field(data, field_name, field_type, cl_name, fixture_cl) + assert data == {} -def test_extract_nested_list_field(): - assert True + # Test data is not changed if field_type not in data[field_name] + data = {field_name: {}} + data = extract_single_field(data, field_name, field_type, cl_name, fixture_cl) + assert data == {field_name: {}} + # Test getting a single field + data = {field_name: {field_type: "10"}} + data = extract_single_field(data, field_name, field_type, cl_name, fixture_cl) + assert data == {field_name: {field_type: "10"}, 'reporting-org.type.name': 'Government'} -def test_check_and_get(): - assert True + +def test_extract_list_field(fixture_cl): + # Test extact list field with custom field name + field_name = 'policy-marker' + field_type = 'code' + cl_name = 'PolicyMarkerVocabulary' + custom_name = 'policy-marker.vocabulary' + data_field = custom_name + CODELIST_POSTFIX + data = {field_name: {field_type: "1"}} + expected_res = {field_name: {field_type: "1"}, data_field: ['OECD DAC CRS']} + extract_list_field(data, field_name, field_type, cl_name, fixture_cl, custom_name) + assert data == expected_res + + # Test extract list field without custom field name + field_name = 'recipient-country' + field_type = 'code' + cl_name = 'Country' + data_field = field_name + CODELIST_POSTFIX + expected_res = {field_name: {field_type: 'AF'}, data_field: ['Afghanistan']} + data = {field_name: {field_type: 'AF'}} + data = extract_list_field(data, field_name, field_type, cl_name, fixture_cl, None) + assert data == expected_res + + # Test field name is not in data + expected_res = {data_field: []} + data = {} + data = extract_list_field(data, field_name, field_type, cl_name, fixture_cl) + assert data == expected_res + + # Test data[field_name] is list + data = {field_name: [{field_type: 'AF'}, {field_type: 'AL'}]} + data = extract_list_field(data, field_name, field_type, cl_name, fixture_cl) + expected_res = {field_name: [{field_type: 'AF'}, {field_type: 'AL'}], data_field: ['Afghanistan', 'Albania']} + assert data == expected_res + + +def test_extract_nested_list_field(mocker, fixture_cl): + parent_field_name = 'transaction' + field_name = 'receiver-org' + field_type = 'type' + cl_name = 'OrganisationType' + data_field = f'{parent_field_name}.{field_name}.{field_type}{CODELIST_POSTFIX}' + + # Test that the data_field is initialised and returned if no data + data = {} + expected_res = {data_field: []} + data = extract_nested_list_field(data, parent_field_name, field_name, field_type, cl_name, fixture_cl) + assert data == expected_res + + mock_cag = mocker.patch('direct_indexing.custom_fields.codelists.check_and_get') + # Test that check_and_get is called 0 times if the item does not have the field_name + data = {parent_field_name: [{}, {}]} + extract_nested_list_field(data, parent_field_name, field_name, field_type, cl_name, fixture_cl) + mock_cag.assert_not_called() + + # Test that check_and_get is called twice for two items in the list + data = {parent_field_name: [{field_name: {field_type: '10'}}, {field_name: {field_type: '20'}}]} + extract_nested_list_field(data, parent_field_name, field_name, field_type, cl_name, fixture_cl) + assert mock_cag.call_count == 2 + + # Test that check_and_get is called one more time if the item is a dict + data = {parent_field_name: {field_name: {field_type: '10'}}} + extract_nested_list_field(data, parent_field_name, 
field_name, field_type, cl_name, fixture_cl) + assert mock_cag.call_count == 3 + + # Test that check_and_get is not called again if the item is an empty dict + data = {parent_field_name: {}} + extract_nested_list_field(data, parent_field_name, field_name, field_type, cl_name, fixture_cl) + assert mock_cag.call_count == 3 + + +def test_check_and_get(fixture_cl): + # Test with field_type not in codelist_field + field_name = 'recipient-country' + field_type = 'code' + cl_name = 'Country' + data_field = field_name + CODELIST_POSTFIX + data = {field_name: {}} + check_and_get(field_type, data[field_name], None, None, None, None) + assert data == {field_name: {}} + # Test with field_type in codelist_field + data = {field_name: {field_type: 'AF'}, data_field: []} + expected_res = {field_name: {field_type: 'AF'}, data_field: ['Afghanistan']} + check_and_get(field_type, data[field_name], data, data_field, fixture_cl, cl_name) + assert data == expected_res + + +@pytest.fixture +def fixture_cl(monkeypatch): + data = { + "AidType": [ + { + "code": "A01", + "name": "General budget support", + "description": "Unearmarked contributions to the government budget including funding to support the implementation of macroeconomic reforms (structural adjustment programmes, poverty reduction strategies). Budget support is a method of financing a recipient country\u2019s budget through a transfer of resources from an external financing agency to the recipient government\u2019s national treasury. The funds thus transferred are managed in accordance with the recipient\u2019s budgetary procedures. Funds transferred to the national treasury for financing programmes or projects managed according to different budgetary procedures from those of the recipient country, with the intention of earmarking the resources for specific uses, are therefore excluded.", # NOQA: E501 + "category": "A", + "status": "active" + }, + { + "code": "A02", + "name": "Sector budget support", + "description": "Sector budget support, like general budget support, is a financial contribution to a recipient government\u2019s budget. However, in sector budget support, the dialogue between donors and partner governments focuses on sector-specific concerns, rather than on overall policy and budget priorities.", # NOQA: E501 + "category": "A", + "status": "active" + }, + { + "code": "B01", + "name": "Core support to NGOs, other private bodies, PPPs and research institutes", + "description": "Funds are paid over to NGOs (local, national and international) for use at the latter\u2019s discretion, and contribute to programmes and activities which NGOs have developed themselves, and which they implement on their own authority and responsibility. Core contributions to PPPs, funds paid over to foundations (e.g. philanthropic foundations), and contributions to research institutes (public and private) are also recorded here. Annex 2 of the DAC Directives provides a list of INGOs, PPPs and networks core contributions to which may be reported under B01. This list is not exclusive.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "B02", + "name": "Core contributions to multilateral institutions and global funds", + "description": "These funds are classified as multilateral (all other categories are bilateral). The recipient multilateral institution pools contributions so that they lose their identity and become an integral part of its financial assets or liabilities. 
Also includes Financial Intermediary Funds (GEF, CIFs) for which the World Bank is the Trustee, as well as some UN inter-agency pooled funds, such as CERF and the UN Peacebuilding Fund. See Annex 2 of the Reporting Directives for a comprehensive list of agencies, core contributions to which may be reported under B02 and its subcategories. (Section I. Multilateral institutions). Nota bene: contributions to multilateral development organisations beyond Annex 2 are not reportable in the DAC statistics. The non-ODA components of core support to multilateral organisations included in Annex 2 are not reportable either.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "B021", + "name": "Core contributions to multilateral institutions", + "description": "Contributions in this category are pooled by the recipient multilateral institution and become an integral part of its financial assets or liabilities.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "B022", + "name": "Core contributions to global funds", + "description": "Contributions to global funds classified as multilateral including Financial Intermediary Funds for which the World Bank is the Trustee and which have gone through the Annex 2 process (GEF, CIFs) as well as some UN inter-agency pooled funds, e.g. CERF and the UN Peacebuilding Fund.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "B03", + "name": "Contributions to specific-purpose programmes and funds managed by implementing partners", + "description": "In addition to their core-funded operations, international organisations \u2013 multilateral agencies, NGOs, PPPs or networks \u2013 both in provider and in third countries, set up programmes and funds with a specific sectoral, thematic or geographical focus. Donors\u2019 bilateral contributions to such programmes and funds are recorded here. Use categories B031 and B032 for trust funds managed by the UN (all designed as multi-donor) unless contributions are earmarked for a specific geographical location or funding window.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "B031", + "name": "Contributions to multi-donor/multi-entity funding mechanisms", + "description": "Contributions to funding mechanisms (specific-purpose programmes and funds) that pool resources from several providers and from which several international organisations \u2013 multilateral agencies, NGOs, PPPs or networks \u2013 may be allocated funds for implementation e.g. contributions to UN country-based pooled funds and country-level development funds. Excludes contributions to global funds classified as multilateral (see B022). Includes Financial Intermediary Funds for which the World Bank is the Trustee and which have not gone through the Annex 2 process.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "B032", + "name": "Contributions to multi-donor/single-entity funding mechanisms", + "description": "Contributions to multi-donor funding mechanisms (specific-purpose programmes and funds) managed by a single international organisation \u2013 multilateral agency, NGO, PPP or network \u2013 e.g. UN single-agency thematic funds; World Bank or other MDB trust funds. 
Classify the contribution as B032 even if in the initial stages only one donor contributes to the fund.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "B033", + "name": "Contributions to single-donor funding mechanisms and contributions earmarked for a specific funding window or geographical location", # NOQA: E501 + "description": "Contributions to funding mechanisms (specific-purpose programmes and funds) where the donor has a significant influence on the allocation of funds. This includes contributions to single-donor trust funds and earmarked contributions to specific countries/geographical locations or funding windows within multi-donor trust funds. When the donor designs the activity but channels it through an international organisation, the activity should be classified as C01.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "B04", + "name": "Basket funds/pooled funding", + "description": "The donor contributes funds to an autonomous account, managed jointly with other donors and/or the recipient. The account will have specific purposes, modes of disbursement and accountability mechanisms, and a limited time frame. Basket funds are characterised by common project documents, common funding contracts and common reporting/audit procedures with all donors. Donors\u2019 contributions to funds managed autonomously by international organisations are recorded under B03.", # NOQA: E501 + "category": "B", + "status": "active" + }, + { + "code": "C01", + "name": "Project-type interventions", + "description": "A project is a set of inputs, activities and outputs, agreed with the partner country*, to reach specific objectives/outcomes within a defined time frame, with a defined budget and a defined geographical area. Projects can vary significantly in terms of objectives, complexity, amounts involved and duration. There are smaller projects that might involve modest financial resources and last only a few months, whereas large projects might involve more significant amounts, entail successive phases and last for many years. A large project with a number of different components is sometimes referred to as a programme, but should nevertheless be recorded here. Feasibility studies, appraisals and evaluations are included (whether designed as part of projects/programmes or dedicated funding arrangements). Academic studies, research and development, trainings, scholarships, and other technical assistance activities not directly linked to development projects/programmes should instead be recorded under D02. Aid channelled through NGOs or multilaterals is also recorded here. This includes payments for NGOs and multilaterals to implement donors\u2019 projects and programmes, and funding of specified NGOs projects. By contrast, core funding of NGOs and multilaterals as well as contributions to specific-purpose funds are recorded under B.* In the cases of equity investments, humanitarian aid or aid channelled through NGOs, projects are recorded here even if there was no direct agreement between the donor and the partner country. 
Contributions to single-donor trust funds and contributions to trust funds earmarked for a specific funding window and/or country are recorded under B033.", # NOQA: E501 + "category": "C", + "status": "active" + }, + { + "code": "D01", + "name": "Donor country personnel", + "description": "Experts, consultants, teachers, academics, researchers, volunteers and contributions to public and private bodies for sending experts to developing countries.", # NOQA: E501 + "category": "D", + "status": "active" + }, + { + "code": "D02", + "name": "Other technical assistance", + "description": "Provision, outside projects as described in category C01, of technical assistance in recipient countries (excluding technical assistance performed by donor experts reported under D01, and scholarships/training in donor country reported under E01). This includes training and research; language training; south-south studies; research studies; collaborative research between donor and recipient universities and organisations); local scholarships; development-oriented social and cultural programmes. This category also covers ad hoc contributions such as conferences, seminars and workshops, exchange visits, publications, etc.", # NOQA: E501 + "category": "D", + "status": "active" + }, + { + "code": "E01", + "name": "Scholarships/training in donor country", + "description": "Financial aid awards for individual students and contributions to trainees.", + "category": "E", + "status": "active" + }, + { + "code": "E02", + "name": "Imputed student costs", + "description": "Indirect (\u201cimputed\u201d) costs of tuition in donor countries.", + "category": "E", + "status": "active" + }, + { + "code": "F01", + "name": "Debt relief", + "description": "Groups all actions relating to debt (forgiveness, conversions, swaps, buy-backs, rescheduling, refinancing).", # NOQA: E501 + "category": "F", + "status": "active" + }, + { + "code": "G01", + "name": "Administrative costs not included elsewhere", + "description": "Administrative costs of development assistance programmes not already included under other ODA items as an integral part of the costs of delivering or implementing the aid provided. This category covers situation analyses and auditing activities.As regards the salaries component of administrative costs, it relates to in-house agency staff and contractors only; costs associated with donor experts/consultants are to be reported under category C or D01.", # NOQA: E501 + "category": "G", + "status": "active" + }, + { + "code": "H01", + "name": "Development awareness", + "description": "Funding of activities designed to increase public support, i.e. awareness in the donor country of development co-operation efforts, needs and issues.", # NOQA: E501 + "category": "H", + "status": "active" + }, + { + "code": "H02", + "name": "Refugees/asylum seekers in donor countries", + "description": "Costs incurred in donor countries for basic assistance to asylum seekers and refugees from developing countries, up to 12 months, when costs cannot be disaggregated. See section II.6 and Annex 17.", # NOQA: E501 + "category": "H", + "status": "active" + }, + { + "code": "H03", + "name": "Asylum-seekers ultimately accepted", + "description": "Costs incurred in donor countries for basic assistance to asylum seekers, when these are ultimately accepted. 
This category includes only costs incurred prior to recognition.", # NOQA: E501 + "category": "H", + "status": "active" + }, + { + "code": "H04", + "name": "Asylum-seekers ultimately rejected", + "description": "Costs incurred in donor countries for basic assistance to asylum seekers, when these are ultimately rejected. This category includes only costs incurred prior to rejection. Members may base their reporting on the first instance rejection, where a final decision on status is anticipated to occur after a 12-month period, and this facilitates the establishment of a conservative estimate. For further guidance on how to proceed with calculating costs related to rejected asylum seekers, see Clarification 5, third bullet in section II.6 of the Reporting Directives.", # NOQA: E501 + "category": "H", + "status": "active" + }, + { + "code": "H05", + "name": "Recognised refugees", + "description": "Costs incurred in donor countries for basic assistance to refugees with a recognised status. This category only includes costs after recognition (or after date of entry into a country through a resettlement programme).", # NOQA: E501 + "category": "H", + "status": "active" + }, + { + "code": "H06", + "name": "Refugees and asylum seekers in other provider countries", + "description": "Costs incurred in other non-ODA eligible provider countries for basic assistance to asylum seekers and refugees from developing countries, up to 12 months. The host and origin country of refugees/asylum seekers shall be specified in one of the descriptive fields of the CRS (fields 14 or 19).", # NOQA: E501 + "category": "H", + "status": "active" + } + ], + "BudgetStatus": [ + { + "code": "1", + "name": "Indicative", + "description": "A non-binding estimate for the described budget.", + "status": "active" + }, + { + "code": "2", + "name": "Committed", + "description": "A binding agreement for the described budget.", + "status": "active" + } + ], + "BudgetType": [ + { + "code": "1", + "name": "Original", + "description": "The original budget allocated to the activity", + "status": "active" + }, + { + "code": "2", + "name": "Revised", + "description": "The updated budget for an activity", + "status": "active" + } + ], + "Country": [ + { + "code": "AF", + "name": "Afghanistan", + "status": "active" + }, + { + "code": "AX", + "name": "\u00c5land Islands", + "status": "active" + }, + { + "code": "AL", + "name": "Albania", + "status": "active" + }, + { + "code": "DZ", + "name": "Algeria", + "status": "active" + }, + { + "code": "AS", + "name": "American Samoa", + "status": "active" + }, + { + "code": "AD", + "name": "Andorra", + "status": "active" + }, + { + "code": "AO", + "name": "Angola", + "status": "active" + }, + { + "code": "AI", + "name": "Anguilla", + "status": "active" + }, + { + "code": "AQ", + "name": "Antarctica", + "status": "active" + }, + { + "code": "AG", + "name": "Antigua and Barbuda", + "status": "active" + }, + { + "code": "AR", + "name": "Argentina", + "status": "active" + }, + { + "code": "AM", + "name": "Armenia", + "status": "active" + }, + { + "code": "AW", + "name": "Aruba", + "status": "active" + }, + { + "code": "AU", + "name": "Australia", + "status": "active" + }, + { + "code": "AT", + "name": "Austria", + "status": "active" + }, + { + "code": "AZ", + "name": "Azerbaijan", + "status": "active" + }, + { + "code": "BS", + "name": "Bahamas (the)", + "status": "active" + }, + { + "code": "BH", + "name": "Bahrain", + "status": "active" + }, + { + "code": "BD", + "name": "Bangladesh", + "status": 
"active" + }, + { + "code": "BB", + "name": "Barbados", + "status": "active" + }, + { + "code": "BY", + "name": "Belarus", + "status": "active" + }, + { + "code": "BE", + "name": "Belgium", + "status": "active" + }, + { + "code": "BZ", + "name": "Belize", + "status": "active" + }, + { + "code": "BJ", + "name": "Benin", + "status": "active" + }, + { + "code": "BM", + "name": "Bermuda", + "status": "active" + }, + { + "code": "BT", + "name": "Bhutan", + "status": "active" + }, + { + "code": "BO", + "name": "Bolivia (Plurinational State of)", + "status": "active" + }, + { + "code": "BQ", + "name": "Bonaire, Sint Eustatius and Saba", + "status": "active" + }, + { + "code": "BA", + "name": "Bosnia and Herzegovina", + "status": "active" + }, + { + "code": "BW", + "name": "Botswana", + "status": "active" + }, + { + "code": "BV", + "name": "Bouvet Island", + "status": "active" + }, + { + "code": "BR", + "name": "Brazil", + "status": "active" + }, + { + "code": "IO", + "name": "British Indian Ocean Territory (the)", + "status": "active" + }, + { + "code": "BN", + "name": "Brunei Darussalam", + "status": "active" + }, + { + "code": "BG", + "name": "Bulgaria", + "status": "active" + }, + { + "code": "BF", + "name": "Burkina Faso", + "status": "active" + }, + { + "code": "BU", + "name": "Burma", + "status": "withdrawn" + }, + { + "code": "BI", + "name": "Burundi", + "status": "active" + }, + { + "code": "KH", + "name": "Cambodia", + "status": "active" + }, + { + "code": "CM", + "name": "Cameroon", + "status": "active" + }, + { + "code": "CA", + "name": "Canada", + "status": "active" + }, + { + "code": "CV", + "name": "Cabo Verde", + "status": "active" + }, + { + "code": "KY", + "name": "Cayman Islands (the)", + "status": "active" + }, + { + "code": "CF", + "name": "Central African Republic (the)", + "status": "active" + }, + { + "code": "TD", + "name": "Chad", + "status": "active" + }, + { + "code": "CL", + "name": "Chile", + "status": "active" + }, + { + "code": "CN", + "name": "China", + "status": "active" + }, + { + "code": "CX", + "name": "Christmas Island", + "status": "active" + }, + { + "code": "CC", + "name": "Cocos (Keeling) Islands (the)", + "status": "active" + }, + { + "code": "CO", + "name": "Colombia", + "status": "active" + }, + { + "code": "KM", + "name": "Comoros (the)", + "status": "active" + }, + { + "code": "CG", + "name": "Congo (the)", + "status": "active" + }, + { + "code": "CD", + "name": "Congo (the Democratic Republic of the)", + "status": "active" + }, + { + "code": "CK", + "name": "Cook Islands (the)", + "status": "active" + }, + { + "code": "CR", + "name": "Costa Rica", + "status": "active" + }, + { + "code": "CI", + "name": "C\u00f4te d'Ivoire", + "status": "active" + }, + { + "code": "HR", + "name": "Croatia", + "status": "active" + }, + { + "code": "CU", + "name": "Cuba", + "status": "active" + }, + { + "code": "CW", + "name": "Cura\u00e7ao", + "status": "active" + }, + { + "code": "CY", + "name": "Cyprus", + "status": "active" + }, + { + "code": "CZ", + "name": "Czechia", + "status": "active" + }, + { + "code": "DK", + "name": "Denmark", + "status": "active" + }, + { + "code": "DJ", + "name": "Djibouti", + "status": "active" + }, + { + "code": "DM", + "name": "Dominica", + "status": "active" + }, + { + "code": "DO", + "name": "Dominican Republic (the)", + "status": "active" + }, + { + "code": "TP", + "name": "East Timor", + "status": "withdrawn" + }, + { + "code": "EC", + "name": "Ecuador", + "status": "active" + }, + { + "code": "EG", + "name": "Egypt", + "status": 
"active" + }, + { + "code": "SV", + "name": "El Salvador", + "status": "active" + }, + { + "code": "GQ", + "name": "Equatorial Guinea", + "status": "active" + }, + { + "code": "ER", + "name": "Eritrea", + "status": "active" + }, + { + "code": "EE", + "name": "Estonia", + "status": "active" + }, + { + "code": "ET", + "name": "Ethiopia", + "status": "active" + }, + { + "code": "FK", + "name": "Falkland Islands (the) [Malvinas]", + "status": "active" + }, + { + "code": "FO", + "name": "Faroe Islands (the)", + "status": "active" + }, + { + "code": "FJ", + "name": "Fiji", + "status": "active" + }, + { + "code": "FI", + "name": "Finland", + "status": "active" + }, + { + "code": "FR", + "name": "France", + "status": "active" + }, + { + "code": "GF", + "name": "French Guiana", + "status": "active" + }, + { + "code": "PF", + "name": "French Polynesia", + "status": "active" + }, + { + "code": "TF", + "name": "French Southern Territories (the)", + "status": "active" + }, + { + "code": "GA", + "name": "Gabon", + "status": "active" + }, + { + "code": "GM", + "name": "Gambia (the)", + "status": "active" + }, + { + "code": "GE", + "name": "Georgia", + "status": "active" + }, + { + "code": "DE", + "name": "Germany", + "status": "active" + }, + { + "code": "GH", + "name": "Ghana", + "status": "active" + }, + { + "code": "GI", + "name": "Gibraltar", + "status": "active" + }, + { + "code": "GR", + "name": "Greece", + "status": "active" + }, + { + "code": "GL", + "name": "Greenland", + "status": "active" + }, + { + "code": "GD", + "name": "Grenada", + "status": "active" + }, + { + "code": "GP", + "name": "Guadeloupe", + "status": "active" + }, + { + "code": "GU", + "name": "Guam", + "status": "active" + }, + { + "code": "GT", + "name": "Guatemala", + "status": "active" + }, + { + "code": "GG", + "name": "Guernsey", + "status": "active" + }, + { + "code": "GN", + "name": "Guinea", + "status": "active" + }, + { + "code": "GW", + "name": "Guinea-Bissau", + "status": "active" + }, + { + "code": "GY", + "name": "Guyana", + "status": "active" + }, + { + "code": "HT", + "name": "Haiti", + "status": "active" + }, + { + "code": "HM", + "name": "Heard Island and McDonald Islands", + "status": "active" + }, + { + "code": "VA", + "name": "Holy See (the)", + "status": "active" + }, + { + "code": "HN", + "name": "Honduras", + "status": "active" + }, + { + "code": "HK", + "name": "Hong Kong", + "status": "active" + }, + { + "code": "HU", + "name": "Hungary", + "status": "active" + }, + { + "code": "IS", + "name": "Iceland", + "status": "active" + }, + { + "code": "IN", + "name": "India", + "status": "active" + }, + { + "code": "ID", + "name": "Indonesia", + "status": "active" + }, + { + "code": "IR", + "name": "Iran (Islamic Republic of)", + "status": "active" + }, + { + "code": "IQ", + "name": "Iraq", + "status": "active" + }, + { + "code": "IE", + "name": "Ireland", + "status": "active" + }, + { + "code": "IM", + "name": "Isle of Man", + "status": "active" + }, + { + "code": "IL", + "name": "Israel", + "status": "active" + }, + { + "code": "IT", + "name": "Italy", + "status": "active" + }, + { + "code": "JM", + "name": "Jamaica", + "status": "active" + }, + { + "code": "JP", + "name": "Japan", + "status": "active" + }, + { + "code": "JE", + "name": "Jersey", + "status": "active" + }, + { + "code": "JO", + "name": "Jordan", + "status": "active" + }, + { + "code": "KZ", + "name": "Kazakhstan", + "status": "active" + }, + { + "code": "KE", + "name": "Kenya", + "status": "active" + }, + { + "code": "KI", + "name": "Kiribati", 
+ "status": "active" + }, + { + "code": "KP", + "name": "Korea (the Democratic People's Republic of)", + "status": "active" + }, + { + "code": "KR", + "name": "Korea (the Republic of)", + "status": "active" + }, + { + "code": "XK", + "name": "Kosovo", + "status": "active" + }, + { + "code": "KW", + "name": "Kuwait", + "status": "active" + }, + { + "code": "KG", + "name": "Kyrgyzstan", + "status": "active" + }, + { + "code": "LA", + "name": "Lao People's Democratic Republic (the)", + "status": "active" + }, + { + "code": "LV", + "name": "Latvia", + "status": "active" + }, + { + "code": "LB", + "name": "Lebanon", + "status": "active" + }, + { + "code": "LS", + "name": "Lesotho", + "status": "active" + }, + { + "code": "LR", + "name": "Liberia", + "status": "active" + }, + { + "code": "LY", + "name": "Libya", + "status": "active" + }, + { + "code": "LI", + "name": "Liechtenstein", + "status": "active" + }, + { + "code": "LT", + "name": "Lithuania", + "status": "active" + }, + { + "code": "LU", + "name": "Luxembourg", + "status": "active" + }, + { + "code": "MO", + "name": "Macao", + "status": "active" + }, + { + "code": "MK", + "name": "North Macedonia", + "status": "active" + }, + { + "code": "MG", + "name": "Madagascar", + "status": "active" + }, + { + "code": "MW", + "name": "Malawi", + "status": "active" + }, + { + "code": "MY", + "name": "Malaysia", + "status": "active" + }, + { + "code": "MV", + "name": "Maldives", + "status": "active" + }, + { + "code": "ML", + "name": "Mali", + "status": "active" + }, + { + "code": "MT", + "name": "Malta", + "status": "active" + }, + { + "code": "MH", + "name": "Marshall Islands (the)", + "status": "active" + }, + { + "code": "MQ", + "name": "Martinique", + "status": "active" + }, + { + "code": "MR", + "name": "Mauritania", + "status": "active" + }, + { + "code": "MU", + "name": "Mauritius", + "status": "active" + }, + { + "code": "YT", + "name": "Mayotte", + "status": "active" + }, + { + "code": "MX", + "name": "Mexico", + "status": "active" + }, + { + "code": "FM", + "name": "Micronesia (Federated States of)", + "status": "active" + }, + { + "code": "MD", + "name": "Moldova (the Republic of)", + "status": "active" + }, + { + "code": "MC", + "name": "Monaco", + "status": "active" + }, + { + "code": "MN", + "name": "Mongolia", + "status": "active" + }, + { + "code": "ME", + "name": "Montenegro", + "status": "active" + }, + { + "code": "MS", + "name": "Montserrat", + "status": "active" + }, + { + "code": "MA", + "name": "Morocco", + "status": "active" + }, + { + "code": "MZ", + "name": "Mozambique", + "status": "active" + }, + { + "code": "MM", + "name": "Myanmar", + "status": "active" + }, + { + "code": "NA", + "name": "Namibia", + "status": "active" + }, + { + "code": "NR", + "name": "Nauru", + "status": "active" + }, + { + "code": "NP", + "name": "Nepal", + "status": "active" + }, + { + "code": "NL", + "name": "Netherlands (Kingdom of the)", + "status": "active" + }, + { + "code": "AN", + "name": "Netherlands Antilles", + "status": "withdrawn" + }, + { + "code": "NT", + "name": "Neutral Zone", + "status": "withdrawn" + }, + { + "code": "NC", + "name": "New Caledonia", + "status": "active" + }, + { + "code": "NZ", + "name": "New Zealand", + "status": "active" + }, + { + "code": "NI", + "name": "Nicaragua", + "status": "active" + }, + { + "code": "NE", + "name": "Niger (the)", + "status": "active" + }, + { + "code": "NG", + "name": "Nigeria", + "status": "active" + }, + { + "code": "NU", + "name": "Niue", + "status": "active" + }, + { + "code": "NF", 
+ "name": "Norfolk Island", + "status": "active" + }, + { + "code": "MP", + "name": "Northern Mariana Islands (the)", + "status": "active" + }, + { + "code": "NO", + "name": "Norway", + "status": "active" + }, + { + "code": "OM", + "name": "Oman", + "status": "active" + }, + { + "code": "PK", + "name": "Pakistan", + "status": "active" + }, + { + "code": "PW", + "name": "Palau", + "status": "active" + }, + { + "code": "PS", + "name": "Palestine, State of", + "status": "active" + }, + { + "code": "PA", + "name": "Panama", + "status": "active" + }, + { + "code": "PG", + "name": "Papua New Guinea", + "status": "active" + }, + { + "code": "PY", + "name": "Paraguay", + "status": "active" + }, + { + "code": "PE", + "name": "Peru", + "status": "active" + }, + { + "code": "PH", + "name": "Philippines (the)", + "status": "active" + }, + { + "code": "PN", + "name": "Pitcairn", + "status": "active" + }, + { + "code": "PL", + "name": "Poland", + "status": "active" + }, + { + "code": "PT", + "name": "Portugal", + "status": "active" + }, + { + "code": "PR", + "name": "Puerto Rico", + "status": "active" + }, + { + "code": "QA", + "name": "Qatar", + "status": "active" + }, + { + "code": "RE", + "name": "R\u00e9union", + "status": "active" + }, + { + "code": "RO", + "name": "Romania", + "status": "active" + }, + { + "code": "RU", + "name": "Russian Federation (the)", + "status": "active" + }, + { + "code": "RW", + "name": "Rwanda", + "status": "active" + }, + { + "code": "BL", + "name": "Saint Barth\u00e9lemy", + "status": "active" + }, + { + "code": "SH", + "name": "Saint Helena, Ascension and Tristan da Cunha", + "status": "active" + }, + { + "code": "KN", + "name": "Saint Kitts and Nevis", + "status": "active" + }, + { + "code": "LC", + "name": "Saint Lucia", + "status": "active" + }, + { + "code": "MF", + "name": "Saint Martin (French part)", + "status": "active" + }, + { + "code": "PM", + "name": "Saint Pierre and Miquelon", + "status": "active" + }, + { + "code": "VC", + "name": "Saint Vincent and the Grenadines", + "status": "active" + }, + { + "code": "WS", + "name": "Samoa", + "status": "active" + }, + { + "code": "SM", + "name": "San Marino", + "status": "active" + }, + { + "code": "ST", + "name": "Sao Tome and Principe", + "status": "active" + }, + { + "code": "SA", + "name": "Saudi Arabia", + "status": "active" + }, + { + "code": "SN", + "name": "Senegal", + "status": "active" + }, + { + "code": "RS", + "name": "Serbia", + "status": "active" + }, + { + "code": "SC", + "name": "Seychelles", + "status": "active" + }, + { + "code": "SL", + "name": "Sierra Leone", + "status": "active" + }, + { + "code": "SG", + "name": "Singapore", + "status": "active" + }, + { + "code": "SX", + "name": "Sint Maarten (Dutch part)", + "status": "active" + }, + { + "code": "SK", + "name": "Slovakia", + "status": "active" + }, + { + "code": "SI", + "name": "Slovenia", + "status": "active" + }, + { + "code": "SB", + "name": "Solomon Islands", + "status": "active" + }, + { + "code": "SO", + "name": "Somalia", + "status": "active" + }, + { + "code": "ZA", + "name": "South Africa", + "status": "active" + }, + { + "code": "GS", + "name": "South Georgia and the South Sandwich Islands", + "status": "active" + }, + { + "code": "SS", + "name": "South Sudan", + "status": "active" + }, + { + "code": "ES", + "name": "Spain", + "status": "active" + }, + { + "code": "LK", + "name": "Sri Lanka", + "status": "active" + }, + { + "code": "SD", + "name": "Sudan (the)", + "status": "active" + }, + { + "code": "SR", + "name": "Suriname", + 
"status": "active" + }, + { + "code": "SJ", + "name": "Svalbard and Jan Mayen", + "status": "active" + }, + { + "code": "SZ", + "name": "Eswatini", + "status": "active" + }, + { + "code": "CS", + "name": "Serbia and Montenegro", + "status": "withdrawn" + }, + { + "code": "SE", + "name": "Sweden", + "status": "active" + }, + { + "code": "CH", + "name": "Switzerland", + "status": "active" + }, + { + "code": "SY", + "name": "Syrian Arab Republic (the)", + "status": "active" + }, + { + "code": "TW", + "name": "Taiwan (Province of China)", + "status": "active" + }, + { + "code": "TJ", + "name": "Tajikistan", + "status": "active" + }, + { + "code": "TZ", + "name": "Tanzania, the United Republic of", + "status": "active" + }, + { + "code": "TH", + "name": "Thailand", + "status": "active" + }, + { + "code": "TL", + "name": "Timor-Leste", + "status": "active" + }, + { + "code": "TG", + "name": "Togo", + "status": "active" + }, + { + "code": "TK", + "name": "Tokelau", + "status": "active" + }, + { + "code": "TO", + "name": "Tonga", + "status": "active" + }, + { + "code": "TT", + "name": "Trinidad and Tobago", + "status": "active" + }, + { + "code": "TN", + "name": "Tunisia", + "status": "active" + }, + { + "code": "TR", + "name": "T\u00fcrkiye", + "status": "active" + }, + { + "code": "TM", + "name": "Turkmenistan", + "status": "active" + }, + { + "code": "TC", + "name": "Turks and Caicos Islands (the)", + "status": "active" + }, + { + "code": "TV", + "name": "Tuvalu", + "status": "active" + }, + { + "code": "UG", + "name": "Uganda", + "status": "active" + }, + { + "code": "UA", + "name": "Ukraine", + "status": "active" + }, + { + "code": "AE", + "name": "United Arab Emirates (the)", + "status": "active" + }, + { + "code": "GB", + "name": "United Kingdom of Great Britain and Northern Ireland (the)", + "status": "active" + }, + { + "code": "US", + "name": "United States of America (the)", + "status": "active" + }, + { + "code": "UM", + "name": "United States Minor Outlying Islands (the)", + "status": "active" + }, + { + "code": "UY", + "name": "Uruguay", + "status": "active" + }, + { + "code": "UZ", + "name": "Uzbekistan", + "status": "active" + }, + { + "code": "VU", + "name": "Vanuatu", + "status": "active" + }, + { + "code": "VE", + "name": "Venezuela (Bolivarian Republic of)", + "status": "active" + }, + { + "code": "VN", + "name": "Viet Nam", + "status": "active" + }, + { + "code": "VG", + "name": "Virgin Islands (British)", + "status": "active" + }, + { + "code": "VI", + "name": "Virgin Islands (U.S.)", + "status": "active" + }, + { + "code": "WF", + "name": "Wallis and Futuna", + "status": "active" + }, + { + "code": "EH", + "name": "Western Sahara", + "status": "active" + }, + { + "code": "YE", + "name": "Yemen", + "status": "active" + }, + { + "code": "YU", + "name": "Yugoslavia", + "status": "withdrawn" + }, + { + "code": "ZR", + "name": "Zaire", + "status": "withdrawn" + }, + { + "code": "ZM", + "name": "Zambia", + "status": "active" + }, + { + "code": "ZW", + "name": "Zimbabwe", + "status": "active" + } + ], + "OrganisationType": [ + { + "code": "10", + "name": "Government", + "status": "active", + "description": None + }, + { + "code": "11", + "name": "Local Government", + "description": "Any local (sub national) government organisation in either donor or recipient country.", + "status": "active" + }, + { + "code": "15", + "name": "Other Public Sector", + "status": "active", + "description": None + }, + { + "code": "21", + "name": "International NGO", + "status": "active", + 
"description": None + }, + { + "code": "22", + "name": "National NGO", + "status": "active", + "description": None + }, + { + "code": "23", + "name": "Regional NGO", + "status": "active", + "description": None + }, + { + "code": "24", + "name": "Partner Country based NGO", + "description": "Local and National NGO / CSO based in aid/assistance recipient country", + "status": "active" + }, + { + "code": "30", + "name": "Public Private Partnership", + "status": "active", + "description": None + }, + { + "code": "40", + "name": "Multilateral", + "status": "active", + "description": None + }, + { + "code": "60", + "name": "Foundation", + "status": "active", + "description": None + }, + { + "code": "70", + "name": "Private Sector", + "status": "active", + "description": None + }, + { + "code": "71", + "name": "Private Sector in Provider Country", + "description": "Is in provider / donor country.", + "status": "active" + }, + { + "code": "72", + "name": "Private Sector in Aid Recipient Country", + "description": "Is in aid recipient country.", + "status": "active" + }, + { + "code": "73", + "name": "Private Sector in Third Country", + "description": "Is not in either a donor or aid recipient country.", + "status": "active" + }, + { + "code": "80", + "name": "Academic, Training and Research", + "status": "active", + "description": None + }, + { + "code": "90", + "name": "Other", + "status": "active", + "description": None + } + ], + "Region": [ + { + "code": "88", + "name": "States Ex-Yugoslavia unspecified", + "status": "active" + }, + { + "code": "89", + "name": "Europe, regional", + "status": "active" + }, + { + "code": "189", + "name": "North of Sahara, regional", + "status": "active" + }, + { + "code": "289", + "name": "South of Sahara, regional", + "status": "active" + }, + { + "code": "298", + "name": "Africa, regional", + "status": "active" + }, + { + "code": "380", + "name": "West Indies, regional", + "status": "withdrawn" + }, + { + "code": "389", + "name": "Caribbean & Central America, regional", + "status": "active" + }, + { + "code": "489", + "name": "South America, regional", + "status": "active" + }, + { + "code": "498", + "name": "America, regional", + "status": "active" + }, + { + "code": "589", + "name": "Middle East, regional", + "status": "active" + }, + { + "code": "619", + "name": "Central Asia, regional", + "status": "active" + }, + { + "code": "679", + "name": "South Asia, regional", + "status": "active" + }, + { + "code": "689", + "name": "South & Central Asia, regional", + "status": "active" + }, + { + "code": "789", + "name": "Far East Asia, regional", + "status": "active" + }, + { + "code": "798", + "name": "Asia, regional", + "status": "active" + }, + { + "code": "889", + "name": "Oceania, regional", + "status": "active" + }, + { + "code": "998", + "name": "Developing countries, unspecified", + "status": "active" + }, + { + "code": "1027", + "name": "Eastern Africa, regional", + "status": "active" + }, + { + "code": "1028", + "name": "Middle Africa, regional", + "status": "active" + }, + { + "code": "1029", + "name": "Southern Africa, regional", + "status": "active" + }, + { + "code": "1030", + "name": "Western Africa, regional", + "status": "active" + }, + { + "code": "1031", + "name": "Caribbean, regional", + "status": "active" + }, + { + "code": "1032", + "name": "Central America, regional", + "status": "active" + }, + { + "code": "1033", + "name": "Melanesia, regional", + "status": "active" + }, + { + "code": "1034", + "name": "Micronesia, regional", + "status": 
"active" + }, + { + "code": "1035", + "name": "Polynesia, regional", + "status": "active" + } + ], + "TagVocabulary": [ + { + "code": "1", + "name": "Agrovoc", + "description": "A controlled vocabulary covering all areas of interest of the Food and Agriculture Organization (FAO) of the United Nations, including food, nutrition, agriculture, fisheries, forestry, environment etc.", # NOQA: E501 + "url": "https://agrovoc.fao.org/browse/agrovoc/en/", + "status": "active" + }, + { + "code": "2", + "name": "UN Sustainable Development Goals (SDG)", + "description": "A value from the top-level list of UN sustainable development goals (SDGs) (e.g. \u20181\u2019)", # NOQA: E501 + "url": "http://reference.iatistandard.org/codelists/UNSDG-Goals/", + "status": "active" + }, + { + "code": "3", + "name": "UN Sustainable Development Goals (SDG) Targets", + "description": "A value from the second-level list of UN sustainable development goals (SDGs) (e.g. \u20181.1\u2019)", # NOQA: E501 + "url": "http://reference.iatistandard.org/codelists/UNSDG-Targets/", + "status": "active" + }, + { + "code": "4", + "name": "Team Europe Initiatives", + "description": "A value from the list of Team Europe Initiatives. Team Europe consists of the European Commission, the EU Member States \u2014 including their implementing agencies and public development banks \u2014 as well as the European Investment Bank (EIB) and the European Bank for Reconstruction and Development (EBRD).", # NOQA: E501 + "url": "https://europa.eu/capacity4dev/joint-programming/documents/tei-codes-0", + "status": "active" + }, + { + "code": "99", + "name": "Reporting Organisation", + "status": "active", + "description": None, + "url": None + } + ], + "SectorCategory": [ + { + "code": "111", + "name": "Education, Level Unspecified", + "description": "The codes in this category are to be used only when level of education is unspecified or unknown (e.g. training of primary school teachers should be coded under 11220).", # NOQA: E501 + "status": "active" + }, + { + "code": "112", + "name": "Basic Education", + "status": "active", + "description": None + }, + { + "code": "113", + "name": "Secondary Education", + "status": "active", + "description": None + }, + { + "code": "114", + "name": "Post-Secondary Education", + "status": "active", + "description": None + }, + { + "code": "121", + "name": "Health, General", + "status": "active", + "description": None + }, + { + "code": "122", + "name": "Basic Health", + "status": "active", + "description": None + }, + { + "code": "123", + "name": "Non-communicable diseases (NCDs)", + "status": "active", + "description": None + }, + { + "code": "130", + "name": "Population Policies/Programmes & Reproductive Health", + "status": "active", + "description": None + }, + { + "code": "140", + "name": "Water Supply & Sanitation", + "status": "active", + "description": None + }, + { + "code": "151", + "name": "Government & Civil Society-general", + "description": "N.B. Use code 51010 for general budget support.", + "status": "active" + }, + { + "code": "152", + "name": "Conflict, Peace & Security", + "description": "N.B. 
Further notes on ODA eligibility (and exclusions) of conflict, peace and security related activities are given in paragraphs 76-81 of the Directives.", # NOQA: E501 + "status": "active" + }, + { + "code": "160", + "name": "Other Social Infrastructure & Services", + "status": "active", + "description": None + }, + { + "code": "210", + "name": "Transport & Storage", + "description": "Note: Manufacturing of transport equipment should be included under code 32172.", + "status": "active" + }, + { + "code": "220", + "name": "Communications", + "status": "active", + "description": None + }, + { + "code": "230", + "name": "ENERGY GENERATION AND SUPPLY", + "description": "Energy sector policy, planning and programmes; aid to energy ministries; institution capacity building and advice; unspecified energy activities including energy conservation.", # NOQA: E501 + "status": "withdrawn" + }, + { + "code": "231", + "name": "Energy Policy", + "status": "active", + "description": None + }, + { + "code": "232", + "name": "Energy generation, renewable sources", + "status": "active", + "description": None + }, + { + "code": "233", + "name": "Energy generation, non-renewable sources", + "status": "active", + "description": None + }, + { + "code": "234", + "name": "Hybrid energy plants", + "status": "active", + "description": None + }, + { + "code": "235", + "name": "Nuclear energy plants", + "status": "active", + "description": None + }, + { + "code": "236", + "name": "Energy distribution", + "status": "active", + "description": None + }, + { + "code": "240", + "name": "Banking & Financial Services", + "status": "active", + "description": None + }, + { + "code": "250", + "name": "Business & Other Services", + "status": "active", + "description": None + }, + { + "code": "311", + "name": "Agriculture", + "status": "active", + "description": None + }, + { + "code": "312", + "name": "Forestry", + "status": "active", + "description": None + }, + { + "code": "313", + "name": "Fishing", + "status": "active", + "description": None + }, + { + "code": "321", + "name": "Industry", + "status": "active", + "description": None + }, + { + "code": "322", + "name": "Mineral Resources & Mining", + "status": "active", + "description": None + }, + { + "code": "323", + "name": "Construction", + "status": "active", + "description": None + }, + { + "code": "331", + "name": "Trade Policies & Regulations", + "status": "active", + "description": None + }, + { + "code": "332", + "name": "Tourism", + "status": "active", + "description": None + }, + { + "code": "410", + "name": "General Environment Protection", + "description": "Covers activities concerned with conservation, protection or amelioration of the physical environment without sector allocation.", # NOQA: E501 + "status": "active" + }, + { + "code": "430", + "name": "Other Multisector", + "status": "active", + "description": None + }, + { + "code": "510", + "name": "General Budget Support", + "description": "Budget support in the form of sector-wide approaches (SWAps) should be included in the respective sectors.", # NOQA: E501 + "status": "active" + }, + { + "code": "520", + "name": "Development Food Assistance", + "status": "active", + "description": None + }, + { + "code": "530", + "name": "Other Commodity Assistance", + "description": "Non-food commodity assistance (when benefiting sector not specified).", + "status": "active" + }, + { + "code": "600", + "name": "Action Relating to Debt", + "status": "active", + "description": None + }, + { + "code": "720", + "name": 
"Emergency Response", + "description": "An emergency is a situation which results from man made crises and/or natural disasters.", # NOQA: E501 + "status": "active" + }, + { + "code": "730", + "name": "Reconstruction Relief & Rehabilitation", + "description": "This relates to activities during and in the aftermath of an emergency situation. Longer-term activities to improve the level of infrastructure or social services should be reported under the relevant economic and social sector codes. See also guideline on distinguishing humanitarian from sector-allocable aid.", # NOQA: E501 + "status": "active" + }, + { + "code": "740", + "name": "Disaster Prevention & Preparedness", + "description": "See code 43060 for disaster risk reduction.", + "status": "active" + }, + { + "code": "910", + "name": "Administrative Costs of Donors", + "status": "active", + "description": None + }, + { + "code": "920", + "name": "SUPPORT TO NON- GOVERNMENTAL ORGANISATIONS (NGOs)", + "description": "In the donor country.", + "status": "withdrawn" + }, + { + "code": "930", + "name": "Refugees in Donor Countries", + "status": "active", + "description": None + }, + { + "code": "998", + "name": "Unallocated / Unspecified", + "description": "Contributions to general development of the recipient should be included under programme assistance (51010).", # NOQA: E501 + "status": "active" + } + ], + "PolicyMarker": [ + { + "code": "1", + "name": "Gender Equality", + "status": "active" + }, + { + "code": "2", + "name": "Aid to Environment", + "status": "active" + }, + { + "code": "3", + "name": "Participatory Development/Good Governance", + "status": "active" + }, + { + "code": "4", + "name": "Trade Development", + "status": "active" + }, + { + "code": "5", + "name": "Aid Targeting the Objectives of the Convention on Biological Diversity", + "status": "active" + }, + { + "code": "6", + "name": "Aid Targeting the Objectives of the Framework Convention on Climate Change - Mitigation", + "status": "active" + }, + { + "code": "7", + "name": "Aid Targeting the Objectives of the Framework Convention on Climate Change - Adaptation", + "status": "active" + }, + { + "code": "8", + "name": "Aid Targeting the Objectives of the Convention to Combat Desertification", + "status": "active" + }, + { + "code": "9", + "name": "Reproductive, Maternal, Newborn and Child Health (RMNCH)", + "status": "active" + }, + { + "code": "10", + "name": "Disaster Risk Reduction(DRR)", + "status": "active" + }, + { + "code": "11", + "name": "Disability", + "status": "active" + }, + { + "code": "12", + "name": "Nutrition", + "status": "active" + } + ], + "PolicySignificance": [ + { + "code": "0", + "name": "not targeted", + "description": "The score \"not targeted\" means that the activity was examined but found not to target the policy objective.", # NOQA: E501 + "status": "active" + }, + { + "code": "1", + "name": "significant objective", + "description": "Significant (secondary) policy objectives are those which, although important, were not the prime motivation for undertaking the activity.", # NOQA: E501 + "status": "active" + }, + { + "code": "2", + "name": "principal objective", + "description": "Principal (primary) policy objectives are those which can be identified as being fundamental in the design and impact of the activity and which are an explicit objective of the activity. 
They may be selected by answering the question \"Would the activity have been undertaken without this objective?\"", # NOQA: E501 + "status": "active" + }, + { + "code": "3", + "name": "principal objective AND in support of an action programme", + "description": "For desertification-related aid only", + "status": "active" + }, + { + "code": "4", + "name": "Explicit primary objective", + "status": "active", + "description": None + } + ], + "PolicyMarkerVocabulary": [ + { + "code": "1", + "name": "OECD DAC CRS", + "description": "The policy marker is an OECD DAC CRS policy marker, Reported in columns 20-23, 28-31 and 54 of CRS++ reporting format.", # NOQA: E501 + "url": "http://reference.iatistandard.org/codelists/PolicyMarker/", + "status": "active" + }, + { + "code": "99", + "name": "Reporting Organisation", + "description": "The policy marker is one that is defined and tracked by the reporting organisation", + "status": "active", + "url": None + } + ] + } + # mock codelists class init to set self.codelists_dict to `data` + monkeypatch.setattr(codelists.Codelists, "__init__", lambda x, y: setattr(x, "codelists_dict", data)) + return codelists.Codelists() From 31900d25185d02a997d522a963055a054e989601 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Fri, 10 Nov 2023 11:41:49 +0100 Subject: [PATCH 21/49] feat: added tests for custom fields main --- .../custom_fields/test_custom_fields.py | 73 +++++++++++++++++-- .../custom_fields/test_dataset_metadata.py | 20 ++++- 2 files changed, 83 insertions(+), 10 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_custom_fields.py b/tests/direct_indexing/custom_fields/test_custom_fields.py index bcd6a8e07..8117232cf 100644 --- a/tests/direct_indexing/custom_fields/test_custom_fields.py +++ b/tests/direct_indexing/custom_fields/test_custom_fields.py @@ -1,13 +1,72 @@ -# TODO +from direct_indexing.custom_fields.custom_fields import add_all, get_custom_metadata, process_activity +FCDO_IN = 'direct_indexing.custom_fields.custom_fields.settings.FCDO_INSTANCE' -def test_add_all(): - assert True +def test_add_all(mocker): + mock_pa = mocker.patch('direct_indexing.custom_fields.custom_fields.process_activity') + mock_ca = mocker.patch('direct_indexing.custom_fields.custom_fields.currency_aggregation') + mock_h2 = mocker.patch('direct_indexing.custom_fields.custom_fields.raise_h2_budget_data_to_h1') -def test_process_activity(): - assert True + # Test that the h2 function is not called when fcdo instance is false, + # and that the process_activity and currency_aggregation functions are called once + mocker.patch(FCDO_IN, False) + data = {} + add_all(data, None, None, None) + mock_pa.assert_called_once() + mock_ca.assert_called_once() + mock_h2.assert_not_called() + # Test that the process_activity function is called len(data) times + data = [{}, {}] + add_all(data, None, None, None) + assert mock_pa.call_count == len(data) + 1 # +1 for the previous test -def test_get_custom_metadata(): - assert True + # Test that the h2 function is called when fcdo instance is true + mocker.patch(FCDO_IN, True) + data = {} + add_all(data, None, None, None) + mock_h2.assert_called_once() + + +def test_process_activity(mocker): + # patch all subfunctions + mock_ac = mocker.patch('direct_indexing.custom_fields.custom_fields.add_codelist_fields') + mock_tn = mocker.patch('direct_indexing.custom_fields.custom_fields.title_narrative_first') + mock_ad = mocker.patch('direct_indexing.custom_fields.custom_fields.activity_dates') + mock_pm = 
mocker.patch('direct_indexing.custom_fields.custom_fields.policy_marker_combined') + mock_cc = mocker.patch('direct_indexing.custom_fields.custom_fields.currency_conversion') + mock_am = mocker.patch('direct_indexing.custom_fields.custom_fields.add_meta_to_activity') + mock_adh = mocker.patch('direct_indexing.custom_fields.custom_fields.add_default_hierarchy') + mock_ajd = mocker.patch('direct_indexing.custom_fields.custom_fields.add_json_dumps') + mock_adq = mocker.patch('direct_indexing.custom_fields.custom_fields.add_date_quarter_fields') + mock_dlcc = mocker.patch('direct_indexing.custom_fields.custom_fields.document_link_category_combined') + + # Test that all subfunctions are called once + mocker.patch(FCDO_IN, False) + activity = {} + process_activity(activity, None, None, None) + mock_ac.assert_called_once() + mock_tn.assert_called_once() + mock_ad.assert_called_once() + mock_pm.assert_called_once() + mock_cc.assert_called_once() + mock_am.assert_called_once() + mock_adh.assert_called_once() + # test that the others are not called + mock_ajd.assert_not_called() + mock_adq.assert_not_called() + mock_dlcc.assert_not_called() + + # Test that the remaining functions are called when FCDO_INSTANCE is True + mocker.patch(FCDO_IN, True) + process_activity(activity, None, None, None) + mock_ajd.assert_called_once() + mock_adq.assert_called_once() + mock_dlcc.assert_called_once() + + +def test_get_custom_metadata(mocker): + mock_dm = mocker.patch('direct_indexing.custom_fields.custom_fields.dataset_metadata') + get_custom_metadata(None) + mock_dm.assert_called_once() diff --git a/tests/direct_indexing/custom_fields/test_dataset_metadata.py b/tests/direct_indexing/custom_fields/test_dataset_metadata.py index 0dccff4ed..5b63c1fa7 100644 --- a/tests/direct_indexing/custom_fields/test_dataset_metadata.py +++ b/tests/direct_indexing/custom_fields/test_dataset_metadata.py @@ -1,9 +1,23 @@ -# TODO +import pytest +from direct_indexing.custom_fields.dataset_metadata import dataset_metadata, add_meta_to_activity -def test_dataset_metadata(): + +def test_dataset_metadata(fixture_dataset): assert True -def test_add_meta_to_activity(): +def test_add_meta_to_activity(fixture_dataset): + # given empty activity and empty metadata, assert nothing changes in activity + activity = {} + metadata = {} + activity = add_meta_to_activity(activity, metadata) + assert activity == {} assert True + + +@pytest.fixture +def fixture_dataset(): + return { + "id": "id_test", + } \ No newline at end of file From e15b374a7d3f51d55a41c5b715274ecb503e7365 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Fri, 10 Nov 2023 11:51:47 +0100 Subject: [PATCH 22/49] feat: added tests for dataset metadata --- .../custom_fields/test_dataset_metadata.py | 33 +++++++++++++++---- 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_dataset_metadata.py b/tests/direct_indexing/custom_fields/test_dataset_metadata.py index 5b63c1fa7..dc1d7157d 100644 --- a/tests/direct_indexing/custom_fields/test_dataset_metadata.py +++ b/tests/direct_indexing/custom_fields/test_dataset_metadata.py @@ -1,23 +1,44 @@ import pytest -from direct_indexing.custom_fields.dataset_metadata import dataset_metadata, add_meta_to_activity +from direct_indexing.custom_fields.dataset_metadata import add_meta_to_activity, dataset_metadata def test_dataset_metadata(fixture_dataset): - assert True + expected_res = { + "dataset.id": "id_test", + "dataset.resources.hash": "cc612755d0b822bb9af82f43e121428634be255a", + # 
dataset.resources.test: should not be included + # dataset.test: should not be included + } + metadata = dataset_metadata(fixture_dataset) + assert metadata == expected_res + # If resources not in metadata, assert nothing is added + dataset = {} + metadata = dataset_metadata(dataset) + assert metadata == {} -def test_add_meta_to_activity(fixture_dataset): - # given empty activity and empty metadata, assert nothing changes in activity + +def test_add_meta_to_activity(): + # Given empty activity and empty metadata, assert nothing changes in activity activity = {} metadata = {} activity = add_meta_to_activity(activity, metadata) assert activity == {} - assert True + + # Given empty activity and metadata, assert metadata is added to activity + activity = {} + metadata = {"test": 1} + activity = add_meta_to_activity(activity, metadata) + assert activity == {"test": 1} @pytest.fixture def fixture_dataset(): return { "id": "id_test", - } \ No newline at end of file + "test": 1, + "resources": [ + {"hash": "cc612755d0b822bb9af82f43e121428634be255a", "test": 1}, + ] + } From 8f529d14c963d4a3773bbaf83f551c3a145bb4de Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Fri, 10 Nov 2023 12:43:32 +0100 Subject: [PATCH 23/49] feat: added tests for date_quarters --- .../custom_fields/date_quarters.py | 13 ++-- .../custom_fields/test_date_quarters.py | 78 +++++++++++++++++-- 2 files changed, 80 insertions(+), 11 deletions(-) diff --git a/direct_indexing/custom_fields/date_quarters.py b/direct_indexing/custom_fields/date_quarters.py index 422694b21..55a648532 100644 --- a/direct_indexing/custom_fields/date_quarters.py +++ b/direct_indexing/custom_fields/date_quarters.py @@ -96,8 +96,11 @@ def retrieve_date_quarter(date): The date object will always be a string object in the shape of an ISO date object, meaning YYYY-MM-DD """ - if isinstance(date, str): - return ((int(date[5:7]) - 1) // 3) + 1 - if hasattr(date, "strftime") and hasattr(date, "month"): - return ((date.month - 1) // 3) + 1 - return None + try: + if isinstance(date, str): + return ((int(date[5:7]) - 1) // 3) + 1 + if hasattr(date, "strftime") and hasattr(date, "month"): + return ((date.month - 1) // 3) + 1 + return None + except Exception: + return None diff --git a/tests/direct_indexing/custom_fields/test_date_quarters.py b/tests/direct_indexing/custom_fields/test_date_quarters.py index e55f6bd1e..fda0250c0 100644 --- a/tests/direct_indexing/custom_fields/test_date_quarters.py +++ b/tests/direct_indexing/custom_fields/test_date_quarters.py @@ -1,13 +1,79 @@ -# TODO +from datetime import datetime +import pytest -def test_add_date_quarter_fields(): - assert True +from direct_indexing.custom_fields.date_quarters import ( + add_date_quarter_fields, recursive_date_fields, retrieve_date_quarter +) -def test_recursive_date_fields(): - assert True +def test_add_date_quarter_fields(fixture_data): + data = add_date_quarter_fields(fixture_data) + # Assert transaction.transaction-date.iso-date quarter field is added, the values should be 2,3 based on the fixture + assert data['transaction.transaction-date.quarter'] == [2, 3] + # Assert no test.quarter field was made + assert 'test.quarter' not in data + # pass + + +def test_recursive_date_fields(fixture_data): + original_head = 'transaction' + original_tail = ['transaction-date'] + + # Test if an empty dict is passed, the function returns an empty list + assert recursive_date_fields({}, '', '') == [] + + # Test if a dict is passed, it is converted to a list + source_data = {original_head: {}} + data 
= recursive_date_fields(source_data, original_head, original_tail) + assert data == [] + assert source_data == {original_head: [{}]} + + # Test if a list is passed, the function is called recursively for each dict in the list + source_data = fixture_data.copy() + data = recursive_date_fields(source_data, original_head, original_tail) + assert data == [2, 3] def test_retrieve_date_quarter(): - assert True + # Test that if the date is not a string nor a datetime object, the function returns None + res = retrieve_date_quarter(None) + assert res is None + + # Test that if the value is a string but not a date, the function returns None + res = retrieve_date_quarter("test") + assert res is None + + # Test that if the value is a YYYY-MM-DD string, the function returns the correct quarter + res = retrieve_date_quarter("2019-04-01") + assert res == 2 + + # Test for datetime objects, once for each hardcoded quarter + for i in range(1, 13): + res = retrieve_date_quarter(datetime(2019, i, 1)) + if i in [1, 2, 3]: + assert res == 1 + if i in [4, 5, 6]: + assert res == 2 + if i in [7, 8, 9]: + assert res == 3 + if i in [10, 11, 12]: + assert res == 4 + + +@pytest.fixture +def fixture_data(): + return { + "transaction": [ + {"transaction-date": [ + {"iso-date": "2018-04-09"} + ]}, + {"transaction-date": [{ + "iso-date": "2018-09-09" + }]}, + {"transaction-date": [{ + "test": 1 + }]} + ], + "test": 1 + } From eb42aeb6abb5b1ea832ba51fb25da52a2cb0613b Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Fri, 10 Nov 2023 13:15:41 +0100 Subject: [PATCH 24/49] feat: added tests for document category combined --- .../document_link_category_combined.py | 10 +++--- .../test_document_link_category_combined.py | 31 +++++++++++++++++-- 2 files changed, 33 insertions(+), 8 deletions(-) diff --git a/direct_indexing/custom_fields/document_link_category_combined.py b/direct_indexing/custom_fields/document_link_category_combined.py index 27207b86f..415eaea7f 100644 --- a/direct_indexing/custom_fields/document_link_category_combined.py +++ b/direct_indexing/custom_fields/document_link_category_combined.py @@ -21,11 +21,11 @@ def document_link_category_combined(data): data[final_field] = [] for doc in data[dl]: codes = '' + if 'category' not in doc: + continue if type(doc['category']) is dict: doc['category'] = [doc['category']] - for category in doc['category']: - if codes != '': - codes += ',' - codes += category["code"] - data[final_field].append(codes) + codes = ",".join(category["code"] for category in doc.get("category", []) if "code" in category) + if codes != '': + data[final_field].append(codes) return data diff --git a/tests/direct_indexing/custom_fields/test_document_link_category_combined.py b/tests/direct_indexing/custom_fields/test_document_link_category_combined.py index 2739075b5..5430068e1 100644 --- a/tests/direct_indexing/custom_fields/test_document_link_category_combined.py +++ b/tests/direct_indexing/custom_fields/test_document_link_category_combined.py @@ -1,5 +1,30 @@ -# TODO +import pytest +from direct_indexing.custom_fields.document_link_category_combined import document_link_category_combined -def test_document_link_category_combined(): - assert True + +def test_document_link_category_combined(fixture_data): + # Test if no document-link-category, assert nothing changes + dl = 'document-link' + dlc = 'document-link.category-codes-combined' + data = {} + assert document_link_category_combined(data) == {} + + # Test if a document-link is an empty dict, it is converted to a list and ccc is empty + 
data = {dl: {}} + assert document_link_category_combined(data) == {dl: [{}], dlc: []} + + # Test if a category code is a dict, it is converted to a list + data = {dl: {'category': {}}} + assert document_link_category_combined(data) == {dl: [{'category': [{}]}], dlc: []} + + # Test if two codes are present, they are both added + data = fixture_data.copy() + expected_res = fixture_data.copy() + expected_res[dlc] = ['1,2'] + assert document_link_category_combined(data) == expected_res + + +@pytest.fixture +def fixture_data(): + return {'document-link': [{'category': [{'code': '1'}, {'code': '2'}]}]} From 151b958fcacbf8939503e63ef35154eb17680d73 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Fri, 10 Nov 2023 13:23:32 +0100 Subject: [PATCH 25/49] feat: added test for json_dumps --- .../custom_fields/test_json_dumps.py | 30 +++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_json_dumps.py b/tests/direct_indexing/custom_fields/test_json_dumps.py index 08a3866bd..4a6d88d8d 100644 --- a/tests/direct_indexing/custom_fields/test_json_dumps.py +++ b/tests/direct_indexing/custom_fields/test_json_dumps.py @@ -1,5 +1,31 @@ -# TODO +import json + +from direct_indexing.custom_fields.json_dumps import add_json_dumps def test_add_json_dumps(): - assert True + # Test nothing changes if activity is empty + activity = {} + add_json_dumps(activity) + assert activity == {} + + # Test if an activity is present, but the field is not in JSON fields, nothing changes to the activity + activity = {"test": 1} + add_json_dumps(activity) + assert activity == {"test": 1} + + # Test if an activity is present, the field is in JSON_FIELDS, the field is a dict, + # the data is added as a single json string + activity = {"title": {"narrative": "test"}} + expected_res = activity.copy() + expected_res['json.title'] = json.dumps(activity['title']) + add_json_dumps(activity) + assert activity == expected_res + + # Test if an activity is present, the field is in JSON_FIELDS, the field is a list, + # the data is added as a list of json strings + activity = {"title": [{"narrative": "test"}, {"narrative": "toast"}]} + expected_res = activity.copy() + expected_res['json.title'] = [json.dumps(activity['title'][0]), json.dumps(activity['title'][1])] + add_json_dumps(activity) + assert activity == expected_res From 43820576aa56683167e717ebd33af96a728d1ade Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Fri, 10 Nov 2023 14:48:57 +0100 Subject: [PATCH 26/49] feat: added tests for organisation custom fields --- .../organisation_custom_fields.py | 13 ++-- .../test_organisation_custom_fields.py | 65 +++++++++++++++++-- 2 files changed, 65 insertions(+), 13 deletions(-) diff --git a/direct_indexing/custom_fields/organisation_custom_fields.py b/direct_indexing/custom_fields/organisation_custom_fields.py index c9fa82bb0..28cf0c537 100644 --- a/direct_indexing/custom_fields/organisation_custom_fields.py +++ b/direct_indexing/custom_fields/organisation_custom_fields.py @@ -25,7 +25,7 @@ def index_many_to_many_relations(organisation): # if 0, represent with index -1, else represent with index n. TE = 'total-expenditure' if TE in organisation: - if type(organisation[TE]) != list: + if not isinstance(organisation[TE], list): organisation[TE] = [organisation[TE]] index_total_expenditure(organisation, TE) @@ -35,12 +35,7 @@ def index_total_expenditure(organisation, field): Go through the activity participating orgs and index the given child. 
Because this is currently used for results, we directly pass the required children. - :param field: a dataset containing the initial child of the activity - :param child: the second level child of the aforementioned field - """ - # Check if the child exists and make the child a list if it is a dict. - # total-expenditure.value.currency - """ + Total Expenditure 0..* total-expenditure 1..1 period-start 1..1 period-end @@ -69,7 +64,11 @@ def index_total_expenditure(organisation, field): for every expense line, how many children value and ref are there -1 indicates there is no ref + + :param field: a dataset containing the initial child of the activity + :param child: the second level child of the aforementioned field """ + # Check if the child exists and make the child a list if it is a dict. EL_STR = 'expense-line' organisation['total-expenditure.expense-line-index'] = [] organisation['total-expenditure.expense-line.ref-index'] = [] diff --git a/tests/direct_indexing/custom_fields/test_organisation_custom_fields.py b/tests/direct_indexing/custom_fields/test_organisation_custom_fields.py index b548f3fdd..cc5ddd0b1 100644 --- a/tests/direct_indexing/custom_fields/test_organisation_custom_fields.py +++ b/tests/direct_indexing/custom_fields/test_organisation_custom_fields.py @@ -1,13 +1,66 @@ -# TODO +from direct_indexing.custom_fields.organisation_custom_fields import ( + add_all, index_many_to_many_relations, index_total_expenditure +) +TE = 'total-expenditure' -def test_add_all(): - assert True +def test_add_all(mocker): + # Mock index_many_to_many_relations + mock = mocker.patch('direct_indexing.custom_fields.organisation_custom_fields.index_many_to_many_relations') + # Test that nothing changes if the data is empty, just converted to a list + data = {} + data = add_all(data) + assert data == [{}] + mock.assert_called_once() # called once with the empty dict -def test_index_many_to_many_relations(): - assert True + # Test given a list of 2 organisations, the function is called twice + data = [{}, {}] + add_all(data) + assert mock.call_count == len(data) + 1 # +1 because of previous tests + + +def test_index_many_to_many_relations(mocker): + # Test that nothing changes if total-expenditure is not present + data = {} + index_many_to_many_relations(data) + assert data == {} + + mock = mocker.patch('direct_indexing.custom_fields.organisation_custom_fields.index_total_expenditure') + # Test if total-expenditure is present, but not a list, it is converted to a list + data = {TE: {}} + index_many_to_many_relations(data) + assert data == {TE: [{}]} + mock.assert_called_once() def test_index_total_expenditure(): - assert True + EL = 'expense-line' + ref = 'ref' + val = 'value' + eli = 'total-expenditure.expense-line-index' + elr = 'total-expenditure.expense-line.ref-index' + elv = 'total-expenditure.expense-line.val-index' + + # Test that the default fields are created and empty if there is no total-expenditure + data = {TE: []} + index_total_expenditure(data, TE) + assert data[eli] == [] + assert data[elr] == [] + assert data[elv] == [] + + # Test that any expense-line is converted to a list + data = {TE: [{EL: {}}]} + index_total_expenditure(data, TE) + assert data[TE][0][EL] == [{}] + + # Test that the expense-line-index is created and populated + data = {TE: [ + {EL: {val: 10}}, + {EL: {ref: 1, val: 20}}, + {EL: [{ref: 1, val: 30}, {val: 40}, {ref: 2, val: 50}]} + ]} + index_total_expenditure(data, TE) + assert data[eli] == [1, 1, 3] # one count for each expense-line + assert data[elr] == [-1, 
0, 0, -1, 2] # -1 for empty, + assert data[elv] == [0, 0, 0, 1, 2] # the index of the ref in the list of expense lines, starting at 0 From 39fe0f7fac050a0209c9bc59459978d304b32aa5 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Fri, 10 Nov 2023 14:56:46 +0100 Subject: [PATCH 27/49] feat: added test for policy marker combined --- .../test_policy_marker_combined.py | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_policy_marker_combined.py b/tests/direct_indexing/custom_fields/test_policy_marker_combined.py index 8de537b2c..538d7cb4e 100644 --- a/tests/direct_indexing/custom_fields/test_policy_marker_combined.py +++ b/tests/direct_indexing/custom_fields/test_policy_marker_combined.py @@ -1,5 +1,23 @@ -# TODO +from direct_indexing.custom_fields.policy_marker_combined import policy_marker_combined def test_policy_marker_combined(): - assert True + pm = 'policy-marker' + pmc = 'policy-marker.combined' + # Test if no policy-marker, assert nothing changes + data = {} + data = policy_marker_combined(data) + assert data == {} + + # Test if a policy-marker is an empty dict, it is converted to a list + data = {pm: {}} + data = policy_marker_combined(data) + assert data[pm] == [{}] # Also tests if there is no code in pm + assert data[pmc] == [] + + # Test if pm has code, it is added to pmc with its significance. + data = {pm: [{'code': 1}, {'code': 2, 'significance': 1}]} + data = policy_marker_combined(data) + # 1__n because the significance is not present for code 1 + # 2__1 because the significance is 1 for code 2 + assert data[pmc] == ['1__n', '2__1'] From c229b1c2335edaa6b86955900f42c33ccc5500cd Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Mon, 13 Nov 2023 10:58:12 +0100 Subject: [PATCH 28/49] feat: added tests for raising h2 budget data --- .../raise_h2_budget_data_to_h1.py | 13 +- .../test_raise_h2_budget_data_to_h1.py | 111 +++++++++++++++++- 2 files changed, 112 insertions(+), 12 deletions(-) diff --git a/direct_indexing/custom_fields/raise_h2_budget_data_to_h1.py b/direct_indexing/custom_fields/raise_h2_budget_data_to_h1.py index 3e999e5e4..3abe90eac 100644 --- a/direct_indexing/custom_fields/raise_h2_budget_data_to_h1.py +++ b/direct_indexing/custom_fields/raise_h2_budget_data_to_h1.py @@ -5,7 +5,7 @@ def raise_h2_budget_data_to_h1(data): Check if data is h1, if so, check if it has related-activities """ for activity in data: - if activity['hierarchy'] == 1 and 'related-activity' in activity: + if 'hierarchy' in activity and activity['hierarchy'] == 1 and 'related-activity' in activity: if type(activity['related-activity']) is dict: activity['related-activity'] = [activity['related-activity']] data_present, related_data = pull_related_data_to_h1(data, activity) @@ -34,12 +34,9 @@ def pull_related_data_to_h1(data, activity): related_budget_period_start_iso_date = [] related_budget_period_end_iso_date = [] - related_activity_refs = [] - for rel in activity['related-activity']: - related_activity_refs.append(rel['ref']) - + related_activity_refs = [rel['ref'] for rel in activity.get('related-activity', []) if 'ref' in rel] for _activity in data: - if _activity['iati-identifier'] in related_activity_refs: + if 'iati-identifier' in _activity and _activity['iati-identifier'] in related_activity_refs: if 'budget' in _activity: related_data = True if type(_activity['budget']) is dict: @@ -51,8 +48,8 @@ def pull_related_data_to_h1(data, activity): 
related_budget_period_start_iso_date.append(budget['period-start'][0]['iso-date']) related_budget_period_end_iso_date.append(budget['period-end'][0]['iso-date']) - related_budget_period_start_quarter.extend(_activity['budget.period-start.quarter']) - related_budget_period_end_quarter.extend(_activity['budget.period-end.quarter']) + related_budget_period_start_quarter.extend(_activity.get('budget.period-start.quarter', [])) + related_budget_period_end_quarter.extend(_activity.get('budget.period-end.quarter', [])) return related_data, { 'related_budget_value': related_budget_value, diff --git a/tests/direct_indexing/custom_fields/test_raise_h2_budget_data_to_h1.py b/tests/direct_indexing/custom_fields/test_raise_h2_budget_data_to_h1.py index ebe8195ec..6a17042bc 100644 --- a/tests/direct_indexing/custom_fields/test_raise_h2_budget_data_to_h1.py +++ b/tests/direct_indexing/custom_fields/test_raise_h2_budget_data_to_h1.py @@ -1,9 +1,112 @@ -# TODO +from direct_indexing.custom_fields.raise_h2_budget_data_to_h1 import pull_related_data_to_h1, raise_h2_budget_data_to_h1 -def test_raise_h2_budget_data_to_h1(): - assert True +def test_raise_h2_budget_data_to_h1(mocker): + # Test note, data is always a list + hier = "hierarchy" + ra = "related-activity" + rbv = "related_budget_value" + + # mock pull_related_data_to_h1 + mock_pull = mocker.patch('direct_indexing.custom_fields.raise_h2_budget_data_to_h1.pull_related_data_to_h1', + return_value=(True, {rbv: [1]})) + + # Test that pull_related_data is not triggered if there is no hierarchy + data = [{}] + raise_h2_budget_data_to_h1(data) + mock_pull.assert_not_called() + + # Test that pull_related_data is not triggered if there is no related-activity + data = [{hier: 1}] + raise_h2_budget_data_to_h1(data) + mock_pull.assert_not_called() + + # Test that pull_related_data is not triggered if hier is not 1 + data = [{hier: 2, ra: {}}] + raise_h2_budget_data_to_h1(data) + mock_pull.assert_not_called() + + # Test that ra is converted to a list if it is supplied as a dict + data = [{hier: 1, ra: {}}] + raise_h2_budget_data_to_h1(data) + assert type(data[0][ra]) is list + # By mocking the return value of pull_related_data_to_h1 + # We can assert that if data present the rbv is added to data's first element + assert data[0][rbv] == [1] def test_pull_related_data_to_h1(): - assert True + # lists + related_budget_value = "related_budget_value" + related_budget_period_start_quarter = "related_budget_period_start_quarter" + related_budget_period_end_quarter = "related_budget_period_end_quarter" + related_budget_period_start_iso_date = "related_budget_period_start_iso_date" + related_budget_period_end_iso_date = "related_budget_period_end_iso_date" + + base_res = { + related_budget_value: [], + related_budget_period_start_quarter: [], + related_budget_period_end_quarter: [], + related_budget_period_start_iso_date: [], + related_budget_period_end_iso_date: [], + } + + hier = "hierarchy" + ra = "related-activity" + data = [{hier: 1, ra: {}}] + expected_res = base_res.copy() + + # Test if there is no related activity, the data is unchanged and railsed values are empty lists + res_bool, related_data_dict = pull_related_data_to_h1(data, data[0]) + assert related_data_dict == expected_res + assert not res_bool + + data = [ + { + "iati-identifier": "act-1", + "related-activity": [{"ref": "act-2", "type": '2'}, {"ref": "act-3", "type": '2'}] + }, + { + "iati-identifier": "act-2", + "budget": { + "value": 1, + "period-start": [ + { + "iso-date": "2019-01-01" + } + ], + 
"period-end": [ + { + "iso-date": "2019-04-01" + } + ] + }, + "related-activity": [{"ref": "act-1", "type": '1'}] + }, + { + "iati-identifier": "act-3", + "budget": { + "value": 2, + "period-start": [ + { + "iso-date": "2020-01-01" + } + ], + "period-end": [ + { + "iso-date": "2020-04-01" + } + ] + }, + "related-activity": [{"ref": "act-1", "type": '1'}], + "budget.period-start.quarter": ["1"], + "budget.period-end.quarter": ["2"] + } + ] + res_bool, related_data_dict = pull_related_data_to_h1(data, data[0]) + assert res_bool + assert related_data_dict[related_budget_value] == [1, 2] + assert related_data_dict[related_budget_period_start_quarter] == ["1"] + assert related_data_dict[related_budget_period_end_quarter] == ["2"] + assert related_data_dict[related_budget_period_start_iso_date] == ["2019-01-01", "2020-01-01"] + assert related_data_dict[related_budget_period_end_iso_date] == ["2019-04-01", "2020-04-01"] From 397fbf5965f34a62b7057decf98baef8d1869969 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Mon, 13 Nov 2023 11:02:48 +0100 Subject: [PATCH 29/49] feat: added tests for title narrative --- .../custom_fields/test_title_narrative.py | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_title_narrative.py b/tests/direct_indexing/custom_fields/test_title_narrative.py index 8344e0a8a..86c80447c 100644 --- a/tests/direct_indexing/custom_fields/test_title_narrative.py +++ b/tests/direct_indexing/custom_fields/test_title_narrative.py @@ -1,5 +1,23 @@ -# TODO +from direct_indexing.custom_fields.title_narrative import title_narrative_first def test_title_narrative_first(): - assert True + data = [] + assert title_narrative_first(data) == [] + + data = {} + assert title_narrative_first(data) == {} + + data = {'title': {}} + expected_res = data.copy() + assert title_narrative_first(data) == expected_res + + data = {'title': {'narrative': 'test'}} + expected_res = data.copy() + expected_res['title.narrative.first'] = 'test' + assert title_narrative_first(data) == expected_res + + data = {'title': {'narrative': ['first test', 'second test']}} + expected_res = data.copy() + expected_res['title.narrative.first'] = 'first test' + assert title_narrative_first(data) == expected_res From 5ca9a900b2f7c0263d984cef7a3bbd5c6187d1b9 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Mon, 13 Nov 2023 11:33:40 +0100 Subject: [PATCH 30/49] feat: added remaining tests for main direct indexing file --- tests/direct_indexing/test_direct_indexing.py | 115 +++++++++++++++--- 1 file changed, 99 insertions(+), 16 deletions(-) diff --git a/tests/direct_indexing/test_direct_indexing.py b/tests/direct_indexing/test_direct_indexing.py index ae4b46306..9879b7664 100644 --- a/tests/direct_indexing/test_direct_indexing.py +++ b/tests/direct_indexing/test_direct_indexing.py @@ -1,21 +1,33 @@ -# TODO +import json + import pysolr import pytest -from direct_indexing.direct_indexing import clear_indices, clear_indices_for_core +from direct_indexing.direct_indexing import ( + clear_indices, clear_indices_for_core, drop_removed_data, run, run_dataset_metadata, run_publisher_metadata +) + +SOLR = 'pysolr.Solr' # Test group: test_run -def test_run_clear_indices_success(mocker): +def test_run(mocker): # INTEGRATION TEST - assert True + mock_clear = mocker.patch('direct_indexing.direct_indexing.clear_indices') + mock_index_publisher = mocker.patch('direct_indexing.direct_indexing.index_publisher_metadata') + mock_index_datasets = 
mocker.patch('direct_indexing.direct_indexing.index_datasets_and_dataset_metadata') + + run() + mock_clear.assert_called_once() + mock_index_publisher.assert_called_once() + mock_index_datasets.assert_called_once() # Test group: test_clear_indices def test_clear_indices_clears_all_indices(mocker): # UNIT TEST solr_instance_mock = mocker.MagicMock() - mocker.patch('pysolr.Solr', return_value=solr_instance_mock) + mocker.patch(SOLR, return_value=solr_instance_mock) result = clear_indices() # Check that the Solr delete method was called @@ -28,7 +40,7 @@ def test_clear_indices_clears_all_indices(mocker): def test_clear_indices_raises_error(mocker): # UNIT TEST - mocker.patch('pysolr.Solr', side_effect=pysolr.SolrError) + mocker.patch(SOLR, side_effect=pysolr.SolrError) with pytest.raises(pysolr.SolrError): clear_indices() @@ -36,7 +48,7 @@ def test_clear_indices_raises_error(mocker): def test_clear_indices_for_core_clears_all_indices(mocker): # UNIT TEST solr_instance_mock = mocker.MagicMock() - mocker.patch('pysolr.Solr', return_value=solr_instance_mock) + mocker.patch(SOLR, return_value=solr_instance_mock) result = clear_indices_for_core("dataset") # Check that the Solr delete method was called @@ -49,22 +61,93 @@ def test_clear_indices_for_core_clears_all_indices(mocker): def test_clear_indices_for_core_raises_error(mocker): # UNIT TEST - mocker.patch('pysolr.Solr', side_effect=pysolr.SolrError) + mocker.patch(SOLR, side_effect=pysolr.SolrError) with pytest.raises(pysolr.SolrError): clear_indices_for_core("dataset") -def test_clear_indices_for_core(): - assert True +def test_run_publisher_metadata(mocker): + meta = 'direct_indexing.direct_indexing.index_publisher_metadata' + mock_index_publisher = mocker.patch(meta, return_value='Success') + result = run_publisher_metadata() + assert result == 'Success' + mock_index_publisher.assert_called_once() + + mocker.patch(meta, return_value='There is an ERROR in result') + with pytest.raises(ValueError): + run_publisher_metadata() -def test_run_publisher_metadata(): - assert True +def test_run_dataset_metadata(mocker): + mock = mocker.patch('direct_indexing.direct_indexing.index_datasets_and_dataset_metadata', return_value='Success') + res = run_dataset_metadata(False, False) + assert res == 'Success' + mock.assert_called_once() -def test_run_dataset_metadata(): - assert True +def test_drop_removed_data(mocker, tmp_path, requests_mock, fixture_solr_response, fixture_dataset_metadata): + # Mock settings.SOLR_DATASET to https://test.com + mocker.patch('direct_indexing.direct_indexing.settings.SOLR_DATASET', 'https://test.com') + test_url = 'https://test.com/select?fl=name%2Cid%2Ciati_cloud_indexed&indent=true&q.op=OR&q=*%3A*&rows=10000000' + requests_mock.get(test_url, json=fixture_solr_response) + # mock settings.BASE_DIR to be tmp_path + mocker.patch('direct_indexing.direct_indexing.settings.BASE_DIR', tmp_path) + paths = ['direct_indexing', 'data_sources', 'datasets'] + path = tmp_path / '/'.join(paths) + path.mkdir(parents=True, exist_ok=True) + # create 'dataset_metadata.json' with fixture_dataset_metadata + with open(path / 'dataset_metadata.json', 'w') as f: + json.dump(fixture_dataset_metadata, f) -def test_drop_removed_data(): - assert True + # Mock pysolr.Solr + solr_instance_mock = mocker.MagicMock() + mock_solr = mocker.patch(SOLR, return_value=solr_instance_mock) + # mock solr.search to return a list with 2 elements + solr_instance_mock.search.return_value = [{}] + # Run drop_removed_data + drop_removed_data() + + # assert mock_solr was 
called 5 times, once for each core including datasets + assert mock_solr.call_count == 5 + + # assert solr's search and delete function is called a total of 10 times + # 2 times for each of the 5 cores, where 2 times represents drop1 and drop2 + assert solr_instance_mock.search.call_count == 10 + assert solr_instance_mock.delete.call_count == 10 + + +@pytest.fixture +def fixture_solr_response(): + return { + 'response': { + 'docs': [ + { + 'name': 'test', + 'iati_cloud_indexed': True, + }, + { + 'name': 'drop1', + 'iati_cloud_indexed': True, + }, + { + 'name': 'drop2', + 'iati_cloud_indexed': True, + } + ] + } + } + + +@pytest.fixture +def fixture_dataset_metadata(): + return [ + { + 'name': 'test', + 'id': 'test', + }, + { + 'name': 'new', + 'id': 'new', + } + ] From 0cf868aa0db0d2b1cd8cb80fbac06c17a1f4ec5d Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Mon, 13 Nov 2023 12:14:51 +0100 Subject: [PATCH 31/49] feat: added tests for activity subtypes --- .../processing/activity_subtypes.py | 4 +- .../processing/test_activity_subtypes.py | 87 +++++++++++++++++-- 2 files changed, 83 insertions(+), 8 deletions(-) diff --git a/direct_indexing/processing/activity_subtypes.py b/direct_indexing/processing/activity_subtypes.py index fc039a24d..fd78d16f1 100644 --- a/direct_indexing/processing/activity_subtypes.py +++ b/direct_indexing/processing/activity_subtypes.py @@ -49,11 +49,11 @@ def extract_subtype(activity, subtype): # get subtype subtype_in_data = activity[subtype] - if type(subtype_in_data) is dict: + if isinstance(subtype_in_data, dict): subtype_in_data = [subtype_in_data] # traverse subtype list for i, subtype_element in enumerate(list(subtype_in_data)): - if type(subtype_element) != dict: + if not isinstance(subtype_element, dict): continue # skip if the element is broken # Get the value of the subtype element into a new dict with the key being the subtype. 
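# e.g. subtype 'transaction' gives {'transaction': <contents of that transaction element>}; process_subtype_dict later decides which activity-level fields (such as the title) to copy alongside it.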
subtype_dict = {subtype: dict(subtype_element)} diff --git a/tests/direct_indexing/processing/test_activity_subtypes.py b/tests/direct_indexing/processing/test_activity_subtypes.py index 3b5eafbcf..ec1ad0b86 100644 --- a/tests/direct_indexing/processing/test_activity_subtypes.py +++ b/tests/direct_indexing/processing/test_activity_subtypes.py @@ -1,13 +1,88 @@ -# TODO +from direct_indexing.processing.activity_subtypes import extract_all_subtypes, extract_subtype, process_subtype_dict -def test_extract_subtype(): - assert True +def test_extract_subtype(mocker): + transaction = 'transaction' + # Test an empty list is returned if the subtype in our subtype processing list + data = {} + subtype = 'non-existent' + assert extract_subtype(data, subtype) == [] + # Test an empty list is returned if the subtype is not in the data + subtype = transaction + assert extract_subtype(data, subtype) == [] + + # Test that if a subtype in the data is a dict, it is converted to a list + data = {transaction: {}} + extract_subtype(data, transaction) + + # Test that if a subtype in the data is a list, but the elements are not dicts, we skip + data = {'title': 'title', transaction: [None, {'value': 1, 'currency': 'USD', 'description': 'test'}]} + # mock process_subtype_dict + mock_process = mocker.patch('direct_indexing.processing.activity_subtypes.process_subtype_dict') + extract_subtype(data, transaction) + assert mock_process.call_count == len(data.keys()) # once for each key in data def test_process_subtype_dict(): - assert True + tvu = 'transaction.value-usd' + bvu = 'budget.value-usd' + # Test if key is in AVAILABLE_SUBTYPES, it is nothing changes in subtype_dict + subtype_dict = {'transaction': {'value': 1}} + expected_res = subtype_dict.copy() + key = 'transaction' + res = process_subtype_dict(subtype_dict, key, None, None, None, None) + assert res == expected_res + + # Test if key is in exclude fields we do not include it in the subtype dict + res = process_subtype_dict(subtype_dict, bvu, None, None, [bvu], None) + assert res == expected_res + + # Test that a specific value which is a dict can be retrieved + data = {'title': 'title', 'budget': {'value': 1}, tvu: 1.1} + expected_res = subtype_dict.copy() + expected_res[tvu] = 1.1 + res = process_subtype_dict(subtype_dict, tvu, None, data, [], [tvu]) + assert res == expected_res + + # Test that the value of a specific element is extracted from the list + data = {'title': 'title', 'budget': {'value': 1}, tvu: [1.1]} + expected_res = subtype_dict.copy() + expected_res[tvu] = 1.1 + res = process_subtype_dict(subtype_dict, tvu, 0, data, [], [tvu]) + assert res == expected_res + + # Test that additional fields are kept without modification + expected_res['title'] = 'title' + res = process_subtype_dict(subtype_dict, 'title', None, data, [], []) + assert res == expected_res + +def test_extract_all_subtypes(mocker): + # mock index_many_to_many_relations and extract_subtype + mock_index = mocker.patch('direct_indexing.processing.activity_subtypes.index_many_to_many_relations') + mock_extract = mocker.patch('direct_indexing.processing.activity_subtypes.extract_subtype', + return_value=[{}]) + data = {} + subtypes = {} + subtypes = extract_all_subtypes(subtypes, data) + mock_index.assert_called_once() + mock_extract.assert_not_called() + assert subtypes == {} + assert data == {} -def test_extract_all_subtypes(): - assert True + data = [ + { + 'budget': {}, + 'result': {}, + 'transaction': {} + }, { + 'budget': {}, + 'result': {}, + 'transaction': {} + } + ] + 
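# With two activities that each carry all three subtypes, expect one index_many_to_many_relations call per activity and one extract_subtype call per (activity, subtype) pair. +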
subtypes = {'budget': [], 'result': [], 'transaction': []} + extract_all_subtypes(subtypes, data) + # assert mock_index called 3 times + assert mock_index.call_count == len(data) + 1 # +1 for the previous test + assert mock_extract.call_count == len(data) * len(subtypes) # 2 * 3 From 1eb83d1db713ca67d6e00c5daaee3532293d3cfb Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Mon, 13 Nov 2023 14:34:13 +0100 Subject: [PATCH 32/49] feat: added tests for processing datasets --- direct_indexing/processing/dataset.py | 12 +- .../processing/test_dataset.py | 202 ++++++++++++++++-- 2 files changed, 197 insertions(+), 17 deletions(-) diff --git a/direct_indexing/processing/dataset.py b/direct_indexing/processing/dataset.py index e040481b2..dfa02d2ac 100644 --- a/direct_indexing/processing/dataset.py +++ b/direct_indexing/processing/dataset.py @@ -2,8 +2,8 @@ import logging import os import xml.etree.ElementTree as ET - from datetime import datetime + from django.conf import settings from pysolr import Solr from xmljson import badgerfish as bf @@ -147,6 +147,8 @@ def convert_and_save_xml_to_processed_json(filepath, filetype, codelist, currenc data = organisation_custom_fields.add_all(data) json_path = json_filepath(filepath) + if not json_path: + return False with open(json_path, 'w') as json_file: json.dump(data, json_file) @@ -164,8 +166,11 @@ def json_filepath(filepath): :param filepath: The filepath of the dataset. :return: the filepath of the json file. """ - converted_path = ''.join((os.path.splitext(filepath)[0], '.json')) - return converted_path + try: + converted_path = ''.join((os.path.splitext(filepath)[0], '.json')) + return converted_path + except Exception: + return False def dataset_subtypes(filetype, data, json_path): @@ -183,7 +188,6 @@ def dataset_subtypes(filetype, data, json_path): subtypes[key] = [] subtypes = activity_subtypes.extract_all_subtypes(subtypes, data) - if filetype == 'activity': index_subtypes(json_path, subtypes) diff --git a/tests/direct_indexing/processing/test_dataset.py b/tests/direct_indexing/processing/test_dataset.py index 46af4ea1c..b6c9178e3 100644 --- a/tests/direct_indexing/processing/test_dataset.py +++ b/tests/direct_indexing/processing/test_dataset.py @@ -1,25 +1,201 @@ -# TODO +import xml.etree.ElementTree as ET +import pytest -def test_fun(): - assert True +from direct_indexing.processing.dataset import ( + convert_and_save_xml_to_processed_json, dataset_subtypes, fun, index_dataset, index_subtypes, json_filepath +) +TEST_PATH = '/test/path/test.json' +INDEX_SUCCESS = 'Successfully indexed' -def test_index_dataset(): - assert True +def test_fun(mocker): + validation_status = 'dataset.extras.validation_status' + # mock cu.currencies + mock_currencies = mocker.patch('direct_indexing.processing.dataset.cu.Currencies') + # mock cu.codelist + mock_codelist = mocker.patch('direct_indexing.processing.dataset.codelists.Codelists') + # mock clean_dataset_metadata + mock_clean = mocker.patch('direct_indexing.processing.dataset.clean_dataset_metadata') + # mock get_dataset_filepath + mock_filepath = mocker.patch('direct_indexing.processing.dataset.get_dataset_filepath') + # mock get_dataset_version_validity + mock_validity = mocker.patch('direct_indexing.processing.dataset.get_dataset_version_validity') + # mock get_dataset_filetype + mock_filetype = mocker.patch('direct_indexing.processing.dataset.get_dataset_filetype') + # mock custom_fields.get_custom_metadata + mock_metadata = 
mocker.patch('direct_indexing.processing.dataset.custom_fields.get_custom_metadata') + # mock index_dataset + mock_index_ds = mocker.patch('direct_indexing.processing.dataset.index_dataset', return_value=(True, INDEX_SUCCESS)) # NOQA: 501 + # mock index + mock_index = mocker.patch('direct_indexing.processing.dataset.index', return_value=INDEX_SUCCESS) + # mock Solr + mock_solr = mocker.patch('direct_indexing.processing.dataset.Solr') + mock_metadata.return_value = {validation_status: 'Critical'} -def test_convert_and_save_xml_to_processed_json(): - assert True + fun({}, False) + # First test if all initial functions are called + mock_currencies.assert_called_once() + mock_codelist.assert_called_once() + mock_clean.assert_called_once() + mock_filepath.assert_called_once() + mock_validity.assert_called_once() + mock_filetype.assert_called_once() + mock_metadata.assert_called_once() + # assert mock_solr was not called + mock_solr.assert_not_called() + # assert mock index_dataset was not called + mock_index_ds.assert_not_called() + # assert mock index was called + mock_index.assert_called_once() + # Test that index_dataset is called if the dataset is considered valid + mock_metadata.return_value = {validation_status: 'Valid'} + fun({}, True) + mock_index_ds.assert_called_once() + assert mock_solr.call_count == 4 -def test_json_filepath(): - assert True +def test_index_dataset(mocker): + convert_save = 'direct_indexing.processing.dataset.convert_and_save_xml_to_processed_json' + # mock convert_and_save_xml_to_processed_json, index_to_core + mock_convert = mocker.patch(convert_save, return_value=False) # NOQA: 501 + mock_index = mocker.patch('direct_indexing.processing.dataset.index_to_core', return_value=INDEX_SUCCESS) + assert index_dataset(None, None, None, None, None) == (False, 'No JSON Path found') + mock_convert.assert_called_once() + mock_index.assert_not_called() -def test_dataset_subtypes(): - assert True + mock_convert.return_value = TEST_PATH + assert index_dataset(None, None, None, None, None) == (True, INDEX_SUCCESS) + mock_index.assert_called_once() + mock_index.return_value = 'Failed to index' + assert index_dataset(None, None, None, None, None) == (False, 'Failed to index') -def test_index_subtypes(): - assert True + # Test that if index_dataset raises an exception with error message 'test', it returns a tuple False, 'test' + mocker.patch(convert_save, side_effect=Exception('test')) # NOQA: 501 + assert index_dataset(None, None, None, None, None) == (False, 'test') + + +def test_convert_and_save_xml_to_processed_json(mocker, tmp_path, fixture_xml_act, fixture_xml_org): + # mock recursive_attribute_cleaning, custom_fields.add_all, organisation_custom_fields.add_all, json.dump, dataset_subtypes # NOQA: 501 + mock_clean = mocker.patch('direct_indexing.processing.dataset.recursive_attribute_cleaning', return_value={}) + mock_add_all = mocker.patch('direct_indexing.processing.dataset.custom_fields.add_all', return_value={}) + mock_add_all_org = mocker.patch('direct_indexing.processing.dataset.organisation_custom_fields.add_all', return_value={}) # NOQA: 501 + mock_json_filepath = mocker.patch('direct_indexing.processing.dataset.json_filepath', return_value=str(tmp_path / 'test.json')) # NOQA: 501 + mock_json = mocker.patch('direct_indexing.processing.dataset.json.dump') + mock_subtypes = mocker.patch('direct_indexing.processing.dataset.dataset_subtypes') + xml_path = tmp_path / 'test.xml' + xml_path.write_text("test") + + # Test that if filetype activity, but iati-activities is not 
in the xml, we return False for data_found + assert not convert_and_save_xml_to_processed_json(xml_path, 'activity', None, None, None) + # Test that if filetype activity, and iati-activities is in the xml but no child activity, + # we return False for data_found + xml_path.write_text('') + assert not convert_and_save_xml_to_processed_json(xml_path, 'activity', None, None, None) + # Test that if filetype organisation, but iati-organisations is not in the xml, we return False for data_found + assert not convert_and_save_xml_to_processed_json(xml_path, 'organisation', None, None, None) + # Test that if filetype organisation, and iati-organisations is in the xml but no child organisation, + # we return False for data_found + xml_path.write_text('') + assert not convert_and_save_xml_to_processed_json(xml_path, 'organisation', None, None, None) + + # mock the value of settings.FCDO_INSTANCE to False + mocker.patch('direct_indexing.processing.dataset.settings.FCDO_INSTANCE', True) + + # Test that if there is an activity, we call recursive_attribute_cleaning, custom_fields.add_all, + # json.dump, dataset_subtypes + xml_path.write_text(fixture_xml_act) + convert_and_save_xml_to_processed_json(xml_path, 'activity', None, None, None) + # Assert that recursive_attribute_cleaning is called with the data + mock_clean.assert_called_once() + mock_add_all.assert_called_once() + mock_add_all_org.assert_not_called() + mock_json_filepath.assert_called_once() + mock_json.assert_called_once() + mock_subtypes.assert_not_called() # not called because FCDO instance is True + # Test that dataset_subtypes is run if settings.FCDO_INSTANCE is False + mocker.patch('direct_indexing.processing.dataset.settings.FCDO_INSTANCE', False) + convert_and_save_xml_to_processed_json(xml_path, 'activity', None, None, None) + mock_subtypes.assert_called_once() + + # Test that if there is an organisation, we call recursive_attribute_cleaning, + # organisation_custom_fields.add_all, json.dump, dataset_subtypes + xml_path.write_text(fixture_xml_org) + convert_and_save_xml_to_processed_json(xml_path, 'organisation', None, None, None) + # Assert that recursive_attribute_cleaning is called with the data + assert mock_clean.call_count == 3 # +2 for the previous tests + assert mock_add_all.call_count == 2 # not more than 2, because only once for the previous tests + mock_add_all_org.assert_called_once() + assert mock_json_filepath.call_count == 3 # +2 for the previous tests + assert mock_json.call_count == 3 # +2 for the previous tests + + # Assert if json_filepath returns False, the return value is False + mock_json_filepath.return_value = False + assert not convert_and_save_xml_to_processed_json(xml_path, 'organisation', None, None, None) + + # Assert if ET raises a ParseError, the return value is None + mocker.patch('xml.etree.ElementTree.parse', side_effect=ET.ParseError) + assert convert_and_save_xml_to_processed_json(None, None, None, None, None) is None + + +def test_json_filepath(mocker): + # Assert that given a filepath with any file extension, we return the same filepath with .json appended + assert json_filepath('/test/path/test.xml') == TEST_PATH + assert json_filepath('/test/path/test.csv') == TEST_PATH + assert json_filepath('/test/path') == '/test/path.json' + + # Test that if json_filepath raises an exception, it returns the provided path + mocker.patch('os.path.splitext', side_effect=Exception) + assert not json_filepath('/test/path') + + +def test_dataset_subtypes(mocker): + # mock activity_subtypes.extract_all_subtypes 
and index_subtypes + mock_extract = mocker.patch('direct_indexing.processing.dataset.activity_subtypes.extract_all_subtypes', + return_value={}) + mock_index = mocker.patch('direct_indexing.processing.dataset.index_subtypes') + + # Test that if filetype is not activity, we do not call extract_all_subtypes or index_subtypes + dataset_subtypes('organisation', {}, 'test.json') + mock_extract.assert_not_called() + mock_index.assert_not_called() + + # Test that we call extract_all_subtypes and index_subtypes if filetype is activity + dataset_subtypes('activity', {}, 'test.json') + mock_extract.assert_called_once() + mock_extract.assert_called_with({'transaction': [], 'budget': [], 'result': []}, {}) + mock_index.assert_called_once() + + +def test_index_subtypes(mocker, tmp_path): + # mock index_to_core and json.dump + mock_index = mocker.patch('direct_indexing.processing.dataset.index_to_core') + mock_json = mocker.patch('direct_indexing.processing.dataset.json.dump') + mock_open = mocker.patch('builtins.open', mocker.mock_open()) + + # Test that we don't call index_to_core or json.dump if there are no subtypes + json_path = tmp_path / 'activity.json' + subtypes = {} + index_subtypes(json_path, subtypes) + mock_index.assert_not_called() + mock_json.assert_not_called() + + # Assert that we index the result subtype + subtypes = {'result': {}} + index_subtypes(json_path, subtypes) + mock_index.assert_called_once() + mock_json.assert_called_once() + mock_open.assert_called_with(str(tmp_path / 'activity_result.json'), 'w') + + +@pytest.fixture +def fixture_xml_act(): + return 'test-org-1' # NOQA: 501 + + +@pytest.fixture +def fixture_xml_org(): + return 'test-org' # NOQA: 501 From cee1bad38f4caeea7084078132cc188eee1ce1da Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Mon, 13 Nov 2023 15:32:29 +0100 Subject: [PATCH 33/49] feat: added tests for tasks.py --- direct_indexing/tasks.py | 4 +- .../processing/test_dataset.py | 7 +- tests/direct_indexing/test_tasks.py | 137 ++++++++++++++++-- 3 files changed, 128 insertions(+), 20 deletions(-) diff --git a/direct_indexing/tasks.py b/direct_indexing/tasks.py index ec599b6e8..599df1cf1 100644 --- a/direct_indexing/tasks.py +++ b/direct_indexing/tasks.py @@ -11,6 +11,7 @@ from direct_indexing import direct_indexing from direct_indexing.metadata.util import retrieve from direct_indexing.util import datadump_success +from iaticloud.celery import app @shared_task @@ -89,7 +90,7 @@ def fcdo_replace_partial_url(find_url, replace_url): # find datasets that need to be replaced if 'resources' not in dataset or 'name' not in dataset or 'organization' not in dataset: continue - if 'url' not in dataset['resources'][0]: + if 'url' not in dataset['resources'][0] or 'hash' not in dataset['resources'][0]: continue url = dataset['resources'][0]['url'] # always 1 url if find_url not in url: @@ -129,5 +130,4 @@ def fcdo_replace_partial_url(find_url, replace_url): @shared_task def revoke_all_tasks(): - from iaticloud.celery import app app.control.purge() diff --git a/tests/direct_indexing/processing/test_dataset.py b/tests/direct_indexing/processing/test_dataset.py index b6c9178e3..7383e066f 100644 --- a/tests/direct_indexing/processing/test_dataset.py +++ b/tests/direct_indexing/processing/test_dataset.py @@ -7,6 +7,7 @@ ) TEST_PATH = '/test/path/test.json' +TEST_JSON = 'test.json' INDEX_SUCCESS = 'Successfully indexed' @@ -83,7 +84,7 @@ def test_convert_and_save_xml_to_processed_json(mocker, tmp_path, fixture_xml_ac mock_clean = 
mocker.patch('direct_indexing.processing.dataset.recursive_attribute_cleaning', return_value={}) mock_add_all = mocker.patch('direct_indexing.processing.dataset.custom_fields.add_all', return_value={}) mock_add_all_org = mocker.patch('direct_indexing.processing.dataset.organisation_custom_fields.add_all', return_value={}) # NOQA: 501 - mock_json_filepath = mocker.patch('direct_indexing.processing.dataset.json_filepath', return_value=str(tmp_path / 'test.json')) # NOQA: 501 + mock_json_filepath = mocker.patch('direct_indexing.processing.dataset.json_filepath', return_value=str(tmp_path / TEST_JSON)) # NOQA: 501 mock_json = mocker.patch('direct_indexing.processing.dataset.json.dump') mock_subtypes = mocker.patch('direct_indexing.processing.dataset.dataset_subtypes') xml_path = tmp_path / 'test.xml' @@ -159,12 +160,12 @@ def test_dataset_subtypes(mocker): mock_index = mocker.patch('direct_indexing.processing.dataset.index_subtypes') # Test that if filetype is not activity, we do not call extract_all_subtypes or index_subtypes - dataset_subtypes('organisation', {}, 'test.json') + dataset_subtypes('organisation', {}, TEST_JSON) mock_extract.assert_not_called() mock_index.assert_not_called() # Test that we call extract_all_subtypes and index_subtypes if filetype is activity - dataset_subtypes('activity', {}, 'test.json') + dataset_subtypes('activity', {}, TEST_JSON) mock_extract.assert_called_once() mock_extract.assert_called_with({'transaction': [], 'budget': [], 'result': []}, {}) mock_index.assert_called_once() diff --git a/tests/direct_indexing/test_tasks.py b/tests/direct_indexing/test_tasks.py index 3ec026b68..da0ec49d0 100644 --- a/tests/direct_indexing/test_tasks.py +++ b/tests/direct_indexing/test_tasks.py @@ -1,29 +1,136 @@ -# TODO +import pysolr +import pytest +from direct_indexing.tasks import ( + clear_all_cores, clear_cores_with_name, fcdo_replace_partial_url, revoke_all_tasks, start, subtask_dataset_metadata, + subtask_publisher_metadata +) -def test_clear_all_cores(): - assert True +def test_clear_all_cores(mocker): + # mock direct_indexing.clear_indices + mock_clear = mocker.patch('direct_indexing.direct_indexing.clear_indices') + clear_all_cores() + mock_clear.assert_called_once() -def test_clear_cores_with_name(): - assert True +def test_clear_cores_with_name(mocker): + # mock direct_indexing.clear_indices_for_core + mock_clear = mocker.patch('direct_indexing.direct_indexing.clear_indices_for_core') + clear_cores_with_name() + mock_clear.assert_called_once() -def test_start(): - assert True +def test_start(mocker): + # Mock subtask delays + mock_subtask_publisher_metadata = mocker.patch('direct_indexing.tasks.subtask_publisher_metadata.delay') + mock_subtask_dataset_metadata = mocker.patch('direct_indexing.tasks.subtask_dataset_metadata.delay') -def test_subtask_publisher_metadata(): - assert True + # mock datadump_success + mock_datadump = mocker.patch('direct_indexing.tasks.datadump_success', return_value=False) + with pytest.raises(ValueError): + start() + mock_subtask_publisher_metadata.assert_not_called() + # mock clear_indices + mock_datadump.return_value = True + mocker.patch('direct_indexing.direct_indexing.clear_indices', side_effect=pysolr.SolrError) + assert start(False) == "Error clearing the direct indexing cores, check your Solr instance." 
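+ # When clearing the Solr cores fails, the dataset metadata subtask must not be queued.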
+ mock_subtask_dataset_metadata.assert_not_called() -def test_subtask_dataset_metadata(): - assert True + res = start(True) + mock_subtask_publisher_metadata.assert_called_once() + mock_subtask_dataset_metadata.assert_called_once() + assert res == "Both the publisher and dataset metadata indexing have begun." -def test_fcdo_replace_partial_url(): - assert True +def test_subtask_publisher_metadata(mocker): + # mock direct_indexing.run_publisher_metadata + mock_run = mocker.patch('direct_indexing.direct_indexing.run_publisher_metadata', return_value='Success') + res = subtask_publisher_metadata() + assert res == 'Success' + mock_run.assert_called_once() -def test_revoke_all_tasks(): - assert True +def test_subtask_dataset_metadata(mocker): + # mock direct_indexing.run_dataset_metadata + mock_run = mocker.patch('direct_indexing.direct_indexing.run_dataset_metadata', return_value='Success') + res = subtask_dataset_metadata(False) + assert res == 'Success' + mock_run.assert_called_once() + + +def test_fcdo_replace_partial_url(mocker, tmp_path, fixture_dataset_metadata): + testcom = 'https://test.com' + mock_url = mocker.patch('direct_indexing.tasks.urllib.request.URLopener.retrieve') + mock_os_remove = mocker.patch('direct_indexing.tasks.os.remove') + mock_json_dump = mocker.patch('direct_indexing.tasks.json.dump') + mock_run = mocker.patch('direct_indexing.tasks.direct_indexing.run_dataset_metadata') + mock_retrieve = mocker.patch('direct_indexing.tasks.retrieve') + mock_retrieve.return_value = fixture_dataset_metadata.copy() + mocker.patch('direct_indexing.tasks.settings.DATASET_PARENT_PATH', tmp_path) + + # if a file does not exist, we expect fcdo_replace_partial_url to return 'this file does not exist {...}' + assert 'this file does not exist' in fcdo_replace_partial_url(testcom, testcom) + # Make the files for the matching datasets + path1 = tmp_path / 'iati-data-main/data/test_org/test5.xml' + path2 = tmp_path / 'iati-data-main/data/test_org/test6.xml' + path1.parent.mkdir(parents=True) + path2.parent.mkdir(parents=True, exist_ok=True) + path1.touch() + path2.touch() + + mock_retrieve.return_value = fixture_dataset_metadata.copy() + fcdo_replace_partial_url(testcom, 'https://test_update.com') + mock_url.assert_called_once() + assert mock_os_remove.call_count == 1 + mock_json_dump.assert_called_once() + mock_run.assert_called_once() + + +def test_revoke_all_tasks(mocker): + # Assert app.control.purge was called + mock_purge = mocker.patch('direct_indexing.tasks.app.control.purge') + revoke_all_tasks() + mock_purge.assert_called_once() + + +@pytest.fixture +def fixture_dataset_metadata(): + return [ + {}, # one empty dataset + { # one without resources + 'name': 'test1', + 'organization': {}, + }, + { # one with no name + 'resources': [], + 'organization': {}, + }, + { # one with no organization + 'resources': [], + 'name': 'test2', + }, + { # one with no url in resources + 'resources': [{}], + 'name': 'test3', + 'organization': {}, + }, + { # one with no hash in resources + 'resources': [{'url': 'https://test.com/1'}], + 'name': 'test3', + 'organization': {}, + }, + { # one with a mismatching url in resources + 'resources': [{'url': 'https://mismatch.com/1', 'hash': 'test321'}], + 'name': 'test4', + 'organization': {'name': 'test_org'}, + }, + { # one with a matching url in resources + 'resources': [{'url': 'https://test.com/1', 'hash': 'test123'}], + 'name': 'test5', + 'organization': { + 'name': 'test_org', + }, + } + ] From 0d1f278428a7cc2e04fa6a5d6de09bfb478e7d8f Mon Sep 17 00:00:00 
2001 From: Sylvan Ridderinkhof Date: Mon, 13 Nov 2023 16:49:15 +0100 Subject: [PATCH 34/49] feat: added tests for many-to-many relations --- .../indexing_manytomany_relations.py | 15 +- .../test_indexing_manytomany_relations.py | 166 ++++++++++++++++-- 2 files changed, 163 insertions(+), 18 deletions(-) diff --git a/direct_indexing/custom_fields/indexing_manytomany_relations.py b/direct_indexing/custom_fields/indexing_manytomany_relations.py index 20151e3c9..a8da3d4fa 100644 --- a/direct_indexing/custom_fields/indexing_manytomany_relations.py +++ b/direct_indexing/custom_fields/indexing_manytomany_relations.py @@ -7,13 +7,13 @@ def index_many_to_many_relations(activity): # Index result indicator, starting with baseline: # An indicator has 0 to N baselines, if 0, represent with index -1, else represent with index n. if 'result' in activity: - if type(activity['result']) != list: + if not isinstance(activity['result'], list): activity['result'] = [activity['result']] for result in activity['result']: add_result_child_indexes(result, 'indicator') # Index participating organisations. if 'participating-org' in activity: - if type(activity['participating-org']) != list: + if not isinstance(activity['participating-org'], list): activity['participating-org'] = [activity['participating-org']] add_participating_org_child_indexes(activity, 'participating-org') @@ -47,7 +47,7 @@ def add_result_child_indexes(field, child): # Check if the child exists and make the child a list if it is a dict. if child not in field: return - if type(field[child]) != list: + if not isinstance(field[child], list): field[child] = [field[child]] add_field_child_field_indexes(field, child, 'baseline') @@ -77,7 +77,7 @@ def add_field_child_field_indexes(data, target_field, field): continue # make sure the baseline is a list of baselines. - if type(target[field]) != list: + if not isinstance(target[field], list): target[field] = [target[field]] field_index = total_field @@ -104,7 +104,7 @@ def add_field_child_field_children_indexes(data, target_field, field, children): for target in data[target_field]: if field in target: # If the second level child is found, loop over this and check if the third level children are found. - if type(target[field]) != list: + if not isinstance(target[field], list): target[field] = [target[field]] iterate_third_level_children(child, data, field, target, target_field, total_field) return data @@ -117,11 +117,12 @@ def iterate_third_level_children(child, data, field, target, target_field, total Use enumerate to only save the index of for the first occurrence. 
""" for item in target[field]: - if type(item) != dict: + if not isinstance(item, dict): field_index = -1 elif child in item: field_index = total_field - if type(item[child]) != list: + print(item[child]) + if not isinstance(item[child], list): total_field += 1 else: total_field += len(item[child]) diff --git a/tests/direct_indexing/custom_fields/test_indexing_manytomany_relations.py b/tests/direct_indexing/custom_fields/test_indexing_manytomany_relations.py index 835c50775..d79987cbe 100644 --- a/tests/direct_indexing/custom_fields/test_indexing_manytomany_relations.py +++ b/tests/direct_indexing/custom_fields/test_indexing_manytomany_relations.py @@ -1,25 +1,169 @@ -# TODO +from direct_indexing.custom_fields.indexing_manytomany_relations import ( + add_field_child_field_children_indexes, add_field_child_field_indexes, add_participating_org_child_indexes, + add_result_child_indexes, index_many_to_many_relations, iterate_third_level_children +) -def test_index_many_to_many_relations(): - assert True +def test_index_many_to_many_relations(mocker): + res = 'result' + p_org = 'participating-org' + # mock add_result_child_indexes, add_participating_org_child_indexes + mock_add_result = mocker.patch('direct_indexing.custom_fields.indexing_manytomany_relations.add_result_child_indexes') # NOQA: 501 + mock_add_participating = mocker.patch('direct_indexing.custom_fields.indexing_manytomany_relations.add_participating_org_child_indexes') # NOQA: 501 + activity = {} + # Given an emtpy activity, assert add_result and add p-org are not called + index_many_to_many_relations(activity) + mock_add_result.assert_not_called() + mock_add_participating.assert_not_called() + assert activity == {} -def test_add_participating_org_child_indexes(): - assert True + # Given an activity with a result which is not a list, assert it is made a list + activity = {res: {}} + ex_res = {res: [{}]} + index_many_to_many_relations(activity) + assert activity == ex_res + # Given an activity with a p-org which is not a list, assert it is made a list + activity = {p_org: {}} + ex_res = {p_org: [{}]} + index_many_to_many_relations(activity) + assert activity == ex_res -def test_add_result_child_indexes(): - assert True + # Given an activity with 2 results and 2 p-orgs, assert add_result and add p-org are called twice + activity = {res: [{}, {}], p_org: [{}, {}]} + index_many_to_many_relations(activity) + assert mock_add_result.call_count == len(activity[res]) + 1 # once per res +1 for the previous test + assert mock_add_participating.call_count == 2 # once for the list of p-orgs +1 for the previous test + + +def test_add_participating_org_child_indexes(mocker): + # mock add_field_child_field_indexes and add_field_child_field_children_indexes + mock_add_field = mocker.patch('direct_indexing.custom_fields.indexing_manytomany_relations.add_field_child_field_indexes') # NOQA: 501 + mock_add_field_children = mocker.patch('direct_indexing.custom_fields.indexing_manytomany_relations.add_field_child_field_children_indexes') # NOQA: 501 + add_participating_org_child_indexes(None, None) + assert mock_add_field.call_count == 6 + mock_add_field_children.assert_called_once() + + +def test_add_result_child_indexes(mocker): + t_str = 'test' + mock_add_field = mocker.patch('direct_indexing.custom_fields.indexing_manytomany_relations.add_field_child_field_indexes') # NOQA: 501 + mock_add_field_children = mocker.patch('direct_indexing.custom_fields.indexing_manytomany_relations.add_field_child_field_children_indexes') # NOQA: 501 + + child = t_str 
+ field = {} + assert not add_result_child_indexes(field, child) + + field = {t_str: {}} + add_result_child_indexes(field, child) + assert field[t_str] == [{}] + assert mock_add_field.call_count == 2 + mock_add_field_children.assert_called_once() def test_add_field_child_field_indexes(): - assert True + target_field = 'indicator' + field = 'baseline' + ibi = 'indicator.baseline-index' + + data = {target_field: [{field: [{}]}]} + add_field_child_field_indexes(data, target_field, field) + assert data[ibi] == [0] + + data = {target_field: [{}]} + add_field_child_field_indexes(data, target_field, field) + assert data[ibi] == [-1] + data = {target_field: [{field: {}}]} + add_field_child_field_indexes(data, target_field, field) + assert data[ibi] == [0] + assert data[target_field][0][field] == [{}] -def test_add_field_child_field_children_indexes(): - assert True + data = {target_field: [{field: [{}]}, {}, {field: [{}]}]} + add_field_child_field_indexes(data, target_field, field) + assert data[ibi] == [0, -1, 1] + + +def test_add_field_child_field_children_indexes(mocker): + # mock iterate_third_level_children + mock_iterate = mocker.patch('direct_indexing.custom_fields.indexing_manytomany_relations.iterate_third_level_children') # NOQA: 501 + # sample usage: add_field_child_field_children_indexes(field, child, 'period', children=['actual', 'target']) + target_field = 'indicator' + field = 'period' + children = ['actual', 'target'] + + data = { + target_field: [{ + field: { + 'actual': {}, + 'target': {} + } + }] + } + add_field_child_field_children_indexes(data, target_field, field, children) + # Expect 2 calls, as we have two children. + assert mock_iterate.call_count == 2 def test_iterate_third_level_children(): - assert True + index = 'indicator.period.actual-index' + target_field = 'indicator' + field = 'period' + child = 'actual' + total_field = 0 + # sample usage: iterate_third_level_children(child, data, field, target, target_field, total_field): + + # Test a single dict + data = { + target_field: [{ + field: [{ + 'actual': {}, + 'target': {} + }] + }], + index: [] + } + target = data[target_field][0] + iterate_third_level_children(child, data, field, target, target_field, total_field) + assert data[index] == [0] + + # Test single value fields + data = { + target_field: [{ + field: ['1', '2'] + }], + index: [] + } + target = data[target_field][0] + iterate_third_level_children(child, data, field, target, target_field, total_field) + assert data[index] == [-1, -1] + + # Test multivalued targets + data = { + target_field: [{ + field: [ + { + 'actual': [{'value': '1'}, {'value': '2'}], + }, + { + 'actual': {'value': '3'}, + } + ] + }], + index: [] + } + target = data[target_field][0] + iterate_third_level_children(child, data, field, target, target_field, total_field) + assert data[index] == [0, 2] # The first starts at 0 with a length of 2, the second starts at index 2 (0+2) + + # Test child not in item + data = { + target_field: [{ + field: [{}, {}] + }], + index: [] + } + target = data[target_field][0] + iterate_third_level_children(child, data, field, target, target_field, total_field) + assert data[index] == [-1, -1] # The first starts at 0 with a length of 2, the second starts at index 2 (0+2) From a92662304f3bd8dbca85797e7f50ab45863341b0 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 14 Nov 2023 14:43:59 +0100 Subject: [PATCH 35/49] feat: added tests for currency conversion --- .../custom_fields/currency_conversion.py | 7 + .../custom_fields/test_currency_conversion.py 
| 2189 ++++++++++++++++- 2 files changed, 2185 insertions(+), 11 deletions(-) diff --git a/direct_indexing/custom_fields/currency_conversion.py b/direct_indexing/custom_fields/currency_conversion.py index f7d3877f8..b854160e6 100644 --- a/direct_indexing/custom_fields/currency_conversion.py +++ b/direct_indexing/custom_fields/currency_conversion.py @@ -38,6 +38,9 @@ def currency_conversion(data, currencies): def convert_currencies_from_list(data, field, currencies, default_currency, value, rate, first_currency, t_type, curr_convert): + if field not in data: + return value, rate, first_currency, t_type + for item in data[field]: c_value, c_rate, currency = convert(item, currencies, default_currency=default_currency, @@ -56,6 +59,9 @@ def convert_currencies_from_list(data, field, currencies, default_currency, valu def convert_currencies_from_dict(data, field, currencies, default_currency, value, rate, t_type, curr_convert): + if field not in data: + return value, rate, "", t_type + c_value, c_rate, first_currency = convert(data[field], currencies, default_currency=default_currency, target_currency=curr_convert) @@ -123,6 +129,7 @@ def get_ym(data): now = datetime.datetime.now() if year > now.year: year = now.year + month = now.month if year == now.year and month > now.month: month = now.month diff --git a/tests/direct_indexing/custom_fields/test_currency_conversion.py b/tests/direct_indexing/custom_fields/test_currency_conversion.py index 7ed5729ca..a30d4185c 100644 --- a/tests/direct_indexing/custom_fields/test_currency_conversion.py +++ b/tests/direct_indexing/custom_fields/test_currency_conversion.py @@ -1,25 +1,2192 @@ -# TODO +from datetime import datetime +import pytest -def test_currency_conversion(): - assert True +from direct_indexing.custom_fields.currency_conversion import ( + convert, convert_currencies_from_dict, convert_currencies_from_list, currency_conversion, get_ym, + save_converted_value_to_data +) +from direct_indexing.custom_fields.models import currencies -def test_convert_currencies_from_list(): - assert True +def test_currency_conversion(mocker, fixture_currencies): + ret = (None, None, None, None) + mock_convert_list = mocker.patch('direct_indexing.custom_fields.currency_conversion.convert_currencies_from_list', return_value=ret) # NOQA: 501 + mock_convert_dict = mocker.patch('direct_indexing.custom_fields.currency_conversion.convert_currencies_from_dict', return_value=ret) # NOQA: 501 + mock_save = mocker.patch('direct_indexing.custom_fields.currency_conversion.save_converted_value_to_data') + # Test nothing changes in the data if no currencies are provided + res = currency_conversion({}, fixture_currencies) + mock_convert_list.assert_not_called() + mock_convert_dict.assert_not_called() + mock_save.assert_not_called() + assert res == {} -def test_convert_currencies_from_dict(): - assert True + # Test the convert_currencies_from_list is called twice when data contains a budget + data = {'budget': []} + mock_save.return_value = data + data = currency_conversion(data, fixture_currencies) + assert mock_convert_list.call_count == 2 + mock_convert_dict.assert_not_called() + assert mock_save.call_count == 2 + # Test the convert_currencies_from_dict is called twice when data contains a transaction + mock_convert_list.reset_mock(), mock_convert_dict.reset_mock(), mock_save.reset_mock() + data = {'transaction': {}} + mock_save.return_value = data + data = currency_conversion(data, fixture_currencies) + mock_convert_list.assert_not_called() + assert 
mock_convert_dict.call_count == 2 + assert mock_save.call_count == 2 -def test_convert(): - assert True + # Test the convert_currencies_from_dict is called twice when data contains a transaction, and list when data contains a budget or planned-disbursement # NOQA: 501 + mock_convert_list.reset_mock(), mock_convert_dict.reset_mock(), mock_save.reset_mock() + data = {'transaction': {}, 'budget': [], 'planned-disbursement': []} + mock_save.return_value = data + data = currency_conversion(data, fixture_currencies) + assert mock_convert_list.call_count == 4 + assert mock_convert_dict.call_count == 2 + assert mock_save.call_count == 6 + + # Test the default currency is passed to convert_from_dict when provided + mock_convert_list.reset_mock(), mock_convert_dict.reset_mock(), mock_save.reset_mock() + data = {'default-currency': "KRR", 'transaction': {}} + mock_save.return_value = data + data = currency_conversion(data, fixture_currencies) + assert mock_convert_dict.call_count == 2 + # assert the 4th passed argument in call_args is the default currency + assert mock_convert_dict.call_args[0][3] == "KRR" + + +def test_convert_currencies_from_list(mocker, fixture_currencies): + # convert result for convert({value: 1, 'value.currency': 'EUR'}, currencies, 'USD', 'USD') + convert_ex_res = (1.0705273292815591, 0.9341190763164442, 'EUR') # Values based on fixture_currencies euro to gbp + # mock convert to return convert ex res + mock_convert = mocker.patch('direct_indexing.custom_fields.currency_conversion.convert', return_value=convert_ex_res) # NOQA: 501 + data = {"budget": [{}]} + field = "budget" + currencies = fixture_currencies + default_currency = None + curr_convert = 'USD' + + ex_res = ([convert_ex_res[0]], [convert_ex_res[1]], convert_ex_res[2], []) + assert convert_currencies_from_list(data, field, currencies, default_currency, [], [], "", [], curr_convert) == ex_res # NOQA: 501 + mock_convert.assert_called_once() + + # Test that if the field is transaction, the transaction type code is added to t_type + field = "transaction" + data = {"transaction": [{"transaction-type": {"code": "11"}}]} + ex_res = ([convert_ex_res[0]], [convert_ex_res[1]], convert_ex_res[2], ["11"]) + assert convert_currencies_from_list(data, field, currencies, default_currency, [], [], "", [], curr_convert) == ex_res # NOQA: 501 + + # Test that if no data is provided it returns empty lists + assert convert_currencies_from_list({}, field, currencies, default_currency, [], [], "", [], curr_convert) == ([], [], "", []) # NOQA: 501 + + +def test_convert_currencies_from_dict(mocker, fixture_currencies): + # convert result for convert({value: 1, 'value.currency': 'EUR'}, currencies, 'USD', 'USD') + convert_ex_res = (1.0705273292815591, 0.9341190763164442, 'EUR') # Values based on fixture_currencies euro to gbp + # mock convert to return convert ex res + mock_convert = mocker.patch('direct_indexing.custom_fields.currency_conversion.convert', return_value=convert_ex_res) # NOQA: 501 + data = {"budget": {}} + field = "budget" + currencies = fixture_currencies + default_currency = None + curr_convert = 'USD' + + ex_res = ([convert_ex_res[0]], [convert_ex_res[1]], convert_ex_res[2], []) + assert convert_currencies_from_dict(data, field, currencies, default_currency, [], [], [], curr_convert) == ex_res + mock_convert.assert_called_once() + + # Test that if the field is transaction, the transaction type code is added to t_type + field = "transaction" + data = {"transaction": {"transaction-type": {"code": "11"}}} + ex_res = 
([convert_ex_res[0]], [convert_ex_res[1]], convert_ex_res[2], ["11"]) + assert convert_currencies_from_dict(data, field, currencies, default_currency, [], [], [], curr_convert) == ex_res + + # Test that if no data is provided it returns empty lists + assert convert_currencies_from_dict({}, field, currencies, default_currency, [], [], [], curr_convert) == ([], [], "", []) # NOQA: 501 + + +def test_convert(mocker, fixture_currencies): + # Sample usage convert(data[field], currencies, default_currency=default_currency, target_currency=curr_convert) + currencies = fixture_currencies + default_currency = None + target_currency = 'USD' + value = 'value' + + empty_res = (None, None, None) + # Test that if no value is provided we return None + assert convert({}, currencies, default_currency, target_currency) == empty_res + + # Test that if no currency nor default currency is provided we return None + assert convert({value: 1}, currencies, default_currency, target_currency) == empty_res + + # Test that if get_ym returns None, this function returns None + mock_ym = mocker.patch('direct_indexing.custom_fields.currency_conversion.get_ym') + mock_ym.return_value = (None, None) + assert convert({value: 1}, currencies, 'EUR', target_currency) == empty_res + + # Test that if a value.currency is provided, it returns the correct converted value, rate and currency + mock_ym.return_value = (2023, 3) + ex_res = (1.0705273292815591, 0.9341190763164442, 'EUR') # Values based on fixture_currencies euro to gbp + assert convert({value: 1, 'value.currency': 'EUR'}, currencies, default_currency, target_currency) == ex_res + + # Test that if a default currency is provided, it returns the correct converted value, rate and currency + assert convert({value: 1}, currencies, 'EUR', target_currency) == ex_res + + # Test that if a different value.currency to default currency is provided, it uses the value.currency + assert convert({value: 1, 'value.currency': 'EUR'}, currencies, 'CAD', target_currency) == ex_res def test_get_ym(): - assert True + now = datetime.now() + vvd = 'value.value-date' + sample_date = '2019-03-31T00:00:00Z' + malformed_date = '31-03-2019' + future_year = '9999-03-31T00:00:00Z' + + future_month = f'{now.year}-{now.month + 1}-31T00:00:00Z' + empty_res = (None, None) + + # Test that if no date is provided, it returns None + assert get_ym({}) == empty_res + assert get_ym({vvd: ''}) == empty_res + assert get_ym({vvd: malformed_date}) == empty_res + + assert get_ym({vvd: sample_date}) == (2019, 3) + # Assert that if the year is in the future + assert get_ym({vvd: future_year}) == (now.year, now.month) + # Assert that if the month is in the future + assert get_ym({vvd: future_month}) == (now.year, now.month) def test_save_converted_value_to_data(): - assert True + # Sample usage: save_converted_value_to_data(data, value, field, rate, first_currency, t_type, curr_convert) + value = [1, 2] + field = 'transaction' + rate = '1.2' + first_currency = 'EUR' + t_type = '1' + curr_convert = 'GBP' + expected_res = { + 'transaction.value-GBP': value, + 'transaction.value-GBP.sum': sum(value), + 'transaction.value-GBP.conversion-rate': rate, + 'transaction.value-GBP.conversion-currency': first_currency, + 'transaction.value-GBP-type': t_type, + } + assert save_converted_value_to_data({}, value, field, rate, first_currency, t_type, curr_convert) == expected_res + + +@pytest.fixture +def fixture_currencies(monkeypatch): + data = [ + { + "year": 2023, + "month": 3, + "currency_id": "DZD", + "value": 0.0055024487 + }, + { 
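+ # Rates appear to be expressed against a common base unit: for 2023-03, EUR 0.8010585652 / USD 0.748284087 ≈ 1.0705 and USD / EUR ≈ 0.9341, the conversion factors asserted above.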
+ "year": 2023, + "month": 3, + "currency_id": "AUD", + "value": 0.5004460909 + }, + { + "year": 2023, + "month": 3, + "currency_id": "BRL", + "value": 0.1435369091 + }, + { + "year": 2023, + "month": 3, + "currency_id": "BND", + "value": 0.5580111364 + }, + { + "year": 2023, + "month": 3, + "currency_id": "CAD", + "value": 0.5469695455 + }, + { + "year": 2023, + "month": 3, + "currency_id": "CLP", + "value": 0.0009245674 + }, + { + "year": 2023, + "month": 3, + "currency_id": "CNY", + "value": 0.1084970435 + }, + { + "year": 2023, + "month": 3, + "currency_id": "CZK", + "value": 0.0338277913 + }, + { + "year": 2023, + "month": 3, + "currency_id": "DKK", + "value": 0.1075872609 + }, + { + "year": 2023, + "month": 3, + "currency_id": "EUR", + "value": 0.8010585652 + }, + { + "year": 2023, + "month": 3, + "currency_id": "INR", + "value": 0.009096549 + }, + { + "year": 2023, + "month": 3, + "currency_id": "ILS", + "value": 0.2065905714 + }, + { + "year": 2023, + "month": 3, + "currency_id": "JPY", + "value": 0.0055926118 + }, + { + "year": 2023, + "month": 3, + "currency_id": "KWD", + "value": 2.4406338889 + }, + { + "year": 2023, + "month": 3, + "currency_id": "MYR", + "value": 0.167557 + }, + { + "year": 2023, + "month": 3, + "currency_id": "MUR", + "value": 0.0160274591 + }, + { + "year": 2023, + "month": 3, + "currency_id": "MXN", + "value": 0.0407320857 + }, + { + "year": 2023, + "month": 3, + "currency_id": "NZD", + "value": 0.4642263478 + }, + { + "year": 2023, + "month": 3, + "currency_id": "NOK", + "value": 0.070986187 + }, + { + "year": 2023, + "month": 3, + "currency_id": "OMR", + "value": 1.9457838889 + }, + { + "year": 2023, + "month": 3, + "currency_id": "PEN", + "value": 0.1980674545 + }, + { + "year": 2023, + "month": 3, + "currency_id": "PHP", + "value": 0.0136560478 + }, + { + "year": 2023, + "month": 3, + "currency_id": "PLN", + "value": 0.1707347143 + }, + { + "year": 2023, + "month": 3, + "currency_id": "QAR", + "value": 0.2055417778 + }, + { + "year": 2023, + "month": 3, + "currency_id": "RUB", + "value": 0.0098281315 + }, + { + "year": 2023, + "month": 3, + "currency_id": "SGD", + "value": 0.5580978696 + }, + { + "year": 2023, + "month": 3, + "currency_id": "ZAR", + "value": 0.0409649182 + }, + { + "year": 2023, + "month": 3, + "currency_id": "SEK", + "value": 0.0714488217 + }, + { + "year": 2023, + "month": 3, + "currency_id": "CHF", + "value": 0.8084539565 + }, + { + "year": 2023, + "month": 3, + "currency_id": "THB", + "value": 0.0216839455 + }, + { + "year": 2023, + "month": 3, + "currency_id": "TTD", + "value": 0.1108243333 + }, + { + "year": 2023, + "month": 3, + "currency_id": "AED", + "value": 0.2036608235 + }, + { + "year": 2023, + "month": 3, + "currency_id": "GBP", + "value": 0.9081411304 + }, + { + "year": 2023, + "month": 3, + "currency_id": "USD", + "value": 0.748284087 + }, + { + "year": 2023, + "month": 3, + "currency_id": "UYU", + "value": 0.0191300048 + }, + { + "year": 2023, + "month": 3, + "currency_id": "BWP", + "value": 0.0564973381 + }, + { + "year": 2023, + "month": 3, + "currency_id": "KRW", + "value": 0.0005730496 + }, + { + "year": 2023, + "month": 3, + "currency_id": "SAR", + "value": 0.1994885882 + }, + { + "year": 2023, + "month": 3, + "currency_id": "KZT", + "value": 0.00164567 + }, + { + "year": 2023, + "month": 3, + "currency_id": "PKR", + "value": 0.00261976 + }, + { + "year": 2023, + "month": 3, + "currency_id": "LKR", + "value": 0.00227131 + }, + { + "year": 2023, + "month": 4, + "currency_id": "DZD", + "value": 0.0054729772 + }, + { 
+ "year": 2023, + "month": 4, + "currency_id": "AUD", + "value": 0.4962634706 + }, + { + "year": 2023, + "month": 4, + "currency_id": "BWP", + "value": 0.0563683588 + }, + { + "year": 2023, + "month": 4, + "currency_id": "BRL", + "value": 0.1477044444 + }, + { + "year": 2023, + "month": 4, + "currency_id": "BND", + "value": 0.5565637222 + }, + { + "year": 2023, + "month": 4, + "currency_id": "CAD", + "value": 0.5496927778 + }, + { + "year": 2023, + "month": 4, + "currency_id": "CLP", + "value": 0.0009223087 + }, + { + "year": 2023, + "month": 4, + "currency_id": "CNY", + "value": 0.1076211053 + }, + { + "year": 2023, + "month": 4, + "currency_id": "CZK", + "value": 0.0346886611 + }, + { + "year": 2023, + "month": 4, + "currency_id": "DKK", + "value": 0.1091274706 + }, + { + "year": 2023, + "month": 4, + "currency_id": "EUR", + "value": 0.8129906111 + }, + { + "year": 2023, + "month": 4, + "currency_id": "INR", + "value": 0.0090398547 + }, + { + "year": 2023, + "month": 4, + "currency_id": "ILS", + "value": 0.2037545714 + }, + { + "year": 2023, + "month": 4, + "currency_id": "JPY", + "value": 0.005560194 + }, + { + "year": 2023, + "month": 4, + "currency_id": "KRW", + "value": 0.0005616467 + }, + { + "year": 2023, + "month": 4, + "currency_id": "KWD", + "value": 2.4214066667 + }, + { + "year": 2023, + "month": 4, + "currency_id": "MYR", + "value": 0.1676217222 + }, + { + "year": 2023, + "month": 4, + "currency_id": "MUR", + "value": 0.016357335 + }, + { + "year": 2023, + "month": 4, + "currency_id": "MXN", + "value": 0.0409774647 + }, + { + "year": 2023, + "month": 4, + "currency_id": "NZD", + "value": 0.4605645882 + }, + { + "year": 2023, + "month": 4, + "currency_id": "NOK", + "value": 0.0705617588 + }, + { + "year": 2023, + "month": 4, + "currency_id": "OMR", + "value": 1.92819 + }, + { + "year": 2023, + "month": 4, + "currency_id": "PEN", + "value": 0.1970261111 + }, + { + "year": 2023, + "month": 4, + "currency_id": "PHP", + "value": 0.0134014063 + }, + { + "year": 2023, + "month": 4, + "currency_id": "PLN", + "value": 0.1754046111 + }, + { + "year": 2023, + "month": 4, + "currency_id": "QAR", + "value": 0.2038464 + }, + { + "year": 2023, + "month": 4, + "currency_id": "RUB", + "value": 0.00913358 + }, + { + "year": 2023, + "month": 4, + "currency_id": "SAR", + "value": 0.1977257273 + }, + { + "year": 2023, + "month": 4, + "currency_id": "SGD", + "value": 0.5565551579 + }, + { + "year": 2023, + "month": 4, + "currency_id": "ZAR", + "value": 0.0407851294 + }, + { + "year": 2023, + "month": 4, + "currency_id": "SEK", + "value": 0.0717207444 + }, + { + "year": 2023, + "month": 4, + "currency_id": "CHF", + "value": 0.8259735556 + }, + { + "year": 2023, + "month": 4, + "currency_id": "THB", + "value": 0.0216301824 + }, + { + "year": 2023, + "month": 4, + "currency_id": "TTD", + "value": 0.1097627647 + }, + { + "year": 2023, + "month": 4, + "currency_id": "AED", + "value": 0.201902875 + }, + { + "year": 2023, + "month": 4, + "currency_id": "GBP", + "value": 0.9228205556 + }, + { + "year": 2023, + "month": 4, + "currency_id": "USD", + "value": 0.74132105 + }, + { + "year": 2023, + "month": 4, + "currency_id": "UYU", + "value": 0.0191143824 + }, + { + "year": 2023, + "month": 4, + "currency_id": "BHD", + "value": 1.97401 + }, + { + "year": 2023, + "month": 4, + "currency_id": "COP", + "value": 0.000159511 + }, + { + "year": 2023, + "month": 4, + "currency_id": "HUF", + "value": 0.00218767 + }, + { + "year": 2023, + "month": 4, + "currency_id": "ISK", + "value": 0.00546757 + }, + { + "year": 
2023, + "month": 4, + "currency_id": "IDR", + "value": 5.03278e-05 + }, + { + "year": 2023, + "month": 4, + "currency_id": "IRR", + "value": 2.9695e-06 + }, + { + "year": 2023, + "month": 4, + "currency_id": "KZT", + "value": 0.00163741 + }, + { + "year": 2023, + "month": 4, + "currency_id": "LYD", + "value": 0.155499 + }, + { + "year": 2023, + "month": 4, + "currency_id": "NPR", + "value": 0.0056701 + }, + { + "year": 2023, + "month": 4, + "currency_id": "PKR", + "value": 0.00261554 + }, + { + "year": 2023, + "month": 4, + "currency_id": "LKR", + "value": 0.00230806 + }, + { + "year": 2023, + "month": 4, + "currency_id": "TND", + "value": 0.24411 + }, + { + "year": 2023, + "month": 5, + "currency_id": "AUD", + "value": 0.4962168182 + }, + { + "year": 2023, + "month": 5, + "currency_id": "CAD", + "value": 0.5515419048 + }, + { + "year": 2023, + "month": 5, + "currency_id": "ILS", + "value": 0.2036497143 + }, + { + "year": 2023, + "month": 5, + "currency_id": "JPY", + "value": 0.0054415732 + }, + { + "year": 2023, + "month": 5, + "currency_id": "KWD", + "value": 2.4304116667 + }, + { + "year": 2023, + "month": 5, + "currency_id": "NZD", + "value": 0.4640680909 + }, + { + "year": 2023, + "month": 5, + "currency_id": "OMR", + "value": 1.9390566667 + }, + { + "year": 2023, + "month": 5, + "currency_id": "QAR", + "value": 0.2048260556 + }, + { + "year": 2023, + "month": 5, + "currency_id": "SAR", + "value": 0.1988177778 + }, + { + "year": 2023, + "month": 5, + "currency_id": "TTD", + "value": 0.1103666667 + }, + { + "year": 2023, + "month": 5, + "currency_id": "AED", + "value": 0.2030133333 + }, + { + "year": 2023, + "month": 5, + "currency_id": "USD", + "value": 0.7456007727 + }, + { + "year": 2023, + "month": 5, + "currency_id": "DZD", + "value": 0.0054857624 + }, + { + "year": 2023, + "month": 5, + "currency_id": "BWP", + "value": 0.05536733 + }, + { + "year": 2023, + "month": 5, + "currency_id": "BRL", + "value": 0.1497138571 + }, + { + "year": 2023, + "month": 5, + "currency_id": "BND", + "value": 0.5570951429 + }, + { + "year": 2023, + "month": 5, + "currency_id": "CLP", + "value": 0.00093417 + }, + { + "year": 2023, + "month": 5, + "currency_id": "CZK", + "value": 0.034354895 + }, + { + "year": 2023, + "month": 5, + "currency_id": "DKK", + "value": 0.1088746667 + }, + { + "year": 2023, + "month": 5, + "currency_id": "EUR", + "value": 0.8109615238 + }, + { + "year": 2023, + "month": 5, + "currency_id": "INR", + "value": 0.00906147 + }, + { + "year": 2023, + "month": 5, + "currency_id": "KRW", + "value": 0.0005617361 + }, + { + "year": 2023, + "month": 5, + "currency_id": "MUR", + "value": 0.0163771571 + }, + { + "year": 2023, + "month": 5, + "currency_id": "MXN", + "value": 0.0420284571 + }, + { + "year": 2023, + "month": 5, + "currency_id": "NOK", + "value": 0.0692027316 + }, + { + "year": 2023, + "month": 5, + "currency_id": "PEN", + "value": 0.202329 + }, + { + "year": 2023, + "month": 5, + "currency_id": "PHP", + "value": 0.0133836476 + }, + { + "year": 2023, + "month": 5, + "currency_id": "PLN", + "value": 0.1787335 + }, + { + "year": 2023, + "month": 5, + "currency_id": "RUB", + "value": 0.0094186205 + }, + { + "year": 2023, + "month": 5, + "currency_id": "SGD", + "value": 0.5570951429 + }, + { + "year": 2023, + "month": 5, + "currency_id": "ZAR", + "value": 0.0392001571 + }, + { + "year": 2023, + "month": 5, + "currency_id": "SEK", + "value": 0.071512965 + }, + { + "year": 2023, + "month": 5, + "currency_id": "CHF", + "value": 0.83149215 + }, + { + "year": 2023, + "month": 5, + 
"currency_id": "THB", + "value": 0.0218094421 + }, + { + "year": 2023, + "month": 5, + "currency_id": "GBP", + "value": 0.93116545 + }, + { + "year": 2023, + "month": 5, + "currency_id": "UYU", + "value": 0.019186505 + }, + { + "year": 2023, + "month": 5, + "currency_id": "CNY", + "value": 0.1066788421 + }, + { + "year": 2023, + "month": 5, + "currency_id": "MYR", + "value": 0.1648854706 + }, + { + "year": 2023, + "month": 5, + "currency_id": "KZT", + "value": 0.00168102 + }, + { + "year": 2023, + "month": 5, + "currency_id": "PKR", + "value": 0.00263966 + }, + { + "year": 2023, + "month": 5, + "currency_id": "LKR", + "value": 0.00254911 + }, + { + "year": 2023, + "month": 6, + "currency_id": "DZD", + "value": 0.0055142767 + }, + { + "year": 2023, + "month": 6, + "currency_id": "AUD", + "value": 0.5022857895 + }, + { + "year": 2023, + "month": 6, + "currency_id": "BWP", + "value": 0.05569348 + }, + { + "year": 2023, + "month": 6, + "currency_id": "BRL", + "value": 0.15451 + }, + { + "year": 2023, + "month": 6, + "currency_id": "BND", + "value": 0.557055875 + }, + { + "year": 2023, + "month": 6, + "currency_id": "CAD", + "value": 0.5641984 + }, + { + "year": 2023, + "month": 6, + "currency_id": "CLP", + "value": 0.0009364994 + }, + { + "year": 2023, + "month": 6, + "currency_id": "CNY", + "value": 0.1047169474 + }, + { + "year": 2023, + "month": 6, + "currency_id": "CZK", + "value": 0.034299745 + }, + { + "year": 2023, + "month": 6, + "currency_id": "DKK", + "value": 0.1091807895 + }, + { + "year": 2023, + "month": 6, + "currency_id": "EUR", + "value": 0.8124538571 + }, + { + "year": 2023, + "month": 6, + "currency_id": "INR", + "value": 0.0091159728 + }, + { + "year": 2023, + "month": 6, + "currency_id": "ILS", + "value": 0.20559615 + }, + { + "year": 2023, + "month": 6, + "currency_id": "JPY", + "value": 0.005312709 + }, + { + "year": 2023, + "month": 6, + "currency_id": "KRW", + "value": 0.0005778121 + }, + { + "year": 2023, + "month": 6, + "currency_id": "KWD", + "value": 2.4416108333 + }, + { + "year": 2023, + "month": 6, + "currency_id": "MYR", + "value": 0.1616965556 + }, + { + "year": 2023, + "month": 6, + "currency_id": "MUR", + "value": 0.0163719105 + }, + { + "year": 2023, + "month": 6, + "currency_id": "MXN", + "value": 0.043475255 + }, + { + "year": 2023, + "month": 6, + "currency_id": "NZD", + "value": 0.4594601053 + }, + { + "year": 2023, + "month": 6, + "currency_id": "NOK", + "value": 0.069311425 + }, + { + "year": 2023, + "month": 6, + "currency_id": "OMR", + "value": 1.9510592857 + }, + { + "year": 2023, + "month": 6, + "currency_id": "PEN", + "value": 0.2054745294 + }, + { + "year": 2023, + "month": 6, + "currency_id": "PHP", + "value": 0.0134161833 + }, + { + "year": 2023, + "month": 6, + "currency_id": "PLN", + "value": 0.1821163684 + }, + { + "year": 2023, + "month": 6, + "currency_id": "QAR", + "value": 0.2056876 + }, + { + "year": 2023, + "month": 6, + "currency_id": "RUB", + "value": 0.0089449978 + }, + { + "year": 2023, + "month": 6, + "currency_id": "SAR", + "value": 0.2000289167 + }, + { + "year": 2023, + "month": 6, + "currency_id": "SGD", + "value": 0.5567785 + }, + { + "year": 2023, + "month": 6, + "currency_id": "ZAR", + "value": 0.0399219526 + }, + { + "year": 2023, + "month": 6, + "currency_id": "SEK", + "value": 0.0696344389 + }, + { + "year": 2023, + "month": 6, + "currency_id": "CHF", + "value": 0.8321152381 + }, + { + "year": 2023, + "month": 6, + "currency_id": "THB", + "value": 0.0214739722 + }, + { + "year": 2023, + "month": 6, + "currency_id": 
"TTD", + "value": 0.1110543333 + }, + { + "year": 2023, + "month": 6, + "currency_id": "AED", + "value": 0.2041703333 + }, + { + "year": 2023, + "month": 6, + "currency_id": "GBP", + "value": 0.9459971905 + }, + { + "year": 2023, + "month": 6, + "currency_id": "USD", + "value": 0.749791 + }, + { + "year": 2023, + "month": 6, + "currency_id": "UYU", + "value": 0.019643545 + }, + { + "year": 2023, + "month": 6, + "currency_id": "PKR", + "value": 0.00261539 + }, + { + "year": 2023, + "month": 6, + "currency_id": "KZT", + "value": 0.00166148 + }, + { + "year": 2023, + "month": 6, + "currency_id": "LKR", + "value": 0.00243447 + }, + { + "year": 2023, + "month": 7, + "currency_id": "AUD", + "value": 0.5015182632 + }, + { + "year": 2023, + "month": 7, + "currency_id": "BWP", + "value": 0.0563089412 + }, + { + "year": 2023, + "month": 7, + "currency_id": "BRL", + "value": 0.1548863684 + }, + { + "year": 2023, + "month": 7, + "currency_id": "BND", + "value": 0.5577022778 + }, + { + "year": 2023, + "month": 7, + "currency_id": "CAD", + "value": 0.5626279474 + }, + { + "year": 2023, + "month": 7, + "currency_id": "CLP", + "value": 0.000912668 + }, + { + "year": 2023, + "month": 7, + "currency_id": "CNY", + "value": 0.1034731053 + }, + { + "year": 2023, + "month": 7, + "currency_id": "DKK", + "value": 0.1105018947 + }, + { + "year": 2023, + "month": 7, + "currency_id": "EUR", + "value": 0.8233651579 + }, + { + "year": 2023, + "month": 7, + "currency_id": "INR", + "value": 0.0090472889 + }, + { + "year": 2023, + "month": 7, + "currency_id": "ILS", + "value": 0.2029819444 + }, + { + "year": 2023, + "month": 7, + "currency_id": "JPY", + "value": 0.0052815311 + }, + { + "year": 2023, + "month": 7, + "currency_id": "KRW", + "value": 0.0005793915 + }, + { + "year": 2023, + "month": 7, + "currency_id": "KWD", + "value": 2.42472 + }, + { + "year": 2023, + "month": 7, + "currency_id": "MYR", + "value": 0.1620822222 + }, + { + "year": 2023, + "month": 7, + "currency_id": "MUR", + "value": 0.0162544158 + }, + { + "year": 2023, + "month": 7, + "currency_id": "MXN", + "value": 0.0440186526 + }, + { + "year": 2023, + "month": 7, + "currency_id": "NZD", + "value": 0.4633512222 + }, + { + "year": 2023, + "month": 7, + "currency_id": "NOK", + "value": 0.0727839421 + }, + { + "year": 2023, + "month": 7, + "currency_id": "OMR", + "value": 1.9345584615 + }, + { + "year": 2023, + "month": 7, + "currency_id": "PEN", + "value": 0.2069064706 + }, + { + "year": 2023, + "month": 7, + "currency_id": "PHP", + "value": 0.0135471056 + }, + { + "year": 2023, + "month": 7, + "currency_id": "PLN", + "value": 0.1852460526 + }, + { + "year": 2023, + "month": 7, + "currency_id": "QAR", + "value": 0.204225 + }, + { + "year": 2023, + "month": 7, + "currency_id": "RUB", + "value": 0.0081926668 + }, + { + "year": 2023, + "month": 7, + "currency_id": "SAR", + "value": 0.1982345333 + }, + { + "year": 2023, + "month": 7, + "currency_id": "SGD", + "value": 0.5577778947 + }, + { + "year": 2023, + "month": 7, + "currency_id": "ZAR", + "value": 0.0410038842 + }, + { + "year": 2023, + "month": 7, + "currency_id": "SEK", + "value": 0.0708933526 + }, + { + "year": 2023, + "month": 7, + "currency_id": "CHF", + "value": 0.8534652105 + }, + { + "year": 2023, + "month": 7, + "currency_id": "THB", + "value": 0.0215029944 + }, + { + "year": 2023, + "month": 7, + "currency_id": "TTD", + "value": 0.1101218947 + }, + { + "year": 2023, + "month": 7, + "currency_id": "AED", + "value": 0.202471 + }, + { + "year": 2023, + "month": 7, + "currency_id": "GBP", + 
"value": 0.9592823684 + }, + { + "year": 2023, + "month": 7, + "currency_id": "USD", + "value": 0.7434777368 + }, + { + "year": 2023, + "month": 7, + "currency_id": "UYU", + "value": 0.0196081444 + }, + { + "year": 2023, + "month": 7, + "currency_id": "DZD", + "value": 0.0055074706 + }, + { + "year": 2023, + "month": 7, + "currency_id": "CZK", + "value": 0.0344431059 + }, + { + "year": 2023, + "month": 7, + "currency_id": "KZT", + "value": 0.00166933 + }, + { + "year": 2023, + "month": 7, + "currency_id": "PKR", + "value": 0.00259571 + }, + { + "year": 2023, + "month": 7, + "currency_id": "LKR", + "value": 0.00226088 + }, + { + "year": 2023, + "month": 8, + "currency_id": "DZD", + "value": 0.0055146809 + }, + { + "year": 2023, + "month": 8, + "currency_id": "AUD", + "value": 0.4868466364 + }, + { + "year": 2023, + "month": 8, + "currency_id": "BWP", + "value": 0.055647587 + }, + { + "year": 2023, + "month": 8, + "currency_id": "BRL", + "value": 0.1530352174 + }, + { + "year": 2023, + "month": 8, + "currency_id": "BND", + "value": 0.5556082273 + }, + { + "year": 2023, + "month": 8, + "currency_id": "CAD", + "value": 0.5564543182 + }, + { + "year": 2023, + "month": 8, + "currency_id": "CLP", + "value": 0.0008768502 + }, + { + "year": 2023, + "month": 8, + "currency_id": "CNY", + "value": 0.1034427826 + }, + { + "year": 2023, + "month": 8, + "currency_id": "CZK", + "value": 0.0339419087 + }, + { + "year": 2023, + "month": 8, + "currency_id": "DKK", + "value": 0.1098225652 + }, + { + "year": 2023, + "month": 8, + "currency_id": "EUR", + "value": 0.818413087 + }, + { + "year": 2023, + "month": 8, + "currency_id": "INR", + "value": 0.0090619629 + }, + { + "year": 2023, + "month": 8, + "currency_id": "ILS", + "value": 0.200313 + }, + { + "year": 2023, + "month": 8, + "currency_id": "JPY", + "value": 0.0051832405 + }, + { + "year": 2023, + "month": 8, + "currency_id": "KRW", + "value": 0.0005691091 + }, + { + "year": 2023, + "month": 8, + "currency_id": "KWD", + "value": 2.4387352632 + }, + { + "year": 2023, + "month": 8, + "currency_id": "MYR", + "value": 0.1628059545 + }, + { + "year": 2023, + "month": 8, + "currency_id": "MUR", + "value": 0.0164496174 + }, + { + "year": 2023, + "month": 8, + "currency_id": "MXN", + "value": 0.0441962565 + }, + { + "year": 2023, + "month": 8, + "currency_id": "NZD", + "value": 0.4501691739 + }, + { + "year": 2023, + "month": 8, + "currency_id": "NOK", + "value": 0.071726713 + }, + { + "year": 2023, + "month": 8, + "currency_id": "OMR", + "value": 1.9510936842 + }, + { + "year": 2023, + "month": 8, + "currency_id": "PEN", + "value": 0.2031505 + }, + { + "year": 2023, + "month": 8, + "currency_id": "PHP", + "value": 0.0133565381 + }, + { + "year": 2023, + "month": 8, + "currency_id": "PLN", + "value": 0.1835320909 + }, + { + "year": 2023, + "month": 8, + "currency_id": "QAR", + "value": 0.2060975263 + }, + { + "year": 2023, + "month": 8, + "currency_id": "RUB", + "value": 0.00786002 + }, + { + "year": 2023, + "month": 8, + "currency_id": "SAR", + "value": 0.2000377778 + }, + { + "year": 2023, + "month": 8, + "currency_id": "SGD", + "value": 0.5556082273 + }, + { + "year": 2023, + "month": 8, + "currency_id": "ZAR", + "value": 0.0400104545 + }, + { + "year": 2023, + "month": 8, + "currency_id": "SEK", + "value": 0.0693934045 + }, + { + "year": 2023, + "month": 8, + "currency_id": "AED", + "value": 0.2042737368 + }, + { + "year": 2023, + "month": 8, + "currency_id": "GBP", + "value": 0.953221 + }, + { + "year": 2023, + "month": 8, + "currency_id": "USD", + "value": 
0.750241087 + }, + { + "year": 2023, + "month": 8, + "currency_id": "UYU", + "value": 0.0198174182 + }, + { + "year": 2023, + "month": 8, + "currency_id": "CHF", + "value": 0.8538547727 + }, + { + "year": 2023, + "month": 8, + "currency_id": "THB", + "value": 0.0214139476 + }, + { + "year": 2023, + "month": 8, + "currency_id": "TTD", + "value": 0.1112321429 + }, + { + "year": 2023, + "month": 8, + "currency_id": "KZT", + "value": 0.0016357 + }, + { + "year": 2023, + "month": 8, + "currency_id": "PKR", + "value": 0.00246031 + }, + { + "year": 2023, + "month": 8, + "currency_id": "LKR", + "value": 0.00233033 + }, + { + "year": 2023, + "month": 9, + "currency_id": "DZD", + "value": 0.00553097 + }, + { + "year": 2023, + "month": 9, + "currency_id": "AUD", + "value": 0.48688025 + }, + { + "year": 2023, + "month": 9, + "currency_id": "BWP", + "value": 0.055502375 + }, + { + "year": 2023, + "month": 9, + "currency_id": "BRL", + "value": 0.1535732105 + }, + { + "year": 2023, + "month": 9, + "currency_id": "CAD", + "value": 0.5599346316 + }, + { + "year": 2023, + "month": 9, + "currency_id": "CLP", + "value": 0.0008556479 + }, + { + "year": 2023, + "month": 9, + "currency_id": "CNY", + "value": 0.1038478421 + }, + { + "year": 2023, + "month": 9, + "currency_id": "DKK", + "value": 0.10856035 + }, + { + "year": 2023, + "month": 9, + "currency_id": "EUR", + "value": 0.80951055 + }, + { + "year": 2023, + "month": 9, + "currency_id": "INR", + "value": 0.0091232659 + }, + { + "year": 2023, + "month": 9, + "currency_id": "ILS", + "value": 0.1983924444 + }, + { + "year": 2023, + "month": 9, + "currency_id": "JPY", + "value": 0.0051312216 + }, + { + "year": 2023, + "month": 9, + "currency_id": "KRW", + "value": 0.0005697273 + }, + { + "year": 2023, + "month": 9, + "currency_id": "MYR", + "value": 0.1619180526 + }, + { + "year": 2023, + "month": 9, + "currency_id": "MUR", + "value": 0.0167957579 + }, + { + "year": 2023, + "month": 9, + "currency_id": "MXN", + "value": 0.04379213 + }, + { + "year": 2023, + "month": 9, + "currency_id": "NZD", + "value": 0.44912815 + }, + { + "year": 2023, + "month": 9, + "currency_id": "NOK", + "value": 0.070701 + }, + { + "year": 2023, + "month": 9, + "currency_id": "PEN", + "value": 0.2032994211 + }, + { + "year": 2023, + "month": 9, + "currency_id": "PLN", + "value": 0.17586925 + }, + { + "year": 2023, + "month": 9, + "currency_id": "RUB", + "value": 0.0078457025 + }, + { + "year": 2023, + "month": 9, + "currency_id": "ZAR", + "value": 0.0399287947 + }, + { + "year": 2023, + "month": 9, + "currency_id": "SEK", + "value": 0.0683663 + }, + { + "year": 2023, + "month": 9, + "currency_id": "CHF", + "value": 0.8432669 + }, + { + "year": 2023, + "month": 9, + "currency_id": "THB", + "value": 0.021131905 + }, + { + "year": 2023, + "month": 9, + "currency_id": "TTD", + "value": 0.11244325 + }, + { + "year": 2023, + "month": 9, + "currency_id": "GBP", + "value": 0.93914965 + }, + { + "year": 2023, + "month": 9, + "currency_id": "USD", + "value": 0.7581312 + }, + { + "year": 2023, + "month": 9, + "currency_id": "UYU", + "value": 0.0198647158 + }, + { + "year": 2023, + "month": 9, + "currency_id": "BND", + "value": 0.5559153889 + }, + { + "year": 2023, + "month": 9, + "currency_id": "CZK", + "value": 0.0331536722 + }, + { + "year": 2023, + "month": 9, + "currency_id": "KWD", + "value": 2.4562335714 + }, + { + "year": 2023, + "month": 9, + "currency_id": "OMR", + "value": 1.9709521429 + }, + { + "year": 2023, + "month": 9, + "currency_id": "PHP", + "value": 0.0133545842 + }, + { + 
"year": 2023, + "month": 9, + "currency_id": "QAR", + "value": 0.2083222667 + }, + { + "year": 2023, + "month": 9, + "currency_id": "SAR", + "value": 0.2022114 + }, + { + "year": 2023, + "month": 9, + "currency_id": "SGD", + "value": 0.5559168947 + }, + { + "year": 2023, + "month": 9, + "currency_id": "AED", + "value": 0.206395 + }, + { + "year": 2023, + "month": 9, + "currency_id": "LKR", + "value": 0.00235056 + }, + { + "year": 2023, + "month": 9, + "currency_id": "PKR", + "value": 0.00264992 + }, + { + "year": 2023, + "month": 9, + "currency_id": "KZT", + "value": 0.00160278 + }, + { + "year": 2023, + "month": 10, + "currency_id": "DZD", + "value": 0.0055493914 + }, + { + "year": 2023, + "month": 10, + "currency_id": "BRL", + "value": 0.149033 + }, + { + "year": 2023, + "month": 10, + "currency_id": "BND", + "value": 0.5570754286 + }, + { + "year": 2023, + "month": 10, + "currency_id": "CLP", + "value": 0.0008356787 + }, + { + "year": 2023, + "month": 10, + "currency_id": "CZK", + "value": 0.0328431429 + }, + { + "year": 2023, + "month": 10, + "currency_id": "DKK", + "value": 0.1077734286 + }, + { + "year": 2023, + "month": 10, + "currency_id": "EUR", + "value": 0.803745 + }, + { + "year": 2023, + "month": 10, + "currency_id": "ILS", + "value": 0.1965672857 + }, + { + "year": 2023, + "month": 10, + "currency_id": "JPY", + "value": 0.0051166829 + }, + { + "year": 2023, + "month": 10, + "currency_id": "KWD", + "value": 2.4685933333 + }, + { + "year": 2023, + "month": 10, + "currency_id": "MYR", + "value": 0.1614551429 + }, + { + "year": 2023, + "month": 10, + "currency_id": "MUR", + "value": 0.0171297286 + }, + { + "year": 2023, + "month": 10, + "currency_id": "MXN", + "value": 0.0424532714 + }, + { + "year": 2023, + "month": 10, + "currency_id": "NZD", + "value": 0.4555665714 + }, + { + "year": 2023, + "month": 10, + "currency_id": "NOK", + "value": 0.0699529857 + }, + { + "year": 2023, + "month": 10, + "currency_id": "OMR", + "value": 1.9845675 + }, + { + "year": 2023, + "month": 10, + "currency_id": "PEN", + "value": 0.2001476667 + }, + { + "year": 2023, + "month": 10, + "currency_id": "PHP", + "value": 0.0134337286 + }, + { + "year": 2023, + "month": 10, + "currency_id": "PLN", + "value": 0.1752022857 + }, + { + "year": 2023, + "month": 10, + "currency_id": "QAR", + "value": 0.2095483333 + }, + { + "year": 2023, + "month": 10, + "currency_id": "RUB", + "value": 0.00765667 + }, + { + "year": 2023, + "month": 10, + "currency_id": "SAR", + "value": 0.2034015 + }, + { + "year": 2023, + "month": 10, + "currency_id": "SGD", + "value": 0.5570754286 + }, + { + "year": 2023, + "month": 10, + "currency_id": "ZAR", + "value": 0.0397042571 + }, + { + "year": 2023, + "month": 10, + "currency_id": "SEK", + "value": 0.0693457429 + }, + { + "year": 2023, + "month": 10, + "currency_id": "CHF", + "value": 0.8352217143 + }, + { + "year": 2023, + "month": 10, + "currency_id": "THB", + "value": 0.0206688286 + }, + { + "year": 2023, + "month": 10, + "currency_id": "TTD", + "value": 0.1131596667 + }, + { + "year": 2023, + "month": 10, + "currency_id": "AED", + "value": 0.2076938333 + }, + { + "year": 2023, + "month": 10, + "currency_id": "GBP", + "value": 0.9286597143 + }, + { + "year": 2023, + "month": 10, + "currency_id": "USD", + "value": 0.7626655714 + }, + { + "year": 2023, + "month": 10, + "currency_id": "UYU", + "value": 0.0193836667 + }, + { + "year": 2023, + "month": 10, + "currency_id": "AUD", + "value": 0.4856458333 + }, + { + "year": 2023, + "month": 10, + "currency_id": "CAD", + "value": 
0.5572896 + }, + { + "year": 2023, + "month": 10, + "currency_id": "INR", + "value": 0.00916226 + }, + { + "year": 2023, + "month": 10, + "currency_id": "BWP", + "value": 0.0551175 + }, + { + "year": 2023, + "month": 10, + "currency_id": "KRW", + "value": 0.0005638912 + }, + { + "year": 2023, + "month": 10, + "currency_id": "CNY", + "value": 0.104258 + } + ] + # mock the Currencies class init to set self.currencies_list to `data` + monkeypatch.setattr(currencies.Currencies, "__init__", lambda x: setattr(x, "currencies_list", data)) + return currencies.Currencies() From 401956b012318a2772987607cf741e1ab6b894b5 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 14 Nov 2023 17:19:29 +0100 Subject: [PATCH 36/49] refactor: pep8 style --- direct_indexing/processing/util.py | 1 + direct_indexing/util.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/direct_indexing/processing/util.py b/direct_indexing/processing/util.py index 67fe4eab4..ffdff4fc4 100644 --- a/direct_indexing/processing/util.py +++ b/direct_indexing/processing/util.py @@ -86,6 +86,7 @@ def get_dataset_filetype(dataset): except Exception: return 'None' + def valid_version_from_file(filepath): """ Extract the value of the iati version from the dataset diff --git a/direct_indexing/util.py b/direct_indexing/util.py index 1c3b938e2..28f8bddfa 100644 --- a/direct_indexing/util.py +++ b/direct_indexing/util.py @@ -33,7 +33,8 @@ def index_to_core(url, json_path, remove=False): :param remove: bool to indicate if the created json file should be removed, defaults to False """ try: - solr_out = subprocess.check_output([settings.SOLR_POST_TOOL, '-url', url, json_path], stderr=subprocess.STDOUT).decode('utf-8') + solr_out = subprocess.check_output([settings.SOLR_POST_TOOL, '-url', url, json_path], + stderr=subprocess.STDOUT).decode('utf-8') result = 'Successfully indexed' if 'SolrException' in solr_out or 'Failed to index' in solr_out: message_index = re.search(r'\b(msg)\b', solr_out).start()+5 # +5 to get past the 'msg:' From 454a5a3b895c044c6932c1c267ed912ad07d50fe Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 15 Nov 2023 15:13:05 +0100 Subject: [PATCH 37/49] feat: added tests for currency aggregation --- .../custom_fields/currency_aggregation.py | 66 +-- .../test_currency_aggregation.py | 515 ++++++++++++++++-- 2 files changed, 503 insertions(+), 78 deletions(-) diff --git a/direct_indexing/custom_fields/currency_aggregation.py b/direct_indexing/custom_fields/currency_aggregation.py index 86d2f16ea..7ab6ecb94 100644 --- a/direct_indexing/custom_fields/currency_aggregation.py +++ b/direct_indexing/custom_fields/currency_aggregation.py @@ -23,6 +23,12 @@ PDV_GBP_CURR = 'planned-disbursement.value-gbp.conversion-currency' TV_USD_CURR = 'transaction.value-usd.conversion-currency' TV_GBP_CURR = 'transaction.value-gbp.conversion-currency' +T_TYPES = [None, "incoming-funds", "outgoing-commitment", "disbursement", + "expenditure", "interest-payment", "loan-repayment", + "reimbursement", "purchase-of-equity", "sale-of-equity", + "credit-guarantee", "incoming-commitment", "outgoing-pledge", + "incoming-pledge"] +TT_U = [t.replace("-", "_") if t else None for t in T_TYPES] def currency_aggregation(data): @@ -297,24 +303,18 @@ def process_activity_aggregations(data, activity_aggregations, activity_indexes, :param activity_indexes: the activity indexes :param aggregation_fields: the aggregation fields """ - budget_agg = activity_aggregations['budget'] - transaction_agg = activity_aggregations['transaction'] -
transaction_usd_agg = activity_aggregations['transaction-usd'] - transaction_gbp_agg = activity_aggregations['transaction-gbp'] - planned_disbursement_agg = activity_aggregations['planned-disbursement'] + budget_agg = activity_aggregations.get('budget', []) + transaction_agg = activity_aggregations.get('transaction', []) + transaction_usd_agg = activity_aggregations.get('transaction-usd', []) + transaction_gbp_agg = activity_aggregations.get('transaction-gbp', []) + planned_disbursement_agg = activity_aggregations.get('planned-disbursement', []) # Process the aggregated data process_budget_agg(budget_agg, activity_indexes, aggregation_fields, data) process_planned_disbursement_agg(planned_disbursement_agg, activity_indexes, aggregation_fields, data) # Transaction types, starting with none to make array index match the transaction type code from the codelist - transaction_types = [None, "incoming-funds", "outgoing-commitment", "disbursement", - "expenditure", "interest-payment", "loan-repayment", - "reimbursement", "purchase-of-equity", "sale-of-equity", - "credit-guarantee", "incoming-commitment", "outgoing-pledge", - "incoming-pledge"] - tt_u = [t.replace("-", "_") if t else None for t in transaction_types] - process_transaction_agg(transaction_agg, activity_indexes, aggregation_fields, data, tt_u) - process_transaction_currency_agg(transaction_usd_agg, activity_indexes, aggregation_fields, data, tt_u, 'usd') - process_transaction_currency_agg(transaction_gbp_agg, activity_indexes, aggregation_fields, data, tt_u, 'gbp') + process_transaction_agg(transaction_agg, activity_indexes, aggregation_fields, data) + process_transaction_currency_agg(transaction_usd_agg, activity_indexes, aggregation_fields, data, 'usd') + process_transaction_currency_agg(transaction_gbp_agg, activity_indexes, aggregation_fields, data, 'gbp') return data @@ -345,12 +345,13 @@ def get_child_aggregations(dba, aggregation_fields): for key in aggregation_fields: if "currency" not in aggregation_fields[key]: group_object[key] = {"$sum": f'${aggregation_fields[key]}'} - # Get aggregations for all fields children_agg = list(dba.aggregate([ - {MONGO_UNWIND: "$related-activity"}, + # {MONGO_UNWIND: "$related-activity"}, + {"$unwind": "$related-activity"}, {"$match": {"related-activity.type": 1}}, - {MONGO_GROUP: group_object} + {"$group": group_object} + # {MONGO_GROUP: group_object} ])) return children_agg @@ -468,9 +469,9 @@ def process_budget_agg(budget_agg, activity_indexes, aggregation_fields, data): data[index_of_activity][aggregation_fields['budget_usd']] = data[index_of_activity]['budget.value-usd.sum'] if 'budget.value-gbp.sum' in data[index_of_activity]: data[index_of_activity][aggregation_fields['budget_gbp']] = data[index_of_activity]['budget.value-gbp.sum'] + # Get the original currency from which has been converted if BV_USD_CURR in data[index_of_activity]: - data[index_of_activity][aggregation_fields['budget_currency']] = data[index_of_activity][ - BV_USD_CURR] + data[index_of_activity][aggregation_fields['budget_currency']] = data[index_of_activity][BV_USD_CURR] def process_planned_disbursement_agg(planned_disbursement_agg, activity_indexes, aggregation_fields, data): @@ -494,7 +495,7 @@ def process_planned_disbursement_agg(planned_disbursement_agg, activity_indexes, PDV_GBP_CURR] -def process_transaction_agg(transaction_agg, activity_indexes, aggregation_fields, data, types): +def process_transaction_agg(transaction_agg, activity_indexes, aggregation_fields, data): for agg in transaction_agg: # Find the 
index of the relevant activity if agg['_id'][0] not in activity_indexes: @@ -502,26 +503,25 @@ def process_transaction_agg(transaction_agg, activity_indexes, aggregation_field index_of_activity = activity_indexes[agg['_id'][0]] transaction_type = agg['_id'][1] if type(transaction_type) is int: - data[index_of_activity][f'{aggregation_fields[types[transaction_type]]}'] = \ + data[index_of_activity][f'{aggregation_fields[TT_U[transaction_type]]}'] = \ agg['transaction-value-sum'] -def process_transaction_currency_agg(transaction_curr_agg, activity_indexes, aggregation_fields, data, types, currency): +def process_transaction_currency_agg(transaction_curr_agg, activity_indexes, aggregation_fields, data, currency): for agg in transaction_curr_agg: if agg['_id'][0] not in activity_indexes: continue # Find the index of the relevant activity index_of_activity = activity_indexes[agg['_id'][0]] transaction_type = agg['_id'][1] - if not transaction_type: + if not transaction_type or type(transaction_type) is not int: continue - if type(transaction_type) is int: - data[index_of_activity][f'{aggregation_fields[types[transaction_type]]}-{currency}'] = agg[ - f'transaction-value-{currency}-sum'] - if f'transaction-value-{currency}-conversion-currency' in data[index_of_activity]: - if currency == 'gbp': - selector = TV_GBP_CURR - else: - selector = TV_USD_CURR - data[index_of_activity][f'{aggregation_fields[types[transaction_type]]}-currency'] = \ - data[index_of_activity][selector] + data[index_of_activity][f'{aggregation_fields[TT_U[transaction_type]]}-{currency}'] = agg[ + f'transaction-value-{currency}-sum'] + if f'transaction-value-{currency}-conversion-currency' in data[index_of_activity]: + if currency == 'gbp': + selector = TV_GBP_CURR + else: + selector = TV_USD_CURR + data[index_of_activity][f'{aggregation_fields[TT_U[transaction_type]]}-currency'] = \ + data[index_of_activity][selector] diff --git a/tests/direct_indexing/custom_fields/test_currency_aggregation.py b/tests/direct_indexing/custom_fields/test_currency_aggregation.py index 06159a0a1..e47b01bf9 100644 --- a/tests/direct_indexing/custom_fields/test_currency_aggregation.py +++ b/tests/direct_indexing/custom_fields/test_currency_aggregation.py @@ -1,77 +1,502 @@ -# TODO - - -def test_currency_aggregation(): - assert True +import pytest +from pymongo.errors import PyMongoError +from pytest_mock_resources import create_mongo_fixture + +from direct_indexing.custom_fields.currency_aggregation import ( + TVU_CLEAN, TVU_CLEAN_GBP, TVU_CLEAN_TYPE, TVU_CLEAN_TYPE_GBP, TVU_DASHES, TVU_DASHES_GBP, TVU_DASHES_TYPE, + TVU_DASHES_TYPE_GBP, aggregate_converted_types, clean_aggregation_result, connect_to_mongo, currency_aggregation, + get_aggregation_fields, get_aggregations, get_child_aggregations, get_currency, index_activity_data, prepare_data, + process_activity_aggregations, process_budget_agg, process_child_agg_currencies, process_child_aggregations, + process_planned_disbursement_agg, process_transaction_agg, process_transaction_currency_agg, refresh_mongo_data, + revert_activity_tvu +) + +mongo = create_mongo_fixture() + + +def test_currency_aggregation(mocker): + # mock all the functions in currency_aggregation + mock_prepare_data = mocker.patch('direct_indexing.custom_fields.currency_aggregation.prepare_data') # NOQA: 501 + mock_connect_to_mongo = mocker.patch('direct_indexing.custom_fields.currency_aggregation.connect_to_mongo') # NOQA: 501 + mock_get_aggregations = 
mocker.patch('direct_indexing.custom_fields.currency_aggregation.get_aggregations') # NOQA: 501 + mock_get_aggregation_fields = mocker.patch('direct_indexing.custom_fields.currency_aggregation.get_aggregation_fields') # NOQA: 501 + mock_index_activity_data = mocker.patch('direct_indexing.custom_fields.currency_aggregation.index_activity_data') # NOQA: 501 + mock_process_activity_aggregations = mocker.patch('direct_indexing.custom_fields.currency_aggregation.process_activity_aggregations') # NOQA: 501 + mock_refresh_mongo_data = mocker.patch('direct_indexing.custom_fields.currency_aggregation.refresh_mongo_data') # NOQA: 501 + mock_get_child_aggregations = mocker.patch('direct_indexing.custom_fields.currency_aggregation.get_child_aggregations') # NOQA: 501 + mock_process_child_aggregations = mocker.patch('direct_indexing.custom_fields.currency_aggregation.process_child_aggregations') # NOQA: 501 + mock_clean_aggregation_result = mocker.patch('direct_indexing.custom_fields.currency_aggregation.clean_aggregation_result') # NOQA: 501 + + client_mock = mocker.MagicMock() + mock_prepare_data.return_value = {} + mock_connect_to_mongo.return_value = None, client_mock + mock_get_aggregation_fields.return_value = {}, {}, {}, {} + mock_process_activity_aggregations.return_value = {} + mock_process_child_aggregations.return_value = {} + mock_clean_aggregation_result.return_value = {} + res = currency_aggregation({}) + + # assert the mocks were all called once + mock_prepare_data.assert_called_once() + mock_connect_to_mongo.assert_called_once() + mock_get_aggregations.assert_called_once() + mock_get_aggregation_fields.assert_called_once() + mock_index_activity_data.assert_called_once() + mock_process_activity_aggregations.assert_called_once() + mock_refresh_mongo_data.assert_called_once() + mock_get_child_aggregations.assert_called_once() + mock_process_child_aggregations.assert_called_once() + mock_clean_aggregation_result.assert_called_once() + client_mock.close.assert_called_once() + assert res == {} + + mock_prepare_data.reset_mock() + mock_prepare_data.side_effect = PyMongoError + assert currency_aggregation({}) == [{}] def test_prepare_data(): - assert True - - -def test_connect_to_mongo(): - assert True - - -def test_get_aggregations(): - assert True + # Data is a list of objects + data = [{ + TVU_CLEAN: 0, + TVU_CLEAN_TYPE: 1, + TVU_CLEAN_GBP: 2, + TVU_CLEAN_TYPE_GBP: 3 + }] + expected_res = [{ + TVU_DASHES: 0, + TVU_DASHES_TYPE: 1, + TVU_DASHES_GBP: 2, + TVU_DASHES_TYPE_GBP: 3, + }] + assert prepare_data(data) == expected_res + + +def test_connect_to_mongo(mocker): + mongo_mocker = mocker.MagicMock() + mock = mocker.patch('direct_indexing.custom_fields.currency_aggregation.MongoClient', return_value=mongo_mocker) + + connect_to_mongo([{}]) + mongo_mocker.activities.activity.drop.assert_called_once() + mongo_mocker.activities.activity.insert_many.assert_called_once() + + mock.side_effect = PyMongoError + with pytest.raises(PyMongoError): + connect_to_mongo([{}]) + + +def test_get_aggregations(mocker, fixture_activity_aggregations_res, fixture_aggregation_data): + # # mock dba.aggregate to return a list of objects + b_res = fixture_activity_aggregations_res['budget'] + t_res = fixture_activity_aggregations_res['transaction'] + p_res = fixture_activity_aggregations_res['planned-disbursement'] + mock_dba = mocker.MagicMock() + mock_dba.aggregate.side_effect = [b_res, t_res, p_res] + + # Mock aggregate_converted_types + mock_agg = 
mocker.patch('direct_indexing.custom_fields.currency_aggregation.aggregate_converted_types') + mock_agg.return_value = [] + + data = fixture_aggregation_data.copy() + ex_res = fixture_activity_aggregations_res.copy() + assert get_aggregations(mock_dba, data) == ex_res + assert mock_dba.aggregate.call_count == 3 + assert mock_agg.call_count == 2 def test_aggregate_converted_types(): - assert True - + curr = "USD" + data = [] + assert aggregate_converted_types(data, curr) == [] -def test_get_aggregation_fields(): - assert True + data = [ + { + 'iati-identifier': "test", + 'transaction-value-USD': [1, 1, 42, None], + 'transaction-value-USD-type': [1, 1, 2, 2], + } + ] + expected_res = [ + {'_id': ['test', 1], 'transaction-value-USD-sum': 2}, + {'_id': ['test', 2], 'transaction-value-USD-sum': 42} + ] + assert aggregate_converted_types(data, curr) == expected_res -def test_index_activity_data(): - assert True - +def test_get_aggregation_fields(fixture_af, fixture_faf, fixture_caf, fixture_ppcaf): + assert get_aggregation_fields() == (fixture_af, fixture_faf, fixture_caf, fixture_ppcaf) -def test_process_activity_aggregations(): - assert True - -def test_refresh_mongo_data(): - assert True - - -def test_get_child_aggregations(): - assert True - - -def test_process_child_aggregations(): - assert True - - -def test_process_child_agg_currencies(): - assert True +def test_index_activity_data(): + assert index_activity_data([]) == {} + data = [ + {'iati-identifier': 'test1'}, + {}, + {'iati-identifier': 'test2'} + ] + # index of test1 is 0, index of test2 is 2 + assert index_activity_data(data) == {'test1': 0, 'test2': 2} + + +def test_process_activity_aggregations(mocker): + # mock all functions in process_activity_aggregations + mock_process_budget_agg = mocker.patch('direct_indexing.custom_fields.currency_aggregation.process_budget_agg') # NOQA: 501 + mock_process_planned_disbursement_agg = mocker.patch('direct_indexing.custom_fields.currency_aggregation.process_planned_disbursement_agg') # NOQA: 501 + mock_process_transaction_agg = mocker.patch('direct_indexing.custom_fields.currency_aggregation.process_transaction_agg') # NOQA: 501 + mock_process_transaction_currency_agg = mocker.patch('direct_indexing.custom_fields.currency_aggregation.process_transaction_currency_agg') # NOQA: 501 + process_activity_aggregations(None, {}, None, None) + + mock_process_budget_agg.assert_called_once() + mock_process_planned_disbursement_agg.assert_called_once() + mock_process_transaction_agg.assert_called_once() + assert mock_process_transaction_currency_agg.call_count == 2 + + +def test_refresh_mongo_data(mocker): + mock = mocker.MagicMock() + refresh_mongo_data(mock, None) + mock.drop.assert_called_once() + mock.insert_many.assert_called_once() + + +def test_get_child_aggregations(mocker): + # This function receives the database with data, and the first result of get_aggregation_fields + aggregation_fields, _, _, _ = get_aggregation_fields() + dba = mocker.MagicMock() + dba.aggregate.return_value = [] + + # count the number of keys in aggregation_fields that are not currency + n_keys = sum("currency" not in key for key in aggregation_fields) + assert get_child_aggregations(dba, aggregation_fields) == [] + + # assert that dba.aggregate was called with a list, the 3rd item's key is '$group', + # and the group_object is a dict with n_keys + 1 length + dba.aggregate.assert_called_once() + assert len(dba.aggregate.call_args[0][0][2]['$group']) == n_keys + 1 + + # We do not test the aggregate function of pymongo. 
We assume it works as intended. + + +def test_process_child_aggregations(mocker, fixture_simple_data_list_activity_with_child): + """ + A very simple iati activity with a child activity + """ + # mock process_child_agg_currencies + mock_process_child_agg_currencies = mocker.patch('direct_indexing.custom_fields.currency_aggregation.process_child_agg_currencies') # NOQA: 501 + # Sample of aggregation fields + aggregation_fields = {"budget": "activity-aggregation-budget-value", "planned-disbursement": "activity-aggregation-planned-disbursement-value"} # NOQA: 501 + child_aggregation_fields = {"budget": "child-aggregation-budget-value", "planned-disbursement": "child-aggregation-planned-disbursement-value"} # NOQA: 501 + parent_plus_child_aggregation_fields = {"budget": "activity-plus-child-aggregation-budget-value", "planned-disbursement": "activity-plus-child-aggregation-planned-disbursement-value"} # NOQA: 501 + activity_indexes = {"test1": 0, "test2": 1, "test3": 2} # based on the fixture + dba = mocker.MagicMock() + dba.aggregate.return_value = [ + {'_id': 'test1', 'budget': 42, 'planned-disbursement': 0}, + {'_id': 'testNA', 'budget': 21, 'planned-disbursement': 0} + ] + data = fixture_simple_data_list_activity_with_child.copy() + children_agg = get_child_aggregations(dba, aggregation_fields) + res = process_child_aggregations( + data, + children_agg, + activity_indexes, + aggregation_fields, + child_aggregation_fields, + parent_plus_child_aggregation_fields + ) + # once for budget, not for pd in test1, none for testNA as it is not in activity_indexes + mock_process_child_agg_currencies.assert_called_once() + # assert the first activity in res (test1), has child aggregation value for budget, but not for pd as it is 0 + assert res[0]['child-aggregation-budget-value'] == 42 + assert 'child-aggregation-planned-disbursement-value' not in res[0] + # assert the child + parent together is 42 + 21 + 21 + assert res[0]['activity-plus-child-aggregation-budget-value'] == 84 + + +def test_process_child_agg_currencies(mocker, fixture_simple_data_list_activity_with_child): + # Taking the fixture data and assuming the first agg result for test 1 (budget value of 42) + data = fixture_simple_data_list_activity_with_child.copy() + key = 'budget' + index_of_activity = 0 + child_aggregation_fields = {"budget": "child-aggregation-budget-value", "budget_currency": "child-aggregation-budget-currency"} # NOQA: 501 + parent_plus_child_aggregation_fields = {"budget": "activity-plus-child-aggregation-budget-value", "budget_currency": "activity-plus-child-aggregation-budget-currency"} # NOQA: 501 + + # mock get_currency to return "USD" + mock_get_currency = mocker.patch('direct_indexing.custom_fields.currency_aggregation.get_currency', return_value="USD") # NOQA: 501 + process_child_agg_currencies(data, key, index_of_activity, child_aggregation_fields, parent_plus_child_aggregation_fields) # NOQA: 501 + assert data[index_of_activity][child_aggregation_fields[key + "_currency"]] == "USD" + assert data[index_of_activity][parent_plus_child_aggregation_fields[key + "_currency"]] == "USD" + mock_get_currency.assert_called_once() def test_get_currency(): - assert True + key = "budget" + data = [{'budget.value-usd.conversion-currency': "CAD"}] + assert get_currency(key, data, 0) == "CAD" + data = [{'budget.value-gbp.conversion-currency': "CAD"}] + assert get_currency(key, data, 0) == "CAD" + key = "planned-disbursement" + data = [{'planned-disbursement.value-usd.conversion-currency': "CAD"}] + assert get_currency(key, 
data, 0) == "CAD" + data = [{'planned-disbursement.value-gbp.conversion-currency': "CAD"}] + assert get_currency(key, data, 0) == "CAD" + key = "transaction" + data = [{'transaction.value-usd.conversion-currency': "CAD"}] + assert get_currency(key, data, 0) == "CAD" + data = [{'transaction.value-gbp.conversion-currency': "CAD"}] + assert get_currency(key, data, 0) == "CAD" def test_clean_aggregation_result(): - assert True + aggregation_fields, formatted_aggregation_fields, _, _ = get_aggregation_fields() + data = [ + { + '_id': None, + }, + { + 'activity-aggregation-budget-value': 42 + } + ] + res = clean_aggregation_result(data, aggregation_fields, formatted_aggregation_fields) + assert '_id' not in res[0] + assert 'activity-aggregation-budget-value' not in res[1] + assert res[1]['activity-aggregation.budget.value'] == 42 def test_revert_activity_tvu(): - assert True + activity = { + TVU_DASHES: 1, + TVU_DASHES_TYPE: 2, + TVU_DASHES_GBP: 3, + TVU_DASHES_TYPE_GBP: 4, + } + ex_res = { + TVU_CLEAN: 1, + TVU_CLEAN_TYPE: 2, + TVU_CLEAN_GBP: 3, + TVU_CLEAN_TYPE_GBP: 4, + } + assert revert_activity_tvu(activity) == ex_res def test_process_budget_agg(): - assert True + data = [ + { + 'iati-identifier': 'test1', + 'budget.value-usd.sum': 42, + 'budget.value-gbp.sum': 42, + 'budget.value-usd.conversion-currency': 'USD' + } + ] + # Test that the correct values are appended to the correct data entry + ex_res = data.copy() + ex_res[0]['activity-aggregation-budget-value'] = 42 + ex_res[0]['activity-aggregation-budget-value-usd'] = 42 + ex_res[0]['activity-aggregation-budget-value-gbp'] = 42 + budget_agg = [{'_id': 'test1', 'budget-value-sum': 42}, {'_id': 'testNA', 'budget-value-sum': 21}] + activity_indexes = {'test1': 0} + aggregation_fields, _, _, _ = get_aggregation_fields() + process_budget_agg(budget_agg, activity_indexes, aggregation_fields, data) + assert data == ex_res def test_process_planned_disbursement_agg(): - assert True + data = [ + { + 'iati-identifier': 'test1', + 'planned-disbursement.value-usd.sum': 42, + 'planned-disbursement.value-gbp.sum': 42, + 'planned-disbursement.value-usd.conversion-currency': 'USD', + 'planned-disbursement.value-gbp.conversion-currency': 'USD' + } + ] + # Test that the correct values are appended to the correct data entry + ex_res = data.copy() + ex_res[0]['activity-aggregation-planned-disbursement-value'] = 42 + ex_res[0]['activity-aggregation-planned-disbursement-value-usd'] = 42 + ex_res[0]['activity-aggregation-planned-disbursement-value-gbp'] = 42 + pd_agg = [{'_id': 'test1', 'planned-disbursement-value-sum': 42}, {'_id': 'testNA', 'planned-disbursement-value-sum': 21}] # NOQA: 501 + activity_indexes = {'test1': 0} + aggregation_fields, _, _, _ = get_aggregation_fields() + process_planned_disbursement_agg(pd_agg, activity_indexes, aggregation_fields, data) + assert data == ex_res def test_process_transaction_agg(): - assert True + data = [ + { + 'iati-identifier': 'test1', + }, + { + 'iati-identifier': 'test2', + } + ] + transaction_agg = [ + {'_id': ('test1', 3), 'transaction-value-sum': 42}, + {'_id': ('test2', '2'), 'transaction-value-sum': 42}, + {'_id': ('testNA', 1)} + ] + activity_indexes = {'test1': 0, 'test2': 1} + aggregation_fields, _, _, _ = get_aggregation_fields() + process_transaction_agg(transaction_agg, activity_indexes, aggregation_fields, data) + assert data[0]['activity-aggregation-disbursement-value'] == 42 + assert 'activity-aggregation-outgoing-commitment-value' not in data[1] def test_process_transaction_currency_agg(): - 
assert True + # sample usage + transaction_usd_agg = [ + {'_id': ('test1', 3), 'transaction-value-usd-sum': 42, 'transaction-value-gbp-sum': 42}, + {'_id': ('test2', '2'), 'transaction-value-usd-sum': 42}, + {'_id': ('test3', None), 'transaction-value-usd-sum': 42}, + {'_id': ('testNA', 1)} + ] + activity_indexes = {'test1': 0, 'test2': 1, 'test3': 2} + aggregation_fields, _, _, _ = get_aggregation_fields() + data = [ + { + 'iati-identifier': 'test1', + 'transaction.value-usd.conversion-currency': 'USD', + 'transaction-value-usd-conversion-currency': 'USD', + 'transaction.value-gbp.conversion-currency': 'GBP', + 'transaction-value-gbp-conversion-currency': 'GBP' + }, + { + 'iati-identifier': 'test2', + }, + { + 'iati-identifier': 'test3', + } + ] + + process_transaction_currency_agg(transaction_usd_agg, activity_indexes, aggregation_fields, data, 'usd') + assert data[0]['activity-aggregation-disbursement-value-usd'] == 42 + assert 'activity-aggregation-outgoing-commitment-value-usd' not in data[1] + assert len(data[2].keys()) == 1 + + process_transaction_currency_agg(transaction_usd_agg, activity_indexes, aggregation_fields, data, 'gbp') + assert data[0]['activity-aggregation-disbursement-value-gbp'] == 42 + assert data[0]['activity-aggregation-disbursement-value-usd'] == 42 + + +@pytest.fixture +def fixture_af(): + return {'budget': 'activity-aggregation-budget-value', 'budget_usd': 'activity-aggregation-budget-value-usd', 'budget_gbp': 'activity-aggregation-budget-value-gbp', 'budget_currency': 'activity-aggregation-budget-currency', 'planned_disbursement': 'activity-aggregation-planned-disbursement-value', 'planned_disbursement_usd': 'activity-aggregation-planned-disbursement-value-usd', 'planned_disbursement_gbp': 'activity-aggregation-planned-disbursement-value-gbp', 'planned_disbursement_currency': 'activity-aggregation-planned-disbursement-currency', 'incoming_funds': 'activity-aggregation-incoming-funds-value', 'incoming_funds_usd': 'activity-aggregation-incoming-funds-value-usd', 'incoming_funds_gbp': 'activity-aggregation-incoming-funds-value-gbp', 'incoming_funds_currency': 'activity-aggregation-incoming-funds-currency', 'outgoing_commitment': 'activity-aggregation-outgoing-commitment-value', 'outgoing_commitment_usd': 'activity-aggregation-outgoing-commitment-value-usd', 'outgoing_commitment_gbp': 'activity-aggregation-outgoing-commitment-value-gbp', 'outgoing_commitment_currency': 'activity-aggregation-outgoing-commitment-currency', 'disbursement': 'activity-aggregation-disbursement-value', 'disbursement_usd': 'activity-aggregation-disbursement-value-usd', 'disbursement_gbp': 'activity-aggregation-disbursement-value-gbp', 'disbursement_currency': 'activity-aggregation-disbursement-currency', 'expenditure': 'activity-aggregation-expenditure-value', 'expenditure_usd': 'activity-aggregation-expenditure-value-usd', 'expenditure_gbp': 'activity-aggregation-expenditure-value-gbp', 'expenditure_currency': 'activity-aggregation-expenditure-currency', 'interest_payment': 'activity-aggregation-interest-payment-value', 'interest_payment_usd': 'activity-aggregation-interest-payment-value-usd', 'interest_payment_gbp': 'activity-aggregation-interest-payment-value-gbp', 'interest_payment_currency': 'activity-aggregation-interest-payment-currency', 'loan_repayment': 'activity-aggregation-loan-repayment-value', 'loan_repayment_usd': 'activity-aggregation-loan-repayment-value-usd', 'loan_repayment_gbp': 'activity-aggregation-loan-repayment-value-gbp', 'loan_repayment_currency': 
'activity-aggregation-loan-repayment-currency', 'reimbursement': 'activity-aggregation-reimbursement-value', 'reimbursement_usd': 'activity-aggregation-reimbursement-value-usd', 'reimbursement_gbp': 'activity-aggregation-reimbursement-value-gbp', 'reimbursement_currency': 'activity-aggregation-reimbursement-currency', 'purchase_of_equity': 'activity-aggregation-purchase-of-equity-value', 'purchase_of_equity_usd': 'activity-aggregation-purchase-of-equity-value-usd', 'purchase_of_equity_gbp': 'activity-aggregation-purchase-of-equity-value-gbp', 'purchase_of_equity_currency': 'activity-aggregation-purchase-of-equity-currency', 'sale_of_equity': 'activity-aggregation-sale-of-equity-value', 'sale_of_equity_usd': 'activity-aggregation-sale-of-equity-value-usd', 'sale_of_equity_gbp': 'activity-aggregation-sale-of-equity-value-gbp', 'sale_of_equity_currency': 'activity-aggregation-sale-of-equity-currency', 'credit_guarantee': 'activity-aggregation-credit-guarantee-value', 'credit_guarantee_usd': 'activity-aggregation-credit-guarantee-value-usd', 'credit_guarantee_gbp': 'activity-aggregation-credit-guarantee-value-gbp', 'credit_guarantee_currency': 'activity-aggregation-credit-guarantee-currency', 'incoming_commitment': 'activity-aggregation-incoming-commitment-value', 'incoming_commitment_usd': 'activity-aggregation-incoming-commitment-value-usd', 'incoming_commitment_gbp': 'activity-aggregation-incoming-commitment-value-gbp', 'incoming_commitment_currency': 'activity-aggregation-incoming-commitment-currency', 'outgoing_pledge': 'activity-aggregation-outgoing-pledge-value', 'outgoing_pledge_usd': 'activity-aggregation-outgoing-pledge-value-usd', 'outgoing_pledge_gbp': 'activity-aggregation-outgoing-pledge-value-gbp', 'outgoing_pledge_currency': 'activity-aggregation-outgoing-pledge-currency', 'incoming_pledge': 'activity-aggregation-incoming-pledge-value', 'incoming_pledge_usd': 'activity-aggregation-incoming-pledge-value-usd', 'incoming_pledge_gbp': 'activity-aggregation-incoming-pledge-value-gbp', 'incoming_pledge_currency': 'activity-aggregation-incoming-pledge-currency'} # NOQA: 501 + + +@pytest.fixture +def fixture_faf(): + return {'budget': 'activity-aggregation.budget.value', 'budget_usd': 'activity-aggregation.budget.value-usd', 'budget_gbp': 'activity-aggregation.budget.value-gbp', 'budget_currency': 'activity-aggregation.budget.currency', 'planned_disbursement': 'activity-aggregation.planned-disbursement.value', 'planned_disbursement_usd': 'activity-aggregation.planned-disbursement.value-usd', 'planned_disbursement_gbp': 'activity-aggregation.planned-disbursement.value-gbp', 'planned_disbursement_currency': 'activity-aggregation.planned-disbursement.currency', 'incoming_funds': 'activity-aggregation.incoming-funds.value', 'incoming_funds_usd': 'activity-aggregation.incoming-funds.value-usd', 'incoming_funds_gbp': 'activity-aggregation.incoming-funds.value-gbp', 'incoming_funds_currency': 'activity-aggregation.incoming-funds.currency', 'outgoing_commitment': 'activity-aggregation.outgoing-commitment.value', 'outgoing_commitment_usd': 'activity-aggregation.outgoing-commitment.value-usd', 'outgoing_commitment_gbp': 'activity-aggregation.outgoing-commitment.value-gbp', 'outgoing_commitment_currency': 'activity-aggregation.outgoing-commitment.currency', 'disbursement': 'activity-aggregation.disbursement.value', 'disbursement_usd': 'activity-aggregation.disbursement.value-usd', 'disbursement_gbp': 'activity-aggregation.disbursement.value-gbp', 'disbursement_currency': 
'activity-aggregation.disbursement.currency', 'expenditure': 'activity-aggregation.expenditure.value', 'expenditure_usd': 'activity-aggregation.expenditure.value-usd', 'expenditure_gbp': 'activity-aggregation.expenditure.value-gbp', 'expenditure_currency': 'activity-aggregation.expenditure.currency', 'interest_payment': 'activity-aggregation.interest-payment.value', 'interest_payment_usd': 'activity-aggregation.interest-payment.value-usd', 'interest_payment_gbp': 'activity-aggregation.interest-payment.value-gbp', 'interest_payment_currency': 'activity-aggregation.interest-payment.currency', 'loan_repayment': 'activity-aggregation.loan-repayment.value', 'loan_repayment_usd': 'activity-aggregation.loan-repayment.value-usd', 'loan_repayment_gbp': 'activity-aggregation.loan-repayment.value-gbp', 'loan_repayment_currency': 'activity-aggregation.loan-repayment.currency', 'reimbursement': 'activity-aggregation.reimbursement.value', 'reimbursement_usd': 'activity-aggregation.reimbursement.value-usd', 'reimbursement_gbp': 'activity-aggregation.reimbursement.value-gbp', 'reimbursement_currency': 'activity-aggregation.reimbursement.currency', 'purchase_of_equity': 'activity-aggregation.purchase-of-equity.value', 'purchase_of_equity_usd': 'activity-aggregation.purchase-of-equity.value-usd', 'purchase_of_equity_gbp': 'activity-aggregation.purchase-of-equity.value-gbp', 'purchase_of_equity_currency': 'activity-aggregation.purchase-of-equity.currency', 'sale_of_equity': 'activity-aggregation.sale-of-equity.value', 'sale_of_equity_usd': 'activity-aggregation.sale-of-equity.value-usd', 'sale_of_equity_gbp': 'activity-aggregation.sale-of-equity.value-gbp', 'sale_of_equity_currency': 'activity-aggregation.sale-of-equity.currency', 'credit_guarantee': 'activity-aggregation.credit-guarantee.value', 'credit_guarantee_usd': 'activity-aggregation.credit-guarantee.value-usd', 'credit_guarantee_gbp': 'activity-aggregation.credit-guarantee.value-gbp', 'credit_guarantee_currency': 'activity-aggregation.credit-guarantee.currency', 'incoming_commitment': 'activity-aggregation.incoming-commitment.value', 'incoming_commitment_usd': 'activity-aggregation.incoming-commitment.value-usd', 'incoming_commitment_gbp': 'activity-aggregation.incoming-commitment.value-gbp', 'incoming_commitment_currency': 'activity-aggregation.incoming-commitment.currency', 'outgoing_pledge': 'activity-aggregation.outgoing-pledge.value', 'outgoing_pledge_usd': 'activity-aggregation.outgoing-pledge.value-usd', 'outgoing_pledge_gbp': 'activity-aggregation.outgoing-pledge.value-gbp', 'outgoing_pledge_currency': 'activity-aggregation.outgoing-pledge.currency', 'incoming_pledge': 'activity-aggregation.incoming-pledge.value', 'incoming_pledge_usd': 'activity-aggregation.incoming-pledge.value-usd', 'incoming_pledge_gbp': 'activity-aggregation.incoming-pledge.value-gbp', 'incoming_pledge_currency': 'activity-aggregation.incoming-pledge.currency'} # NOQA: 501 + + +@pytest.fixture +def fixture_caf(): + return {'budget': 'child-aggregation.budget.value', 'budget_usd': 'child-aggregation.budget.value-usd', 'budget_gbp': 'child-aggregation.budget.value-gbp', 'budget_currency': 'child-aggregation.budget.currency', 'planned_disbursement': 'child-aggregation.planned-disbursement.value', 'planned_disbursement_usd': 'child-aggregation.planned-disbursement.value-usd', 'planned_disbursement_gbp': 'child-aggregation.planned-disbursement.value-gbp', 'planned_disbursement_currency': 'child-aggregation.planned-disbursement.currency', 'incoming_funds': 
'child-aggregation.incoming-funds.value', 'incoming_funds_usd': 'child-aggregation.incoming-funds.value-usd', 'incoming_funds_gbp': 'child-aggregation.incoming-funds.value-gbp', 'incoming_funds_currency': 'child-aggregation.incoming-funds.currency', 'outgoing_commitment': 'child-aggregation.outgoing-commitment.value', 'outgoing_commitment_usd': 'child-aggregation.outgoing-commitment.value-usd', 'outgoing_commitment_gbp': 'child-aggregation.outgoing-commitment.value-gbp', 'outgoing_commitment_currency': 'child-aggregation.outgoing-commitment.currency', 'disbursement': 'child-aggregation.disbursement.value', 'disbursement_usd': 'child-aggregation.disbursement.value-usd', 'disbursement_gbp': 'child-aggregation.disbursement.value-gbp', 'disbursement_currency': 'child-aggregation.disbursement.currency', 'expenditure': 'child-aggregation.expenditure.value', 'expenditure_usd': 'child-aggregation.expenditure.value-usd', 'expenditure_gbp': 'child-aggregation.expenditure.value-gbp', 'expenditure_currency': 'child-aggregation.expenditure.currency', 'interest_payment': 'child-aggregation.interest-payment.value', 'interest_payment_usd': 'child-aggregation.interest-payment.value-usd', 'interest_payment_gbp': 'child-aggregation.interest-payment.value-gbp', 'interest_payment_currency': 'child-aggregation.interest-payment.currency', 'loan_repayment': 'child-aggregation.loan-repayment.value', 'loan_repayment_usd': 'child-aggregation.loan-repayment.value-usd', 'loan_repayment_gbp': 'child-aggregation.loan-repayment.value-gbp', 'loan_repayment_currency': 'child-aggregation.loan-repayment.currency', 'reimbursement': 'child-aggregation.reimbursement.value', 'reimbursement_usd': 'child-aggregation.reimbursement.value-usd', 'reimbursement_gbp': 'child-aggregation.reimbursement.value-gbp', 'reimbursement_currency': 'child-aggregation.reimbursement.currency', 'purchase_of_equity': 'child-aggregation.purchase-of-equity.value', 'purchase_of_equity_usd': 'child-aggregation.purchase-of-equity.value-usd', 'purchase_of_equity_gbp': 'child-aggregation.purchase-of-equity.value-gbp', 'purchase_of_equity_currency': 'child-aggregation.purchase-of-equity.currency', 'sale_of_equity': 'child-aggregation.sale-of-equity.value', 'sale_of_equity_usd': 'child-aggregation.sale-of-equity.value-usd', 'sale_of_equity_gbp': 'child-aggregation.sale-of-equity.value-gbp', 'sale_of_equity_currency': 'child-aggregation.sale-of-equity.currency', 'credit_guarantee': 'child-aggregation.credit-guarantee.value', 'credit_guarantee_usd': 'child-aggregation.credit-guarantee.value-usd', 'credit_guarantee_gbp': 'child-aggregation.credit-guarantee.value-gbp', 'credit_guarantee_currency': 'child-aggregation.credit-guarantee.currency', 'incoming_commitment': 'child-aggregation.incoming-commitment.value', 'incoming_commitment_usd': 'child-aggregation.incoming-commitment.value-usd', 'incoming_commitment_gbp': 'child-aggregation.incoming-commitment.value-gbp', 'incoming_commitment_currency': 'child-aggregation.incoming-commitment.currency', 'outgoing_pledge': 'child-aggregation.outgoing-pledge.value', 'outgoing_pledge_usd': 'child-aggregation.outgoing-pledge.value-usd', 'outgoing_pledge_gbp': 'child-aggregation.outgoing-pledge.value-gbp', 'outgoing_pledge_currency': 'child-aggregation.outgoing-pledge.currency', 'incoming_pledge': 'child-aggregation.incoming-pledge.value', 'incoming_pledge_usd': 'child-aggregation.incoming-pledge.value-usd', 'incoming_pledge_gbp': 'child-aggregation.incoming-pledge.value-gbp', 'incoming_pledge_currency': 
'child-aggregation.incoming-pledge.currency'} # NOQA: 501 + + +@pytest.fixture +def fixture_ppcaf(): + return {'budget': 'activity-plus-child-aggregation.budget.value', 'budget_usd': 'activity-plus-child-aggregation.budget.value-usd', 'budget_gbp': 'activity-plus-child-aggregation.budget.value-gbp', 'budget_currency': 'activity-plus-child-aggregation.budget.currency', 'planned_disbursement': 'activity-plus-child-aggregation.planned-disbursement.value', 'planned_disbursement_usd': 'activity-plus-child-aggregation.planned-disbursement.value-usd', 'planned_disbursement_gbp': 'activity-plus-child-aggregation.planned-disbursement.value-gbp', 'planned_disbursement_currency': 'activity-plus-child-aggregation.planned-disbursement.currency', 'incoming_funds': 'activity-plus-child-aggregation.incoming-funds.value', 'incoming_funds_usd': 'activity-plus-child-aggregation.incoming-funds.value-usd', 'incoming_funds_gbp': 'activity-plus-child-aggregation.incoming-funds.value-gbp', 'incoming_funds_currency': 'activity-plus-child-aggregation.incoming-funds.currency', 'outgoing_commitment': 'activity-plus-child-aggregation.outgoing-commitment.value', 'outgoing_commitment_usd': 'activity-plus-child-aggregation.outgoing-commitment.value-usd', 'outgoing_commitment_gbp': 'activity-plus-child-aggregation.outgoing-commitment.value-gbp', 'outgoing_commitment_currency': 'activity-plus-child-aggregation.outgoing-commitment.currency', 'disbursement': 'activity-plus-child-aggregation.disbursement.value', 'disbursement_usd': 'activity-plus-child-aggregation.disbursement.value-usd', 'disbursement_gbp': 'activity-plus-child-aggregation.disbursement.value-gbp', 'disbursement_currency': 'activity-plus-child-aggregation.disbursement.currency', 'expenditure': 'activity-plus-child-aggregation.expenditure.value', 'expenditure_usd': 'activity-plus-child-aggregation.expenditure.value-usd', 'expenditure_gbp': 'activity-plus-child-aggregation.expenditure.value-gbp', 'expenditure_currency': 'activity-plus-child-aggregation.expenditure.currency', 'interest_payment': 'activity-plus-child-aggregation.interest-payment.value', 'interest_payment_usd': 'activity-plus-child-aggregation.interest-payment.value-usd', 'interest_payment_gbp': 'activity-plus-child-aggregation.interest-payment.value-gbp', 'interest_payment_currency': 'activity-plus-child-aggregation.interest-payment.currency', 'loan_repayment': 'activity-plus-child-aggregation.loan-repayment.value', 'loan_repayment_usd': 'activity-plus-child-aggregation.loan-repayment.value-usd', 'loan_repayment_gbp': 'activity-plus-child-aggregation.loan-repayment.value-gbp', 'loan_repayment_currency': 'activity-plus-child-aggregation.loan-repayment.currency', 'reimbursement': 'activity-plus-child-aggregation.reimbursement.value', 'reimbursement_usd': 'activity-plus-child-aggregation.reimbursement.value-usd', 'reimbursement_gbp': 'activity-plus-child-aggregation.reimbursement.value-gbp', 'reimbursement_currency': 'activity-plus-child-aggregation.reimbursement.currency', 'purchase_of_equity': 'activity-plus-child-aggregation.purchase-of-equity.value', 'purchase_of_equity_usd': 'activity-plus-child-aggregation.purchase-of-equity.value-usd', 'purchase_of_equity_gbp': 'activity-plus-child-aggregation.purchase-of-equity.value-gbp', 'purchase_of_equity_currency': 'activity-plus-child-aggregation.purchase-of-equity.currency', 'sale_of_equity': 'activity-plus-child-aggregation.sale-of-equity.value', 'sale_of_equity_usd': 'activity-plus-child-aggregation.sale-of-equity.value-usd', 'sale_of_equity_gbp': 
'activity-plus-child-aggregation.sale-of-equity.value-gbp', 'sale_of_equity_currency': 'activity-plus-child-aggregation.sale-of-equity.currency', 'credit_guarantee': 'activity-plus-child-aggregation.credit-guarantee.value', 'credit_guarantee_usd': 'activity-plus-child-aggregation.credit-guarantee.value-usd', 'credit_guarantee_gbp': 'activity-plus-child-aggregation.credit-guarantee.value-gbp', 'credit_guarantee_currency': 'activity-plus-child-aggregation.credit-guarantee.currency', 'incoming_commitment': 'activity-plus-child-aggregation.incoming-commitment.value', 'incoming_commitment_usd': 'activity-plus-child-aggregation.incoming-commitment.value-usd', 'incoming_commitment_gbp': 'activity-plus-child-aggregation.incoming-commitment.value-gbp', 'incoming_commitment_currency': 'activity-plus-child-aggregation.incoming-commitment.currency', 'outgoing_pledge': 'activity-plus-child-aggregation.outgoing-pledge.value', 'outgoing_pledge_usd': 'activity-plus-child-aggregation.outgoing-pledge.value-usd', 'outgoing_pledge_gbp': 'activity-plus-child-aggregation.outgoing-pledge.value-gbp', 'outgoing_pledge_currency': 'activity-plus-child-aggregation.outgoing-pledge.currency', 'incoming_pledge': 'activity-plus-child-aggregation.incoming-pledge.value', 'incoming_pledge_usd': 'activity-plus-child-aggregation.incoming-pledge.value-usd', 'incoming_pledge_gbp': 'activity-plus-child-aggregation.incoming-pledge.value-gbp', 'incoming_pledge_currency': 'activity-plus-child-aggregation.incoming-pledge.currency'} # NOQA: 501 + + +@pytest.fixture +def fixture_activity_aggregations_res(): + b_res = [{'_id': None, 'budget-value-sum': 126}] + t_res = [{'_id': [None, 1], 'transaction-value-sum': 84}, + {'_id': [None, 2], 'transaction-value-sum': 42}] + p_res = [{'_id': None, 'planned-disbursement-value-sum': 126}] + + return { + 'budget': b_res, + 'transaction': t_res, + 'transaction-usd': [], + 'transaction-gbp': [], + 'planned-disbursement': p_res + } + + +@pytest.fixture +def fixture_aggregation_data(): + budget_data = [{'iati-identifier': 'test1', 'budget': [{'value': 42}, {'value': 42}]}, {'iati-identifier': 'test1', 'budget': [{'value': 21}, {'value': 21}]}] # NOQA: 501 + transaction_data = [{'iati-identifier': 'test1', 'transaction': [{'value': 42, 'transaction-type': {'code': 1}}, {'value': 42, 'transaction-type': {'code': 1}}]}, {'iati-identifier': 'test1', 'transaction': [{'value': 21, 'transaction-type': {'code': 2}}, {'value': 21, 'transaction-type': {'code': 2}}]}] # NOQA: 501 + pd_data = [{'iati-identifier': 'test1', 'planned-disbursement': [{'value': 42}, {'value': 42}]}, {'iati-identifier': 'test1', 'planned-disbursement': [{'value': 21}, {'value': 21}]}] # NOQA: 501 + data = budget_data + transaction_data + pd_data + return data + + +@pytest.fixture +def fixture_simple_data_list_activity_with_child(): + # At this time in the processing, the activity objects are updated with the aggregated values + # A budget value is also provided as activity-aggregation-budget-value + return [ + { + "iati-identifier": "test1", + "budget": [ + { + "value": 42, + "value-date": "2019-01-01", + } + ], + "related-activity": [ + {'ref': 'test2', 'type': 2}, + {'ref': 'test3', 'type': 2}, + ], + "activity-aggregation-budget-value": 42 + }, + { + "iati-identifier": "test2", + "budget": [ + { + "value": 21, + "value-date": "2019-01-01", + } + ], + "related-activity": [ + {'ref': 'test1', 'type': 1}, + ], + "activity-aggregation-budget-value": 21 + }, + { + "iati-identifier": "test3", + "budget": [ + { + "value": 21, + 
"value-date": "2019-01-01", + } + ], + "related-activity": [ + {'ref': 'test1', 'type': 1}, + ], + "activity-aggregation-budget-value": 21 + }, + { + "iati-identifier": "test4", + "budget": [ + { + "value": 21, + "value-date": "2019-01-01", + } + ], + "related-activity": [ + {'ref': 'testNA', 'type': 1}, + ], + "activity-aggregation-budget-value": 21 + } + ] + + +@pytest.fixture +def fixture_simple_data_list_activity_with_child_aggregated(): + return [ + { + "_id": "test2", # Grouped by "related-activity.ref" + "budget_value": 21, # Sum of "activity-aggregation-budget-value" for related-activity.type: 1 + }, + { + "_id": "test3", # Grouped by "related-activity.ref" + "budget_value": 21, # Sum of "activity-aggregation-budget-value" for related-activity.type: 1 + }, + { + "_id": "test1", + "budget_value": 42, + } + ] From 4699699f71ca8f70d70bb648b908a776ec2ea2c7 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 15 Nov 2023 15:30:29 +0100 Subject: [PATCH 38/49] chore: update django version --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ebae3cd8a..11ed342c6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ # Django installation -Django==4.2.3 +Django==4.2.7 ## Dependencies asgiref==3.6.0 sqlparse==0.4.4 From cef497dbdf95662426e18927e5b911bb19892165 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 15 Nov 2023 15:30:55 +0100 Subject: [PATCH 39/49] chore: update static submodule --- static | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/static b/static index 8e03cb01f..906c01ab9 160000 --- a/static +++ b/static @@ -1 +1 @@ -Subproject commit 8e03cb01fb94551686a4e41922dfeb5a6a2748fa +Subproject commit 906c01ab9f3e83c2d0110ac078fd1798a755d150 From 3d7e2dc8ce3e1119026bf88c3a962df2918cbfb2 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Wed, 15 Nov 2023 15:35:05 +0100 Subject: [PATCH 40/49] feat: updated license to MIT --- LICENSE.MD | 682 ++--------------------------------------------------- 1 file changed, 21 insertions(+), 661 deletions(-) diff --git a/LICENSE.MD b/LICENSE.MD index 360064d5d..77e2478e1 100644 --- a/LICENSE.MD +++ b/LICENSE.MD @@ -1,661 +1,21 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. 
- - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. - - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. - - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. - - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU Affero General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. 
If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. 
- - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Remote Network Interaction; Use with the GNU General Public License. 
- - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. 
- - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) 2011 Zimmerman - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published - by the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -. +MIT License + +Copyright (c) [2023] [Zimmerman B.V.] 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. From 167e73cffa871257c1bf186df7bc0a4f27ac2d2f Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:02:07 +0100 Subject: [PATCH 41/49] chore: allow .coverage file for future coverage review --- .gitignore | 2 -- 1 file changed, 2 deletions(-) diff --git a/.gitignore b/.gitignore index 470f2d92a..a91b82569 100644 --- a/.gitignore +++ b/.gitignore @@ -32,5 +32,3 @@ session.vim # Python cache __pycache__ - -.coverage From db58aa92ba87df6c4a7f109fb95b4589818b4988 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:02:41 +0100 Subject: [PATCH 42/49] chore: update readme badge for license change --- README.MD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.MD b/README.MD index 64db08935..c7f0500b3 100644 --- a/README.MD +++ b/README.MD @@ -1,7 +1,7 @@ # IATI.cloud [![Quality Gate Status](https://sonarcloud.io/api/project_badges/measure?project=zimmerman-zimmerman_iati.cloud&metric=alert_status)](https://sonarcloud.io/dashboard?id=zimmerman-zimmerman_iati.cloud) -[![License: AGPLv3](https://img.shields.io/badge/License-AGPL%20v3-blue.svg)](https://github.com/zimmerman-zimmerman/OIPA/blob/main/LICENSE.MD) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Open issues](https://img.shields.io/github/issues/zimmerman-zimmerman/OIPA.svg?style=flat)](https://github.com/zimmerman-team/iati.cloud/issues) --- From a782e1db77ea2c38e140568173de52d54d3e2958 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:03:11 +0100 Subject: [PATCH 43/49] chore: add coverage to requirements --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 11ed342c6..135c04b7d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -60,6 +60,7 @@ pytest-celery==0.0.0 pytest-django==4.6.0 pytest-mock==3.12.0 pytest-cov==4.1.0 +coverage==7.3.2 requests-mock==1.11.0 # Working with XML Documents (legacy currency convert) From 49bb5d558aa47f3ab4bd17a14b026a9ca59d20e8 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:12:50 +0100 Subject: [PATCH 44/49] chore: update readme about testing --- README.MD | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.MD b/README.MD index c7f0500b3..12ae23078 100644 --- a/README.MD +++ b/README.MD @@ -95,6 +95,10 @@ test: Adding missing or correcting existing tests chore: Changes to the build process or auxiliary tools and libraries 
such as documentation generation ``` +## Testing +We test with `pytest`, and use `coverage` to generate coverage reports. +You can use `. scripts/cov.sh` to quickly run all tests and generate a coverage report. This also conveniently prints the location of the coverage HTML report, which can be viewed in your browser. + ## Contributing ### Can I contribute? Yes! We are mainly looking for coders to help on the project. If you are a coder feel free to _Fork_ the repository and send us your amazing Pull Requests! From 49d4df70e0a4ad1c2dcc257c7fc83d1d76542cf8 Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:13:04 +0100 Subject: [PATCH 45/49] feat: added recent coverage report --- .coverage | Bin 0 -> 53248 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 .coverage diff --git a/.coverage b/.coverage new file mode 100644 index 0000000000000000000000000000000000000000..954e34b8c2cd2bfae54475e5bbad44d429065339 From 03ce3f5f0763cf1573c1c7beaf47bac6f6d1fe6b Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:13:16 +0100 Subject: [PATCH 46/49] feat: added coverage script --- scripts/cov.sh | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 scripts/cov.sh diff --git a/scripts/cov.sh b/scripts/cov.sh new file mode 100644 index 000000000..ef4e0713d --- /dev/null +++ b/scripts/cov.sh @@ -0,0 +1,3 @@ +pytest --cov=direct_indexing --cov-report html:./coverage_report tests + +echo $(pwd)/coverage_report/index.html From f2f948920096a6a2b68a57f9e95ea3b71941ae3f Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:13:36 +0100
Subject: [PATCH 47/49] refactor: updated None objects in tests --- .../custom_fields/test_custom_fields.py | 13 +++++++------ .../processing/test_activity_subtypes.py | 11 ++++++----- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/tests/direct_indexing/custom_fields/test_custom_fields.py b/tests/direct_indexing/custom_fields/test_custom_fields.py index 8117232cf..bb748c08c 100644 --- a/tests/direct_indexing/custom_fields/test_custom_fields.py +++ b/tests/direct_indexing/custom_fields/test_custom_fields.py @@ -7,25 +7,25 @@ def test_add_all(mocker): mock_pa = mocker.patch('direct_indexing.custom_fields.custom_fields.process_activity') mock_ca = mocker.patch('direct_indexing.custom_fields.custom_fields.currency_aggregation') mock_h2 = mocker.patch('direct_indexing.custom_fields.custom_fields.raise_h2_budget_data_to_h1') - + mock = mocker.MagicMock() # Test that the h2 function is not called when fcdo instance is false, # and that the process_activity and currency_aggregation functions are called once mocker.patch(FCDO_IN, False) data = {} - add_all(data, None, None, None) + add_all(data, mock, mock, {}) mock_pa.assert_called_once() mock_ca.assert_called_once() mock_h2.assert_not_called() # Test that the process_activity function is called len(data) times data = [{}, {}] - add_all(data, None, None, None) + add_all(data, mock, mock, {}) assert mock_pa.call_count == len(data) + 1 # +1 for the previous test # Test that the h2 function is called when fcdo instance is true mocker.patch(FCDO_IN, True) data = {} - add_all(data, None, None, None) + add_all(data, mock, mock, {}) mock_h2.assert_called_once() @@ -41,11 +41,12 @@ def test_process_activity(mocker): mock_ajd = mocker.patch('direct_indexing.custom_fields.custom_fields.add_json_dumps') mock_adq = mocker.patch('direct_indexing.custom_fields.custom_fields.add_date_quarter_fields') mock_dlcc = mocker.patch('direct_indexing.custom_fields.custom_fields.document_link_category_combined') + mock = mocker.MagicMock() # Test that all subfunctions are called once mocker.patch(FCDO_IN, False) activity = {} - process_activity(activity, None, None, None) + process_activity(activity, mock, mock, {}) mock_ac.assert_called_once() mock_tn.assert_called_once() mock_ad.assert_called_once() @@ -60,7 +61,7 @@ def test_process_activity(mocker): # Test that the remaining functions are called when FCDO_INSTANCE is True mocker.patch(FCDO_IN, True) - process_activity(activity, None, None, None) + process_activity(activity, mock, mock, {}) mock_ajd.assert_called_once() mock_adq.assert_called_once() mock_dlcc.assert_called_once() diff --git a/tests/direct_indexing/processing/test_activity_subtypes.py b/tests/direct_indexing/processing/test_activity_subtypes.py index ec1ad0b86..4750854a4 100644 --- a/tests/direct_indexing/processing/test_activity_subtypes.py +++ b/tests/direct_indexing/processing/test_activity_subtypes.py @@ -23,25 +23,26 @@ def test_extract_subtype(mocker): assert mock_process.call_count == len(data.keys()) # once for each key in data -def test_process_subtype_dict(): +def test_process_subtype_dict(mocker): tvu = 'transaction.value-usd' bvu = 'budget.value-usd' # Test if key is in AVAILABLE_SUBTYPES, it is nothing changes in subtype_dict subtype_dict = {'transaction': {'value': 1}} expected_res = subtype_dict.copy() key = 'transaction' - res = process_subtype_dict(subtype_dict, key, None, None, None, None) + mock = mocker.MagicMock() + res = process_subtype_dict(subtype_dict, key, mock, None, None, None) assert res == expected_res # 
Test if key is in exclude fields we do not include it in the subtype dict - res = process_subtype_dict(subtype_dict, bvu, None, None, [bvu], None) + res = process_subtype_dict(subtype_dict, bvu, mock, None, [bvu], None) assert res == expected_res # Test that a specific value which is a dict can be retrieved data = {'title': 'title', 'budget': {'value': 1}, tvu: 1.1} expected_res = subtype_dict.copy() expected_res[tvu] = 1.1 - res = process_subtype_dict(subtype_dict, tvu, None, data, [], [tvu]) + res = process_subtype_dict(subtype_dict, tvu, mock, data, [], [tvu]) assert res == expected_res # Test that the value of a specific element is extracted from the list @@ -53,7 +54,7 @@ def test_process_subtype_dict(): # Test that additional fields are kept without modification expected_res['title'] = 'title' - res = process_subtype_dict(subtype_dict, 'title', None, data, [], []) + res = process_subtype_dict(subtype_dict, 'title', mock, data, [], []) assert res == expected_res From 2b7aeda91e5c7607ad171f8f4555a65b62669e6c Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:28:30 +0100 Subject: [PATCH 48/49] refactor: update tests to use html --- tests/direct_indexing/cleaning/test_metadata.py | 2 +- .../direct_indexing/custom_fields/test_codelists.py | 6 +++--- tests/direct_indexing/metadata/test_dataset.py | 4 ++-- tests/direct_indexing/metadata/test_util.py | 2 +- .../processing/test_activity_subtypes.py | 4 ++-- tests/direct_indexing/processing/test_util.py | 12 ++++++------ tests/direct_indexing/test_util.py | 4 ++-- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/tests/direct_indexing/cleaning/test_metadata.py b/tests/direct_indexing/cleaning/test_metadata.py index 6049619bc..dda52f75d 100644 --- a/tests/direct_indexing/cleaning/test_metadata.py +++ b/tests/direct_indexing/cleaning/test_metadata.py @@ -70,7 +70,7 @@ def fixture_dataset_metadata(): "description": "", "metadata_modified": "2022-05-05T23:56:00.301819", "cache_last_updated": "", - "url": "http://formin.finland.fi/opendata/IATI/Finland_total_2012.xml", + "url": "https://formin.finland.fi/opendata/IATI/Finland_total_2012.xml", "format": "iati-xml", "state": "active", "created": "2022-04-20T07:16:11.709336", diff --git a/tests/direct_indexing/custom_fields/test_codelists.py b/tests/direct_indexing/custom_fields/test_codelists.py index ef33d26cd..682f98a6f 100644 --- a/tests/direct_indexing/custom_fields/test_codelists.py +++ b/tests/direct_indexing/custom_fields/test_codelists.py @@ -1856,14 +1856,14 @@ def fixture_cl(monkeypatch): "code": "2", "name": "UN Sustainable Development Goals (SDG)", "description": "A value from the top-level list of UN sustainable development goals (SDGs) (e.g. \u20181\u2019)", # NOQA: E501 - "url": "http://reference.iatistandard.org/codelists/UNSDG-Goals/", + "url": "https://reference.iatistandard.org/codelists/UNSDG-Goals/", "status": "active" }, { "code": "3", "name": "UN Sustainable Development Goals (SDG) Targets", "description": "A value from the second-level list of UN sustainable development goals (SDGs) (e.g. 
\u20181.1\u2019)", # NOQA: E501 - "url": "http://reference.iatistandard.org/codelists/UNSDG-Targets/", + "url": "https://reference.iatistandard.org/codelists/UNSDG-Targets/", "status": "active" }, { @@ -2246,7 +2246,7 @@ def fixture_cl(monkeypatch): "code": "1", "name": "OECD DAC CRS", "description": "The policy marker is an OECD DAC CRS policy marker, Reported in columns 20-23, 28-31 and 54 of CRS++ reporting format.", # NOQA: E501 - "url": "http://reference.iatistandard.org/codelists/PolicyMarker/", + "url": "https://reference.iatistandard.org/codelists/PolicyMarker/", "status": "active" }, { diff --git a/tests/direct_indexing/metadata/test_dataset.py b/tests/direct_indexing/metadata/test_dataset.py index 4da5a1e7c..d63232426 100644 --- a/tests/direct_indexing/metadata/test_dataset.py +++ b/tests/direct_indexing/metadata/test_dataset.py @@ -92,8 +92,8 @@ def test_load_codelists(mocker): def test__get_existing_datasets(mocker, requests_mock, fixture_solr_dataset, fixture_existing_datasets): - mocker.patch('direct_indexing.metadata.dataset.settings.SOLR_DATASET', "http://test.com") - requests_mock.get("http://test.com" + ( + mocker.patch('direct_indexing.metadata.dataset.settings.SOLR_DATASET', "https://test.com") + requests_mock.get("https://test.com" + ( '/select?q=resources.hash:* AND extras.filetype:*' ' AND id:*&rows=100000&wt=json&fl=resources.hash,id,extras.filetype' ), json=fixture_solr_dataset) diff --git a/tests/direct_indexing/metadata/test_util.py b/tests/direct_indexing/metadata/test_util.py index e5f567610..b63fa6f54 100644 --- a/tests/direct_indexing/metadata/test_util.py +++ b/tests/direct_indexing/metadata/test_util.py @@ -10,7 +10,7 @@ # consts SETTINGS_FRESH = 'direct_indexing.metadata.util.settings.FRESH' SETTINGS_DATASET_PARENT_PATH = 'direct_indexing.metadata.util.settings.DATASET_PARENT_PATH' -TEST_URL = 'http://test.com' +TEST_URL = 'https://test.com' def test_retrieve(mocker, tmp_path, sample_data, requests_mock): diff --git a/tests/direct_indexing/processing/test_activity_subtypes.py b/tests/direct_indexing/processing/test_activity_subtypes.py index 4750854a4..add89730d 100644 --- a/tests/direct_indexing/processing/test_activity_subtypes.py +++ b/tests/direct_indexing/processing/test_activity_subtypes.py @@ -31,11 +31,11 @@ def test_process_subtype_dict(mocker): expected_res = subtype_dict.copy() key = 'transaction' mock = mocker.MagicMock() - res = process_subtype_dict(subtype_dict, key, mock, None, None, None) + res = process_subtype_dict(subtype_dict, key, mock, mock, None, None) assert res == expected_res # Test if key is in exclude fields we do not include it in the subtype dict - res = process_subtype_dict(subtype_dict, bvu, mock, None, [bvu], None) + res = process_subtype_dict(subtype_dict, bvu, mock, mock, [bvu], None) assert res == expected_res # Test that a specific value which is a dict can be retrieved diff --git a/tests/direct_indexing/processing/test_util.py b/tests/direct_indexing/processing/test_util.py index 9367b2d8c..6ef31da0c 100644 --- a/tests/direct_indexing/processing/test_util.py +++ b/tests/direct_indexing/processing/test_util.py @@ -107,7 +107,7 @@ def fixture_dataset_activity(): "description": None, "metadata_modified": "2023-10-29T05:49:15.354998", "cache_last_updated": None, - "url": "http://iati.fcdo.gov.uk/iati_files/solr/FCDO-set-1.xml", + "url": "https://iati.fcdo.gov.uk/iati_files/solr/FCDO-set-1.xml", "format": "IATI-XML", "state": "active", "created": "2023-10-29T05:49:12.216564", @@ -170,7 +170,7 @@ def 
fixture_dataset_activity(): "value": "Not Found" } ], - "license_url": "http://reference.data.gov.uk/id/open-government-licence", + "license_url": "https://reference.data.gov.uk/id/open-government-licence", "organization": { "description": "", "title": "UK - Foreign, Commonwealth and Development Office", @@ -178,7 +178,7 @@ def fixture_dataset_activity(): "approval_status": "approved", "is_organization": True, "state": "active", - "image_url": "http://iati.fcdo.gov.uk/iati_files/FCDO_logo.png", + "image_url": "https://iati.fcdo.gov.uk/iati_files/FCDO_logo.png", "type": "organization", "id": "4da32e41-a060-4d75-86c1-4b627eb22647", "name": "fcdo" @@ -213,7 +213,7 @@ def fixture_dataset_organisation(): "description": "", "metadata_modified": "2023-11-03T04:58:51.148529", "cache_last_updated": None, - "url": "http://iati.fcdo.gov.uk/iati_files/organisation.xml", + "url": "https://iati.fcdo.gov.uk/iati_files/organisation.xml", "format": "IATI-XML", "state": "active", "created": "2020-09-01T11:35:05.318259", @@ -273,7 +273,7 @@ def fixture_dataset_organisation(): "value": "Success" } ], - "license_url": "http://reference.data.gov.uk/id/open-government-licence", + "license_url": "https://reference.data.gov.uk/id/open-government-licence", "organization": { "description": "", "title": "UK - Foreign, Commonwealth and Development Office", @@ -281,7 +281,7 @@ def fixture_dataset_organisation(): "approval_status": "approved", "is_organization": True, "state": "active", - "image_url": "http://iati.fcdo.gov.uk/iati_files/FCDO_logo.png", + "image_url": "https://iati.fcdo.gov.uk/iati_files/FCDO_logo.png", "type": "organization", "id": "4da32e41-a060-4d75-86c1-4b627eb22647", "name": "fcdo" diff --git a/tests/direct_indexing/test_util.py b/tests/direct_indexing/test_util.py index bf51c4dc7..7eef34d34 100644 --- a/tests/direct_indexing/test_util.py +++ b/tests/direct_indexing/test_util.py @@ -12,7 +12,7 @@ # Test clear_core function def test_clear_core(mocker): # Define the core URL - core_url = "http://example.com/solr/core" + core_url = "https://example.com/solr/core" # Mock the pysolr.Solr instance mock_solr = mocker.patch('pysolr.Solr') @@ -53,7 +53,7 @@ def test_index_to_core(tmp_path, mocker): with open(json_path, 'w') as file: file.write('{"key": "value"}') - url = "http://example.com/solr/core" + url = "https://example.com/solr/core" OP = "subprocess.check_output" # SUCCESSFUL INDEX: # Mock subprocess.check_output to simulate success From ff2d14c94aef0705850fcac4491557ea16f47cbb Mon Sep 17 00:00:00 2001 From: Sylvan Ridderinkhof Date: Tue, 28 Nov 2023 15:35:29 +0100 Subject: [PATCH 49/49] chore: removed greetings demo workflow --- .github/workflows/greetings.yml | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 .github/workflows/greetings.yml diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml deleted file mode 100644 index edcba70b4..000000000 --- a/.github/workflows/greetings.yml +++ /dev/null @@ -1,13 +0,0 @@ -name: Greetings - -on: [pull_request, issues] - -jobs: - greeting: - runs-on: ubuntu-latest - steps: - - uses: actions/first-interaction@v1 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - issue-message: 'Thank you for creating your first issue on our repo! We''ll take a look at it shortly.' - pr-message: 'Thank you for creating your first pull request on our repo! We''ll take a look at it shortly.'