Commit

Merge pull request #145 from Chia-Network/feat/paginate-cadt-requests-2
feat: paginate cadt request
MichaelTaylor3D authored Dec 12, 2023
2 parents c5efce9 + c241081 commit 98874ec
Showing 4 changed files with 64 additions and 28 deletions.
2 changes: 1 addition & 1 deletion app/api/v1/cron.py
@@ -132,7 +132,7 @@ async def _scan_token_activity(
 
             # This is causing logging for benign errors, so commenting out for now
             # except json.JSONDecodeError as e:
-            # logger.error(f"Failed to parse JSON for key {key} in organization {org_name}: {str(e)}")
+            # logger.error(f"Failed to parse JSON for key {key} in organization {org_name}: {str(e)}")
             except Exception as e:
                 logger.error(f"An error occurred for organization {org_name} under key {key}: {str(e)}")
 
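The hunk above sits inside a per-organization key scan, where a JSONDecodeError handler was previously commented out in favor of the broad catch. A minimal sketch of that log-and-continue pattern, with hypothetical loop and variable names (the actual loop body lies outside this hunk):

import json
import logging

logger = logging.getLogger(__name__)


def scan_org_records(org_name: str, records: dict) -> list:
    """Decode each stored value for an organization, skipping entries that fail."""
    decoded = []
    for key, raw in records.items():
        try:
            decoded.append(json.loads(raw))
        except Exception as e:
            # Broad catch, as in the diff: one bad record must not abort the whole scan
            logger.error(f"An error occurred for organization {org_name} under key {key}: {str(e)}")
    return decoded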
80 changes: 58 additions & 22 deletions app/crud/chia.py
@@ -35,36 +35,72 @@ def _headers(self) -> Dict[str, str]:
 
         return headers
 
-    def get_climate_units(self, search: Dict[str, Any]) -> Any:
+    def _get_paginated_data(self, path: str, search_params: Dict[str, Any]) -> List[Any]:
+        """
+        Generic function to retrieve paginated data from a given path.
+        Args:
+            path: API endpoint path.
+            search_params: A dictionary of search parameters including pagination.
+        Returns:
+            A list of all data retrieved from the paginated API.
+        """
+        all_data = []
+        page = 1
+        limit = 10
+
         try:
-            params = urlencode(search)
-            url = urlparse(self.url + "/v1/units")
+            while True:
+                # Update search parameters with current page and limit
+                params = {**search_params, "page": page, "limit": limit}
+                encoded_params = urlencode(params)
 
-            r = requests.get(url.geturl(), params=params, headers=self._headers())
-            if r.status_code != requests.codes.ok:
-                logger.error(f"Request Url: {r.url} Error Message: {r.text}")
-                raise error_code.internal_server_error(message="Call Climate API Failure")
+                # Construct the URL
+                url = urlparse(f"{self.url}{path}?{encoded_params}")
 
-            return r.json()
+                response = requests.get(url.geturl(), headers=self._headers())
+                if response.status_code != requests.codes.ok:
+                    logger.error(f"Request Url: {response.url} Error Message: {response.text}")
+                    raise error_code.internal_server_error(message="API Call Failure")
 
-        except TimeoutError as e:
-            logger.error("Call Climate API Timeout, ErrorMessage: " + str(e))
-            raise error_code.internal_server_error("Call Climate API Timeout")
+                data = response.json()
 
-    def get_climate_projects(self) -> Any:
-        try:
-            url = urlparse(self.url + "/v1/projects")
+                all_data.extend(data["data"])  # Add data from the current page
 
-            r = requests.get(url.geturl(), headers=self._headers())
-            if r.status_code != requests.codes.ok:
-                logger.error(f"Request Url: {r.url} Error Message: {r.text}")
-                raise error_code.internal_server_error(message="Call Climate API Failure")
+                if page >= data["pageCount"]:
+                    break  # Exit loop if all pages have been processed
 
-            return r.json()
+                page += 1
+
+            return all_data
 
         except TimeoutError as e:
-            logger.error("Call Climate API Timeout, ErrorMessage: " + str(e))
-            raise error_code.internal_server_error("Call Climate API Timeout")
+            logger.error("API Call Timeout, ErrorMessage: " + str(e))
+            raise error_code.internal_server_error("API Call Timeout")
+
+    def get_climate_units(self, search: Dict[str, Any]) -> Any:
+        """
+        Retrieves all climate units using pagination and given search parameters.
+        Args:
+            search: A dictionary of search parameters.
+        Returns:
+            A JSON object containing all the climate units.
+        """
+        search_with_marketplace = {**search, "hasMarketplaceIdentifier": True}
+        return self._get_paginated_data("/v1/units", search_with_marketplace)
+
+    def get_climate_projects(self) -> Any:
+        """
+        Retrieves all climate projects using pagination.
+        Returns:
+            A JSON object containing all the climate projects.
+        """
+        search_params = {"onlyMarketplaceProjects": True}
+        return self._get_paginated_data("/v1/projects", search_params)
 
     def get_climate_organizations(self) -> Any:
         try:
@@ -142,7 +178,7 @@ def combine_climate_units_and_metadata(self, search: Dict[str, Any]) -> List[Dic
             try:
                 warehouse_project_id = unit["issuance"]["warehouseProjectId"]
                 project = project_by_id[warehouse_project_id]
-            except KeyError:
+            except (KeyError, TypeError):
                 logger.warning(f"Can not get project by warehouse_project_id: {warehouse_project_id}")
                 continue
 
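The heart of the refactor is a single page/limit loop that keeps fetching until the server-reported pageCount is exhausted, replacing the two one-shot GET helpers. A standalone sketch of the same pattern, assuming a CADT-style endpoint that responds with {"data": [...], "pageCount": N} as the diff shows (base URL, limit, and timeout are placeholders):

from typing import Any, Dict, List
from urllib.parse import urlencode

import requests


def fetch_all_pages(base_url: str, path: str, search_params: Dict[str, Any]) -> List[Any]:
    """Collect every page of a paginated endpoint into a single list."""
    all_data: List[Any] = []
    page = 1
    while True:
        params = {**search_params, "page": page, "limit": 10}
        response = requests.get(f"{base_url}{path}?{urlencode(params)}", timeout=30)
        response.raise_for_status()
        payload = response.json()
        all_data.extend(payload["data"])  # accumulate the current page
        if page >= payload["pageCount"]:  # server reports total pages
            break
        page += 1
    return all_data

Note that the loop trusts the server's pageCount; for an API that omitted it, stopping on an empty data array would be the usual fallback.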
9 changes: 4 additions & 5 deletions app/logger.py
@@ -1,14 +1,13 @@
 from __future__ import annotations
 
+import importlib.metadata
 import logging
 
 import uvicorn
-import toml
 
 from app.config import settings
 
-# Parse pyproject.toml to get the version
-with open('pyproject.toml', 'r') as toml_file:
-    pyproject = toml.load(toml_file)
-version = pyproject['tool']['poetry']['version']
+version = importlib.metadata.version("Chia Climate Token Driver")
 
 # Define the log format with version
 log_format = f"%(asctime)s,%(msecs)d {version} %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s"
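This change reads the version from the installed distribution's metadata instead of parsing pyproject.toml at runtime, which breaks once that file is not shipped alongside the code. A minimal sketch of the technique; the PackageNotFoundError guard and the fallback string are illustrative additions, not part of the commit:

import importlib.metadata

try:
    version = importlib.metadata.version("Chia Climate Token Driver")
except importlib.metadata.PackageNotFoundError:
    # Running from a source checkout without an installed distribution
    version = "0.0.0-dev"

log_format = f"%(asctime)s,%(msecs)d {version} %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s"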
1 change: 1 addition & 0 deletions tests/test_activities_api.py
@@ -65,6 +65,7 @@ def test_activities_with_empty_db_then_success(
         fastapi_client.portal = portal  # workaround anyio 4.0.0 incompat with TextClient
         m.setattr(crud.BlockChainCrud, "get_challenge", mock_get_challenge)
         m.setattr(crud.DBCrud, "select_activity_with_pagination", mock_db_data)
+        m.setattr(crud.ClimateWareHouseCrud, "combine_climate_units_and_metadata", mock.MagicMock(return_value={}))
 
         params = urlencode({})
         response = fastapi_client.get("v1/activities/", params=params)
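The added line stubs out the CADT round-trip so the activities endpoint can be exercised against an empty database: a MagicMock with a fixed return value is patched in for the network-bound method. A self-contained sketch of that pattern with a hypothetical class (pytest's monkeypatch fixture mirrors the m.setattr call in the test):

from unittest import mock

import pytest


class Warehouse:
    def combine_units_and_metadata(self, search: dict) -> dict:
        raise RuntimeError("real implementation would hit the network")


def test_combine_is_stubbed(monkeypatch: pytest.MonkeyPatch) -> None:
    # Replace the network-bound method with a MagicMock returning a fixed value
    monkeypatch.setattr(Warehouse, "combine_units_and_metadata", mock.MagicMock(return_value={}))
    assert Warehouse().combine_units_and_metadata({}) == {}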
