Merge pull request #450 from BalancerMaxis/issue/399
chore: make compatible with new bal tools release
gosuto-inzasheru authored Oct 3, 2024
2 parents 98a8df9 + b8a8f48 commit 7323096
Showing 3 changed files with 19 additions and 43 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/generate_permissions.yaml
@@ -25,7 +25,7 @@ jobs:
         id: update
         run: |
           pip3 install -r bal_addresses/requirements.txt
-          python3 generate_current_permissions.py
+          python3 gen_current_permissions.py
           git add -A
       - name: pull-request
File renamed without changes.
60 changes: 18 additions & 42 deletions gen_pools_and_gauges.py
@@ -1,38 +1,17 @@
 import json
 
 import pandas as pd
-import requests
 
 from bal_tools import BalPoolsGauges
-from bal_tools import Subgraph
-
-
-def query_swap_enabled_pools(chain, skip=0, step_size=100) -> list:
-    url = Subgraph(chain).get_subgraph_url("core")
-    query = f"""{{
-        pools(
-            skip: {skip}
-            first: {step_size}
-            where: {{swapEnabled: true}}
-        ) {{
-            address
-            symbol
-        }}
-    }}"""
-    r = requests.post(url, json={"query": query})
-    r.raise_for_status()
-    try:
-        result = r.json()["data"]["pools"]
-    except KeyError:
-        result = []
-    if len(result) > 0:
-        # didnt reach end of results yet, collect next page
-        result += query_swap_enabled_pools(chain, skip + step_size, step_size)
-    return result
 
 
-def process_query_swap_enabled_pools(result) -> dict:
-    df = pd.DataFrame(result)
+def process_query_pools(result) -> dict:
+    flattened_result = []
+    for pool_data in result:
+        flattened_result.append(
+            {"address": pool_data.address, "symbol": pool_data.symbol}
+        )
+    df = pd.DataFrame(flattened_result)
     if len(df) == 0:
         return
     # assert no duplicate addresses exist
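
This hunk drops the hand-rolled recursive pagination against the core subgraph in favor of the new bal_tools API. A minimal sketch of the new call pattern, assuming query_all_pools() handles pagination internally and returns objects exposing .address and .symbol, as the flattening loop in process_query_pools implies:

    from bal_tools import BalPoolsGauges

    # "mainnet" is just an example chain; any Balancer production chain works.
    pool_gauge_info = BalPoolsGauges("mainnet")
    for pool in pool_gauge_info.query_all_pools():
        # attribute access mirrors the flattening loop in process_query_pools()
        print(pool.address, pool.symbol)
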
@@ -46,25 +25,25 @@ def process_query_swap_enabled_pools(result) -> dict:
         print("Found duplicate symbols!")
         print(df[df["symbol"].duplicated(keep=False)].sort_values("symbol"))
         raise
-    return df.set_index("symbol")["address"].to_dict()
+    return df.sort_values("address").set_index("symbol")["address"].to_dict()
 
 
-def process_query_preferential_gauges(result) -> dict:
+def process_query_gauges(result) -> dict:
     df = pd.DataFrame(result)
     if len(df) == 0:
         return
     # assert no duplicate addresses exist
-    assert len(df["id"].unique()) == len(df)
+    assert len(df["address"].unique()) == len(df)
 
     # solve issue of duplicate gauge symbols
-    df["symbol"] = df["symbol"] + "-" + df["id"].str[2:6]
+    df["symbol"] = df["symbol"] + "-" + df["address"].str[2:6]
 
     # confirm no duplicate symbols exist, raise if so
     if len(df["symbol"].unique()) != len(df):
         print("Found duplicate symbols!")
         print(df[df["symbol"].duplicated(keep=False)].sort_values("symbol"))
         raise
-    return df.set_index("symbol")["id"].to_dict()
+    return df.sort_values("address").set_index("symbol")["address"].to_dict()
 
 
 def process_query_root_gauges(result, gauges) -> dict:
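
Under the new bal_tools schema, gauges are keyed by address rather than id, and both mappings are now sorted by address before being emitted, which makes the generated output deterministic. The duplicate-symbol fix suffixes each symbol with the first four hex characters of its address. A toy illustration of that technique, with invented rows:

    import pandas as pd

    # Two gauges sharing a symbol; the addresses are made up for illustration.
    df = pd.DataFrame(
        {
            "address": ["0xabcd1234", "0xbeef5678"],
            "symbol": ["B-80BAL-20WETH", "B-80BAL-20WETH"],
        }
    )
    # str[2:6] skips the "0x" prefix and takes the next four hex characters
    df["symbol"] = df["symbol"] + "-" + df["address"].str[2:6]
    print(df.set_index("symbol")["address"].to_dict())
    # {'B-80BAL-20WETH-abcd': '0xabcd1234', 'B-80BAL-20WETH-beef': '0xbeef5678'}
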
@@ -110,21 +89,18 @@ def main():
         chains = json.load(f)
     for chain in chains["BALANCER_PRODUCTION_CHAINS"]:
         print(f"Generating pools and gauges for {chain}...")
-        gauge_info = BalPoolsGauges(chain)
+        pool_gauge_info = BalPoolsGauges(chain)
         # pools
-        # TODO: consider moving to query object??
-        result = process_query_swap_enabled_pools(query_swap_enabled_pools(chain))
+        result = process_query_pools(pool_gauge_info.query_all_pools())
         if result:
             pools[chain] = result
         # gauges
-        result = process_query_preferential_gauges(
-            gauge_info.query_preferential_gauges()
-        )
+        result = process_query_gauges(pool_gauge_info.query_all_gauges())
         if result:
             gauges[chain] = result
         # cache mainnet BalPoolsGauges
         if chain == "mainnet":
-            gauge_info_mainnet = gauge_info
+            gauge_info_mainnet = pool_gauge_info
 
     # root gauges; only on mainnet
     result = process_query_root_gauges(gauge_info_mainnet.query_root_gauges(), gauges)
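
Taken together, main() now drives both the pool and gauge queries through a single BalPoolsGauges instance per chain. A hypothetical end-to-end sketch for one chain, assuming the helper functions defined in gen_pools_and_gauges.py above are in scope:

    import json

    from bal_tools import BalPoolsGauges

    # Build symbol -> address maps for one example chain; main() instead
    # loops over chains["BALANCER_PRODUCTION_CHAINS"] rather than
    # hardcoding "mainnet".
    pool_gauge_info = BalPoolsGauges("mainnet")
    pools = {"mainnet": process_query_pools(pool_gauge_info.query_all_pools())}
    gauges = {"mainnet": process_query_gauges(pool_gauge_info.query_all_gauges())}
    print(json.dumps(pools, indent=2))
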