diff --git a/.gitignore b/.gitignore
index a7a51e8b67..a4e2d980a3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,3 +37,4 @@ go.work*
control/target/
web/packages/operations/.env.polkadot
web/packages/operations/.env.rococo
+lodestar
diff --git a/.gitmodules b/.gitmodules
index e1c59569a9..b87b18857d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -13,6 +13,4 @@
[submodule "contracts/lib/prb-math"]
path = contracts/lib/prb-math
url = https://github.com/PaulRBerg/prb-math
-[submodule "lodestar"]
- path = lodestar
- url = https://github.com/Snowfork/lodestar
+
diff --git a/docs/.gitbook/assets/image.png b/docs/.gitbook/assets/image.png
new file mode 100644
index 0000000000..67ef7a1b01
Binary files /dev/null and b/docs/.gitbook/assets/image.png differ
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md
index 10726c26d3..14cda97389 100644
--- a/docs/SUMMARY.md
+++ b/docs/SUMMARY.md
@@ -38,3 +38,4 @@
* [Contributing to Snowbridge](runbooks/updating-snowbridge-pallets-bridgehub-and-assethub-runtimes.md)
* [Governance Updates](runbooks/governance-updates.md)
+* [Test Runtime Upgrades](runbooks/test-runtime-upgrades.md)
diff --git a/docs/architecture/components.md b/docs/architecture/components.md
index 6c9e39add3..4b7f381efe 100644
--- a/docs/architecture/components.md
+++ b/docs/architecture/components.md
@@ -21,7 +21,7 @@ This [pallet](https://github.com/Snowfork/snowbridge/tree/main/parachain/pallets
3. At the end of every block, a merkle root of all processed messages is generated and inserted into the parachain header as a [digest item](https://github.com/paritytech/substrate/blob/46136f2a18780d71542ae615565703da754b5348/primitives/runtime/src/generic/digest.rs#L100).
4. Processed messages are also temporarily held in storage so that they can be queried by offchain message relayers.
-The merkle root in (3) is the commitment that needs to verified on the Ethereum side.
+The merkle root in (3) is the commitment that needs to be verified on the Ethereum side.
### EthereumBeaconClient
diff --git a/docs/architecture/fees-and-channels.md b/docs/architecture/fees-and-channels.md
index 4ba49a73a3..3f685bac45 100644
--- a/docs/architecture/fees-and-channels.md
+++ b/docs/architecture/fees-and-channels.md
@@ -50,7 +50,7 @@ In both of the scenarios above, there is a common pattern:
Parachain governance therefore has the responsibility to ensure that it has enough funds to cover costs on the destination network.
-This can be done by selling collected fees on the source network for currency of the destination network. This currently a manual process, but should only need to be done a few times a year.
+This can be done by selling collected fees on the source network for currency of the destination network. This is currently a manual process, but should only need to be done a few times a year.
Parachains can use the BridgeHub [transfer\_native\_from\_agent](https://github.com/Snowfork/snowbridge/blob/c2142e41b5a2cbd3749a5fd8f22a95abf2b923d9/parachain/pallets/system/src/lib.rs#L503C10-L503C36) API to transfer funds from their agent to some EOA account.
diff --git a/docs/architecture/governance.md b/docs/architecture/governance.md
index 233ad97541..cfe54d04f1 100644
--- a/docs/architecture/governance.md
+++ b/docs/architecture/governance.md
@@ -10,7 +10,7 @@ This promotes decentralisation in the following ways:
## Cross-chain Governance
-Our bridge has contracts on the Ethereum, and these contracts need to be able to evolve along with the parachain side. Cross-chain governance will control both configuration and code upgrades on the Ethereum side.
+Our bridge has contracts on the Ethereum side, and these contracts need to be able to evolve along with the parachain side. Cross-chain governance will control both configuration and code upgrades on the Ethereum side.
As a prime example, Polkadot and BEEFY consensus algorithms will change, and so we need to make sure the Ethereum side of the bridge remains compatible. Otherwise locked up collateral will not be redeemable.
diff --git a/docs/runbooks/test-runtime-upgrades.md b/docs/runbooks/test-runtime-upgrades.md
new file mode 100644
index 0000000000..7f8b308831
--- /dev/null
+++ b/docs/runbooks/test-runtime-upgrades.md
@@ -0,0 +1,231 @@
+---
+description: How to test upgrades depending on a runtime upgrade not yet executed.
+---
+
+# Test Runtime Upgrades
+
+## Overview
+
+A scenario that frequently occurs is that we need to test a Snowbridge-related runtime upgrade that depends on a system parachain upgrade. Runtime upgrades for system parachains can take up to four weeks to execute. If we waited for the system parachain upgrade to complete before initiating the Snowbridge upgrades, release cycles could take months.
+
+Therefore, it is useful to be able to test system parachain upgrades that have not yet executed and then apply Snowbridge upgrades to ensure everything works.
+
+## Steps
+
+In the following scenario, we will simulate execution of the 1.2.0 upgrade: https://github.com/polkadot-fellows/runtimes/releases/tag/v1.2.0.
+
+1. Install [opengov-cli](https://github.com/joepetrowski/opengov-cli)
+2. Build the preimage for the upgrade:
+
+```sh
+opengov-cli build-upgrade --network polkadot --relay-version 1.2.0 --filename preimage.hex
+```
+
+3. Convert the preimage from hex to binary
+
+```sh
+cd upgrade-polkadot-1.2.0
+xxd -r -p preimage.hex > preimage.bin
+```
+
+4. Determine the size of the preimage and save it as `PREIMAGE_SIZE`
+
+On Linux:
+
+```sh
+$ stat -c%s preimage.bin
+1567371
+$ export PREIMAGE_SIZE=1567371
+```
+
+On Mac:
+
+```sh
+$ stat -f%z preimage.bin
+1567371
+$ export PREIMAGE_SIZE=1567371
+```
+
+5. Compute the BLAKE2b-256 hash of the preimage and save it as `PREIMAGE_HASH`
+
+```sh
+$ b2sum -l 256 preimage.bin | awk '{print "0x"$1}'
+0x15165c85152568b7f523e374ce1a5172f2aa148721d5dae0441f86c201c1a77b4
+$ export PREIMAGE_HASH=0x15165c85152568b7f523e374ce1a5172f2aa148721d5dae0441f86c201c1a77b4
+```
+
+6. Create a Chopsticks configuration file for the Polkadot relay chain, substituting the values generated previously (see the note below the file for `PREIMAGE_WITH_LENGTH_PREFIX`):
+
+`polkadot.yml`
+
+```yaml
+endpoint: wss://polkadot-rpc.dwellir.com
+mock-signature-host: true
+block: ${env.POLKADOT_BLOCK_NUMBER}
+db: ./polkadot.sqlite
+
+import-storage:
+ System:
+ Account:
+ - - - 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
+ - providers: 1
+ data:
+ free: '10000000000000000000'
+ ParasDisputes:
+ $removePrefix: ['disputes'] # these can make block building super slow
+ Preimage:
+ {
+ PreimageFor:
+ [[[[PREIMAGE_HASH, PREIMAGE_SIZE]], PREIMAGE_WITH_LENGTH_PREFIX]],
+ StatusFor:
+ [[[PREIMAGE_HASH], { Requested: { count: 1, len: PREIMAGE_SIZE } }]],
+ }
+```
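+
+Note: `PREIMAGE_WITH_LENGTH_PREFIX` is not produced by the earlier steps. Assuming the preimage pallet stores its value as SCALE-encoded bytes (a compact length prefix followed by the raw preimage), a sketch using `@polkadot/util` to derive it could look like this:
+
+```js
+// Hypothetical helper: hex-encode preimage.bin with a SCALE compact length
+// prefix, for substitution as PREIMAGE_WITH_LENGTH_PREFIX above.
+const fs = require("fs")
+const { compactToU8a, u8aConcat, u8aToHex } = require("@polkadot/util")
+
+const preimage = fs.readFileSync("preimage.bin")
+console.log(u8aToHex(u8aConcat(compactToU8a(preimage.length), preimage)))
+```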
+
+7. Use these Chopsticks config files for AssetHub and BridgeHub:
+
+`polkadot-asset-hub.yml`
+
+```yaml
+endpoint: wss://statemint-rpc.dwellir.com
+mock-signature-host: true
+block: ${env.POLKADOT_ASSET_HUB_BLOCK_NUMBER}
+db: ./assethub.sqlite
+
+import-storage:
+ System:
+ Account:
+ - - - 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
+ - providers: 1
+ data:
+ free: 1000000000000000
+```
+
+`polkadot-bridge-hub.yml`
+
+```yaml
+endpoint: wss://polkadot-bridge-hub-rpc.dwellir.com
+mock-signature-host: true
+block: ${env.POLKADOT_BRIDGEHUB_BLOCK_NUMBER}
+db: ./bridgehub.sqlite
+
+import-storage:
+ System:
+ Account:
+ - - - 5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY
+ - providers: 1
+ data:
+ free: 1000000000000000
+```
+
+8. Run Chopsticks
+
+```sh
+yarn start xcm -r polkadot.yml -p polkadot-asset-hub.yml -p polkadot-bridge-hub.yml
+```
+
+To verify that the preimage was added successfully, check the `preimage` storage in the chain state. The authorized preimage should appear in the list of added preimages.
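+
+For example, a quick check with Polkadot-JS (a sketch; `PREIMAGE_HASH` is the value computed in step 5, and `api` is assumed to be connected to the relay chain):
+
+```js
+// Query the preimage pallet to confirm the preimage was imported.
+const status = await api.query.preimage.statusFor(PREIMAGE_HASH)
+console.log(status.toHuman()) // expect a Requested entry rather than None
+```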
+
+9. Execute the upgrade on the relay chain using Polkadot-JS:
+
+```js
+const number = (await api.rpc.chain.getHeader()).number.toNumber()
+
+await api.rpc('dev_setStorage', {
+ Scheduler: {
+ Agenda: [
+ [
+ [number + 1],
+ [
+ {
+ call: {
+ Lookup: {
+ hash: PREIMAGE_HASH,
+ len: PREIMAGE_SIZE,
+ },
+ },
+ origin: {
+ system: 'Root',
+ },
+ },
+ ],
+ ],
+ ],
+ },
+})
+
+await api.rpc('dev_newBlock', { count: 1 })
+```
+
+10. Advance a few blocks on the relay chain
+
+```js
+await api.rpc('dev_newBlock', { count: 2 })
+```
+
+11. Advance by one block on BridgeHub (possibly unnecessary; this still needs experimentation)
+
+```js
+await api.rpc('dev_newBlock', { count: 1 })
+```
+
+12. Now that the upgrade has been authorized on BridgeHub, we can execute the upgrade by calling `parachainSystem.enactAuthorizedUpgrade`, passing the parachain WASM blob previously generated by `opengov-cli`:
+
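+A sketch using Polkadot-JS connected to the BridgeHub endpoint (illustrative only: the WASM filename is an assumption about the opengov-cli output, and `api`, `fs` and a funded `alice` keypair are assumed to be in scope):
+
+```js
+// Enact the previously authorized upgrade by submitting the runtime blob.
+const code = fs.readFileSync("bridge_hub_polkadot_runtime.compact.compressed.wasm")
+await api.tx.parachainSystem
+    .enactAuthorizedUpgrade(`0x${code.toString("hex")}`)
+    .signAndSend(alice)
+```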
+
+
+13. Advance a few blocks on both BridgeHub AND the relay chain
+
+```js
+await api.rpc('dev_newBlock', { count: 1 })
+```
+
+14. The parachain should now be upgraded.
+
+## Caveats
+
+Some Polkadot API endpoints aggressively time out connections, causing Chopsticks to die:
+
+```sh
+API-WS: disconnected from wss://polkadot-rpc.dwellir.com: 1006:: Abnormal Closure
+```
+
+The usual remedy is to restart Chopsticks and pray the API connections don't die again.
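+
+A crude workaround (an untested sketch) is to run Chopsticks in a restart loop:
+
+```sh
+while true; do
+    yarn start xcm -r polkadot.yml -p polkadot-asset-hub.yml -p polkadot-bridge-hub.yml
+    echo "Chopsticks exited, restarting in 5s..."
+    sleep 5
+done
+```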
diff --git a/flake.lock b/flake.lock
index 61b5385a79..ab1c96a30a 100644
--- a/flake.lock
+++ b/flake.lock
@@ -1,12 +1,15 @@
{
"nodes": {
"flake-utils": {
+ "inputs": {
+ "systems": "systems"
+ },
"locked": {
- "lastModified": 1676283394,
- "narHash": "sha256-XX2f9c3iySLCw54rJ/CZs+ZK6IQy7GXNY4nSOyu2QG4=",
+ "lastModified": 1710146030,
+ "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
- "rev": "3db36a8b464d0c4532ba1c7dda728f4576d6d073",
+ "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
@@ -85,6 +88,21 @@
"foundry": "foundry",
"nixpkgs": "nixpkgs_2"
}
+ },
+ "systems": {
+ "locked": {
+ "lastModified": 1681028828,
+ "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+ "owner": "nix-systems",
+ "repo": "default",
+ "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+ "type": "github"
+ },
+ "original": {
+ "owner": "nix-systems",
+ "repo": "default",
+ "type": "github"
+ }
}
},
"root": "root",
diff --git a/flake.nix b/flake.nix
index 8529150e7b..c44ac3995d 100644
--- a/flake.nix
+++ b/flake.nix
@@ -86,6 +86,7 @@
export RUSTUP_HOME=$PWD/.rustup
export RUST_NIGHTLY_VERSION=nightly-2024-02-08
export PATH=$CARGO_HOME/bin:$PATH
+ export LODESTAR_VERSION=v1.16.0
eval "$(direnv hook bash)"
diff --git a/lodestar b/lodestar
deleted file mode 160000
index 5d93a629c0..0000000000
--- a/lodestar
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 5d93a629c0fd1fdd32160cbf7717e7a6b22f7f2d
diff --git a/relayer/cmd/generate_beacon_data.go b/relayer/cmd/generate_beacon_data.go
index 1635303adb..dfa13e860a 100644
--- a/relayer/cmd/generate_beacon_data.go
+++ b/relayer/cmd/generate_beacon_data.go
@@ -55,6 +55,7 @@ func generateBeaconCheckpointCmd() *cobra.Command {
}
cmd.Flags().String("config", "/tmp/snowbridge/beacon-relay.json", "Path to the beacon relay config")
+ cmd.Flags().Uint64("finalized-slot", 0, "Optional finalized slot to create checkpoint at")
cmd.Flags().Bool("export-json", false, "Export Json")
return cmd
@@ -117,6 +118,7 @@ func generateBeaconCheckpoint(cmd *cobra.Command, _ []string) error {
if err != nil {
return err
}
+ finalizedSlot, _ := cmd.Flags().GetUint64("finalized-slot")
viper.SetConfigFile(config)
@@ -138,7 +140,13 @@ func generateBeaconCheckpoint(cmd *cobra.Command, _ []string) error {
client := api.NewBeaconClient(conf.Source.Beacon.Endpoint, conf.Source.Beacon.StateEndpoint)
s := syncer.New(client, &store, p)
- checkPointScale, err := s.GetCheckpoint()
+ var checkPointScale scale.BeaconCheckpoint
+ if finalizedSlot == 0 {
+ checkPointScale, err = s.GetCheckpoint()
+ } else {
+ checkPointScale, err = s.GetCheckpointAtSlot(finalizedSlot)
+ }
+
if err != nil {
return fmt.Errorf("get initial sync: %w", err)
}
diff --git a/relayer/cmd/import_beacon_state.go b/relayer/cmd/import_beacon_state.go
index 8884b72f3e..f683e48db0 100644
--- a/relayer/cmd/import_beacon_state.go
+++ b/relayer/cmd/import_beacon_state.go
@@ -106,6 +106,11 @@ func importBeaconState(cmd *cobra.Command, _ []string) error {
attestedSlot := attestedState.GetSlot()
finalizedSlot := finalizedState.GetSlot()
+ err = syncer.ValidatePair(finalizedSlot, attestedSlot, attestedState)
+ if err != nil {
+ return fmt.Errorf("state pair validation failed: %w", err)
+ }
+
err = store.WriteEntry(attestedSlot, finalizedSlot, attestedData, finalizedData)
if err != nil {
return fmt.Errorf("write beacon store entry: %w", err)
diff --git a/relayer/relays/beacon/header/header.go b/relayer/relays/beacon/header/header.go
index 950d2cdcbc..167836bae7 100644
--- a/relayer/relays/beacon/header/header.go
+++ b/relayer/relays/beacon/header/header.go
@@ -23,6 +23,7 @@ import (
var ErrFinalizedHeaderUnchanged = errors.New("finalized header unchanged")
var ErrFinalizedHeaderNotImported = errors.New("finalized header not imported")
+var ErrInterimHeaderNotImported = errors.New("interim finalized header not imported")
var ErrSyncCommitteeNotImported = errors.New("sync committee not imported")
var ErrSyncCommitteeLatency = errors.New("sync committee latency found")
var ErrExecutionHeaderNotImported = errors.New("execution header not imported")
@@ -63,6 +64,7 @@ func (h *Header) Sync(ctx context.Context, eg *errgroup.Group) error {
// Special handling here for the initial checkpoint to sync the next sync committee which is not included in initial
// checkpoint.
if h.isInitialSyncPeriod() {
+ log.Info("syncing next sync committee for initial checkpoint")
err = h.SyncCommitteePeriodUpdate(ctx, latestSyncedPeriod)
if err != nil {
return fmt.Errorf("sync next committee for initial sync period: %w", err)
@@ -128,16 +130,27 @@ func (h *Header) SyncCommitteePeriodUpdate(ctx context.Context, period uint64) e
// finalized header
if uint64(update.Payload.FinalizedHeader.Slot) > h.cache.Finalized.LastSyncedSlot {
diff := uint64(update.Payload.FinalizedHeader.Slot) - h.cache.Finalized.LastSyncedSlot
- log.WithFields(log.Fields{"diff": diff, "last_finalized_slot": h.cache.Finalized.LastSyncedSlot, "new_finalized_slot": uint64(update.Payload.FinalizedHeader.Slot)}).Info("checking max latency")
- if diff > h.protocol.Settings.SlotsInEpoch*h.protocol.Settings.EpochsPerSyncCommitteePeriod {
- log.Info("syncing an interim update")
- err = h.syncInterimFinalizedUpdate(ctx, h.cache.Finalized.LastSyncedSlot, uint64(update.Payload.FinalizedHeader.Slot))
+ minSlot := h.cache.Finalized.LastSyncedSlot
+ for diff > h.protocol.Settings.SlotsInEpoch*h.protocol.Settings.EpochsPerSyncCommitteePeriod {
+ log.WithFields(log.Fields{
+ "diff": diff,
+ "last_finalized_slot": h.cache.Finalized.LastSyncedSlot,
+ "new_finalized_slot": uint64(update.Payload.FinalizedHeader.Slot),
+ }).Info("interim update required")
+
+ interimUpdate, err := h.syncInterimFinalizedUpdate(ctx, minSlot, uint64(update.Payload.FinalizedHeader.Slot))
if err != nil {
return fmt.Errorf("sync interim finalized header update: %w", err)
}
+
+ diff = uint64(update.Payload.FinalizedHeader.Slot) - uint64(interimUpdate.Payload.FinalizedHeader.Slot)
+ minSlot = uint64(update.Payload.FinalizedHeader.Slot) + h.protocol.Settings.SlotsInEpoch
+ log.WithFields(log.Fields{
+ "new_diff": diff,
+ "interim_finalized_slot": uint64(interimUpdate.Payload.FinalizedHeader.Slot),
+ "new_finalized_slot": uint64(update.Payload.FinalizedHeader.Slot),
+ }).Info("interim update synced successfully")
}
- } else {
- log.Info("interim update not required")
}
log.WithFields(log.Fields{
@@ -243,24 +256,29 @@ func (h *Header) SyncHeaders(ctx context.Context) error {
return nil
}
-func (h *Header) syncInterimFinalizedUpdate(ctx context.Context, lastSyncedSlot, newCheckpointSlot uint64) error {
+func (h *Header) syncInterimFinalizedUpdate(ctx context.Context, lastSyncedSlot, newCheckpointSlot uint64) (scale.Update, error) {
+ currentPeriod := h.protocol.ComputeSyncPeriodAtSlot(lastSyncedSlot)
+
// Calculate the range that the interim finalized header update may be in
minSlot := newCheckpointSlot - h.protocol.SlotsPerHistoricalRoot
- maxSlot := lastSyncedSlot + h.protocol.SlotsPerHistoricalRoot
+ maxSlot := ((currentPeriod + 1) * h.protocol.SlotsPerHistoricalRoot) - h.protocol.Settings.SlotsInEpoch // just before the new sync committee boundary
finalizedUpdate, err := h.syncer.GetFinalizedUpdateAtAttestedSlot(minSlot, maxSlot, false)
if err != nil {
- return fmt.Errorf("get interim checkpoint to update chain (last synced slot %d, new slot: %d): %w", lastSyncedSlot, newCheckpointSlot, err)
+ return scale.Update{}, fmt.Errorf("get interim checkpoint to update chain (last synced slot %d, new slot: %d): %w", lastSyncedSlot, newCheckpointSlot, err)
}
log.WithField("slot", finalizedUpdate.Payload.FinalizedHeader.Slot).Info("syncing an interim update to on-chain")
err = h.updateFinalizedHeaderOnchain(ctx, finalizedUpdate)
- if err != nil {
- return fmt.Errorf("update interim finalized header on-chain: %w", err)
+ switch {
+ case errors.Is(err, ErrFinalizedHeaderNotImported):
+ return scale.Update{}, ErrInterimHeaderNotImported
+ case err != nil:
+ return scale.Update{}, fmt.Errorf("update interim finalized header on-chain: %w", err)
}
- return nil
+ return finalizedUpdate, nil
}
func (h *Header) syncLaggingSyncCommitteePeriods(ctx context.Context, latestSyncedPeriod, currentSyncPeriod uint64) error {
diff --git a/relayer/relays/beacon/header/header_test.go b/relayer/relays/beacon/header/header_test.go
index 03b20db334..210fa16f69 100644
--- a/relayer/relays/beacon/header/header_test.go
+++ b/relayer/relays/beacon/header/header_test.go
@@ -68,7 +68,7 @@ func TestSyncInterimFinalizedUpdate_WithDataFromAPI(t *testing.T) {
)
// Find a checkpoint for a slot that is just out of the on-chain synced finalized header block roots range
- err = h.syncInterimFinalizedUpdate(context.Background(), 4563072, 4571360)
+ _, err = h.syncInterimFinalizedUpdate(context.Background(), 4563072, 4571360)
require.NoError(t, err)
}
@@ -131,7 +131,7 @@ func TestSyncInterimFinalizedUpdate_WithDataFromStore(t *testing.T) {
)
// Find a checkpoint for a slot that is just out of the on-chain synced finalized header block roots range
- err = h.syncInterimFinalizedUpdate(context.Background(), 4563072, 4571360)
+ _, err = h.syncInterimFinalizedUpdate(context.Background(), 4563072, 4571360)
require.NoError(t, err)
}
@@ -196,7 +196,7 @@ func TestSyncInterimFinalizedUpdate_WithDataFromStoreWithDifferentBlocks(t *test
)
// Find a checkpoint for a slot that is just out of the on-chain synced finalized header block roots range
- err = h.syncInterimFinalizedUpdate(context.Background(), 4563072, 4571360)
+ _, err = h.syncInterimFinalizedUpdate(context.Background(), 4563072, 4571360)
require.NoError(t, err)
}
@@ -241,7 +241,7 @@ func TestSyncInterimFinalizedUpdate_BeaconStateNotAvailableInAPIAndStore(t *test
)
// Find a checkpoint for a slot that is just out of the on-chain synced finalized header block roots range
- err = h.syncInterimFinalizedUpdate(context.Background(), 4570722, 4578922)
+ _, err = h.syncInterimFinalizedUpdate(context.Background(), 4570722, 4578922)
require.Error(t, err)
}
@@ -279,6 +279,6 @@ func TestSyncInterimFinalizedUpdate_NoValidBlocksFound(t *testing.T) {
)
// Find a checkpoint for a slot that is just out of the on-chain synced finalized header block roots range
- err = h.syncInterimFinalizedUpdate(context.Background(), 4570722, 4578922)
+ _, err = h.syncInterimFinalizedUpdate(context.Background(), 4570722, 4578922)
require.Errorf(t, err, "cannot find blocks at boundaries")
}
diff --git a/relayer/relays/beacon/header/syncer/syncer.go b/relayer/relays/beacon/header/syncer/syncer.go
index 3cc25c40a0..2254b0ec46 100644
--- a/relayer/relays/beacon/header/syncer/syncer.go
+++ b/relayer/relays/beacon/header/syncer/syncer.go
@@ -1,8 +1,10 @@
package syncer
import (
+ "encoding/json"
"errors"
"fmt"
+ "os"
"strconv"
"github.com/snowfork/go-substrate-rpc-client/v4/types"
@@ -21,10 +23,11 @@ import (
)
const (
- BlockRootGeneralizedIndex = 37
- FinalizedCheckpointGeneralizedIndex = 105
- NextSyncCommitteeGeneralizedIndex = 55
- ExecutionPayloadGeneralizedIndex = 25
+ BlockRootGeneralizedIndex = 37
+ FinalizedCheckpointGeneralizedIndex = 105
+ CurrentSyncCommitteeGeneralizedIndex = 54
+ NextSyncCommitteeGeneralizedIndex = 55
+ ExecutionPayloadGeneralizedIndex = 25
)
var (
@@ -95,6 +98,100 @@ func (s *Syncer) GetCheckpoint() (scale.BeaconCheckpoint, error) {
}, nil
}
+func (s *Syncer) GetCheckpointFromFile(file string) (scale.BeaconCheckpoint, error) {
+ type CheckPointResponse struct {
+ Header api.BeaconHeader `json:"header"`
+ CurrentSyncCommittee api.SyncCommitteeResponse `json:"current_sync_committee"`
+ CurrentSyncCommitteeBranch []string `json:"current_sync_committee_branch"`
+ ValidatorsRoot string `json:"validators_root"`
+ BlockRootsRoot string `json:"block_roots_root"`
+ BlockRootsRootBranch []string `json:"block_roots_branch"`
+ }
+ var response CheckPointResponse
+
+ byteValue, err := os.ReadFile(file)
+ if err != nil {
+ return scale.BeaconCheckpoint{}, err
+ }
+
+ err = json.Unmarshal(byteValue, &response)
+ if err != nil {
+ return scale.BeaconCheckpoint{}, err
+ }
+
+ header, err := response.Header.ToScale()
+ if err != nil {
+ return scale.BeaconCheckpoint{}, err
+ }
+
+ currentSyncCommittee, err := response.CurrentSyncCommittee.ToScale()
+ if err != nil {
+ return scale.BeaconCheckpoint{}, err
+ }
+
+ return scale.BeaconCheckpoint{
+ Header: header,
+ CurrentSyncCommittee: currentSyncCommittee,
+ CurrentSyncCommitteeBranch: util.ProofBranchToScale(response.CurrentSyncCommitteeBranch),
+ ValidatorsRoot: types.H256(common.HexToHash(response.ValidatorsRoot)),
+ BlockRootsRoot: types.H256(common.HexToHash(response.BlockRootsRoot)),
+ BlockRootsBranch: util.ProofBranchToScale(response.BlockRootsRootBranch),
+ }, nil
+}
+
+func (s *Syncer) GetCheckpointAtSlot(slot uint64) (scale.BeaconCheckpoint, error) {
+ checkpoint, err := s.GetFinalizedUpdateAtAttestedSlot(slot, slot, false)
+ if err != nil {
+ return scale.BeaconCheckpoint{}, fmt.Errorf("get finalized update at slot: %w", err)
+ }
+
+ genesis, err := s.Client.GetGenesis()
+ if err != nil {
+ return scale.BeaconCheckpoint{}, fmt.Errorf("get genesis: %w", err)
+ }
+
+ finalizedState, err := s.getBeaconStateAtSlot(slot)
+ if err != nil {
+ return scale.BeaconCheckpoint{}, fmt.Errorf("fetch beacon state at slot: %w", err)
+ }
+
+ blockRootsProof, err := s.GetBlockRootsFromState(finalizedState)
+ if err != nil {
+ return scale.BeaconCheckpoint{}, fmt.Errorf("fetch block roots: %w", err)
+ }
+
+ syncCommittee := finalizedState.GetCurrentSyncCommittee()
+
+ stateTree, err := finalizedState.GetTree()
+ if err != nil {
+ return scale.BeaconCheckpoint{}, fmt.Errorf("get state tree: %w", err)
+ }
+
+ _ = stateTree.Hash() // necessary to populate the proof tree values
+
+ proof, err := stateTree.Prove(CurrentSyncCommitteeGeneralizedIndex)
+ if err != nil {
+ return scale.BeaconCheckpoint{}, fmt.Errorf("get current sync committee proof: %w", err)
+ }
+
+ pubkeys, err := util.ByteArrayToPublicKeyArray(syncCommittee.PubKeys)
+ if err != nil {
+ return scale.BeaconCheckpoint{}, fmt.Errorf("bytes to pubkey array: %w", err)
+ }
+
+ return scale.BeaconCheckpoint{
+ Header: checkpoint.Payload.FinalizedHeader,
+ CurrentSyncCommittee: scale.SyncCommittee{
+ Pubkeys: pubkeys,
+ AggregatePubkey: syncCommittee.AggregatePubKey,
+ },
+ CurrentSyncCommitteeBranch: util.BytesBranchToScale(proof.Hashes),
+ ValidatorsRoot: types.H256(genesis.ValidatorsRoot),
+ BlockRootsRoot: blockRootsProof.Leaf,
+ BlockRootsBranch: blockRootsProof.Proof,
+ }, nil
+}
+
// GetSyncCommitteePeriodUpdate fetches a sync committee update from the light client API endpoint. If it fails
// (typically because it cannot download the finalized header beacon state because the slot does not fall on a 32
// slot interval, due to a missed block), it will construct an update manually from data download from the beacon
@@ -568,6 +665,37 @@ func (s *Syncer) findValidUpdatePair(slot uint64) (uint64, uint64, error) {
return finalizedHeader.Slot, attestedHeader.Slot, nil
}
+func (s *Syncer) ValidatePair(finalizedSlot, attestedSlot uint64, attestedState state.BeaconState) error {
+ finalizedCheckpoint := attestedState.GetFinalizedCheckpoint()
+ finalizedHeader, err := s.Client.GetHeaderByBlockRoot(common.BytesToHash(finalizedCheckpoint.Root))
+ if err != nil {
+ return fmt.Errorf("unable to download finalized header from attested state")
+ }
+
+ if finalizedHeader.Slot != finalizedSlot {
+ return fmt.Errorf("finalized state in attested state does not match provided finalized state, attested state finalized slot: %d, finalized slot provided: %d", finalizedHeader.Slot, finalizedSlot)
+ }
+
+ nextHeader, err := s.FindBeaconHeaderWithBlockIncluded(attestedSlot + 1)
+ if err != nil {
+ return fmt.Errorf("get sync aggregate header: %d err: %w", attestedSlot+1, err)
+ }
+ nextBlock, err := s.Client.GetBeaconBlockBySlot(nextHeader.Slot)
+ if err != nil {
+ return fmt.Errorf("get sync aggregate block: %d err: %w", nextHeader.Slot, err)
+ }
+
+ superMajority, err := s.protocol.SyncCommitteeSuperMajority(nextBlock.Data.Message.Body.SyncAggregate.SyncCommitteeBits)
+ if err != nil {
+ return fmt.Errorf("compute sync committee supermajority: %d err: %w", nextHeader.Slot, err)
+ }
+ if !superMajority {
+ return fmt.Errorf("sync committee at slot not supermajority: %d", nextHeader.Slot)
+ }
+
+ return nil
+}
+
func (s *Syncer) GetFinalizedUpdateWithSyncCommittee(syncCommitteePeriod uint64) (scale.Update, error) {
minSlot := syncCommitteePeriod * s.protocol.SlotsPerHistoricalRoot
maxSlot := ((syncCommitteePeriod + 1) * s.protocol.SlotsPerHistoricalRoot) - s.protocol.Settings.SlotsInEpoch // just before the new sync committee boundary
@@ -591,17 +719,31 @@ func (s *Syncer) GetFinalizedUpdateAtAttestedSlot(minSlot, maxSlot uint64, fetch
// Try getting beacon data from the API first
data, err := s.getBeaconDataFromClient(attestedSlot)
if err != nil {
- log.WithFields(log.Fields{"minSlot": minSlot, "maxSlot": maxSlot}).Info("attempting to find in beacon store")
// If it fails, using the beacon store and look for a relevant finalized update
- data, err = s.getBestMatchBeaconDataFromStore(minSlot, maxSlot)
- if err != nil {
- return update, fmt.Errorf("fetch beacon data from api and data store failure: %w", err)
- }
+ for {
+ if minSlot > maxSlot {
+ return update, fmt.Errorf("find beacon state store options exhausted: %w", err)
+ }
+
+ data, err = s.getBestMatchBeaconDataFromStore(minSlot, maxSlot)
+ if err != nil {
+ return update, fmt.Errorf("fetch beacon data from api and data store failure: %w", err)
+ }
- // The datastore may not have found the attested slot we wanted, but provided another valid one
- attestedSlot = data.AttestedSlot
+ err = s.ValidatePair(data.FinalizedHeader.Slot, data.AttestedSlot, data.AttestedState)
+ if err != nil {
+ minSlot = data.FinalizedHeader.Slot + 1
+ log.WithError(err).WithField("minSlot", minSlot).Warn("pair retrieved from database invalid")
+ continue
+ }
+
+ // The datastore may not have found the attested slot we wanted, but provided another valid one
+ attestedSlot = data.AttestedSlot
+ break
+ }
}
+ log.WithFields(log.Fields{"finalizedSlot": data.FinalizedHeader.Slot, "attestedSlot": data.AttestedSlot}).Info("found slot pair for finalized update")
// Finalized header proof
stateTree, err := data.AttestedState.GetTree()
if err != nil {
@@ -620,7 +762,7 @@ func (s *Syncer) GetFinalizedUpdateAtAttestedSlot(minSlot, maxSlot uint64, fetch
return update, fmt.Errorf("get finalized header proof: %w", err)
}
- nextSyncCommittee := data.AttestedState.GetSyncSyncCommittee()
+ nextSyncCommittee := data.AttestedState.GetNextSyncCommittee()
syncCommitteePubKeys, err := util.ByteArrayToPublicKeyArray(nextSyncCommittee.PubKeys)
nextSyncCommitteeScale = scale.OptionNextSyncCommitteeUpdatePayload{
@@ -789,18 +931,20 @@ func (s *Syncer) getBestMatchBeaconDataFromStore(minSlot, maxSlot uint64) (final
return response, fmt.Errorf("fetch header: %w", err)
}
+ if response.FinalizedHeader.Slot != response.FinalizedState.GetSlot() {
+ return response, fmt.Errorf("finalized header slot does not match finalized state slot")
+ }
+
return response, nil
}
func (s *Syncer) getBeaconState(slot uint64) ([]byte, error) {
data, err := s.Client.GetBeaconState(strconv.FormatUint(slot, 10))
if err != nil {
- log.WithFields(log.Fields{"slot": slot, "err": err}).Warn("unable to download ssz state from api, trying store")
data, err = s.store.GetBeaconStateData(slot)
if err != nil {
return nil, fmt.Errorf("fetch beacon state from store: %w", err)
}
- log.WithField("slot", slot).Info("found state in store")
}
return data, nil
}
diff --git a/relayer/relays/beacon/state/beacon.go b/relayer/relays/beacon/state/beacon.go
index ef0ff75393..bd0f5e8907 100644
--- a/relayer/relays/beacon/state/beacon.go
+++ b/relayer/relays/beacon/state/beacon.go
@@ -164,7 +164,8 @@ type BeaconState interface {
GetBlockRoots() [][]byte
GetTree() (*ssz.Node, error)
GetFinalizedCheckpoint() *Checkpoint
- GetSyncSyncCommittee() *SyncCommittee
+ GetCurrentSyncCommittee() *SyncCommittee
+ GetNextSyncCommittee() *SyncCommittee
}
type SyncAggregate interface {
@@ -318,6 +319,9 @@ func (b *BeaconStateCapellaMainnet) GetFinalizedCheckpoint() *Checkpoint {
return b.FinalizedCheckpoint
}
-func (b *BeaconStateCapellaMainnet) GetSyncSyncCommittee() *SyncCommittee {
+func (b *BeaconStateCapellaMainnet) GetNextSyncCommittee() *SyncCommittee {
return b.NextSyncCommittee
}
+func (b *BeaconStateCapellaMainnet) GetCurrentSyncCommittee() *SyncCommittee {
+ return b.CurrentSyncCommittee
+}
diff --git a/relayer/relays/beacon/state/beacon_deneb.go b/relayer/relays/beacon/state/beacon_deneb.go
index bd889c8b18..f68f43ccfe 100644
--- a/relayer/relays/beacon/state/beacon_deneb.go
+++ b/relayer/relays/beacon/state/beacon_deneb.go
@@ -134,6 +134,9 @@ func (b *BeaconStateDenebMainnet) GetFinalizedCheckpoint() *Checkpoint {
return b.FinalizedCheckpoint
}
-func (b *BeaconStateDenebMainnet) GetSyncSyncCommittee() *SyncCommittee {
+func (b *BeaconStateDenebMainnet) GetNextSyncCommittee() *SyncCommittee {
return b.NextSyncCommittee
}
+func (b *BeaconStateDenebMainnet) GetCurrentSyncCommittee() *SyncCommittee {
+ return b.CurrentSyncCommittee
+}
diff --git a/relayer/relays/beacon/state/beacon_deneb_encoding.go b/relayer/relays/beacon/state/beacon_deneb_encoding.go
index 2ad6ca4f25..f041c55b0f 100644
--- a/relayer/relays/beacon/state/beacon_deneb_encoding.go
+++ b/relayer/relays/beacon/state/beacon_deneb_encoding.go
@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
-// Hash: 2d1815cffaa3bda65721acc72bdfc0e47fdeb4193ba7500d237e58f2369c3628
+// Hash: 03b5096ab94e41e2c740924a4ae7ea8fdd515fe3dd4861032a569e28bcba8bb4
// Version: 0.1.3
package state
diff --git a/relayer/relays/beacon/state/beacon_encoding.go b/relayer/relays/beacon/state/beacon_encoding.go
index ce50a6e58e..ea9e3b1fb0 100644
--- a/relayer/relays/beacon/state/beacon_encoding.go
+++ b/relayer/relays/beacon/state/beacon_encoding.go
@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
-// Hash: 2d1815cffaa3bda65721acc72bdfc0e47fdeb4193ba7500d237e58f2369c3628
+// Hash: 03b5096ab94e41e2c740924a4ae7ea8fdd515fe3dd4861032a569e28bcba8bb4
// Version: 0.1.3
package state
diff --git a/relayer/relays/beefy/main.go b/relayer/relays/beefy/main.go
index 1605a16ff8..553ac0f0e0 100644
--- a/relayer/relays/beefy/main.go
+++ b/relayer/relays/beefy/main.go
@@ -112,7 +112,7 @@ func (relay *Relay) OneShotSync(ctx context.Context, blockNumber uint64) error {
}
// generate beefy update for that specific relay block
- task, err := relay.polkadotListener.generateBeefyUpdate(ctx, blockNumber)
+ task, err := relay.polkadotListener.generateBeefyUpdate(blockNumber)
if err != nil {
return fmt.Errorf("fail to generate next beefy request: %w", err)
}
diff --git a/relayer/relays/beefy/polkadot-listener.go b/relayer/relays/beefy/polkadot-listener.go
index 016f8e1c31..e2ecc439ea 100644
--- a/relayer/relays/beefy/polkadot-listener.go
+++ b/relayer/relays/beefy/polkadot-listener.go
@@ -136,7 +136,7 @@ func (li *PolkadotListener) queryBeefyNextAuthoritySet(blockHash types.Hash) (ty
return nextAuthoritySet, nil
}
-func (li *PolkadotListener) generateBeefyUpdate(ctx context.Context, relayBlockNumber uint64) (Request, error) {
+func (li *PolkadotListener) generateBeefyUpdate(relayBlockNumber uint64) (Request, error) {
api := li.conn.API()
meta := li.conn.Metadata()
var request Request
@@ -145,7 +145,7 @@ func (li *PolkadotListener) generateBeefyUpdate(ctx context.Context, relayBlockN
return request, fmt.Errorf("find match beefy block: %w", err)
}
- commitment, proof, err := fetchCommitmentAndProof(ctx, meta, api, beefyBlockHash)
+ commitment, proof, err := fetchCommitmentAndProof(meta, api, beefyBlockHash)
if err != nil {
return request, fmt.Errorf("fetch commitment and proof: %w", err)
}
@@ -185,8 +185,8 @@ func (li *PolkadotListener) findNextBeefyBlock(blockNumber uint64) (types.Hash,
// The relay block not finalized yet, just wait and retry
time.Sleep(6 * time.Second)
continue
- } else if latestBeefyBlockNumber <= nextBeefyBlockNumber+600 {
- // The relay block has been finalized not long ago(1 hour), just return the finalized block
+ } else if latestBeefyBlockNumber <= nextBeefyBlockNumber+60 {
+ // The relay block has been finalized not long ago, just return the finalized block
nextBeefyBlockHash = finalizedBeefyBlockHash
break
} else {
diff --git a/relayer/relays/beefy/scanner.go b/relayer/relays/beefy/scanner.go
index 6c84cc06bb..d6f08352f5 100644
--- a/relayer/relays/beefy/scanner.go
+++ b/relayer/relays/beefy/scanner.go
@@ -169,7 +169,7 @@ func scanCommitments(ctx context.Context, meta *types.Metadata, api *gsrpc.Subst
return
}
- commitment, proof, err := fetchCommitmentAndProof(ctx, meta, api, result.BlockHash)
+ commitment, proof, err := fetchCommitmentAndProof(meta, api, result.BlockHash)
if err != nil {
emitError(fmt.Errorf("fetch commitment and proof: %w", err))
return
@@ -240,7 +240,7 @@ func verifyProof(meta *types.Metadata, api *gsrpc.SubstrateAPI, proof merkle.Sim
return actualRoot == expectedRoot, nil
}
-func fetchCommitmentAndProof(ctx context.Context, meta *types.Metadata, api *gsrpc.SubstrateAPI, beefyBlockHash types.Hash) (*types.SignedCommitment, *merkle.SimplifiedMMRProof, error) {
+func fetchCommitmentAndProof(meta *types.Metadata, api *gsrpc.SubstrateAPI, beefyBlockHash types.Hash) (*types.SignedCommitment, *merkle.SimplifiedMMRProof, error) {
beefyHeader, err := api.RPC.Chain.GetHeader(beefyBlockHash)
if err != nil {
return nil, nil, fmt.Errorf("fetch header: %w", err)
diff --git a/scripts/init.sh b/scripts/init.sh
index ceff160a9a..71d5bcf198 100755
--- a/scripts/init.sh
+++ b/scripts/init.sh
@@ -4,18 +4,25 @@ set -eux
echo "Checkout polkadot-sdk Snowfork fork"
pushd ..
-if [ ! -d "polkadot-sdk" ]; then
- git clone https://github.com/Snowfork/polkadot-sdk.git
-fi
-pushd polkadot-sdk
-git checkout snowbridge
+ if [ ! -d "polkadot-sdk" ]; then
+ git clone https://github.com/Snowfork/polkadot-sdk.git
+ cd snowbridge && ln -sf ../polkadot-sdk polkadot-sdk
+ fi
+ pushd polkadot-sdk
+ git fetch && git checkout snowbridge
+ popd
popd
-popd
-
-ln -sf ../polkadot-sdk polkadot-sdk
-echo "Setting up submodules"
-git submodule update --init --recursive || true
+echo "Checkout lodestar Snowfork fork"
+pushd ..
+ if [ ! -d "lodestar" ]; then
+ git clone https://github.com/ChainSafe/lodestar
+ cd snowbridge && ln -sf ../lodestar lodestar
+ fi
+ pushd lodestar
+ git fetch && git checkout $LODESTAR_VERSION
+ popd
+popd
echo "Setting up git hooks"
git config --local core.hooksPath hooks/
@@ -36,13 +43,3 @@ cargo install cargo-fuzz
echo "Installing web packages"
(cd web && pnpm install)
-echo "Download geth to replace the nix version"
-OS=$(uname -s | tr A-Z a-z)
-MACHINE_TYPE=$(uname -m | tr A-Z a-z | sed 's/x86_64/amd64/')
-
-geth_package=geth-$OS-$MACHINE_TYPE-1.13.11-8f7eb9cc
-curl https://gethstore.blob.core.windows.net/builds/$geth_package.tar.gz -o /tmp/geth.tar.gz || { echo 'Download failed'; exit 1; }
-mkdir -p $GOPATH/bin
-tar -xvf /tmp/geth.tar.gz -C $GOPATH
-cp $GOPATH/$geth_package/geth $GOPATH/bin
-geth version
diff --git a/web/packages/api/package.json b/web/packages/api/package.json
index 82e8ac1d70..9b83bf1fd2 100644
--- a/web/packages/api/package.json
+++ b/web/packages/api/package.json
@@ -1,6 +1,6 @@
{
"name": "@snowbridge/api",
- "version": "0.1.5",
+ "version": "0.1.6",
"description": "Snowbridge API client",
"license": "Apache-2.0",
"repository": {
diff --git a/web/packages/api/src/assets.ts b/web/packages/api/src/assets.ts
index b05d175820..0ba35f71af 100644
--- a/web/packages/api/src/assets.ts
+++ b/web/packages/api/src/assets.ts
@@ -106,7 +106,16 @@ export const assetErc20Balance = async (
}
}
-export const assetErc20Metadata = async (context: Context, tokenAddress: string) => {
+export type ERC20Metadata = {
+ name: string
+ symbol: string
+ decimals: bigint
+}
+
+export const assetErc20Metadata = async (
+ context: Context,
+ tokenAddress: string
+): Promise<ERC20Metadata> => {
const tokenMetadata = IERC20Metadata__factory.connect(tokenAddress, context.ethereum.api)
const [name, symbol, decimals] = await Promise.all([
tokenMetadata.name(),
diff --git a/web/packages/api/src/environment.ts b/web/packages/api/src/environment.ts
index 8a1980bd4e..f9686035fc 100644
--- a/web/packages/api/src/environment.ts
+++ b/web/packages/api/src/environment.ts
@@ -12,6 +12,11 @@ export type Config = {
SECONDARY_GOVERNANCE_CHANNEL_ID: string
RELAYERS: Relayer[]
PARACHAINS: string[]
+ SUBSCAN_API?: {
+ RELAY_CHAIN_URL: string
+ ASSET_HUB_URL: string
+ BRIDGE_HUB_URL: string
+ }
}
export type SourceType = "substrate" | "ethereum"
@@ -21,6 +26,7 @@ export type ParachainInfo = {
destinationFeeDOT: bigint
has20ByteAccounts: boolean
decimals: number
+ maxConsumers: number
ss58Format?: number
}
export type TransferLocation = {
@@ -61,6 +67,7 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
destinationFeeDOT: 0n,
has20ByteAccounts: false,
decimals: 12,
+ maxConsumers: 16,
},
erc20tokensReceivable: {
WETH: "0x87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d",
@@ -76,6 +83,7 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
destinationFeeDOT: 4_000_000_000n,
has20ByteAccounts: false,
decimals: 12,
+ maxConsumers: 16,
},
erc20tokensReceivable: {
WETH: "0x87d1f7fdfEe7f651FaBc8bFCB6E086C278b77A7d",
@@ -165,6 +173,7 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
destinationFeeDOT: 0n,
has20ByteAccounts: false,
decimals: 12,
+ maxConsumers: 16,
},
erc20tokensReceivable: {
WETH: "0xfff9976782d46cc05630d1f6ebab18b2324d6b14",
@@ -182,6 +191,7 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
destinationFeeDOT: 200_000_000_000n,
has20ByteAccounts: true,
decimals: 12,
+ maxConsumers: 16,
},
erc20tokensReceivable: {
MUSE: "0xb34a6924a02100ba6ef12af1c798285e8f7a16ee",
@@ -235,6 +245,11 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
type: "ethereum",
},
],
+ SUBSCAN_API: {
+ RELAY_CHAIN_URL: "https://rococo.api.subscan.io",
+ ASSET_HUB_URL: "https://assethub-rococo.api.subscan.io",
+ BRIDGE_HUB_URL: "https://bridgehub-rococo.api.subscan.io",
+ },
},
},
polkadot_mainnet: {
@@ -259,6 +274,7 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
destinationFeeDOT: 0n,
has20ByteAccounts: false,
decimals: 10,
+ maxConsumers: 64,
},
erc20tokensReceivable: {
WETH: "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2",
@@ -267,13 +283,13 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
],
config: {
BEACON_HTTP_API: "https://lodestar-mainnet.chainsafe.io",
- ETHEREUM_WS_API: (key) => `https://mainnet.infura.io/v3/${key}`,
+ ETHEREUM_WS_API: (key) => `https://eth-mainnet.g.alchemy.com/v2/${key}`,
RELAY_CHAIN_WS_URL: "wss://polkadot-rpc.dwellir.com",
ASSET_HUB_WS_URL: "wss://asset-hub-polkadot-rpc.dwellir.com",
BRIDGE_HUB_WS_URL: "wss://polkadot-bridge-hub-rpc.dwellir.com",
PARACHAINS: [],
GATEWAY_CONTRACT: "0x27ca963c279c93801941e1eb8799c23f407d68e7",
- BEEFY_CONTRACT: "0x27e5e17ac995d3d720c311e1e9560e28f5855fb1",
+ BEEFY_CONTRACT: "0xad04888ff41947a2e34f9e7b990bbc6cd85fe1d1",
ASSET_HUB_PARAID: 1000,
BRIDGE_HUB_PARAID: 1002,
PRIMARY_GOVERNANCE_CHANNEL_ID:
@@ -283,12 +299,12 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
RELAYERS: [
{
name: "beacon",
- account: "5FyC9GkHhiAYjMtddwVNc2gx8wBjH9gpMKWbQ1QVXmmJtr8M",
+ account: "5HHDmTHN4FZYhuMSt3oP8YySDxzPLj9ZGBwxZjSdKf29qcnj",
type: "substrate",
},
{
name: "beefy",
- account: "0xF061685F2B729b89a7A5966B3ab9aee15269e8FE",
+ account: "0xB8124B07467E46dE73eb5c73a7b1E03863F18062",
type: "ethereum",
},
{
@@ -308,10 +324,15 @@ export const SNOWBRIDGE_ENV: { [id: string]: SnowbridgeEnvironment } = {
},
{
name: "parachain-assethub",
- account: "0x0b65d43d159f1C40Bad7768fd59667E3104a2ECE",
+ account: "0x1F1819C3C68F9533adbB8E51C8E8428a818D693E",
type: "ethereum",
},
],
+ SUBSCAN_API: {
+ RELAY_CHAIN_URL: "https://polkadot.api.subscan.io",
+ ASSET_HUB_URL: "https://assethub-polkadot.api.subscan.io",
+ BRIDGE_HUB_URL: "https://bridgehub-polkadot.api.subscan.io",
+ },
},
},
}
diff --git a/web/packages/api/src/history.ts b/web/packages/api/src/history.ts
new file mode 100644
index 0000000000..58ad55d712
--- /dev/null
+++ b/web/packages/api/src/history.ts
@@ -0,0 +1,1062 @@
+import { Context } from "./index"
+import { fetchBeaconSlot, paraIdToChannelId } from "./utils"
+import { SubscanApi, fetchEvents, fetchExtrinsics } from "./subscan"
+import { forwardedTopicId } from "./utils"
+
+export enum TransferStatus {
+ Pending,
+ Complete,
+ Failed,
+}
+
+export type TransferInfo = {
+ when: Date
+ sourceAddress: string
+ beneficiaryAddress: string
+ tokenAddress: string
+ destinationParachain?: number
+ destinationFee?: string
+ amount: string
+}
+
+export type ToPolkadotTransferResult = {
+ id: string
+ status: TransferStatus
+ info: TransferInfo
+ submitted: {
+ blockHash: string
+ blockNumber: number
+ logIndex: number
+ transactionHash: string
+ transactionIndex: number
+ channelId: string
+ messageId: string
+ nonce: number
+ parentBeaconSlot: number
+ }
+ beaconClientIncluded?: {
+ extrinsic_index: string
+ extrinsic_hash: string
+ event_index: string
+ block_timestamp: number
+ beaconSlot: number
+ beaconBlockHash: string
+ }
+ inboundMessageReceived?: {
+ extrinsic_index: string
+ extrinsic_hash: string
+ event_index: string
+ block_timestamp: number
+ messageId: string
+ channelId: string
+ nonce: number
+ }
+ assetHubMessageProcessed?: {
+ extrinsic_hash: string
+ event_index: string
+ block_timestamp: number
+ success: boolean
+ sibling: number
+ }
+}
+
+export type ToEthereumTransferResult = {
+ id: string
+ status: TransferStatus
+ info: TransferInfo
+ submitted: {
+ extrinsic_index: string
+ extrinsic_hash: string
+ block_hash: string
+ account_id: string
+ block_num: number
+ block_timestamp: number
+ messageId: string
+ bridgeHubMessageId: string
+ success: boolean
+ relayChain: {
+ block_hash: string
+ block_num: number
+ }
+ }
+ bridgeHubXcmDelivered?: {
+ extrinsic_hash: string
+ event_index: string
+ block_timestamp: number
+ siblingParachain: number
+ success: boolean
+ }
+ bridgeHubChannelDelivered?: {
+ extrinsic_hash: string
+ event_index: string
+ block_timestamp: number
+ channelId: string
+ success: boolean
+ }
+ bridgeHubMessageQueued?: {
+ extrinsic_hash: string
+ event_index: string
+ block_timestamp: number
+ }
+ bridgeHubMessageAccepted?: {
+ extrinsic_hash: string
+ event_index: string
+ block_timestamp: number
+ nonce: number
+ }
+ ethereumBeefyIncluded?: {
+ blockNumber: number
+ blockHash: string
+ transactionHash: string
+ transactionIndex: number
+ logIndex: number
+ relayChainblockNumber: number
+ mmrRoot: string
+ }
+ ethereumMessageDispatched?: {
+ blockNumber: number
+ blockHash: string
+ transactionHash: string
+ transactionIndex: number
+ logIndex: number
+ messageId: string
+ channelId: string
+ nonce: number
+ success: boolean
+ }
+}
+
+export const toPolkadotHistory = async (
+ context: Context,
+ assetHubScan: SubscanApi,
+ bridgeHubScan: SubscanApi,
+ range: {
+ assetHub: { fromBlock: number; toBlock: number }
+ bridgeHub: { fromBlock: number; toBlock: number }
+ ethereum: { fromBlock: number; toBlock: number }
+ }
+): Promise<ToPolkadotTransferResult[]> => {
+ console.log("Fetching history To Polkadot")
+ console.log(
+ `eth from ${range.ethereum.fromBlock} to ${range.ethereum.toBlock} (${
+ range.ethereum.toBlock - range.ethereum.fromBlock
+ } blocks)`
+ )
+ console.log(
+ `assethub from ${range.assetHub.fromBlock} to ${range.assetHub.toBlock} (${
+ range.assetHub.toBlock - range.assetHub.fromBlock
+ } blocks)`
+ )
+ console.log(
+ `bridgehub from ${range.bridgeHub.fromBlock} to ${range.bridgeHub.toBlock} (${
+ range.bridgeHub.toBlock - range.bridgeHub.fromBlock
+ } blocks)`
+ )
+
+ const bridgeHubParaIdCodec =
+ await context.polkadot.api.bridgeHub.query.parachainInfo.parachainId()
+ const bridgeHubParaId = bridgeHubParaIdCodec.toPrimitive() as number
+
+ const [
+ ethOutboundMessages,
+ beaconClientUpdates,
+ inboundMessagesReceived,
+ assetHubMessageQueue,
+ ] = [
+ await getEthOutboundMessages(context, range.ethereum.fromBlock, range.ethereum.toBlock),
+
+ await getBeaconClientUpdates(
+ bridgeHubScan,
+ range.bridgeHub.fromBlock,
+ range.bridgeHub.toBlock
+ ),
+
+ await getBridgeHubInboundMessages(
+ bridgeHubScan,
+ range.bridgeHub.fromBlock,
+ range.bridgeHub.toBlock
+ ),
+
+ await getAssetHubMessageQueueProccessed(
+ assetHubScan,
+ bridgeHubParaId,
+ range.assetHub.fromBlock,
+ range.assetHub.toBlock
+ ),
+ ]
+
+ console.log("number of transfers", ethOutboundMessages.length)
+ console.log("number of beacon client updates", beaconClientUpdates.length)
+ console.log("number of inbound messages received", inboundMessagesReceived.length)
+ console.log("number of asset hub message queue processed", assetHubMessageQueue.length)
+
+ const results: ToPolkadotTransferResult[] = []
+ for (const outboundMessage of ethOutboundMessages) {
+ const result: ToPolkadotTransferResult = {
+ id: `${outboundMessage.transactionHash}-${outboundMessage.data.messageId}`,
+ status: TransferStatus.Pending,
+ info: {
+ when: new Date(outboundMessage.data.timestamp * 1000),
+ sourceAddress: outboundMessage.data.sourceAddress,
+ beneficiaryAddress: outboundMessage.data.beneficiaryAddress,
+ tokenAddress: outboundMessage.data.tokenAddress,
+ destinationParachain: outboundMessage.data.destinationParachain,
+ destinationFee: outboundMessage.data.destinationFee,
+ amount: outboundMessage.data.amount,
+ },
+ submitted: {
+ blockHash: outboundMessage.blockHash,
+ blockNumber: outboundMessage.blockNumber,
+ logIndex: outboundMessage.logIndex,
+ transactionHash: outboundMessage.transactionHash,
+ transactionIndex: outboundMessage.transactionIndex,
+ channelId: outboundMessage.data.channelId,
+ messageId: outboundMessage.data.messageId,
+ nonce: outboundMessage.data.nonce,
+ parentBeaconSlot: Number(outboundMessage.data.parentBeaconSlot),
+ },
+ }
+ results.push(result)
+
+ const beaconClientIncluded = beaconClientUpdates.find(
+ (ev) => ev.data.beaconSlot > result.submitted.parentBeaconSlot + 1 // add one to parent to get current
+ )
+ if (beaconClientIncluded) {
+ result.beaconClientIncluded = {
+ extrinsic_index: beaconClientIncluded.extrinsic_index,
+ extrinsic_hash: beaconClientIncluded.extrinsic_hash,
+ event_index: beaconClientIncluded.event_index,
+ block_timestamp: beaconClientIncluded.block_timestamp,
+ beaconSlot: beaconClientIncluded.data.beaconSlot,
+ beaconBlockHash: beaconClientIncluded.data.beaconBlockHash,
+ }
+ }
+
+ const inboundMessageReceived = inboundMessagesReceived.find(
+ (ev) =>
+ ev.data.messageId === result.submitted.messageId &&
+ ev.data.channelId === result.submitted.channelId &&
+ ev.data.nonce === result.submitted.nonce
+ )
+ if (inboundMessageReceived) {
+ result.inboundMessageReceived = {
+ extrinsic_index: inboundMessageReceived.extrinsic_index,
+ extrinsic_hash: inboundMessageReceived.extrinsic_hash,
+ event_index: inboundMessageReceived.event_index,
+ block_timestamp: inboundMessageReceived.block_timestamp,
+ messageId: inboundMessageReceived.data.messageId,
+ channelId: inboundMessageReceived.data.channelId,
+ nonce: inboundMessageReceived.data.nonce,
+ }
+ }
+
+ const assetHubMessageProcessed = assetHubMessageQueue.find(
+ (ev) =>
+ ev.data.sibling === bridgeHubParaId &&
+ ev.data.messageId == result.submitted.messageId
+ )
+ if (assetHubMessageProcessed) {
+ result.assetHubMessageProcessed = {
+ extrinsic_hash: assetHubMessageProcessed.extrinsic_hash,
+ event_index: assetHubMessageProcessed.event_index,
+ block_timestamp: assetHubMessageProcessed.block_timestamp,
+ success: assetHubMessageProcessed.data.success,
+ sibling: assetHubMessageProcessed.data.sibling,
+ }
+ if (!result.assetHubMessageProcessed.success) {
+ result.status = TransferStatus.Failed
+ continue
+ }
+
+ result.status = TransferStatus.Complete
+ }
+ }
+ return results
+}
+
+export const toEthereumHistory = async (
+ context: Context,
+ assetHubScan: SubscanApi,
+ bridgeHubScan: SubscanApi,
+ relaychainScan: SubscanApi,
+ range: {
+ assetHub: { fromBlock: number; toBlock: number }
+ bridgeHub: { fromBlock: number; toBlock: number }
+ ethereum: { fromBlock: number; toBlock: number }
+ }
+): Promise<ToEthereumTransferResult[]> => {
+ console.log("Fetching history To Ethereum")
+ console.log(
+ `eth from ${range.ethereum.fromBlock} to ${range.ethereum.toBlock} (${
+ range.ethereum.toBlock - range.ethereum.fromBlock
+ } blocks)`
+ )
+ console.log(
+ `assethub from ${range.assetHub.fromBlock} to ${range.assetHub.toBlock} (${
+ range.assetHub.toBlock - range.assetHub.fromBlock
+ } blocks)`
+ )
+ console.log(
+ `bridgehub from ${range.bridgeHub.fromBlock} to ${range.bridgeHub.toBlock} (${
+ range.bridgeHub.toBlock - range.bridgeHub.fromBlock
+ } blocks)`
+ )
+
+ const [ethNetwork, assetHubParaId] = await Promise.all([
+ context.ethereum.api.getNetwork(),
+ context.polkadot.api.assetHub.query.parachainInfo.parachainId(),
+ ])
+ const assetHubParaIdDecoded = assetHubParaId.toPrimitive() as number
+ const assetHubChannelId = paraIdToChannelId(assetHubParaIdDecoded)
+
+ const [
+ allTransfers,
+ allMessageQueues,
+ allOutboundMessages,
+ allBeefyClientUpdates,
+ allInboundMessages,
+ ] = [
+ await getAssetHubTransfers(
+ assetHubScan,
+ relaychainScan,
+ Number(ethNetwork.chainId),
+ range.assetHub.fromBlock,
+ range.assetHub.toBlock
+ ),
+
+ await getBridgeHubMessageQueueProccessed(
+ bridgeHubScan,
+ assetHubParaIdDecoded,
+ assetHubChannelId,
+ range.bridgeHub.fromBlock,
+ range.bridgeHub.toBlock
+ ),
+
+ await getBridgeHubOutboundMessages(
+ bridgeHubScan,
+ range.bridgeHub.fromBlock,
+ range.bridgeHub.toBlock
+ ),
+
+ await getBeefyClientUpdates(context, range.ethereum.fromBlock, range.ethereum.toBlock),
+
+ await getEthInboundMessagesDispatched(
+ context,
+ range.ethereum.fromBlock,
+ range.ethereum.toBlock
+ ),
+ ]
+
+ console.log("number of transfers", allTransfers.length)
+ console.log("number of message queues", allMessageQueues.length)
+ console.log("number of outbound messages", allOutboundMessages.length)
+ console.log("number of beefy updates", allBeefyClientUpdates.length)
+ console.log("number of inbound messages", allInboundMessages.length)
+
+ const results: ToEthereumTransferResult[] = []
+ for (const transfer of allTransfers) {
+ const result: ToEthereumTransferResult = {
+ id: `${transfer.extrinsic_hash}-${transfer.data.messageId}`,
+ status: TransferStatus.Pending,
+ info: {
+ when: new Date(transfer.block_timestamp * 1000),
+ sourceAddress: transfer.data.account_id,
+ tokenAddress: transfer.data.tokenAddress,
+ beneficiaryAddress: transfer.data.beneficiaryAddress,
+ amount: transfer.data.amount,
+ },
+ submitted: {
+ extrinsic_index: transfer.extrinsic_index,
+ extrinsic_hash: transfer.extrinsic_hash,
+ block_hash: transfer.data.block_hash,
+ account_id: transfer.data.account_id,
+ block_num: transfer.block_num,
+ block_timestamp: transfer.block_timestamp,
+ messageId: transfer.data.messageId,
+ bridgeHubMessageId: transfer.data.bridgeHubMessageId,
+ success: transfer.data.success,
+ relayChain: {
+ block_num: transfer.data.relayChain.block_num,
+ block_hash: transfer.data.relayChain.block_hash,
+ },
+ },
+ }
+ results.push(result)
+ if (!result.submitted.success) {
+ result.status = TransferStatus.Failed
+ continue
+ }
+
+ const bridgeHubXcmDelivered = allMessageQueues.find(
+ (ev: any) =>
+ ev.data.messageId === result.submitted.bridgeHubMessageId &&
+ ev.data.sibling == assetHubParaIdDecoded
+ )
+ if (bridgeHubXcmDelivered) {
+ result.bridgeHubXcmDelivered = {
+ block_timestamp: bridgeHubXcmDelivered.block_timestamp,
+ event_index: bridgeHubXcmDelivered.event_index,
+ extrinsic_hash: bridgeHubXcmDelivered.extrinsic_hash,
+ siblingParachain: bridgeHubXcmDelivered.data.sibling,
+ success: bridgeHubXcmDelivered.data.success,
+ }
+ if (!result.bridgeHubXcmDelivered.success) {
+ result.status = TransferStatus.Failed
+ continue
+ }
+ }
+ const bridgeHubChannelDelivered = allMessageQueues.find(
+ (ev: any) =>
+ ev.extrinsic_hash === result.bridgeHubXcmDelivered?.extrinsic_hash &&
+ ev.data.channelId === assetHubChannelId &&
+ ev.block_timestamp === result.bridgeHubXcmDelivered?.block_timestamp
+ )
+ if (bridgeHubChannelDelivered) {
+ result.bridgeHubChannelDelivered = {
+ block_timestamp: bridgeHubChannelDelivered.block_timestamp,
+ event_index: bridgeHubChannelDelivered.event_index,
+ extrinsic_hash: bridgeHubChannelDelivered.extrinsic_hash,
+ channelId: bridgeHubChannelDelivered.data.channelId,
+ success: bridgeHubChannelDelivered.data.success,
+ }
+ if (!result.bridgeHubChannelDelivered.success) {
+ result.status = TransferStatus.Failed
+ continue
+ }
+ }
+
+ const bridgeHubMessageQueued = allOutboundMessages.find(
+ (ev: any) =>
+ ev.data.messageId === result.submitted.messageId &&
+ ev.event_id === "MessageQueued" /* TODO: ChannelId */
+ )
+ if (bridgeHubMessageQueued) {
+ result.bridgeHubMessageQueued = {
+ block_timestamp: bridgeHubMessageQueued.block_timestamp,
+ event_index: bridgeHubMessageQueued.event_index,
+ extrinsic_hash: bridgeHubMessageQueued.extrinsic_hash,
+ }
+ }
+ const bridgeHubMessageAccepted = allOutboundMessages.find(
+ (ev: any) =>
+ ev.data.messageId === result.submitted.messageId &&
+ ev.event_id === "MessageAccepted" /* TODO: ChannelId */
+ )
+ if (bridgeHubMessageAccepted) {
+ result.bridgeHubMessageAccepted = {
+ block_timestamp: bridgeHubMessageAccepted.block_timestamp,
+ event_index: bridgeHubMessageAccepted.event_index,
+ extrinsic_hash: bridgeHubMessageAccepted.extrinsic_hash,
+ nonce: bridgeHubMessageAccepted.data.nonce,
+ }
+ }
+
+ const secondsTillAcceptedByRelayChain = 6 /* 6 secs per block */ * 10 /* blocks */
+ const ethereumBeefyIncluded = allBeefyClientUpdates.find(
+ (ev) =>
+ ev.data.blockNumber >
+ result.submitted.relayChain.block_num + secondsTillAcceptedByRelayChain
+ )
+ if (ethereumBeefyIncluded) {
+ result.ethereumBeefyIncluded = {
+ blockNumber: ethereumBeefyIncluded.blockNumber,
+ blockHash: ethereumBeefyIncluded.blockHash,
+ transactionHash: ethereumBeefyIncluded.transactionHash,
+ transactionIndex: ethereumBeefyIncluded.transactionIndex,
+ logIndex: ethereumBeefyIncluded.logIndex,
+ relayChainblockNumber: ethereumBeefyIncluded.data.blockNumber,
+ mmrRoot: ethereumBeefyIncluded.data.mmrRoot,
+ }
+ }
+
+ const ethereumMessageDispatched = allInboundMessages.find(
+ (ev) =>
+ ev.data.channelId === result.bridgeHubChannelDelivered?.channelId &&
+ ev.data.messageId === result.submitted.messageId &&
+ ev.data.nonce === result.bridgeHubMessageAccepted?.nonce
+ )
+
+ if (ethereumMessageDispatched) {
+ result.ethereumMessageDispatched = {
+ blockNumber: ethereumMessageDispatched.blockNumber,
+ blockHash: ethereumMessageDispatched.blockHash,
+ transactionHash: ethereumMessageDispatched.transactionHash,
+ transactionIndex: ethereumMessageDispatched.transactionIndex,
+ logIndex: ethereumMessageDispatched.logIndex,
+ messageId: ethereumMessageDispatched.data.messageId,
+ channelId: ethereumMessageDispatched.data.channelId,
+ nonce: ethereumMessageDispatched.data.nonce,
+ success: ethereumMessageDispatched.data.success,
+ }
+ if (!result.ethereumMessageDispatched.success) {
+ result.status = TransferStatus.Failed
+ continue
+ }
+
+ result.status = TransferStatus.Complete
+ }
+ }
+ return results
+}
+
+const getAssetHubTransfers = async (
+ assetHubScan: SubscanApi,
+ relaychainScan: SubscanApi,
+ ethChainId: number,
+ fromBlock: number,
+ toBlock: number
+) => {
+ const acc = []
+ const rows = 100
+ let page = 0
+
+ let endOfPages = false
+ while (!endOfPages) {
+ const { extrinsics: transfers, endOfPages: end } = await subFetchBridgeTransfers(
+ assetHubScan,
+ relaychainScan,
+ ethChainId,
+ fromBlock,
+ toBlock,
+ page,
+ rows
+ )
+ endOfPages = end
+ acc.push(...transfers)
+ page++
+ }
+ return acc
+}
+
+const getBridgeHubMessageQueueProccessed = async (
+ bridgeHubScan: SubscanApi,
+ assetHubParaId: number,
+ assetHubChannelId: string,
+ fromBlock: number,
+ toBlock: number
+) => {
+ const acc = []
+ const rows = 100
+ let page = 0
+ let endOfPages = false
+ while (!endOfPages) {
+ const { events, endOfPages: end } = await subFetchMessageQueueBySiblingOrChannel(
+ bridgeHubScan,
+ assetHubParaId,
+ assetHubChannelId,
+ fromBlock,
+ toBlock,
+ page,
+ rows
+ )
+ endOfPages = end
+ acc.push(...events)
+ page++
+ }
+ return acc
+}
+
+const getBridgeHubOutboundMessages = async (
+ bridgeHubScan: SubscanApi,
+ fromBlock: number,
+ toBlock: number
+) => {
+ const acc = []
+ const rows = 100
+ let page = 0
+ let endOfPages = false
+ while (!endOfPages) {
+ const { events, endOfPages: end } = await subFetchOutboundMessages(
+ bridgeHubScan,
+ fromBlock,
+ toBlock,
+ page,
+ rows
+ )
+ endOfPages = end
+ acc.push(...events)
+ page++
+ }
+ return acc
+}
+
+const getBeefyClientUpdates = async (context: Context, fromBlock: number, toBlock: number) => {
+ const { beefyClient } = context.ethereum.contracts
+ const NewMMRRoot = beefyClient.getEvent("NewMMRRoot")
+ const roots = await beefyClient.queryFilter(NewMMRRoot, fromBlock, toBlock)
+ const updates = roots.map((r) => {
+ return {
+ blockNumber: r.blockNumber,
+ blockHash: r.blockHash,
+ logIndex: r.index,
+ transactionIndex: r.transactionIndex,
+ transactionHash: r.transactionHash,
+ data: {
+ blockNumber: Number(r.args.blockNumber),
+ mmrRoot: r.args.mmrRoot,
+ },
+ }
+ })
+ updates.sort((a, b) => Number(a.data.blockNumber - b.data.blockNumber))
+ return updates
+}
+
+const getEthInboundMessagesDispatched = async (
+ context: Context,
+ fromBlock: number,
+ toBlock: number
+) => {
+ const { gateway } = context.ethereum.contracts
+ const InboundMessageDispatched = gateway.getEvent("InboundMessageDispatched")
+ const inboundMessages = await gateway.queryFilter(InboundMessageDispatched, fromBlock, toBlock)
+ return inboundMessages.map((im) => {
+ return {
+ blockNumber: im.blockNumber,
+ blockHash: im.blockHash,
+ logIndex: im.index,
+ transactionIndex: im.transactionIndex,
+ transactionHash: im.transactionHash,
+ data: {
+ channelId: im.args.channelID,
+ nonce: Number(im.args.nonce),
+ messageId: im.args.messageID,
+ success: im.args.success,
+ },
+ }
+ })
+}
+
+const subFetchBridgeTransfers = async (
+ assetHub: SubscanApi,
+ relaychain: SubscanApi,
+ ethChainId: number,
+ fromBlock: number,
+ toBlock: number,
+ page: number,
+ rows = 10
+) => {
+ return fetchExtrinsics(
+ assetHub,
+ "polkadotxcm",
+ "transfer_assets",
+ fromBlock,
+ toBlock,
+ page,
+ rows,
+ async (extrinsic, params) => {
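+ // Keep only transfers destined for Ethereum: parents === 2 with a GlobalConsensus Ethereum junction, handling both the XCM V3 and V4 encodings.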
+ const dest = params.find((p: any) => p.name == "dest")
+ const parents: number | null = dest.value.V3?.parents ?? dest.value.V4?.parents ?? null
+ const chainId: number | null =
+ dest.value.V3?.interior?.X1?.GlobalConsensus?.Ethereum ??
+ (dest.value.V4?.interior?.X1 && dest.value.V4?.interior?.X1[0])?.GlobalConsensus
+ ?.Ethereum ??
+ null
+
+ if (!(parents === 2 && chainId === ethChainId)) {
+ return null
+ }
+
+ const beneficiary = params.find((p: any) => p.name == "beneficiary")?.value
+ const beneficiaryParents: number | null =
+ beneficiary.V3?.parents ?? beneficiary.V4?.parents ?? null
+ const beneficiaryAddress: string | null =
+ beneficiary.V3?.interior?.X1?.AccountKey20?.key ??
+ (beneficiary.V4?.interior?.X1 && beneficiary.V4?.interior?.X1[0])?.AccountKey20
+ ?.key ??
+ null
+
+ if (!(beneficiaryParents === 0 && beneficiaryAddress !== null)) {
+ return null
+ }
+
+ const assets = params.find((p: any) => p.name == "assets")?.value
+ let amount: string | null = null
+ let tokenParents: number | null = null
+ let tokenAddress: string | null = null
+ let tokenChainId: number | null = null
+ for (const asset of assets.V3 ?? assets.V4 ?? []) {
+ amount = asset.fun?.Fungible ?? null
+ if (amount === null) {
+ continue
+ }
+
+ tokenParents = asset.id?.parents ?? asset.id?.Concrete?.parents ?? null
+ if (tokenParents === null) {
+ continue
+ }
+
+ const tokenX2 =
+ asset.id?.interior?.X2 ?? Object.values(asset.id?.Concrete?.interior?.X2 ?? {})
+ if (tokenX2 === null || tokenX2.length !== 2) {
+ continue
+ }
+
+ tokenChainId = tokenX2[0].GlobalConsensus?.Ethereum ?? null
+ if (tokenChainId === null) {
+ continue
+ }
+
+ tokenAddress = tokenX2[1].AccountKey20?.key ?? null
+ if (tokenAddress === null) {
+ continue
+ }
+
+ // found first token
+ break
+ }
+
+ if (
+ !(
+ tokenParents === 2 &&
+ tokenChainId === ethChainId &&
+ tokenAddress !== null &&
+ amount !== null
+ )
+ ) {
+ return null
+ }
+
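+ // Fetch the extrinsic's events together with the relay-chain block at the same timestamp; the relay block number is later matched against BEEFY updates.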
+ const [
+ {
+ json: { data: transfer },
+ },
+ {
+ json: { data: relayBlock },
+ },
+ ] = [
+ await assetHub.post("api/scan/extrinsic", {
+ extrinsic_index: extrinsic.extrinsic_index,
+ only_extrinsic_event: true,
+ }),
+ await relaychain.post("api/scan/block", {
+ block_timestamp: extrinsic.block_timestamp,
+ only_head: true,
+ }),
+ ]
+ const maybeEvent = transfer.event.find(
+ (ev: any) => ev.module_id === "polkadotxcm" && ev.event_id === "Sent"
+ )
+ let messageId: string | null = null
+ let bridgeHubMessageId: string | null = null
+
+ if (transfer.success && maybeEvent) {
+ const ev = JSON.parse(maybeEvent.params)
+ messageId = ev.find((pa: any) => pa.name === "message_id")?.value ?? null
+ if (messageId) {
+ bridgeHubMessageId = forwardedTopicId(messageId)
+ }
+ }
+
+ const success =
+ transfer.event.find(
+ (ev: any) => ev.module_id === "system" && ev.event_id === "ExtrinsicSuccess"
+ ) !== undefined
+
+ return {
+ events: transfer.event,
+ messageId,
+ bridgeHubMessageId,
+ success,
+ block_hash: transfer.block_hash,
+ account_id: transfer.account_id,
+ relayChain: { block_num: relayBlock.block_num, block_hash: relayBlock.hash },
+ tokenAddress,
+ beneficiaryAddress,
+ amount,
+ }
+ }
+ )
+}
+
+const subFetchMessageQueueBySiblingOrChannel = async (
+ api: SubscanApi,
+ filterSibling: number,
+ filterChannelId: string,
+ fromBlock: number,
+ toBlock: number,
+ page: number,
+ rows = 10
+) => {
+ return fetchEvents(
+ api,
+ "messagequeue",
+ ["Processed", "ProcessingFailed", "OverweightEnqueued"],
+ fromBlock,
+ toBlock,
+ page,
+ rows,
+ async (event, params) => {
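+ // MessageQueue events originate either from a sibling parachain (origin.Sibling) or from a Snowbridge channel (origin.Snowbridge); keep only those matching the given sibling or channel id.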
+ const messageId = params.find((e: any) => e.name === "id")?.value
+ if (!messageId) {
+ return null
+ }
+
+ const origin = params.find((e: any) => e.name === "origin")?.value
+ const sibling = origin?.Sibling ?? null
+ const channelId = origin?.Snowbridge ?? null
+
+ if (sibling === null && channelId !== filterChannelId) {
+ return null
+ }
+ if (channelId === null && sibling !== filterSibling) {
+ return null
+ }
+ if (channelId === null && sibling === null) {
+ return null
+ }
+
+ let success =
+ event.event_id === "Processed" &&
+ (params.find((e: any) => e.name === "success")?.value ?? false)
+
+ return { messageId, sibling, channelId, success }
+ }
+ )
+}
+
+const subFetchMessageQueueBySibling = async (
+ api: SubscanApi,
+ filterSibling: number,
+ fromBlock: number,
+ toBlock: number,
+ page: number,
+ rows = 10
+) => {
+ return fetchEvents(
+ api,
+ "messagequeue",
+ ["Processed", "ProcessingFailed", "OverweightEnqueued"],
+ fromBlock,
+ toBlock,
+ page,
+ rows,
+ async (event, params) => {
+ const messageId = params.find((e: any) => e.name === "id")?.value
+ if (!messageId) {
+ return null
+ }
+
+ const origin = params.find((e: any) => e.name === "origin")?.value
+ const sibling = origin?.Sibling
+
+ if (sibling !== filterSibling) {
+ return null
+ }
+
+ let success =
+ event.event_id === "Processed" &&
+ (params.find((e: any) => e.name === "success")?.value ?? false)
+
+ return { messageId, sibling, success }
+ }
+ )
+}
+
+const subFetchOutboundMessages = async (
+ api: SubscanApi,
+ fromBlock: number,
+ toBlock: number,
+ page: number,
+ rows = 10
+) => {
+ return fetchEvents(
+ api,
+ "ethereumoutboundqueue",
+ ["MessageAccepted", "MessageQueued"],
+ fromBlock,
+ toBlock,
+ page,
+ rows,
+ async (_, params) => {
+ const messageId = params.find((e: any) => e.name === "id")?.value
+ // TODO: channelId
+ const nonce = params.find((e: any) => e.name === "nonce")?.value ?? null
+ return { messageId, nonce }
+ }
+ )
+}
+
+const getEthOutboundMessages = async (context: Context, fromBlock: number, toBlock: number) => {
+ const { gateway } = context.ethereum.contracts
+ const OutboundMessageAccepted = gateway.getEvent("OutboundMessageAccepted")
+ const outboundMessages = await gateway.queryFilter(OutboundMessageAccepted, fromBlock, toBlock)
+ const result = []
+ for (const om of outboundMessages) {
+ const block = await om.getBlock()
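+ // Resolve the block's parent beacon block root (EIP-4788) to its beacon slot, recorded below as parentBeaconSlot.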
+ const beaconBlockRoot = await fetchBeaconSlot(
+ context.config.ethereum.beacon_url,
+ block.parentBeaconBlockRoot as any
+ )
+ const transaction = await block.getTransaction(om.transactionHash)
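+ // Decode the original sendToken() calldata to recover the token, destination parachain, beneficiary and amount.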
+ const [
+ tokenAddress,
+ destinationParachain,
+ [addressType, beneficiaryAddress],
+ destinationFee,
+ amount,
+ ] = context.ethereum.contracts.gateway.interface.decodeFunctionData(
+ "sendToken",
+ transaction.data
+ )
+ let beneficiary = beneficiaryAddress as string
+ switch (addressType) {
+ case 0n:
+ {
+ // 4-byte index
+ const index = BigInt(beneficiary.substring(0, 6))
+ beneficiary = index.toString()
+ }
+ break
+ case 2n:
+ {
+ // 20-byte address
+ beneficiary = beneficiary.substring(0, 42)
+ }
+ break
+ }
+
+ result.push({
+ blockNumber: om.blockNumber,
+ blockHash: om.blockHash,
+ logIndex: om.index,
+ transactionIndex: om.transactionIndex,
+ transactionHash: om.transactionHash,
+ data: {
+ sourceAddress: transaction.from,
+ timestamp: block.timestamp,
+ channelId: om.args.channelID,
+ nonce: Number(om.args.nonce),
+ messageId: om.args.messageID,
+ parentBeaconSlot: Number(beaconBlockRoot.data.message.slot),
+ tokenAddress: tokenAddress as string,
+ destinationParachain: Number(destinationParachain),
+ beneficiaryAddress: beneficiary,
+ destinationFee: destinationFee.toString() as string,
+ amount: amount.toString() as string,
+ },
+ })
+ }
+ return result
+}
+
+const getBeaconClientUpdates = async (
+ bridgeHubScan: SubscanApi,
+ fromBlock: number,
+ toBlock: number
+) => {
+ const updates = []
+ const rows = 100
+ let page = 0
+ let endOfPages = false
+ while (!endOfPages) {
+ const { events, endOfPages: end } = await subFetchBeaconHeaderImports(
+ bridgeHubScan,
+ fromBlock,
+ toBlock,
+ page,
+ rows
+ )
+ endOfPages = end
+ updates.push(...events)
+ page++
+ }
+ updates.sort((a, b) => Number(a.data.beaconSlot - b.data.beaconSlot))
+ return updates
+}
+
+const getBridgeHubInboundMessages = async (
+ bridgeHubScan: SubscanApi,
+ fromBlock: number,
+ toBlock: number
+) => {
+ const updates = []
+ const rows = 100
+ let page = 0
+ let endOfPages = false
+ while (!endOfPages) {
+ const { events, endOfPages: end } = await subFetchInboundMessageReceived(
+ bridgeHubScan,
+ fromBlock,
+ toBlock,
+ page,
+ rows
+ )
+ endOfPages = end
+ updates.push(...events)
+ page++
+ }
+ return updates
+}
+
+const getAssetHubMessageQueueProccessed = async (
+ assetHubScan: SubscanApi,
+ bridgeHubParaId: number,
+ fromBlock: number,
+ toBlock: number
+) => {
+ const acc = []
+ const rows = 100
+ let page = 0
+ let endOfPages = false
+ while (!endOfPages) {
+ const { events, endOfPages: end } = await subFetchMessageQueueBySibling(
+ assetHubScan,
+ bridgeHubParaId,
+ fromBlock,
+ toBlock,
+ page,
+ rows
+ )
+ endOfPages = end
+ acc.push(...events)
+ page++
+ }
+ return acc
+}
+
+const subFetchBeaconHeaderImports = async (
+ api: SubscanApi,
+ fromBlock: number,
+ toBlock: number,
+ page: number,
+ rows = 10
+) => {
+ return fetchEvents(
+ api,
+ "ethereumbeaconclient",
+ ["BeaconHeaderImported"],
+ fromBlock,
+ toBlock,
+ page,
+ rows,
+ async (_, params) => {
+ const beaconBlockHash = params.find((e: any) => e.name === "block_hash")?.value
+ const beaconSlot = params.find((e: any) => e.name === "slot")?.value
+ return { beaconBlockHash, beaconSlot }
+ }
+ )
+}
+
+const subFetchInboundMessageReceived = async (
+ api: SubscanApi,
+ fromBlock: number,
+ toBlock: number,
+ page: number,
+ rows = 10
+) => {
+ return fetchEvents(
+ api,
+ "ethereuminboundqueue",
+ ["MessageReceived"],
+ fromBlock,
+ toBlock,
+ page,
+ rows,
+ async (_, params) => {
+ const channelId = params.find((e: any) => e.name === "channel_id")?.value
+ const nonce = params.find((e: any) => e.name === "nonce")?.value
+ const messageId = params.find((e: any) => e.name === "message_id")?.value
+ return { channelId, nonce, messageId }
+ }
+ )
+}
diff --git a/web/packages/api/src/index.ts b/web/packages/api/src/index.ts
index c721e914a8..6687112246 100644
--- a/web/packages/api/src/index.ts
+++ b/web/packages/api/src/index.ts
@@ -54,19 +54,26 @@ class EthereumContext {
}
}
+type Parachains = { [paraId: number]: ApiPromise }
+
class PolkadotContext {
api: {
relaychain: ApiPromise
assetHub: ApiPromise
bridgeHub: ApiPromise
- parachains: { [paraId: number]: ApiPromise }
+ parachains: Parachains
}
- constructor(relaychain: ApiPromise, assetHub: ApiPromise, bridgeHub: ApiPromise) {
+ constructor(
+ relaychain: ApiPromise,
+ assetHub: ApiPromise,
+ bridgeHub: ApiPromise,
+ parachains: Parachains
+ ) {
this.api = {
relaychain: relaychain,
assetHub: assetHub,
bridgeHub: bridgeHub,
- parachains: {},
+ parachains: parachains,
}
}
}
@@ -79,15 +86,32 @@ export const contextFactory = async (config: Config): Promise<Context> => {
} else {
ethApi = new ethers.WebSocketProvider(config.ethereum.execution_url)
}
- const relaychainApi = await ApiPromise.create({
- provider: new WsProvider(config.polkadot.url.relaychain),
- })
- const assetHubApi = await ApiPromise.create({
- provider: new WsProvider(config.polkadot.url.assetHub),
- })
- const bridgeHubApi = await ApiPromise.create({
- provider: new WsProvider(config.polkadot.url.bridgeHub),
- })
+
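+ // Open connections to any configured parachains concurrently with the relaychain, Asset Hub and Bridge Hub connections below.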
+ const parasConnect: Promise<{ paraId: number; api: ApiPromise }>[] = []
+ for (const parachain of config.polkadot.url.parachains ?? []) {
+ parasConnect.push(addParachainConnection(parachain))
+ }
+
+ const [relaychainApi, assetHubApi, bridgeHubApi] = await Promise.all([
+ ApiPromise.create({
+ provider: new WsProvider(config.polkadot.url.relaychain),
+ }),
+ ApiPromise.create({
+ provider: new WsProvider(config.polkadot.url.assetHub),
+ }),
+ ApiPromise.create({
+ provider: new WsProvider(config.polkadot.url.bridgeHub),
+ }),
+ ])
+
+ const paras = await Promise.all(parasConnect)
+ const parachains: Parachains = {}
+ for (const { paraId, api } of paras) {
+ if (paraId in parachains) {
+ throw new Error(`${paraId} already added.`)
+ }
+ parachains[paraId] = api
+ }
const gatewayAddr = config.appContracts.gateway
const beefyAddr = config.appContracts.beefy
@@ -100,25 +124,20 @@ export const contextFactory = async (config: Config): Promise => {
}
const ethCtx = new EthereumContext(ethApi, appContracts)
- const polCtx = new PolkadotContext(relaychainApi, assetHubApi, bridgeHubApi)
+ const polCtx = new PolkadotContext(relaychainApi, assetHubApi, bridgeHubApi, parachains)
const context = new Context(config, ethCtx, polCtx)
- for (const parachain of config.polkadot.url.parachains ?? []) {
- await addParachainConnection(context, parachain)
- }
+ await Promise.all(parasConnect)
return context
}
-export const addParachainConnection = async (context: Context, url: string): Promise<void> => {
+export const addParachainConnection = async (url: string) => {
const api = await ApiPromise.create({
provider: new WsProvider(url),
})
const paraId = (await api.query.parachainInfo.parachainId()).toPrimitive() as number
- if (paraId in context.polkadot.api.parachains) {
- throw new Error(`${paraId} already added.`)
- }
- context.polkadot.api.parachains[paraId] = api
console.log(`${url} added with parachain id: ${paraId}`)
+ return { paraId, api }
}
export const destroyContext = async (context: Context): Promise<void> => {
@@ -142,3 +161,5 @@ export * as utils from "./utils"
export * as status from "./status"
export * as assets from "./assets"
export * as environment from "./environment"
+export * as subscan from "./subscan"
+export * as history from "./history"
diff --git a/web/packages/api/src/status.ts b/web/packages/api/src/status.ts
index e7f0c268a5..15169ec142 100644
--- a/web/packages/api/src/status.ts
+++ b/web/packages/api/src/status.ts
@@ -26,8 +26,15 @@ export type BridgeStatusInfo = {
previousEthereumBlockOnPolkadot: number
}
}
+
+export enum ChannelKind {
+ Primary = "Primary",
+ Secondary = "Secondary",
+ AssetHub = "AssetHub",
+}
+
export type ChannelStatusInfo = {
- name?: string
+ name?: ChannelKind
toEthereum: {
outbound: number
inbound: number
diff --git a/web/packages/api/src/subscan.ts b/web/packages/api/src/subscan.ts
new file mode 100644
index 0000000000..20c4afaa4e
--- /dev/null
+++ b/web/packages/api/src/subscan.ts
@@ -0,0 +1,204 @@
+export type SubscanResult = {
+ status: number
+ statusText: string
+ json: any
+ rateLimit: SubscanRateLimit
+}
+
+export type SubscanRateLimit = {
+ limit: number | null
+ reset: number | null
+ remaining: number | null
+ retryAfter: number | null
+}
+
+export type SubscanApiPost = (subUrl: string, body: any) => Promise<SubscanResult>
+export interface SubscanApi {
+ post: SubscanApiPost
+}
+
+const sleepMs = async (ms: number) => {
+ await new Promise<void>((resolve) => {
+ const id = setTimeout(() => {
+ resolve()
+ clearTimeout(id)
+ }, ms)
+ })
+}
+
+export const createApi = (baseUrl: string, apiKey: string, options = { limit: 1 }): SubscanApi => {
+ let url = baseUrl.trim()
+ if (!url.endsWith("/")) {
+ url += "/"
+ }
+
+ const headers = new Headers()
+ headers.append("Content-Type", "application/json")
+ headers.append("x-api-key", apiKey)
+
+ let rateLimit: SubscanRateLimit = {
+ limit: options.limit,
+ reset: 0,
+ remaining: options.limit,
+ retryAfter: 0,
+ }
+ const post: SubscanApiPost = async (subUrl: string, body: any) => {
+ const request: RequestInit = {
+ method: "POST",
+ headers,
+ body: JSON.stringify(body),
+ redirect: "follow",
+ }
+
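+ // Honour Subscan's rate-limit headers: back off for retry-after, and wait out the window when the remaining quota hits zero.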
+ if (rateLimit.retryAfter !== null && rateLimit.retryAfter > 0) {
+ console.log("Being rate limited", rateLimit)
+ await sleepMs(rateLimit.retryAfter * 1000)
+ }
+ if (rateLimit.remaining === 0 && rateLimit.reset !== null && rateLimit.reset > 0) {
+ console.log("Being rate limited", rateLimit)
+ await sleepMs(rateLimit.reset * 1000)
+ }
+
+ const response = await fetch(`${url}${subUrl}`, request)
+
+ rateLimit.limit = Number(response.headers.get("ratelimit-limit"))
+ rateLimit.reset = Number(response.headers.get("ratelimit-reset"))
+ rateLimit.remaining = Number(response.headers.get("ratelimit-remaining"))
+ rateLimit.retryAfter = Number(response.headers.get("retry-after"))
+
+ if (response.status !== 200) {
+ throw new Error(
+ `Failed to fetch from Subscan: ${response.status} ${response.statusText}`
+ )
+ }
+
+ const json = await response.json()
+ return {
+ status: response.status,
+ statusText: response.statusText,
+ json,
+ rateLimit: { ...rateLimit },
+ }
+ }
+
+ return {
+ post,
+ }
+}
+
+export const fetchEvents = async (
+ api: SubscanApi,
+ module: string,
+ eventIds: string[],
+ fromBlock: number,
+ toBlock: number,
+ page: number,
+ rows: number,
+ filterMap: (event: any, params: any) => Promise<any>
+) => {
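+ // Two-phase fetch: list matching events in the block range, then batch-fetch their params and let filterMap keep, drop or transform each one.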
+ const eventsBody = {
+ module,
+ block_range: `${fromBlock}-${toBlock}`,
+ event_id: eventIds.length === 1 ? eventIds[0] : undefined,
+ row: rows,
+ page,
+ }
+
+ const eventResponse = await api.post("api/v2/scan/events", eventsBody)
+
+ let endOfPages = false
+ if (eventResponse.json.data.events === null) {
+ eventResponse.json.data.events = []
+ endOfPages = true
+ }
+
+ const map = new Map()
+ eventResponse.json.data.events
+ .filter((e: any) => eventIds.includes(e.event_id))
+ .forEach((e: any) => {
+ map.set(e.event_index, e)
+ })
+
+ const events = []
+
+ if (map.size > 0) {
+ const paramsBody = { event_index: Array.from(map.keys()) }
+ const paramsResponse = await api.post("api/scan/event/params", paramsBody)
+
+ if (paramsResponse.json.data === null) {
+ paramsResponse.json.data = []
+ }
+
+ for (const { event_index, params } of paramsResponse.json.data) {
+ const event = map.get(event_index)
+ const transform = await filterMap(event, params)
+ if (transform === null) {
+ continue
+ }
+ events.push({ ...event, params, data: transform })
+ }
+ }
+ return {
+ status: eventResponse.status,
+ statusText: eventResponse.statusText,
+ events,
+ endOfPages,
+ }
+}
+
+export const fetchExtrinsics = async (
+ api: SubscanApi,
+ module: string,
+ call: string,
+ fromBlock: number,
+ toBlock: number,
+ page: number,
+ rows: number,
+ filterMap: (extrinsic: any, params: any) => Promise<any>
+) => {
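+ // Mirrors fetchEvents: list extrinsics in the block range, then batch-fetch their params and apply filterMap.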
+ const extBody = {
+ module,
+ call,
+ block_range: `${fromBlock}-${toBlock}`,
+ row: rows,
+ page,
+ }
+ const extResponse = await api.post("api/v2/scan/extrinsics", extBody)
+
+ let endOfPages = false
+ if (extResponse.json.data.extrinsics === null) {
+ extResponse.json.data.extrinsics = []
+ endOfPages = true
+ }
+ const map = new Map()
+ extResponse.json.data.extrinsics.forEach((e: any) => {
+ map.set(e.extrinsic_index, e)
+ })
+
+ const extrinsics = []
+
+ if (map.size > 0) {
+ const paramsBody = { extrinsic_index: Array.from(map.keys()) }
+ const extParams = await api.post("api/scan/extrinsic/params", paramsBody)
+
+ if (extParams.json.data === null) {
+ extParams.json.data = []
+ }
+
+ for (const { extrinsic_index, params } of extParams.json.data) {
+ const event = map.get(extrinsic_index)
+ const transform = await filterMap(event, params)
+ if (transform === null) {
+ continue
+ }
+
+ extrinsics.push({ ...event, params, data: transform })
+ }
+ }
+ return {
+ status: extResponse.status,
+ statusText: extResponse.statusText,
+ extrinsics,
+ endOfPages,
+ }
+}
diff --git a/web/packages/api/src/toEthereum.ts b/web/packages/api/src/toEthereum.ts
index e2fd62baff..fb06b70560 100644
--- a/web/packages/api/src/toEthereum.ts
+++ b/web/packages/api/src/toEthereum.ts
@@ -72,6 +72,16 @@ export type SendValidationResult = {
}
}
+export interface IValidateOptions {
+ defaultFee: bigint
+ acceptableLatencyInSeconds: number
+}
+
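+// Callers may override any subset of these defaults; validateSend merges overrides over ValidateOptionDefaults.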
+const ValidateOptionDefaults: IValidateOptions = {
+ defaultFee: 2_750_872_500_000n,
+ acceptableLatencyInSeconds: 28800 /* 8 Hours */,
+}
+
export const getSendFee = async (
context: Context,
options = {
@@ -97,11 +107,9 @@ export const validateSend = async (
beneficiary: string,
tokenAddress: string,
amount: bigint,
- options = {
- defaultFee: 2_750_872_500_000n,
- acceptableLatencyInSeconds: 28800 /* 8 Hours */,
- }
+ validateOptions: Partial<IValidateOptions> = {}
): Promise<SendValidationResult> => {
+ const options = { ...ValidateOptionDefaults, ...validateOptions }
const {
ethereum,
ethereum: {
diff --git a/web/packages/api/src/toPolkadot.ts b/web/packages/api/src/toPolkadot.ts
index b5742681b2..422aa7f1c7 100644
--- a/web/packages/api/src/toPolkadot.ts
+++ b/web/packages/api/src/toPolkadot.ts
@@ -3,7 +3,7 @@ import { Codec } from "@polkadot/types/types"
import { u8aToHex } from "@polkadot/util"
import { IERC20__factory, IGateway__factory, WETH9__factory } from "@snowbridge/contract-types"
import { MultiAddressStruct } from "@snowbridge/contract-types/src/IGateway"
-import { LogDescription, Signer, TransactionReceipt, ethers, keccak256 } from "ethers"
+import { LogDescription, Signer, TransactionReceipt, ethers } from "ethers"
import { concatMap, filter, firstValueFrom, lastValueFrom, take, takeWhile, tap } from "rxjs"
import { assetStatusInfo } from "./assets"
import { Context } from "./index"
@@ -15,11 +15,13 @@ import {
paraIdToChannelId,
paraIdToSovereignAccount,
} from "./utils"
+import { ApiPromise } from "@polkadot/api"
export enum SendValidationCode {
BridgeNotOperational,
ChannelNotOperational,
BeneficiaryAccountMissing,
+ BeneficiaryHasHitMaxConsumers,
ForeignAssetMissing,
ERC20InvalidToken,
ERC20NotRegistered,
@@ -68,9 +70,20 @@ export type SendValidationResult = {
tokenBalance: bigint
tokenSpendAllowance: bigint
lightClientLatencySeconds: number
+ accountConsumers: number | null
}
}
+export interface IValidateOptions {
+ acceptableLatencyInSeconds: number
+ maxConsumers: number
+}
+
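+// Defaults for validateSend, merged with any caller-supplied overrides.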
+const ValidateOptionDefaults: IValidateOptions = {
+ acceptableLatencyInSeconds: 28800 /* 8 Hours */,
+ maxConsumers: 16,
+}
+
export const approveTokenSpend = async (
context: Context,
signer: Signer,
@@ -105,6 +118,14 @@ export const getSendFee = async (
return await gateway.quoteSendTokenFee(tokenAddress, destinationParaId, destinationFee)
}
+export const getSubstrateAccount = async (parachain: ApiPromise, beneficiaryHex: string) => {
+ const account = (await parachain.query.system.account(beneficiaryHex)).toPrimitive() as {
+ data: { free: string }
+ consumers: number
+ }
+ return { balance: account.data.free, consumers: account.consumers }
+}
+
export const validateSend = async (
context: Context,
source: ethers.Addressable,
@@ -113,14 +134,13 @@ export const validateSend = async (
destinationParaId: number,
amount: bigint,
destinationFee: bigint,
- options = {
- acceptableLatencyInSeconds: 28800 /* 3 Hours */,
- }
+ validateOptions: Partial<IValidateOptions> = {}
): Promise<SendValidationResult> => {
+ const options = { ...ValidateOptionDefaults, ...validateOptions }
const {
ethereum,
polkadot: {
- api: { assetHub, bridgeHub, relaychain },
+ api: { assetHub, bridgeHub, relaychain, parachains },
},
} = context
@@ -194,9 +214,11 @@ export const validateSend = async (
let { address: beneficiaryAddress, hexAddress: beneficiaryHex } =
beneficiaryMultiAddress(beneficiary)
- let beneficiaryAccountExists = true
+ let beneficiaryAccountExists = false
+ let hasConsumers = false
let destinationChainExists = true
let hrmpChannelSetup = true
+ let accountConsumers: number | null = null
const existentialDeposit = BigInt(
assetHub.consts.balances.existentialDeposit.toPrimitive() as number
)
@@ -204,10 +226,10 @@ export const validateSend = async (
if (destinationFee !== 0n) throw new Error("Asset Hub does not require a destination fee.")
if (beneficiaryAddress.kind !== 1)
throw new Error("Asset Hub only supports 32 byte addresses.")
- const account = (await assetHub.query.system.account(beneficiaryHex)).toPrimitive() as {
- data: { free: string }
- }
- beneficiaryAccountExists = BigInt(account.data.free) > existentialDeposit
+ const { balance, consumers } = await getSubstrateAccount(assetHub, beneficiaryHex)
+ beneficiaryAccountExists = BigInt(balance) > existentialDeposit
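+ // Leave headroom for the consumer references this transfer may add (assumed to be up to two).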
+ hasConsumers = consumers + 2 <= options.maxConsumers
+ accountConsumers = consumers
} else {
const [destinationHead, hrmpChannel] = await Promise.all([
relaychain.query.paras.heads(destinationParaId),
@@ -218,6 +240,17 @@ export const validateSend = async (
])
destinationChainExists = destinationHead.toPrimitive() !== null
hrmpChannelSetup = hrmpChannel.toPrimitive() !== null
+
+ if (destinationParaId in parachains) {
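+ // The beneficiary account can only be checked when a connection to the destination parachain is configured.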
+ const { balance, consumers } = await getSubstrateAccount(parachains[destinationParaId], beneficiaryHex)
+ beneficiaryAccountExists = BigInt(balance) > existentialDeposit
+ hasConsumers = consumers + 2 <= options.maxConsumers
+ accountConsumers = consumers
+ } else {
+ // We cannot check this as we do not know the destination.
+ beneficiaryAccountExists = true
+ hasConsumers = true
+ }
}
if (!destinationChainExists)
errors.push({
@@ -229,6 +262,11 @@ export const validateSend = async (
code: SendValidationCode.BeneficiaryAccountMissing,
message: "Beneficiary does not hold existential deposit on destination.",
})
+ if (!hasConsumers)
+ errors.push({
+ code: SendValidationCode.BeneficiaryHasHitMaxConsumers,
+ message: "Benificiary is approaching the asset consumer limit. Transfer may fail.",
+ })
if (!hrmpChannelSetup)
errors.push({
code: SendValidationCode.NoHRMPChannelToDestination,
@@ -300,6 +338,7 @@ export const validateSend = async (
tokenBalance: assetInfo.ownerBalance,
tokenSpendAllowance: assetInfo.tokenGatewayAllowance,
existentialDeposit: existentialDeposit,
+ accountConsumers: accountConsumers,
},
}
}
@@ -381,7 +420,6 @@ export const send = async (
])
const contract = IGateway__factory.connect(context.config.appContracts.gateway, signer)
- const fees = await context.ethereum.api.getFeeData()
const response = await contract.sendToken(
success.token,
diff --git a/web/packages/contract-types/package.json b/web/packages/contract-types/package.json
index d4143f9e88..c16cf18234 100644
--- a/web/packages/contract-types/package.json
+++ b/web/packages/contract-types/package.json
@@ -1,6 +1,6 @@
{
"name": "@snowbridge/contract-types",
- "version": "0.1.5",
+ "version": "0.1.6",
"description": "Snowbridge contract type bindings",
"license": "Apache-2.0",
"repository": {
diff --git a/web/packages/operations/.env.example b/web/packages/operations/.env.example
index 034cce1751..3e1aaf5753 100644
--- a/web/packages/operations/.env.example
+++ b/web/packages/operations/.env.example
@@ -1,6 +1,6 @@
NODE_ENV=rococo_sepolia
REACT_APP_INFURA_KEY=
-SLACK_WEBHOOK_URL=
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
AWS_REGION=eu-central-1
+SNS_TOPIC_TO_PAGERDUTY=arn:aws:sns:eu-central-1:232374692033:PD
diff --git a/web/packages/operations/src/alarm.ts b/web/packages/operations/src/alarm.ts
index 74568978f0..0b8af0c85e 100644
--- a/web/packages/operations/src/alarm.ts
+++ b/web/packages/operations/src/alarm.ts
@@ -1,13 +1,12 @@
import { status, environment } from "@snowbridge/api"
-import axios from "axios"
import {
CloudWatchClient,
PutMetricDataCommand,
PutMetricAlarmCommand,
} from "@aws-sdk/client-cloudwatch"
-const SLACK_WEBHOOK_URL = process.env["SLACK_WEBHOOK_URL"]
const CLOUD_WATCH_NAME_SPACE = "SnowbridgeMetrics"
+const SNS_TOPIC_TO_PAGERDUTY = process.env["SNS_TOPIC_TO_PAGERDUTY"] || ""
export const AlarmThreshold = {
MaxBlockLatency: 2000,
@@ -17,6 +16,7 @@ export const AlarmThreshold = {
export type Sovereign = { name: string; account: string; balance: bigint }
export type AllMetrics = {
+ name: string
bridgeStatus: status.BridgeStatusInfo
channels: status.ChannelStatusInfo[]
sovereigns: Sovereign[]
@@ -31,50 +31,24 @@ export enum AlarmReason {
AccountBalanceInsufficient = "AccountBalanceInsufficient",
}
-export type ChannelKind = "Primary" | "Secondary" | "AssetHub"
-
export const sendMetrics = async (metrics: AllMetrics) => {
let client = new CloudWatchClient({})
let metricData = []
// Beefy metrics
metricData.push({
MetricName: "BeefyLatency",
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
- ],
Value: metrics.bridgeStatus.toEthereum.blockLatency,
})
metricData.push({
MetricName: "LatestBeefyBlock",
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
- ],
Value: metrics.bridgeStatus.toEthereum.latestPolkadotBlockOnEthereum,
})
metricData.push({
MetricName: "PreviousBeefyBlock",
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
- ],
Value: metrics.bridgeStatus.toEthereum.previousPolkadotBlockOnEthereum,
})
metricData.push({
MetricName: AlarmReason.BeefyStale.toString(),
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
- ],
Value: Number(
metrics.bridgeStatus.toEthereum.blockLatency > AlarmThreshold.MaxBlockLatency &&
metrics.bridgeStatus.toEthereum.latestPolkadotBlockOnEthereum <=
@@ -84,42 +58,18 @@ export const sendMetrics = async (metrics: AllMetrics) => {
// Beacon metrics
metricData.push({
MetricName: "BeaconLatency",
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
- ],
Value: metrics.bridgeStatus.toPolkadot.blockLatency,
})
metricData.push({
MetricName: "LatestBeaconBlock",
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
- ],
Value: metrics.bridgeStatus.toPolkadot.latestEthereumBlockOnPolkadot,
})
metricData.push({
MetricName: "PreviousBeaconBlock",
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
- ],
Value: metrics.bridgeStatus.toPolkadot.previousEthereumBlockOnPolkadot,
})
metricData.push({
MetricName: AlarmReason.BeaconStale.toString(),
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
- ],
Value: Number(
metrics.bridgeStatus.toPolkadot.blockLatency > AlarmThreshold.MaxBlockLatency &&
metrics.bridgeStatus.toPolkadot.latestEthereumBlockOnPolkadot <=
@@ -132,10 +82,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
metricData.push({
MetricName: "ToEthereumOutboundNonce",
Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
{
Name: "ChannelName",
Value: channel.name,
@@ -146,10 +92,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
metricData.push({
MetricName: "ToEthereumPreviousOutboundNonce",
Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
{
Name: "ChannelName",
Value: channel.name,
@@ -160,10 +102,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
metricData.push({
MetricName: "ToEthereumInboundNonce",
Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
{
Name: "ChannelName",
Value: channel.name,
@@ -174,10 +112,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
metricData.push({
MetricName: "ToEthereumPreviousInboundNonce",
Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
{
Name: "ChannelName",
Value: channel.name,
@@ -187,12 +121,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
})
metricData.push({
MetricName: AlarmReason.ToEthereumChannelStale.toString(),
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToEthereum",
- },
- ],
Value: Number(
channel.toEthereum.outbound < channel.toEthereum.inbound ||
(channel.toEthereum.outbound > channel.toEthereum.inbound &&
@@ -203,10 +131,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
metricData.push({
MetricName: "ToPolkadotOutboundNonce",
Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
{
Name: "ChannelName",
Value: channel.name,
@@ -217,10 +141,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
metricData.push({
MetricName: "ToPolkadotPreviousOutboundNonce",
Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
{
Name: "ChannelName",
Value: channel.name,
@@ -231,10 +151,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
metricData.push({
MetricName: "ToPolkadotInboundNonce",
Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
{
Name: "ChannelName",
Value: channel.name,
@@ -245,10 +161,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
metricData.push({
MetricName: "ToPolkadotPreviousInboundNonce",
Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
{
Name: "ChannelName",
Value: channel.name,
@@ -258,12 +170,6 @@ export const sendMetrics = async (metrics: AllMetrics) => {
})
metricData.push({
MetricName: AlarmReason.ToPolkadotChannelStale.toString(),
- Dimensions: [
- {
- Name: "Direction",
- Value: "ToPolkadot",
- },
- ],
Value: Number(
channel.toPolkadot.outbound < channel.toPolkadot.inbound ||
(channel.toPolkadot.outbound > channel.toPolkadot.inbound &&
@@ -307,19 +213,30 @@ export const sendMetrics = async (metrics: AllMetrics) => {
}
const command = new PutMetricDataCommand({
MetricData: metricData,
- Namespace: CLOUD_WATCH_NAME_SPACE,
+ Namespace: CLOUD_WATCH_NAME_SPACE + "-" + metrics.name,
})
await client.send(command)
}
export const initializeAlarms = async () => {
+ let env = "local_e2e"
+ if (process.env.NODE_ENV !== undefined) {
+ env = process.env.NODE_ENV
+ }
+ const snowbridgeEnv = environment.SNOWBRIDGE_ENV[env]
+ if (snowbridgeEnv === undefined) {
+ throw Error(`Unknown environment '${env}'`)
+ }
+ const { name } = snowbridgeEnv
+
let client = new CloudWatchClient({})
let cloudWatchAlarms = []
let alarmCommandSharedInput = {
EvaluationPeriods: 3,
- Namespace: CLOUD_WATCH_NAME_SPACE,
- Period: 600,
+ Namespace: CLOUD_WATCH_NAME_SPACE + "-" + name,
+ Period: 300,
Threshold: 0,
+ AlarmActions: [SNS_TOPIC_TO_PAGERDUTY],
}
cloudWatchAlarms.push(
new PutMetricAlarmCommand({
@@ -371,65 +288,7 @@ export const initializeAlarms = async () => {
...alarmCommandSharedInput,
})
)
- console.log(cloudWatchAlarms)
for (let alarm of cloudWatchAlarms) {
await client.send(alarm)
}
}
-
-export const sendAlarm = async (metrics: AllMetrics) => {
- let alarm = false
- let alarms = []
-
- if (
- metrics.bridgeStatus.toEthereum.blockLatency > AlarmThreshold.MaxBlockLatency &&
- metrics.bridgeStatus.toEthereum.latestPolkadotBlockOnEthereum ==
- metrics.bridgeStatus.toEthereum.previousPolkadotBlockOnEthereum
- ) {
- alarm = true
- alarms.push(AlarmReason.BeefyStale)
- }
- if (
- metrics.bridgeStatus.toPolkadot.blockLatency > AlarmThreshold.MaxBlockLatency &&
- metrics.bridgeStatus.toPolkadot.latestEthereumBlockOnPolkadot ==
- metrics.bridgeStatus.toPolkadot.previousEthereumBlockOnPolkadot
- ) {
- alarm = true
- alarms.push(AlarmReason.BeaconStale)
- }
- for (let channel of metrics.channels) {
- if (
- channel.toEthereum.outbound != channel.toEthereum.inbound &&
- channel.toEthereum.inbound == channel.toEthereum.previousInbound
- ) {
- alarm = true
- alarms.push(AlarmReason.ToEthereumChannelStale)
- }
- if (
- channel.toPolkadot.outbound != channel.toPolkadot.inbound &&
- channel.toPolkadot.inbound == channel.toPolkadot.previousInbound
- ) {
- alarm = true
- alarms.push(AlarmReason.ToPolkadotChannelStale)
- }
- break
- }
-
- for (let relayer of metrics.relayers) {
- if (!relayer.balance || relayer.balance < AlarmThreshold.MinBalanceToKeep) {
- alarm = true
- alarms.push(AlarmReason.AccountBalanceInsufficient)
- break
- }
- }
- const text = JSON.stringify(
- { alarms, metrics },
- (key, value) => (typeof value === "bigint" ? value.toString() : value),
- 2
- )
- console.log(text)
-
- if (alarm) {
- await axios.post(SLACK_WEBHOOK_URL || "", { text })
- }
-}
diff --git a/web/packages/operations/src/cron.ts b/web/packages/operations/src/cron.ts
index f819477afd..7fd3696d6a 100644
--- a/web/packages/operations/src/cron.ts
+++ b/web/packages/operations/src/cron.ts
@@ -2,4 +2,4 @@ import "dotenv/config"
import cron from "node-cron"
import { monitor } from "./monitor"
-cron.schedule("*/10 * * * *", monitor)
+cron.schedule("*/5 * * * *", monitor)
diff --git a/web/packages/operations/src/global_transfer_history.ts b/web/packages/operations/src/global_transfer_history.ts
new file mode 100644
index 0000000000..826dc16ffd
--- /dev/null
+++ b/web/packages/operations/src/global_transfer_history.ts
@@ -0,0 +1,99 @@
+import { contextFactory, destroyContext, environment, subscan, history } from "@snowbridge/api"
+
+const monitor = async () => {
+ const subscanKey = process.env.REACT_APP_SUBSCAN_KEY ?? ""
+
+ let env = "rococo_sepolia"
+ if (process.env.NODE_ENV !== undefined) {
+ env = process.env.NODE_ENV
+ }
+ const snowbridgeEnv = environment.SNOWBRIDGE_ENV[env]
+ if (snowbridgeEnv === undefined) {
+ throw Error(`Unknown environment '${env}'`)
+ }
+
+ const { config } = snowbridgeEnv
+ if (!config.SUBSCAN_API) throw Error(`Environment ${env} does not support subscan.`)
+
+ const context = await contextFactory({
+ ethereum: {
+ execution_url: config.ETHEREUM_WS_API(process.env.REACT_APP_ALCHEMY_KEY ?? ""),
+ beacon_url: config.BEACON_HTTP_API,
+ },
+ polkadot: {
+ url: {
+ bridgeHub: config.BRIDGE_HUB_WS_URL,
+ assetHub: config.ASSET_HUB_WS_URL,
+ relaychain: config.RELAY_CHAIN_WS_URL,
+ parachains: config.PARACHAINS,
+ },
+ },
+ appContracts: {
+ gateway: config.GATEWAY_CONTRACT,
+ beefy: config.BEEFY_CONTRACT,
+ },
+ })
+
+ const ethBlockTimeSeconds = 12
+ const polkadotBlockTimeSeconds = 9
+ const ethereumSearchPeriodBlocks = (60 * 60 * 24 * 7 * 2) / ethBlockTimeSeconds // 2 Weeks
+ const polkadotSearchPeriodBlocks = (60 * 60 * 24 * 7 * 2) / polkadotBlockTimeSeconds // 2 Weeks
+
+ const assetHubScan = subscan.createApi(config.SUBSCAN_API.ASSET_HUB_URL, subscanKey)
+ const bridgeHubScan = subscan.createApi(config.SUBSCAN_API.BRIDGE_HUB_URL, subscanKey)
+ const relaychainScan = subscan.createApi(config.SUBSCAN_API.RELAY_CHAIN_URL, subscanKey)
+
+ const [ethNowBlock, assetHubNowBlock, bridgeHubNowBlock] = await Promise.all([
+ (async () => {
+ const ethNowBlock = await context.ethereum.api.getBlock("latest")
+ if (ethNowBlock == null) throw Error("Cannot fetch block")
+ return ethNowBlock
+ })(),
+ context.polkadot.api.assetHub.rpc.chain.getHeader(),
+ context.polkadot.api.bridgeHub.rpc.chain.getHeader(),
+ ])
+
+ const [toEthereum, toPolkadot] = [
+ await history.toEthereumHistory(context, assetHubScan, bridgeHubScan, relaychainScan, {
+ assetHub: {
+ fromBlock: assetHubNowBlock.number.toNumber() - polkadotSearchPeriodBlocks,
+ toBlock: assetHubNowBlock.number.toNumber(),
+ },
+ bridgeHub: {
+ fromBlock: bridgeHubNowBlock.number.toNumber() - polkadotSearchPeriodBlocks,
+ toBlock: bridgeHubNowBlock.number.toNumber(),
+ },
+ ethereum: {
+ fromBlock: ethNowBlock.number - ethereumSearchPeriodBlocks,
+ toBlock: ethNowBlock.number,
+ },
+ }),
+ await history.toPolkadotHistory(context, assetHubScan, bridgeHubScan, {
+ assetHub: {
+ fromBlock: assetHubNowBlock.number.toNumber() - polkadotSearchPeriodBlocks,
+ toBlock: assetHubNowBlock.number.toNumber(),
+ },
+ bridgeHub: {
+ fromBlock: bridgeHubNowBlock.number.toNumber() - polkadotSearchPeriodBlocks,
+ toBlock: bridgeHubNowBlock.number.toNumber(),
+ },
+ ethereum: {
+ fromBlock: ethNowBlock.number - ethereumSearchPeriodBlocks,
+ toBlock: ethNowBlock.number,
+ },
+ }),
+ ]
+
+ const transfers = [...toEthereum, ...toPolkadot]
+ transfers.sort((a, b) => b.info.when.getTime() - a.info.when.getTime())
+ console.log(JSON.stringify(transfers, null, 2))
+
+ await destroyContext(context)
+}
+
+monitor()
+ .then(() => process.exit(0))
+ .catch((error) => {
+ console.error("Error:", error)
+ process.exit(1)
+ })
diff --git a/web/packages/operations/src/monitor.ts b/web/packages/operations/src/monitor.ts
index b5384c9416..f18fd7d525 100644
--- a/web/packages/operations/src/monitor.ts
+++ b/web/packages/operations/src/monitor.ts
@@ -1,19 +1,19 @@
import { u8aToHex } from "@polkadot/util"
import { blake2AsU8a } from "@polkadot/util-crypto"
import { contextFactory, destroyContext, environment, status, utils } from "@snowbridge/api"
-import { sendAlarm, AllMetrics, Sovereign, sendMetrics } from "./alarm"
+import { AllMetrics, Sovereign, sendMetrics } from "./alarm"
export const monitor = async (): Promise<AllMetrics> => {
let env = "local_e2e"
if (process.env.NODE_ENV !== undefined) {
env = process.env.NODE_ENV
}
- const snwobridgeEnv = environment.SNOWBRIDGE_ENV[env]
- if (snwobridgeEnv === undefined) {
+ const snowbridgeEnv = environment.SNOWBRIDGE_ENV[env]
+ if (snowbridgeEnv === undefined) {
throw Error(`Unknown environment '${env}'`)
}
- const { config } = snwobridgeEnv
+ const { config, name } = snowbridgeEnv
const infuraKey = process.env.REACT_APP_INFURA_KEY || ""
@@ -35,22 +35,25 @@ export const monitor = async (): Promise<AllMetrics> => {
},
})
- const bridegStatus = await status.bridgeStatusInfo(context)
- console.log("Bridge Status:", bridegStatus)
+ const bridgeStatus = await status.bridgeStatusInfo(context)
+ console.log("Bridge Status:", bridgeStatus)
+
const assethub = await status.channelStatusInfo(
context,
utils.paraIdToChannelId(config.ASSET_HUB_PARAID)
)
- assethub.name = "AssetHub"
+ assethub.name = status.ChannelKind.AssetHub
console.log("Asset Hub Channel:", assethub)
+
const primaryGov = await status.channelStatusInfo(context, config.PRIMARY_GOVERNANCE_CHANNEL_ID)
- primaryGov.name = "Primary"
+ primaryGov.name = status.ChannelKind.Primary
console.log("Primary Governance Channel:", primaryGov)
+
const secondaryGov = await status.channelStatusInfo(
context,
config.SECONDARY_GOVERNANCE_CHANNEL_ID
)
- secondaryGov.name = "Secondary"
+ secondaryGov.name = status.ChannelKind.Secondary
console.log("Secondary Governance Channel:", secondaryGov)
let assetHubSovereign = BigInt(
@@ -125,17 +128,10 @@ export const monitor = async (): Promise => {
},
]
- const allMetrics: AllMetrics = {
- bridgeStatus: bridegStatus,
- channels: channels,
- relayers: relayers,
- sovereigns,
- }
+ const allMetrics: AllMetrics = { name, bridgeStatus, channels, relayers, sovereigns }
await sendMetrics(allMetrics)
- await sendAlarm(allMetrics)
-
await destroyContext(context)
return allMetrics
diff --git a/web/packages/operations/src/transfer_token.ts b/web/packages/operations/src/transfer_token.ts
index 38c84c8be8..ab9a45bea3 100644
--- a/web/packages/operations/src/transfer_token.ts
+++ b/web/packages/operations/src/transfer_token.ts
@@ -64,7 +64,7 @@ const monitor = async () => {
amount,
BigInt(0)
)
- console.log("Plan:", plan)
+ console.log("Plan:", plan, plan.failure?.errors)
let result = await toPolkadot.send(context, ETHEREUM_ACCOUNT, plan)
console.log("Execute:", result)
while (true) {
@@ -87,7 +87,7 @@ const monitor = async () => {
WETH_CONTRACT,
amount
)
- console.log("Plan:", plan)
+ console.log("Plan:", plan, plan.failure?.errors)
const result = await toEthereum.send(context, POLKADOT_ACCOUNT, plan)
console.log("Execute:", result)
while (true) {
@@ -111,7 +111,7 @@ const monitor = async () => {
amount,
BigInt(4_000_000_000)
)
- console.log("Plan:", plan)
+ console.log("Plan:", plan, plan.failure?.errors)
let result = await toPolkadot.send(context, ETHEREUM_ACCOUNT, plan)
console.log("Execute:", result)
while (true) {
@@ -134,7 +134,7 @@ const monitor = async () => {
WETH_CONTRACT,
amount
)
- console.log("Plan:", plan)
+ console.log("Plan:", plan, plan.failure?.errors)
const result = await toEthereum.send(context, POLKADOT_ACCOUNT, plan)
console.log("Execute:", result)
while (true) {
diff --git a/web/packages/test/scripts/build-binary.sh b/web/packages/test/scripts/build-binary.sh
index 74ecfdaa74..8378236040 100755
--- a/web/packages/test/scripts/build-binary.sh
+++ b/web/packages/test/scripts/build-binary.sh
@@ -17,8 +17,7 @@ build_binaries() {
# Check that all 3 binaries are available and no changes made in the polkadot and substrate dirs
if [[ ! -e "target/release/polkadot" || ! -e "target/release/polkadot-execute-worker" || ! -e "target/release/polkadot-prepare-worker" || "$changes_detected" -eq 1 ]]; then
echo "Building polkadot binary, due to changes detected in polkadot or substrate, or binaries not found"
- # Increase session length to 2 mins
- ROCOCO_EPOCH_DURATION=20 cargo build --release --locked --bin polkadot --bin polkadot-execute-worker --bin polkadot-prepare-worker
+ cargo build --release --locked --bin polkadot --bin polkadot-execute-worker --bin polkadot-prepare-worker
else
echo "No changes detected in polkadot or substrate and binaries are available, not rebuilding relaychain binaries."
fi
diff --git a/web/packages/test/scripts/force-beacon-checkpoint.sh b/web/packages/test/scripts/force-beacon-checkpoint.sh
new file mode 100755
index 0000000000..bd5cd1364c
--- /dev/null
+++ b/web/packages/test/scripts/force-beacon-checkpoint.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -e
+
+source scripts/set-env.sh
+source scripts/xcm-helper.sh
+
+pushd $root_dir
+check_point_hex=$($relay_bin generate-beacon-checkpoint --finalized-slot 9043968 --config /opt/config/beacon-relay.json --export-json)
+popd
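+# "0x5200" is assumed to be the SCALE call index (pallet 0x52, call 0x00) of EthereumBeaconClient.force_checkpoint; the generated checkpoint bytes are appended to it.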
+transact_call="0x5200"$check_point_hex
+send_governance_transact_from_relaychain $BRIDGE_HUB_PARAID "$transact_call" 180000000000 900000
diff --git a/web/packages/test/scripts/start-polkadot.sh b/web/packages/test/scripts/start-polkadot.sh
new file mode 100755
index 0000000000..2028e6514b
--- /dev/null
+++ b/web/packages/test/scripts/start-polkadot.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+set -eu
+
+start=$(date +%s)
+
+from_start_services=true
+
+source scripts/set-env.sh
+source scripts/build-binary.sh
+
+trap kill_all SIGINT SIGTERM EXIT
+cleanup
+
+# 0. check required tools
+echo "Check building tools"
+check_tool
+
+# 1. install binary if required
+echo "Installing binaries if required"
+build_binaries
+build_relayer
+
+# 2. start polkadot
+echo "Starting polkadot nodes"
+source scripts/deploy-polkadot.sh
+deploy_polkadot
+
+# 3. generate beefy checkpoint
+echo "Generate beefy checkpoint"
+source scripts/generate-beefy-checkpoint.sh
+generate_beefy_checkpoint
+
+# 4. config substrate
+echo "Config Substrate"
+source scripts/configure-substrate.sh
+configure_substrate
+
+echo "Prod testnet has been initialized"
+
+end=$(date +%s)
+runtime=$((end - start))
+minutes=$(((runtime % 3600) / 60))
+seconds=$(((runtime % 3600) % 60))
+echo "Took $minutes minutes $seconds seconds"
+
+wait
diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml
index 61ed8fe116..8f4e05f2a4 100644
--- a/web/pnpm-lock.yaml
+++ b/web/pnpm-lock.yaml
@@ -344,9 +344,6 @@ importers:
'@types/lodash':
specifier: ^4.14.186
version: 4.14.187
- '@types/node':
- specifier: ^18.13.0
- version: 18.16.8
'@types/secp256k1':
specifier: ^4.0.3
version: 4.0.3
@@ -384,6 +381,9 @@ importers:
specifier: ^3.0.5
version: 3.0.5
devDependencies:
+ '@types/node':
+ specifier: ^18.16.8
+ version: 18.19.31
'@typescript-eslint/eslint-plugin':
specifier: ^5.42.0
version: 5.42.0(@typescript-eslint/parser@5.42.0)(eslint@8.26.0)(typescript@5.1.6)
@@ -398,7 +398,7 @@ importers:
version: 8.5.0(eslint@8.26.0)
ts-node:
specifier: ^10.9.1
- version: 10.9.1(@types/node@18.16.8)(typescript@5.1.6)
+ version: 10.9.1(@types/node@18.19.31)(typescript@5.1.6)
tsconfig-paths:
specifier: ^4.2.0
version: 4.2.0
@@ -3036,6 +3036,7 @@ packages:
/@types/node@18.16.8:
resolution: {integrity: sha512-p0iAXcfWCOTCBbsExHIDFCfwsqFwBTgETJveKMT+Ci3LY9YqQCI91F5S+TB20+aRCXpcWfvx5Qr5EccnwCm2NA==}
+ dev: true
/@types/node@18.19.31:
resolution: {integrity: sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==}
@@ -7378,7 +7379,7 @@ packages:
yn: 3.1.1
dev: true
- /ts-node@10.9.1(@types/node@18.16.8)(typescript@5.1.6):
+ /ts-node@10.9.1(@types/node@18.19.31)(typescript@5.1.6):
resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==}
hasBin: true
peerDependencies:
@@ -7397,7 +7398,7 @@ packages:
'@tsconfig/node12': 1.0.11
'@tsconfig/node14': 1.0.3
'@tsconfig/node16': 1.0.3
- '@types/node': 18.16.8
+ '@types/node': 18.19.31
acorn: 8.8.2
acorn-walk: 8.2.0
arg: 4.1.3