From 013fec20b2e6d4c0f25f01d84565436bd0354304 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Wed, 31 Jul 2024 18:02:36 -0300 Subject: [PATCH 01/19] feat: add keystore API phoenix endpoint --- Makefile | 2 +- config/runtime.exs | 14 + keymanager-oapi.yaml | 1455 +++++++++++++++++ lib/key_store_api/api_spec.ex | 13 + .../controllers/error_controller.ex | 33 + lib/key_store_api/endpoint.ex | 11 + lib/key_store_api/error_json.ex | 10 + lib/key_store_api/key_store_api.ex | 45 + lib/key_store_api/router.ex | 16 + lib/lambda_ethereum_consensus/application.ex | 2 + 10 files changed, 1600 insertions(+), 1 deletion(-) create mode 100644 keymanager-oapi.yaml create mode 100644 lib/key_store_api/api_spec.ex create mode 100644 lib/key_store_api/controllers/error_controller.ex create mode 100644 lib/key_store_api/endpoint.ex create mode 100644 lib/key_store_api/error_json.ex create mode 100644 lib/key_store_api/key_store_api.ex create mode 100644 lib/key_store_api/router.ex diff --git a/Makefile b/Makefile index 34485f3ca..afa1d8739 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,7 @@ checkpoint-sync: compile-all #▶️ sepolia: @ Run an interactive terminal using sepolia network sepolia: compile-all - iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics + iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics --keystore-api #▶️ holesky: @ Run an interactive terminal using holesky network holesky: compile-all diff --git a/config/runtime.exs b/config/runtime.exs index 7ecc40018..87b4446db 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -18,6 +18,8 @@ switches = [ log_file: :string, beacon_api: :boolean, beacon_api_port: :integer, + keystore_api: :boolean, + keystore_api_port: :integer, listen_address: [:string, :keep], discovery_port: :integer, boot_nodes: :string, @@ -47,6 +49,8 @@ metrics_port = Keyword.get(args, :metrics_port, nil) enable_metrics = Keyword.get(args, :metrics, not is_nil(metrics_port)) beacon_api_port = Keyword.get(args, :beacon_api_port, nil) enable_beacon_api = Keyword.get(args, :beacon_api, not is_nil(beacon_api_port)) +keystore_api_port = Keyword.get(args, :keystore_api_port, nil) +enable_keystore_api = Keyword.get(args, :keystore_api, not is_nil(keystore_api_port)) listen_addresses = Keyword.get_values(args, :listen_address) discovery_port = Keyword.get(args, :discovery_port, 9000) cli_bootnodes = Keyword.get(args, :boot_nodes, "") @@ -153,6 +157,16 @@ config :lambda_ethereum_consensus, BeaconApi.Endpoint, layout: false ] +# KeyStore API +config :lambda_ethereum_consensus, KeyStoreApi.Endpoint, + server: enable_keystore_api, + http: [port: keystore_api_port || 5000], + url: [host: "localhost"], + render_errors: [ + formats: [json: KeyStoreApi.ErrorJSON], + layout: false + ] + # Validator setup if (keystore_dir != nil and keystore_pass_dir == nil) or diff --git a/keymanager-oapi.yaml b/keymanager-oapi.yaml new file mode 100644 index 000000000..e35176294 --- /dev/null +++ b/keymanager-oapi.yaml @@ -0,0 +1,1455 @@ +openapi: 3.0.3 +info: + title: Eth2 key manager API + description: | + API specification for a key manager client, which enables users to manage keystores. + + The key manager API is served by the binary holding the validator keys. This binary may be a remote signer or a validator client. + + All routes SHOULD be exposed through a secure channel, such as with HTTPs, an SSH tunnel, a VPN, etc. 
+ + All requests by default send and receive JSON, and as such should have either or both of the "Content-Type: application/json" + and "Accept: application/json" headers. + + All sensitive routes are to be authenticated with a token. This token should be provided by the user via a secure channel: + - Log the token to stdout when running the binary with the key manager API enabled + - Read the token from a file available to the binary + version: v1.0.0-alpha + contact: + name: Ethereum Github + url: 'https://github.com/ethereum/keymanager-APIs/issues' + license: + name: Creative Commons Zero v1.0 Universal + url: 'https://creativecommons.org/publicdomain/zero/1.0/' +servers: + - url: '{server_url}' + variables: + server_url: + description: key manager API url + default: 'https://public-mainnet-node.ethereum.org' +tags: + - name: Fee Recipient + description: Set of endpoints for management of fee recipient. + - name: Gas Limit + description: Set of endpoints for management of gas limits. + - name: Local Key Manager + description: Set of endpoints for key management of local keys. + - name: Remote Key Manager + description: Set of endpoints for key management of external keys. +paths: + /eth/v1/keystores: + get: + operationId: listKeys + summary: List Keys. + description: | + List all validating pubkeys known to and decrypted by this keymanager binary + security: + - bearerAuth: [] + tags: + - Local Key Manager + responses: + '200': + description: Success response + content: + application/json: + schema: + title: ListKeysResponse + type: object + required: + - data + properties: + data: + type: array + items: + type: object + required: + - validating_pubkey + properties: + validating_pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + derivation_path: + type: string + description: The derivation path (if present in the imported keystore). + example: m/12381/3600/0/0/0 + readonly: + type: boolean + description: The key associated with this pubkey cannot be deleted from the API + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + post: + operationId: importKeystores + summary: Import Keystores. + description: | + Import keystores generated by the Eth2.0 deposit CLI tooling. `passwords[i]` must unlock `keystores[i]`. + + Users SHOULD send slashing_protection data associated with the imported pubkeys. MUST follow the format defined in + EIP-3076: Slashing Protection Interchange Format. 
+ security: + - bearerAuth: [] + tags: + - Local Key Manager + requestBody: + content: + application/json: + schema: + type: object + required: + - keystores + - passwords + properties: + keystores: + type: array + description: JSON-encoded keystore files generated with the Launchpad. + items: + type: string + description: | + JSON serialized representation of a single keystore in EIP-2335: BLS12-381 Keystore format. + example: '{"version":4,"uuid":"9f75a3fa-1e5a-49f9-be3d-f5a19779c6fa","path":"m/12381/3600/0/0/0","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","crypto":{"kdf":{"function":"pbkdf2","params":{"dklen":32,"c":262144,"prf":"hmac-sha256","salt":"8ff8f22ef522a40f99c6ce07fdcfc1db489d54dfbc6ec35613edf5d836fa1407"},"message":""},"checksum":{"function":"sha256","params":{},"message":"9678a69833d2576e3461dd5fa80f6ac73935ae30d69d07659a709b3cd3eddbe3"},"cipher":{"function":"aes-128-ctr","params":{"iv":"31b69f0ac97261e44141b26aa0da693f"},"message":"e8228bafec4fcbaca3b827e586daad381d53339155b034e5eaae676b715ab05e"}}}' + passwords: + type: array + description: 'Passwords to unlock imported keystore files. `passwords[i]` must unlock `keystores[i]`.' + items: + type: string + example: ABCDEFGH01234567ABCDEFGH01234567 + slashing_protection: + type: string + description: | + JSON serialized representation of the slash protection data in format defined in EIP-3076: Slashing Protection Interchange Format. + example: '{"metadata":{"interchange_format_version":"5","genesis_validators_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"data":[{"pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","signed_blocks":[],"signed_attestations":[]}]}' + responses: + '200': + description: Success response + content: + application/json: + schema: + title: ImportKeystoresResponse + type: object + required: + - data + properties: + data: + type: array + description: Status result of each `request.keystores` with same length and order of `request.keystores` + items: + type: object + required: + - status + properties: + status: + type: string + description: | + - imported: Keystore successfully decrypted and imported to keymanager permanent storage + - duplicate: Keystore's pubkey is already known to the keymanager + - error: Any other status different to the above: decrypting error, I/O errors, etc. + enum: + - imported + - duplicate + - error + example: imported + message: + type: string + description: error message if status == error + '400': + description: Bad request. Request was malformed and could not be processed + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. 
The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + delete: + operationId: deleteKeys + summary: Delete Keys. + description: | + DELETE must delete all keys from `request.pubkeys` that are known to the keymanager and exist in its + persistent storage. Additionally, DELETE must fetch the slashing protection data for the requested keys from + persistent storage, which must be retained (and not deleted) after the response has been sent. Therefore in the + case of two identical delete requests being made, both will have access to slashing protection data. + + In a single atomic sequential operation the keymanager must: + 1. Guarantee that key(s) can not produce any more signature; only then + 2. Delete key(s) and serialize its associated slashing protection data + + DELETE should never return a 404 response, even if all pubkeys from request.pubkeys have no extant keystores + nor slashing protection data. + + Slashing protection data must only be returned for keys from `request.pubkeys` for which a + `deleted` or `not_active` status is returned. + security: + - bearerAuth: [] + tags: + - Local Key Manager + requestBody: + content: + application/json: + schema: + type: object + required: + - pubkeys + properties: + pubkeys: + type: array + description: List of public keys to delete. + items: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + responses: + '200': + description: Success response + content: + application/json: + schema: + title: DeleteKeysResponse + type: object + required: + - data + - slashing_protection + properties: + data: + type: array + description: Deletion status of all keys in `request.pubkeys` in the same order. + items: + type: object + required: + - status + properties: + status: + type: string + description: | + - deleted: key was active and removed + - not_active: slashing protection data returned but key was not active + - not_found: key was not found to be removed, and no slashing data can be returned + - error: unexpected condition meant the key could not be removed (the key was actually found, but we couldn't stop using it) - this would be a sign that making it active elsewhere would almost certainly cause you headaches / slashing conditions etc. + enum: + - deleted + - not_active + - not_found + - error + example: deleted + message: + type: string + description: error message if status == error + slashing_protection: + type: string + description: | + JSON serialized representation of the slash protection data in format defined in EIP-3076: Slashing Protection Interchange Format. + example: '{"metadata":{"interchange_format_version":"5","genesis_validators_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"data":[{"pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","signed_blocks":[],"signed_attestations":[]}]}' + '400': + description: Bad request. 
Request was malformed and could not be processed + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + /eth/v1/remotekeys: + get: + operationId: listRemoteKeys + summary: List Remote Keys. + description: | + List all remote validating pubkeys known to this validator client binary + security: + - bearerAuth: [] + tags: + - Remote Key Manager + responses: + '200': + description: Success response + content: + application/json: + schema: + title: ListRemoteKeysResponse + type: object + required: + - data + properties: + data: + type: array + items: + type: object + required: + - pubkey + properties: + pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + url: + description: 'URL to API implementing EIP-3030: BLS Remote Signer HTTP API' + type: string + example: 'https://remote.signer' + readonly: + type: boolean + description: The signer associated with this pubkey cannot be deleted from the API + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + post: + operationId: importRemoteKeys + summary: Import Remote Keys. + description: | + Import remote keys for the validator client to request duties for. 
+ security: + - bearerAuth: [] + tags: + - Remote Key Manager + requestBody: + content: + application/json: + schema: + type: object + required: + - remote_keys + properties: + remote_keys: + type: array + items: + type: object + required: + - pubkey + properties: + pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + url: + description: 'URL to API implementing EIP-3030: BLS Remote Signer HTTP API' + type: string + example: 'https://remote.signer' + responses: + '200': + description: Success response + content: + application/json: + schema: + title: ImportRemoteKeysResponse + type: object + required: + - data + properties: + data: + type: array + description: Status result of each `request.remote_keys` with same length and order of `request.remote_keys` + items: + type: object + required: + - status + properties: + status: + type: string + description: | + - imported: Remote key successfully imported to validator client permanent storage + - duplicate: Remote key's pubkey is already known to the validator client + - error: Any other status different to the above: I/O errors, etc. + enum: + - imported + - duplicate + - error + example: imported + message: + type: string + description: error message if status == error + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + delete: + operationId: deleteRemoteKeys + summary: Delete Remote Keys. + description: | + DELETE must delete all keys from `request.pubkeys` that are known to the validator client and exist in its + persistent storage. + + DELETE should never return a 404 response, even if all pubkeys from request.pubkeys have no existing keystores. + security: + - bearerAuth: [] + tags: + - Remote Key Manager + requestBody: + content: + application/json: + schema: + type: object + required: + - pubkeys + properties: + pubkeys: + type: array + description: List of public keys to delete. + items: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. 
_48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + responses: + '200': + description: Success response + content: + application/json: + schema: + title: DeleteRemoteKeysResponse + type: object + required: + - data + properties: + data: + type: array + description: Deletion status of all keys in `request.pubkeys` in the same order. + items: + type: object + required: + - status + properties: + status: + type: string + description: | + - deleted: key was active and removed + - not_found: key was not found to be removed + - error: unexpected condition meant the key could not be removed (the key was actually found, + but we couldn't stop using it) - this would be a sign that making it active elsewhere would + almost certainly cause you headaches / slashing conditions etc. + enum: + - deleted + - not_found + - error + example: deleted + message: + type: string + description: error message if status == error + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '/eth/v1/validator/{pubkey}/feerecipient': + get: + operationId: listFeeRecipient + summary: List Fee Recipient. + description: | + List the validator public key to eth address mapping for fee recipient feature on a specific public key. + The validator public key will return with the default fee recipient address if a specific one was not found. + + WARNING: The fee_recipient is not used on Phase0 or Altair networks. + security: + - bearerAuth: [] + tags: + - Fee Recipient + parameters: + - in: path + name: pubkey + schema: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + required: true + responses: + '200': + description: success response + content: + application/json: + schema: + title: ListFeeRecipientResponse + type: object + required: + - data + properties: + data: + type: object + required: + - ethaddress + properties: + pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + ethaddress: + type: string + description: An address on the execution (Ethereum 1) network. 
+ example: '0xabcf8e0d4e9587369b2301d0790347320302cc09' + pattern: '^0x[a-fA-F0-9]{40}$' + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '404': + description: Path not found + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + post: + operationId: setFeeRecipient + summary: Set Fee Recipient. + description: | + Sets the validator client fee recipient mapping which will then update the beacon node. + Existing mappings for the same validator public key will be overwritten. + Specific Public keys not mapped will continue to use the default address for fee recipient in accordance to the startup of the validator client and beacon node. + Cannot specify the 0x00 fee recipient address through the API. + + WARNING: The fee_recipient is not used on Phase0 or Altair networks. + security: + - bearerAuth: [] + tags: + - Fee Recipient + parameters: + - in: path + name: pubkey + schema: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + required: true + requestBody: + content: + application/json: + schema: + title: SetFeeRecipientRequest + type: object + required: + - ethaddress + properties: + ethaddress: + type: string + description: An address on the execution (Ethereum 1) network. + example: '0xabcf8e0d4e9587369b2301d0790347320302cc09' + pattern: '^0x[a-fA-F0-9]{40}$' + responses: + '202': + description: successfully updated + '400': + description: Bad request. 
Request was malformed and could not be processed + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '404': + description: Path not found + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + delete: + operationId: deleteFeeRecipient + summary: Delete Configured Fee Recipient + description: Delete a configured fee recipient mapping for the specified public key. + security: + - bearerAuth: [] + tags: + - Fee Recipient + parameters: + - in: path + name: pubkey + schema: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + required: true + responses: + '204': + description: 'Successfully removed the mapping, or there was no mapping to remove for a key that the server is managing.' + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'A mapping was found, but cannot be removed. This may be because the mapping was in configuration files that cannot be updated.' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '404': + description: 'The key was not found on the server, nothing to delete.' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '/eth/v1/validator/{pubkey}/gas_limit': + get: + operationId: getGasLimit + summary: Get Gas Limit. 
+ description: | + Get the execution gas limit for an individual validator. This gas limit is the one used by the + validator when proposing blocks via an external builder. If no limit has been set explicitly for + a key then the process-wide default will be returned. + + The server may return a 400 status code if no external builder is configured. + + WARNING: The gas_limit is not used on Phase0 or Altair networks. + security: + - bearerAuth: [] + tags: + - Gas Limit + parameters: + - in: path + name: pubkey + schema: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + required: true + responses: + '200': + description: success response + content: + application/json: + schema: + title: ListGasLimitResponse + type: object + required: + - data + properties: + data: + type: object + required: + - gas_limit + properties: + pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + gas_limit: + type: string + pattern: '^[1-9][0-9]{0,19}$' + example: '30000000' + '400': + description: Bad request. Request was malformed and could not be processed + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '404': + description: Path not found + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + post: + operationId: setGasLimit + summary: Set Gas Limit. + description: | + Set the gas limit for an individual validator. This limit will be propagated to the beacon + node for use on future block proposals. The beacon node is responsible for informing external + block builders of the change. + + The server may return a 400 status code if no external builder is configured. + + WARNING: The gas_limit is not used on Phase0 or Altair networks. 
+ security: + - bearerAuth: [] + tags: + - Gas Limit + parameters: + - in: path + name: pubkey + schema: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + required: true + requestBody: + content: + application/json: + schema: + title: SetGasLimitRequest + type: object + required: + - gas_limit + properties: + gas_limit: + type: string + pattern: '^[1-9][0-9]{0,19}$' + example: '30000000' + responses: + '202': + description: successfully updated + '400': + description: Bad request. Request was malformed and could not be processed + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '404': + description: Path not found + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + delete: + operationId: deleteGasLimit + summary: Delete Configured Gas Limit + description: | + Delete a configured gas limit for the specified public key. + + The server may return a 400 status code if no external builder is configured. + security: + - bearerAuth: [] + tags: + - Gas Limit + parameters: + - in: path + name: pubkey + schema: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + required: true + responses: + '204': + description: 'Successfully removed the gas limit, or there was no gas limit set for the requested public key.' + '400': + description: Bad request. 
Request was malformed and could not be processed + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '401': + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '403': + description: 'A gas limit was found, but cannot be removed. This may be because the gas limit was in configuration files that cannot be updated.' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '404': + description: 'The key was not found on the server, nothing to delete.' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + '500': + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred +components: + securitySchemes: + bearerAuth: + type: http + scheme: bearer + bearerFormat: 'URL safe token, optionally JWT' + schemas: + Pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + EthAddress: + type: string + description: An address on the execution (Ethereum 1) network. + example: '0xabcf8e0d4e9587369b2301d0790347320302cc09' + pattern: '^0x[a-fA-F0-9]{40}$' + Keystore: + type: string + description: | + JSON serialized representation of a single keystore in EIP-2335: BLS12-381 Keystore format. + example: '{"version":4,"uuid":"9f75a3fa-1e5a-49f9-be3d-f5a19779c6fa","path":"m/12381/3600/0/0/0","pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","crypto":{"kdf":{"function":"pbkdf2","params":{"dklen":32,"c":262144,"prf":"hmac-sha256","salt":"8ff8f22ef522a40f99c6ce07fdcfc1db489d54dfbc6ec35613edf5d836fa1407"},"message":""},"checksum":{"function":"sha256","params":{},"message":"9678a69833d2576e3461dd5fa80f6ac73935ae30d69d07659a709b3cd3eddbe3"},"cipher":{"function":"aes-128-ctr","params":{"iv":"31b69f0ac97261e44141b26aa0da693f"},"message":"e8228bafec4fcbaca3b827e586daad381d53339155b034e5eaae676b715ab05e"}}}' + FeeRecipient: + type: object + required: + - ethaddress + properties: + pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + ethaddress: + type: string + description: An address on the execution (Ethereum 1) network. 
+ example: '0xabcf8e0d4e9587369b2301d0790347320302cc09' + pattern: '^0x[a-fA-F0-9]{40}$' + GasLimit: + type: object + required: + - gas_limit + properties: + pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + gas_limit: + type: string + pattern: '^[1-9][0-9]{0,19}$' + example: '30000000' + Uint64: + type: string + pattern: '^[1-9][0-9]{0,19}$' + example: '30000000' + SignerDefinition: + type: object + required: + - pubkey + properties: + pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + url: + description: 'URL to API implementing EIP-3030: BLS Remote Signer HTTP API' + type: string + example: 'https://remote.signer' + readonly: + type: boolean + description: The signer associated with this pubkey cannot be deleted from the API + ImportRemoteSignerDefinition: + type: object + required: + - pubkey + properties: + pubkey: + type: string + pattern: '^0x[a-fA-F0-9]{96}$' + description: | + The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._ + example: '0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a' + url: + description: 'URL to API implementing EIP-3030: BLS Remote Signer HTTP API' + type: string + example: 'https://remote.signer' + SlashingProtectionData: + type: string + description: | + JSON serialized representation of the slash protection data in format defined in EIP-3076: Slashing Protection Interchange Format. + example: '{"metadata":{"interchange_format_version":"5","genesis_validators_root":"0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"},"data":[{"pubkey":"0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a","signed_blocks":[],"signed_attestations":[]}]}' + ErrorResponse: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + responses: + BadRequest: + description: Bad request. 
Request was malformed and could not be processed + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + Unauthorized: + description: 'Unauthorized, no token is found' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + Forbidden: + description: 'Forbidden, a token is found but is invalid' + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + NotFound: + description: Path not found + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred + InternalError: + description: | + Internal server error. The server encountered an unexpected error indicative of + a serious fault in the system, or a bug. + content: + application/json: + schema: + type: object + required: + - message + properties: + message: + description: Detailed error message + type: string + example: description of the error that occurred diff --git a/lib/key_store_api/api_spec.ex b/lib/key_store_api/api_spec.ex new file mode 100644 index 000000000..a1ac29c23 --- /dev/null +++ b/lib/key_store_api/api_spec.ex @@ -0,0 +1,13 @@ +defmodule KeyStoreApi.ApiSpec do + @moduledoc false + alias OpenApiSpex.OpenApi + @behaviour OpenApi + + file = "keymanager-oapi.yaml" + @external_resource file + @ethspec YamlElixir.read_from_file!(file) + |> OpenApiSpex.OpenApi.Decode.decode() + + @impl OpenApi + def spec(), do: @ethspec +end diff --git a/lib/key_store_api/controllers/error_controller.ex b/lib/key_store_api/controllers/error_controller.ex new file mode 100644 index 000000000..ad7c10148 --- /dev/null +++ b/lib/key_store_api/controllers/error_controller.ex @@ -0,0 +1,33 @@ +defmodule KeyStoreApi.ErrorController do + use KeyStoreApi, :controller + + @spec bad_request(Plug.Conn.t(), binary()) :: Plug.Conn.t() + def bad_request(conn, message) do + conn + |> put_status(400) + |> json(%{ + code: 400, + message: "#{message}" + }) + end + + @spec not_found(Plug.Conn.t(), any) :: Plug.Conn.t() + def not_found(conn, _params) do + conn + |> put_status(404) + |> json(%{ + code: 404, + message: "Resource not found" + }) + end + + @spec internal_error(Plug.Conn.t(), any) :: Plug.Conn.t() + def internal_error(conn, _params) do + conn + |> put_status(500) + |> json(%{ + code: 500, + message: "Internal server error" + }) + end +end diff --git a/lib/key_store_api/endpoint.ex b/lib/key_store_api/endpoint.ex new file mode 100644 index 000000000..f3cf1dac9 --- /dev/null +++ b/lib/key_store_api/endpoint.ex @@ -0,0 +1,11 @@ +defmodule KeyStoreApi.Endpoint do + use Phoenix.Endpoint, otp_app: :lambda_ethereum_consensus + + plug(Plug.Parsers, + parsers: [:urlencoded, :multipart, :json], + pass: ["*/*"], + json_decoder: Phoenix.json_library() + ) + + plug(KeyStoreApi.Router) +end diff --git a/lib/key_store_api/error_json.ex b/lib/key_store_api/error_json.ex new file mode 100644 index 000000000..24855f639 --- /dev/null +++ b/lib/key_store_api/error_json.ex @@ -0,0 +1,10 @@ +defmodule KeyStoreApi.ErrorJSON do + use KeyStoreApi, :controller + + @spec render(any, 
any) :: %{message: String.t()} + def render(_, _) do + %{ + message: "There has been an error" + } + end +end diff --git a/lib/key_store_api/key_store_api.ex b/lib/key_store_api/key_store_api.ex new file mode 100644 index 000000000..b2bb3e00f --- /dev/null +++ b/lib/key_store_api/key_store_api.ex @@ -0,0 +1,45 @@ +defmodule KeyStoreApi do + @moduledoc """ + The entrypoint for defining your web interface, such + as controllers, components, channels, and so on. + + This can be used in your application as: + + use KeyStoreApi, :controller + use KeyStoreApi, :html + + The definitions below will be executed for every controller, + component, etc, so keep them short and clean, focused + on imports, uses and aliases. + + Do NOT define functions inside the quoted expressions + below. Instead, define additional modules and import + those modules here. + """ + + def router() do + quote do + use Phoenix.Router, helpers: false + + # Import common connection and controller functions to use in pipelines + import Plug.Conn + import Phoenix.Controller + end + end + + def controller() do + quote do + use Phoenix.Controller, + formats: [:json] + + import Plug.Conn + end + end + + @doc """ + When used, dispatch to the appropriate controller/view/etc. + """ + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/lib/key_store_api/router.ex b/lib/key_store_api/router.ex new file mode 100644 index 000000000..862c796a2 --- /dev/null +++ b/lib/key_store_api/router.ex @@ -0,0 +1,16 @@ +defmodule KeyStoreApi.Router do + use KeyStoreApi, :router + + pipeline :api do + plug(:accepts, ["json"]) + plug(OpenApiSpex.Plug.PutApiSpec, module: KeyStoreApi.ApiSpec) + end + + scope "/api" do + pipe_through(:api) + get("/openapi", OpenApiSpex.Plug.RenderSpec, []) + end + + # Catch-all route outside of any scope + match(:*, "/*path", KeyStoreApi.ErrorController, :not_found) +end diff --git a/lib/lambda_ethereum_consensus/application.ex b/lib/lambda_ethereum_consensus/application.ex index b88b82d10..dc89954be 100644 --- a/lib/lambda_ethereum_consensus/application.ex +++ b/lib/lambda_ethereum_consensus/application.ex @@ -29,6 +29,7 @@ defmodule LambdaEthereumConsensus.Application do @impl true def config_change(changed, _new, removed) do BeaconApi.Endpoint.config_change(changed, removed) + KeyStoreApi.Endpoint.config_change(changed, removed) :ok end @@ -46,6 +47,7 @@ defmodule LambdaEthereumConsensus.Application do get_children(:db) ++ [ BeaconApi.Endpoint, + KeyStoreApi.Endpoint, LambdaEthereumConsensus.PromEx, LambdaEthereumConsensus.Beacon.BeaconNode ] From 6f74cd696af4a9ca3da662afb99d230ea4a53719 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Wed, 31 Jul 2024 22:05:14 -0300 Subject: [PATCH 02/19] feat: add GET /keystores endpoint --- .../controllers/v1/key_store_controller.ex | 32 +++++++++++++++++++ lib/key_store_api/router.ex | 9 ++++++ .../validator/validator_manager.ex | 5 +++ 3 files changed, 46 insertions(+) create mode 100644 lib/key_store_api/controllers/v1/key_store_controller.ex diff --git a/lib/key_store_api/controllers/v1/key_store_controller.ex b/lib/key_store_api/controllers/v1/key_store_controller.ex new file mode 100644 index 000000000..e65c05ac8 --- /dev/null +++ b/lib/key_store_api/controllers/v1/key_store_controller.ex @@ -0,0 +1,32 @@ +defmodule KeyStoreApi.V1.KeyStoreController do + use KeyStoreApi, :controller + + alias BeaconApi.Utils + alias KeyStoreApi.ApiSpec + alias LambdaEthereumConsensus.Validator.ValidatorManager + + 
plug(OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true) + + # NOTE: this function is required by OpenApiSpex, and should return the information + # of each specific endpoint. We just return the specific entry from the parsed spec. + def open_api_operation(:get_keys), + do: ApiSpec.spec().paths["/eth/v1/keystores"].get + + @spec get_keys(Plug.Conn.t(), any) :: Plug.Conn.t() + def get_keys(conn, _params) do + pubkeys_info = + ValidatorManager.get_pubkeys() + |> Enum.map( + &%{ + "validatin_pubkey" => &1 |> Utils.hex_encode(), + "derivation_path" => "m/12381/3600/0/0/0", + "readonly" => true + } + ) + + conn + |> json(%{ + "data" => pubkeys_info + }) + end +end diff --git a/lib/key_store_api/router.ex b/lib/key_store_api/router.ex index 862c796a2..9761be944 100644 --- a/lib/key_store_api/router.ex +++ b/lib/key_store_api/router.ex @@ -6,6 +6,15 @@ defmodule KeyStoreApi.Router do plug(OpenApiSpex.Plug.PutApiSpec, module: KeyStoreApi.ApiSpec) end + # KeyManager API Version 1 + scope "/eth/v1", KeyStoreApi.V1 do + pipe_through(:api) + + scope "/keystores" do + get("/", KeyStoreController, :get_keys) + end + end + scope "/api" do pipe_through(:api) get("/openapi", OpenApiSpex.Plug.RenderSpec, []) diff --git a/lib/lambda_ethereum_consensus/validator/validator_manager.ex b/lib/lambda_ethereum_consensus/validator/validator_manager.ex index 099a18a75..820b44ab4 100644 --- a/lib/lambda_ethereum_consensus/validator/validator_manager.ex +++ b/lib/lambda_ethereum_consensus/validator/validator_manager.ex @@ -23,6 +23,11 @@ defmodule LambdaEthereumConsensus.Validator.ValidatorManager do setup_validators(slot, head_root, keystore_dir, keystore_pass_dir) end + def get_pubkeys(), do: GenServer.call(__MODULE__, :get_pubkeys) + + def handle_call(:get_pubkeys, _from, [] = validators), do: {:reply, validators, validators} + def handle_call(:get_pubkeys, _from, validators), do: {:reply, Map.keys(validators), validators} + defp setup_validators(_s, _r, keystore_dir, keystore_pass_dir) when is_nil(keystore_dir) or is_nil(keystore_pass_dir) do Logger.warning( From 72cbba975e7de7f9897643b06c35bb725aa9b3ea Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Thu, 1 Aug 2024 17:27:55 -0300 Subject: [PATCH 03/19] fix: update port flag --- Makefile | 2 +- config/runtime.exs | 8 ++++---- network_params.yaml | 3 ++- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index afa1d8739..54e4eedc0 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,7 @@ checkpoint-sync: compile-all #▶️ sepolia: @ Run an interactive terminal using sepolia network sepolia: compile-all - iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics --keystore-api + iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics --validator-api-port 5056 #▶️ holesky: @ Run an interactive terminal using holesky network holesky: compile-all diff --git a/config/runtime.exs b/config/runtime.exs index 87b4446db..1ebd1c7d1 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -19,7 +19,7 @@ switches = [ beacon_api: :boolean, beacon_api_port: :integer, keystore_api: :boolean, - keystore_api_port: :integer, + validator_api_port: :integer, listen_address: [:string, :keep], discovery_port: :integer, boot_nodes: :string, @@ -49,8 +49,8 @@ metrics_port = Keyword.get(args, :metrics_port, nil) enable_metrics = Keyword.get(args, :metrics, not is_nil(metrics_port)) beacon_api_port = Keyword.get(args, :beacon_api_port, nil) enable_beacon_api = 
Keyword.get(args, :beacon_api, not is_nil(beacon_api_port)) -keystore_api_port = Keyword.get(args, :keystore_api_port, nil) -enable_keystore_api = Keyword.get(args, :keystore_api, not is_nil(keystore_api_port)) +validator_api_port = Keyword.get(args, :validator_api_port, nil) +enable_keystore_api = Keyword.get(args, :keystore_api, not is_nil(validator_api_port)) listen_addresses = Keyword.get_values(args, :listen_address) discovery_port = Keyword.get(args, :discovery_port, 9000) cli_bootnodes = Keyword.get(args, :boot_nodes, "") @@ -160,7 +160,7 @@ config :lambda_ethereum_consensus, BeaconApi.Endpoint, # KeyStore API config :lambda_ethereum_consensus, KeyStoreApi.Endpoint, server: enable_keystore_api, - http: [port: keystore_api_port || 5000], + http: [port: validator_api_port || 5000], url: [host: "localhost"], render_errors: [ formats: [json: KeyStoreApi.ErrorJSON], diff --git a/network_params.yaml b/network_params.yaml index e995d03fa..102796866 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -9,4 +9,5 @@ participants: use_separate_vc: false count: 1 validator_count: 32 - cl_max_mem: 4096 \ No newline at end of file + cl_max_mem: 4096 + keymanager_enabled: true From 4c814a35d89113dc4bb4e238bd9f5cdeefd57a90 Mon Sep 17 00:00:00 2001 From: Rodrigo Oliveri Date: Mon, 5 Aug 2024 19:22:21 -0300 Subject: [PATCH 04/19] refactor: validator manager genserver removal (#1244) --- Makefile | 1 + config/runtime.exs | 2 +- .../beacon/beacon_node.ex | 26 ++-- lib/lambda_ethereum_consensus/beacon/clock.ex | 99 -------------- .../fork_choice/fork_choice.ex | 8 +- .../p2p/gossip/attestation.ex | 2 +- .../validator/duties.ex | 11 +- .../{validator_manager.ex => setup.ex} | 91 +++++------- .../validator/validator.ex | 58 ++++---- lib/libp2p_port.ex | 129 ++++++++++++++++-- test/unit/beacon_api/beacon_api_v1_test.exs | 2 +- test/unit/libp2p_port_test.exs | 4 +- 12 files changed, 204 insertions(+), 229 deletions(-) delete mode 100644 lib/lambda_ethereum_consensus/beacon/clock.ex rename lib/lambda_ethereum_consensus/validator/{validator_manager.ex => setup.ex} (50%) diff --git a/Makefile b/Makefile index 34485f3ca..a6f62a7b3 100644 --- a/Makefile +++ b/Makefile @@ -266,6 +266,7 @@ lint: mix recode --no-autocorrect mix format --check-formatted mix credo --strict + mix dialyzer --no-check #✅ fmt: @ Format all code (Go, rust and elixir). 
fmt: diff --git a/config/runtime.exs b/config/runtime.exs index 7ecc40018..e12304012 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -171,7 +171,7 @@ if keystore_pass_dir != nil and not File.dir?(keystore_pass_dir) do System.halt(2) end -config :lambda_ethereum_consensus, LambdaEthereumConsensus.Validator.ValidatorManager, +config :lambda_ethereum_consensus, LambdaEthereumConsensus.Validator.Setup, keystore_dir: keystore_dir, keystore_pass_dir: keystore_pass_dir diff --git a/lib/lambda_ethereum_consensus/beacon/beacon_node.ex b/lib/lambda_ethereum_consensus/beacon/beacon_node.ex index d6c4c6cd7..f939f597b 100644 --- a/lib/lambda_ethereum_consensus/beacon/beacon_node.ex +++ b/lib/lambda_ethereum_consensus/beacon/beacon_node.ex @@ -8,7 +8,7 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconNode do alias LambdaEthereumConsensus.ForkChoice alias LambdaEthereumConsensus.StateTransition.Cache alias LambdaEthereumConsensus.Store.BlockStates - alias LambdaEthereumConsensus.Validator.ValidatorManager + alias LambdaEthereumConsensus.Validator alias Types.BeaconState def start_link(opts) do @@ -24,43 +24,35 @@ defmodule LambdaEthereumConsensus.Beacon.BeaconNode do Cache.initialize_cache() - libp2p_args = get_libp2p_args() - time = :os.system_time(:second) ForkChoice.init_store(store, time) - validator_manager = - get_validator_manager( - deposit_tree_snapshot, - store.head_slot, - store.head_root - ) + init_execution_chain(deposit_tree_snapshot, store.head_root) + + validators = Validator.Setup.init(store.head_slot, store.head_root) + + libp2p_args = [genesis_time: store.genesis_time, validators: validators] ++ get_libp2p_args() children = [ - {LambdaEthereumConsensus.Beacon.Clock, {store.genesis_time, time}}, {LambdaEthereumConsensus.Libp2pPort, libp2p_args}, {Task.Supervisor, name: PruneStatesSupervisor}, {Task.Supervisor, name: PruneBlocksSupervisor}, {Task.Supervisor, name: PruneBlobsSupervisor} - ] ++ validator_manager + ] Supervisor.init(children, strategy: :one_for_all) end - defp get_validator_manager(nil, _, _) do + defp init_execution_chain(nil, _) do Logger.warning("Deposit data not found. 
Validator will be disabled.") [] end - defp get_validator_manager(snapshot, slot, head_root) do + defp init_execution_chain(snapshot, head_root) do %BeaconState{eth1_data_votes: votes} = BlockStates.get_state_info!(head_root).beacon_state LambdaEthereumConsensus.Execution.ExecutionChain.init(snapshot, votes) - # TODO: move checkpoint sync outside and move this to application.ex - [ - {ValidatorManager, {slot, head_root}} - ] end defp get_libp2p_args() do diff --git a/lib/lambda_ethereum_consensus/beacon/clock.ex b/lib/lambda_ethereum_consensus/beacon/clock.ex deleted file mode 100644 index 45d8f10e5..000000000 --- a/lib/lambda_ethereum_consensus/beacon/clock.ex +++ /dev/null @@ -1,99 +0,0 @@ -defmodule LambdaEthereumConsensus.Beacon.Clock do - @moduledoc false - - use GenServer - - alias LambdaEthereumConsensus.Libp2pPort - alias LambdaEthereumConsensus.Validator.ValidatorManager - - require Logger - - @type state :: %{ - genesis_time: Types.uint64(), - time: Types.uint64() - } - - @spec start_link({Types.uint64(), Types.uint64()}) :: :ignore | {:error, any} | {:ok, pid} - def start_link(opts) do - GenServer.start_link(__MODULE__, opts, name: __MODULE__) - end - - @spec get_current_time() :: Types.uint64() - def get_current_time(), do: GenServer.call(__MODULE__, :get_current_time) - - ########################## - ### GenServer Callbacks - ########################## - - @impl GenServer - @spec init({Types.uint64(), Types.uint64()}) :: - {:ok, state()} | {:stop, any} - def init({genesis_time, time}) do - schedule_next_tick() - - {:ok, - %{ - genesis_time: genesis_time, - time: time - }} - end - - @impl true - def handle_call(:get_current_time, _from, %{time: time} = state) do - {:reply, time, state} - end - - @impl true - def handle_info(:on_tick, state) do - schedule_next_tick() - time = :os.system_time(:second) - new_state = %{state | time: time} - - if time >= state.genesis_time do - Libp2pPort.on_tick(time) - # TODO: reduce time between ticks to account for gnosis' 5s slot time. 
- old_logical_time = compute_logical_time(state) - new_logical_time = compute_logical_time(new_state) - - if old_logical_time != new_logical_time do - log_new_slot(new_logical_time) - ValidatorManager.notify_tick(new_logical_time) - end - end - - {:noreply, new_state} - end - - def schedule_next_tick() do - # For millisecond precision - time_to_next_tick = 1000 - rem(:os.system_time(:millisecond), 1000) - Process.send_after(__MODULE__, :on_tick, time_to_next_tick) - end - - @type slot_third :: :first_third | :second_third | :last_third - @type logical_time :: {Types.slot(), slot_third()} - - @spec compute_logical_time(state()) :: logical_time() - defp compute_logical_time(state) do - elapsed_time = state.time - state.genesis_time - - slot_thirds = div(elapsed_time * 3, ChainSpec.get("SECONDS_PER_SLOT")) - slot = div(slot_thirds, 3) - - slot_third = - case rem(slot_thirds, 3) do - 0 -> :first_third - 1 -> :second_third - 2 -> :last_third - end - - {slot, slot_third} - end - - defp log_new_slot({slot, :first_third}) do - :telemetry.execute([:sync, :store], %{slot: slot}) - Logger.info("[Clock] Slot transition", slot: slot) - end - - defp log_new_slot(_), do: :ok -end diff --git a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex index 32cd09264..6e8ccd844 100644 --- a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex +++ b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex @@ -4,11 +4,10 @@ defmodule LambdaEthereumConsensus.ForkChoice do """ require Logger - - alias LambdaEthereumConsensus.Beacon.Clock alias LambdaEthereumConsensus.Execution.ExecutionChain alias LambdaEthereumConsensus.ForkChoice.Handlers alias LambdaEthereumConsensus.ForkChoice.Head + alias LambdaEthereumConsensus.Libp2pPort alias LambdaEthereumConsensus.Metrics alias LambdaEthereumConsensus.P2P.Gossip.OperationsCollector alias LambdaEthereumConsensus.StateTransition.Misc @@ -18,7 +17,6 @@ defmodule LambdaEthereumConsensus.ForkChoice do alias LambdaEthereumConsensus.Store.CheckpointStates alias LambdaEthereumConsensus.Store.StateDb alias LambdaEthereumConsensus.Store.StoreDb - alias LambdaEthereumConsensus.Validator.ValidatorManager alias Types.Attestation alias Types.BlockInfo alias Types.Checkpoint @@ -120,7 +118,7 @@ defmodule LambdaEthereumConsensus.ForkChoice do @spec get_current_chain_slot() :: Types.slot() def get_current_chain_slot() do - time = Clock.get_current_time() + time = :os.system_time(:second) genesis_time = StoreDb.fetch_genesis_time!() compute_current_slot(time, genesis_time) end @@ -261,7 +259,7 @@ defmodule LambdaEthereumConsensus.ForkChoice do %{slot: slot, body: body} = head_block OperationsCollector.notify_new_block(head_block) - ValidatorManager.notify_new_block(slot, head_root) + Libp2pPort.notify_new_head(slot, head_root) ExecutionChain.notify_new_block(slot, body.eth1_data, body.execution_payload) update_fork_choice_data( diff --git a/lib/lambda_ethereum_consensus/p2p/gossip/attestation.ex b/lib/lambda_ethereum_consensus/p2p/gossip/attestation.ex index fab0b691b..11ea44353 100644 --- a/lib/lambda_ethereum_consensus/p2p/gossip/attestation.ex +++ b/lib/lambda_ethereum_consensus/p2p/gossip/attestation.ex @@ -66,7 +66,7 @@ defmodule LambdaEthereumConsensus.P2P.Gossip.Attestation do def collect(subnet_id, attestation) do join(subnet_id) SubnetInfo.new_subnet_with_attestation(subnet_id, attestation) - Libp2pPort.subscribe_to_topic(topic(subnet_id), __MODULE__) + Libp2pPort.async_subscribe_to_topic(topic(subnet_id), 
__MODULE__) end @spec stop_collecting(non_neg_integer()) :: diff --git a/lib/lambda_ethereum_consensus/validator/duties.ex b/lib/lambda_ethereum_consensus/validator/duties.ex index 6d5145972..ff9b70ff9 100644 --- a/lib/lambda_ethereum_consensus/validator/duties.ex +++ b/lib/lambda_ethereum_consensus/validator/duties.ex @@ -69,9 +69,14 @@ defmodule LambdaEthereumConsensus.Validator.Duties do attester_duties # Drop the first element, which is the previous epoch's duty |> Stream.drop(1) - |> Enum.each(fn %{index_in_committee: i, committee_index: ci, slot: slot} -> - Logger.debug( - "[Validator] #{validator_index} has to attest in committee #{ci} of slot #{slot} with index #{i}" + |> Enum.each(fn %{ + index_in_committee: i, + committee_index: ci, + slot: slot, + should_aggregate?: sa + } -> + Logger.info( + "[Validator] #{validator_index} has to attest in committee #{ci} of slot #{slot} with index #{i}, and should_aggregate?: #{sa}" ) end) diff --git a/lib/lambda_ethereum_consensus/validator/validator_manager.ex b/lib/lambda_ethereum_consensus/validator/setup.ex similarity index 50% rename from lib/lambda_ethereum_consensus/validator/validator_manager.ex rename to lib/lambda_ethereum_consensus/validator/setup.ex index 099a18a75..378e11fc8 100644 --- a/lib/lambda_ethereum_consensus/validator/validator_manager.ex +++ b/lib/lambda_ethereum_consensus/validator/setup.ex @@ -1,21 +1,13 @@ -defmodule LambdaEthereumConsensus.Validator.ValidatorManager do +defmodule LambdaEthereumConsensus.Validator.Setup do @moduledoc """ - Module that manage the validators state + Module that setups the initial validators state """ - use GenServer require Logger - alias LambdaEthereumConsensus.Beacon.Clock alias LambdaEthereumConsensus.Validator - @spec start_link({Types.slot(), Types.root()}) :: :ignore | {:error, any} | {:ok, pid} - def start_link({slot, head_root}) do - GenServer.start_link(__MODULE__, {slot, head_root}, name: __MODULE__) - end - - @spec init({Types.slot(), Types.root()}) :: - {:ok, %{Bls.pubkey() => Validator.state()}} | {:stop, any} - def init({slot, head_root}) do + @spec init(Types.slot(), Types.root()) :: %{Bls.pubkey() => Validator.state()} + def init(slot, head_root) do config = Application.get_env(:lambda_ethereum_consensus, __MODULE__, []) keystore_dir = Keyword.get(config, :keystore_dir) keystore_pass_dir = Keyword.get(config, :keystore_pass_dir) @@ -26,10 +18,10 @@ defmodule LambdaEthereumConsensus.Validator.ValidatorManager do defp setup_validators(_s, _r, keystore_dir, keystore_pass_dir) when is_nil(keystore_dir) or is_nil(keystore_pass_dir) do Logger.warning( - "[Validator Manager] No keystore_dir or keystore_pass_dir provided. Validator will not start." + "[Validator] No keystore_dir or keystore_pass_dir provided. Validator will not start." ) - {:ok, []} + %{} end defp setup_validators(slot, head_root, keystore_dir, keystore_pass_dir) do @@ -42,51 +34,11 @@ defmodule LambdaEthereumConsensus.Validator.ValidatorManager do end) |> Map.new() - Logger.info("[Validator Manager] Initialized #{Enum.count(validators)} validators") - - {:ok, validators} - end - - @spec notify_new_block(Types.slot(), Types.root()) :: :ok - def notify_new_block(slot, head_root) do - notify_validators({:new_block, slot, head_root}) - end - - @spec notify_tick(Clock.logical_time()) :: :ok - def notify_tick(logical_time) do - notify_validators({:on_tick, logical_time}) - end - - # TODO: The use of a Genserver and cast is still needed to avoid locking at the clock level. 
- # This is a temporary solution and will be taken off in a future PR. - defp notify_validators(msg), do: GenServer.cast(__MODULE__, {:notify_all, msg}) - - def handle_cast({:notify_all, msg}, validators) do - validators = notify_all(validators, msg) + Logger.info("[Validator] Initialized #{Enum.count(validators)} validators") - {:noreply, validators} + validators end - defp notify_all(validators, msg) do - start_time = System.monotonic_time(:millisecond) - - updated_validators = Enum.map(validators, ¬ify_validator(&1, msg)) - - end_time = System.monotonic_time(:millisecond) - - Logger.debug( - "[Validator Manager] #{inspect(msg)} notified to all Validators after #{end_time - start_time} ms" - ) - - updated_validators - end - - defp notify_validator({pubkey, validator}, {:on_tick, logical_time}), - do: {pubkey, Validator.handle_tick(logical_time, validator)} - - defp notify_validator({pubkey, validator}, {:new_block, slot, head_root}), - do: {pubkey, Validator.handle_new_block(slot, head_root, validator)} - @doc """ Get validator keys from the keystore directory. This function expects two files for each validator: @@ -107,7 +59,7 @@ defmodule LambdaEthereumConsensus.Validator.ValidatorManager do {keystore_file, keystore_pass_file} else - Logger.warning("[Validator Manager] Skipping file: #{filename}. Not a keystore file.") + Logger.warning("[Validator] Skipping file: #{filename}. Not a keystore file.") nil end end) @@ -119,7 +71,7 @@ defmodule LambdaEthereumConsensus.Validator.ValidatorManager do rescue error -> Logger.error( - "[Validator Manager] Failed to decode keystore file: #{keystore_file}. Pass file: #{keystore_pass_file} Error: #{inspect(error)}" + "[Validator] Failed to decode keystore file: #{keystore_file}. Pass file: #{keystore_pass_file} Error: #{inspect(error)}" ) nil @@ -127,4 +79,27 @@ defmodule LambdaEthereumConsensus.Validator.ValidatorManager do end) |> Enum.reject(&is_nil/1) end + + @spec notify_validators(map(), tuple()) :: map() + def notify_validators(validators, msg) do + start_time = System.monotonic_time(:millisecond) + + Logger.debug("[Validator] Notifying all Validators with message: #{inspect(msg)}") + + updated_validators = Map.new(validators, ¬ify_validator(&1, msg)) + + end_time = System.monotonic_time(:millisecond) + + Logger.debug( + "[Validator] #{inspect(msg)} notified to all Validators after #{end_time - start_time} ms" + ) + + updated_validators + end + + defp notify_validator({pubkey, validator}, {:on_tick, slot_data}), + do: {pubkey, Validator.handle_tick(slot_data, validator)} + + defp notify_validator({pubkey, validator}, {:new_head, slot, head_root}), + do: {pubkey, Validator.handle_new_head(slot, head_root, validator)} end diff --git a/lib/lambda_ethereum_consensus/validator/validator.ex b/lib/lambda_ethereum_consensus/validator/validator.ex index df0486cb4..fa43825a0 100644 --- a/lib/lambda_ethereum_consensus/validator/validator.ex +++ b/lib/lambda_ethereum_consensus/validator/validator.ex @@ -7,12 +7,12 @@ defmodule LambdaEthereumConsensus.Validator do defstruct [ :slot, :root, + :epoch, :duties, :validator, :payload_builder ] - alias LambdaEthereumConsensus.Beacon.Clock alias LambdaEthereumConsensus.ForkChoice alias LambdaEthereumConsensus.Libp2pPort alias LambdaEthereumConsensus.P2P.Gossip @@ -41,6 +41,7 @@ defmodule LambdaEthereumConsensus.Validator do # just at the begining of every epoch, and then just update them as needed. 
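  # A minimal usage sketch of the new notification flow, built on `notify_validators/2` above;
  # it assumes `validators` is the pubkey => Validator state map returned by
  # `Validator.Setup.init/2`:
  #
  #   validators = Validator.Setup.notify_validators(validators, {:on_tick, {slot, :first_third}})
  #   validators = Validator.Setup.notify_validators(validators, {:new_head, slot, head_root})
  #
  # Each message is fanned out to every validator (via `handle_tick/2` or `handle_new_head/3`)
  # and the updated map replaces the previous one.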
@type state :: %__MODULE__{ slot: Types.slot(), + epoch: Types.epoch(), root: Types.root(), duties: Duties.duties(), validator: validator(), @@ -51,6 +52,7 @@ defmodule LambdaEthereumConsensus.Validator do def new({head_slot, head_root, {pubkey, privkey}}) do state = %__MODULE__{ slot: head_slot, + epoch: Misc.compute_epoch_at_slot(head_slot), root: head_root, duties: Duties.empty_duties(), validator: %{ @@ -93,8 +95,8 @@ defmodule LambdaEthereumConsensus.Validator do end end - @spec handle_new_block(Types.slot(), Types.root(), state) :: state - def handle_new_block(slot, head_root, %{validator: %{index: nil}} = state) do + @spec handle_new_head(Types.slot(), Types.root(), state) :: state + def handle_new_head(slot, head_root, %{validator: %{index: nil}} = state) do log_error("-1", "setup validator", "index not present handle block", slot: slot, root: head_root @@ -103,8 +105,8 @@ defmodule LambdaEthereumConsensus.Validator do state end - def handle_new_block(slot, head_root, state) do - log_debug(state.validator.index, "recieved new block", slot: slot, root: head_root) + def handle_new_head(slot, head_root, state) do + log_debug(state.validator.index, "recieved new head", slot: slot, root: head_root) # TODO: this doesn't take into account reorgs state @@ -113,7 +115,7 @@ defmodule LambdaEthereumConsensus.Validator do |> maybe_build_payload(slot + 1) end - @spec handle_tick(Clock.logical_time(), state) :: state + @spec handle_tick({Types.slot(), atom()}, state) :: state def handle_tick(_logical_time, %{validator: %{index: nil}} = state) do log_error("-1", "setup validator", "index not present for handle tick") state @@ -152,17 +154,8 @@ defmodule LambdaEthereumConsensus.Validator do defp update_state(%{slot: slot, root: root} = state, slot, root), do: state - defp update_state(%{slot: slot, root: _other_root} = state, slot, head_root) do - # TODO: this log is appearing for every block - # Logger.warning("[Validator] Block came late", slot: slot, root: head_root) - - # TODO: rollback stale data instead of the whole cache - epoch = Misc.compute_epoch_at_slot(slot + 1) - recompute_duties(state, 0, epoch, slot, head_root) - end - - defp update_state(%{slot: last_slot} = state, slot, head_root) do - last_epoch = Misc.compute_epoch_at_slot(last_slot + 1) + # Epoch as part of the state now avoids recomputing the duties at every block + defp update_state(%{epoch: last_epoch} = state, slot, head_root) do epoch = Misc.compute_epoch_at_slot(slot + 1) if last_epoch == epoch do @@ -187,7 +180,7 @@ defmodule LambdaEthereumConsensus.Validator do move_subnets(state.duties, new_duties) Duties.log_duties(new_duties, state.validator.index) - %{state | slot: slot, root: head_root, duties: new_duties} + %{state | slot: slot, root: head_root, duties: new_duties, epoch: epoch} end @spec fetch_target_state(Types.epoch(), Types.root()) :: Types.BeaconState.t() @@ -252,10 +245,14 @@ defmodule LambdaEthereumConsensus.Validator do attestation = produce_attestation(current_duty, state.root, state.validator.privkey) log_md = [slot: attestation.data.slot, attestation: attestation, subnet_id: subnet_id] - log_debug(validator.index, "publishing attestation", log_md) + + debug_log_msg = + "publishing attestation on committee index: #{current_duty.committee_index} | as #{current_duty.index_in_committee}/#{current_duty.committee_length - 1} and pubkey: #{LambdaEthereumConsensus.Utils.format_shorten_binary(validator.pubkey)}" + + log_debug(validator.index, debug_log_msg, log_md) Gossip.Attestation.publish(subnet_id, 
attestation) - |> log_debug_result(validator.index, "published attestation", log_md) + |> log_info_result(validator.index, "published attestation", log_md) if current_duty.should_aggregate? do log_debug(validator.index, "collecting for future aggregation", log_md) @@ -300,12 +297,16 @@ defmodule LambdaEthereumConsensus.Validator do end defp aggregate_attestations(attestations) do + # TODO: We need to check why we are producing duplicate attestations, this was generating invalid signatures + unique_attestations = attestations |> Enum.uniq() + aggregation_bits = - attestations + unique_attestations |> Stream.map(&Map.fetch!(&1, :aggregation_bits)) |> Enum.reduce(&BitField.bitwise_or/2) - {:ok, signature} = attestations |> Enum.map(&Map.fetch!(&1, :signature)) |> Bls.aggregate() + {:ok, signature} = + unique_attestations |> Enum.map(&Map.fetch!(&1, :signature)) |> Bls.aggregate() %{List.first(attestations) | aggregation_bits: aggregation_bits, signature: signature} end @@ -400,16 +401,16 @@ defmodule LambdaEthereumConsensus.Validator do defp start_payload_builder(%{validator: validator} = state, proposed_slot, head_root) do # TODO: handle reorgs and late blocks - log_debug(validator.index, "starting building payload", slot: proposed_slot) + log_debug(validator.index, "starting building payload for slot #{proposed_slot}") case BlockBuilder.start_building_payload(proposed_slot, head_root) do {:ok, payload_id} -> - log_debug(validator.index, "payload built", slot: proposed_slot) + log_info(validator.index, "payload built for slot #{proposed_slot}") %{state | payload_builder: {proposed_slot, head_root, payload_id}} {:error, reason} -> - log_error(validator.index, "start building payload", reason, slot: proposed_slot) + log_error(validator.index, "start building payload for slot #{proposed_slot}", reason) %{state | payload_builder: nil} end @@ -514,13 +515,10 @@ defmodule LambdaEthereumConsensus.Validator do defp log_result(:ok, :info, index, message, metadata), do: log_info(index, message, metadata) defp log_result(:ok, :debug, index, message, metadata), do: log_debug(index, message, metadata) - defp log_result({:error, reason}, _level, index, message, metadata), - do: log_error(index, message, reason, metadata) - - defp log_info(index, message, metadata), + defp log_info(index, message, metadata \\ []), do: Logger.info("[Validator] #{index} #{message}", metadata) - defp log_debug(index, message, metadata), + defp log_debug(index, message, metadata \\ []), do: Logger.debug("[Validator] #{index} #{message}", metadata) defp log_error(index, message, reason, metadata \\ []), diff --git a/lib/libp2p_port.ex b/lib/libp2p_port.ex index 314241f85..47f86eaa8 100644 --- a/lib/libp2p_port.ex +++ b/lib/libp2p_port.ex @@ -9,6 +9,8 @@ defmodule LambdaEthereumConsensus.Libp2pPort do use GenServer + @tick_time 1000 + alias LambdaEthereumConsensus.Beacon.PendingBlocks alias LambdaEthereumConsensus.Beacon.SyncBlocks alias LambdaEthereumConsensus.ForkChoice @@ -21,6 +23,7 @@ defmodule LambdaEthereumConsensus.Libp2pPort do alias LambdaEthereumConsensus.P2p.Requests alias LambdaEthereumConsensus.StateTransition.Misc alias LambdaEthereumConsensus.Utils.BitVector + alias LambdaEthereumConsensus.Validator alias Libp2pProto.AddPeer alias Libp2pProto.Command alias Libp2pProto.Enr @@ -61,13 +64,17 @@ defmodule LambdaEthereumConsensus.Libp2pPort do ] @type init_arg :: - {:listen_addr, [String.t()]} + {:genesis_time, Types.uint64()} + | {:validators, %{}} + | {:listen_addr, [String.t()]} | {:enable_discovery, boolean()} | 
{:discovery_addr, String.t()} | {:bootnodes, [String.t()]} | {:join_init_topics, boolean()} | {:enable_request_handlers, boolean()} + @type slot_data() :: {Types.uint64(), :first_third | :second_third | :last_third} + @type node_identity() :: %{ peer_id: binary(), # Pretty-printed version of the peer ID @@ -104,9 +111,14 @@ defmodule LambdaEthereumConsensus.Libp2pPort do GenServer.start_link(__MODULE__, args, opts) end - @spec on_tick(Types.uint64()) :: :ok - def on_tick(time) do - GenServer.cast(__MODULE__, {:on_tick, time}) + @spec notify_new_head(Types.slot(), Types.root()) :: :ok + def notify_new_head(slot, head_root) do + # TODO: This is quick workarround to notify the libp2p port about new heads from within + # the ForkChoice.recompute_head/1 without moving the validators to the store this + # allows to deferr that move until we simplify the state and remove duplicates. + # THIS IS NEEDED BECAUSE FORKCHOICE IS CURRENTLY RUNNING ON LIBP2P PORT. + # It could be a simple cast in the future if that's not the case anymore. + send(self(), {:new_head, slot, head_root}) end @doc """ @@ -226,7 +238,7 @@ defmodule LambdaEthereumConsensus.Libp2pPort do direction: "elixir->" }) - call_command(pid, {:publish, %Publish{topic: topic_name, message: message}}) + cast_command(pid, {:publish, %Publish{topic: topic_name, message: message}}) end @doc """ @@ -246,6 +258,23 @@ defmodule LambdaEthereumConsensus.Libp2pPort do call_command(pid, {:subscribe, %SubscribeToTopic{name: topic_name}}) end + @doc """ + Subscribes to the given topic async, not waiting for a response at the subscribe. + After this, messages published to the topicwill be received by `self()`. + """ + @spec async_subscribe_to_topic(GenServer.server(), String.t(), module()) :: + :ok | {:error, String.t()} + def async_subscribe_to_topic(pid \\ __MODULE__, topic_name, module) do + :telemetry.execute([:port, :message], %{}, %{ + function: "async_subscribe_to_topic", + direction: "elixir->" + }) + + GenServer.cast(pid, {:new_subscriber, topic_name, module}) + + cast_command(pid, {:subscribe, %SubscribeToTopic{name: topic_name}}) + end + @doc """ Returns the next gossipsub message received by the server for subscribed topics on the current process. If there are none, it waits for one. @@ -349,6 +378,8 @@ defmodule LambdaEthereumConsensus.Libp2pPort do @impl GenServer def init(args) do + {genesis_time, args} = Keyword.pop!(args, :genesis_time) + {validators, args} = Keyword.pop(args, :validators, %{}) {join_init_topics, args} = Keyword.pop(args, :join_init_topics, false) {enable_request_handlers, args} = Keyword.pop(args, :enable_request_handlers, false) @@ -371,8 +402,13 @@ defmodule LambdaEthereumConsensus.Libp2pPort do "[Optimistic Sync] Waiting #{@sync_delay_millis / 1000} seconds to discover some peers before requesting blocks." ) + schedule_next_tick() + {:ok, %{ + genesis_time: genesis_time, + validators: validators, + slot_data: nil, port: port, subscribers: %{}, requests: Requests.new(), @@ -400,14 +436,6 @@ defmodule LambdaEthereumConsensus.Libp2pPort do {:noreply, state} end - @impl GenServer - def handle_cast({:on_tick, time}, state) do - # TODO: we probably want to remove this from here, but we keep it here to have this serialized - # with respect to the other fork choice store modifications. 
- ForkChoice.on_tick(time) - {:noreply, state} - end - def handle_cast( {:send_request, peer_id, protocol_id, message, handler}, %{ @@ -458,6 +486,14 @@ defmodule LambdaEthereumConsensus.Libp2pPort do {:noreply, state} end + @impl GenServer + def handle_info(:on_tick, state) do + schedule_next_tick() + time = :os.system_time(:second) + + {:noreply, on_tick(time, state)} + end + @impl GenServer def handle_info(:sync_blocks, state) do blocks_to_download = SyncBlocks.run() @@ -468,6 +504,14 @@ defmodule LambdaEthereumConsensus.Libp2pPort do {:noreply, new_state} end + @impl GenServer + def handle_info({:new_head, slot, head_root}, %{validators: validators} = state) do + updated_validators = + Validator.Setup.notify_validators(validators, {:new_head, slot, head_root}) + + {:noreply, %{state | validators: updated_validators}} + end + @impl GenServer def handle_info({_port, {:data, data}}, state) do %Notification{n: {_, payload}} = Notification.decode(data) @@ -688,4 +732,63 @@ defmodule LambdaEthereumConsensus.Libp2pPort do add_subscriber(state, topic, module) end) end + + defp on_tick(time, %{genesis_time: genesis_time} = state) when time < genesis_time, do: state + + defp on_tick(time, %{genesis_time: genesis_time, slot_data: slot_data} = state) do + # TODO: we probably want to remove this (ForkChoice.on_tick) from here, but we keep it + # here to have this serialized with respect to the other fork choice store modifications. + + ForkChoice.on_tick(time) + + new_slot_data = compute_slot(genesis_time, time) + + updated_state = + if slot_data == new_slot_data do + state + else + updated_validators = + Validator.Setup.notify_validators(state.validators, {:on_tick, new_slot_data}) + + %{state | slot_data: new_slot_data, validators: updated_validators} + end + + maybe_log_new_slot(slot_data, new_slot_data) + + updated_state + end + + defp schedule_next_tick() do + # For millisecond precision + time_to_next_tick = @tick_time - rem(:os.system_time(:millisecond), @tick_time) + Process.send_after(__MODULE__, :on_tick, time_to_next_tick) + end + + defp compute_slot(genesis_time, time) do + # TODO: This was copied as it is from the Clock, slot calculations are spread + # across modules, we should probably centralize them. + elapsed_time = time - genesis_time + + slot_thirds = div(elapsed_time * 3, ChainSpec.get("SECONDS_PER_SLOT")) + slot = div(slot_thirds, 3) + + slot_third = + case rem(slot_thirds, 3) do + 0 -> :first_third + 1 -> :second_third + 2 -> :last_third + end + + {slot, slot_third} + end + + defp maybe_log_new_slot({slot, _third}, {slot, _another_third}), do: :ok + + defp maybe_log_new_slot({_prev_slot, _thrid}, {slot, :first_third}) do + # TODO: It used :sync, :store as the slot event in the old Clock, double-check. 
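    # Worked example for compute_slot/2 above, assuming the mainnet SECONDS_PER_SLOT of 12.
    # With time - genesis_time = 50:
    #   slot_thirds = div(50 * 3, 12) = 12
    #   slot        = div(12, 3)      = 4
    #   rem(12, 3)  = 0, so the third is :first_third
    # compute_slot/2 therefore returns {4, :first_third}, and this clause logs the transition
    # once per slot, when its first third begins.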
+ :telemetry.execute([:sync, :store], %{slot: slot}) + Logger.info("[Libp2p] Slot transition", slot: slot) + end + + defp maybe_log_new_slot(_, _), do: :ok end diff --git a/test/unit/beacon_api/beacon_api_v1_test.exs b/test/unit/beacon_api/beacon_api_v1_test.exs index 0157fdac8..91a4ec522 100644 --- a/test/unit/beacon_api/beacon_api_v1_test.exs +++ b/test/unit/beacon_api/beacon_api_v1_test.exs @@ -159,7 +159,7 @@ defmodule Unit.BeaconApiTest.V1 do alias LambdaEthereumConsensus.P2P.Metadata patch(ForkChoice, :get_fork_version, fn -> ChainSpec.get("DENEB_FORK_VERSION") end) - start_link_supervised!(Libp2pPort) + start_link_supervised!({Libp2pPort, genesis_time: 42}) Metadata.init() identity = Libp2pPort.get_node_identity() metadata = Metadata.get_metadata() diff --git a/test/unit/libp2p_port_test.exs b/test/unit/libp2p_port_test.exs index 7bf9e0d12..82fb5bc52 100644 --- a/test/unit/libp2p_port_test.exs +++ b/test/unit/libp2p_port_test.exs @@ -17,7 +17,9 @@ defmodule Unit.Libp2pPortTest do end defp start_port(name \\ Libp2pPort, init_args \\ []) do - start_link_supervised!({Libp2pPort, [opts: [name: name]] ++ init_args}, id: name) + start_link_supervised!({Libp2pPort, [opts: [name: name], genesis_time: 42] ++ init_args}, + id: name + ) end @tag :tmp_dir From deaddeee92351b25767698b53e21946b891e1b39 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Tue, 6 Aug 2024 13:09:57 -0300 Subject: [PATCH 05/19] feat: add post method --- Makefile | 2 +- .../controllers/v1/key_store_controller.ex | 46 +++++++++++++++++++ lib/key_store_api/router.ex | 1 + 3 files changed, 48 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 54e4eedc0..d0c3042d3 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,7 @@ checkpoint-sync: compile-all #▶️ sepolia: @ Run an interactive terminal using sepolia network sepolia: compile-all - iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics --validator-api-port 5056 + iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics --validator-api-port 5056 --keystore-dir "keystore_dir" --keystore-pass-dir "keystore_pass_dir" #▶️ holesky: @ Run an interactive terminal using holesky network holesky: compile-all diff --git a/lib/key_store_api/controllers/v1/key_store_controller.ex b/lib/key_store_api/controllers/v1/key_store_controller.ex index e65c05ac8..3c12075d1 100644 --- a/lib/key_store_api/controllers/v1/key_store_controller.ex +++ b/lib/key_store_api/controllers/v1/key_store_controller.ex @@ -7,11 +7,17 @@ defmodule KeyStoreApi.V1.KeyStoreController do plug(OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true) + @default_keystore_dir "keystore_dir" + @default_keystore_pass_dir "keystore_pass_dir" + # NOTE: this function is required by OpenApiSpex, and should return the information # of each specific endpoint. We just return the specific entry from the parsed spec. 
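  # An illustrative import request for `add_keys/2` below; the field names come from
  # `body_params.keystores` / `body_params.passwords`, while the concrete values here are
  # placeholders:
  #
  #   POST /eth/v1/keystores
  #   {
  #     "keystores": ["{\"version\":4,\"crypto\":{...}}"],
  #     "passwords": ["password-for-keystore-0"]
  #   }
  #
  # `add_keys/2` zips both lists positionally, so each password decrypts the keystore at the
  # same index before both files are written to the keystore directories.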
def open_api_operation(:get_keys), do: ApiSpec.spec().paths["/eth/v1/keystores"].get + def open_api_operation(:add_keys), + do: ApiSpec.spec().paths["/eth/v1/keystores"].post + @spec get_keys(Plug.Conn.t(), any) :: Plug.Conn.t() def get_keys(conn, _params) do pubkeys_info = @@ -29,4 +35,44 @@ defmodule KeyStoreApi.V1.KeyStoreController do "data" => pubkeys_info }) end + + @spec add_keys(Plug.Conn.t(), any) :: Plug.Conn.t() + def add_keys(conn, _params) do + body_params = conn.private.open_api_spex.body_params + config = Application.get_env(:lambda_ethereum_consensus, ValidatorManager, []) + keystore_dir = Keyword.get(config, :keystore_dir) || @default_keystore_dir + keystore_pass_dir = Keyword.get(config, :keystore_pass_dir) || @default_keystore_pass_dir + + results = + Enum.zip(body_params.keystores, body_params.passwords) + |> Enum.map(fn {keystore, password} -> + {pubkey, _privkey} = Keystore.decode_str!(keystore, password) + + File.write!( + Path.join( + keystore_dir, + "#{inspect(pubkey |> Utils.hex_encode())}.json" + ), + keystore + ) + + File.write!( + Path.join( + keystore_pass_dir, + "#{inspect(pubkey |> Utils.hex_encode())}.txt" + ), + password + ) + + %{ + status: "imported", + message: "Pubkey: #{inspect(pubkey)}" + } + end) + + conn + |> json(%{ + "data" => results + }) + end end diff --git a/lib/key_store_api/router.ex b/lib/key_store_api/router.ex index 9761be944..b4d3c02de 100644 --- a/lib/key_store_api/router.ex +++ b/lib/key_store_api/router.ex @@ -12,6 +12,7 @@ defmodule KeyStoreApi.Router do scope "/keystores" do get("/", KeyStoreController, :get_keys) + post("/", KeyStoreController, :add_keys) end end From d643d2d17c83c7df51bc1070fc81442cdb5046dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Arjovsky?= Date: Tue, 6 Aug 2024 18:18:28 +0200 Subject: [PATCH 06/19] perf: compute all committees at once at the beginning of an epoch. 
(#1245) --- .gitignore | 1 + bench/block_processing.exs | 29 ++++---- .../fork_choice/fork_choice.ex | 42 +++++++---- .../state_transition/accessors.ex | 58 ++++++++++++++- .../state_transition/cache.ex | 3 + .../state_transition/misc.ex | 74 ++++++++++++++++++- .../state_transition/operations.ex | 8 +- .../state_transition/shuffling.ex | 22 +++++- lib/lambda_ethereum_consensus/utils.ex | 23 ++++++ lib/types/beacon_chain/attestation.ex | 3 + lib/types/beacon_chain/indexed_attestation.ex | 8 ++ lib/utils/date.ex | 16 ++++ lib/utils/profile.ex | 11 +-- test/fixtures/block.ex | 11 +++ test/unit/state_transition/misc_test.exs | 37 ++++++++++ 15 files changed, 297 insertions(+), 49 deletions(-) create mode 100644 lib/utils/date.ex create mode 100644 test/unit/state_transition/misc_test.exs diff --git a/.gitignore b/.gitignore index f417f1396..04e481ee9 100644 --- a/.gitignore +++ b/.gitignore @@ -65,3 +65,4 @@ callgrind.out.* # beacon node oapi json file beacon-node-oapi.json +flamegraphs/ diff --git a/bench/block_processing.exs b/bench/block_processing.exs index 202b0a5da..15a4ee745 100644 --- a/bench/block_processing.exs +++ b/bench/block_processing.exs @@ -1,43 +1,42 @@ alias LambdaEthereumConsensus.ForkChoice alias LambdaEthereumConsensus.ForkChoice.Handlers alias LambdaEthereumConsensus.StateTransition.Cache -alias LambdaEthereumConsensus.Store -alias LambdaEthereumConsensus.Store.BlockBySlot alias LambdaEthereumConsensus.Store.BlockDb alias LambdaEthereumConsensus.Store.StateDb -alias Types.BeaconState alias Types.BlockInfo -alias Types.SignedBeaconBlock alias Types.StateInfo +alias Utils.Date Logger.configure(level: :warning) Cache.initialize_cache() # NOTE: this slot must be at the beginning of an epoch (i.e. a multiple of 32) -slot = 9_591_424 +slot = 9_649_056 -IO.puts("fetching blocks...") +IO.puts("Fetching state and blocks...") {:ok, %StateInfo{beacon_state: state}} = StateDb.get_state_by_slot(slot) {:ok, %BlockInfo{signed_block: block}} = BlockDb.get_block_info_by_slot(slot) -{:ok, %BlockInfo{signed_block: new_block} = block_info} = BlockDb.get_block_info_by_slot(slot + 1) +{:ok, %BlockInfo{} = block_info} = BlockDb.get_block_info_by_slot(slot + 1) +{:ok, %BlockInfo{} = block_info_2} = BlockDb.get_block_info_by_slot(slot + 2) -IO.puts("initializing store...") +IO.puts("Initializing store...") {:ok, store} = Types.Store.get_forkchoice_store(state, block) store = Handlers.on_tick(store, store.time + 30) -{:ok, root} = BlockBySlot.get(slot) +IO.puts("Processing the block 1...") -IO.puts("about to process block: #{slot + 1}, with root: #{Base.encode16(root)}...") -IO.puts("#{length(attestations)} attestations ; #{length(attester_slashings)} attester slashings") -IO.puts("") +{:ok, new_store} = ForkChoice.process_block(block_info, store) +IO.puts("Processing the block 2...") if System.get_env("FLAMA") do - Flama.run({ForkChoice, :process_block, [block_info, store]}) + filename = "flamegraphs/stacks.#{Date.now_str()}.out" + Flama.run({ForkChoice, :process_block, [block_info_2, new_store]}, output_file: filename) + IO.puts("Flamegraph saved to #{filename}") else Benchee.run( %{ "block (full cache)" => fn -> - ForkChoice.process_block(block_info, store) + ForkChoice.process_block(block_info_2, new_store) end }, time: 30 @@ -46,7 +45,7 @@ else Benchee.run( %{ "block (empty cache)" => fn _ -> - ForkChoice.process_block(block_info, store) + ForkChoice.process_block(block_info_2, new_store) end }, time: 30, diff --git a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex 
b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex index 6e8ccd844..7f2b19123 100644 --- a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex +++ b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex @@ -10,6 +10,7 @@ defmodule LambdaEthereumConsensus.ForkChoice do alias LambdaEthereumConsensus.Libp2pPort alias LambdaEthereumConsensus.Metrics alias LambdaEthereumConsensus.P2P.Gossip.OperationsCollector + alias LambdaEthereumConsensus.StateTransition.Accessors alias LambdaEthereumConsensus.StateTransition.Misc alias LambdaEthereumConsensus.Store.BlobDb alias LambdaEthereumConsensus.Store.BlockDb @@ -209,13 +210,35 @@ defmodule LambdaEthereumConsensus.ForkChoice do attestations = signed_block.message.body.attestations attester_slashings = signed_block.message.body.attester_slashings + # Prefetch relevant states. + states = + Metrics.span_operation(:prefetch_states, nil, nil, fn -> + attestations + |> Enum.map(& &1.data.target) + |> Enum.uniq() + |> Enum.flat_map(&fetch_checkpoint_state/1) + |> Map.new() + end) + + # Prefetch committees for all relevant epochs. + Metrics.span_operation(:prefetch_committees, nil, nil, fn -> + Enum.each(states, fn {ch, state} -> Accessors.maybe_prefetch_committees(state, ch.epoch) end) + end) + with {:ok, new_store} <- apply_on_block(store, block_info), - {:ok, new_store} <- process_attestations(new_store, attestations), + {:ok, new_store} <- process_attestations(new_store, attestations, states), {:ok, new_store} <- process_attester_slashings(new_store, attester_slashings) do {:ok, new_store} end end + def fetch_checkpoint_state(checkpoint) do + case CheckpointStates.get_checkpoint_state(checkpoint) do + {:ok, state} -> [{checkpoint, state}] + _other -> [] + end + end + defp apply_on_block(store, block_info) do Metrics.span_operation(:on_block, nil, nil, fn -> Handlers.on_block(store, block_info) end) end @@ -226,29 +249,16 @@ defmodule LambdaEthereumConsensus.ForkChoice do end) end - defp process_attestations(store, attestations) do + defp process_attestations(store, attestations, states) do Metrics.span_operation(:attestations, nil, nil, fn -> apply_handler( attestations, store, - &Handlers.on_attestation(&1, &2, true, prefetch_states(attestations)) + &Handlers.on_attestation(&1, &2, true, states) ) end) end - defp prefetch_states(attestations) do - attestations - |> Enum.map(& &1.data.target) - |> Enum.uniq() - |> Enum.flat_map(fn ch -> - case CheckpointStates.get_checkpoint_state(ch) do - {:ok, state} -> [{ch, state}] - _other -> [] - end - end) - |> Map.new() - end - @spec recompute_head(Store.t()) :: :ok def recompute_head(store) do {:ok, head_root} = Head.get_head(store) diff --git a/lib/lambda_ethereum_consensus/state_transition/accessors.ex b/lib/lambda_ethereum_consensus/state_transition/accessors.ex index 6e6a4cf68..3d631c80f 100644 --- a/lib/lambda_ethereum_consensus/state_transition/accessors.ex +++ b/lib/lambda_ethereum_consensus/state_transition/accessors.ex @@ -3,6 +3,7 @@ defmodule LambdaEthereumConsensus.StateTransition.Accessors do Functions accessing the current `BeaconState` """ + require Logger alias LambdaEthereumConsensus.StateTransition.Cache alias LambdaEthereumConsensus.StateTransition.Math alias LambdaEthereumConsensus.StateTransition.Misc @@ -281,7 +282,12 @@ defmodule LambdaEthereumConsensus.StateTransition.Accessors do end @doc """ - Return the number of committees in each slot for the given ``epoch``. + Returns the number of committees in each slot for the given ``epoch``. 
+ + The amount of committees is (using integer division): + active_validator_count / slots_per_epoch / TARGET_COMMITTEE_SIZE + + The amount of committees will be capped between 1 and MAX_COMMITTEES_PER_SLOT. """ @spec get_committee_count_per_slot(BeaconState.t(), Types.epoch()) :: Types.uint64() def get_committee_count_per_slot(%BeaconState{} = state, epoch) do @@ -300,7 +306,16 @@ defmodule LambdaEthereumConsensus.StateTransition.Accessors do end @doc """ - Return the beacon committee at ``slot`` for ``index``. + Returns the beacon committee at ``slot`` for ``index``. + - slot is the one for which the committee is being calculated. Typically the slot of an + attestation. Might be different from the state slot. + - index: the index of the committee within the slot. It's not the committee index, which is the + index of the committee within the epoch. This transformation is done internally. + + The beacon committee returned is a list of global validator indices that should participate in + the requested slot. The order in which the indices are sorted is the same as the one used in + aggregation bits, so checking if the nth member of a committee participated is as simple as + checking if the nth bit is set. """ @spec get_beacon_committee(BeaconState.t(), Types.slot(), Types.commitee_index()) :: {:ok, [Types.validator_index()]} | {:error, String.t()} @@ -327,6 +342,41 @@ defmodule LambdaEthereumConsensus.StateTransition.Accessors do end end + @doc """ + Computes all committees for a single epoch and saves them in the cache. This only happens if the + value is not calculated and if the root for the epoch is available. If any of those conditions + is not true, this function is a noop. + + Arguments: + - state: state used to get active validators, seed and others. Any state that is within the same + epoch is equivalent, as validators are updated in epoch boundaries. + - epoch: epoch for which the committees are calculated. + """ + def maybe_prefetch_committees(state, epoch) do + first_slot = Misc.compute_start_slot_at_epoch(epoch) + + with {:ok, root} <- get_epoch_root(state, epoch), + false <- Cache.present?(:beacon_committee, {first_slot, {0, root}}) do + Logger.info("[Block processing] Computing committees for epoch #{epoch}") + + committees_per_slot = get_committee_count_per_slot(state, epoch) + + Misc.compute_all_committees(state, epoch) + |> Enum.with_index() + |> Enum.each(fn {committee, i} -> + # The how do we know for which slot is a committee + slot = first_slot + div(i, committees_per_slot) + index = rem(i, committees_per_slot) + + Cache.set( + :beacon_committee, + {slot, {index, root}}, + {:ok, committee |> Aja.Enum.to_list()} + ) + end) + end + end + @spec get_base_reward_per_increment(BeaconState.t()) :: Types.gwei() def get_base_reward_per_increment(state) do numerator = ChainSpec.get("EFFECTIVE_BALANCE_INCREMENT") * ChainSpec.get("BASE_REWARD_FACTOR") @@ -505,6 +555,10 @@ defmodule LambdaEthereumConsensus.StateTransition.Accessors do @doc """ Return the set of attesting indices corresponding to ``data`` and ``bits``. + + It computes the committee for the attestation (indices of validators that should participate in + that slot) and then filters the ones that actually participated. It returns an unordered MapSet, + which is useful for checking inclusion, but should be ordered if used to validate an attestation. 
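
  For example (illustrative values only): if the committee for the attestation's slot and
  committee index is [10, 20, 30, 40] and bits 0 and 2 of ``bits`` are set, the result is
  MapSet.new([10, 30]), i.e. the first and third members of the committee.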
""" @spec get_attesting_indices(BeaconState.t(), Types.AttestationData.t(), Types.bitlist()) :: {:ok, MapSet.t()} | {:error, String.t()} diff --git a/lib/lambda_ethereum_consensus/state_transition/cache.ex b/lib/lambda_ethereum_consensus/state_transition/cache.ex index 75d3fa280..68d5844a1 100644 --- a/lib/lambda_ethereum_consensus/state_transition/cache.ex +++ b/lib/lambda_ethereum_consensus/state_transition/cache.ex @@ -73,4 +73,7 @@ defmodule LambdaEthereumConsensus.StateTransition.Cache do match_spec = generate_cleanup_spec(table, key) :ets.select_delete(table, match_spec) end + + def present?(table, key), do: :ets.member(table, key) + def set(table, key, value), do: :ets.insert_new(table, {key, value}) end diff --git a/lib/lambda_ethereum_consensus/state_transition/misc.ex b/lib/lambda_ethereum_consensus/state_transition/misc.ex index a2259121e..1be4d915e 100644 --- a/lib/lambda_ethereum_consensus/state_transition/misc.ex +++ b/lib/lambda_ethereum_consensus/state_transition/misc.ex @@ -5,7 +5,11 @@ defmodule LambdaEthereumConsensus.StateTransition.Misc do import Bitwise require Aja + require Logger + alias LambdaEthereumConsensus.StateTransition.Accessors + alias LambdaEthereumConsensus.StateTransition.Shuffling + alias LambdaEthereumConsensus.Utils alias Types.BeaconState @max_random_byte 2 ** 8 - 1 @@ -180,9 +184,60 @@ defmodule LambdaEthereumConsensus.StateTransition.Misc do <> end + @doc """ + Gets all committees for a single epoch. More efficient than calculating each one, as the shuffling + is done a single time for the whole index list and shared values are reused between committees. + """ + @spec compute_all_committees(BeaconState.t(), Types.epoch()) :: list(Aja.Vector.t()) + def compute_all_committees(state, epoch) do + indices = Accessors.get_active_validator_indices(state, epoch) + index_count = Aja.Vector.size(indices) + seed = Accessors.get_seed(state, epoch, Constants.domain_beacon_attester()) + + shuffled_indices = Shuffling.shuffle_list(indices, seed) |> Aja.Vector.to_list() + + committee_count = + Accessors.get_committee_count_per_slot(state, epoch) * ChainSpec.get("SLOTS_PER_EPOCH") + + committee_sizes = + Enum.map(0..(committee_count - 1), fn index -> + {c_start, c_end} = committee_boundaries(index, index_count, committee_count) + c_end - c_start + 1 + end) + + # separate using sizes. + Utils.chunk_by_sizes(shuffled_indices, committee_sizes) + end + @doc """ Computes the validator indices of the ``committee_index``-th committee at some epoch with ``committee_count`` committees, and for some given ``indices`` and ``seed``. + + Args: + - indices: a full list of all active validator indices for a single epoch. + - seed: for shuffling calculations. + - committee_index: global number representing the order of the requested committee within the + whole epoch. + - committee_count: total amount of committees for the epoch. Useful to determine the start and end + of the requested committee. + + Returns: + - The list of indices for the validators that conform the requested committee. The order is the + same as used in the aggregation bits of an attestation in that committee. + + PERFORMANCE NOTE: + + Instead of shuffling the full index list, it focuses on the positions of the requested committee + and calculates their shuffled index. Because of the symmetric nature of the shuffling algorithm, + looking at the shuffled index position in the index list gives the element that would end up in + the committee if the full list was to be shuffled. 
+ + This is, in logic, equivalent to shuffling the whole validator index list and getting the + elements for the committee under calculation, but only calculating the shuffling for the elements + of the committee. + + While the amount of calculations is smaller than the full shuffling, calling this for every + committee in an epoch is inefficient. For that end, compute_all_committees should be called. """ @spec compute_committee(Aja.Vector.t(), Types.bytes32(), Types.uint64(), Types.uint64()) :: {:error, String.t()} @@ -197,8 +252,9 @@ defmodule LambdaEthereumConsensus.StateTransition.Misc do def compute_committee(indices, seed, committee_index, committee_count) when committee_index < committee_count do index_count = Aja.Vector.size(indices) - committee_start = div(index_count * committee_index, committee_count) - committee_end = div(index_count * (committee_index + 1), committee_count) - 1 + + {committee_start, committee_end} = + committee_boundaries(committee_index, index_count, committee_count) committee_start..committee_end//1 # NOTE: this cannot fail because committee_end < index_count @@ -211,6 +267,20 @@ defmodule LambdaEthereumConsensus.StateTransition.Misc do def compute_committee(_, _, _, _), do: {:error, "Invalid committee index"} + @doc """ + Computes the boundaries of a committee. + + Args: + - committee_index: epoch based committee index. + - index_count: amount of active validators participating in the epoch. + - committee_count: amount of committees that will be formed in the epoch. + """ + def committee_boundaries(committee_index, index_count, committee_count) do + committee_start = div(index_count * committee_index, committee_count) + committee_end = div(index_count * (committee_index + 1), committee_count) - 1 + {committee_start, committee_end} + end + @doc """ Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``. This is used primarily in signature domains to avoid collisions across forks/chains. diff --git a/lib/lambda_ethereum_consensus/state_transition/operations.ex b/lib/lambda_ethereum_consensus/state_transition/operations.ex index 76acd20e9..89447ff14 100644 --- a/lib/lambda_ethereum_consensus/state_transition/operations.ex +++ b/lib/lambda_ethereum_consensus/state_transition/operations.ex @@ -847,10 +847,14 @@ defmodule LambdaEthereumConsensus.StateTransition.Operations do end defp check_matching_aggregation_bits_length(attestation, beacon_committee) do - if BitList.length(attestation.aggregation_bits) == length(beacon_committee) do + aggregation_bits_length = BitList.length(attestation.aggregation_bits) + beacon_committee_length = length(beacon_committee) + + if aggregation_bits_length == beacon_committee_length do :ok else - {:error, "Mismatched aggregation bits length"} + {:error, + "Mismatched length. aggregation_bits: #{aggregation_bits_length}. beacon_committee: #{beacon_committee_length}"} end end diff --git a/lib/lambda_ethereum_consensus/state_transition/shuffling.ex b/lib/lambda_ethereum_consensus/state_transition/shuffling.ex index 67fc091d5..bd15e6080 100644 --- a/lib/lambda_ethereum_consensus/state_transition/shuffling.ex +++ b/lib/lambda_ethereum_consensus/state_transition/shuffling.ex @@ -9,10 +9,24 @@ defmodule LambdaEthereumConsensus.StateTransition.Shuffling do @position_size 4 @doc """ - Performs a full shuffle of a list of indices. - This function is equivalent to running `compute_shuffled_index` for each index in the list. 
- - Shuffling the whole list should be 10-100x faster than shuffling each single item. + Performs a full shuffle of an Aja.Vector, regardless of its values. It's equivalent to: + 1. Iterating over the indexes of the elements. + 2. Calculating the shuffled index with compute_shuffled_index. + 3. Swapping the elements of those indexes. + + In code, it's equivalent to: + + r = Vector.size(list) + 1..r + |> Enum.map(fn i -> + {:ok, j} = compute_shuffled_index(i, r, seed) + Aja.Vector.at!(list, j) + end) + |> Aja.Vector.new() + + However, shuffling the whole list with this function should be 10-100x faster than shuffling each + item separately, as pivots and other structures are reused. To further improve this function, + index calculation could be parallelized. ## Examples iex> shuffled = Shuffling.shuffle_list(Aja.Vector.new(0..99), <<0::32*8>>) diff --git a/lib/lambda_ethereum_consensus/utils.ex b/lib/lambda_ethereum_consensus/utils.ex index 03043ed48..174e0f228 100644 --- a/lib/lambda_ethereum_consensus/utils.ex +++ b/lib/lambda_ethereum_consensus/utils.ex @@ -47,4 +47,27 @@ defmodule LambdaEthereumConsensus.Utils do encoded = binary |> Base.encode16(case: :lower) "0x#{String.slice(encoded, 0, 3)}..#{String.slice(encoded, -4, 4)}" end + + def chunk_by_sizes(enum, sizes), do: chunk_by_sizes(enum, sizes, [], 0, []) + + # No more elements, there may be a leftover chunk to add. + def chunk_by_sizes([], _sizes, chunk, chunk_size, all_chunks) do + if chunk_size > 0 do + [Enum.reverse(chunk) | all_chunks] |> Enum.reverse() + else + Enum.reverse(all_chunks) + end + end + + # No more splits will be done. We just performed a split. + def chunk_by_sizes(enum, [], [], 0, all_chunks), do: [enum | Enum.reverse(all_chunks)] + + def chunk_by_sizes(enum, [size | rem_sizes] = sizes, chunk, chunk_size, all_chunks) do + if chunk_size == size do + chunk_by_sizes(enum, rem_sizes, [], 0, [Enum.reverse(chunk) | all_chunks]) + else + [elem | rem_enum] = enum + chunk_by_sizes(rem_enum, sizes, [elem | chunk], chunk_size + 1, all_chunks) + end + end end diff --git a/lib/types/beacon_chain/attestation.ex b/lib/types/beacon_chain/attestation.ex index 9f9ce26e3..0e5588b93 100644 --- a/lib/types/beacon_chain/attestation.ex +++ b/lib/types/beacon_chain/attestation.ex @@ -2,6 +2,9 @@ defmodule Types.Attestation do @moduledoc """ Struct definition for `AttestationMainnet`. Related definitions in `native/ssz_nif/src/types/`. + + aggregation_bits is a bit list that has the size of a committee. Each individual bit is set if + the validator corresponding to that bit participated in attesting. """ alias LambdaEthereumConsensus.Utils.BitList diff --git a/lib/types/beacon_chain/indexed_attestation.ex b/lib/types/beacon_chain/indexed_attestation.ex index ee94d7511..2fa258522 100644 --- a/lib/types/beacon_chain/indexed_attestation.ex +++ b/lib/types/beacon_chain/indexed_attestation.ex @@ -2,6 +2,14 @@ defmodule Types.IndexedAttestation do @moduledoc """ Struct definition for `IndexedAttestation`. Related definitions in `native/ssz_nif/src/types/`. + + attesting_indices is a list of indices, each one of them spanning from 0 to the amount of + validators in the chain - 1 (it's a global index). Only the validators that participated + are included, so not the full committee is present in the list, and they should be sorted. This + field is the only difference with respect to Types.Attestation. + + To verify an attestation, it needs to be converted to an indexed one (get_indexed_attestation), + with the attesting indices sorted. 
The bls signature can then be used to verify for the result. """ use LambdaEthereumConsensus.Container diff --git a/lib/utils/date.ex b/lib/utils/date.ex new file mode 100644 index 000000000..447d2c4ae --- /dev/null +++ b/lib/utils/date.ex @@ -0,0 +1,16 @@ +defmodule Utils.Date do + @moduledoc """ + Module with date utilities to be shared across scripts and utilities. + """ + alias Timex.Format.DateTime.Formatter + + @doc """ + Returns a human readable string representing the current UTC datetime. Specially useful to + name auto-generated files. + """ + def now_str() do + DateTime.utc_now() + |> Formatter.format!("{YYYY}_{0M}_{0D}__{h24}_{m}_{s}_{ss}") + |> String.replace(".", "") + end +end diff --git a/lib/utils/profile.ex b/lib/utils/profile.ex index c38f31e6a..3f37b0a4c 100644 --- a/lib/utils/profile.ex +++ b/lib/utils/profile.ex @@ -2,7 +2,8 @@ defmodule LambdaEthereumConsensus.Profile do @moduledoc """ Wrappers for profiling using EEP, with easy defaults. """ - alias Timex.Format.DateTime.Formatter + alias Utils.Date + @default_profile_time_millis 300 @doc """ @@ -18,7 +19,7 @@ defmodule LambdaEthereumConsensus.Profile do traces instead of a long one and to inspect them separately. """ def build(opts \\ []) do - trace_name = Keyword.get(opts, :trace_name, now_str()) + trace_name = Keyword.get(opts, :trace_name, Date.now_str()) erlang_trace_name = String.to_charlist(trace_name) profile_time_millis = Keyword.get(opts, :profile_time_millis, @default_profile_time_millis) @@ -30,10 +31,4 @@ defmodule LambdaEthereumConsensus.Profile do File.rm(trace_name <> ".trace") :ok end - - defp now_str() do - DateTime.utc_now() - |> Formatter.format!("{YYYY}_{0M}_{0D}__{h24}_{m}_{s}_{ss}") - |> String.replace(".", "") - end end diff --git a/test/fixtures/block.ex b/test/fixtures/block.ex index 097e6f2bc..074d54ee5 100644 --- a/test/fixtures/block.ex +++ b/test/fixtures/block.ex @@ -6,6 +6,7 @@ defmodule Fixtures.Block do alias Fixtures.Random alias LambdaEthereumConsensus.Utils.BitVector alias Types.BlockInfo + alias Types.StateInfo alias Types.BeaconBlock alias Types.BeaconBlockBody @@ -202,4 +203,14 @@ defmodule Fixtures.Block do historical_summaries: [] } end + + def beacon_state_from_file() do + {:ok, encoded} = + File.read!("test/fixtures/validator/proposer/beacon_state.ssz_snappy") + |> :snappyer.decompress() + + {:ok, decoded} = SszEx.decode(encoded, BeaconState) + {:ok, state_info} = StateInfo.from_beacon_state(decoded) + state_info + end end diff --git a/test/unit/state_transition/misc_test.exs b/test/unit/state_transition/misc_test.exs new file mode 100644 index 000000000..470336c52 --- /dev/null +++ b/test/unit/state_transition/misc_test.exs @@ -0,0 +1,37 @@ +defmodule Unit.StateTransition.MiscTest do + alias Fixtures.Block + alias LambdaEthereumConsensus.StateTransition.Accessors + alias LambdaEthereumConsensus.StateTransition.Misc + alias LambdaEthereumConsensus.Utils.Diff + + use ExUnit.Case + + setup_all do + Application.fetch_env!(:lambda_ethereum_consensus, ChainSpec) + |> Keyword.put(:config, MinimalConfig) + |> then(&Application.put_env(:lambda_ethereum_consensus, ChainSpec, &1)) + end + + test "Calculating all committees for a single epoch should be the same by any method" do + state = Block.beacon_state_from_file().beacon_state + epoch = Accessors.get_current_epoch(state) + committees = Misc.compute_all_committees(state, epoch) + + assert_all_committees_equal(committees, calculate_all_individually(state, epoch)) + end + + defp calculate_all_individually(state, epoch) do + 
committee_count_per_slot = Accessors.get_committee_count_per_slot(state, epoch) + slots_per_epoch = ChainSpec.get("SLOTS_PER_EPOCH") + + for slot <- state.slot..(state.slot + slots_per_epoch - 1), + index <- 0..(committee_count_per_slot - 1) do + Accessors.get_beacon_committee(state, slot, index) + end + end + + defp assert_all_committees_equal(all_committees, all_committees_individual) do + adapted_committees = Enum.map(all_committees, &{:ok, &1}) + assert Diff.diff(adapted_committees, all_committees_individual) == :unchanged + end +end From 4ecf6db24cf4b717a1bdcc467c404e86d731a2c5 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Tue, 6 Aug 2024 22:12:00 -0300 Subject: [PATCH 07/19] refactor: save keystores into validators --- Makefile | 3 +- .../controllers/v1/key_store_controller.ex | 41 +++---- lib/keystore.ex | 26 ++++- .../validator/duties.ex | 31 ++++-- .../validator/setup.ex | 19 ++-- .../validator/validator.ex | 101 +++++++++--------- lib/libp2p_port.ex | 10 ++ 7 files changed, 132 insertions(+), 99 deletions(-) diff --git a/Makefile b/Makefile index bfff0b1e0..1823ebbae 100644 --- a/Makefile +++ b/Makefile @@ -168,8 +168,7 @@ checkpoint-sync: compile-all #▶️ sepolia: @ Run an interactive terminal using sepolia network sepolia: compile-all - iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics --validator-api-port 5056 --keystore-dir "keystore_dir" --keystore-pass-dir "keystore_pass_dir" - + iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics --validator-api-port 5056 #▶️ holesky: @ Run an interactive terminal using holesky network holesky: compile-all iex -S mix run -- --checkpoint-sync-url https://checkpoint-sync.holesky.ethpandaops.io --network holesky diff --git a/lib/key_store_api/controllers/v1/key_store_controller.ex b/lib/key_store_api/controllers/v1/key_store_controller.ex index 3c12075d1..bc3883e52 100644 --- a/lib/key_store_api/controllers/v1/key_store_controller.ex +++ b/lib/key_store_api/controllers/v1/key_store_controller.ex @@ -3,7 +3,7 @@ defmodule KeyStoreApi.V1.KeyStoreController do alias BeaconApi.Utils alias KeyStoreApi.ApiSpec - alias LambdaEthereumConsensus.Validator.ValidatorManager + alias LambdaEthereumConsensus.Libp2pPort plug(OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true) @@ -20,53 +20,54 @@ defmodule KeyStoreApi.V1.KeyStoreController do @spec get_keys(Plug.Conn.t(), any) :: Plug.Conn.t() def get_keys(conn, _params) do - pubkeys_info = - ValidatorManager.get_pubkeys() - |> Enum.map( - &%{ - "validatin_pubkey" => &1 |> Utils.hex_encode(), - "derivation_path" => "m/12381/3600/0/0/0", - "readonly" => true - } - ) - conn |> json(%{ - "data" => pubkeys_info + "data" => + Libp2pPort.get_keystores() + |> Enum.map( + &%{ + "validatin_pubkey" => &1.pubkey |> Utils.hex_encode(), + "derivation_path" => &1.path, + "readonly" => &1.readonly + } + ) }) end @spec add_keys(Plug.Conn.t(), any) :: Plug.Conn.t() def add_keys(conn, _params) do body_params = conn.private.open_api_spex.body_params - config = Application.get_env(:lambda_ethereum_consensus, ValidatorManager, []) + + config = + Application.get_env(:lambda_ethereum_consensus, LambdaEthereumConsensus.Validator.Setup, []) + keystore_dir = Keyword.get(config, :keystore_dir) || @default_keystore_dir keystore_pass_dir = Keyword.get(config, :keystore_pass_dir) || @default_keystore_pass_dir results = Enum.zip(body_params.keystores, body_params.passwords) - |> Enum.map(fn {keystore, password} -> - {pubkey, 
_privkey} = Keystore.decode_str!(keystore, password) + |> Enum.map(fn {keystore_file, password_file} -> + keystore = Keystore.decode_str!(keystore_file, password_file) File.write!( Path.join( keystore_dir, - "#{inspect(pubkey |> Utils.hex_encode())}.json" + "#{inspect(keystore.pubkey |> Utils.hex_encode())}.json" ), - keystore + keystore_file ) File.write!( Path.join( keystore_pass_dir, - "#{inspect(pubkey |> Utils.hex_encode())}.txt" + "#{inspect(keystore.pubkey |> Utils.hex_encode())}.txt" ), - password + password_file ) %{ status: "imported", - message: "Pubkey: #{inspect(pubkey)}" + message: "Pubkey: #{inspect(keystore.pubkey)}" } end) diff --git a/lib/keystore.ex b/lib/keystore.ex index 145a92bdd..f51334b8f 100644 --- a/lib/keystore.ex +++ b/lib/keystore.ex @@ -9,18 +9,36 @@ defmodule Keystore do @iv_size 16 @checksum_message_size 32 - @spec decode_from_files!(Path.t(), Path.t()) :: {Types.bls_pubkey(), Bls.privkey()} + fields = [ + :pubkey, + :privkey, + :path, + :readonly + ] + + @enforce_keys fields + defstruct fields + + @type t() :: %__MODULE__{ + pubkey: Types.bls_pubkey(), + privkey: Bls.privkey(), + path: String.t(), + readonly: boolean() + } + + @spec decode_from_files!(Path.t(), Path.t()) :: t() def decode_from_files!(json, password) do password = File.read!(password) File.read!(json) |> decode_str!(password) end - @spec decode_str!(String.t(), String.t()) :: {Types.bls_pubkey(), Bls.privkey()} + @spec decode_str!(String.t(), String.t()) :: t() def decode_str!(json, password) do decoded_json = Jason.decode!(json) # We only support version 4 (the only one) %{"version" => 4} = decoded_json - validate_empty_path!(decoded_json["path"]) + path = decoded_json["path"] + validate_empty_path!(path) privkey = decrypt!(decoded_json["crypto"], password) @@ -36,7 +54,7 @@ defmodule Keystore do raise("Keystore secret and public keys don't form a valid pair") end - {pubkey, privkey} + %__MODULE__{pubkey: pubkey, privkey: privkey, path: path, readonly: false} end # TODO: support keystore paths diff --git a/lib/lambda_ethereum_consensus/validator/duties.ex b/lib/lambda_ethereum_consensus/validator/duties.ex index ff9b70ff9..5e590fd7b 100644 --- a/lib/lambda_ethereum_consensus/validator/duties.ex +++ b/lib/lambda_ethereum_consensus/validator/duties.ex @@ -4,7 +4,6 @@ defmodule LambdaEthereumConsensus.Validator.Duties do """ alias LambdaEthereumConsensus.StateTransition.Accessors alias LambdaEthereumConsensus.StateTransition.Misc - alias LambdaEthereumConsensus.Validator alias LambdaEthereumConsensus.Validator.Utils alias Types.BeaconState @@ -101,11 +100,11 @@ defmodule LambdaEthereumConsensus.Validator.Duties do end) end - def maybe_update_duties(duties, beacon_state, epoch, validator) do + def maybe_update_duties(duties, beacon_state, epoch, validator_index, privkey) do attester_duties = - maybe_update_attester_duties(duties.attester, beacon_state, epoch, validator) + maybe_update_attester_duties(duties.attester, beacon_state, epoch, validator_index, privkey) - proposer_duties = compute_proposer_duties(beacon_state, epoch, validator.index) + proposer_duties = compute_proposer_duties(beacon_state, epoch, validator_index) # To avoid edge-cases old_duty = case duties.proposer do @@ -116,12 +115,21 @@ defmodule LambdaEthereumConsensus.Validator.Duties do %{duties | attester: attester_duties, proposer: old_duty ++ proposer_duties} end - defp maybe_update_attester_duties([epp, ep0, ep1], beacon_state, epoch, validator) do + defp maybe_update_attester_duties( + [epp, ep0, ep1], + beacon_state, + 
epoch, + validator_index, + privkey + ) do duties = Stream.with_index([ep0, ep1]) |> Enum.map(fn - {:not_computed, i} -> compute_attester_duties(beacon_state, epoch + i, validator) - {d, _} -> d + {:not_computed, i} -> + compute_attester_duties(beacon_state, epoch + i, validator_index, privkey) + + {d, _} -> + d end) [epp | duties] @@ -138,11 +146,12 @@ defmodule LambdaEthereumConsensus.Validator.Duties do @spec compute_attester_duties( beacon_state :: BeaconState.t(), epoch :: Types.epoch(), - validator :: Validator.validator() + validator_index :: non_neg_integer(), + privkey :: Bls.privkey() ) :: attester_duty() | nil - defp compute_attester_duties(beacon_state, epoch, validator) do + defp compute_attester_duties(beacon_state, epoch, validator_index, privkey) do # Can't fail - {:ok, duty} = get_committee_assignment(beacon_state, epoch, validator.index) + {:ok, duty} = get_committee_assignment(beacon_state, epoch, validator_index) case duty do nil -> @@ -151,7 +160,7 @@ defmodule LambdaEthereumConsensus.Validator.Duties do duty -> duty |> Map.put(:attested?, false) - |> update_with_aggregation_duty(beacon_state, validator.privkey) + |> update_with_aggregation_duty(beacon_state, privkey) |> update_with_subnet_id(beacon_state, epoch) end end diff --git a/lib/lambda_ethereum_consensus/validator/setup.ex b/lib/lambda_ethereum_consensus/validator/setup.ex index d07469dcf..00206e312 100644 --- a/lib/lambda_ethereum_consensus/validator/setup.ex +++ b/lib/lambda_ethereum_consensus/validator/setup.ex @@ -15,11 +15,6 @@ defmodule LambdaEthereumConsensus.Validator.Setup do setup_validators(slot, head_root, keystore_dir, keystore_pass_dir) end - def get_pubkeys(), do: GenServer.call(__MODULE__, :get_pubkeys) - - def handle_call(:get_pubkeys, _from, [] = validators), do: {:reply, validators, validators} - def handle_call(:get_pubkeys, _from, validators), do: {:reply, Map.keys(validators), validators} - defp setup_validators(_s, _r, keystore_dir, keystore_pass_dir) when is_nil(keystore_dir) or is_nil(keystore_pass_dir) do Logger.warning( @@ -30,12 +25,12 @@ defmodule LambdaEthereumConsensus.Validator.Setup do end defp setup_validators(slot, head_root, keystore_dir, keystore_pass_dir) do - validator_keys = decode_validator_keys(keystore_dir, keystore_pass_dir) + validator_keystores = decode_validator_keystores(keystore_dir, keystore_pass_dir) validators = - validator_keys - |> Enum.map(fn {pubkey, privkey} -> - {pubkey, Validator.new({slot, head_root, {pubkey, privkey}})} + validator_keystores + |> Enum.map(fn keystore -> + {keystore.pubkey, Validator.new({slot, head_root, keystore})} end) |> Map.new() @@ -45,14 +40,14 @@ defmodule LambdaEthereumConsensus.Validator.Setup do end @doc """ - Get validator keys from the keystore directory. + Get validator keystores from the keystore directory. 
This function expects two files for each validator: - /.json - /.txt """ - @spec decode_validator_keys(binary(), binary()) :: + @spec decode_validator_keystores(binary(), binary()) :: list({Bls.pubkey(), Bls.privkey()}) - def decode_validator_keys(keystore_dir, keystore_pass_dir) + def decode_validator_keystores(keystore_dir, keystore_pass_dir) when is_binary(keystore_dir) and is_binary(keystore_pass_dir) do File.ls!(keystore_dir) |> Enum.map(fn filename -> diff --git a/lib/lambda_ethereum_consensus/validator/validator.ex b/lib/lambda_ethereum_consensus/validator/validator.ex index fa43825a0..af9ed737f 100644 --- a/lib/lambda_ethereum_consensus/validator/validator.ex +++ b/lib/lambda_ethereum_consensus/validator/validator.ex @@ -9,7 +9,8 @@ defmodule LambdaEthereumConsensus.Validator do :root, :epoch, :duties, - :validator, + :index, + :keystore, :payload_builder ] @@ -31,12 +32,6 @@ defmodule LambdaEthereumConsensus.Validator do @default_graffiti_message "Lambda, so gentle, so good" - @type validator :: %{ - index: non_neg_integer() | nil, - pubkey: Bls.pubkey(), - privkey: Bls.privkey() - } - # TODO: Slot and Root are redundant, we should also have the duties separated and calculated # just at the begining of every epoch, and then just update them as needed. @type state :: %__MODULE__{ @@ -44,22 +39,20 @@ defmodule LambdaEthereumConsensus.Validator do epoch: Types.epoch(), root: Types.root(), duties: Duties.duties(), - validator: validator(), + index: non_neg_integer() | nil, + keystore: Keystore.t(), payload_builder: {Types.slot(), Types.root(), BlockBuilder.payload_id()} | nil } - @spec new({Types.slot(), Types.root(), {Bls.pubkey(), Bls.privkey()}}) :: state - def new({head_slot, head_root, {pubkey, privkey}}) do + @spec new({Types.slot(), Types.root(), Keystore.t()}) :: state + def new({head_slot, head_root, keystore}) do state = %__MODULE__{ slot: head_slot, epoch: Misc.compute_epoch_at_slot(head_slot), root: head_root, duties: Duties.empty_duties(), - validator: %{ - pubkey: pubkey, - privkey: privkey, - index: nil - }, + index: nil, + keystore: keystore, payload_builder: nil } @@ -81,17 +74,25 @@ defmodule LambdaEthereumConsensus.Validator do epoch = Misc.compute_epoch_at_slot(slot) beacon = fetch_target_state(epoch, root) - case fetch_validator_index(beacon, state.validator) do + case fetch_validator_index(beacon, state.keystore.pubkey) do nil -> nil validator_index -> log_info(validator_index, "setup validator", slot: slot, root: root) - validator = %{state.validator | index: validator_index} - duties = Duties.maybe_update_duties(state.duties, beacon, epoch, validator) + + duties = + Duties.maybe_update_duties( + state.duties, + beacon, + epoch, + validator_index, + state.keystore.privkey + ) + join_subnets_for_duties(duties) Duties.log_duties(duties, validator_index) - %{state | duties: duties, validator: validator} + %{state | duties: duties, index: validator_index} end end @@ -106,7 +107,7 @@ defmodule LambdaEthereumConsensus.Validator do end def handle_new_head(slot, head_root, state) do - log_debug(state.validator.index, "recieved new head", slot: slot, root: head_root) + log_debug(state.index, "recieved new head", slot: slot, root: head_root) # TODO: this doesn't take into account reorgs state @@ -122,7 +123,7 @@ defmodule LambdaEthereumConsensus.Validator do end def handle_tick({slot, :first_third}, state) do - log_debug(state.validator.index, "started first third", slot: slot) + log_debug(state.index, "started first third", slot: slot) # Here we may: # 1. 
propose our blocks # 2. (TODO) start collecting attestations for aggregation @@ -131,7 +132,7 @@ defmodule LambdaEthereumConsensus.Validator do end def handle_tick({slot, :second_third}, state) do - log_debug(state.validator.index, "started second third", slot: slot) + log_debug(state.index, "started second third", slot: slot) # Here we may: # 1. send our attestation for an empty slot # 2. start building a payload @@ -141,7 +142,7 @@ defmodule LambdaEthereumConsensus.Validator do end def handle_tick({slot, :last_third}, state) do - log_debug(state.validator.index, "started last third", slot: slot) + log_debug(state.index, "started last third", slot: slot) # Here we may publish our attestation aggregate maybe_publish_aggregate(state, slot) end @@ -175,10 +176,10 @@ defmodule LambdaEthereumConsensus.Validator do new_duties = Duties.shift_duties(state.duties, epoch, last_epoch) - |> Duties.maybe_update_duties(new_beacon, epoch, state.validator) + |> Duties.maybe_update_duties(new_beacon, epoch, state.index, state.keystore.privkey) move_subnets(state.duties, new_duties) - Duties.log_duties(new_duties, state.validator.index) + Duties.log_duties(new_duties, state.index) %{state | slot: slot, root: head_root, duties: new_duties, epoch: epoch} end @@ -238,35 +239,35 @@ defmodule LambdaEthereumConsensus.Validator do end @spec attest(state, Duties.attester_duty()) :: :ok - defp attest(%{validator: validator} = state, current_duty) do + defp attest(%{index: validator_index, keystore: keystore} = state, current_duty) do subnet_id = current_duty.subnet_id - log_debug(validator.index, "attesting", slot: current_duty.slot, subnet_id: subnet_id) + log_debug(validator_index, "attesting", slot: current_duty.slot, subnet_id: subnet_id) - attestation = produce_attestation(current_duty, state.root, state.validator.privkey) + attestation = produce_attestation(current_duty, state.root, keystore.privkey) log_md = [slot: attestation.data.slot, attestation: attestation, subnet_id: subnet_id] debug_log_msg = - "publishing attestation on committee index: #{current_duty.committee_index} | as #{current_duty.index_in_committee}/#{current_duty.committee_length - 1} and pubkey: #{LambdaEthereumConsensus.Utils.format_shorten_binary(validator.pubkey)}" + "publishing attestation on committee index: #{current_duty.committee_index} | as #{current_duty.index_in_committee}/#{current_duty.committee_length - 1} and pubkey: #{LambdaEthereumConsensus.Utils.format_shorten_binary(keystore.pubkey)}" - log_debug(validator.index, debug_log_msg, log_md) + log_debug(validator_index, debug_log_msg, log_md) Gossip.Attestation.publish(subnet_id, attestation) - |> log_info_result(validator.index, "published attestation", log_md) + |> log_info_result(validator_index, "published attestation", log_md) if current_duty.should_aggregate? 
do - log_debug(validator.index, "collecting for future aggregation", log_md) + log_debug(validator_index, "collecting for future aggregation", log_md) Gossip.Attestation.collect(subnet_id, attestation) - |> log_debug_result(validator.index, "collected attestation", log_md) + |> log_debug_result(validator_index, "collected attestation", log_md) end end # We publish our aggregate on the next slot, and when we're an aggregator - defp maybe_publish_aggregate(%{validator: validator} = state, slot) do + defp maybe_publish_aggregate(%{index: validator_index, keystore: keystore} = state, slot) do case Duties.get_current_attester_duty(state.duties, slot) do %{should_aggregate?: true} = duty -> - publish_aggregate(duty, validator) + publish_aggregate(duty, validator_index, keystore) new_duties = Duties.replace_attester_duty(state.duties, duty, %{duty | should_aggregate?: false}) @@ -278,20 +279,20 @@ defmodule LambdaEthereumConsensus.Validator do end end - defp publish_aggregate(duty, validator) do + defp publish_aggregate(duty, validator_index, keystore) do case Gossip.Attestation.stop_collecting(duty.subnet_id) do {:ok, attestations} -> log_md = [slot: duty.slot, attestations: attestations] - log_debug(validator.index, "publishing aggregate", log_md) + log_debug(validator_index, "publishing aggregate", log_md) aggregate_attestations(attestations) - |> append_proof(duty.selection_proof, validator) - |> append_signature(duty.signing_domain, validator) + |> append_proof(duty.selection_proof, validator_index) + |> append_signature(duty.signing_domain, keystore) |> Gossip.Attestation.publish_aggregate() - |> log_info_result(validator.index, "published aggregate", log_md) + |> log_info_result(validator_index, "published aggregate", log_md) {:error, reason} -> - log_error(validator.index, "stop collecting attestations", reason) + log_error(validator_index, "stop collecting attestations", reason) :ok end end @@ -311,9 +312,9 @@ defmodule LambdaEthereumConsensus.Validator do %{List.first(attestations) | aggregation_bits: aggregation_bits, signature: signature} end - defp append_proof(aggregate, proof, validator) do + defp append_proof(aggregate, proof, validator_index) do %Types.AggregateAndProof{ - aggregator_index: validator.index, + aggregator_index: validator_index, aggregate: aggregate, selection_proof: proof } @@ -378,9 +379,9 @@ defmodule LambdaEthereumConsensus.Validator do BlockStates.get_state_info!(parent_root).beacon_state |> go_to_slot(slot) end - @spec fetch_validator_index(Types.BeaconState.t(), validator()) :: + @spec fetch_validator_index(Types.BeaconState.t(), Bls.privkey()) :: non_neg_integer() | nil - defp fetch_validator_index(beacon, %{index: nil, pubkey: pk}) do + defp fetch_validator_index(beacon, pk) do Enum.find_index(beacon.validators, &(&1.pubkey == pk)) end @@ -399,18 +400,18 @@ defmodule LambdaEthereumConsensus.Validator do defp start_payload_builder(%{payload_builder: {slot, root, _}} = state, slot, root), do: state - defp start_payload_builder(%{validator: validator} = state, proposed_slot, head_root) do + defp start_payload_builder(%{index: validator_index} = state, proposed_slot, head_root) do # TODO: handle reorgs and late blocks - log_debug(validator.index, "starting building payload for slot #{proposed_slot}") + log_debug(validator_index, "starting building payload for slot #{proposed_slot}") case BlockBuilder.start_building_payload(proposed_slot, head_root) do {:ok, payload_id} -> - log_info(validator.index, "payload built for slot #{proposed_slot}") + 
log_info(validator_index, "payload built for slot #{proposed_slot}") %{state | payload_builder: {proposed_slot, head_root, payload_id}} {:error, reason} -> - log_error(validator.index, "start building payload for slot #{proposed_slot}", reason) + log_error(validator_index, "start building payload for slot #{proposed_slot}", reason) %{state | payload_builder: nil} end @@ -460,7 +461,7 @@ defmodule LambdaEthereumConsensus.Validator do # TODO: at least in kurtosis there are blocks that are proposed without a payload apparently, must investigate. defp propose(%{payload_builder: nil} = state, _proposed_slot) do - log_error(state.validator.index, "propose block", "lack of execution payload") + log_error(state.index, "propose block", "lack of execution payload") state end diff --git a/lib/libp2p_port.ex b/lib/libp2p_port.ex index 47f86eaa8..184881bfd 100644 --- a/lib/libp2p_port.ex +++ b/lib/libp2p_port.ex @@ -334,6 +334,8 @@ defmodule LambdaEthereumConsensus.Libp2pPort do cast_command(pid, {:update_enr, enr}) end + def get_keystores(), do: GenServer.call(__MODULE__, :get_keystores) + @spec join_init_topics(port()) :: :ok | {:error, String.t()} defp join_init_topics(port) do topics = [BeaconBlock.topic()] ++ BlobSideCar.topics() @@ -530,6 +532,14 @@ defmodule LambdaEthereumConsensus.Libp2pPort do {:noreply, state} end + @impl GenServer + def handle_call(:get_keystores, _from, %{validators: []} = state), + do: {:reply, [], state} + + @impl GenServer + def handle_call(:get_keystores, _from, %{validators: validators} = state), + do: {:reply, Enum.map(validators, fn {_pk, validator} -> validator.keystore end), state} + ###################### ### PRIVATE FUNCTIONS ###################### From b597427cd9afbcfd4cd24e300afd7a416e493517 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Wed, 7 Aug 2024 10:50:58 -0300 Subject: [PATCH 08/19] feat: add delete endpoint --- .../controllers/v1/key_store_controller.ex | 45 +++++++++++++++++++ lib/key_store_api/router.ex | 1 + lib/libp2p_port.ex | 24 +++++++++- test/unit/keystore_test.exs | 12 +++-- 4 files changed, 77 insertions(+), 5 deletions(-) diff --git a/lib/key_store_api/controllers/v1/key_store_controller.ex b/lib/key_store_api/controllers/v1/key_store_controller.ex index bc3883e52..50da08064 100644 --- a/lib/key_store_api/controllers/v1/key_store_controller.ex +++ b/lib/key_store_api/controllers/v1/key_store_controller.ex @@ -18,6 +18,9 @@ defmodule KeyStoreApi.V1.KeyStoreController do def open_api_operation(:add_keys), do: ApiSpec.spec().paths["/eth/v1/keystores"].post + def open_api_operation(:delete_keys), + do: ApiSpec.spec().paths["/eth/v1/keystores"].delete + @spec get_keys(Plug.Conn.t(), any) :: Plug.Conn.t() def get_keys(conn, _params) do conn @@ -65,6 +68,8 @@ defmodule KeyStoreApi.V1.KeyStoreController do password_file ) + Libp2pPort.add_validator(keystore) + %{ status: "imported", message: "Pubkey: #{inspect(keystore.pubkey)}" @@ -76,4 +81,44 @@ defmodule KeyStoreApi.V1.KeyStoreController do "data" => results }) end + + @spec delete_keys(Plug.Conn.t(), any) :: Plug.Conn.t() + def delete_keys(conn, _params) do + body_params = conn.private.open_api_spex.body_params + + config = + Application.get_env(:lambda_ethereum_consensus, LambdaEthereumConsensus.Validator.Setup, []) + + keystore_dir = Keyword.get(config, :keystore_dir) || @default_keystore_dir + keystore_pass_dir = Keyword.get(config, :keystore_pass_dir) || @default_keystore_pass_dir + + results = + Enum.map(body_params.pubkeys, fn pubkey -> + :ok = Libp2pPort.delete_validator(pubkey) + + 
File.rm!( + Path.join( + keystore_dir, + "#{inspect(pubkey |> Utils.hex_encode())}.json" + ) + ) + + File.rm!( + Path.join( + keystore_pass_dir, + "#{inspect(pubkey |> Utils.hex_encode())}.txt" + ) + ) + + %{ + status: "deleted", + message: "Pubkey: #{inspect(pubkey)}" + } + end) + + conn + |> json(%{ + "data" => results + }) + end end diff --git a/lib/key_store_api/router.ex b/lib/key_store_api/router.ex index b4d3c02de..f840d43f8 100644 --- a/lib/key_store_api/router.ex +++ b/lib/key_store_api/router.ex @@ -13,6 +13,7 @@ defmodule KeyStoreApi.Router do scope "/keystores" do get("/", KeyStoreController, :get_keys) post("/", KeyStoreController, :add_keys) + delete("/", KeyStoreController, :delete_keys) end end diff --git a/lib/libp2p_port.ex b/lib/libp2p_port.ex index 184881bfd..c8947b8f1 100644 --- a/lib/libp2p_port.ex +++ b/lib/libp2p_port.ex @@ -84,7 +84,7 @@ defmodule LambdaEthereumConsensus.Libp2pPort do discovery_addresses: [String.t()] } - @sync_delay_millis 10_000 + @sync_delay_millis 20_000 ###################### ### API @@ -334,8 +334,15 @@ defmodule LambdaEthereumConsensus.Libp2pPort do cast_command(pid, {:update_enr, enr}) end + @spec get_keystores() :: list(Keystore.t()) def get_keystores(), do: GenServer.call(__MODULE__, :get_keystores) + @spec delete_validator(Bls.pubkey()) :: :ok + def delete_validator(pubkey), do: GenServer.call(__MODULE__, {:delete_validator, pubkey}) + + @spec add_validator(Keystore.t()) :: :ok + def add_validator(keystore), do: GenServer.call(__MODULE__, {:add_validator, keystore}) + @spec join_init_topics(port()) :: :ok | {:error, String.t()} defp join_init_topics(port) do topics = [BeaconBlock.topic()] ++ BlobSideCar.topics() @@ -540,6 +547,21 @@ defmodule LambdaEthereumConsensus.Libp2pPort do def handle_call(:get_keystores, _from, %{validators: validators} = state), do: {:reply, Enum.map(validators, fn {_pk, validator} -> validator.keystore end), state} + @impl GenServer + def handle_call({:delete_validator, pk}, _from, %{validators: validators} = state), + do: {:reply, :ok, %{state | validators: Map.delete(validators, pk)}} + + @impl GenServer + def handle_call({:add_validator, keystore}, _from, %{validators: validators} = state) do + # TODO: HANDLE REPEATED VALIDATORS + {:reply, :ok, + %{ + state + | validators: + Map.put(validators, keystore.pubkey, Validator.new({0, <<0::256>>, keystore})) + }} + end + ###################### ### PRIVATE FUNCTIONS ###################### diff --git a/test/unit/keystore_test.exs b/test/unit/keystore_test.exs index 639776366..1d4f40c92 100644 --- a/test/unit/keystore_test.exs +++ b/test/unit/keystore_test.exs @@ -72,7 +72,8 @@ defmodule Unit.KeystoreTest do }) test "eip scrypt test vector" do - {pubkey, privkey} = Keystore.decode_str!(@scrypt_json, @eip_password) + %Keystore{pubkey: pubkey, privkey: privkey, path: _path} = + Keystore.decode_str!(@scrypt_json, @eip_password) assert privkey == @eip_secret assert pubkey == @pubkey @@ -83,7 +84,8 @@ defmodule Unit.KeystoreTest do end test "eip pbkdf2 test vector" do - {pubkey, privkey} = Keystore.decode_str!(@pbkdf2_json, @eip_password) + %Keystore{pubkey: pubkey, privkey: privkey, path: _path} = + Keystore.decode_str!(@pbkdf2_json, @eip_password) assert privkey == @eip_secret assert pubkey == @pubkey @@ -99,7 +101,8 @@ defmodule Unit.KeystoreTest do |> Map.delete("pubkey") |> Jason.encode!() - {pubkey, privkey} = Keystore.decode_str!(scrypt_json, @eip_password) + %Keystore{pubkey: pubkey, privkey: privkey, path: _path} = + Keystore.decode_str!(scrypt_json, 
@eip_password) assert privkey == @eip_secret assert pubkey == @pubkey @@ -115,7 +118,8 @@ defmodule Unit.KeystoreTest do |> Map.delete("pubkey") |> Jason.encode!() - {pubkey, privkey} = Keystore.decode_str!(pbkdf2_json, @eip_password) + %Keystore{pubkey: pubkey, privkey: privkey, path: _path} = + Keystore.decode_str!(pbkdf2_json, @eip_password) assert privkey == @eip_secret assert pubkey == @pubkey From 52003fb9256c4fc28539bc44d6694faa685abd60 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Wed, 7 Aug 2024 17:00:42 -0300 Subject: [PATCH 09/19] fix: delete/add endpoints --- lib/beacon_api/utils.ex | 6 +++ .../controllers/v1/key_store_controller.ex | 52 +++++++++++-------- .../validator/setup.ex | 2 + lib/libp2p_port.ex | 23 ++++++-- 4 files changed, 57 insertions(+), 26 deletions(-) diff --git a/lib/beacon_api/utils.ex b/lib/beacon_api/utils.ex index 4f510eb12..df0f3fde0 100644 --- a/lib/beacon_api/utils.ex +++ b/lib/beacon_api/utils.ex @@ -31,6 +31,12 @@ defmodule BeaconApi.Utils do "0x" <> Base.encode16(binary, case: :lower) end + def hex_decode("0x" <> binary) do + with {:ok, decoded} <- Base.decode16(binary, case: :lower) do + decoded + end + end + defp to_json(attribute, module) when is_struct(attribute) do module.schema() |> Enum.map(fn {k, schema} -> diff --git a/lib/key_store_api/controllers/v1/key_store_controller.ex b/lib/key_store_api/controllers/v1/key_store_controller.ex index 50da08064..3a3a92992 100644 --- a/lib/key_store_api/controllers/v1/key_store_controller.ex +++ b/lib/key_store_api/controllers/v1/key_store_controller.ex @@ -5,6 +5,8 @@ defmodule KeyStoreApi.V1.KeyStoreController do alias KeyStoreApi.ApiSpec alias LambdaEthereumConsensus.Libp2pPort + require Logger + plug(OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true) @default_keystore_dir "keystore_dir" @@ -52,10 +54,12 @@ defmodule KeyStoreApi.V1.KeyStoreController do |> Enum.map(fn {keystore_file, password_file} -> keystore = Keystore.decode_str!(keystore_file, password_file) + base_name = keystore.pubkey |> Utils.hex_encode() + File.write!( Path.join( keystore_dir, - "#{inspect(keystore.pubkey |> Utils.hex_encode())}.json" + base_name <> ".json" ), keystore_file ) @@ -63,7 +67,7 @@ defmodule KeyStoreApi.V1.KeyStoreController do File.write!( Path.join( keystore_pass_dir, - "#{inspect(keystore.pubkey |> Utils.hex_encode())}.txt" + base_name <> ".txt" ), password_file ) @@ -94,26 +98,30 @@ defmodule KeyStoreApi.V1.KeyStoreController do results = Enum.map(body_params.pubkeys, fn pubkey -> - :ok = Libp2pPort.delete_validator(pubkey) - - File.rm!( - Path.join( - keystore_dir, - "#{inspect(pubkey |> Utils.hex_encode())}.json" - ) - ) - - File.rm!( - Path.join( - keystore_pass_dir, - "#{inspect(pubkey |> Utils.hex_encode())}.txt" - ) - ) - - %{ - status: "deleted", - message: "Pubkey: #{inspect(pubkey)}" - } + case Libp2pPort.delete_validator(pubkey |> Utils.hex_decode()) do + :ok -> + File.rm!( + Path.join( + keystore_dir, + pubkey <> ".json" + ) + ) + + File.rm!( + Path.join( + keystore_pass_dir, + pubkey <> ".txt" + ) + ) + + %{ + status: "deleted", + message: "Pubkey: #{inspect(pubkey)}" + } + + {:error, reason} -> + Logger.error("[Keystore] Error removing key. 
Reason: #{reason}") + end end) conn diff --git a/lib/lambda_ethereum_consensus/validator/setup.ex b/lib/lambda_ethereum_consensus/validator/setup.ex index 00206e312..630bc96b7 100644 --- a/lib/lambda_ethereum_consensus/validator/setup.ex +++ b/lib/lambda_ethereum_consensus/validator/setup.ex @@ -57,6 +57,8 @@ defmodule LambdaEthereumConsensus.Validator.Setup do keystore_file = Path.join(keystore_dir, "#{base_name}.json") keystore_pass_file = Path.join(keystore_pass_dir, "#{base_name}.txt") + IO.inspect("KEYSTORE_FILE: #{inspect(keystore_file)}") + IO.inspect("KEYSTORE_PASS_FILE: #{inspect(keystore_pass_file)}") {keystore_file, keystore_pass_file} else Logger.warning("[Validator] Skipping file: #{filename}. Not a keystore file.") diff --git a/lib/libp2p_port.ex b/lib/libp2p_port.ex index c8947b8f1..1c2ecbf4c 100644 --- a/lib/libp2p_port.ex +++ b/lib/libp2p_port.ex @@ -337,7 +337,7 @@ defmodule LambdaEthereumConsensus.Libp2pPort do @spec get_keystores() :: list(Keystore.t()) def get_keystores(), do: GenServer.call(__MODULE__, :get_keystores) - @spec delete_validator(Bls.pubkey()) :: :ok + @spec delete_validator(Bls.pubkey()) :: :ok | {:error, String.t()} def delete_validator(pubkey), do: GenServer.call(__MODULE__, {:delete_validator, pubkey}) @spec add_validator(Keystore.t()) :: :ok @@ -548,17 +548,32 @@ defmodule LambdaEthereumConsensus.Libp2pPort do do: {:reply, Enum.map(validators, fn {_pk, validator} -> validator.keystore end), state} @impl GenServer - def handle_call({:delete_validator, pk}, _from, %{validators: validators} = state), - do: {:reply, :ok, %{state | validators: Map.delete(validators, pk)}} + def handle_call({:delete_validator, pk}, _from, %{validators: validators} = state) do + case Map.fetch(validators, pk) do + {:ok, validator} -> + Logger.warning("[Libp2pPort] Deleting validator with index #{inspect(validator.index)}.") + + {:reply, :ok, %{state | validators: Map.delete(validators, pk)}} + + :error -> + {:error, "Pubkey #{inspect(pk)} not found."} + end + end @impl GenServer def handle_call({:add_validator, keystore}, _from, %{validators: validators} = state) do # TODO: HANDLE REPEATED VALIDATORS + current_status = ForkChoice.get_current_status_message() + {:reply, :ok, %{ state | validators: - Map.put(validators, keystore.pubkey, Validator.new({0, <<0::256>>, keystore})) + Map.put( + validators, + keystore.pubkey, + Validator.new({current_status.head_slot, current_status.head_root, keystore}) + ) }} end From c45cc5648d8dd9bcf2bd7b67ea9c72ca9a419589 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Wed, 7 Aug 2024 18:19:28 -0300 Subject: [PATCH 10/19] fix: try fix ci --- lib/keystore.ex | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystore.ex b/lib/keystore.ex index f51334b8f..92095ce21 100644 --- a/lib/keystore.ex +++ b/lib/keystore.ex @@ -20,7 +20,7 @@ defmodule Keystore do defstruct fields @type t() :: %__MODULE__{ - pubkey: Types.bls_pubkey(), + pubkey: Bls.pubkey(), privkey: Bls.privkey(), path: String.t(), readonly: boolean() From 31edacb3eda39ae68068ff15432d1a7940683c58 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Wed, 7 Aug 2024 23:22:20 -0300 Subject: [PATCH 11/19] fix: test --- .../fork_choice/fork_choice.ex | 46 +++++++++---------- .../validator/validator.ex | 17 +++---- network_params.yaml | 4 +- 3 files changed, 34 insertions(+), 33 deletions(-) diff --git a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex index 6e8ccd844..103fb98a5 100644 --- 
a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex +++ b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex @@ -11,11 +11,11 @@ defmodule LambdaEthereumConsensus.ForkChoice do alias LambdaEthereumConsensus.Metrics alias LambdaEthereumConsensus.P2P.Gossip.OperationsCollector alias LambdaEthereumConsensus.StateTransition.Misc - alias LambdaEthereumConsensus.Store.BlobDb - alias LambdaEthereumConsensus.Store.BlockDb + # alias LambdaEthereumConsensus.Store.BlobDb + # alias LambdaEthereumConsensus.Store.BlockDb alias LambdaEthereumConsensus.Store.Blocks alias LambdaEthereumConsensus.Store.CheckpointStates - alias LambdaEthereumConsensus.Store.StateDb + # alias LambdaEthereumConsensus.Store.StateDb alias LambdaEthereumConsensus.Store.StoreDb alias Types.Attestation alias Types.BlockInfo @@ -174,26 +174,26 @@ defmodule LambdaEthereumConsensus.ForkChoice do ### Private Functions ########################## - defp prune_old_states(last_finalized_epoch, new_finalized_epoch) do - if last_finalized_epoch < new_finalized_epoch do - new_finalized_slot = - new_finalized_epoch * ChainSpec.get("SLOTS_PER_EPOCH") - - Task.Supervisor.start_child( - PruneStatesSupervisor, - fn -> StateDb.prune_states_older_than(new_finalized_slot) end - ) - - Task.Supervisor.start_child( - PruneBlocksSupervisor, - fn -> BlockDb.prune_blocks_older_than(new_finalized_slot) end - ) - - Task.Supervisor.start_child( - PruneBlobsSupervisor, - fn -> BlobDb.prune_old_blobs(new_finalized_slot) end - ) - end + defp prune_old_states(_last_finalized_epoch, _new_finalized_epoch) do + # if last_finalized_epoch < new_finalized_epoch do + # new_finalized_slot = + # new_finalized_epoch * ChainSpec.get("SLOTS_PER_EPOCH") + + # Task.Supervisor.start_child( + # PruneStatesSupervisor, + # fn -> StateDb.prune_states_older_than(new_finalized_slot) end + # ) + + # Task.Supervisor.start_child( + # PruneBlocksSupervisor, + # fn -> BlockDb.prune_blocks_older_than(new_finalized_slot) end + # ) + + # Task.Supervisor.start_child( + # PruneBlobsSupervisor, + # fn -> BlobDb.prune_old_blobs(new_finalized_slot) end + # ) + # end end def apply_handler(iter, state, handler) do diff --git a/lib/lambda_ethereum_consensus/validator/validator.ex b/lib/lambda_ethereum_consensus/validator/validator.ex index af9ed737f..9aeee3385 100644 --- a/lib/lambda_ethereum_consensus/validator/validator.ex +++ b/lib/lambda_ethereum_consensus/validator/validator.ex @@ -428,32 +428,33 @@ defmodule LambdaEthereumConsensus.Validator do defp propose( %{ root: head_root, - validator: validator, - payload_builder: {proposed_slot, head_root, payload_id} + index: validator_index, + payload_builder: {proposed_slot, head_root, payload_id}, + keystore: keystore } = state, proposed_slot ) do - log_debug(validator.index, "building block", slot: proposed_slot) + log_debug(validator_index, "building block", slot: proposed_slot) build_result = BlockBuilder.build_block( %BuildBlockRequest{ slot: proposed_slot, parent_root: head_root, - proposer_index: validator.index, + proposer_index: validator_index, graffiti_message: @default_graffiti_message, - privkey: validator.privkey + privkey: keystore.privkey }, payload_id ) case build_result do {:ok, {signed_block, blob_sidecars}} -> - publish_block(validator.index, signed_block) - Enum.each(blob_sidecars, &publish_sidecar(validator.index, &1)) + publish_block(validator_index, signed_block) + Enum.each(blob_sidecars, &publish_sidecar(validator_index, &1)) {:error, reason} -> - log_error(validator.index, "build block", reason, slot: 
proposed_slot) + log_error(validator_index, "build block", reason, slot: proposed_slot) end %{state | payload_builder: nil} diff --git a/network_params.yaml b/network_params.yaml index 102796866..5c564f285 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -2,12 +2,12 @@ participants: - el_type: geth cl_type: lighthouse count: 2 - validator_count: 32 + validator_count: 1 - el_type: geth cl_type: lambda cl_image: lambda_ethereum_consensus:latest use_separate_vc: false count: 1 - validator_count: 32 + validator_count: 63 cl_max_mem: 4096 keymanager_enabled: true From f0f352432a164d9b88328776af51cba7d8e6e630 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Thu, 8 Aug 2024 00:14:27 -0300 Subject: [PATCH 12/19] refactor: enhace readability --- .../controllers/v1/key_store_controller.ex | 51 ++++--------------- lib/keystore.ex | 14 +++++ lib/libp2p_port.ex | 6 ++- 3 files changed, 27 insertions(+), 44 deletions(-) diff --git a/lib/key_store_api/controllers/v1/key_store_controller.ex b/lib/key_store_api/controllers/v1/key_store_controller.ex index 3a3a92992..e979d70b3 100644 --- a/lib/key_store_api/controllers/v1/key_store_controller.ex +++ b/lib/key_store_api/controllers/v1/key_store_controller.ex @@ -9,8 +9,8 @@ defmodule KeyStoreApi.V1.KeyStoreController do plug(OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true) - @default_keystore_dir "keystore_dir" - @default_keystore_pass_dir "keystore_pass_dir" + @keystore_dir Keystore.get_keystore_dir() + @keystore_pass_dir Keystore.get_keystore_pass_dir() # NOTE: this function is required by OpenApiSpex, and should return the information # of each specific endpoint. We just return the specific entry from the parsed spec. @@ -43,12 +43,6 @@ defmodule KeyStoreApi.V1.KeyStoreController do def add_keys(conn, _params) do body_params = conn.private.open_api_spex.body_params - config = - Application.get_env(:lambda_ethereum_consensus, LambdaEthereumConsensus.Validator.Setup, []) - - keystore_dir = Keyword.get(config, :keystore_dir) || @default_keystore_dir - keystore_pass_dir = Keyword.get(config, :keystore_pass_dir) || @default_keystore_pass_dir - results = Enum.zip(body_params.keystores, body_params.passwords) |> Enum.map(fn {keystore_file, password_file} -> @@ -56,21 +50,8 @@ defmodule KeyStoreApi.V1.KeyStoreController do base_name = keystore.pubkey |> Utils.hex_encode() - File.write!( - Path.join( - keystore_dir, - base_name <> ".json" - ), - keystore_file - ) - - File.write!( - Path.join( - keystore_pass_dir, - base_name <> ".txt" - ), - password_file - ) + File.write!(get_keystore_file(base_name), keystore_file) + File.write!(get_keystore_pass_file(base_name), password_file) Libp2pPort.add_validator(keystore) @@ -90,29 +71,12 @@ defmodule KeyStoreApi.V1.KeyStoreController do def delete_keys(conn, _params) do body_params = conn.private.open_api_spex.body_params - config = - Application.get_env(:lambda_ethereum_consensus, LambdaEthereumConsensus.Validator.Setup, []) - - keystore_dir = Keyword.get(config, :keystore_dir) || @default_keystore_dir - keystore_pass_dir = Keyword.get(config, :keystore_pass_dir) || @default_keystore_pass_dir - results = Enum.map(body_params.pubkeys, fn pubkey -> case Libp2pPort.delete_validator(pubkey |> Utils.hex_decode()) do :ok -> - File.rm!( - Path.join( - keystore_dir, - pubkey <> ".json" - ) - ) - - File.rm!( - Path.join( - keystore_pass_dir, - pubkey <> ".txt" - ) - ) + File.rm!(get_keystore_file(pubkey)) + File.rm!(get_keystore_pass_file(pubkey)) %{ status: "deleted", @@ -129,4 +93,7 @@ defmodule 
KeyStoreApi.V1.KeyStoreController do "data" => results }) end + + defp get_keystore_file(base_name), do: Path.join(@keystore_dir, base_name <> ".json") + defp get_keystore_pass_file(base_name), do: Path.join(@keystore_pass_dir, base_name <> ".txt") end diff --git a/lib/keystore.ex b/lib/keystore.ex index 92095ce21..cb071838b 100644 --- a/lib/keystore.ex +++ b/lib/keystore.ex @@ -146,4 +146,18 @@ defmodule Keystore do defp sanitize_password(password), do: password |> String.normalize(:nfkd) |> String.replace(~r/[\x00-\x1f\x80-\x9f\x7f]/, "") + + def get_keystore_dir() do + config = + Application.get_env(:lambda_ethereum_consensus, LambdaEthereumConsensus.Validator.Setup, []) + + Keyword.get(config, :keystore_dir) || "keystore_dir" + end + + def get_keystore_pass_dir() do + config = + Application.get_env(:lambda_ethereum_consensus, LambdaEthereumConsensus.Validator.Setup, []) + + Keyword.get(config, :keystore_pass_dir) || "keystore_pass_dir" + end end diff --git a/lib/libp2p_port.ex b/lib/libp2p_port.ex index 1c2ecbf4c..131715ed0 100644 --- a/lib/libp2p_port.ex +++ b/lib/libp2p_port.ex @@ -562,8 +562,10 @@ defmodule LambdaEthereumConsensus.Libp2pPort do @impl GenServer def handle_call({:add_validator, keystore}, _from, %{validators: validators} = state) do - # TODO: HANDLE REPEATED VALIDATORS + # TODO: handle repeated validators current_status = ForkChoice.get_current_status_message() + validator = Validator.new({current_status.head_slot, current_status.head_root, keystore}) + Logger.warning("[Libp2pPort] Adding validator with index #{inspect(validator.index)}.") {:reply, :ok, %{ @@ -572,7 +574,7 @@ defmodule LambdaEthereumConsensus.Libp2pPort do Map.put( validators, keystore.pubkey, - Validator.new({current_status.head_slot, current_status.head_root, keystore}) + validator ) }} end From b49b168b4d46d4fed1d1af73d0831e205b4b8730 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Thu, 8 Aug 2024 21:29:46 -0300 Subject: [PATCH 13/19] fix: use other validators slot when adding a new validator --- lib/lambda_ethereum_consensus/validator/setup.ex | 2 -- lib/libp2p_port.ex | 10 +++++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/lambda_ethereum_consensus/validator/setup.ex b/lib/lambda_ethereum_consensus/validator/setup.ex index 630bc96b7..00206e312 100644 --- a/lib/lambda_ethereum_consensus/validator/setup.ex +++ b/lib/lambda_ethereum_consensus/validator/setup.ex @@ -57,8 +57,6 @@ defmodule LambdaEthereumConsensus.Validator.Setup do keystore_file = Path.join(keystore_dir, "#{base_name}.json") keystore_pass_file = Path.join(keystore_pass_dir, "#{base_name}.txt") - IO.inspect("KEYSTORE_FILE: #{inspect(keystore_file)}") - IO.inspect("KEYSTORE_PASS_FILE: #{inspect(keystore_pass_file)}") {keystore_file, keystore_pass_file} else Logger.warning("[Validator] Skipping file: #{filename}. 
Not a keystore file.") diff --git a/lib/libp2p_port.ex b/lib/libp2p_port.ex index 131715ed0..7a2953de3 100644 --- a/lib/libp2p_port.ex +++ b/lib/libp2p_port.ex @@ -563,9 +563,13 @@ defmodule LambdaEthereumConsensus.Libp2pPort do @impl GenServer def handle_call({:add_validator, keystore}, _from, %{validators: validators} = state) do # TODO: handle repeated validators - current_status = ForkChoice.get_current_status_message() - validator = Validator.new({current_status.head_slot, current_status.head_root, keystore}) - Logger.warning("[Libp2pPort] Adding validator with index #{inspect(validator.index)}.") + # TODO: handle 0 validators + first_validator = validators |> Map.values() |> List.first() + validator = Validator.new({first_validator.slot, first_validator.root, keystore}) + + Logger.warning( + "[Libp2pPort] Adding validator with index #{inspect(validator.index)}. head_slot: #{inspect(validator.slot)}." + ) {:reply, :ok, %{ From 2d2e77d9797b5035e5a4b195bc9bcc96baa8b29b Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Fri, 9 Aug 2024 10:13:41 -0300 Subject: [PATCH 14/19] refactor: restore pruning --- Makefile | 2 +- config/runtime.exs | 6 +-- .../fork_choice/fork_choice.ex | 46 +++++++++---------- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/Makefile b/Makefile index 1823ebbae..8b66dda22 100644 --- a/Makefile +++ b/Makefile @@ -168,7 +168,7 @@ checkpoint-sync: compile-all #▶️ sepolia: @ Run an interactive terminal using sepolia network sepolia: compile-all - iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics --validator-api-port 5056 + iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics #▶️ holesky: @ Run an interactive terminal using holesky network holesky: compile-all iex -S mix run -- --checkpoint-sync-url https://checkpoint-sync.holesky.ethpandaops.io --network holesky diff --git a/config/runtime.exs b/config/runtime.exs index 0eb75ffb6..c187a1329 100644 --- a/config/runtime.exs +++ b/config/runtime.exs @@ -18,7 +18,7 @@ switches = [ log_file: :string, beacon_api: :boolean, beacon_api_port: :integer, - keystore_api: :boolean, + validator_api: :boolean, validator_api_port: :integer, listen_address: [:string, :keep], discovery_port: :integer, @@ -50,7 +50,7 @@ enable_metrics = Keyword.get(args, :metrics, not is_nil(metrics_port)) beacon_api_port = Keyword.get(args, :beacon_api_port, nil) enable_beacon_api = Keyword.get(args, :beacon_api, not is_nil(beacon_api_port)) validator_api_port = Keyword.get(args, :validator_api_port, nil) -enable_keystore_api = Keyword.get(args, :keystore_api, not is_nil(validator_api_port)) +enable_validator_api = Keyword.get(args, :validator_api, not is_nil(validator_api_port)) listen_addresses = Keyword.get_values(args, :listen_address) discovery_port = Keyword.get(args, :discovery_port, 9000) cli_bootnodes = Keyword.get(args, :boot_nodes, "") @@ -159,7 +159,7 @@ config :lambda_ethereum_consensus, BeaconApi.Endpoint, # KeyStore API config :lambda_ethereum_consensus, KeyStoreApi.Endpoint, - server: enable_keystore_api, + server: enable_validator_api, http: [port: validator_api_port || 5000], url: [host: "localhost"], render_errors: [ diff --git a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex index 6c1a6bfd7..7f2b19123 100644 --- a/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex +++ b/lib/lambda_ethereum_consensus/fork_choice/fork_choice.ex @@ -12,11 +12,11 @@ 
defmodule LambdaEthereumConsensus.ForkChoice do alias LambdaEthereumConsensus.P2P.Gossip.OperationsCollector alias LambdaEthereumConsensus.StateTransition.Accessors alias LambdaEthereumConsensus.StateTransition.Misc - # alias LambdaEthereumConsensus.Store.BlobDb - # alias LambdaEthereumConsensus.Store.BlockDb + alias LambdaEthereumConsensus.Store.BlobDb + alias LambdaEthereumConsensus.Store.BlockDb alias LambdaEthereumConsensus.Store.Blocks alias LambdaEthereumConsensus.Store.CheckpointStates - # alias LambdaEthereumConsensus.Store.StateDb + alias LambdaEthereumConsensus.Store.StateDb alias LambdaEthereumConsensus.Store.StoreDb alias Types.Attestation alias Types.BlockInfo @@ -175,26 +175,26 @@ defmodule LambdaEthereumConsensus.ForkChoice do ### Private Functions ########################## - defp prune_old_states(_last_finalized_epoch, _new_finalized_epoch) do - # if last_finalized_epoch < new_finalized_epoch do - # new_finalized_slot = - # new_finalized_epoch * ChainSpec.get("SLOTS_PER_EPOCH") - - # Task.Supervisor.start_child( - # PruneStatesSupervisor, - # fn -> StateDb.prune_states_older_than(new_finalized_slot) end - # ) - - # Task.Supervisor.start_child( - # PruneBlocksSupervisor, - # fn -> BlockDb.prune_blocks_older_than(new_finalized_slot) end - # ) - - # Task.Supervisor.start_child( - # PruneBlobsSupervisor, - # fn -> BlobDb.prune_old_blobs(new_finalized_slot) end - # ) - # end + defp prune_old_states(last_finalized_epoch, new_finalized_epoch) do + if last_finalized_epoch < new_finalized_epoch do + new_finalized_slot = + new_finalized_epoch * ChainSpec.get("SLOTS_PER_EPOCH") + + Task.Supervisor.start_child( + PruneStatesSupervisor, + fn -> StateDb.prune_states_older_than(new_finalized_slot) end + ) + + Task.Supervisor.start_child( + PruneBlocksSupervisor, + fn -> BlockDb.prune_blocks_older_than(new_finalized_slot) end + ) + + Task.Supervisor.start_child( + PruneBlobsSupervisor, + fn -> BlobDb.prune_old_blobs(new_finalized_slot) end + ) + end end def apply_handler(iter, state, handler) do From 206dee6ccf4ca8565dd76b947d1e8e5b71e04a87 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Fri, 9 Aug 2024 10:51:00 -0300 Subject: [PATCH 15/19] Update README.md --- README.md | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 75414b851..601003924 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,8 @@ Some public endpoints can be found in [eth-clients.github.io/checkpoint-sync-end > The data retrieved from the URL is stored in the DB once the node is initiated (i.e. the iex prompt shows). > Once this happens, following runs of `make iex` will start the node using that data. -### Beacon API +### APIs +#### Beacon API You can start the application with the Beacon API on the default port `4000` running: ```shell @@ -100,7 +101,27 @@ make start You can also specify a port with the "--beacon-api-port" flag: ```shell -iex -S mix run -- --beacon-api --beacon-api-port +iex -S mix run -- --beacon-api-port +``` +> [!WARNING] +> In case checkpoint-sync is needed, following the instructions above will end immediately with an error (see [Checkpoint Sync](#checkpoint-sync)). +> + +#### Key-Manager API + +Implemented following the [Ethereum specification](https://ethereum.github.io/keymanager-APIs/#/). 
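+
+Once the node is running with this API enabled (see the commands below), the keystore endpoints can be exercised directly. The following `curl` requests are illustrative only, assuming a locally running node on the default port `5000` and placeholder keystore, password and pubkey values:
+
+```shell
+# List the keystores currently loaded by the node
+curl http://localhost:5000/eth/v1/keystores
+
+# Import a keystore (an EIP-2335 JSON string plus its password)
+curl -X POST -H "Content-Type: application/json" \
+  -d '{"keystores": ["<keystore-json-string>"], "passwords": ["<keystore-password>"]}' \
+  http://localhost:5000/eth/v1/keystores
+
+# Delete a keystore by its hex-encoded public key
+curl -X DELETE -H "Content-Type: application/json" \
+  -d '{"pubkeys": ["0x<hex-encoded-pubkey>"]}' \
+  http://localhost:5000/eth/v1/keystores
+```
+
+Responses use the `{"data": [...]}` envelope described in `keymanager-oapi.yaml`.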
+ +You can start the application with the key manager API on the default port `5000` running: + +```shell +iex -S mix run -- --validator-api +``` + + +You can also specify a port with the "--validator-api-port" flag: + +```shell +iex -S mix run -- --validator-api-port ``` > [!WARNING] > In case checkpoint-sync is needed, following the instructions above will end immediately with an error (see [Checkpoint Sync](#checkpoint-sync)). From bee3af166abeeb56a38ecd475f440582b47a8727 Mon Sep 17 00:00:00 2001 From: avilagaston9 Date: Fri, 9 Aug 2024 13:19:16 -0300 Subject: [PATCH 16/19] refactor: nit changes --- Makefile | 1 + README.md | 1 + .../controllers/v1/key_store_controller.ex | 14 ++++++++------ lib/libp2p_port.ex | 6 +++--- 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 8b66dda22..a6f62a7b3 100644 --- a/Makefile +++ b/Makefile @@ -169,6 +169,7 @@ checkpoint-sync: compile-all #▶️ sepolia: @ Run an interactive terminal using sepolia network sepolia: compile-all iex -S mix run -- --checkpoint-sync-url https://sepolia.beaconstate.info --network sepolia --metrics + #▶️ holesky: @ Run an interactive terminal using holesky network holesky: compile-all iex -S mix run -- --checkpoint-sync-url https://checkpoint-sync.holesky.ethpandaops.io --network holesky diff --git a/README.md b/README.md index 601003924..baa2d7f33 100644 --- a/README.md +++ b/README.md @@ -271,6 +271,7 @@ participants: use_separate_vc: false count: 1 cl_max_mem: 4096 + keymanager_enabled: true ``` ### Kurtosis Execution and Make tasks diff --git a/lib/key_store_api/controllers/v1/key_store_controller.ex b/lib/key_store_api/controllers/v1/key_store_controller.ex index e979d70b3..440000765 100644 --- a/lib/key_store_api/controllers/v1/key_store_controller.ex +++ b/lib/key_store_api/controllers/v1/key_store_controller.ex @@ -50,8 +50,8 @@ defmodule KeyStoreApi.V1.KeyStoreController do base_name = keystore.pubkey |> Utils.hex_encode() - File.write!(get_keystore_file(base_name), keystore_file) - File.write!(get_keystore_pass_file(base_name), password_file) + File.write!(get_keystore_file_path(base_name), keystore_file) + File.write!(get_keystore_pass_file_path(base_name), password_file) Libp2pPort.add_validator(keystore) @@ -75,8 +75,8 @@ defmodule KeyStoreApi.V1.KeyStoreController do Enum.map(body_params.pubkeys, fn pubkey -> case Libp2pPort.delete_validator(pubkey |> Utils.hex_decode()) do :ok -> - File.rm!(get_keystore_file(pubkey)) - File.rm!(get_keystore_pass_file(pubkey)) + File.rm!(get_keystore_file_path(pubkey)) + File.rm!(get_keystore_pass_file_path(pubkey)) %{ status: "deleted", @@ -94,6 +94,8 @@ defmodule KeyStoreApi.V1.KeyStoreController do }) end - defp get_keystore_file(base_name), do: Path.join(@keystore_dir, base_name <> ".json") - defp get_keystore_pass_file(base_name), do: Path.join(@keystore_pass_dir, base_name <> ".txt") + defp get_keystore_file_path(base_name), do: Path.join(@keystore_dir, base_name <> ".json") + + defp get_keystore_pass_file_path(base_name), + do: Path.join(@keystore_pass_dir, base_name <> ".txt") end diff --git a/lib/libp2p_port.ex b/lib/libp2p_port.ex index 7a2953de3..919b41baf 100644 --- a/lib/libp2p_port.ex +++ b/lib/libp2p_port.ex @@ -84,7 +84,7 @@ defmodule LambdaEthereumConsensus.Libp2pPort do discovery_addresses: [String.t()] } - @sync_delay_millis 20_000 + @sync_delay_millis 10_000 ###################### ### API @@ -562,8 +562,8 @@ defmodule LambdaEthereumConsensus.Libp2pPort do @impl GenServer def handle_call({:add_validator, keystore}, _from, 
%{validators: validators} = state) do - # TODO: handle repeated validators - # TODO: handle 0 validators + # TODO (#1263): handle 0 validators + # TODO (#1264): handle repeated validators first_validator = validators |> Map.values() |> List.first() validator = Validator.new({first_validator.slot, first_validator.root, keystore}) From 0ef58bf6c4061bd56c0ed377e89bce43e5fee446 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 9 Aug 2024 15:12:07 -0300 Subject: [PATCH 17/19] chore(deps): bump open_api_spex from 3.20.0 to 3.20.1 (#1252) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- mix.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mix.lock b/mix.lock index caa081abb..d40381f41 100644 --- a/mix.lock +++ b/mix.lock @@ -43,7 +43,7 @@ "nimble_ownership": {:hex, :nimble_ownership, "0.3.1", "99d5244672fafdfac89bfad3d3ab8f0d367603ce1dc4855f86a1c75008bce56f", [:mix], [], "hexpm", "4bf510adedff0449a1d6e200e43e57a814794c8b5b6439071274d248d272a549"}, "nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"}, "octo_fetch": {:hex, :octo_fetch, "0.4.0", "074b5ecbc08be10b05b27e9db08bc20a3060142769436242702931c418695b19", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}], "hexpm", "cf8be6f40cd519d7000bb4e84adcf661c32e59369ca2827c4e20042eda7a7fc6"}, - "open_api_spex": {:hex, :open_api_spex, "3.20.0", "d4fcf1ee297aa94a673cddb92734eb0bc7cac698be93949a223a50f724e3af89", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "2e9beea71142ff09f8f935579b39406e2c6b5a3978e7235978d7faf2f90cd081"}, + "open_api_spex": {:hex, :open_api_spex, "3.20.1", "ce5b3db013cd7337ab147f39fa2d4d627ddeeb4ff3fea34792f43d7e2e654605", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "dc9c383949d0fc4b20b73103ac20af39dad638b3a15c0e6281853c2fc7cc3cc8"}, "parse_trans": {:hex, :parse_trans, "3.4.1", "6e6aa8167cb44cc8f39441d05193be6e6f4e7c2946cb2759f015f8c56b76e5ff", [:rebar3], [], "hexpm", "620a406ce75dada827b82e453c19cf06776be266f5a67cff34e1ef2cbb60e49a"}, "patch": {:hex, :patch, "0.13.1", "2da5b508e4d6558924a0959d95dc3aa8176b5ccf2539e4567481448d61853ccc", [:mix], [], "hexpm", "75f805827d9db0c335155fbb857e6eeb5c85034c9dc668d146bc0bfe48fac822"}, "phoenix": {:hex, :phoenix, "1.7.14", "a7d0b3f1bc95987044ddada111e77bd7f75646a08518942c72a8440278ae7825", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", 
optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "c7859bc56cc5dfef19ecfc240775dae358cbaa530231118a9e014df392ace61a"},

From 07ce5acda63d103b518d16529e04277c21a5ed3c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 9 Aug 2024 15:13:45 -0300
Subject: [PATCH 18/19] chore(deps-dev): bump recode from 0.7.2 to 0.7.3 (#1251)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 mix.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mix.lock b/mix.lock
index d40381f41..0a77f66fd 100644
--- a/mix.lock
+++ b/mix.lock
@@ -26,7 +26,7 @@
   "finch": {:hex, :finch, "0.18.0", "944ac7d34d0bd2ac8998f79f7a811b21d87d911e77a786bc5810adb75632ada4", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.3", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 0.2.6 or ~> 1.0", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "69f5045b042e531e53edc2574f15e25e735b522c37e2ddb766e15b979e03aa65"},
   "flama": {:git, "https://github.com/lambdaclass/ht1223_tracer", "4baea1e15459aa9ffa2ddc53af3e30a167c3998a", []},
   "gettext": {:hex, :gettext, "0.24.0", "6f4d90ac5f3111673cbefc4ebee96fe5f37a114861ab8c7b7d5b30a1108ce6d8", [:mix], [{:expo, "~> 0.5.1", [hex: :expo, repo: "hexpm", optional: false]}], "hexpm", "bdf75cdfcbe9e4622dd18e034b227d77dd17f0f133853a1c73b97b3d6c770e8b"},
-  "glob_ex": {:hex, :glob_ex, "0.1.7", "eae6b6377147fb712ac45b360e6dbba00346689a87f996672fe07e97d70597b1", [:mix], [], "hexpm", "decc1c21c0c73df3c9c994412716345c1692477b9470e337f628a7e08da0da6a"},
+  "glob_ex": {:hex, :glob_ex, "0.1.8", "f7ef872877ca2ae7a792ab1f9ff73d9c16bf46ecb028603a8a3c5283016adc07", [:mix], [], "hexpm", "9e39d01729419a60a937c9260a43981440c43aa4cadd1fa6672fecd58241c464"},
   "hackney": {:hex, :hackney, "1.20.1", "8d97aec62ddddd757d128bfd1df6c5861093419f8f7a4223823537bad5d064e2", [:rebar3], [{:certifi, "~> 2.12.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.4.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "fe9094e5f1a2a2c0a7d10918fee36bfec0ec2a979994cff8cfe8058cd9af38e3"},
   "hpax": {:hex, :hpax, "1.0.0", "28dcf54509fe2152a3d040e4e3df5b265dcb6cb532029ecbacf4ce52caea3fd2", [:mix], [], "hexpm", "7f1314731d711e2ca5fdc7fd361296593fc2542570b3105595bb0bc6d0fad601"},
   "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], [{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"},
@@ -57,7 +57,7 @@
   "quantile_estimator": {:hex, :quantile_estimator, "0.2.1", "ef50a361f11b5f26b5f16d0696e46a9e4661756492c981f7b2229ef42ff1cd15", [:rebar3], [], "hexpm", "282a8a323ca2a845c9e6f787d166348f776c1d4a41ede63046d72d422e3da946"},
   "ranch": {:hex, :ranch, "1.8.0", "8c7a100a139fd57f17327b6413e4167ac559fbc04ca7448e9be9057311597a1d", [:make, :rebar3], [], "hexpm", "49fbcfd3682fab1f5d109351b61257676da1a2fdbe295904176d5e521a2ddfe5"},
   "recase": {:hex, :recase, "0.8.1", "ab98cd35857a86fa5ca99036f575241d71d77d9c2ab0c39aacf1c9b61f6f7d1d", [:mix], [], "hexpm", "9fd8d63e7e43bd9ea385b12364e305778b2bbd92537e95c4b2e26fc507d5e4c2"},
-  "recode": {:hex, :recode, "0.7.2", "aa24873b6eb4c90e635ad1f7e12b8e21575a087698bd6bda6e72a82c1298eca1", [:mix], [{:escape, "~> 0.1", [hex: :escape, repo: "hexpm", optional: false]}, {:glob_ex, "~> 0.1", [hex: :glob_ex, repo: "hexpm", optional: false]}, {:rewrite, "~> 0.9", [hex: :rewrite, repo: "hexpm", optional: false]}], "hexpm", "d70fc60aae3c42781ec845515c1ddd4fe55218ed3fd8fe52267d338044ec7fb8"},
+  "recode": {:hex, :recode, "0.7.3", "aa14fda1ba7771c4d9f393bb5ee5b3ea8ec87e983b3f0032435ffca647123a10", [:mix], [{:escape, "~> 0.1", [hex: :escape, repo: "hexpm", optional: false]}, {:glob_ex, "~> 0.1", [hex: :glob_ex, repo: "hexpm", optional: false]}, {:rewrite, "~> 0.9", [hex: :rewrite, repo: "hexpm", optional: false]}], "hexpm", "c58cf50ff099b7655dc423b35177dccdbfad9645a2639f7e80a5e340d9c36fd8"},
   "redbug": {:hex, :redbug, "1.2.2", "366d8961770ddc7bb5d209fbadddfa7271005487f938c087a0e385a57abfee33", [:rebar3], [], "hexpm", "b5fe7b94e487be559cb0ec1c0e938c9761205d3e91a96bf263bdf1beaebea729"},
   "req": {:hex, :req, "0.5.2", "70b4976e5fbefe84e5a57fd3eea49d4e9aa0ac015301275490eafeaec380f97f", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "0c63539ab4c2d6ced6114d2684276cef18ac185ee00674ee9af4b1febba1f986"},
   "rewrite": {:hex, :rewrite, "0.10.5", "6afadeae0b9d843b27ac6225e88e165884875e0aed333ef4ad3bf36f9c101bed", [:mix], [{:glob_ex, "~> 0.1", [hex: :glob_ex, repo: "hexpm", optional: false]}, {:sourceror, "~> 1.0", [hex: :sourceror, repo: "hexpm", optional: false]}], "hexpm", "51cc347a4269ad3a1e7a2c4122dbac9198302b082f5615964358b4635ebf3d4f"},
@@ -66,7 +66,7 @@
   "scrypt_elixir": {:hex, :scrypt_elixir_copy, "0.1.1", "2b23573e8d9e6c93c8116cd17f9b453b6ebf0725b5317ecaeacaf73353a4dbd3", [:make, :mix], [{:elixir_make, "~> 0.4", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "1eb5768b6b6c657770cbc00a9724f47bad4e9d664a2da3916030d591223561e7"},
   "sentry": {:hex, :sentry, "10.6.2", "a867ab728d424e187ccb2bccc388170a740a79bc0ddccabd72d303b203acbe0e", [:mix], [{:hackney, "~> 1.8", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: true]}, {:nimble_options, "~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_ownership, "~> 0.3.0", [hex: :nimble_ownership, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.6", [hex: :phoenix, repo: "hexpm", optional: true]}, {:phoenix_live_view, "~> 0.20", [hex: :phoenix_live_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.6", [hex: :plug, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "31bb84247274f9262fd300df0e3eb73302e4849cc6b7a6560bb2465f03fbd446"},
   "snappyer": {:hex, :snappyer, "1.2.9", "9cc58470798648ce34c662ca0aa6daae31367667714c9a543384430a3586e5d3", [:rebar3], [], "hexpm", "18d00ca218ae613416e6eecafe1078db86342a66f86277bd45c95f05bf1c8b29"},
-  "sourceror": {:hex, :sourceror, "1.4.0", "be87319b1579191e25464005d465713079b3fd7124a3938a1e6cf4def39735a9", [:mix], [], "hexpm", "16751ca55e3895f2228938b703ad399b0b27acfe288eff6c0e629ed3e6ec0358"},
+  "sourceror": {:hex, :sourceror, "1.5.0", "3e65d5fbb1a8e2864ad6411262c8018fee73474f5789dda12285c82999253d5d", [:mix], [], "hexpm", "4a32b5d189d8453f73278c15712f8731b89e9211e50726b798214b303b51bfc7"},
   "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"},
   "statistex": {:hex, :statistex, "1.0.0", "f3dc93f3c0c6c92e5f291704cf62b99b553253d7969e9a5fa713e5481cd858a5", [:mix], [], "hexpm", "ff9d8bee7035028ab4742ff52fc80a2aa35cece833cf5319009b52f1b5a86c27"},
   "stream_data": {:hex, :stream_data, "1.1.1", "fd515ca95619cca83ba08b20f5e814aaf1e5ebff114659dc9731f966c9226246", [:mix], [], "hexpm", "45d0cd46bd06738463fd53f22b70042dbb58c384bb99ef4e7576e7bb7d3b8c8c"},

From 02a2f8645fcb337bbe691884885d5d81eb2e5775 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 9 Aug 2024 15:14:58 -0300
Subject: [PATCH 19/19] chore(deps): bump tesla from 1.11.2 to 1.12.1 (#1250)

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 mix.lock | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mix.lock b/mix.lock
index 0a77f66fd..bb588bed4 100644
--- a/mix.lock
+++ b/mix.lock
@@ -74,7 +74,7 @@
   "telemetry_metrics": {:hex, :telemetry_metrics, "0.6.2", "2caabe9344ec17eafe5403304771c3539f3b6e2f7fb6a6f602558c825d0d0bfb", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9b43db0dc33863930b9ef9d27137e78974756f5f198cae18409970ed6fa5b561"},
   "telemetry_metrics_prometheus_core": {:hex, :telemetry_metrics_prometheus_core, "1.2.1", "c9755987d7b959b557084e6990990cb96a50d6482c683fb9622a63837f3cd3d8", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "5e2c599da4983c4f88a33e9571f1458bf98b0cf6ba930f1dc3a6e8cf45d5afb6"},
   "telemetry_poller": {:hex, :telemetry_poller, "1.1.0", "58fa7c216257291caaf8d05678c8d01bd45f4bdbc1286838a28c4bb62ef32999", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9eb9d9cbfd81cbd7cdd24682f8711b6e2b691289a0de6826e58452f28c103c8f"},
-  "tesla": {:hex, :tesla, "1.11.2", "24707ac48b52f72f88fc05d242b1c59a85d1ee6f16f19c312d7d3419665c9cd5", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "c549cd03aec6a7196a641689dd378b799e635eb393f689b4bd756f750c7a4014"},
+  "tesla": {:hex, :tesla, "1.12.1", "fe2bf4250868ee72e5d8b8dfa408d13a00747c41b7237b6aa3b9a24057346681", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:exjsx, ">= 3.0.0", [hex: :exjsx, repo: "hexpm", optional: true]}, {:finch, "~> 0.13", [hex: :finch, repo: "hexpm", optional: true]}, {:fuse, "~> 2.4", [hex: :fuse, repo: "hexpm", optional: true]}, {:gun, ">= 1.0.0", [hex: :gun, repo: "hexpm", optional: true]}, {:hackney, "~> 1.6", [hex: :hackney, repo: "hexpm", optional: true]}, {:ibrowse, "4.4.2", [hex: :ibrowse, repo: "hexpm", optional: true]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.0", [hex: :mint, repo: "hexpm", optional: true]}, {:msgpax, "~> 2.3", [hex: :msgpax, repo: "hexpm", optional: true]}, {:poison, ">= 1.0.0", [hex: :poison, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: true]}], "hexpm", "2391efc6243d37ead43afd0327b520314c7b38232091d4a440c1212626fdd6e7"},
   "timex": {:hex, :timex, "3.7.11", "bb95cb4eb1d06e27346325de506bcc6c30f9c6dea40d1ebe390b262fad1862d1", [:mix], [{:combine, "~> 0.10", [hex: :combine, repo: "hexpm", optional: false]}, {:gettext, "~> 0.20", [hex: :gettext, repo: "hexpm", optional: false]}, {:tzdata, "~> 1.1", [hex: :tzdata, repo: "hexpm", optional: false]}], "hexpm", "8b9024f7efbabaf9bd7aa04f65cf8dcd7c9818ca5737677c7b76acbc6a94d1aa"},
   "toml": {:hex, :toml, "0.7.0", "fbcd773caa937d0c7a02c301a1feea25612720ac3fa1ccb8bfd9d30d822911de", [:mix], [], "hexpm", "0690246a2478c1defd100b0c9b89b4ea280a22be9a7b313a8a058a2408a2fa70"},
   "tzdata": {:hex, :tzdata, "1.1.1", "20c8043476dfda8504952d00adac41c6eda23912278add38edc140ae0c5bcc46", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "a69cec8352eafcd2e198dea28a34113b60fdc6cb57eb5ad65c10292a6ba89787"},