diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index afcc42e934..f296289e6e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -88,6 +88,66 @@ Deploy exporter to stage:
     - stage
 
+# +---------------------+
+# | STAGE HETZNER NODES |
+# +---------------------+
+
+
+Deploy nodes to hetzner stage:
+  stage: deploy
+  tags:
+    - hetzner-k8s-stage
+  image: bitnami/kubectl:1.27.5
+  script:
+    - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION
+    - export SSV_NODES_CPU_LIMIT=$HETZNER_STAGE_SSV_NODES_CPU_LIMIT
+    - export SSV_NODES_MEM_LIMIT=$HETZNER_STAGE_SSV_NODES_MEM_LIMIT
+    - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig
+    - mv kubeconfig ~/.kube/
+    - export KUBECONFIG=~/.kube/kubeconfig
+    - kubectl config get-contexts
+    #
+    # +--------------------+
+    # |  Deploy SSV nodes  |
+    # +--------------------+
+    - .k8/hetzner-stage/scripts/deploy-cluster-1--4.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-5--8.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-17--20.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-21--24.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-25--28.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-29--32.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-33--36.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-37--40.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-41--44.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-45--48.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-49--52.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-53--56.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-57--60.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-61--64.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-65--68.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+    - .k8/hetzner-stage/scripts/deploy-cluster-69--72.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $STAGE_HEALTH_CHECK_IMAGE $SSV_NODES_CPU_LIMIT $SSV_NODES_MEM_LIMIT
+  only:
+    - stage
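The eighteen `deploy-cluster-N--M.sh` invocations above differ only in the node range that each script hard-codes into its `DEPLOY_FILES` list. A hypothetical consolidation, not part of this PR (the name `deploy-cluster.sh` and the FROM/TO arguments are invented here), could compute the list from a range instead:

#!/bin/bash
# Sketch: one script taking a node range; the remaining arguments keep the
# same order the numbered scripts already use (DOCKERREPO IMAGETAG ...).
FROM=$1
TO=$2
shift 2

DEPLOY_FILES=()
for n in $(seq "$FROM" "$TO"); do
  DEPLOY_FILES+=("ssv-node-${n}-deployment.yml")
done

The CI job would then call `.k8/hetzner-stage/scripts/deploy-cluster.sh 1 4 $DOCKER_REPO_INFRA_STAGE ...` and so on, or loop over the ranges in a single script line.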
+
+Deploy exporter to hetzner stage:
+  stage: deploy
+  tags:
+    - hetzner-k8s-stage
+  image: bitnami/kubectl:1.27.5
+  script:
+    - export K8S_API_VERSION=$INFRA_STAGE_K8_API_VERSION
+    - export SSV_EXPORTER_CPU_LIMIT=$STAGE_SSV_EXPORTER_CPU_LIMIT
+    - export SSV_EXPORTER_MEM_LIMIT=$STAGE_SSV_EXPORTER_MEM_LIMIT
+    - echo $HETZNER_KUBECONFIG | base64 -d > kubeconfig
+    - mv kubeconfig ~/.kube/
+    - export KUBECONFIG=~/.kube/kubeconfig
+    - kubectl config get-contexts
+    - .k8/hetzner-stage/scripts/deploy-holesky-exporters.sh $DOCKER_REPO_INFRA_STAGE $CI_COMMIT_SHA ssv $APP_REPLICAS_INFRA_STAGE hetzner.stage.k8s.local hetzner.stage.k8s.local stage.ssv.network $K8S_API_VERSION $SSV_EXPORTER_CPU_LIMIT $SSV_EXPORTER_MEM_LIMIT
+  only:
+    - stage
+
 # +---------------+
 # |     Prod      |
 # +---------------+
diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh
new file mode 100755
index 0000000000..f2a8669b7d
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-1--4.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
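The validation continues below through `${11}`. The braces on the two-digit checks are what make them work: bash parses `$10` as `${1}0` (the first parameter followed by a literal `0`), so everything from the tenth parameter on must be written `${10}`, `${11}`, while `$9` and below need no braces. A hedged table-driven equivalent of the eleven checks (`REQUIRED_ARGS` is an invented name, not used by these scripts):

# Sketch: same checks and messages, driven by one list; ${!idx} is bash
# indirect expansion and resolves to positional parameter number idx,
# which also works correctly for 10 and 11.
REQUIRED_ARGS=(DOCKERREPO IMAGETAG NAMESPACE REPLICAS DEPL_TYPE K8S_CONTEXT
  DOMAIN_SUFFIX K8S_API_VERSION HEALTH_CHECK_IMAGE NODES_CPU_LIMIT NODES_MEM_LIMIT)
for idx in $(seq 1 ${#REQUIRED_ARGS[@]}); do
  if [[ -z "${!idx}" ]]; then
    echo "Please provide ${REQUIRED_ARGS[idx-1]}"
    exit 1
  fi
done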
echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-1-deployment.yml" + "ssv-node-2-deployment.yml" + "ssv-node-3-deployment.yml" + "ssv-node-4-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/stage/scripts/deploy-cluster-13--16.sh b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh similarity index 94% rename from .k8/stage/scripts/deploy-cluster-13--16.sh rename to .k8/hetzner-stage/scripts/deploy-cluster-13--16.sh index 9b3772bdfe..1de999f0e8 100755 --- a/.k8/stage/scripts/deploy-cluster-13--16.sh +++ b/.k8/hetzner-stage/scripts/deploy-cluster-13--16.sh @@ -103,12 +103,12 @@ fi #done #fi -DIR=".k8/stage" +DIR=".k8/hetzner-stage" DEPLOY_FILES=( - "ssv-node-v3-1-deployment.yml" - "ssv-node-v3-2-deployment.yml" - "ssv-node-v3-3-deployment.yml" - "ssv-node-v3-4-deployment.yml" + "ssv-node-13-deployment.yml" + "ssv-node-14-deployment.yml" + "ssv-node-15-deployment.yml" + "ssv-node-16-deployment.yml" ) if [[ -d $DIR ]]; then diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh new file mode 100755 index 0000000000..812a48e3f6 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-17--20.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 
+fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-17-deployment.yml" + "ssv-node-18-deployment.yml" + "ssv-node-19-deployment.yml" + "ssv-node-20-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh new file mode 100755 index 0000000000..57c89f2fdd --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-21--24.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then 
+ echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-21-deployment.yml" + "ssv-node-22-deployment.yml" + "ssv-node-23-deployment.yml" + "ssv-node-24-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh new file mode 100755 index 0000000000..134e83dad8 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-25--28.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo 
$HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-25-deployment.yml" + "ssv-node-26-deployment.yml" + "ssv-node-27-deployment.yml" + "ssv-node-28-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh new file mode 100755 index 0000000000..6e721e8342 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-29--32.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
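A note on the namespace check that recurs in every one of these scripts (its condition continues just below): `kubectl get ns | grep -q $NAMESPACE` matches substrings anywhere in the listing, so a namespace like `ssv` would also be "found" when only `ssv-prod` exists, and the scripts print "$NAMESPACE created" before the namespace is actually created. A sketch of an exact variant under the same variable names, querying the namespace directly and relying on the exit code:

# Sketch: exact-name existence check; prints the message only after a
# successful create, instead of grepping the full `get ns` listing.
if ! kubectl --context="$K8S_CONTEXT" get namespace "$NAMESPACE" >/dev/null 2>&1; then
  kubectl --context="$K8S_CONTEXT" create namespace "$NAMESPACE"
  echo "$NAMESPACE created"
fi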
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-29-deployment.yml" + "ssv-node-30-deployment.yml" + "ssv-node-31-deployment.yml" + "ssv-node-32-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh new file mode 100755 index 0000000000..deb2d911e5 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-33--36.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-33-deployment.yml" + "ssv-node-34-deployment.yml" + "ssv-node-35-deployment.yml" + "ssv-node-36-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh new file mode 100755 index 0000000000..c82c77ce42 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-37--40.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-37-deployment.yml" + "ssv-node-38-deployment.yml" + "ssv-node-39-deployment.yml" + "ssv-node-40-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh new file mode 100755 index 0000000000..c4684e685e --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-41--44.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-41-deployment.yml" + "ssv-node-42-deployment.yml" + "ssv-node-43-deployment.yml" + "ssv-node-44-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh new file mode 100755 index 0000000000..11a54c9722 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-45--48.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-45-deployment.yml" + "ssv-node-46-deployment.yml" + "ssv-node-47-deployment.yml" + "ssv-node-48-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh new file mode 100755 index 0000000000..dcc90d2742 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-49--52.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-49-deployment.yml" + "ssv-node-50-deployment.yml" + "ssv-node-51-deployment.yml" + "ssv-node-52-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh new file mode 100755 index 0000000000..e3bb9e94a2 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-5--8.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-5-deployment.yml" + "ssv-node-6-deployment.yml" + "ssv-node-7-deployment.yml" + "ssv-node-8-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh new file mode 100755 index 0000000000..9efd728b17 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-53--56.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
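Each of these scripts opens with `set -x` for tracing and then guards individual `sed` and `kubectl` calls with `|| exit 1`. A hedged alternative prologue, an assumption rather than what this PR uses, would fail the whole script on the first error instead of guarding each command:

#!/bin/bash
# Sketch of a stricter prologue, assuming no command here is expected to
# fail benignly: -e aborts on the first failure, -u turns a missing
# positional parameter into an error, -x keeps the existing tracing.
set -euxo pipefail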
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-53-deployment.yml" + "ssv-node-54-deployment.yml" + "ssv-node-55-deployment.yml" + "ssv-node-56-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh new file mode 100755 index 0000000000..1be68e57f5 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-57--60.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-57-deployment.yml" + "ssv-node-58-deployment.yml" + "ssv-node-59-deployment.yml" + "ssv-node-60-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh new file mode 100755 index 0000000000..2fc32263a0 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-61--64.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-61-deployment.yml" + "ssv-node-62-deployment.yml" + "ssv-node-63-deployment.yml" + "ssv-node-64-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh b/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh new file mode 100755 index 0000000000..fe57c84c75 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-65--68.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +#config +#if [[ -d .k8/configmaps/ ]]; then +#config + #for file in $(ls -A1 .k8/configmaps/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}" + #done +#fi + +#if [[ -d .k8/secrets/ ]]; then + #for file in $(ls -A1 .k8/secrets/); do + #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}" + #done +#fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-node-65-deployment.yml" + "ssv-node-66-deployment.yml" + "ssv-node-67-deployment.yml" + "ssv-node-68-deployment.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \ + -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \ + -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh new file mode 100755 index 0000000000..229536c0d4 --- /dev/null +++ b/.k8/hetzner-stage/scripts/deploy-cluster-69--72.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +set -x + +if [[ -z $1 ]]; then + echo "Please provide DOCKERREPO" + exit 1 +fi + +if [[ -z $2 ]]; then + echo "Please provide IMAGETAG" + exit 1 +fi + +if [[ -z $3 ]]; then + echo "Please provide NAMESPACE" + exit 1 +fi + +if [[ -z $4 ]]; then + echo "Please provide number of replicas" + exit 1 +fi + +if [[ -z $5 ]]; then + echo "Please provide deployment type: blox-infra-stage|blox-infra-prod" + exit 1 +fi + +if [[ -z $6 ]]; then + echo "Please provide k8s context" + exit 1 +fi + +if [[ -z $7 ]]; then + echo "Pleae provide domain suffix" + exit 1 +fi + +if [[ -z ${8} ]]; then + echo "Please provide k8s app version" + exit 1 +fi + +if [[ -z $9 ]]; then + echo "Please provide health check image" + exit 1 +fi + +if [[ -z ${10} ]]; then + echo "Please provide nodes cpu limit" + exit 1 +fi + +if [[ -z ${11} ]]; then + echo "Please provide nodes mem limit" + exit 1 +fi + + +DOCKERREPO=$1 +IMAGETAG=$2 +NAMESPACE=$3 +REPLICAS=$4 +DEPL_TYPE=$5 +K8S_CONTEXT=$6 +DOMAIN_SUFFIX=$7 +K8S_API_VERSION=$8 +HEALTH_CHECK_IMAGE=$9 +NODES_CPU_LIMIT=${10} +NODES_MEM_LIMIT=${11} + + +echo $DOCKERREPO +echo $IMAGETAG +echo $NAMESPACE +echo $REPLICAS +echo $DEPL_TYPE +echo $K8S_CONTEXT +echo $DOMAIN_SUFFIX +echo $K8S_API_VERSION +echo $HEALTH_CHECK_IMAGE +echo $NODES_CPU_LIMIT +echo $NODES_MEM_LIMIT + +# create namespace if not exists +if ! 
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+#config
+#if [[ -d .k8/configmaps/ ]]; then
+#config
+  #for file in $(ls -A1 .k8/configmaps/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/configmaps/${file}"
+  #done
+#fi
+
+#if [[ -d .k8/secrets/ ]]; then
+  #for file in $(ls -A1 .k8/secrets/); do
+  #sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" ".k8/secrets/${file}"
+  #done
+#fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-node-69-deployment.yml"
+  "ssv-node-70-deployment.yml"
+  "ssv-node-71-deployment.yml"
+  "ssv-node-72-deployment.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_HEALTH_IMAGE|${HEALTH_CHECK_IMAGE}|g" \
+      -e "s|REPLACE_NODES_CPU_LIMIT|${NODES_CPU_LIMIT}|g" \
+      -e "s|REPLACE_NODES_MEM_LIMIT|${NODES_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/stage/scripts/deploy-cluster-9--12.sh b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh
similarity index 99%
rename from .k8/stage/scripts/deploy-cluster-9--12.sh
rename to .k8/hetzner-stage/scripts/deploy-cluster-9--12.sh
index 057b7205af..81fe2de698 100755
--- a/.k8/stage/scripts/deploy-cluster-9--12.sh
+++ b/.k8/hetzner-stage/scripts/deploy-cluster-9--12.sh
@@ -103,7 +103,7 @@ fi
 #done
 #fi
 
-DIR=".k8/stage"
+DIR=".k8/hetzner-stage"
 DEPLOY_FILES=(
   "ssv-node-9-deployment.yml"
   "ssv-node-10-deployment.yml"
diff --git a/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh b/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh
new file mode 100755
index 0000000000..9a899ef3d3
--- /dev/null
+++ b/.k8/hetzner-stage/scripts/deploy-holesky-exporters.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+  echo "Please provide DOCKERREPO"
+  exit 1
+fi
+
+if [[ -z $2 ]]; then
+  echo "Please provide IMAGETAG"
+  exit 1
+fi
+
+if [[ -z $3 ]]; then
+  echo "Please provide NAMESPACE"
+  exit 1
+fi
+
+if [[ -z $4 ]]; then
+  echo "Please provide number of replicas"
+  exit 1
+fi
+
+if [[ -z $5 ]]; then
+  echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+  exit 1
+fi
+
+if [[ -z $6 ]]; then
+  echo "Please provide k8s context"
+  exit 1
+fi
+
+if [[ -z $7 ]]; then
+  echo "Please provide domain suffix"
+  exit 1
+fi
+
+if [[ -z ${8} ]]; then
+  echo "Please provide k8s app version"
+  exit 1
+fi
+
+if [[ -z ${9} ]]; then
+  echo "Please provide exporter cpu limit"
+  exit 1
+fi
+
+if [[ -z ${10} ]]; then
+  echo "Please provide exporter mem limit"
+  exit 1
+fi
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+EXPORTER_CPU_LIMIT=$9
+EXPORTER_MEM_LIMIT=${10}
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $EXPORTER_CPU_LIMIT
+echo $EXPORTER_MEM_LIMIT
+
+# create namespace if not exists
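Like the node scripts, the exporter script templates its manifest with `sed -i`, which permanently rewrites the checked-out YAML for the remainder of the job. That is harmless in a throwaway CI workspace; a hedged alternative renders to stdout and pipes straight into kubectl, leaving the checkout untouched:

# Sketch: same substitutions, no in-place edit (only two -e expressions
# shown; the remaining ones carry over unchanged from the loop below).
for file in "${DEPLOY_FILES[@]}"; do
  sed -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" \
      "${DIR}/${file}" | kubectl --context="$K8S_CONTEXT" apply -f - || exit 1
done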
+if ! kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then
+  echo "$NAMESPACE created"
+  kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE
+fi
+
+DIR=".k8/hetzner-stage"
+DEPLOY_FILES=(
+  "ssv-exporter-holesky.yml"
+)
+
+if [[ -d $DIR ]]; then
+  for file in "${DEPLOY_FILES[@]}"; do
+    sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \
+      -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \
+      -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \
+      -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \
+      -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \
+      -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \
+      -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \
+      -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1
+  done
+fi
+
+#deploy
+for file in "${DEPLOY_FILES[@]}"; do
+  kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1
+done
diff --git a/.k8/hetzner-stage/ssv-exporter-holesky.yml b/.k8/hetzner-stage/ssv-exporter-holesky.yml
new file mode 100644
index 0000000000..10fb398390
--- /dev/null
+++ b/.k8/hetzner-stage/ssv-exporter-holesky.yml
@@ -0,0 +1,152 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: ssv-exporter-holesky
+  namespace: REPLACE_NAMESPACE
+  labels:
+    app: ssv-exporter-holesky
+spec:
+  type: ClusterIP
+  ports:
+    - port: 12013
+      protocol: UDP
+      targetPort: 12013
+      name: port-12013
+    - port: 13013
+      protocol: TCP
+      targetPort: 13013
+      name: port-13013
+    - port: 14013
+      protocol: TCP
+      targetPort: 14013
+      name: port-14013
+    - port: 15013
+      protocol: TCP
+      targetPort: 15013
+      name: port-15013
+    - port: 16013
+      protocol: TCP
+      targetPort: 16013
+      name: port-16013
+  selector:
+    app: ssv-exporter-holesky
+---
+apiVersion: REPLACE_API_VERSION
+kind: Deployment
+metadata:
+  labels:
+    app: ssv-exporter-holesky
+  name: ssv-exporter-holesky
+  namespace: REPLACE_NAMESPACE
+spec:
+  replicas: REPLACE_REPLICAS
+  strategy:
+    type: Recreate
+  selector:
+    matchLabels:
+      app: ssv-exporter-holesky
+  template:
+    metadata:
+      labels:
+        app: ssv-exporter-holesky
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: kubernetes.io/role
+                    operator: In
+                    values:
+                      - ssv-exporter
+      containers:
+        - name: ssv-exporter-holesky
+          image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG
+          imagePullPolicy: Always
+          resources:
+            limits:
+              cpu: REPLACE_EXPORTER_CPU_LIMIT
+              memory: REPLACE_EXPORTER_MEM_LIMIT
+          command: ["make", "start-node"]
+          ports:
+            - containerPort: 12013
+              name: port-12013
+              hostPort: 12013
+              protocol: UDP
+            - containerPort: 13013
+              name: port-13013
+              hostPort: 13013
+            - containerPort: 14013
+              name: port-14013
+              hostPort: 14013
+            - containerPort: 15013
+              name: port-15013
+              hostPort: 15013
+            - containerPort: 16013
+              name: port-16013
+              hostPort: 16013
+          env:
+            - name: SHARE_CONFIG
+              value: "./data/share.yaml"
+            - name: CONFIG_PATH
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: config_path
+            - name: ABI_VERSION
+              valueFrom:
+                secretKeyRef:
+                  name: config-secrets
+                  key: abi_version
+                  optional: true
+            - name: LOG_LEVEL
+              value: "debug"
+            - name: DEBUG_SERVICES
+              value: "ssv.*"
+            - name: DISCOVERY_TYPE_KEY
+              value: "discv5"
+            - name: DB_PATH
+              value: "./data/db-holesky-stage"
+            - name: NETWORK
+              value: "holesky-stage"
+            - name: DB_REPORTING
+              value: "false"
+            - name: METRICS_API_PORT
+              value: "15013"
+            - name: SSV_API_PORT
+              value: "16013"
+            - name: ENABLE_PROFILE
+              value: "true"
+            - name: UDP_PORT
+              value: "12013"
+            - name: TCP_PORT
+              value: "13013"
+            - name: WS_API_PORT
+              value:
"14013" + - name: FULLNODE + value: "true" + - name: EXPORTER + value: "true" + - name: DISCOVERY_TRACE + value: "false" + - name: PUBSUB_TRACE + value: "false" + - name: SUBNETS + value: "0xffffffffffffffffffffffffffffffff" + volumeMounts: + - mountPath: /data + name: ssv-exporter-holesky + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-exporter-holesky-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-exporter-holesky + persistentVolumeClaim: + claimName: ssv-exporter-holesky + - name: ssv-exporter-holesky-cm + configMap: + name: ssv-exporter-holesky-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-1-deployment.yml b/.k8/hetzner-stage/ssv-node-1-deployment.yml new file mode 100644 index 0000000000..9b11ffbce6 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-1-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-1-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-1 +spec: + type: ClusterIP + ports: + - port: 12001 + protocol: UDP + targetPort: 12001 + name: port-12001 + - port: 13001 + protocol: TCP + targetPort: 13001 + name: port-13001 + - port: 15001 + protocol: TCP + targetPort: 15001 + name: port-15001 + - port: 16001 + protocol: TCP + targetPort: 16001 + name: port-16001 + selector: + app: ssv-node-1 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-1 + name: ssv-node-1 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-1 + template: + metadata: + labels: + app: ssv-node-1 + spec: + containers: + - name: ssv-node-1 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12001 + name: port-12001 + hostPort: 12001 + protocol: UDP + - containerPort: 13001 + name: port-13001 + hostPort: 13001 + - containerPort: 15001 + name: port-15001 + hostPort: 15001 + - containerPort: 16001 + name: port-16001 + hostPort: 16001 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15001" + - name: SSV_API_PORT + value: "16001" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-1 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-1-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-1 + persistentVolumeClaim: + claimName: ssv-node-1 + - name: ssv-node-1-cm + configMap: + name: ssv-node-1-cm + hostNetwork: true diff --git a/.k8/stage/ssv-node-10-deployment.yml b/.k8/hetzner-stage/ssv-node-10-deployment.yml similarity index 70% rename from .k8/stage/ssv-node-10-deployment.yml rename to .k8/hetzner-stage/ssv-node-10-deployment.yml index ce73488cf3..051cf589d4 100644 --- a/.k8/stage/ssv-node-10-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-10-deployment.yml @@ -36,7 +36,7 @@ metadata: name: ssv-node-10 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: @@ -47,18 +47,10 @@ spec: labels: app: ssv-node-10 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - name: ssv-node-10 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -106,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -128,34 +120,14 @@ spec: name: ssv-node-10 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-10 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 + name: ssv-node-10-cm + imagePullSecrets: + - name: ecr-repo volumes: - name: ssv-node-10 persistentVolumeClaim: claimName: ssv-node-10 - - name: ssv-cm-validator-options-10 + - name: ssv-node-10-cm configMap: - name: ssv-cm-validator-options-10 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-10-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-11-deployment.yml b/.k8/hetzner-stage/ssv-node-11-deployment.yml similarity index 70% rename from .k8/stage/ssv-node-11-deployment.yml rename to .k8/hetzner-stage/ssv-node-11-deployment.yml index 2bddd3cdeb..e15bdb7b49 100644 --- a/.k8/stage/ssv-node-11-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-11-deployment.yml @@ -36,7 +36,7 @@ metadata: name: ssv-node-11 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: @@ -47,18 +47,10 @@ spec: labels: app: 
ssv-node-11 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - name: ssv-node-11 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -106,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -128,34 +120,14 @@ spec: name: ssv-node-11 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-11 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 + name: ssv-node-11-cm + imagePullSecrets: + - name: ecr-repo volumes: - name: ssv-node-11 persistentVolumeClaim: claimName: ssv-node-11 - - name: ssv-cm-validator-options-11 + - name: ssv-node-11-cm configMap: - name: ssv-cm-validator-options-11 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-11-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-12-deployment.yml b/.k8/hetzner-stage/ssv-node-12-deployment.yml similarity index 70% rename from .k8/stage/ssv-node-12-deployment.yml rename to .k8/hetzner-stage/ssv-node-12-deployment.yml index f06afa878f..ebcc12a1ac 100644 --- a/.k8/stage/ssv-node-12-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-12-deployment.yml @@ -36,7 +36,7 @@ metadata: name: ssv-node-12 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: @@ -47,18 +47,10 @@ spec: labels: app: ssv-node-12 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - name: ssv-node-12 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -106,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -128,34 +120,14 @@ spec: name: ssv-node-12 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-12 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 + name: ssv-node-12-cm + imagePullSecrets: + - name: ecr-repo volumes: - name: ssv-node-12 persistentVolumeClaim: claimName: ssv-node-12 - - name: ssv-cm-validator-options-12 + - name: ssv-node-12-cm configMap: - name: ssv-cm-validator-options-12 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: 
ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-12-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-13-deployment.yml b/.k8/hetzner-stage/ssv-node-13-deployment.yml new file mode 100644 index 0000000000..53f1bae513 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-13-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-13-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-13 +spec: + type: ClusterIP + ports: + - port: 12013 + protocol: UDP + targetPort: 12013 + name: port-12013 + - port: 13013 + protocol: TCP + targetPort: 13013 + name: port-13013 + - port: 15013 + protocol: TCP + targetPort: 15013 + name: port-15013 + - port: 16013 + protocol: TCP + targetPort: 16013 + name: port-16013 + selector: + app: ssv-node-13 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-13 + name: ssv-node-13 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-13 + template: + metadata: + labels: + app: ssv-node-13 + spec: + containers: + - name: ssv-node-13 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12013 + name: port-12013 + protocol: UDP + hostPort: 12013 + - containerPort: 13013 + name: port-13013 + hostPort: 13013 + - containerPort: 15013 + name: port-15013 + hostPort: 15013 + - containerPort: 16013 + name: port-16013 + hostPort: 16013 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15013" + - name: SSV_API_PORT + value: "16013" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-13 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-13-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-13 + persistentVolumeClaim: + claimName: ssv-node-13 + - name: ssv-node-13-cm + configMap: + name: ssv-node-13-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-14-deployment.yml b/.k8/hetzner-stage/ssv-node-14-deployment.yml new file mode 100644 index 0000000000..65f47bc363 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-14-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-14-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-14 +spec: + type: ClusterIP + ports: + - port: 12014 + protocol: UDP + targetPort: 12014 + name: port-12014 + - port: 13014 + protocol: TCP + targetPort: 13014 + name: port-13014 + - port: 15014 + protocol: TCP + targetPort: 15014 + name: port-15014 + - port: 16014 + protocol: TCP + targetPort: 16014 + name: port-16014 + selector: + app: ssv-node-14 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-14 + name: ssv-node-14 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-14 + template: + metadata: + labels: + app: ssv-node-14 + spec: + containers: + - name: ssv-node-14 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12014 + name: port-12014 + protocol: UDP + hostPort: 12014 + - containerPort: 13014 + name: port-13014 + hostPort: 13014 + - containerPort: 15014 + name: port-15014 + hostPort: 15014 + - containerPort: 16014 + name: port-16014 + hostPort: 16014 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
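+          # --- editor's note (observation, not part of the original PR): every ssv-node-N
+          # manifest follows one port convention, and the env values below must stay in sync
+          # with the Service ports and hostPorts declared above:
+          #   12000+N/UDP  discovery  (hostPort + Service port)
+          #   13000+N/TCP  p2p        (hostPort + Service port)
+          #   15000+N/TCP  metrics    (METRICS_API_PORT)
+          #   16000+N/TCP  SSV API    (SSV_API_PORT)
+          # For node 14 these are 12014, 13014, 15014 and 16014.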
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15014" + - name: SSV_API_PORT + value: "16014" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-14 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-14-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-14 + persistentVolumeClaim: + claimName: ssv-node-14 + - name: ssv-node-14-cm + configMap: + name: ssv-node-14-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-15-deployment.yml b/.k8/hetzner-stage/ssv-node-15-deployment.yml new file mode 100644 index 0000000000..ec59df9720 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-15-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-15-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-15 +spec: + type: ClusterIP + ports: + - port: 12015 + protocol: UDP + targetPort: 12015 + name: port-12015 + - port: 13015 + protocol: TCP + targetPort: 13015 + name: port-13015 + - port: 15015 + protocol: TCP + targetPort: 15015 + name: port-15015 + - port: 16015 + protocol: TCP + targetPort: 16015 + name: port-16015 + selector: + app: ssv-node-15 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-15 + name: ssv-node-15 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-15 + template: + metadata: + labels: + app: ssv-node-15 + spec: + containers: + - name: ssv-node-15 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12015 + name: port-12015 + protocol: UDP + hostPort: 12015 + - containerPort: 13015 + name: port-13015 + hostPort: 13015 + - containerPort: 15015 + name: port-15015 + hostPort: 15015 + - containerPort: 16015 + name: port-16015 + hostPort: 16015 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15015" + - name: SSV_API_PORT + value: "16015" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-15 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-15-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-15 + persistentVolumeClaim: + claimName: ssv-node-15 + - name: ssv-node-15-cm + configMap: + name: ssv-node-15-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-16-deployment.yml b/.k8/hetzner-stage/ssv-node-16-deployment.yml new file mode 100644 index 0000000000..f25f60b70c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-16-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-16-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-16 +spec: + type: ClusterIP + ports: + - port: 12016 + protocol: UDP + targetPort: 12016 + name: port-12016 + - port: 13016 + protocol: TCP + targetPort: 13016 + name: port-13016 + - port: 15016 + protocol: TCP + targetPort: 15016 + name: port-15016 + - port: 16016 + protocol: TCP + targetPort: 16016 + name: port-16016 + selector: + app: ssv-node-16 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-16 + name: ssv-node-16 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-16 + template: + metadata: + labels: + app: ssv-node-16 + spec: + containers: + - name: ssv-node-16 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12016 + name: port-12016 + protocol: UDP + hostPort: 12016 + - containerPort: 13016 + name: port-13016 + hostPort: 13016 + - containerPort: 15016 + name: port-15016 + hostPort: 15016 + - containerPort: 16016 + name: port-16016 + hostPort: 16016 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15016" + - name: SSV_API_PORT + value: "16016" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-16 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-16-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-16 + persistentVolumeClaim: + claimName: ssv-node-16 + - name: ssv-node-16-cm + configMap: + name: ssv-node-16-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-17-deployment.yml b/.k8/hetzner-stage/ssv-node-17-deployment.yml new file mode 100644 index 0000000000..14561ef74c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-17-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-17-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-17 +spec: + type: ClusterIP + ports: + - port: 12017 + protocol: UDP + targetPort: 12017 + name: port-12017 + - port: 13017 + protocol: TCP + targetPort: 13017 + name: port-13017 + - port: 15017 + protocol: TCP + targetPort: 15017 + name: port-15017 + - port: 16017 + protocol: TCP + targetPort: 16017 + name: port-16017 + selector: + app: ssv-node-17 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-17 + name: ssv-node-17 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-17 + template: + metadata: + labels: + app: ssv-node-17 + spec: + containers: + - name: ssv-node-17 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12017 + name: port-12017 + protocol: UDP + hostPort: 12017 + - containerPort: 13017 + name: port-13017 + hostPort: 13017 + - containerPort: 15017 + name: port-15017 + hostPort: 15017 + - containerPort: 16017 + name: port-16017 + hostPort: 16017 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
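+          # --- editor's sketch (an assumption, not part of the original PR): with
+          # hostNetwork: true each pod binds its hostPorts directly on the node, so two pods
+          # using the same ports can never be scheduled onto one host; hence every deployment
+          # gets a disjoint port range and replicas pinned to 1. If spreading across hosts
+          # ever needed to be explicit, a pod anti-affinity rule would express the same
+          # constraint declaratively:
+          #   affinity:
+          #     podAntiAffinity:
+          #       requiredDuringSchedulingIgnoredDuringExecution:
+          #         - labelSelector:
+          #             matchLabels:
+          #               app: ssv-node-17
+          #           topologyKey: kubernetes.io/hostname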
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15017" + - name: SSV_API_PORT + value: "16017" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-17 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-17-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-17 + persistentVolumeClaim: + claimName: ssv-node-17 + - name: ssv-node-17-cm + configMap: + name: ssv-node-17-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-18-deployment.yml b/.k8/hetzner-stage/ssv-node-18-deployment.yml new file mode 100644 index 0000000000..40ac470dd3 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-18-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-18-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-18 +spec: + type: ClusterIP + ports: + - port: 12018 + protocol: UDP + targetPort: 12018 + name: port-12018 + - port: 13018 + protocol: TCP + targetPort: 13018 + name: port-13018 + - port: 15018 + protocol: TCP + targetPort: 15018 + name: port-15018 + - port: 16018 + protocol: TCP + targetPort: 16018 + name: port-16018 + selector: + app: ssv-node-18 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-18 + name: ssv-node-18 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-18 + template: + metadata: + labels: + app: ssv-node-18 + spec: + containers: + - name: ssv-node-18 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12018 + name: port-12018 + protocol: UDP + hostPort: 12018 + - containerPort: 13018 + name: port-13018 + hostPort: 13018 + - containerPort: 15018 + name: port-15018 + hostPort: 15018 + - containerPort: 16018 + name: port-16018 + hostPort: 16018 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15018" + - name: SSV_API_PORT + value: "16018" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-18 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-18-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-18 + persistentVolumeClaim: + claimName: ssv-node-18 + - name: ssv-node-18-cm + configMap: + name: ssv-node-18-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-19-deployment.yml b/.k8/hetzner-stage/ssv-node-19-deployment.yml new file mode 100644 index 0000000000..a266c88e48 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-19-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-19-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-19 +spec: + type: ClusterIP + ports: + - port: 12019 + protocol: UDP + targetPort: 12019 + name: port-12019 + - port: 13019 + protocol: TCP + targetPort: 13019 + name: port-13019 + - port: 15019 + protocol: TCP + targetPort: 15019 + name: port-15019 + - port: 16019 + protocol: TCP + targetPort: 16019 + name: port-16019 + selector: + app: ssv-node-19 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-19 + name: ssv-node-19 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-19 + template: + metadata: + labels: + app: ssv-node-19 + spec: + containers: + - name: ssv-node-19 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12019 + name: port-12019 + protocol: UDP + hostPort: 12019 + - containerPort: 13019 + name: port-13019 + hostPort: 13019 + - containerPort: 15019 + name: port-15019 + hostPort: 15019 + - containerPort: 16019 + name: port-16019 + hostPort: 16019 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15019" + - name: SSV_API_PORT + value: "16019" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-19 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-19-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-19 + persistentVolumeClaim: + claimName: ssv-node-19 + - name: ssv-node-19-cm + configMap: + name: ssv-node-19-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-2-deployment.yml b/.k8/hetzner-stage/ssv-node-2-deployment.yml new file mode 100644 index 0000000000..f98472bdf2 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-2-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-2-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-2 +spec: + type: ClusterIP + ports: + - port: 12002 + protocol: UDP + targetPort: 12002 + name: port-12002 + - port: 13002 + protocol: TCP + targetPort: 13002 + name: port-13002 + - port: 15002 + protocol: TCP + targetPort: 15002 + name: port-15002 + - port: 16002 + protocol: TCP + targetPort: 16002 + name: port-16002 + selector: + app: ssv-node-2 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-2 + name: ssv-node-2 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-2 + template: + metadata: + labels: + app: ssv-node-2 + spec: + containers: + - name: ssv-node-2 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12002 + name: port-12002 + protocol: UDP + hostPort: 12002 + - containerPort: 13002 + name: port-13002 + hostPort: 13002 + - containerPort: 15002 + name: port-15002 + hostPort: 15002 + - containerPort: 16002 + name: port-16002 + hostPort: 16002 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15002" + - name: SSV_API_PORT + value: "16002" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-2 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-2-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-2 + persistentVolumeClaim: + claimName: ssv-node-2 + - name: ssv-node-2-cm + configMap: + name: ssv-node-2-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-20-deployment.yml b/.k8/hetzner-stage/ssv-node-20-deployment.yml new file mode 100644 index 0000000000..2e4cc9792d --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-20-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-20-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-20 +spec: + type: ClusterIP + ports: + - port: 12020 + protocol: UDP + targetPort: 12020 + name: port-12020 + - port: 13020 + protocol: TCP + targetPort: 13020 + name: port-13020 + - port: 15020 + protocol: TCP + targetPort: 15020 + name: port-15020 + - port: 16020 + protocol: TCP + targetPort: 16020 + name: port-16020 + selector: + app: ssv-node-20 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-20 + name: ssv-node-20 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-20 + template: + metadata: + labels: + app: ssv-node-20 + spec: + containers: + - name: ssv-node-20 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12020 + name: port-12020 + protocol: UDP + hostPort: 12020 + - containerPort: 13020 + name: port-13020 + hostPort: 13020 + - containerPort: 15020 + name: port-15020 + hostPort: 15020 + - containerPort: 16020 + name: port-16020 + hostPort: 16020 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
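+          # --- editor's note (interpretation, not stated in the PR): the ports are already
+          # published on the host via hostNetwork/hostPort, so the ClusterIP Service above
+          # mainly provides a stable in-cluster DNS name, e.g. for scraping metrics; the
+          # /metrics path is an assumption:
+          #   http://ssv-node-20-svc.<namespace>.svc.cluster.local:15020/metrics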
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15020" + - name: SSV_API_PORT + value: "16020" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-20 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-20-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-20 + persistentVolumeClaim: + claimName: ssv-node-20 + - name: ssv-node-20-cm + configMap: + name: ssv-node-20-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-21-deployment.yml b/.k8/hetzner-stage/ssv-node-21-deployment.yml new file mode 100644 index 0000000000..7e7a28c0fa --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-21-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-21-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-21 +spec: + type: ClusterIP + ports: + - port: 12021 + protocol: UDP + targetPort: 12021 + name: port-12021 + - port: 13021 + protocol: TCP + targetPort: 13021 + name: port-13021 + - port: 15021 + protocol: TCP + targetPort: 15021 + name: port-15021 + - port: 16021 + protocol: TCP + targetPort: 16021 + name: port-16021 + selector: + app: ssv-node-21 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-21 + name: ssv-node-21 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-21 + template: + metadata: + labels: + app: ssv-node-21 + spec: + containers: + - name: ssv-node-21 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12021 + name: port-12021 + protocol: UDP + hostPort: 12021 + - containerPort: 13021 + name: port-13021 + hostPort: 13021 + - containerPort: 15021 + name: port-15021 + hostPort: 15021 + - containerPort: 16021 + name: port-16021 + hostPort: 16021 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15021" + - name: SSV_API_PORT + value: "16021" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-21 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-21-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-21 + persistentVolumeClaim: + claimName: ssv-node-21 + - name: ssv-node-21-cm + configMap: + name: ssv-node-21-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-22-deployment.yml b/.k8/hetzner-stage/ssv-node-22-deployment.yml new file mode 100644 index 0000000000..1459d26dc6 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-22-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-22-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-22 +spec: + type: ClusterIP + ports: + - port: 12022 + protocol: UDP + targetPort: 12022 + name: port-12022 + - port: 13022 + protocol: TCP + targetPort: 13022 + name: port-13022 + - port: 15022 + protocol: TCP + targetPort: 15022 + name: port-15022 + - port: 16022 + protocol: TCP + targetPort: 16022 + name: port-16022 + selector: + app: ssv-node-22 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-22 + name: ssv-node-22 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-22 + template: + metadata: + labels: + app: ssv-node-22 + spec: + containers: + - name: ssv-node-22 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12022 + name: port-12022 + protocol: UDP + hostPort: 12022 + - containerPort: 13022 + name: port-13022 + hostPort: 13022 + - containerPort: 15022 + name: port-15022 + hostPort: 15022 + - containerPort: 16022 + name: port-16022 + hostPort: 16022 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
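+          # --- editor's note (interpretation, not stated in the PR): `strategy: type: Recreate`
+          # matters here because the pod holds a persistentVolumeClaim (presumably
+          # ReadWriteOnce) and fixed hostPorts; a RollingUpdate would leave the old and new
+          # pod contending for both, so the old pod must be torn down before its
+          # replacement starts.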
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15022" + - name: SSV_API_PORT + value: "16022" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-22 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-22-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-22 + persistentVolumeClaim: + claimName: ssv-node-22 + - name: ssv-node-22-cm + configMap: + name: ssv-node-22-cm + hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-1-deployment.yml b/.k8/hetzner-stage/ssv-node-23-deployment.yml similarity index 52% rename from .k8/stage/ssv-node-v3-1-deployment.yml rename to .k8/hetzner-stage/ssv-node-23-deployment.yml index 59eeab296a..a5eeac635c 100644 --- a/.k8/stage/ssv-node-v3-1-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-23-deployment.yml @@ -2,67 +2,55 @@ apiVersion: v1 kind: Service metadata: - name: ssv-node-v3-1-svc + name: ssv-node-23-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-node-v3-1 + app: ssv-node-23 spec: type: ClusterIP ports: - - port: 12301 + - port: 12023 protocol: UDP - targetPort: 12301 - name: port-12301 - - port: 13301 + targetPort: 12023 + name: port-12023 + - port: 13023 protocol: TCP - targetPort: 13301 - name: port-13301 - - port: 15301 + targetPort: 13023 + name: port-13023 + - port: 15023 protocol: TCP - targetPort: 15301 - name: port-15301 - - port: 16301 + targetPort: 15023 + name: port-15023 + - port: 16023 protocol: TCP - targetPort: 16301 - name: port-16301 - - port: 16301 - protocol: TCP - targetPort: 16301 - name: port-16301 + targetPort: 16023 + name: port-16023 selector: - app: ssv-node-v3-1 + app: ssv-node-23 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-node-v3-1 - name: ssv-node-v3-1 + app: ssv-node-23 + name: ssv-node-23 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: matchLabels: - app: ssv-node-v3-1 + app: ssv-node-23 template: metadata: labels: - app: ssv-node-v3-1 + app: ssv-node-23 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-node-v3-1 + - name: ssv-node-23 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -70,19 +58,19 @@ spec: memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12301 - name: port-12301 + - containerPort: 12023 + name: port-12023 protocol: UDP - hostPort: 12301 - - containerPort: 13301 - name: port-13301 - hostPort: 13301 - - containerPort: 15301 - name: port-15301 - hostPort: 15301 - - containerPort: 16301 - name: port-16301 - hostPort: 16301 + hostPort: 12023 + - containerPort: 13023 + name: port-13023 + hostPort: 13023 + - containerPort: 15023 + name: port-15023 + hostPort: 15023 + - containerPort: 16023 + name: port-16023 + hostPort: 16023 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -97,14 +85,10 @@ spec: name: config-secrets key: 
abi_version optional: true - - name: DEBUG_SERVICES - value: "ssv/*." - name: LOG_LEVEL value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" + - name: DEBUG_SERVICES + value: "ssv/*." - name: DISCOVERY_TYPE_KEY value: "discv5" - name: CONSENSUS_TYPE @@ -114,36 +98,36 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" + - name: DB_REPORTING + value: "false" - name: METRICS_API_PORT - value: "15301" + value: "15023" - name: SSV_API_PORT - value: "16301" + value: "16023" - name: ENABLE_PROFILE value: "true" - - name: WS_API_PORT - value: "16301" - - name: FULLNODE - value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' - name: BUILDER_PROPOSALS value: "true" volumeMounts: - mountPath: /data - name: ssv-node-v3-1 + name: ssv-node-23 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-v3-1 + name: ssv-node-23-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-node-v3-1 + - name: ssv-node-23 persistentVolumeClaim: - claimName: ssv-node-v3-1 - - name: ssv-cm-validator-options-v3-1 + claimName: ssv-node-23 + - name: ssv-node-23-cm configMap: - name: ssv-cm-validator-options-v3-1 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-23-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-2-deployment.yml b/.k8/hetzner-stage/ssv-node-24-deployment.yml similarity index 55% rename from .k8/stage/ssv-node-v3-2-deployment.yml rename to .k8/hetzner-stage/ssv-node-24-deployment.yml index 2daed3c6a8..5cb1e41b5f 100644 --- a/.k8/stage/ssv-node-v3-2-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-24-deployment.yml @@ -2,63 +2,55 @@ apiVersion: v1 kind: Service metadata: - name: ssv-node-v3-2-svc + name: ssv-node-24-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-node-v3-2 + app: ssv-node-24 spec: type: ClusterIP ports: - - port: 12302 + - port: 12424 protocol: UDP - targetPort: 12302 - name: port-12302 - - port: 13302 + targetPort: 12424 + name: port-12424 + - port: 13024 protocol: TCP - targetPort: 13302 - name: port-13302 - - port: 15302 + targetPort: 13024 + name: port-13024 + - port: 15024 protocol: TCP - targetPort: 15302 - name: port-15302 - - port: 16302 + targetPort: 15024 + name: port-15024 + - port: 16024 protocol: TCP - targetPort: 16302 - name: port-16302 + targetPort: 16024 + name: port-16024 selector: - app: ssv-node-v3-2 + app: ssv-node-24 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-node-v3-2 - name: ssv-node-v3-2 + app: ssv-node-24 + name: ssv-node-24 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: matchLabels: - app: ssv-node-v3-2 + app: ssv-node-24 template: metadata: labels: - app: ssv-node-v3-2 + app: ssv-node-24 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-node-v3-2 + - name: ssv-node-24 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -66,19 +58,19 @@ spec: memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12302 - name: port-12302 + - containerPort: 12424 + name: port-12424 protocol: 
UDP - hostPort: 12302 - - containerPort: 13302 - name: port-13302 - hostPort: 13302 - - containerPort: 15302 - name: port-15302 - hostPort: 15302 - - containerPort: 16302 - name: port-16302 - hostPort: 16302 + hostPort: 12424 + - containerPort: 13024 + name: port-13024 + hostPort: 13024 + - containerPort: 15024 + name: port-15024 + hostPort: 15024 + - containerPort: 16024 + name: port-16024 + hostPort: 16024 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -93,14 +85,10 @@ spec: name: config-secrets key: abi_version optional: true - - name: DEBUG_SERVICES - value: "ssv/*." - name: LOG_LEVEL value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" + - name: DEBUG_SERVICES + value: "ssv/*." - name: DISCOVERY_TYPE_KEY value: "discv5" - name: CONSENSUS_TYPE @@ -110,32 +98,36 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" + - name: DB_REPORTING + value: "false" - name: METRICS_API_PORT - value: "15302" + value: "15024" - name: SSV_API_PORT - value: "16302" + value: "16024" - name: ENABLE_PROFILE value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' - name: BUILDER_PROPOSALS value: "true" volumeMounts: - mountPath: /data - name: ssv-node-v3-2 + name: ssv-node-24 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-v3-2 + name: ssv-node-24-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-node-v3-2 + - name: ssv-node-24 persistentVolumeClaim: - claimName: ssv-node-v3-2 - - name: ssv-cm-validator-options-v3-2 + claimName: ssv-node-24 + - name: ssv-node-24-cm configMap: - name: ssv-cm-validator-options-v3-2 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-24-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-3-deployment.yml b/.k8/hetzner-stage/ssv-node-25-deployment.yml similarity index 52% rename from .k8/stage/ssv-node-v3-3-deployment.yml rename to .k8/hetzner-stage/ssv-node-25-deployment.yml index 64bfbbe759..ccd6e42cf2 100644 --- a/.k8/stage/ssv-node-v3-3-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-25-deployment.yml @@ -2,67 +2,55 @@ apiVersion: v1 kind: Service metadata: - name: ssv-node-v3-3-svc + name: ssv-node-25-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-node-v3-3 + app: ssv-node-25 spec: type: ClusterIP ports: - - port: 12303 + - port: 12025 protocol: UDP - targetPort: 12303 - name: port-12303 - - port: 13303 + targetPort: 12025 + name: port-12025 + - port: 13025 protocol: TCP - targetPort: 13303 - name: port-13303 - - port: 15303 + targetPort: 13025 + name: port-13025 + - port: 15025 protocol: TCP - targetPort: 15303 - name: port-15303 - - port: 16303 + targetPort: 15025 + name: port-15025 + - port: 16025 protocol: TCP - targetPort: 16303 - name: port-16303 - - port: 16303 - protocol: TCP - targetPort: 16303 - name: port-16303 + targetPort: 16025 + name: port-16025 selector: - app: ssv-node-v3-3 + app: ssv-node-25 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-node-v3-3 - name: ssv-node-v3-3 + app: ssv-node-25 + name: ssv-node-25 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: matchLabels: - app: ssv-node-v3-3 + app: ssv-node-25 template: metadata: labels: - app: ssv-node-v3-3 + app: ssv-node-25 spec: - affinity: - nodeAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-node-v3-3 + - name: ssv-node-25 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -70,19 +58,19 @@ spec: memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12303 - name: port-12303 + - containerPort: 12025 + name: port-12025 protocol: UDP - hostPort: 12303 - - containerPort: 13303 - name: port-13303 - hostPort: 13303 - - containerPort: 15303 - name: port-15303 - hostPort: 15303 - - containerPort: 16303 - name: port-16303 - hostPort: 16303 + hostPort: 12025 + - containerPort: 13025 + name: port-13025 + hostPort: 13025 + - containerPort: 15025 + name: port-15025 + hostPort: 15025 + - containerPort: 16025 + name: port-16025 + hostPort: 16025 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -97,14 +85,10 @@ spec: name: config-secrets key: abi_version optional: true - - name: DEBUG_SERVICES - value: "ssv/*." - name: LOG_LEVEL value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" + - name: DEBUG_SERVICES + value: "ssv/*." - name: DISCOVERY_TYPE_KEY value: "discv5" - name: CONSENSUS_TYPE @@ -114,36 +98,36 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" + - name: DB_REPORTING + value: "false" - name: METRICS_API_PORT - value: "15303" + value: "15025" - name: SSV_API_PORT - value: "16303" + value: "16025" - name: ENABLE_PROFILE value: "true" - - name: WS_API_PORT - value: "16303" - - name: FULLNODE - value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' - name: BUILDER_PROPOSALS value: "true" volumeMounts: - mountPath: /data - name: ssv-node-v3-3 + name: ssv-node-25 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-v3-3 + name: ssv-node-25-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-node-v3-3 + - name: ssv-node-25 persistentVolumeClaim: - claimName: ssv-node-v3-3 - - name: ssv-cm-validator-options-v3-3 + claimName: ssv-node-25 + - name: ssv-node-25-cm configMap: - name: ssv-cm-validator-options-v3-3 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-25-cm hostNetwork: true diff --git a/.k8/stage/ssv-node-v3-4-deployment.yml b/.k8/hetzner-stage/ssv-node-26-deployment.yml similarity index 55% rename from .k8/stage/ssv-node-v3-4-deployment.yml rename to .k8/hetzner-stage/ssv-node-26-deployment.yml index b13efb4f5c..396e7360f1 100644 --- a/.k8/stage/ssv-node-v3-4-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-26-deployment.yml @@ -2,63 +2,55 @@ apiVersion: v1 kind: Service metadata: - name: ssv-node-v3-4-svc + name: ssv-node-26-svc namespace: REPLACE_NAMESPACE labels: - app: ssv-node-v3-4 + app: ssv-node-26 spec: type: ClusterIP ports: - - port: 12304 + - port: 12026 protocol: UDP - targetPort: 12304 - name: port-12304 - - port: 13304 + targetPort: 12026 + name: port-12026 + - port: 13026 protocol: TCP - targetPort: 13304 - name: port-13304 - - port: 15304 + targetPort: 13026 + name: port-13026 + - port: 15026 protocol: TCP - targetPort: 15304 - name: port-15304 - - port: 16304 + targetPort: 15026 + name: port-15026 + - port: 16026 protocol: TCP - targetPort: 16304 - name: 
port-16304 + targetPort: 16026 + name: port-16026 selector: - app: ssv-node-v3-4 + app: ssv-node-26 --- apiVersion: REPLACE_API_VERSION kind: Deployment metadata: labels: - app: ssv-node-v3-4 - name: ssv-node-v3-4 + app: ssv-node-26 + name: ssv-node-26 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: matchLabels: - app: ssv-node-v3-4 + app: ssv-node-26 template: metadata: labels: - app: ssv-node-v3-4 + app: ssv-node-26 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - - name: ssv-node-v3-4 + - name: ssv-node-26 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -66,19 +58,19 @@ spec: memory: REPLACE_NODES_MEM_LIMIT command: ["make", "start-node"] ports: - - containerPort: 12304 - name: port-12304 + - containerPort: 12026 + name: port-12026 protocol: UDP - hostPort: 12304 - - containerPort: 13304 - name: port-13304 - hostPort: 13304 - - containerPort: 15304 - name: port-15304 - hostPort: 15304 - - containerPort: 16304 - name: port-16304 - hostPort: 16304 + hostPort: 12026 + - containerPort: 13026 + name: port-13026 + hostPort: 13026 + - containerPort: 15026 + name: port-15026 + hostPort: 15026 + - containerPort: 16026 + name: port-16026 + hostPort: 16026 env: - name: SHARE_CONFIG value: "./data/share.yaml" @@ -93,14 +85,10 @@ spec: name: config-secrets key: abi_version optional: true - - name: DEBUG_SERVICES - value: "ssv/*." - name: LOG_LEVEL value: "debug" - - name: DB_REPORTING - value: "false" - - name: PUBSUB_TRACE - value: "false" + - name: DEBUG_SERVICES + value: "ssv/*." 
- name: DISCOVERY_TYPE_KEY value: "discv5" - name: CONSENSUS_TYPE @@ -110,32 +98,36 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" + - name: DB_REPORTING + value: "false" - name: METRICS_API_PORT - value: "15304" + value: "15026" - name: SSV_API_PORT - value: "16304" + value: "16026" - name: ENABLE_PROFILE value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' - name: BUILDER_PROPOSALS value: "true" volumeMounts: - mountPath: /data - name: ssv-node-v3-4 + name: ssv-node-26 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-v3-4 + name: ssv-node-26-cm + imagePullSecrets: + - name: ecr-repo volumes: - - name: ssv-node-v3-4 + - name: ssv-node-26 persistentVolumeClaim: - claimName: ssv-node-v3-4 - - name: ssv-cm-validator-options-v3-4 + claimName: ssv-node-26 + - name: ssv-node-26-cm configMap: - name: ssv-cm-validator-options-v3-4 - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-26-cm hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-27-deployment.yml b/.k8/hetzner-stage/ssv-node-27-deployment.yml new file mode 100644 index 0000000000..8674533272 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-27-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-27-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-27 +spec: + type: ClusterIP + ports: + - port: 12027 + protocol: UDP + targetPort: 12027 + name: port-12027 + - port: 13027 + protocol: TCP + targetPort: 13027 + name: port-13027 + - port: 15027 + protocol: TCP + targetPort: 15027 + name: port-15027 + - port: 16027 + protocol: TCP + targetPort: 16027 + name: port-16027 + selector: + app: ssv-node-27 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-27 + name: ssv-node-27 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-27 + template: + metadata: + labels: + app: ssv-node-27 + spec: + containers: + - name: ssv-node-27 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12027 + name: port-12027 + protocol: UDP + hostPort: 12027 + - containerPort: 13027 + name: port-13027 + hostPort: 13027 + - containerPort: 15027 + name: port-15027 + hostPort: 15027 + - containerPort: 16027 + name: port-16027 + hostPort: 16027 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15027" + - name: SSV_API_PORT + value: "16027" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-27 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-27-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-27 + persistentVolumeClaim: + claimName: ssv-node-27 + - name: ssv-node-27-cm + configMap: + name: ssv-node-27-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-28-deployment.yml b/.k8/hetzner-stage/ssv-node-28-deployment.yml new file mode 100644 index 0000000000..08712b773b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-28-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-28-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-28 +spec: + type: ClusterIP + ports: + - port: 12028 + protocol: UDP + targetPort: 12028 + name: port-12028 + - port: 13028 + protocol: TCP + targetPort: 13028 + name: port-13028 + - port: 15028 + protocol: TCP + targetPort: 15028 + name: port-15028 + - port: 16028 + protocol: TCP + targetPort: 16028 + name: port-16028 + selector: + app: ssv-node-28 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-28 + name: ssv-node-28 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-28 + template: + metadata: + labels: + app: ssv-node-28 + spec: + containers: + - name: ssv-node-28 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12028 + name: port-12028 + protocol: UDP + hostPort: 12028 + - containerPort: 13028 + name: port-13028 + hostPort: 13028 + - containerPort: 15028 + name: port-15028 + hostPort: 15028 + - containerPort: 16028 + name: port-16028 + hostPort: 16028 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15028" + - name: SSV_API_PORT + value: "16028" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-28 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-28-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-28 + persistentVolumeClaim: + claimName: ssv-node-28 + - name: ssv-node-28-cm + configMap: + name: ssv-node-28-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-29-deployment.yml b/.k8/hetzner-stage/ssv-node-29-deployment.yml new file mode 100644 index 0000000000..acb427576c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-29-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-29-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-29 +spec: + type: ClusterIP + ports: + - port: 12029 + protocol: UDP + targetPort: 12029 + name: port-12029 + - port: 13029 + protocol: TCP + targetPort: 13029 + name: port-13029 + - port: 15029 + protocol: TCP + targetPort: 15029 + name: port-15029 + - port: 16029 + protocol: TCP + targetPort: 16029 + name: port-16029 + selector: + app: ssv-node-29 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-29 + name: ssv-node-29 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-29 + template: + metadata: + labels: + app: ssv-node-29 + spec: + containers: + - name: ssv-node-29 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12029 + name: port-12029 + protocol: UDP + hostPort: 12029 + - containerPort: 13029 + name: port-13029 + hostPort: 13029 + - containerPort: 15029 + name: port-15029 + hostPort: 15029 + - containerPort: 16029 + name: port-16029 + hostPort: 16029 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15029" + - name: SSV_API_PORT + value: "16029" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-29 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-29-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-29 + persistentVolumeClaim: + claimName: ssv-node-29 + - name: ssv-node-29-cm + configMap: + name: ssv-node-29-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-3-deployment.yml b/.k8/hetzner-stage/ssv-node-3-deployment.yml new file mode 100644 index 0000000000..8486b720d0 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-3-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-3-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-3 +spec: + type: ClusterIP + ports: + - port: 12003 + protocol: UDP + targetPort: 12003 + name: port-12003 + - port: 13003 + protocol: TCP + targetPort: 13003 + name: port-13003 + - port: 15003 + protocol: TCP + targetPort: 15003 + name: port-15003 + - port: 16003 + protocol: TCP + targetPort: 16003 + name: port-16003 + selector: + app: ssv-node-3 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-3 + name: ssv-node-3 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-3 + template: + metadata: + labels: + app: ssv-node-3 + spec: + containers: + - name: ssv-node-3 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12003 + name: port-12003 + protocol: UDP + hostPort: 12003 + - containerPort: 13003 + name: port-13003 + hostPort: 13003 + - containerPort: 15003 + name: port-15003 + hostPort: 15003 + - containerPort: 16003 + name: port-16003 + hostPort: 16003 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15003" + - name: SSV_API_PORT + value: "16003" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-3 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-3-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-3 + persistentVolumeClaim: + claimName: ssv-node-3 + - name: ssv-node-3-cm + configMap: + name: ssv-node-3-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-30-deployment.yml b/.k8/hetzner-stage/ssv-node-30-deployment.yml new file mode 100644 index 0000000000..239bbc7302 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-30-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-30-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-30 +spec: + type: ClusterIP + ports: + - port: 12030 + protocol: UDP + targetPort: 12030 + name: port-12030 + - port: 13030 + protocol: TCP + targetPort: 13030 + name: port-13030 + - port: 15030 + protocol: TCP + targetPort: 15030 + name: port-15030 + - port: 16030 + protocol: TCP + targetPort: 16030 + name: port-16030 + selector: + app: ssv-node-30 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-30 + name: ssv-node-30 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-30 + template: + metadata: + labels: + app: ssv-node-30 + spec: + containers: + - name: ssv-node-30 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12030 + name: port-12030 + protocol: UDP + hostPort: 12030 + - containerPort: 13030 + name: port-13030 + hostPort: 13030 + - containerPort: 15030 + name: port-15030 + hostPort: 15030 + - containerPort: 16030 + name: port-16030 + hostPort: 16030 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15030" + - name: SSV_API_PORT + value: "16030" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-30 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-30-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-30 + persistentVolumeClaim: + claimName: ssv-node-30 + - name: ssv-node-30-cm + configMap: + name: ssv-node-30-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-31-deployment.yml b/.k8/hetzner-stage/ssv-node-31-deployment.yml new file mode 100644 index 0000000000..af78e460ce --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-31-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-31-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-31 +spec: + type: ClusterIP + ports: + - port: 12031 + protocol: UDP + targetPort: 12031 + name: port-12031 + - port: 13031 + protocol: TCP + targetPort: 13031 + name: port-13031 + - port: 15031 + protocol: TCP + targetPort: 15031 + name: port-15031 + - port: 16031 + protocol: TCP + targetPort: 16031 + name: port-16031 + selector: + app: ssv-node-31 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-31 + name: ssv-node-31 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-31 + template: + metadata: + labels: + app: ssv-node-31 + spec: + containers: + - name: ssv-node-31 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12031 + name: port-12031 + protocol: UDP + hostPort: 12031 + - containerPort: 13031 + name: port-13031 + hostPort: 13031 + - containerPort: 15031 + name: port-15031 + hostPort: 15031 + - containerPort: 16031 + name: port-16031 + hostPort: 16031 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15031" + - name: SSV_API_PORT + value: "16031" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-31 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-31-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-31 + persistentVolumeClaim: + claimName: ssv-node-31 + - name: ssv-node-31-cm + configMap: + name: ssv-node-31-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-32-deployment.yml b/.k8/hetzner-stage/ssv-node-32-deployment.yml new file mode 100644 index 0000000000..d6567ac81e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-32-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-32-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-32 +spec: + type: ClusterIP + ports: + - port: 12032 + protocol: UDP + targetPort: 12032 + name: port-12032 + - port: 13032 + protocol: TCP + targetPort: 13032 + name: port-13032 + - port: 15032 + protocol: TCP + targetPort: 15032 + name: port-15032 + - port: 16032 + protocol: TCP + targetPort: 16032 + name: port-16032 + selector: + app: ssv-node-32 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-32 + name: ssv-node-32 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-32 + template: + metadata: + labels: + app: ssv-node-32 + spec: + containers: + - name: ssv-node-32 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12032 + name: port-12032 + protocol: UDP + hostPort: 12032 + - containerPort: 13032 + name: port-13032 + hostPort: 13032 + - containerPort: 15032 + name: port-15032 + hostPort: 15032 + - containerPort: 16032 + name: port-16032 + hostPort: 16032 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15032" + - name: SSV_API_PORT + value: "16032" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-32 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-32-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-32 + persistentVolumeClaim: + claimName: ssv-node-32 + - name: ssv-node-32-cm + configMap: + name: ssv-node-32-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-33-deployment.yml b/.k8/hetzner-stage/ssv-node-33-deployment.yml new file mode 100644 index 0000000000..6b72d090df --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-33-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-33-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-33 +spec: + type: ClusterIP + ports: + - port: 12033 + protocol: UDP + targetPort: 12033 + name: port-12033 + - port: 13033 + protocol: TCP + targetPort: 13033 + name: port-13033 + - port: 15033 + protocol: TCP + targetPort: 15033 + name: port-15033 + - port: 16033 + protocol: TCP + targetPort: 16033 + name: port-16033 + selector: + app: ssv-node-33 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-33 + name: ssv-node-33 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-33 + template: + metadata: + labels: + app: ssv-node-33 + spec: + containers: + - name: ssv-node-33 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12033 + name: port-12033 + protocol: UDP + hostPort: 12033 + - containerPort: 13033 + name: port-13033 + hostPort: 13033 + - containerPort: 15033 + name: port-15033 + hostPort: 15033 + - containerPort: 16033 + name: port-16033 + hostPort: 16033 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15033" + - name: SSV_API_PORT + value: "16033" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-33 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-33-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-33 + persistentVolumeClaim: + claimName: ssv-node-33 + - name: ssv-node-33-cm + configMap: + name: ssv-node-33-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-34-deployment.yml b/.k8/hetzner-stage/ssv-node-34-deployment.yml new file mode 100644 index 0000000000..363b7b16d3 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-34-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-34-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-34 +spec: + type: ClusterIP + ports: + - port: 12034 + protocol: UDP + targetPort: 12034 + name: port-12034 + - port: 13034 + protocol: TCP + targetPort: 13034 + name: port-13034 + - port: 15034 + protocol: TCP + targetPort: 15034 + name: port-15034 + - port: 16034 + protocol: TCP + targetPort: 16034 + name: port-16034 + selector: + app: ssv-node-34 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-34 + name: ssv-node-34 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-34 + template: + metadata: + labels: + app: ssv-node-34 + spec: + containers: + - name: ssv-node-34 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12034 + name: port-12034 + protocol: UDP + hostPort: 12034 + - containerPort: 13034 + name: port-13034 + hostPort: 13034 + - containerPort: 15034 + name: port-15034 + hostPort: 15034 + - containerPort: 16034 + name: port-16034 + hostPort: 16034 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15034" + - name: SSV_API_PORT + value: "16034" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-34 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-34-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-34 + persistentVolumeClaim: + claimName: ssv-node-34 + - name: ssv-node-34-cm + configMap: + name: ssv-node-34-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-35-deployment.yml b/.k8/hetzner-stage/ssv-node-35-deployment.yml new file mode 100644 index 0000000000..0693b7da9d --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-35-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-35-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-35 +spec: + type: ClusterIP + ports: + - port: 12035 + protocol: UDP + targetPort: 12035 + name: port-12035 + - port: 13035 + protocol: TCP + targetPort: 13035 + name: port-13035 + - port: 15035 + protocol: TCP + targetPort: 15035 + name: port-15035 + - port: 16035 + protocol: TCP + targetPort: 16035 + name: port-16035 + selector: + app: ssv-node-35 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-35 + name: ssv-node-35 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-35 + template: + metadata: + labels: + app: ssv-node-35 + spec: + containers: + - name: ssv-node-35 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12035 + name: port-12035 + protocol: UDP + hostPort: 12035 + - containerPort: 13035 + name: port-13035 + hostPort: 13035 + - containerPort: 15035 + name: port-15035 + hostPort: 15035 + - containerPort: 16035 + name: port-16035 + hostPort: 16035 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15035" + - name: SSV_API_PORT + value: "16035" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-35 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-35-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-35 + persistentVolumeClaim: + claimName: ssv-node-35 + - name: ssv-node-35-cm + configMap: + name: ssv-node-35-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-36-deployment.yml b/.k8/hetzner-stage/ssv-node-36-deployment.yml new file mode 100644 index 0000000000..65a1566a23 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-36-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-36-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-36 +spec: + type: ClusterIP + ports: + - port: 12036 + protocol: UDP + targetPort: 12036 + name: port-12036 + - port: 13036 + protocol: TCP + targetPort: 13036 + name: port-13036 + - port: 15036 + protocol: TCP + targetPort: 15036 + name: port-15036 + - port: 16036 + protocol: TCP + targetPort: 16036 + name: port-16036 + selector: + app: ssv-node-36 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-36 + name: ssv-node-36 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-36 + template: + metadata: + labels: + app: ssv-node-36 + spec: + containers: + - name: ssv-node-36 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12036 + name: port-12036 + protocol: UDP + hostPort: 12036 + - containerPort: 13036 + name: port-13036 + hostPort: 13036 + - containerPort: 15036 + name: port-15036 + hostPort: 15036 + - containerPort: 16036 + name: port-16036 + hostPort: 16036 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15036" + - name: SSV_API_PORT + value: "16036" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-36 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-36-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-36 + persistentVolumeClaim: + claimName: ssv-node-36 + - name: ssv-node-36-cm + configMap: + name: ssv-node-36-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-37-deployment.yml b/.k8/hetzner-stage/ssv-node-37-deployment.yml new file mode 100644 index 0000000000..3c312c1560 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-37-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-37-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-37 +spec: + type: ClusterIP + ports: + - port: 12037 + protocol: UDP + targetPort: 12037 + name: port-12037 + - port: 13037 + protocol: TCP + targetPort: 13037 + name: port-13037 + - port: 15037 + protocol: TCP + targetPort: 15037 + name: port-15037 + - port: 16037 + protocol: TCP + targetPort: 16037 + name: port-16037 + selector: + app: ssv-node-37 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-37 + name: ssv-node-37 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-37 + template: + metadata: + labels: + app: ssv-node-37 + spec: + containers: + - name: ssv-node-37 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12037 + name: port-12037 + protocol: UDP + hostPort: 12037 + - containerPort: 13037 + name: port-13037 + hostPort: 13037 + - containerPort: 15037 + name: port-15037 + hostPort: 15037 + - containerPort: 16037 + name: port-16037 + hostPort: 16037 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
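# NOTE (editor's annotation): every node pins METRICS_API_PORT and
# SSV_API_PORT to the same 15xxx/16xxx values it exposes as containerPort,
# hostPort, and Service port, so the env block and the ports blocks must be
# kept in sync whenever a node is added. A quick smoke test (sketch only;
# assumes the namespace has been substituted and that metrics are served on
# the conventional /metrics path):
#
#   kubectl -n "$NAMESPACE" port-forward deploy/ssv-node-37 15037:15037 &
#   curl -s http://127.0.0.1:15037/metrics | head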
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15037" + - name: SSV_API_PORT + value: "16037" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-37 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-37-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-37 + persistentVolumeClaim: + claimName: ssv-node-37 + - name: ssv-node-37-cm + configMap: + name: ssv-node-37-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-38-deployment.yml b/.k8/hetzner-stage/ssv-node-38-deployment.yml new file mode 100644 index 0000000000..ba3e0dacb2 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-38-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-38-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-38 +spec: + type: ClusterIP + ports: + - port: 12038 + protocol: UDP + targetPort: 12038 + name: port-12038 + - port: 13038 + protocol: TCP + targetPort: 13038 + name: port-13038 + - port: 15038 + protocol: TCP + targetPort: 15038 + name: port-15038 + - port: 16038 + protocol: TCP + targetPort: 16038 + name: port-16038 + selector: + app: ssv-node-38 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-38 + name: ssv-node-38 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-38 + template: + metadata: + labels: + app: ssv-node-38 + spec: + containers: + - name: ssv-node-38 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12038 + name: port-12038 + protocol: UDP + hostPort: 12038 + - containerPort: 13038 + name: port-13038 + hostPort: 13038 + - containerPort: 15038 + name: port-15038 + hostPort: 15038 + - containerPort: 16038 + name: port-16038 + hostPort: 16038 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15038" + - name: SSV_API_PORT + value: "16038" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-38 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-38-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-38 + persistentVolumeClaim: + claimName: ssv-node-38 + - name: ssv-node-38-cm + configMap: + name: ssv-node-38-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-39-deployment.yml b/.k8/hetzner-stage/ssv-node-39-deployment.yml new file mode 100644 index 0000000000..cef15eed57 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-39-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-39-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-39 +spec: + type: ClusterIP + ports: + - port: 12039 + protocol: UDP + targetPort: 12039 + name: port-12039 + - port: 13039 + protocol: TCP + targetPort: 13039 + name: port-13039 + - port: 15039 + protocol: TCP + targetPort: 15039 + name: port-15039 + - port: 16039 + protocol: TCP + targetPort: 16039 + name: port-16039 + selector: + app: ssv-node-39 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-39 + name: ssv-node-39 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-39 + template: + metadata: + labels: + app: ssv-node-39 + spec: + containers: + - name: ssv-node-39 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12039 + name: port-12039 + protocol: UDP + hostPort: 12039 + - containerPort: 13039 + name: port-13039 + hostPort: 13039 + - containerPort: 15039 + name: port-15039 + hostPort: 15039 + - containerPort: 16039 + name: port-16039 + hostPort: 16039 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15039" + - name: SSV_API_PORT + value: "16039" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-39 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-39-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-39 + persistentVolumeClaim: + claimName: ssv-node-39 + - name: ssv-node-39-cm + configMap: + name: ssv-node-39-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-4-deployment.yml b/.k8/hetzner-stage/ssv-node-4-deployment.yml new file mode 100644 index 0000000000..758473cb70 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-4-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-4-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-4 +spec: + type: ClusterIP + ports: + - port: 12004 + protocol: UDP + targetPort: 12004 + name: port-12004 + - port: 13004 + protocol: TCP + targetPort: 13004 + name: port-13004 + - port: 15004 + protocol: TCP + targetPort: 15004 + name: port-15004 + - port: 16004 + protocol: TCP + targetPort: 16004 + name: port-16004 + selector: + app: ssv-node-4 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-4 + name: ssv-node-4 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-4 + template: + metadata: + labels: + app: ssv-node-4 + spec: + containers: + - name: ssv-node-4 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12004 + name: port-12004 + protocol: UDP + hostPort: 12004 + - containerPort: 13004 + name: port-13004 + hostPort: 13004 + - containerPort: 15004 + name: port-15004 + hostPort: 15004 + - containerPort: 16004 + name: port-16004 + hostPort: 16004 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15004" + - name: SSV_API_PORT + value: "16004" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-4 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-4-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-4 + persistentVolumeClaim: + claimName: ssv-node-4 + - name: ssv-node-4-cm + configMap: + name: ssv-node-4-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-40-deployment.yml b/.k8/hetzner-stage/ssv-node-40-deployment.yml new file mode 100644 index 0000000000..022eded9fd --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-40-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-40-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-40 +spec: + type: ClusterIP + ports: + - port: 12040 + protocol: UDP + targetPort: 12040 + name: port-12040 + - port: 13040 + protocol: TCP + targetPort: 13040 + name: port-13040 + - port: 15040 + protocol: TCP + targetPort: 15040 + name: port-15040 + - port: 16040 + protocol: TCP + targetPort: 16040 + name: port-16040 + selector: + app: ssv-node-40 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-40 + name: ssv-node-40 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-40 + template: + metadata: + labels: + app: ssv-node-40 + spec: + containers: + - name: ssv-node-40 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12040 + name: port-12040 + protocol: UDP + hostPort: 12040 + - containerPort: 13040 + name: port-13040 + hostPort: 13040 + - containerPort: 15040 + name: port-15040 + hostPort: 15040 + - containerPort: 16040 + name: port-16040 + hostPort: 16040 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15040" + - name: SSV_API_PORT + value: "16040" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-40 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-40-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-40 + persistentVolumeClaim: + claimName: ssv-node-40 + - name: ssv-node-40-cm + configMap: + name: ssv-node-40-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-41-deployment.yml b/.k8/hetzner-stage/ssv-node-41-deployment.yml new file mode 100644 index 0000000000..b2fc6fcad1 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-41-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-41-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-41 +spec: + type: ClusterIP + ports: + - port: 12041 + protocol: UDP + targetPort: 12041 + name: port-12041 + - port: 13041 + protocol: TCP + targetPort: 13041 + name: port-13041 + - port: 15041 + protocol: TCP + targetPort: 15041 + name: port-15041 + - port: 16041 + protocol: TCP + targetPort: 16041 + name: port-16041 + selector: + app: ssv-node-41 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-41 + name: ssv-node-41 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-41 + template: + metadata: + labels: + app: ssv-node-41 + spec: + containers: + - name: ssv-node-41 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12041 + name: port-12041 + protocol: UDP + hostPort: 12041 + - containerPort: 13041 + name: port-13041 + hostPort: 13041 + - containerPort: 15041 + name: port-15041 + hostPort: 15041 + - containerPort: 16041 + name: port-16041 + hostPort: 16041 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15041" + - name: SSV_API_PORT + value: "16041" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-41 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-41-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-41 + persistentVolumeClaim: + claimName: ssv-node-41 + - name: ssv-node-41-cm + configMap: + name: ssv-node-41-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-42-deployment.yml b/.k8/hetzner-stage/ssv-node-42-deployment.yml new file mode 100644 index 0000000000..3664aeca45 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-42-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-42-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-42 +spec: + type: ClusterIP + ports: + - port: 12042 + protocol: UDP + targetPort: 12042 + name: port-12042 + - port: 13042 + protocol: TCP + targetPort: 13042 + name: port-13042 + - port: 15042 + protocol: TCP + targetPort: 15042 + name: port-15042 + - port: 16042 + protocol: TCP + targetPort: 16042 + name: port-16042 + selector: + app: ssv-node-42 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-42 + name: ssv-node-42 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-42 + template: + metadata: + labels: + app: ssv-node-42 + spec: + containers: + - name: ssv-node-42 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12042 + name: port-12042 + protocol: UDP + hostPort: 12042 + - containerPort: 13042 + name: port-13042 + hostPort: 13042 + - containerPort: 15042 + name: port-15042 + hostPort: 15042 + - containerPort: 16042 + name: port-16042 + hostPort: 16042 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15042" + - name: SSV_API_PORT + value: "16042" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-42 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-42-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-42 + persistentVolumeClaim: + claimName: ssv-node-42 + - name: ssv-node-42-cm + configMap: + name: ssv-node-42-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-43-deployment.yml b/.k8/hetzner-stage/ssv-node-43-deployment.yml new file mode 100644 index 0000000000..a9cd4f9b95 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-43-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-43-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-43 +spec: + type: ClusterIP + ports: + - port: 12043 + protocol: UDP + targetPort: 12043 + name: port-12043 + - port: 13043 + protocol: TCP + targetPort: 13043 + name: port-13043 + - port: 15043 + protocol: TCP + targetPort: 15043 + name: port-15043 + - port: 16043 + protocol: TCP + targetPort: 16043 + name: port-16043 + selector: + app: ssv-node-43 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-43 + name: ssv-node-43 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-43 + template: + metadata: + labels: + app: ssv-node-43 + spec: + containers: + - name: ssv-node-43 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12043 + name: port-12043 + protocol: UDP + hostPort: 12043 + - containerPort: 13043 + name: port-13043 + hostPort: 13043 + - containerPort: 15043 + name: port-15043 + hostPort: 15043 + - containerPort: 16043 + name: port-16043 + hostPort: 16043 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15043" + - name: SSV_API_PORT + value: "16043" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-43 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-43-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-43 + persistentVolumeClaim: + claimName: ssv-node-43 + - name: ssv-node-43-cm + configMap: + name: ssv-node-43-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-44-deployment.yml b/.k8/hetzner-stage/ssv-node-44-deployment.yml new file mode 100644 index 0000000000..01d0e22a17 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-44-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-44-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-44 +spec: + type: ClusterIP + ports: + - port: 12044 + protocol: UDP + targetPort: 12044 + name: port-12044 + - port: 13044 + protocol: TCP + targetPort: 13044 + name: port-13044 + - port: 15044 + protocol: TCP + targetPort: 15044 + name: port-15044 + - port: 16044 + protocol: TCP + targetPort: 16044 + name: port-16044 + selector: + app: ssv-node-44 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-44 + name: ssv-node-44 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-44 + template: + metadata: + labels: + app: ssv-node-44 + spec: + containers: + - name: ssv-node-44 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12044 + name: port-12044 + protocol: UDP + hostPort: 12044 + - containerPort: 13044 + name: port-13044 + hostPort: 13044 + - containerPort: 15044 + name: port-15044 + hostPort: 15044 + - containerPort: 16044 + name: port-16044 + hostPort: 16044 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15044" + - name: SSV_API_PORT + value: "16044" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-44 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-44-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-44 + persistentVolumeClaim: + claimName: ssv-node-44 + - name: ssv-node-44-cm + configMap: + name: ssv-node-44-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-45-deployment.yml b/.k8/hetzner-stage/ssv-node-45-deployment.yml new file mode 100644 index 0000000000..81c4760282 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-45-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-45-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-45 +spec: + type: ClusterIP + ports: + - port: 12045 + protocol: UDP + targetPort: 12045 + name: port-12045 + - port: 13045 + protocol: TCP + targetPort: 13045 + name: port-13045 + - port: 15045 + protocol: TCP + targetPort: 15045 + name: port-15045 + - port: 16045 + protocol: TCP + targetPort: 16045 + name: port-16045 + selector: + app: ssv-node-45 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-45 + name: ssv-node-45 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-45 + template: + metadata: + labels: + app: ssv-node-45 + spec: + containers: + - name: ssv-node-45 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12045 + name: port-12045 + protocol: UDP + hostPort: 12045 + - containerPort: 13045 + name: port-13045 + hostPort: 13045 + - containerPort: 15045 + name: port-15045 + hostPort: 15045 + - containerPort: 16045 + name: port-16045 + hostPort: 16045 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15045" + - name: SSV_API_PORT + value: "16045" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-45 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-45-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-45 + persistentVolumeClaim: + claimName: ssv-node-45 + - name: ssv-node-45-cm + configMap: + name: ssv-node-45-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-46-deployment.yml b/.k8/hetzner-stage/ssv-node-46-deployment.yml new file mode 100644 index 0000000000..57526b672c --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-46-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-46-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-46 +spec: + type: ClusterIP + ports: + - port: 12046 + protocol: UDP + targetPort: 12046 + name: port-12046 + - port: 13046 + protocol: TCP + targetPort: 13046 + name: port-13046 + - port: 15046 + protocol: TCP + targetPort: 15046 + name: port-15046 + - port: 16046 + protocol: TCP + targetPort: 16046 + name: port-16046 + selector: + app: ssv-node-46 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-46 + name: ssv-node-46 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-46 + template: + metadata: + labels: + app: ssv-node-46 + spec: + containers: + - name: ssv-node-46 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12046 + name: port-12046 + protocol: UDP + hostPort: 12046 + - containerPort: 13046 + name: port-13046 + hostPort: 13046 + - containerPort: 15046 + name: port-15046 + hostPort: 15046 + - containerPort: 16046 + name: port-16046 + hostPort: 16046 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15046" + - name: SSV_API_PORT + value: "16046" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-46 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-46-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-46 + persistentVolumeClaim: + claimName: ssv-node-46 + - name: ssv-node-46-cm + configMap: + name: ssv-node-46-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-47-deployment.yml b/.k8/hetzner-stage/ssv-node-47-deployment.yml new file mode 100644 index 0000000000..8d832b2158 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-47-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-47-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-47 +spec: + type: ClusterIP + ports: + - port: 12047 + protocol: UDP + targetPort: 12047 + name: port-12047 + - port: 13047 + protocol: TCP + targetPort: 13047 + name: port-13047 + - port: 15047 + protocol: TCP + targetPort: 15047 + name: port-15047 + - port: 16047 + protocol: TCP + targetPort: 16047 + name: port-16047 + selector: + app: ssv-node-47 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-47 + name: ssv-node-47 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-47 + template: + metadata: + labels: + app: ssv-node-47 + spec: + containers: + - name: ssv-node-47 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12047 + name: port-12047 + protocol: UDP + hostPort: 12047 + - containerPort: 13047 + name: port-13047 + hostPort: 13047 + - containerPort: 15047 + name: port-15047 + hostPort: 15047 + - containerPort: 16047 + name: port-16047 + hostPort: 16047 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15047" + - name: SSV_API_PORT + value: "16047" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-47 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-47-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-47 + persistentVolumeClaim: + claimName: ssv-node-47 + - name: ssv-node-47-cm + configMap: + name: ssv-node-47-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-48-deployment.yml b/.k8/hetzner-stage/ssv-node-48-deployment.yml new file mode 100644 index 0000000000..3c6fcbc533 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-48-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-48-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-48 +spec: + type: ClusterIP + ports: + - port: 12048 + protocol: UDP + targetPort: 12048 + name: port-12048 + - port: 13048 + protocol: TCP + targetPort: 13048 + name: port-13048 + - port: 15048 + protocol: TCP + targetPort: 15048 + name: port-15048 + - port: 16048 + protocol: TCP + targetPort: 16048 + name: port-16048 + selector: + app: ssv-node-48 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-48 + name: ssv-node-48 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-48 + template: + metadata: + labels: + app: ssv-node-48 + spec: + containers: + - name: ssv-node-48 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12048 + name: port-12048 + protocol: UDP + hostPort: 12048 + - containerPort: 13048 + name: port-13048 + hostPort: 13048 + - containerPort: 15048 + name: port-15048 + hostPort: 15048 + - containerPort: 16048 + name: port-16048 + hostPort: 16048 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15048" + - name: SSV_API_PORT + value: "16048" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-48 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-48-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-48 + persistentVolumeClaim: + claimName: ssv-node-48 + - name: ssv-node-48-cm + configMap: + name: ssv-node-48-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-49-deployment.yml b/.k8/hetzner-stage/ssv-node-49-deployment.yml new file mode 100644 index 0000000000..16c168c0c0 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-49-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-49-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-49 +spec: + type: ClusterIP + ports: + - port: 12049 + protocol: UDP + targetPort: 12049 + name: port-12049 + - port: 13049 + protocol: TCP + targetPort: 13049 + name: port-13049 + - port: 15049 + protocol: TCP + targetPort: 15049 + name: port-15049 + - port: 16049 + protocol: TCP + targetPort: 16049 + name: port-16049 + selector: + app: ssv-node-49 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-49 + name: ssv-node-49 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-49 + template: + metadata: + labels: + app: ssv-node-49 + spec: + containers: + - name: ssv-node-49 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12049 + name: port-12049 + protocol: UDP + hostPort: 12049 + - containerPort: 13049 + name: port-13049 + hostPort: 13049 + - containerPort: 15049 + name: port-15049 + hostPort: 15049 + - containerPort: 16049 + name: port-16049 + hostPort: 16049 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15049" + - name: SSV_API_PORT + value: "16049" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-49 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-49-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-49 + persistentVolumeClaim: + claimName: ssv-node-49 + - name: ssv-node-49-cm + configMap: + name: ssv-node-49-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-5-deployment.yml b/.k8/hetzner-stage/ssv-node-5-deployment.yml new file mode 100644 index 0000000000..0c4f294174 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-5-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-5-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-5 +spec: + type: ClusterIP + ports: + - port: 12005 + protocol: UDP + targetPort: 12005 + name: port-12005 + - port: 13005 + protocol: TCP + targetPort: 13005 + name: port-13005 + - port: 15005 + protocol: TCP + targetPort: 15005 + name: port-15005 + - port: 16005 + protocol: TCP + targetPort: 16005 + name: port-16005 + selector: + app: ssv-node-5 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-5 + name: ssv-node-5 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-5 + template: + metadata: + labels: + app: ssv-node-5 + spec: + containers: + - name: ssv-node-5 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12005 + name: port-12005 + protocol: UDP + hostPort: 12005 + - containerPort: 13005 + name: port-13005 + hostPort: 13005 + - containerPort: 15005 + name: port-15005 + hostPort: 15005 + - containerPort: 16005 + name: port-16005 + hostPort: 16005 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15005" + - name: SSV_API_PORT + value: "16005" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-5 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-5-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-5 + persistentVolumeClaim: + claimName: ssv-node-5 + - name: ssv-node-5-cm + configMap: + name: ssv-node-5-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-50-deployment.yml b/.k8/hetzner-stage/ssv-node-50-deployment.yml new file mode 100644 index 0000000000..237964637e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-50-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-50-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-50 +spec: + type: ClusterIP + ports: + - port: 12050 + protocol: UDP + targetPort: 12050 + name: port-12050 + - port: 13050 + protocol: TCP + targetPort: 13050 + name: port-13050 + - port: 15050 + protocol: TCP + targetPort: 15050 + name: port-15050 + - port: 16050 + protocol: TCP + targetPort: 16050 + name: port-16050 + selector: + app: ssv-node-50 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-50 + name: ssv-node-50 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-50 + template: + metadata: + labels: + app: ssv-node-50 + spec: + containers: + - name: ssv-node-50 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12050 + name: port-12050 + protocol: UDP + hostPort: 12050 + - containerPort: 13050 + name: port-13050 + hostPort: 13050 + - containerPort: 15050 + name: port-15050 + hostPort: 15050 + - containerPort: 16050 + name: port-16050 + hostPort: 16050 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15050" + - name: SSV_API_PORT + value: "16050" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-50 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-50-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-50 + persistentVolumeClaim: + claimName: ssv-node-50 + - name: ssv-node-50-cm + configMap: + name: ssv-node-50-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-51-deployment.yml b/.k8/hetzner-stage/ssv-node-51-deployment.yml new file mode 100644 index 0000000000..028ac33bde --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-51-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-51-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-51 +spec: + type: ClusterIP + ports: + - port: 12051 + protocol: UDP + targetPort: 12051 + name: port-12051 + - port: 13051 + protocol: TCP + targetPort: 13051 + name: port-13051 + - port: 15051 + protocol: TCP + targetPort: 15051 + name: port-15051 + - port: 16051 + protocol: TCP + targetPort: 16051 + name: port-16051 + selector: + app: ssv-node-51 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-51 + name: ssv-node-51 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-51 + template: + metadata: + labels: + app: ssv-node-51 + spec: + containers: + - name: ssv-node-51 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12051 + name: port-12051 + protocol: UDP + hostPort: 12051 + - containerPort: 13051 + name: port-13051 + hostPort: 13051 + - containerPort: 15051 + name: port-15051 + hostPort: 15051 + - containerPort: 16051 + name: port-16051 + hostPort: 16051 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15051" + - name: SSV_API_PORT + value: "16051" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-51 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-51-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-51 + persistentVolumeClaim: + claimName: ssv-node-51 + - name: ssv-node-51-cm + configMap: + name: ssv-node-51-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-52-deployment.yml b/.k8/hetzner-stage/ssv-node-52-deployment.yml new file mode 100644 index 0000000000..9f2eb3d888 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-52-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-52-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-52 +spec: + type: ClusterIP + ports: + - port: 12052 + protocol: UDP + targetPort: 12052 + name: port-12052 + - port: 13052 + protocol: TCP + targetPort: 13052 + name: port-13052 + - port: 15052 + protocol: TCP + targetPort: 15052 + name: port-15052 + - port: 16052 + protocol: TCP + targetPort: 16052 + name: port-16052 + selector: + app: ssv-node-52 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-52 + name: ssv-node-52 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-52 + template: + metadata: + labels: + app: ssv-node-52 + spec: + containers: + - name: ssv-node-52 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12052 + name: port-12052 + protocol: UDP + hostPort: 12052 + - containerPort: 13052 + name: port-13052 + hostPort: 13052 + - containerPort: 15052 + name: port-15052 + hostPort: 15052 + - containerPort: 16052 + name: port-16052 + hostPort: 16052 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15052" + - name: SSV_API_PORT + value: "16052" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-52 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-52-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-52 + persistentVolumeClaim: + claimName: ssv-node-52 + - name: ssv-node-52-cm + configMap: + name: ssv-node-52-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-53-deployment.yml b/.k8/hetzner-stage/ssv-node-53-deployment.yml new file mode 100644 index 0000000000..68515c515b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-53-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-53-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-53 +spec: + type: ClusterIP + ports: + - port: 12053 + protocol: UDP + targetPort: 12053 + name: port-12053 + - port: 13053 + protocol: TCP + targetPort: 13053 + name: port-13053 + - port: 15053 + protocol: TCP + targetPort: 15053 + name: port-15053 + - port: 16053 + protocol: TCP + targetPort: 16053 + name: port-16053 + selector: + app: ssv-node-53 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-53 + name: ssv-node-53 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-53 + template: + metadata: + labels: + app: ssv-node-53 + spec: + containers: + - name: ssv-node-53 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12053 + name: port-12053 + protocol: UDP + hostPort: 12053 + - containerPort: 13053 + name: port-13053 + hostPort: 13053 + - containerPort: 15053 + name: port-15053 + hostPort: 15053 + - containerPort: 16053 + name: port-16053 + hostPort: 16053 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15053" + - name: SSV_API_PORT + value: "16053" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-53 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-53-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-53 + persistentVolumeClaim: + claimName: ssv-node-53 + - name: ssv-node-53-cm + configMap: + name: ssv-node-53-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-54-deployment.yml b/.k8/hetzner-stage/ssv-node-54-deployment.yml new file mode 100644 index 0000000000..9eb12dd56b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-54-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-54-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-54 +spec: + type: ClusterIP + ports: + - port: 12054 + protocol: UDP + targetPort: 12054 + name: port-12054 + - port: 13054 + protocol: TCP + targetPort: 13054 + name: port-13054 + - port: 15054 + protocol: TCP + targetPort: 15054 + name: port-15054 + - port: 16054 + protocol: TCP + targetPort: 16054 + name: port-16054 + selector: + app: ssv-node-54 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-54 + name: ssv-node-54 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-54 + template: + metadata: + labels: + app: ssv-node-54 + spec: + containers: + - name: ssv-node-54 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12054 + name: port-12054 + protocol: UDP + hostPort: 12054 + - containerPort: 13054 + name: port-13054 + hostPort: 13054 + - containerPort: 15054 + name: port-15054 + hostPort: 15054 + - containerPort: 16054 + name: port-16054 + hostPort: 16054 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15054" + - name: SSV_API_PORT + value: "16054" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-54 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-54-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-54 + persistentVolumeClaim: + claimName: ssv-node-54 + - name: ssv-node-54-cm + configMap: + name: ssv-node-54-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-55-deployment.yml b/.k8/hetzner-stage/ssv-node-55-deployment.yml new file mode 100644 index 0000000000..05a109197b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-55-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-55-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-55 +spec: + type: ClusterIP + ports: + - port: 12055 + protocol: UDP + targetPort: 12055 + name: port-12055 + - port: 13055 + protocol: TCP + targetPort: 13055 + name: port-13055 + - port: 15055 + protocol: TCP + targetPort: 15055 + name: port-15055 + - port: 16055 + protocol: TCP + targetPort: 16055 + name: port-16055 + selector: + app: ssv-node-55 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-55 + name: ssv-node-55 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-55 + template: + metadata: + labels: + app: ssv-node-55 + spec: + containers: + - name: ssv-node-55 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12055 + name: port-12055 + protocol: UDP + hostPort: 12055 + - containerPort: 13055 + name: port-13055 + hostPort: 13055 + - containerPort: 15055 + name: port-15055 + hostPort: 15055 + - containerPort: 16055 + name: port-16055 + hostPort: 16055 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15055" + - name: SSV_API_PORT + value: "16055" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-55 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-55-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-55 + persistentVolumeClaim: + claimName: ssv-node-55 + - name: ssv-node-55-cm + configMap: + name: ssv-node-55-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-56-deployment.yml b/.k8/hetzner-stage/ssv-node-56-deployment.yml new file mode 100644 index 0000000000..42c0c59b42 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-56-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-56-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-56 +spec: + type: ClusterIP + ports: + - port: 12056 + protocol: UDP + targetPort: 12056 + name: port-12056 + - port: 13056 + protocol: TCP + targetPort: 13056 + name: port-13056 + - port: 15056 + protocol: TCP + targetPort: 15056 + name: port-15056 + - port: 16056 + protocol: TCP + targetPort: 16056 + name: port-16056 + selector: + app: ssv-node-56 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-56 + name: ssv-node-56 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-56 + template: + metadata: + labels: + app: ssv-node-56 + spec: + containers: + - name: ssv-node-56 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12056 + name: port-12056 + protocol: UDP + hostPort: 12056 + - containerPort: 13056 + name: port-13056 + hostPort: 13056 + - containerPort: 15056 + name: port-15056 + hostPort: 15056 + - containerPort: 16056 + name: port-16056 + hostPort: 16056 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15056" + - name: SSV_API_PORT + value: "16056" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-56 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-56-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-56 + persistentVolumeClaim: + claimName: ssv-node-56 + - name: ssv-node-56-cm + configMap: + name: ssv-node-56-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-57-deployment.yml b/.k8/hetzner-stage/ssv-node-57-deployment.yml new file mode 100644 index 0000000000..d2d8945516 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-57-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-57-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-57 +spec: + type: ClusterIP + ports: + - port: 12057 + protocol: UDP + targetPort: 12057 + name: port-12057 + - port: 13057 + protocol: TCP + targetPort: 13057 + name: port-13057 + - port: 15057 + protocol: TCP + targetPort: 15057 + name: port-15057 + - port: 16057 + protocol: TCP + targetPort: 16057 + name: port-16057 + selector: + app: ssv-node-57 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-57 + name: ssv-node-57 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-57 + template: + metadata: + labels: + app: ssv-node-57 + spec: + containers: + - name: ssv-node-57 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12057 + name: port-12057 + protocol: UDP + hostPort: 12057 + - containerPort: 13057 + name: port-13057 + hostPort: 13057 + - containerPort: 15057 + name: port-15057 + hostPort: 15057 + - containerPort: 16057 + name: port-16057 + hostPort: 16057 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15057" + - name: SSV_API_PORT + value: "16057" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-57 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-57-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-57 + persistentVolumeClaim: + claimName: ssv-node-57 + - name: ssv-node-57-cm + configMap: + name: ssv-node-57-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-58-deployment.yml b/.k8/hetzner-stage/ssv-node-58-deployment.yml new file mode 100644 index 0000000000..21401421dd --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-58-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-58-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-58 +spec: + type: ClusterIP + ports: + - port: 12058 + protocol: UDP + targetPort: 12058 + name: port-12058 + - port: 13058 + protocol: TCP + targetPort: 13058 + name: port-13058 + - port: 15058 + protocol: TCP + targetPort: 15058 + name: port-15058 + - port: 16058 + protocol: TCP + targetPort: 16058 + name: port-16058 + selector: + app: ssv-node-58 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-58 + name: ssv-node-58 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-58 + template: + metadata: + labels: + app: ssv-node-58 + spec: + containers: + - name: ssv-node-58 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12058 + name: port-12058 + protocol: UDP + hostPort: 12058 + - containerPort: 13058 + name: port-13058 + hostPort: 13058 + - containerPort: 15058 + name: port-15058 + hostPort: 15058 + - containerPort: 16058 + name: port-16058 + hostPort: 16058 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15058" + - name: SSV_API_PORT + value: "16058" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-58 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-58-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-58 + persistentVolumeClaim: + claimName: ssv-node-58 + - name: ssv-node-58-cm + configMap: + name: ssv-node-58-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-59-deployment.yml b/.k8/hetzner-stage/ssv-node-59-deployment.yml new file mode 100644 index 0000000000..8cefa6ba7b --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-59-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-59-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-59 +spec: + type: ClusterIP + ports: + - port: 12059 + protocol: UDP + targetPort: 12059 + name: port-12059 + - port: 13059 + protocol: TCP + targetPort: 13059 + name: port-13059 + - port: 15059 + protocol: TCP + targetPort: 15059 + name: port-15059 + - port: 16059 + protocol: TCP + targetPort: 16059 + name: port-16059 + selector: + app: ssv-node-59 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-59 + name: ssv-node-59 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-59 + template: + metadata: + labels: + app: ssv-node-59 + spec: + containers: + - name: ssv-node-59 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12059 + name: port-12059 + protocol: UDP + hostPort: 12059 + - containerPort: 13059 + name: port-13059 + hostPort: 13059 + - containerPort: 15059 + name: port-15059 + hostPort: 15059 + - containerPort: 16059 + name: port-16059 + hostPort: 16059 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
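Because every pod runs with hostNetwork: true and pins its four ports via hostPort (12NNN/UDP, presumably discv5 discovery; 13NNN/TCP; 15NNN/TCP for the metrics API; 16NNN/TCP for the SSV API, with NNN the zero-padded node number), a post-deploy check mostly amounts to confirming the rollout and where the pod landed. A minimal smoke check might look like the sketch below; the resource names follow the manifests in this PR, but the script itself is illustrative and not part of it.

#!/bin/bash
# Sketch of a post-deploy smoke check for a single node. Deployment name
# ssv-node-N and label app=ssv-node-N follow the manifests above.
set -euo pipefail

NAMESPACE="${1:?usage: $0 <namespace> <node-number>}"
NODE="${2:?usage: $0 <namespace> <node-number>}"

# Wait for the Recreate rollout to finish, then show which host the pod
# was scheduled on (relevant because ports are claimed on the host).
kubectl -n "$NAMESPACE" rollout status "deployment/ssv-node-${NODE}" --timeout=120s
kubectl -n "$NAMESPACE" get pods -l "app=ssv-node-${NODE}" -o wide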
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15059" + - name: SSV_API_PORT + value: "16059" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-59 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-59-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-59 + persistentVolumeClaim: + claimName: ssv-node-59 + - name: ssv-node-59-cm + configMap: + name: ssv-node-59-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-6-deployment.yml b/.k8/hetzner-stage/ssv-node-6-deployment.yml new file mode 100644 index 0000000000..6eff03c297 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-6-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-6-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-6 +spec: + type: ClusterIP + ports: + - port: 12006 + protocol: UDP + targetPort: 12006 + name: port-12006 + - port: 13006 + protocol: TCP + targetPort: 13006 + name: port-13006 + - port: 15006 + protocol: TCP + targetPort: 15006 + name: port-15006 + - port: 16006 + protocol: TCP + targetPort: 16006 + name: port-16006 + selector: + app: ssv-node-6 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-6 + name: ssv-node-6 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-6 + template: + metadata: + labels: + app: ssv-node-6 + spec: + containers: + - name: ssv-node-6 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12006 + name: port-12006 + protocol: UDP + hostPort: 12006 + - containerPort: 13006 + name: port-13006 + hostPort: 13006 + - containerPort: 15006 + name: port-15006 + hostPort: 15006 + - containerPort: 16006 + name: port-16006 + hostPort: 16006 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15006" + - name: SSV_API_PORT + value: "16006" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-6 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-6-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-6 + persistentVolumeClaim: + claimName: ssv-node-6 + - name: ssv-node-6-cm + configMap: + name: ssv-node-6-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-60-deployment.yml b/.k8/hetzner-stage/ssv-node-60-deployment.yml new file mode 100644 index 0000000000..ca0b3dc8cd --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-60-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-60-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-60 +spec: + type: ClusterIP + ports: + - port: 12060 + protocol: UDP + targetPort: 12060 + name: port-12060 + - port: 13060 + protocol: TCP + targetPort: 13060 + name: port-13060 + - port: 15060 + protocol: TCP + targetPort: 15060 + name: port-15060 + - port: 16060 + protocol: TCP + targetPort: 16060 + name: port-16060 + selector: + app: ssv-node-60 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-60 + name: ssv-node-60 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-60 + template: + metadata: + labels: + app: ssv-node-60 + spec: + containers: + - name: ssv-node-60 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12060 + name: port-12060 + protocol: UDP + hostPort: 12060 + - containerPort: 13060 + name: port-13060 + hostPort: 13060 + - containerPort: 15060 + name: port-15060 + hostPort: 15060 + - containerPort: 16060 + name: port-16060 + hostPort: 16060 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15060" + - name: SSV_API_PORT + value: "16060" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-60 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-60-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-60 + persistentVolumeClaim: + claimName: ssv-node-60 + - name: ssv-node-60-cm + configMap: + name: ssv-node-60-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-61-deployment.yml b/.k8/hetzner-stage/ssv-node-61-deployment.yml new file mode 100644 index 0000000000..339c551727 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-61-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-61-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-61 +spec: + type: ClusterIP + ports: + - port: 12061 + protocol: UDP + targetPort: 12061 + name: port-12061 + - port: 13061 + protocol: TCP + targetPort: 13061 + name: port-13061 + - port: 15061 + protocol: TCP + targetPort: 15061 + name: port-15061 + - port: 16061 + protocol: TCP + targetPort: 16061 + name: port-16061 + selector: + app: ssv-node-61 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-61 + name: ssv-node-61 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-61 + template: + metadata: + labels: + app: ssv-node-61 + spec: + containers: + - name: ssv-node-61 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12061 + name: port-12061 + protocol: UDP + hostPort: 12061 + - containerPort: 13061 + name: port-13061 + hostPort: 13061 + - containerPort: 15061 + name: port-15061 + hostPort: 15061 + - containerPort: 16061 + name: port-16061 + hostPort: 16061 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15061" + - name: SSV_API_PORT + value: "16061" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-61 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-61-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-61 + persistentVolumeClaim: + claimName: ssv-node-61 + - name: ssv-node-61-cm + configMap: + name: ssv-node-61-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-62-deployment.yml b/.k8/hetzner-stage/ssv-node-62-deployment.yml new file mode 100644 index 0000000000..531005618a --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-62-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-62-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-62 +spec: + type: ClusterIP + ports: + - port: 12062 + protocol: UDP + targetPort: 12062 + name: port-12062 + - port: 13062 + protocol: TCP + targetPort: 13062 + name: port-13062 + - port: 15062 + protocol: TCP + targetPort: 15062 + name: port-15062 + - port: 16062 + protocol: TCP + targetPort: 16062 + name: port-16062 + selector: + app: ssv-node-62 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-62 + name: ssv-node-62 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-62 + template: + metadata: + labels: + app: ssv-node-62 + spec: + containers: + - name: ssv-node-62 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12062 + name: port-12062 + protocol: UDP + hostPort: 12062 + - containerPort: 13062 + name: port-13062 + hostPort: 13062 + - containerPort: 15062 + name: port-15062 + hostPort: 15062 + - containerPort: 16062 + name: port-16062 + hostPort: 16062 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15062" + - name: SSV_API_PORT + value: "16062" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-62 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-62-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-62 + persistentVolumeClaim: + claimName: ssv-node-62 + - name: ssv-node-62-cm + configMap: + name: ssv-node-62-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-63-deployment.yml b/.k8/hetzner-stage/ssv-node-63-deployment.yml new file mode 100644 index 0000000000..39e261a3bf --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-63-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-63-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-63 +spec: + type: ClusterIP + ports: + - port: 12063 + protocol: UDP + targetPort: 12063 + name: port-12063 + - port: 13063 + protocol: TCP + targetPort: 13063 + name: port-13063 + - port: 15063 + protocol: TCP + targetPort: 15063 + name: port-15063 + - port: 16063 + protocol: TCP + targetPort: 16063 + name: port-16063 + selector: + app: ssv-node-63 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-63 + name: ssv-node-63 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-63 + template: + metadata: + labels: + app: ssv-node-63 + spec: + containers: + - name: ssv-node-63 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12063 + name: port-12063 + protocol: UDP + hostPort: 12063 + - containerPort: 13063 + name: port-13063 + hostPort: 13063 + - containerPort: 15063 + name: port-15063 + hostPort: 15063 + - containerPort: 16063 + name: port-16063 + hostPort: 16063 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15063" + - name: SSV_API_PORT + value: "16063" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-63 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-63-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-63 + persistentVolumeClaim: + claimName: ssv-node-63 + - name: ssv-node-63-cm + configMap: + name: ssv-node-63-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-64-deployment.yml b/.k8/hetzner-stage/ssv-node-64-deployment.yml new file mode 100644 index 0000000000..709fc026fa --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-64-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-64-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-64 +spec: + type: ClusterIP + ports: + - port: 12064 + protocol: UDP + targetPort: 12064 + name: port-12064 + - port: 13064 + protocol: TCP + targetPort: 13064 + name: port-13064 + - port: 15064 + protocol: TCP + targetPort: 15064 + name: port-15064 + - port: 16064 + protocol: TCP + targetPort: 16064 + name: port-16064 + selector: + app: ssv-node-64 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-64 + name: ssv-node-64 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-64 + template: + metadata: + labels: + app: ssv-node-64 + spec: + containers: + - name: ssv-node-64 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12064 + name: port-12064 + protocol: UDP + hostPort: 12064 + - containerPort: 13064 + name: port-13064 + hostPort: 13064 + - containerPort: 15064 + name: port-15064 + hostPort: 15064 + - containerPort: 16064 + name: port-16064 + hostPort: 16064 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15064" + - name: SSV_API_PORT + value: "16064" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-64 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-64-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-64 + persistentVolumeClaim: + claimName: ssv-node-64 + - name: ssv-node-64-cm + configMap: + name: ssv-node-64-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-65-deployment.yml b/.k8/hetzner-stage/ssv-node-65-deployment.yml new file mode 100644 index 0000000000..7872f5efef --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-65-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-65-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-65 +spec: + type: ClusterIP + ports: + - port: 12065 + protocol: UDP + targetPort: 12065 + name: port-12065 + - port: 13065 + protocol: TCP + targetPort: 13065 + name: port-13065 + - port: 15065 + protocol: TCP + targetPort: 15065 + name: port-15065 + - port: 16065 + protocol: TCP + targetPort: 16065 + name: port-16065 + selector: + app: ssv-node-65 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-65 + name: ssv-node-65 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-65 + template: + metadata: + labels: + app: ssv-node-65 + spec: + containers: + - name: ssv-node-65 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12065 + name: port-12065 + protocol: UDP + hostPort: 12065 + - containerPort: 13065 + name: port-13065 + hostPort: 13065 + - containerPort: 15065 + name: port-15065 + hostPort: 15065 + - containerPort: 16065 + name: port-16065 + hostPort: 16065 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15065" + - name: SSV_API_PORT + value: "16065" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-65 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-65-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-65 + persistentVolumeClaim: + claimName: ssv-node-65 + - name: ssv-node-65-cm + configMap: + name: ssv-node-65-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-66-deployment.yml b/.k8/hetzner-stage/ssv-node-66-deployment.yml new file mode 100644 index 0000000000..8cf3d90cfe --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-66-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-66-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-66 +spec: + type: ClusterIP + ports: + - port: 12066 + protocol: UDP + targetPort: 12066 + name: port-12066 + - port: 13066 + protocol: TCP + targetPort: 13066 + name: port-13066 + - port: 15066 + protocol: TCP + targetPort: 15066 + name: port-15066 + - port: 16066 + protocol: TCP + targetPort: 16066 + name: port-16066 + selector: + app: ssv-node-66 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-66 + name: ssv-node-66 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-66 + template: + metadata: + labels: + app: ssv-node-66 + spec: + containers: + - name: ssv-node-66 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12066 + name: port-12066 + protocol: UDP + hostPort: 12066 + - containerPort: 13066 + name: port-13066 + hostPort: 13066 + - containerPort: 15066 + name: port-15066 + hostPort: 15066 + - containerPort: 16066 + name: port-16066 + hostPort: 16066 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
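+ # NOTE (editor): these pods run with hostNetwork: true and pin a hostPort for every containerPort,
+ # so a given node index can only be scheduled where its four ports are free; the distinct per-node
+ # port ranges are what let many of these pods share the same hosts.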
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15066" + - name: SSV_API_PORT + value: "16066" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-66 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-66-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-66 + persistentVolumeClaim: + claimName: ssv-node-66 + - name: ssv-node-66-cm + configMap: + name: ssv-node-66-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-67-deployment.yml b/.k8/hetzner-stage/ssv-node-67-deployment.yml new file mode 100644 index 0000000000..b9620a8b44 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-67-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-67-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-67 +spec: + type: ClusterIP + ports: + - port: 12067 + protocol: UDP + targetPort: 12067 + name: port-12067 + - port: 13067 + protocol: TCP + targetPort: 13067 + name: port-13067 + - port: 15067 + protocol: TCP + targetPort: 15067 + name: port-15067 + - port: 16067 + protocol: TCP + targetPort: 16067 + name: port-16067 + selector: + app: ssv-node-67 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-67 + name: ssv-node-67 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-67 + template: + metadata: + labels: + app: ssv-node-67 + spec: + containers: + - name: ssv-node-67 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12067 + name: port-12067 + protocol: UDP + hostPort: 12067 + - containerPort: 13067 + name: port-13067 + hostPort: 13067 + - containerPort: 15067 + name: port-15067 + hostPort: 15067 + - containerPort: 16067 + name: port-16067 + hostPort: 16067 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15067" + - name: SSV_API_PORT + value: "16067" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-67 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-67-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-67 + persistentVolumeClaim: + claimName: ssv-node-67 + - name: ssv-node-67-cm + configMap: + name: ssv-node-67-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-68-deployment.yml b/.k8/hetzner-stage/ssv-node-68-deployment.yml new file mode 100644 index 0000000000..b7252d580e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-68-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-68-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-68 +spec: + type: ClusterIP + ports: + - port: 12068 + protocol: UDP + targetPort: 12068 + name: port-12068 + - port: 13068 + protocol: TCP + targetPort: 13068 + name: port-13068 + - port: 15068 + protocol: TCP + targetPort: 15068 + name: port-15068 + - port: 16068 + protocol: TCP + targetPort: 16068 + name: port-16068 + selector: + app: ssv-node-68 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-68 + name: ssv-node-68 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-68 + template: + metadata: + labels: + app: ssv-node-68 + spec: + containers: + - name: ssv-node-68 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12068 + name: port-12068 + protocol: UDP + hostPort: 12068 + - containerPort: 13068 + name: port-13068 + hostPort: 13068 + - containerPort: 15068 + name: port-15068 + hostPort: 15068 + - containerPort: 16068 + name: port-16068 + hostPort: 16068 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
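+ # NOTE (editor): replicas is fixed at 1 with a Recreate strategy, presumably because each
+ # deployment owns a single PVC and a fixed set of host ports; a rolling update would briefly
+ # run two pods contending for the same volume and ports.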
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15068" + - name: SSV_API_PORT + value: "16068" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-68 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-68-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-68 + persistentVolumeClaim: + claimName: ssv-node-68 + - name: ssv-node-68-cm + configMap: + name: ssv-node-68-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-69-deployment.yml b/.k8/hetzner-stage/ssv-node-69-deployment.yml new file mode 100644 index 0000000000..6372ddf492 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-69-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-69-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-69 +spec: + type: ClusterIP + ports: + - port: 12069 + protocol: UDP + targetPort: 12069 + name: port-12069 + - port: 13069 + protocol: TCP + targetPort: 13069 + name: port-13069 + - port: 15069 + protocol: TCP + targetPort: 15069 + name: port-15069 + - port: 16069 + protocol: TCP + targetPort: 16069 + name: port-16069 + selector: + app: ssv-node-69 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-69 + name: ssv-node-69 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-69 + template: + metadata: + labels: + app: ssv-node-69 + spec: + containers: + - name: ssv-node-69 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12069 + name: port-12069 + protocol: UDP + hostPort: 12069 + - containerPort: 13069 + name: port-13069 + hostPort: 13069 + - containerPort: 15069 + name: port-15069 + hostPort: 15069 + - containerPort: 16069 + name: port-16069 + hostPort: 16069 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
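+ # NOTE (editor): SHARE_CONFIG points at ./data/share.yaml, which the per-node ConfigMap
+ # (ssv-node-69-cm here) mounts over the PVC via subPath further down; the node database itself
+ # lives on the same PVC under the DB_PATH set below.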
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15069" + - name: SSV_API_PORT + value: "16069" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-69 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-69-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-69 + persistentVolumeClaim: + claimName: ssv-node-69 + - name: ssv-node-69-cm + configMap: + name: ssv-node-69-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-7-deployment.yml b/.k8/hetzner-stage/ssv-node-7-deployment.yml new file mode 100644 index 0000000000..49101753c1 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-7-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-7-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-7 +spec: + type: ClusterIP + ports: + - port: 12007 + protocol: UDP + targetPort: 12007 + name: port-12007 + - port: 13007 + protocol: TCP + targetPort: 13007 + name: port-13007 + - port: 15007 + protocol: TCP + targetPort: 15007 + name: port-15007 + - port: 16007 + protocol: TCP + targetPort: 16007 + name: port-16007 + selector: + app: ssv-node-7 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-7 + name: ssv-node-7 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-7 + template: + metadata: + labels: + app: ssv-node-7 + spec: + containers: + - name: ssv-node-7 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12007 + name: port-12007 + protocol: UDP + hostPort: 12007 + - containerPort: 13007 + name: port-13007 + hostPort: 13007 + - containerPort: 15007 + name: port-15007 + hostPort: 15007 + - containerPort: 16007 + name: port-16007 + hostPort: 16007 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15007" + - name: SSV_API_PORT + value: "16007" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-7 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-7-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-7 + persistentVolumeClaim: + claimName: ssv-node-7 + - name: ssv-node-7-cm + configMap: + name: ssv-node-7-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-70-deployment.yml b/.k8/hetzner-stage/ssv-node-70-deployment.yml new file mode 100644 index 0000000000..d9cb6b3604 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-70-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-70-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-70 +spec: + type: ClusterIP + ports: + - port: 12070 + protocol: UDP + targetPort: 12070 + name: port-12070 + - port: 13070 + protocol: TCP + targetPort: 13070 + name: port-13070 + - port: 15070 + protocol: TCP + targetPort: 15070 + name: port-15070 + - port: 16070 + protocol: TCP + targetPort: 16070 + name: port-16070 + selector: + app: ssv-node-70 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-70 + name: ssv-node-70 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-70 + template: + metadata: + labels: + app: ssv-node-70 + spec: + containers: + - name: ssv-node-70 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12070 + name: port-12070 + protocol: UDP + hostPort: 12070 + - containerPort: 13070 + name: port-13070 + hostPort: 13070 + - containerPort: 15070 + name: port-15070 + hostPort: 15070 + - containerPort: 16070 + name: port-16070 + hostPort: 16070 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
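+ # NOTE (editor): BUILDER_PROPOSALS below appears to opt the node into blinded (builder API)
+ # block proposals; elsewhere in this patch the same flag is switched on for the mainnet nodes
+ # and switched off for the legacy stage nodes v2-5 and v2-7.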
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15070" + - name: SSV_API_PORT + value: "16070" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-70 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-70-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-70 + persistentVolumeClaim: + claimName: ssv-node-70 + - name: ssv-node-70-cm + configMap: + name: ssv-node-70-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-71-deployment.yml b/.k8/hetzner-stage/ssv-node-71-deployment.yml new file mode 100644 index 0000000000..cde1e7cd7e --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-71-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-71-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-71 +spec: + type: ClusterIP + ports: + - port: 12071 + protocol: UDP + targetPort: 12071 + name: port-12071 + - port: 13071 + protocol: TCP + targetPort: 13071 + name: port-13071 + - port: 15071 + protocol: TCP + targetPort: 15071 + name: port-15071 + - port: 16071 + protocol: TCP + targetPort: 16071 + name: port-16071 + selector: + app: ssv-node-71 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-71 + name: ssv-node-71 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-71 + template: + metadata: + labels: + app: ssv-node-71 + spec: + containers: + - name: ssv-node-71 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12071 + name: port-12071 + protocol: UDP + hostPort: 12071 + - containerPort: 13071 + name: port-13071 + hostPort: 13071 + - containerPort: 15071 + name: port-15071 + hostPort: 15071 + - containerPort: 16071 + name: port-16071 + hostPort: 16071 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15071" + - name: SSV_API_PORT + value: "16071" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-71 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-71-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-71 + persistentVolumeClaim: + claimName: ssv-node-71 + - name: ssv-node-71-cm + configMap: + name: ssv-node-71-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-72-deployment.yml b/.k8/hetzner-stage/ssv-node-72-deployment.yml new file mode 100644 index 0000000000..11b639df29 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-72-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-72-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-72 +spec: + type: ClusterIP + ports: + - port: 12072 + protocol: UDP + targetPort: 12072 + name: port-12072 + - port: 13072 + protocol: TCP + targetPort: 13072 + name: port-13072 + - port: 15072 + protocol: TCP + targetPort: 15072 + name: port-15072 + - port: 16072 + protocol: TCP + targetPort: 16072 + name: port-16072 + selector: + app: ssv-node-72 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-72 + name: ssv-node-72 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-72 + template: + metadata: + labels: + app: ssv-node-72 + spec: + containers: + - name: ssv-node-72 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12072 + name: port-12072 + protocol: UDP + hostPort: 12072 + - containerPort: 13072 + name: port-13072 + hostPort: 13072 + - containerPort: 15072 + name: port-15072 + hostPort: 15072 + - containerPort: 16072 + name: port-16072 + hostPort: 16072 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
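+ # NOTE (editor): the REPLACE_NAMESPACE, REPLACE_API_VERSION, REPLACE_DOCKER_REPO:REPLACE_IMAGETAG,
+ # REPLACE_NODES_CPU_LIMIT and REPLACE_NODES_MEM_LIMIT tokens are not valid values on their own;
+ # the deploy scripts substitute them from CI variables with sed before running kubectl apply,
+ # as the exporter deploy script later in this patch illustrates.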
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15072" + - name: SSV_API_PORT + value: "16072" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-72 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-72-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-72 + persistentVolumeClaim: + claimName: ssv-node-72 + - name: ssv-node-72-cm + configMap: + name: ssv-node-72-cm + hostNetwork: true diff --git a/.k8/hetzner-stage/ssv-node-8-deployment.yml b/.k8/hetzner-stage/ssv-node-8-deployment.yml new file mode 100644 index 0000000000..a08bd81e24 --- /dev/null +++ b/.k8/hetzner-stage/ssv-node-8-deployment.yml @@ -0,0 +1,133 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ssv-node-8-svc + namespace: REPLACE_NAMESPACE + labels: + app: ssv-node-8 +spec: + type: ClusterIP + ports: + - port: 12008 + protocol: UDP + targetPort: 12008 + name: port-12008 + - port: 13008 + protocol: TCP + targetPort: 13008 + name: port-13008 + - port: 15008 + protocol: TCP + targetPort: 15008 + name: port-15008 + - port: 16008 + protocol: TCP + targetPort: 16008 + name: port-16008 + selector: + app: ssv-node-8 +--- +apiVersion: REPLACE_API_VERSION +kind: Deployment +metadata: + labels: + app: ssv-node-8 + name: ssv-node-8 + namespace: REPLACE_NAMESPACE +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + app: ssv-node-8 + template: + metadata: + labels: + app: ssv-node-8 + spec: + containers: + - name: ssv-node-8 + image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage + imagePullPolicy: Always + resources: + limits: + cpu: REPLACE_NODES_CPU_LIMIT + memory: REPLACE_NODES_MEM_LIMIT + command: ["make", "start-node"] + ports: + - containerPort: 12008 + name: port-12008 + protocol: UDP + hostPort: 12008 + - containerPort: 13008 + name: port-13008 + hostPort: 13008 + - containerPort: 15008 + name: port-15008 + hostPort: 15008 + - containerPort: 16008 + name: port-16008 + hostPort: 16008 + env: + - name: SHARE_CONFIG + value: "./data/share.yaml" + - name: CONFIG_PATH + valueFrom: + secretKeyRef: + name: config-secrets + key: config_path + - name: ABI_VERSION + valueFrom: + secretKeyRef: + name: config-secrets + key: abi_version + optional: true + - name: LOG_LEVEL + value: "debug" + - name: DEBUG_SERVICES + value: "ssv/*." 
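+ # NOTE (editor): db-holesky-stage is a new DB_PATH relative to the db-jato-v2 path used by the
+ # old stage deployments updated later in this patch, so nodes presumably start with a fresh
+ # database while any previous data remains untouched on the volume.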
+ - name: DISCOVERY_TYPE_KEY + value: "discv5" + - name: CONSENSUS_TYPE + value: "validation" + - name: HOST_DNS + value: "" + - name: HOST_ADDRESS + value: "" + - name: DB_PATH + value: "./data/db-holesky-stage" + - name: NETWORK + value: "holesky-stage" + - name: DB_REPORTING + value: "false" + - name: METRICS_API_PORT + value: "15008" + - name: SSV_API_PORT + value: "16008" + - name: ENABLE_PROFILE + value: "true" + - name: DISCOVERY_TRACE + value: 'false' + - name: PUBSUB_TRACE + value: 'false' + - name: BUILDER_PROPOSALS + value: "true" + volumeMounts: + - mountPath: /data + name: ssv-node-8 + - mountPath: /data/share.yaml + subPath: share.yaml + name: ssv-node-8-cm + imagePullSecrets: + - name: ecr-repo + volumes: + - name: ssv-node-8 + persistentVolumeClaim: + claimName: ssv-node-8 + - name: ssv-node-8-cm + configMap: + name: ssv-node-8-cm + hostNetwork: true diff --git a/.k8/stage/ssv-node-9-deployment.yml b/.k8/hetzner-stage/ssv-node-9-deployment.yml similarity index 70% rename from .k8/stage/ssv-node-9-deployment.yml rename to .k8/hetzner-stage/ssv-node-9-deployment.yml index 37098e1a0a..1dc1e6c2a3 100644 --- a/.k8/stage/ssv-node-9-deployment.yml +++ b/.k8/hetzner-stage/ssv-node-9-deployment.yml @@ -36,7 +36,7 @@ metadata: name: ssv-node-9 namespace: REPLACE_NAMESPACE spec: - replicas: REPLACE_REPLICAS + replicas: 1 strategy: type: Recreate selector: @@ -47,18 +47,10 @@ spec: labels: app: ssv-node-9 spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/role - operator: In - values: - - ssv-main containers: - name: ssv-node-9 image: REPLACE_DOCKER_REPO:REPLACE_IMAGETAG + #image: mosheblox/ssv-preview:stage imagePullPolicy: Always resources: limits: @@ -106,9 +98,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -128,34 +120,14 @@ spec: name: ssv-node-9 - mountPath: /data/share.yaml subPath: share.yaml - name: ssv-cm-validator-options-9 -# - name: ubuntu -# image: REPLACE_HEALTH_IMAGE -# imagePullPolicy: Always -# args: [bash, -c, sleep infinity] -# volumeMounts: -# - name: ssv-nodes-health-check-cm -# mountPath: /root/http-status.sh -# subPath: http-status.sh -# livenessProbe: -# exec: -# command: -# - /bin/bash -# - /root/http-status.sh -# initialDelaySeconds: 120 -# periodSeconds: 60 + name: ssv-node-9-cm + imagePullSecrets: + - name: ecr-repo volumes: - name: ssv-node-9 persistentVolumeClaim: claimName: ssv-node-9 - - name: ssv-cm-validator-options-9 + - name: ssv-node-9-cm configMap: - name: ssv-cm-validator-options-9 -# - name: ssv-nodes-health-check-cm -# configMap: -# name: ssv-nodes-health-check-cm - tolerations: - - effect: NoSchedule - key: kubernetes.io/role - operator: Exists + name: ssv-node-9-cm hostNetwork: true diff --git a/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml index 7f2616196a..49f3ad5f29 100644 --- a/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml +++ b/.k8/production/mainnet/ssv-node-mainnet-1-deployment.yml @@ -110,6 +110,8 @@ spec: value: "16017" - name: ENABLE_PROFILE value: "true" + - name: BUILDER_PROPOSALS + value: "true" volumeMounts: - mountPath: /data name: ssv-node-mainnet-1 diff --git a/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml 
b/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml
index 2484e7c214..8cfa6f3d6b 100644
--- a/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml
+++ b/.k8/production/mainnet/ssv-node-mainnet-2-deployment.yml
@@ -110,6 +110,8 @@ spec:
 value: "16018"
 - name: ENABLE_PROFILE
 value: "true"
+ - name: BUILDER_PROPOSALS
+ value: "true"
 volumeMounts:
 - mountPath: /data
 name: ssv-node-mainnet-2
diff --git a/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml
index 2b0b836915..1b197f8f51 100644
--- a/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml
+++ b/.k8/production/mainnet/ssv-node-mainnet-3-deployment.yml
@@ -110,6 +110,8 @@ spec:
 value: "16019"
 - name: ENABLE_PROFILE
 value: "true"
+ - name: BUILDER_PROPOSALS
+ value: "true"
 volumeMounts:
 - mountPath: /data
 name: ssv-node-mainnet-3
diff --git a/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml b/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml
index 048e021889..5e83a864eb 100644
--- a/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml
+++ b/.k8/production/mainnet/ssv-node-mainnet-4-deployment.yml
@@ -110,6 +110,8 @@ spec:
 value: "16020"
 - name: ENABLE_PROFILE
 value: "true"
+ - name: BUILDER_PROPOSALS
+ value: "true"
 volumeMounts:
 - mountPath: /data
 name: ssv-node-mainnet-4
diff --git a/.k8/stage/scripts/deploy-holesky-exporters.sh b/.k8/stage/scripts/deploy-holesky-exporters.sh
new file mode 100755
index 0000000000..9a899ef3d3
--- /dev/null
+++ b/.k8/stage/scripts/deploy-holesky-exporters.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -x
+
+if [[ -z $1 ]]; then
+ echo "Please provide DOCKERREPO"
+ exit 1
+fi
+
+if [[ -z $2 ]]; then
+ echo "Please provide IMAGETAG"
+ exit 1
+fi
+
+if [[ -z $3 ]]; then
+ echo "Please provide NAMESPACE"
+ exit 1
+fi
+
+if [[ -z $4 ]]; then
+ echo "Please provide number of replicas"
+ exit 1
+fi
+
+if [[ -z $5 ]]; then
+ echo "Please provide deployment type: blox-infra-stage|blox-infra-prod"
+ exit 1
+fi
+
+if [[ -z $6 ]]; then
+ echo "Please provide k8s context"
+ exit 1
+fi
+
+if [[ -z $7 ]]; then
+ echo "Please provide domain suffix"
+ exit 1
+fi
+
+if [[ -z ${8} ]]; then
+ echo "Please provide k8s app version"
+ exit 1
+fi
+
+if [[ -z ${9} ]]; then
+ echo "Please provide exporter cpu limit"
+ exit 1
+fi
+
+if [[ -z ${10} ]]; then
+ echo "Please provide exporter mem limit"
+ exit 1
+fi
+
+DOCKERREPO=$1
+IMAGETAG=$2
+NAMESPACE=$3
+REPLICAS=$4
+DEPL_TYPE=$5
+K8S_CONTEXT=$6
+DOMAIN_SUFFIX=$7
+K8S_API_VERSION=$8
+EXPORTER_CPU_LIMIT=$9
+EXPORTER_MEM_LIMIT=${10}
+
+echo $DOCKERREPO
+echo $IMAGETAG
+echo $NAMESPACE
+echo $REPLICAS
+echo $DEPL_TYPE
+echo $K8S_CONTEXT
+echo $DOMAIN_SUFFIX
+echo $K8S_API_VERSION
+echo $EXPORTER_CPU_LIMIT
+echo $EXPORTER_MEM_LIMIT
+
+# create namespace if not exists
+if !
kubectl --context=$K8S_CONTEXT get ns | grep -q $NAMESPACE; then + echo "$NAMESPACE created" + kubectl --context=$K8S_CONTEXT create namespace $NAMESPACE +fi + +DIR=".k8/hetzner-stage" +DEPLOY_FILES=( + "ssv-exporter-holesky.yml" +) + +if [[ -d $DIR ]]; then + for file in "${DEPLOY_FILES[@]}"; do + sed -i -e "s|REPLACE_NAMESPACE|${NAMESPACE}|g" \ + -e "s|REPLACE_DOCKER_REPO|${DOCKERREPO}|g" \ + -e "s|REPLACE_REPLICAS|${REPLICAS}|g" \ + -e "s|REPLACE_DOMAIN_SUFFIX|${DOMAIN_SUFFIX}|g" \ + -e "s|REPLACE_API_VERSION|${K8S_API_VERSION}|g" \ + -e "s|REPLACE_EXPORTER_CPU_LIMIT|${EXPORTER_CPU_LIMIT}|g" \ + -e "s|REPLACE_EXPORTER_MEM_LIMIT|${EXPORTER_MEM_LIMIT}|g" \ + -e "s|REPLACE_IMAGETAG|${IMAGETAG}|g" "${DIR}/${file}" || exit 1 + done +fi + +#deploy +for file in "${DEPLOY_FILES[@]}"; do + kubectl --context=$K8S_CONTEXT apply -f "${DIR}/${file}" || exit 1 +done diff --git a/.k8/stage/ssv-node-v2-1-deployment.yml b/.k8/stage/ssv-node-v2-1-deployment.yml index 340d2a3419..a62ac399cb 100644 --- a/.k8/stage/ssv-node-v2-1-deployment.yml +++ b/.k8/stage/ssv-node-v2-1-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-2-deployment.yml b/.k8/stage/ssv-node-v2-2-deployment.yml index ccb63c8cde..bc728de072 100644 --- a/.k8/stage/ssv-node-v2-2-deployment.yml +++ b/.k8/stage/ssv-node-v2-2-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12002" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-3-deployment.yml b/.k8/stage/ssv-node-v2-3-deployment.yml index d30d7648a5..81ca74db36 100644 --- a/.k8/stage/ssv-node-v2-3-deployment.yml +++ b/.k8/stage/ssv-node-v2-3-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12003" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-4-deployment.yml b/.k8/stage/ssv-node-v2-4-deployment.yml index de012b24f7..a1b98d28a1 100644 --- a/.k8/stage/ssv-node-v2-4-deployment.yml +++ b/.k8/stage/ssv-node-v2-4-deployment.yml @@ -110,9 +110,9 @@ spec: - name: UDP_PORT value: "12004" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-5-deployment.yml b/.k8/stage/ssv-node-v2-5-deployment.yml index 8e0a8436a8..c7446bafdf 100644 --- a/.k8/stage/ssv-node-v2-5-deployment.yml +++ b/.k8/stage/ssv-node-v2-5-deployment.yml @@ -104,9 +104,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -120,7 +120,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-v2-5 diff --git a/.k8/stage/ssv-node-v2-6-deployment.yml b/.k8/stage/ssv-node-v2-6-deployment.yml 
index 1fddf2a098..b56673db9e 100644 --- a/.k8/stage/ssv-node-v2-6-deployment.yml +++ b/.k8/stage/ssv-node-v2-6-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/.k8/stage/ssv-node-v2-7-deployment.yml b/.k8/stage/ssv-node-v2-7-deployment.yml index b4a4b93e72..4e61986511 100644 --- a/.k8/stage/ssv-node-v2-7-deployment.yml +++ b/.k8/stage/ssv-node-v2-7-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT @@ -122,7 +122,7 @@ spec: - name: PUBSUB_TRACE value: 'false' - name: BUILDER_PROPOSALS - value: "true" + value: "false" volumeMounts: - mountPath: /data name: ssv-node-v2-7 diff --git a/.k8/stage/ssv-node-v2-8-deployment.yml b/.k8/stage/ssv-node-v2-8-deployment.yml index af3607ba5c..745fb3a3ea 100644 --- a/.k8/stage/ssv-node-v2-8-deployment.yml +++ b/.k8/stage/ssv-node-v2-8-deployment.yml @@ -106,9 +106,9 @@ spec: - name: HOST_ADDRESS value: "" - name: DB_PATH - value: "./data/db-jato-v2" + value: "./data/db-holesky-stage" - name: NETWORK - value: "jato-v2-stage" + value: "holesky-stage" - name: DB_REPORTING value: "false" - name: METRICS_API_PORT diff --git a/Dockerfile b/Dockerfile index 0faa9e340e..44c362dcfc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,6 @@ RUN apt-get update && \ git=1:2.39.2-1.1 \ zip=3.0-13 \ unzip=6.0-28 \ - wget=1.21.3-1+b2 \ g++=4:12.2.0-3 \ gcc-aarch64-linux-gnu=4:12.2.0-3 \ bzip2=1.0.8-5+b1 \ @@ -61,7 +60,7 @@ RUN apk -v --update add \ ca-certificates=20230506-r0 \ bash=5.2.15-r5 \ make=4.4.1-r1 \ - bind-tools=9.18.16-r0 && \ + bind-tools=9.18.19-r0 && \ rm /var/cache/apk/* COPY --from=builder /go/bin/ssvnode /go/bin/ssvnode diff --git a/beacon/goclient/goclient.go b/beacon/goclient/goclient.go index 8fe1216155..de3ed18c0d 100644 --- a/beacon/goclient/goclient.go +++ b/beacon/goclient/goclient.go @@ -20,7 +20,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" ) @@ -122,6 +122,7 @@ type Client interface { eth2client.BlindedBeaconBlockProposalProvider eth2client.BlindedBeaconBlockSubmitter eth2client.ValidatorRegistrationsSubmitter + eth2client.VoluntaryExitSubmitter } type NodeClientProvider interface { @@ -147,7 +148,7 @@ type goClient struct { } // New init new client and go-client instance -func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.OperatorID, slotTicker slot_ticker.Ticker) (beaconprotocol.BeaconNode, error) { +func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.OperatorID, slotTickerProvider slotticker.Provider) (beaconprotocol.BeaconNode, error) { logger.Info("consensus client: connecting", fields.Address(opt.BeaconNodeAddr), fields.Network(string(opt.Network.BeaconNetwork))) httpClient, err := http.New(opt.Context, @@ -161,9 +162,6 @@ func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.Op return nil, errors.WithMessage(err, "failed to create http client") } - tickerChan := make(chan phase0.Slot, 32) - 
slotTicker.Subscribe(tickerChan) - client := &goClient{ log: logger, ctx: opt.Context, @@ -190,7 +188,7 @@ func New(logger *zap.Logger, opt beaconprotocol.Options, operatorID spectypes.Op ) // Start registration submitter. - go client.registrationSubmitter(tickerChan) + go client.registrationSubmitter(slotTickerProvider) return client, nil } diff --git a/beacon/goclient/proposer.go b/beacon/goclient/proposer.go index cb48d5e33c..38d7f4f565 100644 --- a/beacon/goclient/proposer.go +++ b/beacon/goclient/proposer.go @@ -19,6 +19,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/slotticker" ) const ( @@ -230,9 +231,15 @@ func (gc *goClient) createValidatorRegistration(pubkey []byte, feeRecipient bell return signedReg } -func (gc *goClient) registrationSubmitter(slots <-chan phase0.Slot) { - for currentSlot := range slots { - gc.submitRegistrationsFromCache(currentSlot) +func (gc *goClient) registrationSubmitter(slotTickerProvider slotticker.Provider) { + ticker := slotTickerProvider() + for { + select { + case <-gc.ctx.Done(): + return + case <-ticker.Next(): + gc.submitRegistrationsFromCache(ticker.Slot()) + } } } diff --git a/beacon/goclient/voluntary_exit.go b/beacon/goclient/voluntary_exit.go new file mode 100644 index 0000000000..bb2dfaa62f --- /dev/null +++ b/beacon/goclient/voluntary_exit.go @@ -0,0 +1,10 @@ +package goclient + +import ( + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/pkg/errors" +) + +func (gc *goClient) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit, sig phase0.BLSSignature) error { + return errors.New("not implemented") +} diff --git a/cli/operator/node.go b/cli/operator/node.go index 4dd14f558a..9c20e2fda0 100644 --- a/cli/operator/node.go +++ b/cli/operator/node.go @@ -19,7 +19,6 @@ import ( "github.com/bloxapp/ssv/api/handlers" apiserver "github.com/bloxapp/ssv/api/server" - "github.com/bloxapp/ssv/beacon/goclient" global_config "github.com/bloxapp/ssv/cli/config" "github.com/bloxapp/ssv/ekm" @@ -34,6 +33,7 @@ import ( ssv_identity "github.com/bloxapp/ssv/identity" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/migrations" "github.com/bloxapp/ssv/monitoring/metrics" "github.com/bloxapp/ssv/monitoring/metricsreporter" @@ -42,9 +42,11 @@ import ( "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/nodeprobe" "github.com/bloxapp/ssv/operator" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/slotticker" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validatorsmap" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -60,6 +62,10 @@ type KeyStore struct { PasswordFile string `yaml:"PasswordFile" env:"PASSWORD_FILE" env-description:"Password for operator private key file decryption"` } +type MessageValidation struct { + VerifySignatures bool `yaml:"VerifySignatures" env:"MESSAGE_VALIDATION_VERIFY_SIGNATURES" env-default:"false" env-description:"Experimental feature to verify signatures in pubsub's message validation instead of in consensus protocol."` +} + type config struct { global_config.GlobalConfig `yaml:"global"` DBOptions basedb.Options `yaml:"db"` @@ -72,13 +78,11 @@ type 
config struct { MetricsAPIPort int `yaml:"MetricsAPIPort" env:"METRICS_API_PORT" env-description:"Port to listen on for the metrics API."` EnableProfile bool `yaml:"EnableProfile" env:"ENABLE_PROFILE" env-description:"flag that indicates whether go profiling tools are enabled"` NetworkPrivateKey string `yaml:"NetworkPrivateKey" env:"NETWORK_PRIVATE_KEY" env-description:"private key for network identity"` - - WsAPIPort int `yaml:"WebSocketAPIPort" env:"WS_API_PORT" env-description:"Port to listen on for the websocket API."` - WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` - - SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` - - LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` + WsAPIPort int `yaml:"WebSocketAPIPort" env:"WS_API_PORT" env-description:"Port to listen on for the websocket API."` + WithPing bool `yaml:"WithPing" env:"WITH_PING" env-description:"Whether to send websocket ping messages'"` + SSVAPIPort int `yaml:"SSVAPIPort" env:"SSV_API_PORT" env-description:"Port to listen on for the SSV API."` + LocalEventsPath string `yaml:"LocalEventsPath" env:"EVENTS_PATH" env-description:"path to local events"` + MessageValidation MessageValidation `yaml:"MessageValidation"` } var cfg config @@ -97,6 +101,11 @@ var StartNodeCmd = &cobra.Command{ log.Fatal("could not create logger", err) } defer logging.CapturePanic(logger) + + metricsReporter := metricsreporter.New( + metricsreporter.WithLogger(logger), + ) + networkConfig, err := setupSSVNetwork(logger) if err != nil { logger.Fatal("could not setup network", zap.Error(err)) @@ -128,28 +137,16 @@ var StartNodeCmd = &cobra.Command{ return currentEpoch >= cfg.P2pNetworkConfig.PermissionedActivateEpoch && currentEpoch < cfg.P2pNetworkConfig.PermissionedDeactivateEpoch } - cfg.P2pNetworkConfig.Permissioned = permissioned - cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) - cfg.P2pNetworkConfig.NodeStorage = nodeStorage - cfg.P2pNetworkConfig.OperatorID = format.OperatorID(operatorData.PublicKey) - cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode - cfg.P2pNetworkConfig.Network = networkConfig - - p2pNetwork := setupP2P(logger, db) - - slotTicker := slot_ticker.NewTicker(cmd.Context(), networkConfig) - - metricsReporter := metricsreporter.New( - metricsreporter.WithLogger(logger), - ) + slotTickerProvider := func() slotticker.SlotTicker { + return slotticker.New(networkConfig) + } cfg.ConsensusClient.Context = cmd.Context() - cfg.ConsensusClient.Graffiti = []byte("SSV.Network") cfg.ConsensusClient.GasLimit = spectypes.DefaultGasLimit cfg.ConsensusClient.Network = networkConfig.Beacon.GetNetwork() - consensusClient := setupConsensusClient(logger, operatorData.ID, slotTicker) + consensusClient := setupConsensusClient(logger, operatorData.ID, slotTickerProvider) executionClient, err := executionclient.New( cmd.Context(), @@ -166,6 +163,36 @@ var StartNodeCmd = &cobra.Command{ logger.Fatal("could not connect to execution client", zap.Error(err)) } + cfg.P2pNetworkConfig.Permissioned = permissioned + cfg.P2pNetworkConfig.WhitelistedOperatorKeys = append(cfg.P2pNetworkConfig.WhitelistedOperatorKeys, networkConfig.WhitelistedOperatorKeys...) 
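+ // NOTE (editor): the ordering below matters; the message validator and metrics reporter are
+ // attached to cfg.P2pNetworkConfig before setupP2P is called, presumably because setupP2P
+ // reads that config, which is why the P2P setup now happens after client construction.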
+ cfg.P2pNetworkConfig.NodeStorage = nodeStorage + cfg.P2pNetworkConfig.OperatorID = format.OperatorID(operatorData.PublicKey) + cfg.P2pNetworkConfig.FullNode = cfg.SSVOptions.ValidatorOptions.FullNode + cfg.P2pNetworkConfig.Network = networkConfig + + validatorsMap := validatorsmap.New(cmd.Context()) + + dutyStore := dutystore.New() + cfg.SSVOptions.DutyStore = dutyStore + + messageValidator := validation.NewMessageValidator( + networkConfig, + validation.WithShareStorage(nodeStorage.Shares()), + validation.WithLogger(logger), + validation.WithMetrics(metricsReporter), + validation.WithDutyStore(dutyStore), + validation.WithOwnOperatorID(operatorData.ID), + validation.WithSignatureVerification(cfg.MessageValidation.VerifySignatures), + ) + + cfg.P2pNetworkConfig.Metrics = metricsReporter + cfg.P2pNetworkConfig.MessageValidator = messageValidator + cfg.SSVOptions.ValidatorOptions.MessageValidator = messageValidator + // if signature check is enabled in message validation then it's disabled in validator controller and vice versa + cfg.SSVOptions.ValidatorOptions.VerifySignatures = !cfg.MessageValidation.VerifySignatures + + p2pNetwork := setupP2P(logger, db) + cfg.SSVOptions.Context = cmd.Context() cfg.SSVOptions.DB = db cfg.SSVOptions.BeaconNode = consensusClient @@ -178,6 +205,7 @@ var StartNodeCmd = &cobra.Command{ cfg.SSVOptions.ValidatorOptions.Network = p2pNetwork cfg.SSVOptions.ValidatorOptions.Beacon = consensusClient cfg.SSVOptions.ValidatorOptions.KeyManager = keyManager + cfg.SSVOptions.ValidatorOptions.ValidatorsMap = validatorsMap cfg.SSVOptions.ValidatorOptions.ShareEncryptionKeyProvider = nodeStorage.GetPrivateKey cfg.SSVOptions.ValidatorOptions.OperatorData = operatorData @@ -209,12 +237,12 @@ var StartNodeCmd = &cobra.Command{ cfg.SSVOptions.ValidatorOptions.StorageMap = storageMap cfg.SSVOptions.ValidatorOptions.Metrics = metricsReporter + cfg.SSVOptions.Metrics = metricsReporter validatorCtrl := validator.NewController(logger, cfg.SSVOptions.ValidatorOptions) cfg.SSVOptions.ValidatorController = validatorCtrl - cfg.SSVOptions.Metrics = metricsReporter - operatorNode = operator.New(logger, cfg.SSVOptions, slotTicker) + operatorNode = operator.New(logger, cfg.SSVOptions, slotTickerProvider) if cfg.MetricsAPIPort > 0 { go startMetricsHandler(cmd.Context(), logger, db, metricsReporter, cfg.MetricsAPIPort, cfg.EnableProfile) @@ -477,10 +505,7 @@ func setupSSVNetwork(logger *zap.Logger) (networkconfig.NetworkConfig, error) { return networkConfig, nil } -func setupP2P( - logger *zap.Logger, - db basedb.Database, -) network.P2PNetwork { +func setupP2P(logger *zap.Logger, db basedb.Database) network.P2PNetwork { istore := ssv_identity.NewIdentityStore(db) netPrivKey, err := istore.SetupNetworkKey(logger, cfg.NetworkPrivateKey) if err != nil { @@ -494,9 +519,9 @@ func setupP2P( func setupConsensusClient( logger *zap.Logger, operatorID spectypes.OperatorID, - slotTicker slot_ticker.Ticker, + slotTickerProvider slotticker.Provider, ) beaconprotocol.BeaconNode { - cl, err := goclient.New(logger, cfg.ConsensusClient, operatorID, slotTicker) + cl, err := goclient.New(logger, cfg.ConsensusClient, operatorID, slotTickerProvider) if err != nil { logger.Fatal("failed to create beacon go-client", zap.Error(err), fields.Address(cfg.ConsensusClient.BeaconNodeAddr)) diff --git a/docs/OPERATOR_GETTING_STARTED.md b/docs/OPERATOR_GETTING_STARTED.md index f46fdd08b1..d99c30ae52 100644 --- a/docs/OPERATOR_GETTING_STARTED.md +++ b/docs/OPERATOR_GETTING_STARTED.md @@ -148,7 +148,8 @@ 
 OperatorPrivateKey: LS0tLS...
 ### 6. Start SSV Node in Docker
-Run the docker image in the same folder you created the `config.yaml`:
+Before starting, make sure the clock is synced with NTP servers.
+Then, run the docker image in the same folder where you created the `config.yaml`:
 ```shell
 $ docker run -d --restart unless-stopped --name=ssv_node -e CONFIG_PATH=./config.yaml -p 13001:13001 -p 12001:12001/udp -v $(pwd)/config.yaml:/config.yaml -v $(pwd):/data --log-opt max-size=500m --log-opt max-file=10 -it 'bloxstaking/ssv-node:latest' make BUILD_PATH=/go/bin/ssvnode start-node \
diff --git a/ekm/eth_key_manager_signer.go b/ekm/eth_key_manager_signer.go
index 21c663a5b1..6d4f098e00 100644
--- a/ekm/eth_key_manager_signer.go
+++ b/ekm/eth_key_manager_signer.go
@@ -29,9 +29,16 @@ import (
 "github.com/bloxapp/ssv/storage/basedb"
 )
-// minimal att&block epoch/slot distance to protect slashing
-var minimalAttSlashingProtectionEpochDistance = phase0.Epoch(0)
-var minimalBlockSlashingProtectionSlotDistance = phase0.Slot(0)
+const (
+ // minSPAttestationEpochGap is the minimum epoch distance used for slashing protection in attestations.
+ // It defines the smallest allowable gap between the source and target epochs in an existing attestation
+ // and those in a new attestation, helping to prevent slashable offenses.
+ minSPAttestationEpochGap = phase0.Epoch(0)
+ // minSPProposalSlotGap is the minimum slot distance used for slashing protection in block proposals.
+ // It defines the smallest allowable gap between the current slot and the slot of a new block proposal,
+ // helping to prevent slashable offenses.
+ minSPProposalSlotGap = phase0.Slot(0)
+)
 type ethKeyManagerSigner struct {
 wallet core.Wallet
@@ -43,9 +50,17 @@ type ethKeyManagerSigner struct {
 builderProposals bool
 }
+// StorageProvider provides the underlying KeyManager storage.
+type StorageProvider interface { + ListAccounts() ([]core.ValidatorAccount, error) + RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) + RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) + BumpSlashingProtection(pubKey []byte) error +} + // NewETHKeyManagerSigner returns a new instance of ethKeyManagerSigner func NewETHKeyManagerSigner(logger *zap.Logger, db basedb.Database, network networkconfig.NetworkConfig, builderProposals bool, encryptionKey string) (spectypes.KeyManager, error) { - signerStore := NewSignerStorage(db, network.Beacon.GetNetwork(), logger) + signerStore := NewSignerStorage(db, network.Beacon, logger) if encryptionKey != "" { err := signerStore.SetEncryptionKey(encryptionKey) if err != nil { @@ -85,6 +100,18 @@ func NewETHKeyManagerSigner(logger *zap.Logger, db basedb.Database, network netw }, nil } +func (km *ethKeyManagerSigner) ListAccounts() ([]core.ValidatorAccount, error) { + return km.storage.ListAccounts() +} + +func (km *ethKeyManagerSigner) RetrieveHighestAttestation(pubKey []byte) (*phase0.AttestationData, bool, error) { + return km.storage.RetrieveHighestAttestation(pubKey) +} + +func (km *ethKeyManagerSigner) RetrieveHighestProposal(pubKey []byte) (phase0.Slot, bool, error) { + return km.storage.RetrieveHighestProposal(pubKey) +} + func (km *ethKeyManagerSigner) SignBeaconObject(obj ssz.HashRoot, domain phase0.Domain, pk []byte, domainType phase0.DomainType) (spectypes.Signature, [32]byte, error) { sig, rootSlice, err := km.signBeaconObject(obj, domain, pk, domainType) if err != nil { @@ -260,9 +287,8 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return errors.Wrap(err, "could not check share existence") } if acc == nil { - currentSlot := km.storage.Network().EstimatedCurrentSlot() - if err := km.saveMinimalSlashingProtection(shareKey.GetPublicKey().Serialize(), currentSlot); err != nil { - return errors.Wrap(err, "could not save minimal slashing protection") + if err := km.BumpSlashingProtection(shareKey.GetPublicKey().Serialize()); err != nil { + return errors.Wrap(err, "could not bump slashing protection") } if err := km.saveShare(shareKey); err != nil { return errors.Wrap(err, "could not save share") @@ -272,23 +298,6 @@ func (km *ethKeyManagerSigner) AddShare(shareKey *bls.SecretKey) error { return nil } -func (km *ethKeyManagerSigner) saveMinimalSlashingProtection(pk []byte, currentSlot phase0.Slot) error { - currentEpoch := km.storage.Network().EstimatedEpochAtSlot(currentSlot) - highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance - highestSource := highestTarget - 1 - highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance - - minAttData := minimalAttProtectionData(highestSource, highestTarget) - - if err := km.storage.SaveHighestAttestation(pk, minAttData); err != nil { - return errors.Wrapf(err, "could not save minimal highest attestation for %s", string(pk)) - } - if err := km.storage.SaveHighestProposal(pk, highestProposal); err != nil { - return errors.Wrapf(err, "could not save minimal highest proposal for %s", string(pk)) - } - return nil -} - func (km *ethKeyManagerSigner) RemoveShare(pubKey string) error { km.walletLock.Lock() defer km.walletLock.Unlock() @@ -315,28 +324,110 @@ func (km *ethKeyManagerSigner) RemoveShare(pubKey string) error { return nil } -func (km *ethKeyManagerSigner) saveShare(shareKey *bls.SecretKey) error { - key, err := core.NewHDKeyFromPrivateKey(shareKey.Serialize(), "") +// BumpSlashingProtection 
updates the slashing protection data for a given public key. +func (km *ethKeyManagerSigner) BumpSlashingProtection(pubKey []byte) error { + currentSlot := km.storage.BeaconNetwork().EstimatedCurrentSlot() + + // Update highest attestation data for slashing protection. + if err := km.updateHighestAttestation(pubKey, currentSlot); err != nil { + return err + } + + // Update highest proposal data for slashing protection. + if err := km.updateHighestProposal(pubKey, currentSlot); err != nil { + return err + } + + return nil +} + +// updateHighestAttestation updates the highest attestation data for slashing protection. +func (km *ethKeyManagerSigner) updateHighestAttestation(pubKey []byte, slot phase0.Slot) error { + // Retrieve the highest attestation data stored for the given public key. + retrievedHighAtt, found, err := km.RetrieveHighestAttestation(pubKey) if err != nil { - return errors.Wrap(err, "could not generate HDKey") + return fmt.Errorf("could not retrieve highest attestation: %w", err) } - account := wallets.NewValidatorAccount("", key, nil, "", nil) - if err := km.wallet.AddValidatorAccount(account); err != nil { - return errors.Wrap(err, "could not save new account") + + currentEpoch := km.storage.BeaconNetwork().EstimatedEpochAtSlot(slot) + minimalSP := km.computeMinimalAttestationSP(currentEpoch) + + // Check if the retrieved highest attestation data is valid and not outdated. + if found && retrievedHighAtt != nil { + if retrievedHighAtt.Source.Epoch >= minimalSP.Source.Epoch || retrievedHighAtt.Target.Epoch >= minimalSP.Target.Epoch { + return nil + } } + + // At this point, either the retrieved attestation data was not found, or it was outdated. + // In either case, we update it to the minimal slashing protection data. + if err := km.storage.SaveHighestAttestation(pubKey, minimalSP); err != nil { + return fmt.Errorf("could not save highest attestation: %w", err) + } + + return nil +} + +// updateHighestProposal updates the highest proposal slot for slashing protection. +func (km *ethKeyManagerSigner) updateHighestProposal(pubKey []byte, slot phase0.Slot) error { + // Retrieve the highest proposal slot stored for the given public key. + retrievedHighProp, found, err := km.RetrieveHighestProposal(pubKey) + if err != nil { + return fmt.Errorf("could not retrieve highest proposal: %w", err) + } + + minimalSPSlot := km.computeMinimalProposerSP(slot) + + // Check if the retrieved highest proposal slot is valid and not outdated. + if found && retrievedHighProp != 0 { + if retrievedHighProp >= minimalSPSlot { + return nil + } + } + + // At this point, either the retrieved proposal slot was not found, or it was outdated. + // In either case, we update it to the minimal slashing protection slot. + if err := km.storage.SaveHighestProposal(pubKey, minimalSPSlot); err != nil { + return fmt.Errorf("could not save highest proposal: %w", err) + } + return nil } -func minimalAttProtectionData(source, target phase0.Epoch) *phase0.AttestationData { +// computeMinimalAttestationSP calculates the minimal safe attestation data for slashing protection. +// It takes the current epoch as an argument and returns an AttestationData object with the minimal safe source and target epochs. +func (km *ethKeyManagerSigner) computeMinimalAttestationSP(epoch phase0.Epoch) *phase0.AttestationData { + // Calculate the highest safe target epoch based on the current epoch and a predefined minimum distance. 
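+ // NOTE (editor): with minSPAttestationEpochGap = 0, the two assignments below resolve to
+ // target = current epoch and source = current epoch - 1, i.e. the minimal source/target pair
+ // that the slashing-protection store will treat as already signed.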
+ highestTarget := epoch + minSPAttestationEpochGap + // The highest safe source epoch is one less than the highest target epoch. + highestSource := highestTarget - 1 + + // Return a new AttestationData object with the calculated source and target epochs. return &phase0.AttestationData{ - BeaconBlockRoot: [32]byte{}, Source: &phase0.Checkpoint{ - Epoch: source, - Root: [32]byte{}, + Epoch: highestSource, }, Target: &phase0.Checkpoint{ - Epoch: target, - Root: [32]byte{}, + Epoch: highestTarget, }, } } + +// computeMinimalProposerSP calculates the minimal safe slot for a block proposal to avoid slashing. +// It takes the current slot as an argument and returns the minimal safe slot. +func (km *ethKeyManagerSigner) computeMinimalProposerSP(slot phase0.Slot) phase0.Slot { + // Calculate the highest safe proposal slot based on the current slot and a predefined minimum distance. + return slot + minSPProposalSlotGap +} + +func (km *ethKeyManagerSigner) saveShare(shareKey *bls.SecretKey) error { + key, err := core.NewHDKeyFromPrivateKey(shareKey.Serialize(), "") + if err != nil { + return errors.Wrap(err, "could not generate HDKey") + } + account := wallets.NewValidatorAccount("", key, nil, "", nil) + if err := km.wallet.AddValidatorAccount(account); err != nil { + return errors.Wrap(err, "could not save new account") + } + return nil +} diff --git a/ekm/signer_key_manager_test.go b/ekm/signer_key_manager_test.go index 4efe2c4fb3..65cf5df24c 100644 --- a/ekm/signer_key_manager_test.go +++ b/ekm/signer_key_manager_test.go @@ -7,26 +7,25 @@ import ( "encoding/hex" "testing" - "github.com/bloxapp/eth2-key-manager/core" - "github.com/bloxapp/eth2-key-manager/wallets/hd" - "github.com/bloxapp/ssv/utils/rsaencryption" - - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/storage/basedb" - "github.com/attestantio/go-eth2-client/spec/altair" "github.com/attestantio/go-eth2-client/spec/bellatrix" "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/bloxapp/eth2-key-manager/core" + "github.com/bloxapp/eth2-key-manager/wallets/hd" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/utils/threshold" "github.com/herumi/bls-eth-go-binary/bls" + "github.com/pkg/errors" "github.com/prysmaticlabs/go-bitfield" "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/utils" + "github.com/bloxapp/ssv/utils/rsaencryption" + "github.com/bloxapp/ssv/utils/threshold" ) const ( @@ -36,7 +35,7 @@ const ( pk2Str = "8796fafa576051372030a75c41caafea149e4368aebaca21c9f90d9974b3973d5cee7d7874e4ec9ec59fb2c8945b3e01" ) -func testKeyManager(t *testing.T) spectypes.KeyManager { +func testKeyManager(t *testing.T, network *networkconfig.NetworkConfig) spectypes.KeyManager { threshold.Init() logger := logging.TestLogger(t) @@ -44,7 +43,14 @@ func testKeyManager(t *testing.T) spectypes.KeyManager { db, err := getBaseStorage(logger) require.NoError(t, err) - km, err := NewETHKeyManagerSigner(logger, db, networkconfig.TestNetwork, true, "") + if network == nil { + network = &networkconfig.NetworkConfig{ + Beacon: utils.SetupMockBeaconNetwork(t, nil), + Domain: networkconfig.TestNetwork.Domain, + } + } + + km, err := NewETHKeyManagerSigner(logger, db, *network, true, "") require.NoError(t, err) sk1 := &bls.SecretKey{} @@ 
-120,7 +126,7 @@ func TestEncryptedKeyManager(t *testing.T) { } func TestSlashing(t *testing.T) { - km := testKeyManager(t) + km := testKeyManager(t, nil) sk1 := &bls.SecretKey{} require.NoError(t, sk1.SetHexString(sk1Str)) @@ -129,12 +135,12 @@ func TestSlashing(t *testing.T) { currentSlot := km.(*ethKeyManagerSigner).storage.Network().EstimatedCurrentSlot() currentEpoch := km.(*ethKeyManagerSigner).storage.Network().EstimatedEpochAtSlot(currentSlot) - highestTarget := currentEpoch + minimalAttSlashingProtectionEpochDistance + 1 + highestTarget := currentEpoch + minSPAttestationEpochGap + 1 highestSource := highestTarget - 1 - highestProposal := currentSlot + minimalBlockSlashingProtectionSlotDistance + 1 + highestProposal := currentSlot + minSPProposalSlotGap + 1 attestationData := &phase0.AttestationData{ - Slot: 30, + Slot: currentSlot, Index: 1, BeaconBlockRoot: [32]byte{1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2}, Source: &phase0.Checkpoint{ @@ -272,7 +278,7 @@ func TestSlashing(t *testing.T) { } func TestSlashing_Attestation(t *testing.T) { - km := testKeyManager(t) + km := testKeyManager(t, nil) var secretKeys [4]*bls.SecretKey for i := range secretKeys { @@ -280,8 +286,7 @@ func TestSlashing_Attestation(t *testing.T) { secretKeys[i].SetByCSPRNG() // Equivalent to AddShare but with a custom slot for minimal slashing protection. - minimalSlot := phase0.Slot(64) - err := km.(*ethKeyManagerSigner).saveMinimalSlashingProtection(secretKeys[i].GetPublicKey().Serialize(), minimalSlot) + err := km.(*ethKeyManagerSigner).BumpSlashingProtection(secretKeys[i].GetPublicKey().Serialize()) require.NoError(t, err) err = km.(*ethKeyManagerSigner).saveShare(secretKeys[i]) require.NoError(t, err) @@ -317,6 +322,12 @@ func TestSlashing_Attestation(t *testing.T) { require.NoError(t, err, "expected no slashing") require.NotZero(t, sig, "expected non-zero signature") require.NotZero(t, root, "expected non-zero root") + + highAtt, found, err := km.(*ethKeyManagerSigner).storage.RetrieveHighestAttestation(sk.GetPublicKey().Serialize()) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, attestation.Source.Epoch, highAtt.Source.Epoch) + require.Equal(t, attestation.Target.Epoch, highAtt.Target.Epoch) } } @@ -360,7 +371,7 @@ func TestSlashing_Attestation(t *testing.T) { func TestSignRoot(t *testing.T) { require.NoError(t, bls.Init(bls.BLS12_381)) - km := testKeyManager(t) + km := testKeyManager(t, nil) t.Run("pk 1", func(t *testing.T) { pk := &bls.PublicKey{} diff --git a/ekm/signer_storage.go b/ekm/signer_storage.go index 5991e6f321..fc8eadd62e 100644 --- a/ekm/signer_storage.go +++ b/ekm/signer_storage.go @@ -47,17 +47,19 @@ type Storage interface { SetEncryptionKey(newKey string) error ListAccountsTxn(r basedb.Reader) ([]core.ValidatorAccount, error) SaveAccountTxn(rw basedb.ReadWriter, account core.ValidatorAccount) error + + BeaconNetwork() beacon.BeaconNetwork } type storage struct { db basedb.Database - network beacon.Network + network beacon.BeaconNetwork encryptionKey []byte logger *zap.Logger // struct logger is used because core.Storage does not support passing a logger lock sync.RWMutex } -func NewSignerStorage(db basedb.Database, network beacon.Network, logger *zap.Logger) Storage { +func NewSignerStorage(db basedb.Database, network beacon.BeaconNetwork, logger *zap.Logger) Storage { return &storage{ db: db, network: network, @@ -87,7 +89,7 @@ func (s *storage) DropRegistryData() error { } func (s *storage) objPrefix(obj 
string) []byte { - return []byte(string(s.network.BeaconNetwork) + obj) + return []byte(string(s.network.GetBeaconNetwork()) + obj) } // Name returns storage name. @@ -97,7 +99,7 @@ func (s *storage) Name() string { // Network returns the network storage is related to. func (s *storage) Network() core.Network { - return core.Network(s.network.BeaconNetwork) + return core.Network(s.network.GetBeaconNetwork()) } // SaveWallet stores the given wallet. @@ -406,3 +408,7 @@ func (s *storage) decrypt(data []byte) ([]byte, error) { nonce, ciphertext := data[:nonceSize], data[nonceSize:] return gcm.Open(nil, nonce, ciphertext, nil) } + +func (s *storage) BeaconNetwork() beacon.BeaconNetwork { + return s.network +} diff --git a/eth/ethtest/cluster_liquidated_test.go b/eth/ethtest/cluster_liquidated_test.go new file mode 100644 index 0000000000..46ae795cef --- /dev/null +++ b/eth/ethtest/cluster_liquidated_test.go @@ -0,0 +1,91 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testClusterLiquidatedInput struct { + *CommonTestInput + events []*ClusterLiquidatedEventInput +} + +func NewTestClusterLiquidatedInput(common *CommonTestInput) *testClusterLiquidatedInput { + return &testClusterLiquidatedInput{common, nil} +} + +func (input *testClusterLiquidatedInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type ClusterLiquidatedEventInput struct { + auth *bind.TransactOpts + ownerAddress *ethcommon.Address + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *ClusterLiquidatedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.ownerAddress == nil: + return fmt.Errorf("validation error: input.ownerAddress is empty") + case input.cluster == nil: + return fmt.Errorf("validation error: input.cluster is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +func (input *testClusterLiquidatedInput) prepare( + eventsToDo []*ClusterLiquidatedEventInput, +) { + input.events = eventsToDo +} + +func (input *testClusterLiquidatedInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.Liquidate( + event.auth, + *event.ownerAddress, + event.opsIds, + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/cluster_reactivated_test.go b/eth/ethtest/cluster_reactivated_test.go new file mode 100644 index 0000000000..664625f44b --- /dev/null +++ b/eth/ethtest/cluster_reactivated_test.go @@ -0,0 +1,87 @@ +package ethtest + +import ( + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + 
"github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testClusterReactivatedInput struct { + *CommonTestInput + events []*ClusterReactivatedEventInput +} + +func NewTestClusterReactivatedInput(common *CommonTestInput) *testClusterReactivatedInput { + return &testClusterReactivatedInput{common, nil} +} + +func (input *testClusterReactivatedInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type ClusterReactivatedEventInput struct { + auth *bind.TransactOpts + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *ClusterReactivatedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.cluster == nil: + return fmt.Errorf("validation error: input.cluster is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +func (input *testClusterReactivatedInput) prepare( + eventsToDo []*ClusterReactivatedEventInput, +) { + input.events = eventsToDo +} + +func (input *testClusterReactivatedInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.Reactivate( + event.auth, + event.opsIds, + big.NewInt(100_000_000), + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/common_test.go b/eth/ethtest/common_test.go new file mode 100644 index 0000000000..44105dee65 --- /dev/null +++ b/eth/ethtest/common_test.go @@ -0,0 +1,231 @@ +package ethtest + +import ( + "context" + "fmt" + "math/big" + "net/http/httptest" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rpc" + "github.com/golang/mock/gomock" + "go.uber.org/zap/zaptest" + + "github.com/bloxapp/ssv/eth/eventsyncer" + "github.com/bloxapp/ssv/eth/executionclient" + "github.com/bloxapp/ssv/eth/simulator" + "github.com/bloxapp/ssv/eth/simulator/simcontract" + "github.com/bloxapp/ssv/monitoring/metricsreporter" + "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator/mocks" +) + +type CommonTestInput struct { + t *testing.T + sim *simulator.SimulatedBackend + boundContract *simcontract.Simcontract + blockNum *uint64 + nodeStorage storage.Storage + doInOneBlock bool +} + +func NewCommonTestInput( + t *testing.T, + sim *simulator.SimulatedBackend, + boundContract *simcontract.Simcontract, + blockNum *uint64, + nodeStorage storage.Storage, + doInOneBlock bool, +) *CommonTestInput { + return &CommonTestInput{ + t: t, + sim: sim, + boundContract: boundContract, + blockNum: blockNum, + nodeStorage: nodeStorage, + doInOneBlock: doInOneBlock, + } +} + +type TestEnv struct { + eventSyncer *eventsyncer.EventSyncer + validators []*testValidatorData + ops []*testOperator + nodeStorage storage.Storage + sim 
*simulator.SimulatedBackend + boundContract *simcontract.Simcontract + auth *bind.TransactOpts + shares [][]byte + execClient *executionclient.ExecutionClient + rpcServer *rpc.Server + httpSrv *httptest.Server + validatorCtrl *mocks.MockController + mockCtrl *gomock.Controller + followDistance *uint64 +} + +func (e *TestEnv) shutdown() { + if e.mockCtrl != nil { + e.mockCtrl.Finish() + } + + if e.httpSrv != nil { + e.httpSrv.Close() + } + + if e.execClient != nil { + // Always returns nil error + _ = e.execClient.Close() + } +} + +func (e *TestEnv) setup( + t *testing.T, + ctx context.Context, + testAddresses []*ethcommon.Address, + validatorsCount uint64, + operatorsCount uint64, +) error { + if e.followDistance == nil { + e.SetDefaultFollowDistance() + } + logger := zaptest.NewLogger(t) + + // Create operators RSA keys + ops, err := createOperators(operatorsCount, 0) + if err != nil { + return err + } + + validators := make([]*testValidatorData, validatorsCount) + shares := make([][]byte, validatorsCount) + + // Create validators, BLS keys, shares + for i := 0; i < int(validatorsCount); i++ { + validators[i], err = createNewValidator(ops) + if err != nil { + return err + } + + shares[i], err = generateSharesData(validators[i], ops, testAddrAlice, i) + if err != nil { + return err + } + } + + eh, validatorCtrl, mockCtrl, nodeStorage, err := setupEventHandler(t, ctx, logger, ops[0], &testAddrAlice, true) + e.mockCtrl = mockCtrl + e.nodeStorage = nodeStorage + + if err != nil { + return err + } + if validatorCtrl == nil { + return fmt.Errorf("validatorCtrl is empty") + } + + // Adding testAddresses to the genesis block mostly to specify some balances for them + sim := simTestBackend(testAddresses) + + // Create JSON-RPC handler + rpcServer, err := sim.Node.RPCHandler() + e.rpcServer = rpcServer + if err != nil { + return fmt.Errorf("can't create RPC server: %w", err) + } + // Expose handler on a test server with ws open + httpSrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) + e.httpSrv = httpSrv + + addr := "ws:" + strings.TrimPrefix(httpSrv.URL, "http:") + + parsed, err := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) + if err != nil { + return fmt.Errorf("can't parse contract ABI: %w", err) + } + + auth, err := bind.NewKeyedTransactorWithChainID(testKeyAlice, big.NewInt(1337)) + if err != nil { + return err + } + + contractAddr, _, _, err := bind.DeployContract(auth, parsed, ethcommon.FromHex(simcontract.SimcontractMetaData.Bin), sim) + if err != nil { + return fmt.Errorf("deploy contract: %w", err) + } + + sim.Commit() + + // Check contract code at the simulated blockchain + contractCode, err := sim.CodeAt(ctx, contractAddr, nil) + if err != nil { + return fmt.Errorf("get contract code: %w", err) + } + if len(contractCode) == 0 { + return fmt.Errorf("contractCode is empty") + } + + // Create a client and connect to the simulator + e.execClient, err = executionclient.New( + ctx, + addr, + contractAddr, + executionclient.WithLogger(logger), + executionclient.WithFollowDistance(*e.followDistance), + ) + if err != nil { + return err + } + + err = e.execClient.Healthy(ctx) + if err != nil { + return err + } + + e.boundContract, err = simcontract.NewSimcontract(contractAddr, sim) + if err != nil { + return err + } + + metricsReporter := metricsreporter.New( + metricsreporter.WithLogger(logger), + ) + + e.eventSyncer = eventsyncer.New( + nodeStorage, + e.execClient, + eh, + eventsyncer.WithLogger(logger), + eventsyncer.WithMetrics(metricsReporter), + ) + + 
e.validatorCtrl = validatorCtrl
+	e.sim = sim
+	e.auth = auth
+	e.validators = validators
+	e.ops = ops
+	e.shares = shares
+
+	return nil
+}
+
+func (e *TestEnv) SetDefaultFollowDistance() {
+	// 8 is the follow distance currently used in production
+	value := uint64(8)
+	e.followDistance = &value
+}
+
+func (e *TestEnv) CloseFollowDistance(blockNum *uint64) {
+	for i := uint64(0); i < *e.followDistance; i++ {
+		commitBlock(e.sim, blockNum)
+	}
+}
+
+func commitBlock(sim *simulator.SimulatedBackend, blockNum *uint64) {
+	sim.Commit()
+	*blockNum++
+}
diff --git a/eth/ethtest/eth_e2e_test.go b/eth/ethtest/eth_e2e_test.go
new file mode 100644
index 0000000000..b38dd8ea3d
--- /dev/null
+++ b/eth/ethtest/eth_e2e_test.go
@@ -0,0 +1,309 @@
+package ethtest
+
+import (
+	"context"
+	"fmt"
+	"math/big"
+	"testing"
+	"time"
+
+	ethcommon "github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/golang/mock/gomock"
+	"github.com/stretchr/testify/require"
+
+	"github.com/bloxapp/ssv/eth/simulator/simcontract"
+	ssvtypes "github.com/bloxapp/ssv/protocol/v2/types"
+	registrystorage "github.com/bloxapp/ssv/registry/storage"
+)
+
+var (
+	testKeyAlice, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+	testKeyBob, _   = crypto.HexToECDSA("42e14d227125f411d6d3285bb4a2e07c2dba2e210bd2f3f4e2a36633bd61bfe6")
+
+	testAddrAlice = crypto.PubkeyToAddress(testKeyAlice.PublicKey)
+	testAddrBob   = crypto.PubkeyToAddress(testKeyBob.PublicKey)
+)
+
+// E2E tests for the ETH package
+func TestEthExecLayer(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	testAddresses := make([]*ethcommon.Address, 2)
+	testAddresses[0] = &testAddrAlice
+	testAddresses[1] = &testAddrBob
+
+	cluster := &simcontract.CallableCluster{
+		ValidatorCount:  1,
+		NetworkFeeIndex: 1,
+		Index:           1,
+		Active:          true,
+		Balance:         big.NewInt(100_000_000),
+	}
+
+	expectedNonce := registrystorage.Nonce(0)
+
+	testEnv := TestEnv{}
+	testEnv.SetDefaultFollowDistance()
+
+	defer testEnv.shutdown()
+	err := testEnv.setup(t, ctx, testAddresses, 7, 4)
+	require.NoError(t, err)
+
+	var (
+		auth          = testEnv.auth
+		nodeStorage   = testEnv.nodeStorage
+		sim           = testEnv.sim
+		boundContract = testEnv.boundContract
+		ops           = testEnv.ops
+		validators    = testEnv.validators
+		eventSyncer   = testEnv.eventSyncer
+		shares        = testEnv.shares
+		validatorCtrl = testEnv.validatorCtrl
+	)
+
+	blockNum := uint64(0x1)
+	lastHandledBlockNum := uint64(0x1)
+
+	common := NewCommonTestInput(t, sim, boundContract, &blockNum, nodeStorage, true)
+	// Prepare blocks with events
+	// Check that the state is empty before the test
+	// Check SyncHistory doesn't execute any tasks -> doesn't run any of the Controller methods
+	// Check the node storage for the existence of operators and a validator
+	t.Run("SyncHistory happy flow", func(t *testing.T) {
+		// BLOCK 2. Produce OPERATOR ADDED events
+		// Check that there are no registered operators
+		{
+			operators, err := nodeStorage.ListOperators(nil, 0, 10)
+			require.NoError(t, err)
+			require.Equal(t, 0, len(operators))
+
+			opAddedInput := NewOperatorAddedEventInput(common)
+			opAddedInput.prepare(ops, auth)
+			opAddedInput.produce()
+
+			testEnv.CloseFollowDistance(&blockNum)
+		}
+
+		// BLOCK 3: VALIDATOR ADDED
+		// Check that there were no operations for the Alice validator
+		{
+			nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.Equal(t, expectedNonce, nonce)
+
+			valAddInput := NewTestValidatorRegisteredInput(common)
+			valAddInput.prepare(validators, shares, ops, auth, &expectedNonce, []uint32{0, 1})
+			valAddInput.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Run SyncHistory
+			lastHandledBlockNum, err = eventSyncer.SyncHistory(ctx, lastHandledBlockNum)
+			require.NoError(t, err)
+
+			// Check that all the events were handled correctly and the block number was increased
+			require.Equal(t, blockNum-*testEnv.followDistance, lastHandledBlockNum)
+			fmt.Println("lastHandledBlockNum", lastHandledBlockNum)
+
+			// Check that operators were successfully registered
+			operators, err := nodeStorage.ListOperators(nil, 0, 10)
+			require.NoError(t, err)
+			require.Equal(t, len(ops), len(operators))
+
+			// Check that the validator was registered
+			shares := nodeStorage.Shares().List(nil)
+			require.Equal(t, len(valAddInput.events), len(shares))
+
+			// Check the nonce was bumped
+			nonce, err = nodeStorage.GetNextNonce(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.Equal(t, expectedNonce, nonce)
+		}
+	})
+
+	// The main difference between handling "online" events and syncing the historical (old) events
+	// is that here we have to check that the controller was triggered
+	t.Run("SyncOngoing happy flow", func(t *testing.T) {
+		go func() {
+			err = eventSyncer.SyncOngoing(ctx, lastHandledBlockNum+1)
+			require.NoError(t, err)
+		}()
+
+		stopChan := make(chan struct{})
+		go func() {
+			for {
+				select {
+				case <-ctx.Done():
+					return
+				case <-stopChan:
+					return
+				default:
+					time.Sleep(100 * time.Millisecond)
+				}
+			}
+		}()
+
+		// Step 1: Add more validators
+		{
+			validatorCtrl.EXPECT().StartValidator(gomock.Any()).AnyTimes()
+
+			// Check the current nonce before we start
+			nonce, err := nodeStorage.GetNextNonce(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.Equal(t, expectedNonce, nonce)
+
+			valAddInput := NewTestValidatorRegisteredInput(common)
+			valAddInput.prepare(validators, shares, ops, auth, &expectedNonce, []uint32{2, 3, 4, 5, 6})
+			valAddInput.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Wait until the state is changed
+			time.Sleep(time.Millisecond * 5000)
+
+			nonce, err = nodeStorage.GetNextNonce(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.Equal(t, expectedNonce, nonce)
+
+			// Not sure whether this check makes sense
+			require.Equal(t, uint64(testEnv.sim.Blockchain.CurrentBlock().Number.Int64()), *common.blockNum)
+		}
+
+		// Step 2: Remove validators
+		{
+			validatorCtrl.EXPECT().StopValidator(gomock.Any()).AnyTimes()
+
+			shares := nodeStorage.Shares().List(nil)
+			require.Equal(t, 7, len(shares))
+
+			valRemove := NewTestValidatorRemovedEventsInput(common)
+			valRemove.prepare(
+				validators,
+				[]uint64{0, 1},
+				[]uint64{1, 2, 3, 4},
+				auth,
+				cluster,
+			)
+			valRemove.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Wait until the state is changed
+			time.Sleep(time.Millisecond * 500)
+
+			shares = nodeStorage.Shares().List(nil)
+			require.Equal(t, 5, len(shares))
+
+			for _, event := range valRemove.events {
+				valPubKey := event.validator.masterPubKey.Serialize()
+				valShare := nodeStorage.Shares().Get(nil, valPubKey)
+				require.Nil(t, valShare)
+			}
+		}
+
+		// Step 3: Liquidate Cluster
+		{
+			validatorCtrl.EXPECT().LiquidateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+			clusterLiquidate := NewTestClusterLiquidatedInput(common)
+			clusterLiquidate.prepare([]*ClusterLiquidatedEventInput{
+				{
+					auth:         auth,
+					ownerAddress: &testAddrAlice,
+					opsIds:       []uint64{1, 2, 3, 4},
+					cluster:      cluster,
+				},
+			})
+			clusterLiquidate.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Wait until the state is changed
+			time.Sleep(time.Millisecond * 300)
+
+			clusterID, err := ssvtypes.ComputeClusterIDHash(testAddrAlice.Bytes(), []uint64{1, 2, 3, 4})
+			require.NoError(t, err)
+
+			shares := nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID))
+			require.NotEmpty(t, shares)
+			require.Equal(t, 5, len(shares))
+
+			for _, s := range shares {
+				require.True(t, s.Liquidated)
+			}
+		}
+
+		// Step 4: Reactivate Cluster
+		{
+			validatorCtrl.EXPECT().ReactivateCluster(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+
+			clusterID, err := ssvtypes.ComputeClusterIDHash(testAddrAlice.Bytes(), []uint64{1, 2, 3, 4})
+			require.NoError(t, err)
+
+			shares := nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID))
+			require.NotEmpty(t, shares)
+			require.Equal(t, 5, len(shares))
+
+			for _, s := range shares {
+				require.True(t, s.Liquidated)
+			}
+
+			// Trigger the event
+			clusterReactivated := NewTestClusterReactivatedInput(common)
+			clusterReactivated.prepare([]*ClusterReactivatedEventInput{
+				{
+					auth:    auth,
+					opsIds:  []uint64{1, 2, 3, 4},
+					cluster: cluster,
+				},
+			})
+			clusterReactivated.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Wait until the state is changed
+			time.Sleep(time.Millisecond * 300)
+
+			shares = nodeStorage.Shares().List(nil, registrystorage.ByClusterID(clusterID))
+			require.NotEmpty(t, shares)
+			require.Equal(t, 5, len(shares))
+
+			for _, s := range shares {
+				require.False(t, s.Liquidated)
+			}
+		}
+
+		// Step 5: Remove some Operators
+		{
+			operators, err := nodeStorage.ListOperators(nil, 0, 10)
+			require.NoError(t, err)
+			require.Equal(t, 4, len(operators))
+
+			opRemoved := NewOperatorRemovedEventInput(common)
+			opRemoved.prepare([]uint64{1, 2}, auth)
+			opRemoved.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved
+		}
+
+		// Step 6: Update Fee Recipient
+		{
+			validatorCtrl.EXPECT().UpdateFeeRecipient(gomock.Any(), gomock.Any()).Times(1)
+
+			setFeeRecipient := NewSetFeeRecipientAddressInput(common)
+			setFeeRecipient.prepare([]*SetFeeRecipientAddressEventInput{
+				{auth, &testAddrBob},
+			})
+			setFeeRecipient.produce()
+			testEnv.CloseFollowDistance(&blockNum)
+
+			// Wait until the state is changed
+			time.Sleep(time.Millisecond * 300)
+
+			recipientData, found, err := nodeStorage.GetRecipientData(nil, testAddrAlice)
+			require.NoError(t, err)
+			require.True(t, found)
+			require.Equal(t, testAddrBob.String(), recipientData.FeeRecipient.String())
+		}
+
+		stopChan <- struct{}{}
+	})
+}
diff --git a/eth/ethtest/operator_added_test.go b/eth/ethtest/operator_added_test.go
new file mode 100644
index 0000000000..9a173a5064
--- /dev/null
+++ b/eth/ethtest/operator_added_test.go
@@ -0,0 +1,86 @@
+package ethtest
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/stretchr/testify/require"
+
+	
"github.com/bloxapp/ssv/eth/eventparser" +) + +type testOperatorAddedEventInput struct { + op *testOperator + auth *bind.TransactOpts +} + +type ProduceOperatorAddedEventsInput struct { + *CommonTestInput + events []*testOperatorAddedEventInput +} + +func NewOperatorAddedEventInput(common *CommonTestInput) *ProduceOperatorAddedEventsInput { + return &ProduceOperatorAddedEventsInput{common, nil} +} + +func (input *ProduceOperatorAddedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, event := range input.events { + err := event.validate() + if err != nil { + return err + } + } + return nil +} +func (input *testOperatorAddedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.op == nil: + return fmt.Errorf("validation error: input.op is empty") + } + + return nil +} + +func (input *ProduceOperatorAddedEventsInput) prepare( + ops []*testOperator, + auth *bind.TransactOpts, +) { + input.events = make([]*testOperatorAddedEventInput, len(ops)) + + for i, op := range ops { + input.events[i] = &testOperatorAddedEventInput{op, auth} + } +} + +func (input *ProduceOperatorAddedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + op := event.op + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) + require.NoError(input.t, err) + _, err = input.boundContract.SimcontractTransactor.RegisterOperator(event.auth, packedOperatorPubKey, big.NewInt(100_000_000)) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/operator_removed_test.go b/eth/ethtest/operator_removed_test.go new file mode 100644 index 0000000000..5b4dd27822 --- /dev/null +++ b/eth/ethtest/operator_removed_test.go @@ -0,0 +1,83 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" +) + +type testOperatorRemovedEventInput struct { + opId uint64 + auth *bind.TransactOpts +} + +type ProduceOperatorRemovedEventsInput struct { + *CommonTestInput + events []*testOperatorRemovedEventInput +} + +func NewOperatorRemovedEventInput(common *CommonTestInput) *ProduceOperatorRemovedEventsInput { + return &ProduceOperatorRemovedEventsInput{common, nil} +} + +func (input *ProduceOperatorRemovedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, event := range input.events { + err := event.validate() + if err != nil { + return err + } + } + return nil +} +func (input *testOperatorRemovedEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.opId == 0: + return fmt.Errorf("validation error: input.opId is invalid") + } + + return nil +} + +func (input *ProduceOperatorRemovedEventsInput) prepare( + opsIds []uint64, + auth *bind.TransactOpts, +) { + 
input.events = make([]*testOperatorRemovedEventInput, len(opsIds)) + + for i, opId := range opsIds { + input.events[i] = &testOperatorRemovedEventInput{opId, auth} + } +} + +func (input *ProduceOperatorRemovedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + _, err = input.boundContract.SimcontractTransactor.RemoveOperator( + event.auth, + event.opId, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/set_fee_recipient_test.go b/eth/ethtest/set_fee_recipient_test.go new file mode 100644 index 0000000000..14ac7dd263 --- /dev/null +++ b/eth/ethtest/set_fee_recipient_test.go @@ -0,0 +1,80 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +type SetFeeRecipientAddressInput struct { + *CommonTestInput + events []*SetFeeRecipientAddressEventInput +} + +func NewSetFeeRecipientAddressInput(common *CommonTestInput) *SetFeeRecipientAddressInput { + return &SetFeeRecipientAddressInput{common, nil} +} + +func (input *SetFeeRecipientAddressInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: CommonTestInput is empty") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +type SetFeeRecipientAddressEventInput struct { + auth *bind.TransactOpts + address *ethcommon.Address +} + +func (input *SetFeeRecipientAddressEventInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.address == nil: + return fmt.Errorf("validation error: input.address is empty") + } + + return nil +} + +func (input *SetFeeRecipientAddressInput) prepare( + eventsToDo []*SetFeeRecipientAddressEventInput, +) { + input.events = eventsToDo +} + +func (input *SetFeeRecipientAddressInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + // Call the contract method + _, err = input.boundContract.SimcontractTransactor.SetFeeRecipientAddress( + event.auth, + *event.address, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/utils_test.go b/eth/ethtest/utils_test.go new file mode 100644 index 0000000000..289030f7c8 --- /dev/null +++ b/eth/ethtest/utils_test.go @@ -0,0 +1,300 @@ +package ethtest + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "encoding/base64" + "errors" + "fmt" + "math/big" + "testing" + + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/golang/mock/gomock" + "github.com/herumi/bls-eth-go-binary/bls" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/ekm" + "github.com/bloxapp/ssv/eth/contract" + "github.com/bloxapp/ssv/eth/eventhandler" + "github.com/bloxapp/ssv/eth/eventparser" + "github.com/bloxapp/ssv/eth/simulator" + ibftstorage "github.com/bloxapp/ssv/ibft/storage" + 
"github.com/bloxapp/ssv/networkconfig" + operatorstorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validator/mocks" + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + registrystorage "github.com/bloxapp/ssv/registry/storage" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils/blskeygen" + "github.com/bloxapp/ssv/utils/rsaencryption" + "github.com/bloxapp/ssv/utils/threshold" +) + +type testValidatorData struct { + masterKey *bls.SecretKey + masterPubKey *bls.PublicKey + masterPublicKeys bls.PublicKeys + operatorsShares []*testShare +} + +type testOperator struct { + id uint64 + rsaPub []byte + rsaPriv []byte +} + +type testShare struct { + opId uint64 + sec *bls.SecretKey + pub *bls.PublicKey +} + +func createNewValidator(ops []*testOperator) (*testValidatorData, error) { + validatorData := &testValidatorData{} + sharesCount := uint64(len(ops)) + threshold.Init() + + msk, mpk := blskeygen.GenBLSKeyPair() + secVec := msk.GetMasterSecretKey(int(sharesCount)) + pubKeys := bls.GetMasterPublicKey(secVec) + splitKeys, err := threshold.Create(msk.Serialize(), sharesCount-1, sharesCount) + if err != nil { + return nil, err + } + + validatorData.operatorsShares = make([]*testShare, sharesCount) + + // derive a `sharesCount` number of shares + for i := uint64(1); i <= sharesCount; i++ { + validatorData.operatorsShares[i-1] = &testShare{ + opId: i, + sec: splitKeys[i], + pub: splitKeys[i].GetPublicKey(), + } + } + + validatorData.masterKey = msk + validatorData.masterPubKey = mpk + validatorData.masterPublicKeys = pubKeys + + return validatorData, nil +} + +func createOperators(num uint64, idOffset uint64) ([]*testOperator, error) { + testOps := make([]*testOperator, num) + + for i := uint64(1); i <= num; i++ { + pb, sk, err := rsaencryption.GenerateKeys() + if err != nil { + return nil, err + } + testOps[i-1] = &testOperator{ + id: idOffset + i, + rsaPub: pb, + rsaPriv: sk, + } + } + + return testOps, nil +} + +func generateSharesData(validatorData *testValidatorData, operators []*testOperator, owner ethcommon.Address, nonce int) ([]byte, error) { + var pubKeys []byte + var encryptedShares []byte + + for i, op := range operators { + rsaKey, err := rsaencryption.ConvertPemToPublicKey(op.rsaPub) + if err != nil { + return nil, fmt.Errorf("can't convert public key: %w", err) + } + + rawShare := validatorData.operatorsShares[i].sec.SerializeToHexStr() + cipherText, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(rawShare)) + if err != nil { + return nil, fmt.Errorf("can't encrypt share: %w", err) + } + + rsaPriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.rsaPriv)) + if err != nil { + return nil, fmt.Errorf("can't convert secret key to a private key share: %w", err) + } + + // check that we encrypt right + shareSecret := &bls.SecretKey{} + decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsaPriv, cipherText) + if err != nil { + return nil, err + } + if err = shareSecret.SetHexString(string(decryptedSharePrivateKey)); err != nil { + return nil, err + } + + pubKeys = append(pubKeys, validatorData.operatorsShares[i].pub.Serialize()...) + encryptedShares = append(encryptedShares, cipherText...) 
+ + } + + toSign := fmt.Sprintf("%s:%d", owner.String(), nonce) + msgHash := crypto.Keccak256([]byte(toSign)) + signed := validatorData.masterKey.Sign(string(msgHash)) + sig := signed.Serialize() + + if !signed.VerifyByte(validatorData.masterPubKey, msgHash) { + return nil, errors.New("can't sign correctly") + } + + sharesData := append(pubKeys, encryptedShares...) + sharesDataSigned := append(sig, sharesData...) + + return sharesDataSigned, nil +} + +func setupEventHandler( + t *testing.T, + ctx context.Context, + logger *zap.Logger, + operator *testOperator, + ownerAddress *ethcommon.Address, + useMockCtrl bool, +) (*eventhandler.EventHandler, *mocks.MockController, *gomock.Controller, operatorstorage.Storage, error) { + db, err := kv.NewInMemory(logger, basedb.Options{ + Ctx: ctx, + }) + if err != nil { + return nil, nil, nil, nil, err + } + + storageMap := ibftstorage.NewStores() + nodeStorage, operatorData := setupOperatorStorage(logger, db, operator, ownerAddress) + testNetworkConfig := networkconfig.TestNetwork + + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, testNetworkConfig, true, "") + if err != nil { + return nil, nil, nil, nil, err + } + + ctrl := gomock.NewController(t) + bc := beacon.NewMockBeaconNode(ctrl) + + contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) + if err != nil { + return nil, nil, nil, nil, err + } + + if useMockCtrl { + validatorCtrl := mocks.NewMockController(ctrl) + + parser := eventparser.New(contractFilterer) + + eh, err := eventhandler.New( + nodeStorage, + parser, + validatorCtrl, + testNetworkConfig.Domain, + validatorCtrl, + nodeStorage.GetPrivateKey, + keyManager, + bc, + storageMap, + eventhandler.WithFullNode(), + eventhandler.WithLogger(logger), + ) + + if err != nil { + return nil, nil, nil, nil, err + } + + validatorCtrl.EXPECT().GetOperatorData().Return(operatorData).AnyTimes() + + return eh, validatorCtrl, ctrl, nodeStorage, nil + } + + validatorCtrl := validator.NewController(logger, validator.ControllerOptions{ + Context: ctx, + DB: db, + RegistryStorage: nodeStorage, + KeyManager: keyManager, + StorageMap: storageMap, + OperatorData: operatorData, + }) + + parser := eventparser.New(contractFilterer) + + eh, err := eventhandler.New( + nodeStorage, + parser, + validatorCtrl, + testNetworkConfig.Domain, + validatorCtrl, + nodeStorage.GetPrivateKey, + keyManager, + bc, + storageMap, + eventhandler.WithFullNode(), + eventhandler.WithLogger(logger), + ) + if err != nil { + return nil, nil, nil, nil, err + } + + return eh, nil, ctrl, nodeStorage, nil +} + +func setupOperatorStorage( + logger *zap.Logger, + db basedb.Database, + operator *testOperator, + ownerAddress *ethcommon.Address, +) (operatorstorage.Storage, *registrystorage.OperatorData) { + if operator == nil { + logger.Fatal("empty test operator was passed") + } + + nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) + if err != nil { + logger.Fatal("failed to create node storage", zap.Error(err)) + } + + operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.rsaPriv)) + if err != nil { + logger.Fatal("couldn't setup operator private key", zap.Error(err)) + } + + _, found, err := nodeStorage.GetPrivateKey() + if err != nil || !found { + logger.Fatal("failed to get operator private key", zap.Error(err)) + } + var operatorData *registrystorage.OperatorData + operatorData, found, err = nodeStorage.GetOperatorDataByPubKey(nil, operatorPubKey) + + if err != nil { + logger.Fatal("couldn't get operator 
data by public key", zap.Error(err))
+	}
+	if !found {
+		operatorData = &registrystorage.OperatorData{
+			PublicKey:    operatorPubKey,
+			ID:           operator.id,
+			OwnerAddress: *ownerAddress,
+		}
+	}
+
+	return nodeStorage, operatorData
+}
+
+func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend {
+	genesis := core.GenesisAlloc{}
+
+	for _, testAddr := range testAddresses {
+		genesis[*testAddr] = core.GenesisAccount{Balance: big.NewInt(10000000000000000)}
+	}
+
+	return simulator.NewSimulatedBackend(
+		genesis, 50_000_000,
+	)
+}
diff --git a/eth/ethtest/validator_added_test.go b/eth/ethtest/validator_added_test.go
new file mode 100644
index 0000000000..2497552e7f
--- /dev/null
+++ b/eth/ethtest/validator_added_test.go
@@ -0,0 +1,134 @@
+package ethtest
+
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/accounts/abi/bind"
+	"github.com/stretchr/testify/require"
+
+	"github.com/bloxapp/ssv/eth/simulator/simcontract"
+	registrystorage "github.com/bloxapp/ssv/registry/storage"
+)
+
+type testValidatorRegisteredInput struct {
+	*CommonTestInput
+	events []*validatorRegisteredEventInput
+}
+
+func NewTestValidatorRegisteredInput(common *CommonTestInput) *testValidatorRegisteredInput {
+	return &testValidatorRegisteredInput{common, nil}
+}
+
+func (input *testValidatorRegisteredInput) validate() error {
+	if input.CommonTestInput == nil {
+		return fmt.Errorf("validation error: CommonTestInput is empty")
+	}
+	if input.events == nil {
+		return fmt.Errorf("validation error: empty events")
+	}
+	for _, e := range input.events {
+		if err := e.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type validatorRegisteredEventInput struct {
+	auth      *bind.TransactOpts
+	ops       []*testOperator
+	validator *testValidatorData
+	share     []byte
+	opsIds    []uint64 // opsIds is kept separate from ops since it is a distinct event field and should be used for destructive tests
+}
+
+func (input *validatorRegisteredEventInput) validate() error {
+	if input == nil {
+		return fmt.Errorf("validation error: empty input")
+	}
+
+	switch {
+	case input.auth == nil:
+		return fmt.Errorf("validation error: input.auth is empty")
+	case input.validator == nil:
+		return fmt.Errorf("validation error: input.validator is empty")
+	case len(input.share) == 0:
+		return fmt.Errorf("validation error: input.share is empty")
+	case len(input.ops) == 0:
+		return fmt.Errorf("validation error: input.ops is empty")
+	}
+
+	if len(input.opsIds) == 0 {
+		input.opsIds = make([]uint64, len(input.ops))
+		for i, op := range input.ops {
+			input.opsIds[i] = op.id
+		}
+	}
+
+	return nil
+}
+
+func (input *testValidatorRegisteredInput) prepare(
+	validators []*testValidatorData,
+	shares [][]byte,
+	ops []*testOperator,
+	auth *bind.TransactOpts,
+	expectedNonce *registrystorage.Nonce,
+	validatorsIds []uint32,
+) {
+	input.events = make([]*validatorRegisteredEventInput, len(validatorsIds))
+
+	for i, validatorId := range validatorsIds {
+		// Check there are no shares in the state for the current validator
+		valPubKey := validators[validatorId].masterPubKey.Serialize()
+		share := input.nodeStorage.Shares().Get(nil, valPubKey)
+		require.Nil(input.t, share)
+
+		// Create event input
+		input.events[i] = &validatorRegisteredEventInput{
+			validator: validators[validatorId],
+			share:     shares[validatorId],
+			auth:      auth,
+			ops:       ops,
+		}
+
+		// Expect the nonce to be bumped after each of these ValidatorAdded events is handled
+		*expectedNonce++
+	}
+}
+
+func (input *testValidatorRegisteredInput) produce() {
+	err := input.validate()
+	
require.NoError(input.t, err) + + for _, event := range input.events { + val := event.validator + valPubKey := val.masterPubKey.Serialize() + shares := input.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(input.t, shares) + + // Call the contract method + _, err := input.boundContract.SimcontractTransactor.RegisterValidator( + event.auth, + val.masterPubKey.Serialize(), + event.opsIds, + event.share, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } +} diff --git a/eth/ethtest/validator_removed_test.go b/eth/ethtest/validator_removed_test.go new file mode 100644 index 0000000000..778b67dff8 --- /dev/null +++ b/eth/ethtest/validator_removed_test.go @@ -0,0 +1,104 @@ +package ethtest + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/eth/simulator/simcontract" +) + +type testValidatorRemovedInput struct { + auth *bind.TransactOpts + validator *testValidatorData + opsIds []uint64 + cluster *simcontract.CallableCluster +} + +func (input *testValidatorRemovedInput) validate() error { + if input == nil { + return fmt.Errorf("validation error: empty input") + } + + switch { + case input.auth == nil: + return fmt.Errorf("validation error: input.auth is empty") + case input.validator == nil: + return fmt.Errorf("validation error: input.validator is empty") + case len(input.opsIds) == 0: + return fmt.Errorf("validation error: input.opsIds is empty") + } + + return nil +} + +type TestValidatorRemovedEventsInput struct { + *CommonTestInput + events []*testValidatorRemovedInput +} + +func (input *TestValidatorRemovedEventsInput) validate() error { + if input.CommonTestInput == nil { + return fmt.Errorf("validation error: empty CommonTestInput") + } + if input.events == nil { + return fmt.Errorf("validation error: empty events") + } + for _, e := range input.events { + if err := e.validate(); err != nil { + return err + } + } + return nil +} + +func NewTestValidatorRemovedEventsInput(common *CommonTestInput) *TestValidatorRemovedEventsInput { + return &TestValidatorRemovedEventsInput{common, nil} +} + +func (input *TestValidatorRemovedEventsInput) prepare( + validators []*testValidatorData, + validatorsIds []uint64, + opsIds []uint64, + auth *bind.TransactOpts, + cluster *simcontract.CallableCluster, +) { + input.events = make([]*testValidatorRemovedInput, len(validatorsIds)) + + for i, validatorId := range validatorsIds { + input.events[i] = &testValidatorRemovedInput{ + auth, + validators[validatorId], + opsIds, + cluster, + } + } +} + +func (input *TestValidatorRemovedEventsInput) produce() { + err := input.validate() + require.NoError(input.t, err) + + for _, event := range input.events { + valPubKey := event.validator.masterPubKey.Serialize() + // Check the validator's shares are present in the state before removing + valShare := input.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(input.t, valShare) + + _, err = input.boundContract.SimcontractTransactor.RemoveValidator( + event.auth, + valPubKey, + event.opsIds, + *event.cluster, + ) + require.NoError(input.t, err) + + if !input.doInOneBlock { + commitBlock(input.sim, input.blockNum) + } + } + if input.doInOneBlock { + commitBlock(input.sim, 
input.blockNum) + } +} diff --git a/eth/eventhandler/event_handler.go b/eth/eventhandler/event_handler.go index 1c909caf88..b207c78a25 100644 --- a/eth/eventhandler/event_handler.go +++ b/eth/eventhandler/event_handler.go @@ -46,7 +46,7 @@ var ( type taskExecutor interface { StartValidator(share *ssvtypes.SSVShare) error - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient ethcommon.Address) error @@ -285,7 +285,7 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, return nil, nil } - sharePK, err := eh.handleValidatorRemoved(txn, validatorRemovedEvent) + validatorPubKey, err := eh.handleValidatorRemoved(txn, validatorRemovedEvent) if err != nil { eh.metrics.EventProcessingFailed(abiEvent.Name) @@ -298,13 +298,11 @@ func (eh *EventHandler) processEvent(txn basedb.Txn, event ethtypes.Log) (Task, defer eh.metrics.EventProcessed(abiEvent.Name) - if sharePK == nil { - return nil, nil + if validatorPubKey != nil { + return NewStopValidatorTask(eh.taskExecutor, validatorPubKey), nil } - task := NewStopValidatorTask(eh.taskExecutor, validatorRemovedEvent.PublicKey) - - return task, nil + return nil, nil case ClusterLiquidated: clusterLiquidatedEvent, err := eh.eventParser.ParseClusterLiquidated(event) diff --git a/eth/eventhandler/event_handler_test.go b/eth/eventhandler/event_handler_test.go index bf1f96961e..070de44d04 100644 --- a/eth/eventhandler/event_handler_test.go +++ b/eth/eventhandler/event_handler_test.go @@ -1,6 +1,7 @@ package eventhandler import ( + "bytes" "context" "crypto/rand" "crypto/rsa" @@ -12,13 +13,8 @@ import ( "strings" "testing" - "github.com/bloxapp/ssv/operator/validator" - "github.com/bloxapp/ssv/operator/validator/mocks" - "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/bloxapp/ssv/utils/blskeygen" - "github.com/pkg/errors" - + ekmcore "github.com/bloxapp/eth2-key-manager/core" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/accounts/abi/bind" ethcommon "github.com/ethereum/go-ethereum/common" @@ -27,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/golang/mock/gomock" "github.com/herumi/bls-eth-go-binary/bls" + "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -40,10 +37,15 @@ import ( ibftstorage "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/networkconfig" operatorstorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validator" + "github.com/bloxapp/ssv/operator/validator/mocks" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" registrystorage "github.com/bloxapp/ssv/registry/storage" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" + "github.com/bloxapp/ssv/utils" + "github.com/bloxapp/ssv/utils/blskeygen" "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/utils/threshold" ) @@ -60,15 +62,34 @@ func TestHandleBlockEventsStream(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + operatorsCount := uint64(0) // Create operators rsa keys - ops, err := createOperators(4) + ops, err := createOperators(4, operatorsCount) require.NoError(t, err) + 
operatorsCount += uint64(len(ops))
 
-	eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false)
+	currentSlot := &utils.SlotValue{}
+	mockBeaconNetwork := utils.SetupMockBeaconNetwork(t, currentSlot)
+	mockNetworkConfig := &networkconfig.NetworkConfig{
+		Beacon: mockBeaconNetwork,
+	}
+
+	eh, _, err := setupEventHandler(t, ctx, logger, mockNetworkConfig, ops[0], false)
 	if err != nil {
 		t.Fatal(err)
 	}
-	sim := simTestBackend(testAddr)
+
+	// Create one more key/address pair for testing
+	wrongPk, err := crypto.HexToECDSA("42e14d227125f411d6d3285bb4a2e07c2dba2e210bd2f3f4e2a36633bd61bfe6")
+	require.NoError(t, err)
+	testAddr2 := crypto.PubkeyToAddress(wrongPk.PublicKey)
+
+	testAddresses := make([]*ethcommon.Address, 2)
+	testAddresses[0] = &testAddr
+	testAddresses[1] = &testAddr2
+
+	// Add testAddresses to the genesis block, mainly to give them some balances
+	sim := simTestBackend(testAddresses)
 
 	// Create JSON-RPC handler
 	rpcServer, _ := sim.Node.RPCHandler()
@@ -114,13 +135,23 @@ func TestHandleBlockEventsStream(t *testing.T) {
 	sharesData1, err := generateSharesData(validatorData1, ops, testAddr, 0)
 	require.NoError(t, err)
 
+	// Create another validator; we'll create its shares later in the tests
+	validatorData2, err := createNewValidator(ops)
+	require.NoError(t, err)
+
+	validatorData3, err := createNewValidator(ops)
+	require.NoError(t, err)
+	sharesData3, err := generateSharesData(validatorData3, ops, testAddr, 3)
+	require.NoError(t, err)
+
 	blockNum := uint64(0x1)
+	currentSlot.SetSlot(100)
 
 	t.Run("test OperatorAdded event handle", func(t *testing.T) {
 		for _, op := range ops {
 			// Call the contract method
-			packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.pub)
+			packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub)
 			require.NoError(t, err)
 			_, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000))
 			require.NoError(t, err)
@@ -139,22 +170,22 @@ func TestHandleBlockEventsStream(t *testing.T) {
 		}()
 
 		// Check that there is no registered operators
-		operators, err := eh.nodeStorage.ListOperators(nil, 0, 10)
+		operators, err := eh.nodeStorage.ListOperators(nil, 0, 0)
 		require.NoError(t, err)
 		require.Equal(t, 0, len(operators))
 
-		// Hanlde the event
+		// Handle the event
 		lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
 		require.Equal(t, blockNum+1, lastProcessedBlock)
 		require.NoError(t, err)
 		blockNum++
 
-		// Check storage for a new operator
-		operators, err = eh.nodeStorage.ListOperators(nil, 0, 10)
+		// Check storage for the new operators
+		operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
 		require.NoError(t, err)
 		require.Equal(t, len(ops), len(operators))
 
-		// Check if an operator in the storage has same attributes
+		// Check if operators in the storage have the same attributes
 		for i, log := range block.Logs {
 			operatorAddedEvent, err := contractFilterer.ParseOperatorAdded(log)
 			require.NoError(t, err)
@@ -162,47 +193,124 @@
 			require.NoError(t, err)
 			require.Equal(t, operatorAddedEvent.OperatorId, data.ID)
 			require.Equal(t, operatorAddedEvent.Owner, data.OwnerAddress)
-			require.Equal(t, ops[i].pub, data.PublicKey)
+			require.Equal(t, ops[i].rsaPub, data.PublicKey)
 		}
 	})
 
-	// Receive event, unmarshall, parse, check parse event is not nil or with error, operator id is correct
 	t.Run("test OperatorRemoved event handle", func(t *testing.T) {
 
-		// Call the contract method
-		_, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 1)
-		require.NoError(t, err)
-		sim.Commit()
 
-		block := <-logs
-		require.NotEmpty(t, block.Logs)
-		require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0])
+		// Should return MalformedEventError and no changes to the state
+		t.Run("test OperatorRemoved incorrect operator ID", func(t *testing.T) {
+			// Call the contract method
+			_, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 100500)
+			require.NoError(t, err)
+			sim.Commit()
 
-		eventsCh := make(chan executionclient.BlockLogs)
-		go func() {
-			defer close(eventsCh)
-			eventsCh <- block
-		}()
+			block := <-logs
+			require.NotEmpty(t, block.Logs)
+			require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0])
 
-		// Check that there is 1 registered operator
-		operators, err := eh.nodeStorage.ListOperators(nil, 0, 10)
-		require.NoError(t, err)
-		require.Equal(t, len(ops), len(operators))
+			eventsCh := make(chan executionclient.BlockLogs)
+			go func() {
+				defer close(eventsCh)
+				eventsCh <- block
+			}()
 
-		// Hanlde the event
-		lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
-		require.Equal(t, blockNum+1, lastProcessedBlock)
-		require.NoError(t, err)
-		blockNum++
+			// Check the number of currently registered operators
+			operators, err := eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops), len(operators))
 
-		// Check if the operator was removed successfuly
-		// TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved
-		operators, err = eh.nodeStorage.ListOperators(nil, 0, 10)
-		require.NoError(t, err)
-		require.Equal(t, len(ops), len(operators))
+			// Handle the event
+			lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
+			require.Equal(t, blockNum+1, lastProcessedBlock)
+			require.NoError(t, err)
+			blockNum++
+
+			// Check that the operator was not removed
+			operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops), len(operators))
+		})
+
+		// Receive event, unmarshal, parse, check parse event is not nil or with error, operator id is correct
+		// TODO: fix this test. It currently checks nothing, because the handleOperatorRemoved method is a no-op
+		t.Run("test OperatorRemoved happy flow", func(t *testing.T) {
+			// Prepare a new operator, to be removed later in this test
+			op, err := createOperators(1, operatorsCount)
+			require.NoError(t, err)
+			operatorsCount++
+
+			// Call the contract method
+			packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op[0].rsaPub)
+			require.NoError(t, err)
+			_, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000))
+			require.NoError(t, err)
+
+			sim.Commit()
+
+			block := <-logs
+			require.NotEmpty(t, block.Logs)
+			require.Equal(t, ethcommon.HexToHash("0xd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f4"), block.Logs[0].Topics[0])
+
+			eventsCh := make(chan executionclient.BlockLogs)
+			go func() {
+				defer close(eventsCh)
+				eventsCh <- block
+			}()
+
+			// Check the number of registered operators before the event is handled
+			operators, err := eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops), len(operators))
+
+			// Handle the OperatorAdded event
+			lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
+			require.Equal(t, blockNum+1, lastProcessedBlock)
+			require.NoError(t, err)
+			blockNum++
+			// Check storage for the new operator
+			operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops)+1, len(operators))
+
+			// Now start the OperatorRemoved event handling
+			// Call the contract method
+			_, err = boundContract.SimcontractTransactor.RemoveOperator(auth, 4)
+			require.NoError(t, err)
+			sim.Commit()
+
+			block = <-logs
+			require.NotEmpty(t, block.Logs)
+			require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[0].Topics[0])
+
+			eventsCh = make(chan executionclient.BlockLogs)
+			go func() {
+				defer close(eventsCh)
+				eventsCh <- block
+			}()
+
+			operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
+			require.NoError(t, err)
+			require.Equal(t, len(ops)+1, len(operators))
+
+			// Handle the OperatorRemoved event
+			lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false)
+			require.Equal(t, blockNum+1, lastProcessedBlock)
+			require.NoError(t, err)
+			blockNum++
+
+			// TODO: this should be adjusted when eth/eventhandler/handlers.go#L109 is resolved
+			// Check if the operator was removed successfully
+			//operators, err = eh.nodeStorage.ListOperators(nil, 0, 0)
+			//require.NoError(t, err)
+			//require.Equal(t, len(ops), len(operators))
+		})
+	})
+
 	// Receive event, unmarshall, parse, check parse event is not nil or with an error,
 	// public key is correct, owner is correct, operator ids are correct, shares are correct
+	// slashing protection data is correct
 	t.Run("test ValidatorAdded event handle", func(t *testing.T) {
 		nonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr)
 		require.NoError(t, err)
@@ -236,9 +344,12 @@
 		}()
 
 		lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false)
-		require.Equal(t, blockNum+1, lastProcessedBlock)
 		require.NoError(t, err)
+		require.Equal(t, blockNum+1, lastProcessedBlock)
 		blockNum++
+
+		requireKeyManagerDataToExist(t, eh, 1, validatorData1)
+
 		// Check that validator was registered
 		shares := eh.nodeStorage.Shares().List(nil)
 		require.Equal(t, 1, len(shares))
@@ -247,12 +358,11 @@
 		require.NoError(t, err)
 		require.Equal(t, registrystorage.Nonce(1), nonce)
 
-		validatorData2, err := 
createNewValidator(ops) - require.NoError(t, err) sharesData2, err := generateSharesData(validatorData2, ops, testAddr, 2) require.NoError(t, err) // SharesData length is incorrect. Nonce is bumped; Validator wasn't added + // slashing protection data is not added t.Run("test nonce bumping even for incorrect sharesData length", func(t *testing.T) { // changing the length malformedSharesData := sharesData2[:len(sharesData2)-1] @@ -285,10 +395,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToNotExist(t, eh, 1, validatorData2) + // Check that validator was not registered, shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 1, len(shares)) @@ -299,6 +411,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Length of the shares []byte is correct; nonce is bumped; validator is added + // slashing protection data is correct t.Run("test validator 1 doesnt check validator's 4 share", func(t *testing.T) { malformedSharesData := sharesData2[:] // Corrupt the encrypted last share key of the 4th operator @@ -332,10 +445,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToExist(t, eh, 2, validatorData2) + // Check that validator was registered for op1, shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 2, len(shares)) @@ -345,13 +460,9 @@ func TestHandleBlockEventsStream(t *testing.T) { require.Equal(t, registrystorage.Nonce(3), nonce) }) - validatorData3, err := createNewValidator(ops) - require.NoError(t, err) - sharesData3, err := generateSharesData(validatorData3, ops, testAddr, 3) - require.NoError(t, err) - // Share for 1st operator is malformed; check nonce is bumped correctly; validator wasn't added - t.Run("test correct ValidatorAdded again and nonce is bumped", func(t *testing.T) { + // slashing protection data is not added + t.Run("test malformed ValidatorAdded and nonce is bumped", func(t *testing.T) { malformedSharesData := sharesData3[:] operatorCount := len(ops) @@ -389,10 +500,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToNotExist(t, eh, 2, validatorData3) + // Check that validator was not registered shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 2, len(shares)) @@ -403,6 +516,7 @@ func TestHandleBlockEventsStream(t *testing.T) { }) // Correct event; check nonce is bumped correctly; validator is added + // slashing protection data is correct t.Run("test correct ValidatorAdded again and nonce is bumped", func(t *testing.T) { // regenerate with updated nonce sharesData3, err = generateSharesData(validatorData3, ops, testAddr, 4) @@ -435,10 +549,12 @@ func TestHandleBlockEventsStream(t *testing.T) { }() lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) - require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) blockNum++ + requireKeyManagerDataToExist(t, 
eh, 3, validatorData3) + // Check that validator was registered shares = eh.nodeStorage.Shares().List(nil) require.Equal(t, 3, len(shares)) @@ -447,14 +563,192 @@ func TestHandleBlockEventsStream(t *testing.T) { require.NoError(t, err) require.Equal(t, registrystorage.Nonce(5), nonce) }) + + t.Run("test correct ValidatorAdded again and nonce is bumped with another owner", func(t *testing.T) { + validatorData4, err := createNewValidator(ops) + require.NoError(t, err) + authTestAddr2, _ := bind.NewKeyedTransactorWithChainID(wrongPk, big.NewInt(1337)) + + sharesData4, err := generateSharesData(validatorData4, ops, testAddr2, 0) + require.NoError(t, err) + // Call the contract method + _, err = boundContract.SimcontractTransactor.RegisterValidator( + authTestAddr2, + validatorData4.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + sharesData4, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block = <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e5"), block.Logs[0].Topics[0]) + + eventsCh = make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err = eh.HandleBlockEventsStream(eventsCh, false) + require.NoError(t, err) + require.Equal(t, blockNum+1, lastProcessedBlock) + blockNum++ + + requireKeyManagerDataToExist(t, eh, 4, validatorData4) + + // Check that validator was registered + shares = eh.nodeStorage.Shares().List(nil) + require.Equal(t, 4, len(shares)) + // and nonce was bumped + nonce, err = eh.nodeStorage.GetNextNonce(nil, testAddr2) + require.NoError(t, err) + // Check that nonces are not intertwined between different owner accounts! 
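+ // (nonces are tracked per owner address: testAddr2 has registered exactly one validator, so its next nonce is 1 regardless of how many validators testAddr has registered)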
+ require.Equal(t, registrystorage.Nonce(1), nonce) + }) + }) - // Receive event, unmarshall, parse, check parse event is not nil or with an error, - // public key is correct, owner is correct, operator ids are correct - t.Run("test ValidatorRemoved event handle", func(t *testing.T) { - _, err = boundContract.SimcontractTransactor.RemoveValidator( + t.Run("test ValidatorRemoved event handling", func(t *testing.T) { + // Must throw error "malformed event: could not find validator share" + t.Run("ValidatorRemoved incorrect event public key", func(t *testing.T) { + pk := validatorData1.masterPubKey.Serialize() + // Corrupt the public key + pk[len(pk)-1] ^= 1 + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + pk, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator's shares are still present in the state after incorrect ValidatorRemoved event + valShare := eh.nodeStorage.Shares().Get(nil, validatorData1.masterPubKey.Serialize()) + require.NotNil(t, valShare) + }) + + t.Run("ValidatorRemoved incorrect owner address", func(t *testing.T) { + wrongAuth, _ := bind.NewKeyedTransactorWithChainID(wrongPk, big.NewInt(1337)) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + wrongAuth, + validatorData1.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator's shares are still present in the state after incorrect ValidatorRemoved event + valShare := eh.nodeStorage.Shares().Get(nil, validatorData1.masterPubKey.Serialize()) + require.NotNil(t, valShare) + }) + + // Receive event, unmarshall, parse, check parse event is not nil or with an error, + // public key is correct, owner is correct, operator ids are correct + // event handler's own operator is responsible for removed validator + t.Run("ValidatorRemoved happy flow", func(t *testing.T) { + valPubKey := validatorData1.masterPubKey.Serialize() + // Check the validator's shares are present in the state before removing + valShare := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, valShare) + requireKeyManagerDataToExist(t, eh, 4, validatorData1) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + validatorData1.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + 
simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // Check the validator was removed from the validator shares storage. + shares := eh.nodeStorage.Shares().List(nil) + require.Equal(t, 3, len(shares)) + valShare = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + requireKeyManagerDataToNotExist(t, eh, 3, validatorData1) + }) + }) + + // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct + // slashing protection data is not deleted + t.Run("test ClusterLiquidated event handle", func(t *testing.T) { + _, err = boundContract.SimcontractTransactor.Liquidate( auth, - validatorData1.masterPubKey.Serialize(), + testAddr, []uint64{1, 2, 3, 4}, simcontract.CallableCluster{ ValidatorCount: 1, @@ -468,7 +762,7 @@ func TestHandleBlockEventsStream(t *testing.T) { block := <-logs require.NotEmpty(t, block.Logs) - require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0x1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e688"), block.Logs[0].Topics[0]) eventsCh := make(chan executionclient.BlockLogs) go func() { @@ -476,14 +770,91 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests. 
This one has to be in the state + valPubKey := validatorData2.masterPubKey.Serialize() + + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.True(t, share.Liquidated) + // check that slashing data was not deleted + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + + require.Equal(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Equal(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, highestProposal, currentSlot.GetSlot()) }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct - t.Run("test ClusterLiquidated event handle", func(t *testing.T) { + // ** storedEpoch = max(nextEpoch, storedEpoch) ** + // Validate that slashing protection data stored epoch is nextEpoch and NOT storedEpoch + t.Run("test ClusterReactivated event handle", func(t *testing.T) { + _, err = boundContract.SimcontractTransactor.Reactivate( + auth, + []uint64{1, 2, 3, 4}, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b6859"), block.Logs[0].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + currentSlot.SetSlot(1000) + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + + // check that slashing data was bumped + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + require.Equal(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Equal(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, highestProposal, currentSlot.GetSlot()) + + blockNum++ + }) + + // Liquidated event is far in the future + // in order to simulate stored far in the future slashing protection data + t.Run("test ClusterLiquidated event handle - far in the future", func(t *testing.T) { _, err = boundContract.SimcontractTransactor.Liquidate( auth, testAddr, @@ 
-514,11 +885,13 @@ func TestHandleBlockEventsStream(t *testing.T) { blockNum++ }) - // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, operator ids are correct - t.Run("test ClusterReactivated event handle", func(t *testing.T) { + // Reactivate event + // ** storedEpoch = max(nextEpoch, storedEpoch) ** + // Validate that slashing protection data stored epoch is storedEpoch and NOT nextEpoch + t.Run("test ClusterReactivated event handle - far in the future", func(t *testing.T) { _, err = boundContract.SimcontractTransactor.Reactivate( auth, - []uint64{1, 2, 3}, + []uint64{1, 2, 3, 4}, big.NewInt(100_000_000), simcontract.CallableCluster{ ValidatorCount: 1, @@ -540,17 +913,44 @@ func TestHandleBlockEventsStream(t *testing.T) { eventsCh <- block }() + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests + valPubKey := validatorData2.masterPubKey.Serialize() + + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.True(t, share.Liquidated) + currentSlot.SetSlot(100) + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) + + // check that the stored slashing protection data is ahead of the current epoch + sharePubKey := validatorData3.operatorsShares[0].sec.GetPublicKey().Serialize() + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + require.Greater(t, highestAttestation.Source.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())-1) + require.Greater(t, highestAttestation.Target.Epoch, mockBeaconNetwork.EstimatedEpochAtSlot(currentSlot.GetSlot())) + + highestProposal, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.Greater(t, highestProposal, currentSlot.GetSlot()) + blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) }) // Receive event, unmarshall, parse, check parse event is not nil or with an error, owner is correct, fee recipient is correct t.Run("test FeeRecipientAddressUpdated event handle", func(t *testing.T) { _, err = boundContract.SimcontractTransactor.SetFeeRecipientAddress( auth, - ethcommon.HexToAddress("0x1"), + testAddr2, ) require.NoError(t, err) sim.Commit() @@ -569,14 +969,202 @@ func TestHandleBlockEventsStream(t *testing.T) { require.Equal(t, blockNum+1, lastProcessedBlock) require.NoError(t, err) blockNum++ - // Check if the fee recepient was updated - recepientData, _, err := eh.nodeStorage.GetRecipientData(nil, testAddr) + // Check if the fee recipient was updated + recipientData, _, err := eh.nodeStorage.GetRecipientData(nil, testAddr) require.NoError(t, err) - require.Equal(t, ethcommon.HexToAddress("0x1").String(), recepientData.FeeRecipient.String()) + require.Equal(t, testAddr2.String(), recipientData.FeeRecipient.String()) + }) + + // DO / UNDO in one block tests + t.Run("test DO / UNDO in one block", func(t *testing.T) { + t.Run("test OperatorAdded + OperatorRemoved events handling", func(t *testing.T) { + // There are 5 operators registered before this test runs + // Check the current number of registered operators + operators, err := eh.nodeStorage.ListOperators(nil, 0, 0) + require.NoError(t, err) + require.Equal(t, operatorsCount, uint64(len(operators))) + 
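+ // Register a brand-new operator and remove it again within the same block, so both events arrive in a single BlockLogs batch below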
+ tmpOps, err := createOperators(1, operatorsCount) + require.NoError(t, err) + operatorsCount++ + op := tmpOps[0] + + // Call the RegisterOperator contract method + packedOperatorPubKey, err := eventparser.PackOperatorPublicKey(op.rsaPub) + require.NoError(t, err) + _, err = boundContract.SimcontractTransactor.RegisterOperator(auth, packedOperatorPubKey, big.NewInt(100_000_000)) + require.NoError(t, err) + + // Call the OperatorRemoved contract method + _, err = boundContract.SimcontractTransactor.RemoveOperator(auth, op.id) + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0xd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f4"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0x0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + // Handle the event + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + // #TODO: Fails until we fix the OperatorAdded: handlers.go #108 + // Check storage for the new operators + //operators, err = eh.nodeStorage.ListOperators(nil, 0, 0) + //require.NoError(t, err) + //require.Equal(t, operatorsCount-1, uint64(len(operators))) + // + //_, found, err := eh.nodeStorage.GetOperatorData(nil, op.id) + //require.NoError(t, err) + //require.False(t, found) + }) + + t.Run("test ValidatorAdded + ValidatorRemoved events handling", func(t *testing.T) { + shares := eh.nodeStorage.Shares().List(nil) + sharesCountBeforeTest := len(shares) + + validatorData4, err := createNewValidator(ops) + require.NoError(t, err) + + currentNonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) + require.NoError(t, err) + + sharesData4, err := generateSharesData(validatorData4, ops, testAddr, int(currentNonce)) + require.NoError(t, err) + + valPubKey := validatorData4.masterPubKey.Serialize() + valShare := eh.nodeStorage.Shares().Get(nil, valPubKey) + require.Nil(t, valShare) + + // Call the contract method + _, err = boundContract.SimcontractTransactor.RegisterValidator( + auth, + validatorData4.masterPubKey.Serialize(), + []uint64{1, 2, 3, 4}, + sharesData4, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + _, err = boundContract.SimcontractTransactor.RemoveValidator( + auth, + valPubKey, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 2, + Active: true, + Balance: big.NewInt(100_000_000), + }) + + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e5"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0xccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + valShare = eh.nodeStorage.Shares().Get(nil, valPubKey) + 
require.Nil(t, valShare) + + // Check that the shares count is unchanged (the validator was added and removed within the same block) + shares = eh.nodeStorage.Shares().List(nil) + require.Equal(t, sharesCountBeforeTest, len(shares)) + // and nonce was bumped + nonce, err := eh.nodeStorage.GetNextNonce(nil, testAddr) + require.NoError(t, err) + require.Equal(t, currentNonce+1, nonce) + }) + + t.Run("test ClusterLiquidated + ClusterReactivated events handling", func(t *testing.T) { + // Using validator 2 because we've removed validator 1 in ValidatorRemoved tests + valPubKey := validatorData2.masterPubKey.Serialize() + share := eh.nodeStorage.Shares().Get(nil, valPubKey) + + require.NotNil(t, share) + require.False(t, share.Liquidated) + _, err = boundContract.SimcontractTransactor.Liquidate( + auth, + testAddr, + []uint64{1, 2, 3, 4}, + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + _, err = boundContract.SimcontractTransactor.Reactivate( + auth, + []uint64{1, 2, 3, 4}, + big.NewInt(100_000_000), + simcontract.CallableCluster{ + ValidatorCount: 1, + NetworkFeeIndex: 1, + Index: 1, + Active: true, + Balance: big.NewInt(100_000_000), + }) + require.NoError(t, err) + + sim.Commit() + + block := <-logs + require.NotEmpty(t, block.Logs) + require.Equal(t, ethcommon.HexToHash("0x1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e688"), block.Logs[0].Topics[0]) + require.Equal(t, ethcommon.HexToHash("0xc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b6859"), block.Logs[1].Topics[0]) + + eventsCh := make(chan executionclient.BlockLogs) + go func() { + defer close(eventsCh) + eventsCh <- block + }() + + lastProcessedBlock, err := eh.HandleBlockEventsStream(eventsCh, false) + require.Equal(t, blockNum+1, lastProcessedBlock) + require.NoError(t, err) + blockNum++ + + share = eh.nodeStorage.Shares().Get(nil, valPubKey) + require.NotNil(t, share) + require.False(t, share.Liquidated) + }) }) } -func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, operator *testOperator, useMockCtrl bool) (*EventHandler, *mocks.MockController, error) { +func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, network *networkconfig.NetworkConfig, operator *testOperator, useMockCtrl bool) (*EventHandler, *mocks.MockController, error) { db, err := kv.NewInMemory(logger, basedb.Options{ Ctx: ctx, }) @@ -584,9 +1172,14 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op storageMap := ibftstorage.NewStores() nodeStorage, operatorData := setupOperatorStorage(logger, db, operator) - testNetworkConfig := networkconfig.TestNetwork - keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, testNetworkConfig, true, "") + if network == nil { + network = &networkconfig.NetworkConfig{ + Beacon: utils.SetupMockBeaconNetwork(t, &utils.SlotValue{}), + } + } + + keyManager, err := ekm.NewETHKeyManagerSigner(logger, db, *network, true, "") if err != nil { return nil, nil, err } @@ -607,7 +1200,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - testNetworkConfig.Domain, + network.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -633,6 +1226,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op KeyManager: keyManager, StorageMap: storageMap, OperatorData: operatorData, + ValidatorsMap: validatorsmap.New(ctx), }) contractFilterer, err := 
contract.NewContractFilterer(ethcommon.Address{}, nil) @@ -644,7 +1238,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op nodeStorage, parser, validatorCtrl, - testNetworkConfig.Domain, + network.Domain, validatorCtrl, nodeStorage.GetPrivateKey, keyManager, @@ -660,7 +1254,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger, op func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *testOperator) (operatorstorage.Storage, *registrystorage.OperatorData) { if operator == nil { - logger.Fatal("empty test operator was passed", zap.Error(fmt.Errorf("empty test operator was passed"))) + logger.Fatal("empty test operator was passed") } nodeStorage, err := operatorstorage.NewNodeStorage(logger, db) @@ -668,9 +1262,9 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *test logger.Fatal("failed to create node storage", zap.Error(err)) } - operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.priv)) + operatorPubKey, err := nodeStorage.SetupPrivateKey(base64.StdEncoding.EncodeToString(operator.rsaPriv)) if err != nil { - logger.Fatal("could not setup operator private key", zap.Error(err)) + logger.Fatal("couldn't setup operator private key", zap.Error(err)) } _, found, err := nodeStorage.GetPrivateKey() @@ -681,7 +1275,7 @@ func setupOperatorStorage(logger *zap.Logger, db basedb.Database, operator *test operatorData, found, err = nodeStorage.GetOperatorDataByPubKey(nil, operatorPubKey) if err != nil { - logger.Fatal("could not get operator data by public key", zap.Error(err)) + logger.Fatal("couldn't get operator data by public key", zap.Error(err)) } if !found { operatorData = ®istrystorage.OperatorData{ @@ -704,20 +1298,22 @@ func unmarshalLog(t *testing.T, rawOperatorAdded string) ethtypes.Log { return vLogOperatorAdded } -func simTestBackend(testAddr ethcommon.Address) *simulator.SimulatedBackend { +func simTestBackend(testAddresses []*ethcommon.Address) *simulator.SimulatedBackend { + genesis := core.GenesisAlloc{} + + for _, testAddr := range testAddresses { + genesis[*testAddr] = core.GenesisAccount{Balance: big.NewInt(10000000000000000)} + } + return simulator.NewSimulatedBackend( - core.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000000000)}, - }, 10000000, + genesis, 50_000_000, ) } func TestCreatingSharesData(t *testing.T) { - owner := testAddr nonce := 0 - // - ops, err := createOperators(4) + ops, err := createOperators(4, 1) require.NoError(t, err) validatorData, err := createNewValidator(ops) @@ -742,7 +1338,7 @@ func TestCreatingSharesData(t *testing.T) { encryptedKeys := splitBytes(sharesData[pubKeysOffset:], len(sharesData[pubKeysOffset:])/operatorCount) for i, enck := range encryptedKeys { - priv, err := rsaencryption.ConvertPemToPrivateKey(string(ops[i].priv)) + priv, err := rsaencryption.ConvertPemToPrivateKey(string(ops[i].rsaPriv)) require.NoError(t, err) decryptedSharePrivateKey, err := rsaencryption.DecodeKey(priv, enck) require.NoError(t, err) @@ -763,9 +1359,9 @@ type testValidatorData struct { } type testOperator struct { - id uint64 - pub []byte // rsa pub - priv []byte // rsa sk + id uint64 + rsaPub []byte + rsaPriv []byte } type testShare struct { @@ -774,24 +1370,32 @@ type testShare struct { pub *bls.PublicKey } +func shareExist(accounts []ekmcore.ValidatorAccount, sharePubKey []byte) bool { + for _, acc := range accounts { + if bytes.Equal(acc.ValidatorPublicKey(), sharePubKey) { + return true + } + } + 
return false +} + func createNewValidator(ops []*testOperator) (*testValidatorData, error) { validatorData := &testValidatorData{} sharesCount := uint64(len(ops)) threshold.Init() - msk, pubk := blskeygen.GenBLSKeyPair() + msk, mpk := blskeygen.GenBLSKeyPair() secVec := msk.GetMasterSecretKey(int(sharesCount)) - pubks := bls.GetMasterPublicKey(secVec) + pubKeys := bls.GetMasterPublicKey(secVec) splitKeys, err := threshold.Create(msk.Serialize(), sharesCount-1, sharesCount) if err != nil { return nil, err } - num := uint64(len(ops)) - validatorData.operatorsShares = make([]*testShare, num) + validatorData.operatorsShares = make([]*testShare, sharesCount) - // derive a `hareCount` number of shares - for i := uint64(1); i <= num; i++ { + // derive a `sharesCount` number of shares + for i := uint64(1); i <= sharesCount; i++ { validatorData.operatorsShares[i-1] = &testShare{ opId: i, sec: splitKeys[i], @@ -800,54 +1404,54 @@ func createNewValidator(ops []*testOperator) (*testValidatorData, error) { } validatorData.masterKey = msk - validatorData.masterPubKey = pubk - validatorData.masterPublicKeys = pubks + validatorData.masterPubKey = mpk + validatorData.masterPublicKeys = pubKeys return validatorData, nil } -func createOperators(num uint64) ([]*testOperator, error) { - testops := make([]*testOperator, num) +func createOperators(num uint64, idOffset uint64) ([]*testOperator, error) { + testOps := make([]*testOperator, num) for i := uint64(1); i <= num; i++ { pb, sk, err := rsaencryption.GenerateKeys() if err != nil { return nil, err } - testops[i-1] = &testOperator{ - id: i, - pub: pb, - priv: sk, + testOps[i-1] = &testOperator{ + id: idOffset + i, + rsaPub: pb, + rsaPriv: sk, } } - return testops, nil + return testOps, nil } func generateSharesData(validatorData *testValidatorData, operators []*testOperator, owner ethcommon.Address, nonce int) ([]byte, error) { - var pubkeys []byte + var pubKeys []byte var encryptedShares []byte for i, op := range operators { - rsakey, err := rsaencryption.ConvertPemToPublicKey(op.pub) + rsaKey, err := rsaencryption.ConvertPemToPublicKey(op.rsaPub) if err != nil { - return nil, fmt.Errorf("cant convert publickey: %w", err) + return nil, fmt.Errorf("can't convert public key: %w", err) } - rawshare := validatorData.operatorsShares[i].sec.SerializeToHexStr() - ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, rsakey, []byte(rawshare)) + rawShare := validatorData.operatorsShares[i].sec.SerializeToHexStr() + cipherText, err := rsa.EncryptPKCS1v15(rand.Reader, rsaKey, []byte(rawShare)) if err != nil { - return nil, errors.New("cant encrypt share") + return nil, fmt.Errorf("can't encrypt share: %w", err) } - rsapriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.priv)) + rsaPriv, err := rsaencryption.ConvertPemToPrivateKey(string(op.rsaPriv)) if err != nil { - return nil, err + return nil, fmt.Errorf("can't convert secret key to a private key share: %w", err) } // check that we encrypt right shareSecret := &bls.SecretKey{} - decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsapriv, ciphertext) + decryptedSharePrivateKey, err := rsaencryption.DecodeKey(rsaPriv, cipherText) if err != nil { return nil, err } @@ -855,22 +1459,56 @@ func generateSharesData(validatorData *testValidatorData, operators []*testOpera return nil, err } - pubkeys = append(pubkeys, validatorData.operatorsShares[i].pub.Serialize()...) - encryptedShares = append(encryptedShares, ciphertext...) + pubKeys = append(pubKeys, validatorData.operatorsShares[i].pub.Serialize()...) 
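+ // NOTE: operator order matters: pubKeys[i] and encryptedShares[i] must both belong to operators[i], since the payload assembled below is sig || pubKeys || encryptedShares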
+ encryptedShares = append(encryptedShares, cipherText...) } - tosign := fmt.Sprintf("%s:%d", owner.String(), nonce) - msghash := crypto.Keccak256([]byte(tosign)) - signed := validatorData.masterKey.Sign(string(msghash)) + toSign := fmt.Sprintf("%s:%d", owner.String(), nonce) + msgHash := crypto.Keccak256([]byte(toSign)) + signed := validatorData.masterKey.Sign(string(msgHash)) sig := signed.Serialize() - if !signed.VerifyByte(validatorData.masterPubKey, msghash) { - return nil, errors.New("couldn't sign correctly") + if !signed.VerifyByte(validatorData.masterPubKey, msgHash) { + return nil, errors.New("can't sign correctly") } - sharesData := append(pubkeys, encryptedShares...) + sharesData := append(pubKeys, encryptedShares...) sharesDataSigned := append(sig, sharesData...) return sharesDataSigned, nil } + +func requireKeyManagerDataToExist(t *testing.T, eh *EventHandler, expectedAccounts int, validatorData *testValidatorData) { + sharePubKey := validatorData.operatorsShares[0].sec.GetPublicKey().Serialize() + accounts, err := eh.keyManager.(ekm.StorageProvider).ListAccounts() + require.NoError(t, err) + require.Equal(t, expectedAccounts, len(accounts)) + require.True(t, shareExist(accounts, sharePubKey)) + + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.True(t, found) + require.NotNil(t, highestAttestation) + + _, found, err = eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.True(t, found) +} + +func requireKeyManagerDataToNotExist(t *testing.T, eh *EventHandler, expectedAccounts int, validatorData *testValidatorData) { + sharePubKey := validatorData.operatorsShares[0].sec.GetPublicKey().Serialize() + accounts, err := eh.keyManager.(ekm.StorageProvider).ListAccounts() + require.NoError(t, err) + require.Equal(t, expectedAccounts, len(accounts)) + require.False(t, shareExist(accounts, sharePubKey)) + + highestAttestation, found, err := eh.keyManager.(ekm.StorageProvider).RetrieveHighestAttestation(sharePubKey) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, highestAttestation) + + _, found, err = eh.keyManager.(ekm.StorageProvider).RetrieveHighestProposal(sharePubKey) + require.NoError(t, err) + require.False(t, found) +} diff --git a/eth/eventhandler/handlers.go b/eth/eventhandler/handlers.go index 7c25d7e6f4..d4632ddf6f 100644 --- a/eth/eventhandler/handlers.go +++ b/eth/eventhandler/handlers.go @@ -12,6 +12,7 @@ import ( "github.com/herumi/bls-eth-go-binary/bls" "go.uber.org/zap" + "github.com/bloxapp/ssv/ekm" "github.com/bloxapp/ssv/eth/contract" "github.com/bloxapp/ssv/logging/fields" qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" @@ -39,10 +40,10 @@ var ( func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.ContractOperatorAdded) error { logger := eh.logger.With( - zap.String("event_type", OperatorAdded), + fields.EventName(OperatorAdded), fields.TxHash(event.Raw.TxHash), fields.OperatorID(event.OperatorId), - zap.String("owner_address", event.Owner.String()), + fields.Owner(event.Owner), fields.OperatorPubKey(event.PublicKey), ) logger.Debug("processing event") @@ -85,7 +86,7 @@ func (eh *EventHandler) handleOperatorAdded(txn basedb.Txn, event *contract.Cont func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.ContractOperatorRemoved) error { logger := eh.logger.With( - zap.String("event_type", OperatorRemoved), + 
fields.EventName(OperatorRemoved), fields.TxHash(event.Raw.TxHash), fields.OperatorID(event.OperatorId), ) @@ -101,8 +102,8 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co } logger = logger.With( - zap.String("operator_pub_key", ethcommon.Bytes2Hex(od.PublicKey)), - zap.String("owner_address", od.OwnerAddress.String()), + fields.OperatorPubKey(od.PublicKey), + fields.Owner(od.OwnerAddress), ) // TODO: In original handler we didn't delete operator data, so this behavior was preserved. However we likely need to. @@ -124,10 +125,10 @@ func (eh *EventHandler) handleOperatorRemoved(txn basedb.Txn, event *contract.Co func (eh *EventHandler) handleValidatorAdded(txn basedb.Txn, event *contract.ContractValidatorAdded) (ownShare *ssvtypes.SSVShare, err error) { logger := eh.logger.With( - zap.String("event_type", ValidatorAdded), + fields.EventName(ValidatorAdded), fields.TxHash(event.Raw.TxHash), fields.Owner(event.Owner), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.OperatorIDs(event.OperatorIds), fields.Validator(event.PublicKey), ) @@ -324,12 +325,12 @@ func validatorAddedEventToShare( return &validatorShare, shareSecret, nil } -func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.ContractValidatorRemoved) ([]byte, error) { +func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.ContractValidatorRemoved) (spectypes.ValidatorPK, error) { logger := eh.logger.With( - zap.String("event_type", ValidatorRemoved), + fields.EventName(ValidatorRemoved), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), fields.PubKey(event.PublicKey), ) logger.Debug("processing event") @@ -372,6 +373,11 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C logger = logger.With(zap.String("validator_pubkey", hex.EncodeToString(share.ValidatorPubKey))) } if isOperatorShare { + err = eh.keyManager.RemoveShare(hex.EncodeToString(share.SharePubKey)) + if err != nil { + return nil, fmt.Errorf("could not remove share from ekm storage: %w", err) + } + eh.metrics.ValidatorRemoved(event.PublicKey) logger.Debug("processed event") return share.ValidatorPubKey, nil @@ -383,10 +389,10 @@ func (eh *EventHandler) handleValidatorRemoved(txn basedb.Txn, event *contract.C func (eh *EventHandler) handleClusterLiquidated(txn basedb.Txn, event *contract.ContractClusterLiquidated) ([]*ssvtypes.SSVShare, error) { logger := eh.logger.With( - zap.String("event_type", ClusterLiquidated), + fields.EventName(ClusterLiquidated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), ) logger.Debug("processing event") @@ -405,10 +411,10 @@ func (eh *EventHandler) handleClusterLiquidated(txn basedb.Txn, event *contract. 
func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract.ContractClusterReactivated) ([]*ssvtypes.SSVShare, error) { logger := eh.logger.With( - zap.String("event_type", ClusterReactivated), + fields.EventName(ClusterReactivated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), - zap.Uint64s("operator_ids", event.OperatorIds), + fields.Owner(event.Owner), + fields.OperatorIDs(event.OperatorIds), ) logger.Debug("processing event") @@ -417,6 +423,13 @@ func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract return nil, fmt.Errorf("could not process cluster event: %w", err) } + // bump slashing protection for operator reactivated validators + for _, share := range toReactivate { + if err := eh.keyManager.(ekm.StorageProvider).BumpSlashingProtection(share.SharePubKey); err != nil { + return nil, fmt.Errorf("could not bump slashing protection: %w", err) + } + } + if len(enabledPubKeys) > 0 { logger = logger.With(zap.Strings("enabled_validators", enabledPubKeys)) } @@ -427,9 +440,9 @@ func (eh *EventHandler) handleClusterReactivated(txn basedb.Txn, event *contract func (eh *EventHandler) handleFeeRecipientAddressUpdated(txn basedb.Txn, event *contract.ContractFeeRecipientAddressUpdated) (bool, error) { logger := eh.logger.With( - zap.String("event_type", FeeRecipientAddressUpdated), + fields.EventName(FeeRecipientAddressUpdated), fields.TxHash(event.Raw.TxHash), - zap.String("owner_address", event.Owner.String()), + fields.Owner(event.Owner), fields.FeeRecipient(event.RecipientAddress.Bytes()), ) logger.Debug("processing event") diff --git a/eth/eventhandler/local_events_test.go b/eth/eventhandler/local_events_test.go index 7697c79363..fda1ae0080 100644 --- a/eth/eventhandler/local_events_test.go +++ b/eth/eventhandler/local_events_test.go @@ -18,7 +18,7 @@ import ( func TestHandleLocalEvent(t *testing.T) { // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) t.Run("correct OperatorAdded event", func(t *testing.T) { @@ -46,7 +46,7 @@ func TestHandleLocalEvent(t *testing.T) { defer cancel() logger := zaptest.NewLogger(t) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } @@ -73,7 +73,7 @@ func TestHandleLocalEvent(t *testing.T) { defer cancel() logger := zaptest.NewLogger(t) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } diff --git a/eth/eventhandler/task.go b/eth/eventhandler/task.go index 3e825140b8..f6e2894fa8 100644 --- a/eth/eventhandler/task.go +++ b/eth/eventhandler/task.go @@ -1,9 +1,10 @@ package eventhandler import ( + spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" - ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/protocol/v2/types" ) type Task interface { @@ -11,15 +12,15 @@ type Task interface { } type startValidatorExecutor interface { - StartValidator(share *ssvtypes.SSVShare) error + StartValidator(share *types.SSVShare) error } type StartValidatorTask struct { executor startValidatorExecutor - share *ssvtypes.SSVShare + share *types.SSVShare } -func NewStartValidatorTask(executor startValidatorExecutor, share *ssvtypes.SSVShare) *StartValidatorTask { +func NewStartValidatorTask(executor startValidatorExecutor, share 
*types.SSVShare) *StartValidatorTask { return &StartValidatorTask{ executor: executor, share: share, @@ -31,41 +32,41 @@ func (t StartValidatorTask) Execute() error { } type stopValidatorExecutor interface { - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error } type StopValidatorTask struct { - executor stopValidatorExecutor - publicKey []byte + executor stopValidatorExecutor + pubKey spectypes.ValidatorPK } -func NewStopValidatorTask(executor stopValidatorExecutor, publicKey []byte) *StopValidatorTask { +func NewStopValidatorTask(executor stopValidatorExecutor, pubKey spectypes.ValidatorPK) *StopValidatorTask { return &StopValidatorTask{ - executor: executor, - publicKey: publicKey, + executor: executor, + pubKey: pubKey, } } func (t StopValidatorTask) Execute() error { - return t.executor.StopValidator(t.publicKey) + return t.executor.StopValidator(t.pubKey) } type liquidateClusterExecutor interface { - LiquidateCluster(owner ethcommon.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error + LiquidateCluster(owner ethcommon.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error } type LiquidateClusterTask struct { executor liquidateClusterExecutor owner ethcommon.Address - operatorIDs []uint64 - toLiquidate []*ssvtypes.SSVShare + operatorIDs []spectypes.OperatorID + toLiquidate []*types.SSVShare } func NewLiquidateClusterTask( executor liquidateClusterExecutor, owner ethcommon.Address, - operatorIDs []uint64, - toLiquidate []*ssvtypes.SSVShare, + operatorIDs []spectypes.OperatorID, + toLiquidate []*types.SSVShare, ) *LiquidateClusterTask { return &LiquidateClusterTask{ executor: executor, @@ -80,21 +81,21 @@ func (t LiquidateClusterTask) Execute() error { } type reactivateClusterExecutor interface { - ReactivateCluster(owner ethcommon.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error + ReactivateCluster(owner ethcommon.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error } type ReactivateClusterTask struct { executor reactivateClusterExecutor owner ethcommon.Address - operatorIDs []uint64 - toReactivate []*ssvtypes.SSVShare + operatorIDs []spectypes.OperatorID + toReactivate []*types.SSVShare } func NewReactivateClusterTask( executor reactivateClusterExecutor, owner ethcommon.Address, - operatorIDs []uint64, - toReactivate []*ssvtypes.SSVShare, + operatorIDs []spectypes.OperatorID, + toReactivate []*types.SSVShare, ) *ReactivateClusterTask { return &ReactivateClusterTask{ executor: executor, diff --git a/eth/eventhandler/task_executor_test.go b/eth/eventhandler/task_executor_test.go index 8792aadc91..a735c53dc9 100644 --- a/eth/eventhandler/task_executor_test.go +++ b/eth/eventhandler/task_executor_test.go @@ -3,9 +3,10 @@ package eventhandler import ( "context" "encoding/binary" - "github.com/golang/mock/gomock" "testing" + "github.com/golang/mock/gomock" + spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -48,10 +49,10 @@ func TestExecuteTask(t *testing.T) { defer cancel() // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) - eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, ops[0], true) + eh, validatorCtrl, err := setupEventHandler(t, ctx, logger, nil, ops[0], true) require.NoError(t, err) t.Run("test AddValidator task execution - not started", func(t *testing.T) { @@ 
-145,10 +146,10 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) { defer cancel() // Create operators rsa keys - ops, err := createOperators(1) + ops, err := createOperators(1, 0) require.NoError(t, err) - eh, _, err := setupEventHandler(t, ctx, logger, ops[0], false) + eh, _, err := setupEventHandler(t, ctx, logger, nil, ops[0], false) if err != nil { t.Fatal(err) } @@ -189,7 +190,7 @@ func TestHandleBlockEventsStreamWithExecution(t *testing.T) { } happyFlow := []string{ "successfully setup operator keys", - "setting validator controller", + "setting up validator controller", "malformed event: failed to verify signature", "processed events from block", } diff --git a/eth/eventsyncer/event_syncer_test.go b/eth/eventsyncer/event_syncer_test.go index 4cd2e73e68..9b500fe091 100644 --- a/eth/eventsyncer/event_syncer_test.go +++ b/eth/eventsyncer/event_syncer_test.go @@ -11,6 +11,7 @@ import ( "github.com/bloxapp/ssv/eth/contract" "github.com/bloxapp/ssv/eth/simulator" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/ethereum/go-ethereum/accounts/abi" @@ -152,6 +153,7 @@ func setupEventHandler(t *testing.T, ctx context.Context, logger *zap.Logger) *e DB: db, RegistryStorage: nodeStorage, OperatorData: operatorData, + ValidatorsMap: validatorsmap.New(ctx), }) contractFilterer, err := contract.NewContractFilterer(ethcommon.Address{}, nil) diff --git a/eth/executionclient/execution_client_test.go b/eth/executionclient/execution_client_test.go index 823515c52b..4fed0795c3 100644 --- a/eth/executionclient/execution_client_test.go +++ b/eth/executionclient/execution_client_test.go @@ -67,7 +67,7 @@ func TestFetchHistoricalLogs(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) parsed, _ := abi.JSON(strings.NewReader(callableAbi)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) @@ -131,7 +131,7 @@ func TestStreamLogs(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -215,7 +215,7 @@ func TestFetchLogsInBatches(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) // Deploy the contract parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -325,7 +325,7 @@ func TestChainReorganizationLogs(t *testing.T) { // defer rpcServer.Stop() // defer httpsrv.Close() - // addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + // addr := httpToWebSocketURL(httpsrv.URL) // // 1. 
// parsed, _ := abi.JSON(strings.NewReader(callableAbi)) @@ -417,7 +417,7 @@ func TestSimSSV(t *testing.T) { httpsrv := httptest.NewServer(rpcServer.WebsocketHandler([]string{"*"})) defer rpcServer.Stop() defer httpsrv.Close() - addr := "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + addr := httpToWebSocketURL(httpsrv.URL) parsed, _ := abi.JSON(strings.NewReader(simcontract.SimcontractMetaData.ABI)) auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) @@ -584,3 +584,7 @@ func TestSimSSV(t *testing.T) { require.NoError(t, client.Close()) require.NoError(t, sim.Close()) } + +func httpToWebSocketURL(url string) string { + return "ws:" + strings.TrimPrefix(url, "http:") +} diff --git a/eth/simulator/simcontract/simcontract.go b/eth/simulator/simcontract/simcontract.go index 9da8921e7a..2877c65b29 100644 --- a/eth/simulator/simcontract/simcontract.go +++ b/eth/simulator/simcontract/simcontract.go @@ -41,7 +41,7 @@ type CallableCluster struct { // SimcontractMetaData contains all meta data concerning the Simcontract contract. var SimcontractMetaData = &bind.MetaData{ ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ClusterLiquidated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ClusterReactivated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"recipientAddress\",\"type\":\"address\"}],\"name\":\"FeeRecipientAddressUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"OperatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"}],\"name\":\"OperatorRemoved\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexe
d\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"shares\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ValidatorAdded\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"indexed\":false,\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"ValidatorRemoved\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"clusterOwner\",\"type\":\"address\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"liquidate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"reactivate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"fee\",\"type\":\"uint256\"}],\"name\":\"registerOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\
"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"internalType\":\"bytes\",\"name\":\"sharesData\",\"type\":\"bytes\"},{\"internalType\":\"uint256\",\"name\":\"amount\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"registerValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"operatorId\",\"type\":\"uint64\"}],\"name\":\"removeOperator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"publicKey\",\"type\":\"bytes\"},{\"internalType\":\"uint64[]\",\"name\":\"operatorIds\",\"type\":\"uint64[]\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"validatorCount\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"networkFeeIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"index\",\"type\":\"uint64\"},{\"internalType\":\"bool\",\"name\":\"active\",\"type\":\"bool\"},{\"internalType\":\"uint256\",\"name\":\"balance\",\"type\":\"uint256\"}],\"internalType\":\"structCallable.Cluster\",\"name\":\"cluster\",\"type\":\"tuple\"}],\"name\":\"removeValidator\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"recipientAddress\",\"type\":\"address\"}],\"name\":\"setFeeRecipientAddress\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x608060405260008060006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555034801561003957600080fd5b50610f40806100496000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80635fec6dd01161005b5780635fec6dd0146100d6578063bf0f2fb2146100f2578063dbcdc2cc1461010e578063ff212c5c1461012a5761007d565b806306e8fb9c1461008257806312b3fc191461009e5780632e168e0e146100ba575b600080fd5b61009c60048036038101906100979190610740565b610146565b005b6100b860048036038101906100b3919061086f565b6101a7565b005b6100d460048036038101906100cf9190610904565b610204565b005b6100f060048036038101906100eb9190610931565b61023e565b005b61010c60048036038101906101079190610a03565b610296565b005b61012860048036038101906101239190610a72565b6102eb565b005b610144600480360381019061013f9190610a9f565b61033c565b005b3373ffffffffffffffffffffffffffffffffffffffff167f48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e586898988888760405161019696959493929190610c9f565b60405180910390a250505050505050565b3373ffffffffffffffffffffffffffffffffffffffff167fccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e84848888866040516101f5959493929190610d89565b60405180910390a25050505050565b8067ffffffffffffffff167f0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e60405160405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167fc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b685985858460405161028893929190610dd2565b60405180910390a250505050565b8273ffffffffffffffffffffffffffffffffffffffff167f1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e68883836040516102de929190610e04565b60405180910390a2505050565b3373ffffffffffffffffffffffffffffffffffffffff167f259235c230d57def1521657e7c7951d3b385e76193378bc87ef6b56bc2ec3548826040516103319190610e43565b60405180910390a250565b60016000808282829054906101000a900467ffffffffffffffff166103619190610e8d565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055503373ffffffffffffffffffffffffffffffffffffffff1660008054906101000a900467ffffffffffffffff1667ffffffffffffffff167fd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f48585856040516103f093929190610ed8565b60405180910390a3505050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f84011261043657610435610411565b5b8235905067ffffffffffffffff81111561045357610452610416565b5b60208301915083600182028301111561046f5761046e61041b565b5b9250929050565b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6104bf82610476565b810181811067ffffffffffffffff821117156104de576104dd610487565b5b80604052505050565b60006104f16103fd565b90506104fd82826104b6565b919050565b600067ffffffffffffffff82111561051d5761051c610487565b5b602082029050602081019050919050565b600067ffffffffffffffff82169050919050565b61054b8161052e565b811461055657600080fd5b50565b60008135905061056881610542565b92915050565b600061058161057c84610502565b6104e7565b905080838252602082019050602084028301858111156105a4576105a361041b565b5b835b818110156105cd57806105b98882610559565b8452602084019350506020810190506105a6565b5050509392505050565b600082601f8301126105ec576105eb610411565b5b81356105fc84826020860161056e565b91505092915050565b6000819050919050565b61061881610605565b811461062357600080fd5b50565b6000813590506106358161060f565b92915050565b600080fd5b600063ffffffff82169050919050565b61065981610640565b811461066457600080fd5b50565b60008135905061067681610650565b92915050565b60008115159050919050565b6106918161067c565b811461069c57600080fd5b50565b60008135905061
06ae81610688565b92915050565b600060a082840312156106ca576106c961063b565b5b6106d460a06104e7565b905060006106e484828501610667565b60008301525060206106f884828501610559565b602083015250604061070c84828501610559565b60408301525060606107208482850161069f565b606083015250608061073484828501610626565b60808301525092915050565b6000806000806000806000610120888a0312156107605761075f610407565b5b600088013567ffffffffffffffff81111561077e5761077d61040c565b5b61078a8a828b01610420565b9750975050602088013567ffffffffffffffff8111156107ad576107ac61040c565b5b6107b98a828b016105d7565b955050604088013567ffffffffffffffff8111156107da576107d961040c565b5b6107e68a828b01610420565b945094505060606107f98a828b01610626565b925050608061080a8a828b016106b4565b91505092959891949750929550565b60008083601f84011261082f5761082e610411565b5b8235905067ffffffffffffffff81111561084c5761084b610416565b5b6020830191508360208202830111156108685761086761041b565b5b9250929050565b600080600080600060e0868803121561088b5761088a610407565b5b600086013567ffffffffffffffff8111156108a9576108a861040c565b5b6108b588828901610420565b9550955050602086013567ffffffffffffffff8111156108d8576108d761040c565b5b6108e488828901610819565b935093505060406108f7888289016106b4565b9150509295509295909350565b60006020828403121561091a57610919610407565b5b600061092884828501610559565b91505092915050565b60008060008060e0858703121561094b5761094a610407565b5b600085013567ffffffffffffffff8111156109695761096861040c565b5b61097587828801610819565b9450945050602061098887828801610626565b9250506040610999878288016106b4565b91505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006109d0826109a5565b9050919050565b6109e0816109c5565b81146109eb57600080fd5b50565b6000813590506109fd816109d7565b92915050565b600080600060e08486031215610a1c57610a1b610407565b5b6000610a2a868287016109ee565b935050602084013567ffffffffffffffff811115610a4b57610a4a61040c565b5b610a57868287016105d7565b9250506040610a68868287016106b4565b9150509250925092565b600060208284031215610a8857610a87610407565b5b6000610a96848285016109ee565b91505092915050565b600080600060408486031215610ab857610ab7610407565b5b600084013567ffffffffffffffff811115610ad657610ad561040c565b5b610ae286828701610420565b93509350506020610af586828701610626565b9150509250925092565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610b348161052e565b82525050565b6000610b468383610b2b565b60208301905092915050565b6000602082019050919050565b6000610b6a82610aff565b610b748185610b0a565b9350610b7f83610b1b565b8060005b83811015610bb0578151610b978882610b3a565b9750610ba283610b52565b925050600181019050610b83565b5085935050505092915050565b600082825260208201905092915050565b82818337600083830152505050565b6000610be98385610bbd565b9350610bf6838584610bce565b610bff83610476565b840190509392505050565b610c1381610640565b82525050565b610c228161067c565b82525050565b610c3181610605565b82525050565b60a082016000820151610c4d6000850182610c0a565b506020820151610c606020850182610b2b565b506040820151610c736040850182610b2b565b506060820151610c866060850182610c19565b506080820151610c996080850182610c28565b50505050565b6000610100820190508181036000830152610cba8189610b5f565b90508181036020830152610ccf818789610bdd565b90508181036040830152610ce4818587610bdd565b9050610cf36060830184610c37565b979650505050505050565b6000819050919050565b6000610d176020840184610559565b905092915050565b6000602082019050919050565b6000610d388385610b0a565b9350610d4382610cfe565b8060005b85811015610d7c57610d598284610d08565b610d638882610b3a565b9750610d6e83610d1f565b925050600181019050610d47565b5085925050509392505050565b600060e0820190508181036000830
152610da4818789610d2c565b90508181036020830152610db9818587610bdd565b9050610dc86040830184610c37565b9695505050505050565b600060c0820190508181036000830152610ded818587610d2c565b9050610dfc6020830184610c37565b949350505050565b600060c0820190508181036000830152610e1e8185610b5f565b9050610e2d6020830184610c37565b9392505050565b610e3d816109c5565b82525050565b6000602082019050610e586000830184610e34565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610e988261052e565b9150610ea38361052e565b9250828201905067ffffffffffffffff811115610ec357610ec2610e5e565b5b92915050565b610ed281610605565b82525050565b60006040820190508181036000830152610ef3818587610bdd565b9050610f026020830184610ec9565b94935050505056fea2646970667358221220a849e84b21b5cf14144f9145592d2e879b8dfd174c980e9d839aabab095d209064736f6c63430008120033", + Bin: "0x608060405260008060006101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555034801561003957600080fd5b50610f40806100496000396000f3fe608060405234801561001057600080fd5b506004361061007d5760003560e01c80635fec6dd01161005b5780635fec6dd0146100d6578063bf0f2fb2146100f2578063dbcdc2cc1461010e578063ff212c5c1461012a5761007d565b806306e8fb9c1461008257806312b3fc191461009e5780632e168e0e146100ba575b600080fd5b61009c60048036038101906100979190610740565b610146565b005b6100b860048036038101906100b3919061086f565b6101a7565b005b6100d460048036038101906100cf9190610904565b610204565b005b6100f060048036038101906100eb9190610931565b61023e565b005b61010c60048036038101906101079190610a03565b610296565b005b61012860048036038101906101239190610a72565b6102eb565b005b610144600480360381019061013f9190610a9f565b61033c565b005b3373ffffffffffffffffffffffffffffffffffffffff167f48a3ea0796746043948f6341d17ff8200937b99262a0b48c2663b951ed7114e586898988888760405161019696959493929190610c9f565b60405180910390a250505050505050565b3373ffffffffffffffffffffffffffffffffffffffff167fccf4370403e5fbbde0cd3f13426479dcd8a5916b05db424b7a2c04978cf8ce6e84848888866040516101f5959493929190610d89565b60405180910390a25050505050565b8067ffffffffffffffff167f0e0ba6c2b04de36d6d509ec5bd155c43a9fe862f8052096dd54f3902a74cca3e60405160405180910390a250565b3373ffffffffffffffffffffffffffffffffffffffff167fc803f8c01343fcdaf32068f4c283951623ef2b3fa0c547551931356f456b685985858460405161028893929190610dd2565b60405180910390a250505050565b3373ffffffffffffffffffffffffffffffffffffffff167f1fce24c373e07f89214e9187598635036111dbb363e99f4ce498488cdc66e68883836040516102de929190610e04565b60405180910390a2505050565b3373ffffffffffffffffffffffffffffffffffffffff167f259235c230d57def1521657e7c7951d3b385e76193378bc87ef6b56bc2ec3548826040516103319190610e43565b60405180910390a250565b60016000808282829054906101000a900467ffffffffffffffff166103619190610e8d565b92506101000a81548167ffffffffffffffff021916908367ffffffffffffffff1602179055503373ffffffffffffffffffffffffffffffffffffffff1660008054906101000a900467ffffffffffffffff1667ffffffffffffffff167fd839f31c14bd632f424e307b36abff63ca33684f77f28e35dc13718ef338f7f48585856040516103f093929190610ed8565b60405180910390a3505050565b6000604051905090565b600080fd5b600080fd5b600080fd5b600080fd5b600080fd5b60008083601f84011261043657610435610411565b5b8235905067ffffffffffffffff81111561045357610452610416565b5b60208301915083600182028301111561046f5761046e61041b565b5b9250929050565b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6104bf82610476565b810181811067ffffffffffffffff821117156104de576104dd610487565b5b80604052505050565b60006104f16103fd565b90506104fd8282610
4b6565b919050565b600067ffffffffffffffff82111561051d5761051c610487565b5b602082029050602081019050919050565b600067ffffffffffffffff82169050919050565b61054b8161052e565b811461055657600080fd5b50565b60008135905061056881610542565b92915050565b600061058161057c84610502565b6104e7565b905080838252602082019050602084028301858111156105a4576105a361041b565b5b835b818110156105cd57806105b98882610559565b8452602084019350506020810190506105a6565b5050509392505050565b600082601f8301126105ec576105eb610411565b5b81356105fc84826020860161056e565b91505092915050565b6000819050919050565b61061881610605565b811461062357600080fd5b50565b6000813590506106358161060f565b92915050565b600080fd5b600063ffffffff82169050919050565b61065981610640565b811461066457600080fd5b50565b60008135905061067681610650565b92915050565b60008115159050919050565b6106918161067c565b811461069c57600080fd5b50565b6000813590506106ae81610688565b92915050565b600060a082840312156106ca576106c961063b565b5b6106d460a06104e7565b905060006106e484828501610667565b60008301525060206106f884828501610559565b602083015250604061070c84828501610559565b60408301525060606107208482850161069f565b606083015250608061073484828501610626565b60808301525092915050565b6000806000806000806000610120888a0312156107605761075f610407565b5b600088013567ffffffffffffffff81111561077e5761077d61040c565b5b61078a8a828b01610420565b9750975050602088013567ffffffffffffffff8111156107ad576107ac61040c565b5b6107b98a828b016105d7565b955050604088013567ffffffffffffffff8111156107da576107d961040c565b5b6107e68a828b01610420565b945094505060606107f98a828b01610626565b925050608061080a8a828b016106b4565b91505092959891949750929550565b60008083601f84011261082f5761082e610411565b5b8235905067ffffffffffffffff81111561084c5761084b610416565b5b6020830191508360208202830111156108685761086761041b565b5b9250929050565b600080600080600060e0868803121561088b5761088a610407565b5b600086013567ffffffffffffffff8111156108a9576108a861040c565b5b6108b588828901610420565b9550955050602086013567ffffffffffffffff8111156108d8576108d761040c565b5b6108e488828901610819565b935093505060406108f7888289016106b4565b9150509295509295909350565b60006020828403121561091a57610919610407565b5b600061092884828501610559565b91505092915050565b60008060008060e0858703121561094b5761094a610407565b5b600085013567ffffffffffffffff8111156109695761096861040c565b5b61097587828801610819565b9450945050602061098887828801610626565b9250506040610999878288016106b4565b91505092959194509250565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b60006109d0826109a5565b9050919050565b6109e0816109c5565b81146109eb57600080fd5b50565b6000813590506109fd816109d7565b92915050565b600080600060e08486031215610a1c57610a1b610407565b5b6000610a2a868287016109ee565b935050602084013567ffffffffffffffff811115610a4b57610a4a61040c565b5b610a57868287016105d7565b9250506040610a68868287016106b4565b9150509250925092565b600060208284031215610a8857610a87610407565b5b6000610a96848285016109ee565b91505092915050565b600080600060408486031215610ab857610ab7610407565b5b600084013567ffffffffffffffff811115610ad657610ad561040c565b5b610ae286828701610420565b93509350506020610af586828701610626565b9150509250925092565b600081519050919050565b600082825260208201905092915050565b6000819050602082019050919050565b610b348161052e565b82525050565b6000610b468383610b2b565b60208301905092915050565b6000602082019050919050565b6000610b6a82610aff565b610b748185610b0a565b9350610b7f83610b1b565b8060005b83811015610bb0578151610b978882610b3a565b9750610ba283610b52565b925050600181019050610b83565b5085935050505092915050565b600082825260208201905092915050565b82818337600083830152505050565b6000610be98385610bbd565b9350
610bf6838584610bce565b610bff83610476565b840190509392505050565b610c1381610640565b82525050565b610c228161067c565b82525050565b610c3181610605565b82525050565b60a082016000820151610c4d6000850182610c0a565b506020820151610c606020850182610b2b565b506040820151610c736040850182610b2b565b506060820151610c866060850182610c19565b506080820151610c996080850182610c28565b50505050565b6000610100820190508181036000830152610cba8189610b5f565b90508181036020830152610ccf818789610bdd565b90508181036040830152610ce4818587610bdd565b9050610cf36060830184610c37565b979650505050505050565b6000819050919050565b6000610d176020840184610559565b905092915050565b6000602082019050919050565b6000610d388385610b0a565b9350610d4382610cfe565b8060005b85811015610d7c57610d598284610d08565b610d638882610b3a565b9750610d6e83610d1f565b925050600181019050610d47565b5085925050509392505050565b600060e0820190508181036000830152610da4818789610d2c565b90508181036020830152610db9818587610bdd565b9050610dc86040830184610c37565b9695505050505050565b600060c0820190508181036000830152610ded818587610d2c565b9050610dfc6020830184610c37565b949350505050565b600060c0820190508181036000830152610e1e8185610b5f565b9050610e2d6020830184610c37565b9392505050565b610e3d816109c5565b82525050565b6000602082019050610e586000830184610e34565b92915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000610e988261052e565b9150610ea38361052e565b9250828201905067ffffffffffffffff811115610ec357610ec2610e5e565b5b92915050565b610ed281610605565b82525050565b60006040820190508181036000830152610ef3818587610bdd565b9050610f026020830184610ec9565b94935050505056fea26469706673582212206464f7d32909b03e1e16f822f4ba73e56f9b875dfda6cb13f3fc97c182c5e43664736f6c63430008120033", } // SimcontractABI is the input ABI used to generate the binding from. 
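The regenerated Bin above tracks the simcontract.sol refactor below: where the old bytecode emitted ClusterLiquidated with the clusterOwner argument (DUP3, 0x82, just before the 0x1fce24c3... event topic), the new one loads CALLER (0x33), matching the switch to msg.sender, and the solc 0.8.18 metadata hash at the tail changes with the source. A minimal deployment sketch, assuming the usual abigen convention of a generated DeploySimcontract constructor and go-ethereum's SimulatedBackend of this era (all names here are illustrative, not taken from the diff):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/crypto"

	"github.com/bloxapp/ssv/eth/simulator/simcontract"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	// SimulatedBackend runs with chain ID 1337.
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337))
	if err != nil {
		panic(err)
	}
	// Fund the deployer in the genesis allocation.
	backend := backends.NewSimulatedBackend(
		core.GenesisAlloc{auth.From: {Balance: big.NewInt(1e18)}},
		8_000_000, // block gas limit
	)
	// Sends the Bin bytecode above as a contract-creation transaction.
	addr, tx, _, err := simcontract.DeploySimcontract(auth, backend)
	if err != nil {
		panic(err)
	}
	backend.Commit() // mine the pending block so the deployment lands
	fmt.Println("deployed at", addr.Hex(), "in", tx.Hash().Hex())
}

Until Commit() is called the deployment only sits in the pending block, which is why event-driven tests against the simulator typically interleave transactions with explicit commits.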
diff --git a/eth/simulator/simcontract/simcontract.sol b/eth/simulator/simcontract/simcontract.sol index 23277e23e2..9325802822 100644 --- a/eth/simulator/simcontract/simcontract.sol +++ b/eth/simulator/simcontract/simcontract.sol @@ -52,20 +52,43 @@ contract Callable { _operatorId += 1; emit OperatorAdded(_operatorId, msg.sender, publicKey, fee); } - function removeOperator(uint64 operatorId) public {emit OperatorRemoved(operatorId);} + + function removeOperator(uint64 operatorId) public { + emit OperatorRemoved(operatorId); + } + function registerValidator( bytes calldata publicKey, uint64[] memory operatorIds, bytes calldata sharesData, uint256 amount, Cluster memory cluster - ) public { emit ValidatorAdded(msg.sender, operatorIds, publicKey, sharesData, cluster);} + ) public { + emit ValidatorAdded(msg.sender, operatorIds, publicKey, sharesData, cluster); + } + function removeValidator( bytes calldata publicKey, uint64[] calldata operatorIds, Cluster memory cluster - ) public {emit ValidatorRemoved(msg.sender, operatorIds, publicKey, cluster);} - function liquidate(address clusterOwner, uint64[] memory operatorIds, Cluster memory cluster) public {emit ClusterLiquidated(clusterOwner, operatorIds, cluster);} - function reactivate(uint64[] calldata operatorIds, uint256 amount, Cluster memory cluster) public {emit ClusterReactivated(msg.sender, operatorIds, cluster);} + ) public { + emit ValidatorRemoved(msg.sender, operatorIds, publicKey, cluster); + } + + function liquidate(address clusterOwner, + uint64[] memory operatorIds, + Cluster memory cluster + ) public { + emit ClusterLiquidated(msg.sender, operatorIds, cluster); + } + + function reactivate( + uint64[] calldata operatorIds, + uint256 amount, + Cluster memory cluster + ) public { + emit ClusterReactivated(msg.sender, operatorIds, cluster); + } + function setFeeRecipientAddress(address recipientAddress) public {emit FeeRecipientAddressUpdated(msg.sender, recipientAddress);} } diff --git a/go.mod b/go.mod index 5fa7730cf6..b39d5e0cc9 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/aquasecurity/table v1.8.0 github.com/attestantio/go-eth2-client v0.16.3 github.com/bloxapp/eth2-key-manager v1.3.1 - github.com/bloxapp/ssv-spec v0.3.1 + github.com/bloxapp/ssv-spec v0.3.3 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/xxhash/v2 v2.2.0 github.com/cornelk/hashmap v1.0.8 @@ -37,6 +37,7 @@ require ( github.com/sourcegraph/conc v0.3.0 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 + github.com/wealdtech/go-eth2-types/v2 v2.8.1 github.com/wealdtech/go-eth2-util v1.8.1 github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4 v1.1.3 go.uber.org/multierr v1.11.0 @@ -192,7 +193,6 @@ require ( github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/urfave/cli/v2 v2.24.1 // indirect github.com/wealdtech/go-bytesutil v1.2.1 // indirect - github.com/wealdtech/go-eth2-types/v2 v2.8.1 // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect @@ -223,4 +223,5 @@ replace github.com/google/flatbuffers => github.com/google/flatbuffers v1.11.0 replace github.com/dgraph-io/ristretto => github.com/dgraph-io/ristretto v0.1.1-0.20211108053508-297c39e6640f -replace github.com/bloxapp/ssv-spec => github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800 +//TODO remove this replace when the following PR is merged 
https://github.com/bloxapp/eth2-key-manager/pull/100 +replace github.com/bloxapp/eth2-key-manager => github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 diff --git a/go.sum b/go.sum index 7b8753260f..cf4040a7be 100644 --- a/go.sum +++ b/go.sum @@ -54,10 +54,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bloxapp/eth2-key-manager v1.3.1 h1:1olQcOHRY2TN1o8JX9AN1siEIJXWnlM+BlknfBbXoo4= -github.com/bloxapp/eth2-key-manager v1.3.1/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= -github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800 h1:ikChvdYVw4GFSlnIS+u1qmNqOvgq2a2H3b2FZ44KBn8= -github.com/bloxapp/ssv-spec v0.0.0-20230719131453-1c0044021800/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= +github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5 h1:vjrMmMH15Bo0QF+228CuEZvCI+OuPyJRco82Gj/WyTI= +github.com/bloxapp/eth2-key-manager v1.3.2-0.20231022162227-e2b8264a29a5/go.mod h1:cT+qAJfnAzNz9StFoHQ8xAkyU2eyEukd6xfxvcBWuZA= +github.com/bloxapp/ssv-spec v0.3.3 h1:iNomqWQjxDDQouHMjl27PmH1hUolJ4u8QQ+HX/TQQcg= +github.com/bloxapp/ssv-spec v0.3.3/go.mod h1:zPJR7YnG5iZ6I0h6EzfVly8bTBXaZwcx4TyJ8pzYVd8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= diff --git a/integration/qbft/tests/msg_router.go b/integration/qbft/tests/msg_router.go index bf3b667e98..dda7b7c243 100644 --- a/integration/qbft/tests/msg_router.go +++ b/integration/qbft/tests/msg_router.go @@ -1,21 +1,26 @@ package tests import ( - spectypes "github.com/bloxapp/ssv-spec/types" - protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" + "context" + "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" + protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" ) type msgRouter struct { + logger *zap.Logger validator *protocolvalidator.Validator } -func (m *msgRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { - m.validator.HandleMessage(logger, &message) +func (m *msgRouter) Route(_ context.Context, message *queue.DecodedSSVMessage) { + m.validator.HandleMessage(m.logger, message) } -func newMsgRouter(v *protocolvalidator.Validator) *msgRouter { +func newMsgRouter(logger *zap.Logger, v *protocolvalidator.Validator) *msgRouter { return &msgRouter{ validator: v, + logger: logger, } } diff --git a/integration/qbft/tests/round_change_test.go b/integration/qbft/tests/round_change_test.go index 65c6038e5f..4dbb839f5f 100644 --- a/integration/qbft/tests/round_change_test.go +++ b/integration/qbft/tests/round_change_test.go @@ -2,12 +2,13 @@ package tests import ( "testing" + "time" "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" - protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" "github.com/stretchr/testify/require" + + protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) func TestRoundChange4CommitteeScenario(t *testing.T) { @@ -18,8 +19,8 @@ func 
TestRoundChange4CommitteeScenario(t *testing.T) { Duties: map[spectypes.OperatorID]DutyProperties{ 2: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: NoDelay}, 1: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: NoDelay}, - 3: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: roundtimer.RoundTimeout(1)}, - 4: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: roundtimer.RoundTimeout(1)}, + 3: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: 2 * time.Second}, + 4: {Slot: DefaultSlot, ValidatorIndex: 1, Delay: 2 * time.Second}, }, ValidationFunctions: map[spectypes.OperatorID]func(*testing.T, int, *protocolstorage.StoredInstance){ 1: roundChangeValidator(), diff --git a/integration/qbft/tests/scenario_test.go b/integration/qbft/tests/scenario_test.go index 5fbf6c89b9..e803fd9616 100644 --- a/integration/qbft/tests/scenario_test.go +++ b/integration/qbft/tests/scenario_test.go @@ -2,7 +2,6 @@ package tests import ( "context" - "fmt" "testing" "time" @@ -21,11 +20,9 @@ import ( "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/validator" protocolbeacon "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" protocolstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" protocolvalidator "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/sync/handlers" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/bloxapp/ssv/storage/basedb" "github.com/bloxapp/ssv/storage/kv" @@ -63,15 +60,6 @@ func (s *Scenario) Run(t *testing.T, role spectypes.BeaconRole) { for id := 1; id <= s.Committee; id++ { id := spectypes.OperatorID(id) s.validators[id] = createValidator(t, ctx, id, getKeySet(s.Committee), logger, s.shared.Nodes[id]) - - stores := newStores(logger) - s.shared.Nodes[id].RegisterHandlers(logger, protocolp2p.WithHandler( - protocolp2p.LastDecidedProtocol, - handlers.LastDecidedHandler(logger.Named(fmt.Sprintf("decided-handler-%d", id)), stores, s.shared.Nodes[id]), - ), protocolp2p.WithHandler( - protocolp2p.DecidedHistoryProtocol, - handlers.HistoryHandler(logger.Named(fmt.Sprintf("history-handler-%d", id)), stores, s.shared.Nodes[id], 25), - )) } //invoking duties @@ -84,7 +72,7 @@ func (s *Scenario) Run(t *testing.T, role spectypes.BeaconRole) { copy(pk[:], getKeySet(s.Committee).ValidatorPK.Serialize()) ssvMsg, err := validator.CreateDutyExecuteMsg(duty, pk, networkconfig.TestNetwork.Domain) require.NoError(t, err) - dec, err := queue.DecodeSSVMessage(logger, ssvMsg) + dec, err := queue.DecodeSSVMessage(ssvMsg) require.NoError(t, err) s.validators[id].Queues[role].Q.Push(dec) @@ -218,7 +206,7 @@ func createValidator(t *testing.T, pCtx context.Context, id spectypes.OperatorID options.DutyRunners = validator.SetupRunners(ctx, logger, options) val := protocolvalidator.NewValidator(ctx, cancel, options) - node.UseMessageRouter(newMsgRouter(val)) + node.UseMessageRouter(newMsgRouter(logger, val)) started, err := val.Start(logger) require.NoError(t, err) require.True(t, started) diff --git a/logging/fields/fields.go b/logging/fields/fields.go index 6b1de4ffc5..3584f07915 100644 --- a/logging/fields/fields.go +++ b/logging/fields/fields.go @@ -64,6 +64,7 @@ const ( FieldName = "name" FieldNetwork = "network" FieldOperatorId = "operator_id" + FieldOperatorIDs = "operator_ids" FieldOperatorPubKey = "operator_pubkey" FieldOwnerAddress = "owner_address" FieldPeerID = "peer_id" @@ -190,6 +191,10 @@ func OperatorID(operatorId spectypes.OperatorID) zap.Field { 
return zap.Uint64(FieldOperatorId, operatorId) } +func OperatorIDs(operatorIDs []spectypes.OperatorID) zap.Field { + return zap.Uint64s(FieldOperatorIDs, operatorIDs) +} + func OperatorIDStr(operatorId string) zap.Field { return zap.String(FieldOperatorId, operatorId) } diff --git a/logging/names.go b/logging/names.go index 5a23d12da9..298f6a9ee0 100644 --- a/logging/names.go +++ b/logging/names.go @@ -23,4 +23,5 @@ const ( NamePubsubTrace = "PubsubTrace" NameScoreInspector = "ScoreInspector" NameEventHandler = "EventHandler" + NameDutyFetcher = "DutyFetcher" ) diff --git a/logging/testing.go b/logging/testing.go index 6b6abd8326..b7617c2680 100644 --- a/logging/testing.go +++ b/logging/testing.go @@ -5,16 +5,17 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) func TestLogger(t *testing.T) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", nil) + err := SetGlobalLogger(zapcore.DebugLevel.String(), "capital", "console", nil) require.NoError(t, err) return zap.L().Named(t.Name()) } func BenchLogger(b *testing.B) *zap.Logger { - err := SetGlobalLogger("debug", "capital", "console", nil) + err := SetGlobalLogger(zapcore.DebugLevel.String(), "capital", "console", nil) require.NoError(b, err) return zap.L().Named(b.Name()) } diff --git a/message/validation/consensus_validation.go b/message/validation/consensus_validation.go new file mode 100644 index 0000000000..6bdf023fc4 --- /dev/null +++ b/message/validation/consensus_validation.go @@ -0,0 +1,434 @@ +package validation + +// consensus_validation.go contains methods for validating consensus messages + +import ( + "bytes" + "encoding/hex" + "fmt" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "golang.org/x/exp/slices" + + "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +func (mv *messageValidator) validateConsensusMessage( + share *ssvtypes.SSVShare, + signedMsg *specqbft.SignedMessage, + messageID spectypes.MessageID, + receivedAt time.Time, +) (ConsensusDescriptor, phase0.Slot, error) { + var consensusDescriptor ConsensusDescriptor + + if mv.inCommittee(share) { + mv.metrics.InCommitteeMessage(spectypes.SSVConsensusMsgType, mv.isDecidedMessage(signedMsg)) + } else { + mv.metrics.NonCommitteeMessage(spectypes.SSVConsensusMsgType, mv.isDecidedMessage(signedMsg)) + } + + msgSlot := phase0.Slot(signedMsg.Message.Height) + msgRound := signedMsg.Message.Round + + consensusDescriptor = ConsensusDescriptor{ + QBFTMessageType: signedMsg.Message.MsgType, + Round: msgRound, + Signers: signedMsg.Signers, + Committee: share.Committee, + } + + mv.metrics.ConsensusMsgType(signedMsg.Message.MsgType, len(signedMsg.Signers)) + + if messageID.GetRoleType() == spectypes.BNRoleValidatorRegistration { + return consensusDescriptor, msgSlot, ErrConsensusValidatorRegistration + } + + if err := mv.validateSignatureFormat(signedMsg.Signature); err != nil { + return consensusDescriptor, msgSlot, err + } + + if !mv.validQBFTMsgType(signedMsg.Message.MsgType) { + return consensusDescriptor, msgSlot, ErrUnknownQBFTMessageType + } + + if err := mv.validConsensusSigners(share, signedMsg); err != nil { + return consensusDescriptor, msgSlot, err + } + + role := messageID.GetRoleType() + + if err := mv.validateSlotTime(msgSlot, role, receivedAt); err != nil { + return 
consensusDescriptor, msgSlot, err + } + + if maxRound := mv.maxRound(role); msgRound > maxRound { + err := ErrRoundTooHigh + err.got = fmt.Sprintf("%v (%v role)", msgRound, role) + err.want = fmt.Sprintf("%v (%v role)", maxRound, role) + return consensusDescriptor, msgSlot, err + } + + slotStartTime := mv.netCfg.Beacon.GetSlotStartTime(msgSlot) /*. + Add(mv.waitAfterSlotStart(role))*/ // TODO: not supported yet because first round is non-deterministic now + + sinceSlotStart := time.Duration(0) + estimatedRound := specqbft.FirstRound + if receivedAt.After(slotStartTime) { + sinceSlotStart = receivedAt.Sub(slotStartTime) + estimatedRound = mv.currentEstimatedRound(sinceSlotStart) + } + + // TODO: lowestAllowed is not supported yet because first round is non-deterministic now + lowestAllowed := /*estimatedRound - allowedRoundsInPast*/ specqbft.FirstRound + highestAllowed := estimatedRound + allowedRoundsInFuture + + if msgRound < lowestAllowed || msgRound > highestAllowed { + err := ErrEstimatedRoundTooFar + err.got = fmt.Sprintf("%v (%v role)", msgRound, role) + err.want = fmt.Sprintf("between %v and %v (%v role) / %v passed", lowestAllowed, highestAllowed, role, sinceSlotStart) + return consensusDescriptor, msgSlot, err + } + + if mv.hasFullData(signedMsg) { + hashedFullData, err := specqbft.HashDataRoot(signedMsg.FullData) + if err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("hash data root: %w", err) + } + + if hashedFullData != signedMsg.Message.Root { + return consensusDescriptor, msgSlot, ErrInvalidHash + } + } + + if err := mv.validateBeaconDuty(messageID.GetRoleType(), msgSlot, share); err != nil { + return consensusDescriptor, msgSlot, err + } + + state := mv.consensusState(messageID) + for _, signer := range signedMsg.Signers { + if err := mv.validateSignerBehaviorConsensus(state, signer, share, messageID, signedMsg); err != nil { + return consensusDescriptor, msgSlot, fmt.Errorf("bad signer behavior: %w", err) + } + } + + if mv.verifySignatures { + if err := ssvtypes.VerifyByOperators(signedMsg.Signature, signedMsg, mv.netCfg.Domain, spectypes.QBFTSignatureType, share.Committee); err != nil { + signErr := ErrInvalidSignature + signErr.innerErr = err + signErr.got = fmt.Sprintf("domain %v from %v", hex.EncodeToString(mv.netCfg.Domain[:]), hex.EncodeToString(share.ValidatorPubKey)) + return consensusDescriptor, msgSlot, signErr + } + } + + for _, signer := range signedMsg.Signers { + signerState := state.GetSignerState(signer) + if signerState == nil { + signerState = state.CreateSignerState(signer) + } + if msgSlot > signerState.Slot { + newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) + signerState.ResetSlot(msgSlot, msgRound, newEpoch) + } else if msgSlot == signerState.Slot && msgRound > signerState.Round { + signerState.ResetRound(msgRound) + } + + if mv.hasFullData(signedMsg) && signerState.ProposalData == nil { + signerState.ProposalData = signedMsg.FullData + } + + signerState.MessageCounts.RecordConsensusMessage(signedMsg) + } + + return consensusDescriptor, msgSlot, nil +} + +func (mv *messageValidator) validateJustifications( + share *ssvtypes.SSVShare, + signedMsg *specqbft.SignedMessage, +) error { + pj, err := signedMsg.Message.GetPrepareJustifications() + if err != nil { + e := ErrMalformedPrepareJustifications + e.innerErr = err + return e + } + + if len(pj) != 0 && signedMsg.Message.MsgType != specqbft.ProposalMsgType { + e := ErrUnexpectedPrepareJustifications + e.got = 
signedMsg.Message.MsgType + return e + } + + rcj, err := signedMsg.Message.GetRoundChangeJustifications() + if err != nil { + e := ErrMalformedRoundChangeJustifications + e.innerErr = err + return e + } + + if len(rcj) != 0 && signedMsg.Message.MsgType != specqbft.ProposalMsgType && signedMsg.Message.MsgType != specqbft.RoundChangeMsgType { + e := ErrUnexpectedRoundChangeJustifications + e.got = signedMsg.Message.MsgType + return e + } + + if signedMsg.Message.MsgType == specqbft.ProposalMsgType { + cfg := newQBFTConfig(mv.netCfg.Domain, mv.verifySignatures) + + if err := instance.IsProposalJustification( + cfg, + share, + rcj, + pj, + signedMsg.Message.Height, + signedMsg.Message.Round, + signedMsg.FullData, + ); err != nil { + e := ErrInvalidJustifications + e.innerErr = err + return e + } + } + + return nil +} + +func (mv *messageValidator) validateSignerBehaviorConsensus( + state *ConsensusState, + signer spectypes.OperatorID, + share *ssvtypes.SSVShare, + msgID spectypes.MessageID, + signedMsg *specqbft.SignedMessage, +) error { + signerState := state.GetSignerState(signer) + + if signerState == nil { + return mv.validateJustifications(share, signedMsg) + } + + msgSlot := phase0.Slot(signedMsg.Message.Height) + msgRound := signedMsg.Message.Round + + if msgSlot < signerState.Slot { + // Signers aren't allowed to decrease their slot. + // If they've sent a future message due to clock error, + // this should be caught by the earlyMessage check. + err := ErrSlotAlreadyAdvanced + err.want = signerState.Slot + err.got = msgSlot + return err + } + + if msgSlot == signerState.Slot && msgRound < signerState.Round { + // Signers aren't allowed to decrease their round. + // If they've sent a future message due to clock error, + // they'd have to wait for the next slot/round to be accepted. 
+ err := ErrRoundAlreadyAdvanced + err.want = signerState.Round + err.got = msgRound + return err + } + + newDutyInSameEpoch := false + if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) { + newDutyInSameEpoch = true + } + + if err := mv.validateDutyCount(signerState, msgID, newDutyInSameEpoch); err != nil { + return err + } + + if !(msgSlot > signerState.Slot || msgSlot == signerState.Slot && msgRound > signerState.Round) { + if mv.hasFullData(signedMsg) && signerState.ProposalData != nil && !bytes.Equal(signerState.ProposalData, signedMsg.FullData) { + return ErrDuplicatedProposalWithDifferentData + } + + limits := maxMessageCounts(len(share.Committee)) + if err := signerState.MessageCounts.ValidateConsensusMessage(signedMsg, limits); err != nil { + return err + } + } + + return mv.validateJustifications(share, signedMsg) +} + +func (mv *messageValidator) validateDutyCount( + state *SignerState, + msgID spectypes.MessageID, + newDutyInSameEpoch bool, +) error { + switch msgID.GetRoleType() { + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator, spectypes.BNRoleValidatorRegistration: + limit := maxDutiesPerEpoch + + if sameSlot := !newDutyInSameEpoch; sameSlot { + limit++ + } + + if state.EpochDuties >= limit { + err := ErrTooManyDutiesPerEpoch + err.got = fmt.Sprintf("%v (role %v)", state.EpochDuties, msgID.GetRoleType()) + err.want = fmt.Sprintf("less than %v", maxDutiesPerEpoch) + return err + } + + return nil + } + + return nil +} + +func (mv *messageValidator) validateBeaconDuty( + role spectypes.BeaconRole, + slot phase0.Slot, + share *ssvtypes.SSVShare, +) error { + switch role { + case spectypes.BNRoleProposer: + if share.Metadata.BeaconMetadata == nil { + return ErrNoShareMetadata + } + + epoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(slot) + if mv.dutyStore != nil && mv.dutyStore.Proposer.ValidatorDuty(epoch, slot, share.Metadata.BeaconMetadata.Index) == nil { + return ErrNoDuty + } + + return nil + + case spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + if share.Metadata.BeaconMetadata == nil { + return ErrNoShareMetadata + } + + period := mv.netCfg.Beacon.EstimatedSyncCommitteePeriodAtEpoch(mv.netCfg.Beacon.EstimatedEpochAtSlot(slot)) + if mv.dutyStore != nil && mv.dutyStore.SyncCommittee.Duty(period, share.Metadata.BeaconMetadata.Index) == nil { + return ErrNoDuty + } + + return nil + } + + return nil +} + +func (mv *messageValidator) hasFullData(signedMsg *specqbft.SignedMessage) bool { + return (signedMsg.Message.MsgType == specqbft.ProposalMsgType || + signedMsg.Message.MsgType == specqbft.RoundChangeMsgType || + mv.isDecidedMessage(signedMsg)) && len(signedMsg.FullData) != 0 // TODO: more complex check of FullData +} + +func (mv *messageValidator) isDecidedMessage(signedMsg *specqbft.SignedMessage) bool { + return signedMsg.Message.MsgType == specqbft.CommitMsgType && len(signedMsg.Signers) > 1 +} + +func (mv *messageValidator) maxRound(role spectypes.BeaconRole) specqbft.Round { + switch role { + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator: // TODO: check if value for aggregator is correct as there are messages on stage exceeding the limit + return 12 // TODO: consider calculating based on quick timeout and slow timeout + case spectypes.BNRoleProposer, spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + return 6 + case spectypes.BNRoleValidatorRegistration: + return 0 + default: + panic("unknown role") + } +} + 
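+// currentEstimatedRound translates the time elapsed since slot start into the
+// round an honest committee should have reached: rounds advance once per
+// QuickTimeout up to QuickTimeoutThreshold, and once per SlowTimeout after that.
+// Worked example, assuming roundtimer's values are QuickTimeout=2s,
+// QuickTimeoutThreshold=8 and SlowTimeout=2m: 20s after slot start the quick
+// candidate is FirstRound + 20s/2s = 11 > 8, so the slow path yields
+// QuickTimeoutThreshold + FirstRound + (20s - 8*2s)/2m = 8 + 1 + 0 = 9.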
+func (mv *messageValidator) currentEstimatedRound(sinceSlotStart time.Duration) specqbft.Round { + if currentQuickRound := specqbft.FirstRound + specqbft.Round(sinceSlotStart/roundtimer.QuickTimeout); currentQuickRound <= roundtimer.QuickTimeoutThreshold { + return currentQuickRound + } + + sinceFirstSlowRound := sinceSlotStart - (time.Duration(roundtimer.QuickTimeoutThreshold) * roundtimer.QuickTimeout) + estimatedRound := roundtimer.QuickTimeoutThreshold + specqbft.FirstRound + specqbft.Round(sinceFirstSlowRound/roundtimer.SlowTimeout) + return estimatedRound +} + +func (mv *messageValidator) waitAfterSlotStart(role spectypes.BeaconRole) time.Duration { + switch role { + case spectypes.BNRoleAttester, spectypes.BNRoleSyncCommittee: + return mv.netCfg.Beacon.SlotDurationSec() / 3 + case spectypes.BNRoleAggregator, spectypes.BNRoleSyncCommitteeContribution: + return mv.netCfg.Beacon.SlotDurationSec() / 3 * 2 + case spectypes.BNRoleProposer, spectypes.BNRoleValidatorRegistration: + return 0 + default: + panic("unknown role") + } +} + +func (mv *messageValidator) validRole(roleType spectypes.BeaconRole) bool { + switch roleType { + case spectypes.BNRoleAttester, + spectypes.BNRoleAggregator, + spectypes.BNRoleProposer, + spectypes.BNRoleSyncCommittee, + spectypes.BNRoleSyncCommitteeContribution, + spectypes.BNRoleValidatorRegistration: + return true + } + return false +} + +func (mv *messageValidator) validQBFTMsgType(msgType specqbft.MessageType) bool { + switch msgType { + case specqbft.ProposalMsgType, specqbft.PrepareMsgType, specqbft.CommitMsgType, specqbft.RoundChangeMsgType: + return true + } + return false +} + +func (mv *messageValidator) validConsensusSigners(share *ssvtypes.SSVShare, m *specqbft.SignedMessage) error { + switch { + case len(m.Signers) == 0: + return ErrNoSigners + + case len(m.Signers) == 1: + if m.Message.MsgType == specqbft.ProposalMsgType { + qbftState := &specqbft.State{ + Height: m.Message.Height, + Share: &share.Share, + } + leader := specqbft.RoundRobinProposer(qbftState, m.Message.Round) + if m.Signers[0] != leader { + err := ErrSignerNotLeader + err.got = m.Signers[0] + err.want = leader + return err + } + } + + case m.Message.MsgType != specqbft.CommitMsgType: + e := ErrNonDecidedWithMultipleSigners + e.got = len(m.Signers) + return e + + case !share.HasQuorum(len(m.Signers)) || len(m.Signers) > len(share.Committee): + e := ErrWrongSignersLength + e.want = fmt.Sprintf("between %v and %v", share.Quorum, len(share.Committee)) + e.got = len(m.Signers) + return e + } + + if !slices.IsSorted(m.Signers) { + return ErrSignersNotSorted + } + + var prevSigner spectypes.OperatorID + for _, signer := range m.Signers { + if err := mv.commonSignerValidation(signer, share); err != nil { + return err + } + if signer == prevSigner { + return ErrDuplicatedSigner + } + prevSigner = signer + } + return nil +} diff --git a/message/validation/consensus_validation_test.go b/message/validation/consensus_validation_test.go new file mode 100644 index 0000000000..5f0ae02df1 --- /dev/null +++ b/message/validation/consensus_validation_test.go @@ -0,0 +1,104 @@ +package validation + +import ( + "testing" + "time" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" +) + +func TestMessageValidator_currentEstimatedRound(t *testing.T) { + tt := []struct { + name string + sinceSlotStart time.Duration + want specqbft.Round + }{ + { + name: "0s - expected first round", + sinceSlotStart: 0, + want: 
specqbft.FirstRound, + }, + { + name: "QuickTimeout/2 - expected first round", + sinceSlotStart: roundtimer.QuickTimeout / 2, + want: specqbft.FirstRound, + }, + { + name: "QuickTimeout - expected first+1 round", + sinceSlotStart: roundtimer.QuickTimeout, + want: specqbft.FirstRound + 1, + }, + { + name: "QuickTimeout*2 - expected first+2 round", + sinceSlotStart: roundtimer.QuickTimeout * 2, + want: specqbft.FirstRound + 2, + }, + { + name: "QuickTimeout*3 - expected first+3 round", + sinceSlotStart: roundtimer.QuickTimeout * 3, + want: specqbft.FirstRound + 3, + }, + { + name: "QuickTimeout*4 - expected first+4 round", + sinceSlotStart: roundtimer.QuickTimeout * 4, + want: specqbft.FirstRound + 4, + }, + { + name: "QuickTimeout*5 - expected first+5 round", + sinceSlotStart: roundtimer.QuickTimeout * 5, + want: specqbft.FirstRound + 5, + }, + { + name: "QuickTimeout*6 - expected first+6 round", + sinceSlotStart: roundtimer.QuickTimeout * 6, + want: specqbft.FirstRound + 6, + }, + { + name: "QuickTimeout*7 - expected first+7 round", + sinceSlotStart: roundtimer.QuickTimeout * 7, + want: specqbft.FirstRound + 7, + }, + { + name: "QuickTimeout*8 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * 8, + want: specqbft.FirstRound + 8, + }, + { + name: "QuickTimeout*9 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+1), + want: roundtimer.QuickTimeoutThreshold + 1, + }, + { + name: "QuickTimeout*10 - expected first+8 round", + sinceSlotStart: roundtimer.QuickTimeout * time.Duration(roundtimer.QuickTimeoutThreshold+2), + want: roundtimer.QuickTimeoutThreshold + 1, + }, + { + name: "(QuickTimeout*8 + SlowTimeout) - expected first+9 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout, + want: roundtimer.QuickTimeoutThreshold + 2, + }, + { + name: "(QuickTimeout*8 + SlowTimeout*2) - expected first+10 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*2, + want: roundtimer.QuickTimeoutThreshold + 3, + }, + { + name: "(QuickTimeout*8 + SlowTimeout*3) - expected first+11 round", + sinceSlotStart: roundtimer.QuickTimeout*time.Duration(roundtimer.QuickTimeoutThreshold) + roundtimer.SlowTimeout*3, + want: roundtimer.QuickTimeoutThreshold + 4, + }, + } + + for _, tc := range tt { + tc := tc + t.Run(tc.name, func(t *testing.T) { + mv := &messageValidator{} + got := mv.currentEstimatedRound(tc.sinceSlotStart) + require.Equal(t, tc.want, got) + }) + } +} diff --git a/message/validation/errors.go b/message/validation/errors.go new file mode 100644 index 0000000000..f27d3b4901 --- /dev/null +++ b/message/validation/errors.go @@ -0,0 +1,100 @@ +package validation + +import ( + "fmt" + "strings" +) + +type Error struct { + text string + got any + want any + innerErr error + reject bool + silent bool +} + +func (e Error) Error() string { + var sb strings.Builder + sb.WriteString(e.text) + + if e.got != nil { + sb.WriteString(fmt.Sprintf(", got %v", e.got)) + } + if e.want != nil { + sb.WriteString(fmt.Sprintf(", want %v", e.want)) + } + if e.innerErr != nil { + sb.WriteString(fmt.Sprintf(": %s", e.innerErr.Error())) + } + + return sb.String() +} + +func (e Error) Reject() bool { + return e.reject +} + +func (e Error) Silent() bool { + return e.silent +} + +func (e Error) Text() string { + return e.text +} + +var ( + ErrEmptyData = Error{text: "empty data"} + ErrWrongDomain = Error{text: 
"wrong domain"} + ErrNoShareMetadata = Error{text: "share has no metadata"} + ErrUnknownValidator = Error{text: "unknown validator"} + ErrValidatorLiquidated = Error{text: "validator is liquidated"} + ErrValidatorNotAttesting = Error{text: "validator is not attesting"} + ErrSlotAlreadyAdvanced = Error{text: "signer has already advanced to a later slot"} + ErrRoundAlreadyAdvanced = Error{text: "signer has already advanced to a later round"} + ErrRoundTooHigh = Error{text: "round is too high for this role" /*, reject: true*/} // TODO: enable reject + ErrEarlyMessage = Error{text: "early message"} + ErrLateMessage = Error{text: "late message"} + ErrTooManySameTypeMessagesPerRound = Error{text: "too many messages of same type per round"} + ErrPubSubMessageHasNoData = Error{text: "pub-sub message has no data", reject: true} + ErrPubSubDataTooBig = Error{text: "pub-sub message data too big", reject: true} + ErrMalformedPubSubMessage = Error{text: "pub-sub message is malformed", reject: true} + ErrEmptyPubSubMessage = Error{text: "pub-sub message is empty", reject: true} + ErrTopicNotFound = Error{text: "topic not found", reject: true} + ErrSSVDataTooBig = Error{text: "ssv message data too big", reject: true} + ErrInvalidRole = Error{text: "invalid role", reject: true} + ErrConsensusValidatorRegistration = Error{text: "consensus message for validator registration role", reject: true} + ErrNoSigners = Error{text: "no signers", reject: true} + ErrWrongSignatureSize = Error{text: "wrong signature size", reject: true} + ErrZeroSignature = Error{text: "zero signature", reject: true} + ErrZeroSigner = Error{text: "zero signer ID", reject: true} + ErrSignerNotInCommittee = Error{text: "signer is not in committee", reject: true} + ErrDuplicatedSigner = Error{text: "signer is duplicated", reject: true} + ErrSignerNotLeader = Error{text: "signer is not leader", reject: true} + ErrSignersNotSorted = Error{text: "signers are not sorted", reject: true} + ErrUnexpectedSigner = Error{text: "signer is not expected", reject: true} + ErrInvalidHash = Error{text: "root doesn't match full data hash", reject: true} + ErrInvalidSignature = Error{text: "invalid signature", reject: true} + ErrInvalidPartialSignature = Error{text: "invalid partial signature", reject: true} + ErrEstimatedRoundTooFar = Error{text: "message round is too far from estimated"} + ErrMalformedMessage = Error{text: "message could not be decoded", reject: true} + ErrUnknownSSVMessageType = Error{text: "unknown SSV message type", reject: true} + ErrUnknownQBFTMessageType = Error{text: "unknown QBFT message type", reject: true} + ErrUnknownPartialMessageType = Error{text: "unknown partial signature message type", reject: true} + ErrPartialSignatureTypeRoleMismatch = Error{text: "partial signature type and role don't match", reject: true} + ErrNonDecidedWithMultipleSigners = Error{text: "non-decided with multiple signers", reject: true} + ErrWrongSignersLength = Error{text: "decided signers size is not between quorum and committee size", reject: true} + ErrDuplicatedProposalWithDifferentData = Error{text: "duplicated proposal with different data", reject: true} + ErrEventMessage = Error{text: "event messages are not broadcast", reject: true} + ErrDKGMessage = Error{text: "DKG messages are not supported", reject: true} + ErrMalformedPrepareJustifications = Error{text: "malformed prepare justifications", reject: true} + ErrUnexpectedPrepareJustifications = Error{text: "prepare justifications unexpected for this message type", reject: true} + 
ErrMalformedRoundChangeJustifications = Error{text: "malformed round change justifications", reject: true} + ErrUnexpectedRoundChangeJustifications = Error{text: "round change justifications unexpected for this message type", reject: true} + ErrInvalidJustifications = Error{text: "invalid justifications", reject: true} + ErrTooManyDutiesPerEpoch = Error{text: "too many duties per epoch", reject: true} + ErrNoDuty = Error{text: "no duty for this epoch", reject: true} + ErrDeserializePublicKey = Error{text: "deserialize public key", reject: true} + ErrNoPartialMessages = Error{text: "no partial messages", reject: true} + ErrDuplicatedPartialSignatureMessage = Error{text: "duplicated partial signature message", reject: true} +) diff --git a/message/validation/message_counts.go b/message/validation/message_counts.go new file mode 100644 index 0000000000..609ed018bc --- /dev/null +++ b/message/validation/message_counts.go @@ -0,0 +1,156 @@ +package validation + +// message_counts.go contains code for counting and validating messages per validator-slot-round. + +import ( + "fmt" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" +) + +// MessageCounts tracks the number of various message types received for validation. +type MessageCounts struct { + PreConsensus int + Proposal int + Prepare int + Commit int + Decided int + RoundChange int + PostConsensus int +} + +// String provides a formatted representation of the MessageCounts. +func (c *MessageCounts) String() string { + return fmt.Sprintf("pre-consensus: %v, proposal: %v, prepare: %v, commit: %v, decided: %v, round change: %v, post-consensus: %v", + c.PreConsensus, + c.Proposal, + c.Prepare, + c.Commit, + c.Decided, + c.RoundChange, + c.PostConsensus, + ) +} + +// ValidateConsensusMessage checks if the provided consensus message exceeds the set limits. +// Returns an error if the message type exceeds its respective count limit. +func (c *MessageCounts) ValidateConsensusMessage(msg *specqbft.SignedMessage, limits MessageCounts) error { + switch msg.Message.MsgType { + case specqbft.ProposalMsgType: + if c.Proposal >= limits.Proposal { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("proposal, having %v", c.String()) + return err + } + case specqbft.PrepareMsgType: + if c.Prepare >= limits.Prepare { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("prepare, having %v", c.String()) + return err + } + case specqbft.CommitMsgType: + if len(msg.Signers) == 1 { + if c.Commit >= limits.Commit { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("commit, having %v", c.String()) + return err + } + } + if len(msg.Signers) > 1 { + if c.Decided >= limits.Decided { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("decided, having %v", c.String()) + return err + } + } + case specqbft.RoundChangeMsgType: + if c.RoundChange >= limits.RoundChange { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("round change, having %v", c.String()) + return err + } + default: + panic("unexpected signed message type") // should be checked before + } + + return nil +} + +// ValidatePartialSignatureMessage checks if the provided partial signature message exceeds the set limits. +// Returns an error if the message type exceeds its respective count limit. 
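+// Note the asymmetry with ValidateConsensusMessage above: the checks below use
+// ">" rather than ">=", so one message beyond the configured pre-/post-consensus
+// limit still passes before ErrTooManySameTypeMessagesPerRound is returned.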
+func (c *MessageCounts) ValidatePartialSignatureMessage(m *spectypes.SignedPartialSignatureMessage, limits MessageCounts) error { + switch m.Message.Type { + case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig: + if c.PreConsensus > limits.PreConsensus { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("pre-consensus, having %v", c.String()) + return err + } + case spectypes.PostConsensusPartialSig: + if c.PostConsensus > limits.PostConsensus { + err := ErrTooManySameTypeMessagesPerRound + err.got = fmt.Sprintf("post-consensus, having %v", c.String()) + return err + } + default: + panic("unexpected partial signature message type") // should be checked before + } + + return nil +} + +// RecordConsensusMessage updates the counts based on the provided consensus message type. +func (c *MessageCounts) RecordConsensusMessage(msg *specqbft.SignedMessage) { + switch msg.Message.MsgType { + case specqbft.ProposalMsgType: + c.Proposal++ + case specqbft.PrepareMsgType: + c.Prepare++ + case specqbft.CommitMsgType: + switch { + case len(msg.Signers) == 1: + c.Commit++ + case len(msg.Signers) > 1: + c.Decided++ + default: + panic("expected signers") // 0 length should be checked before + } + case specqbft.RoundChangeMsgType: + c.RoundChange++ + default: + panic("unexpected signed message type") // should be checked before + } +} + +// RecordPartialSignatureMessage updates the counts based on the provided partial signature message type. +func (c *MessageCounts) RecordPartialSignatureMessage(msg *spectypes.SignedPartialSignatureMessage) { + switch msg.Message.Type { + case spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig: + c.PreConsensus++ + case spectypes.PostConsensusPartialSig: + c.PostConsensus++ + default: + panic("unexpected partial signature message type") // should be checked before + } +} + +// maxMessageCounts is the maximum number of acceptable messages from a signer within a slot & round. 
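+// Worked example: with a committee of 4, f = (4-1)/3 = 1, so up to
+// 4*(1+1) = 8 decided (multi-signer commit) messages are accepted per
+// slot & round, while every other message type is capped at 1 per signer.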
+func maxMessageCounts(committeeSize int) MessageCounts { + maxDecided := maxDecidedCount(committeeSize) + + return MessageCounts{ + PreConsensus: 1, + Proposal: 1, + Prepare: 1, + Commit: 1, + Decided: maxDecided, + RoundChange: 1, + PostConsensus: 1, + } +} + +func maxDecidedCount(committeeSize int) int { + f := (committeeSize - 1) / 3 + return committeeSize * (f + 1) // N * (f + 1) +} diff --git a/message/validation/metrics.go b/message/validation/metrics.go new file mode 100644 index 0000000000..f023fe0689 --- /dev/null +++ b/message/validation/metrics.go @@ -0,0 +1,38 @@ +package validation + +import ( + "time" + + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" +) + +type metrics interface { + MessageAccepted(role spectypes.BeaconRole, round specqbft.Round) + MessageIgnored(reason string, role spectypes.BeaconRole, round specqbft.Round) + MessageRejected(reason string, role spectypes.BeaconRole, round specqbft.Round) + SSVMessageType(msgType spectypes.MsgType) + ConsensusMsgType(msgType specqbft.MessageType, signers int) + MessageValidationDuration(duration time.Duration, labels ...string) + SignatureValidationDuration(duration time.Duration, labels ...string) + MessageSize(size int) + ActiveMsgValidation(topic string) + ActiveMsgValidationDone(topic string) + InCommitteeMessage(msgType spectypes.MsgType, decided bool) + NonCommitteeMessage(msgType spectypes.MsgType, decided bool) +} + +type nopMetrics struct{} + +func (*nopMetrics) ConsensusMsgType(specqbft.MessageType, int) {} +func (*nopMetrics) MessageAccepted(spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) MessageIgnored(string, spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) MessageRejected(string, spectypes.BeaconRole, specqbft.Round) {} +func (*nopMetrics) SSVMessageType(spectypes.MsgType) {} +func (*nopMetrics) MessageValidationDuration(time.Duration, ...string) {} +func (*nopMetrics) SignatureValidationDuration(time.Duration, ...string) {} +func (*nopMetrics) MessageSize(int) {} +func (*nopMetrics) ActiveMsgValidation(string) {} +func (*nopMetrics) ActiveMsgValidationDone(string) {} +func (*nopMetrics) InCommitteeMessage(spectypes.MsgType, bool) {} +func (*nopMetrics) NonCommitteeMessage(spectypes.MsgType, bool) {} diff --git a/message/validation/partial_validation.go b/message/validation/partial_validation.go new file mode 100644 index 0000000000..781267f22d --- /dev/null +++ b/message/validation/partial_validation.go @@ -0,0 +1,251 @@ +package validation + +// partial_validation.go contains methods for validating partial signature messages + +import ( + "encoding/hex" + "fmt" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/herumi/bls-eth-go-binary/bls" + "golang.org/x/exp/slices" + + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" +) + +func (mv *messageValidator) validatePartialSignatureMessage( + share *ssvtypes.SSVShare, + signedMsg *spectypes.SignedPartialSignatureMessage, + msgID spectypes.MessageID, +) (phase0.Slot, error) { + if mv.inCommittee(share) { + mv.metrics.InCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) + } else { + mv.metrics.NonCommitteeMessage(spectypes.SSVPartialSignatureMsgType, false) + } + + msgSlot := signedMsg.Message.Slot + + if !mv.validPartialSigMsgType(signedMsg.Message.Type) { + e := ErrUnknownPartialMessageType + e.got = signedMsg.Message.Type + return msgSlot, e + } + + role := 
msgID.GetRoleType() + if !mv.partialSignatureTypeMatchesRole(signedMsg.Message.Type, role) { + return msgSlot, ErrPartialSignatureTypeRoleMismatch + } + + if err := mv.validatePartialMessages(share, signedMsg); err != nil { + return msgSlot, err + } + + state := mv.consensusState(msgID) + signerState := state.GetSignerState(signedMsg.Signer) + if signerState != nil { + if err := mv.validateSignerBehaviorPartial(state, signedMsg.Signer, share, msgID, signedMsg); err != nil { + return msgSlot, err + } + } + + if err := mv.validateSignatureFormat(signedMsg.Signature); err != nil { + return msgSlot, err + } + + if mv.verifySignatures { + if err := mv.validPartialSignatures(share, signedMsg); err != nil { + return msgSlot, err + } + } + + if signerState == nil { + signerState = state.CreateSignerState(signedMsg.Signer) + } + + if msgSlot > signerState.Slot { + newEpoch := mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) > mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) + signerState.ResetSlot(msgSlot, specqbft.FirstRound, newEpoch) + } + + signerState.MessageCounts.RecordPartialSignatureMessage(signedMsg) + + return msgSlot, nil +} + +func (mv *messageValidator) inCommittee(share *ssvtypes.SSVShare) bool { + return slices.ContainsFunc(share.Committee, func(operator *spectypes.Operator) bool { + return operator.OperatorID == mv.ownOperatorID + }) +} + +func (mv *messageValidator) validPartialSigMsgType(msgType spectypes.PartialSigMsgType) bool { + switch msgType { + case spectypes.PostConsensusPartialSig, + spectypes.RandaoPartialSig, + spectypes.SelectionProofPartialSig, + spectypes.ContributionProofs, + spectypes.ValidatorRegistrationPartialSig: + return true + default: + return false + } +} + +func (mv *messageValidator) partialSignatureTypeMatchesRole(msgType spectypes.PartialSigMsgType, role spectypes.BeaconRole) bool { + switch role { + case spectypes.BNRoleAttester: + return msgType == spectypes.PostConsensusPartialSig + case spectypes.BNRoleAggregator: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.SelectionProofPartialSig + case spectypes.BNRoleProposer: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.RandaoPartialSig + case spectypes.BNRoleSyncCommittee: + return msgType == spectypes.PostConsensusPartialSig + case spectypes.BNRoleSyncCommitteeContribution: + return msgType == spectypes.PostConsensusPartialSig || msgType == spectypes.ContributionProofs + case spectypes.BNRoleValidatorRegistration: + return msgType == spectypes.ValidatorRegistrationPartialSig + default: + panic("invalid role") // role validity should be checked before + } +} + +func (mv *messageValidator) validPartialSignatures(share *ssvtypes.SSVShare, signedMsg *spectypes.SignedPartialSignatureMessage) error { + if err := ssvtypes.VerifyByOperators(signedMsg.Signature, signedMsg, mv.netCfg.Domain, spectypes.PartialSignatureType, share.Committee); err != nil { + signErr := ErrInvalidSignature + signErr.innerErr = err + signErr.got = fmt.Sprintf("domain %v from %v", hex.EncodeToString(mv.netCfg.Domain[:]), hex.EncodeToString(share.ValidatorPubKey)) + return signErr + } + + for _, message := range signedMsg.Message.Messages { + if err := mv.verifyPartialSignature(message, share); err != nil { + return err + } + } + + return nil +} + +func (mv *messageValidator) verifyPartialSignature(msg *spectypes.PartialSignatureMessage, share *ssvtypes.SSVShare) error { + signer := msg.Signer + signature := msg.PartialSignature + root := msg.SigningRoot + + for _, n := 
range share.Committee { + if n.GetID() != signer { + continue + } + + pk, err := ssvtypes.DeserializeBLSPublicKey(n.GetPublicKey()) + if err != nil { + return fmt.Errorf("deserialize pk: %w", err) + } + sig := &bls.Sign{} + if err := sig.Deserialize(signature); err != nil { + return fmt.Errorf("deserialize signature: %w", err) + } + + if !mv.aggregateVerify(sig, pk, root) { + return ErrInvalidPartialSignature + } + + return nil + } + + return ErrSignerNotInCommittee +} + +func (mv *messageValidator) aggregateVerify(sig *bls.Sign, pk bls.PublicKey, root [32]byte) bool { + start := time.Now() + + valid := sig.FastAggregateVerify([]bls.PublicKey{pk}, root[:]) + + sinceStart := time.Since(start) + mv.metrics.SignatureValidationDuration(sinceStart) + + return valid +} + +func (mv *messageValidator) validatePartialMessages(share *ssvtypes.SSVShare, m *spectypes.SignedPartialSignatureMessage) error { + if err := mv.commonSignerValidation(m.Signer, share); err != nil { + return err + } + + if len(m.Message.Messages) == 0 { + return ErrNoPartialMessages + } + + seen := map[[32]byte]struct{}{} + for _, message := range m.Message.Messages { + if _, ok := seen[message.SigningRoot]; ok { + return ErrDuplicatedPartialSignatureMessage + } + seen[message.SigningRoot] = struct{}{} + + if message.Signer != m.Signer { + err := ErrUnexpectedSigner + err.want = m.Signer + err.got = message.Signer + return err + } + + if err := mv.commonSignerValidation(message.Signer, share); err != nil { + return err + } + + if err := mv.validateSignatureFormat(message.PartialSignature); err != nil { + return err + } + } + + return nil +} + +func (mv *messageValidator) validateSignerBehaviorPartial( + state *ConsensusState, + signer spectypes.OperatorID, + share *ssvtypes.SSVShare, + msgID spectypes.MessageID, + signedMsg *spectypes.SignedPartialSignatureMessage, +) error { + signerState := state.GetSignerState(signer) + + if signerState == nil { + return nil + } + + msgSlot := signedMsg.Message.Slot + + if msgSlot < signerState.Slot { + // Signers aren't allowed to decrease their slot. + // If they've sent a future message due to clock error, + // this should be caught by the earlyMessage check. 
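+		// (earlyMessage considers a message early when its slot starts after
+		// the end of the slot estimated from the receive time, allowing
+		// clockErrorTolerance of skew; see earlyMessage in validation.go.)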
+		err := ErrSlotAlreadyAdvanced
+		err.want = signerState.Slot
+		err.got = msgSlot
+		return err
+	}
+
+	newDutyInSameEpoch := false
+	if msgSlot > signerState.Slot && mv.netCfg.Beacon.EstimatedEpochAtSlot(msgSlot) == mv.netCfg.Beacon.EstimatedEpochAtSlot(signerState.Slot) {
+		newDutyInSameEpoch = true
+	}
+
+	if err := mv.validateDutyCount(signerState, msgID, newDutyInSameEpoch); err != nil {
+		return err
+	}
+
+	if msgSlot <= signerState.Slot {
+		limits := maxMessageCounts(len(share.Committee))
+		if err := signerState.MessageCounts.ValidatePartialSignatureMessage(signedMsg, limits); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/message/validation/qbft_config.go b/message/validation/qbft_config.go
new file mode 100644
index 0000000000..fe5ed6dc04
--- /dev/null
+++ b/message/validation/qbft_config.go
@@ -0,0 +1,53 @@
+package validation
+
+import (
+	specqbft "github.com/bloxapp/ssv-spec/qbft"
+	spectypes "github.com/bloxapp/ssv-spec/types"
+
+	"github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer"
+	qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage"
+)
+
+type qbftConfig struct {
+	domain          spectypes.DomainType
+	verifySignature bool
+}
+
+func newQBFTConfig(domain spectypes.DomainType, verifySignature bool) qbftConfig {
+	return qbftConfig{
+		domain:          domain,
+		verifySignature: verifySignature,
+	}
+}
+
+func (q qbftConfig) GetSigner() spectypes.SSVSigner {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetSignatureDomainType() spectypes.DomainType {
+	return q.domain
+}
+
+func (q qbftConfig) GetValueCheckF() specqbft.ProposedValueCheckF {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetProposerF() specqbft.ProposerF {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetNetwork() specqbft.Network {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetStorage() qbftstorage.QBFTStore {
+	panic("should not be called")
+}
+
+func (q qbftConfig) GetTimer() roundtimer.Timer {
+	panic("should not be called")
+}
+
+func (q qbftConfig) VerifySignatures() bool {
+	return q.verifySignature
+}
diff --git a/message/validation/signer_state.go b/message/validation/signer_state.go
new file mode 100644
index 0000000000..dc9bf1818e
--- /dev/null
+++ b/message/validation/signer_state.go
@@ -0,0 +1,45 @@
+package validation
+
+// signer_state.go describes the state of a signer.
+
+import (
+	"time"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	specqbft "github.com/bloxapp/ssv-spec/qbft"
+)
+
+// SignerState represents the state of a signer, including its start time, slot, round,
+// message counts, proposal data, and the number of duties performed in the current epoch.
+type SignerState struct {
+	Start         time.Time
+	Slot          phase0.Slot
+	Round         specqbft.Round
+	MessageCounts MessageCounts
+	ProposalData  []byte
+	EpochDuties   int
+}
+
+// ResetSlot resets the state's slot and round to the given values and clears the
+// message counts and proposal data. It also updates the start time to the current
+// time, and it resets the epoch duties count to 1 on a new epoch or increments it otherwise.
+func (s *SignerState) ResetSlot(slot phase0.Slot, round specqbft.Round, newEpoch bool) {
+	s.Start = time.Now()
+	s.Slot = slot
+	s.Round = round
+	s.MessageCounts = MessageCounts{}
+	s.ProposalData = nil
+	if newEpoch {
+		s.EpochDuties = 1
+	} else {
+		s.EpochDuties++
+	}
+}
+
+// ResetRound resets the state's round to the given value and clears the message
+// counts and proposal data. It also updates the start time to the current time.
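+// Unlike ResetSlot, the slot and the epoch duties counter are left untouched,
+// since a round change happens within the same duty.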
+func (s *SignerState) ResetRound(round specqbft.Round) {
+	s.Start = time.Now()
+	s.Round = round
+	s.MessageCounts = MessageCounts{}
+	s.ProposalData = nil
+}
diff --git a/message/validation/validation.go b/message/validation/validation.go
new file mode 100644
index 0000000000..98e100fa3c
--- /dev/null
+++ b/message/validation/validation.go
@@ -0,0 +1,556 @@
+// Package validation provides functions and structures for validating messages.
+package validation
+
+// validation.go contains the main code for validation and most of the rule checks.
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	specqbft "github.com/bloxapp/ssv-spec/qbft"
+	spectypes "github.com/bloxapp/ssv-spec/types"
+	"github.com/cornelk/hashmap"
+	pubsub "github.com/libp2p/go-libp2p-pubsub"
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/pkg/errors"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"golang.org/x/exp/slices"
+
+	"github.com/bloxapp/ssv/logging/fields"
+	"github.com/bloxapp/ssv/network/commons"
+	"github.com/bloxapp/ssv/networkconfig"
+	"github.com/bloxapp/ssv/operator/duties/dutystore"
+	ssvmessage "github.com/bloxapp/ssv/protocol/v2/message"
+	"github.com/bloxapp/ssv/protocol/v2/ssv/queue"
+	ssvtypes "github.com/bloxapp/ssv/protocol/v2/types"
+	registrystorage "github.com/bloxapp/ssv/registry/storage"
+)
+
+const (
+	// lateMessageMargin is the duration past a message's TTL in which it is still considered valid.
+	lateMessageMargin = time.Second * 3
+
+	// clockErrorTolerance is the maximum amount of clock error we expect to see between nodes.
+	clockErrorTolerance = time.Millisecond * 50
+
+	maxMessageSize             = maxConsensusMsgSize
+	maxConsensusMsgSize        = 8388608
+	maxPartialSignatureMsgSize = 1952
+	allowedRoundsInFuture      = 1
+	allowedRoundsInPast        = 2
+	lateSlotAllowance          = 2
+	signatureSize              = 96
+	maxDutiesPerEpoch          = 2
+)
+
+// ConsensusID uniquely identifies a public key and role pair to keep track of state.
+type ConsensusID struct {
+	PubKey phase0.BLSPubKey
+	Role   spectypes.BeaconRole
+}
+
+// ConsensusState keeps track of the signers for a given public key and role.
+type ConsensusState struct {
+	// TODO: consider evicting old data to avoid excessive memory consumption
+	Signers *hashmap.Map[spectypes.OperatorID, *SignerState]
+}
+
+// GetSignerState retrieves the state for the given signer.
+// Returns nil if the signer is not found.
+func (cs *ConsensusState) GetSignerState(signer spectypes.OperatorID) *SignerState {
+	signerState, _ := cs.Signers.Get(signer)
+	return signerState
+}
+
+// CreateSignerState initializes and sets a new SignerState for the given signer.
+func (cs *ConsensusState) CreateSignerState(signer spectypes.OperatorID) *SignerState {
+	signerState := &SignerState{}
+	cs.Signers.Set(signer, signerState)
+
+	return signerState
+}
+
+// PubsubMessageValidator defines methods for validating pubsub messages.
+type PubsubMessageValidator interface {
+	ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult
+	ValidatePubsubMessage(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult
+}
+
+// SSVMessageValidator defines methods for validating SSV messages.
+type SSVMessageValidator interface {
+	ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error)
+}
+
+// MessageValidator is an interface that combines both PubsubMessageValidator and SSVMessageValidator.
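+//
+// A rough wiring sketch (logger, shares and topic are assumed to be provided
+// by the caller; the rest comes from this package):
+//
+//	mv := NewMessageValidator(
+//		networkconfig.TestNetwork,
+//		WithLogger(logger),
+//		WithShareStorage(shares),
+//		WithSignatureVerification(true),
+//	)
+//	topicValidator := mv.ValidatorForTopic(topic) // plug into libp2p pubsub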
+type MessageValidator interface { + PubsubMessageValidator + SSVMessageValidator +} + +type messageValidator struct { + logger *zap.Logger + metrics metrics + netCfg networkconfig.NetworkConfig + index sync.Map + shareStorage registrystorage.Shares + dutyStore *dutystore.Store + ownOperatorID spectypes.OperatorID + verifySignatures bool +} + +// NewMessageValidator returns a new MessageValidator with the given network configuration and options. +func NewMessageValidator(netCfg networkconfig.NetworkConfig, opts ...Option) MessageValidator { + mv := &messageValidator{ + logger: zap.NewNop(), + metrics: &nopMetrics{}, + netCfg: netCfg, + } + + for _, opt := range opts { + opt(mv) + } + + return mv +} + +// Option represents a functional option for configuring a messageValidator. +type Option func(validator *messageValidator) + +// WithLogger sets the logger for the messageValidator. +func WithLogger(logger *zap.Logger) Option { + return func(mv *messageValidator) { + mv.logger = logger + } +} + +// WithMetrics sets the metrics for the messageValidator. +func WithMetrics(metrics metrics) Option { + return func(mv *messageValidator) { + mv.metrics = metrics + } +} + +// WithDutyStore sets the duty store for the messageValidator. +func WithDutyStore(dutyStore *dutystore.Store) Option { + return func(mv *messageValidator) { + mv.dutyStore = dutyStore + } +} + +// WithOwnOperatorID sets the operator ID for the messageValidator. +func WithOwnOperatorID(id spectypes.OperatorID) Option { + return func(mv *messageValidator) { + mv.ownOperatorID = id + } +} + +// WithShareStorage sets the share storage for the messageValidator. +func WithShareStorage(shareStorage registrystorage.Shares) Option { + return func(mv *messageValidator) { + mv.shareStorage = shareStorage + } +} + +// WithSignatureVerification sets whether to verify signatures in the messageValidator. +func WithSignatureVerification(check bool) Option { + return func(mv *messageValidator) { + mv.verifySignatures = check + } +} + +// ConsensusDescriptor provides details about the consensus for a message. It's used for logging and metrics. +type ConsensusDescriptor struct { + Round specqbft.Round + QBFTMessageType specqbft.MessageType + Signers []spectypes.OperatorID + Committee []*spectypes.Operator +} + +// Descriptor provides details about a message. It's used for logging and metrics. +type Descriptor struct { + ValidatorPK spectypes.ValidatorPK + Role spectypes.BeaconRole + SSVMessageType spectypes.MsgType + Slot phase0.Slot + Consensus *ConsensusDescriptor +} + +// Fields returns zap logging fields for the descriptor. +func (d Descriptor) Fields() []zapcore.Field { + result := []zapcore.Field{ + fields.Validator(d.ValidatorPK), + fields.Role(d.Role), + zap.String("ssv_message_type", ssvmessage.MsgTypeToString(d.SSVMessageType)), + fields.Slot(d.Slot), + } + + if d.Consensus != nil { + var committee []spectypes.OperatorID + for _, o := range d.Consensus.Committee { + committee = append(committee, o.OperatorID) + } + + result = append(result, + fields.Round(d.Consensus.Round), + zap.String("qbft_message_type", ssvmessage.QBFTMsgTypeToString(d.Consensus.QBFTMessageType)), + zap.Uint64s("signers", d.Consensus.Signers), + zap.Uint64s("committee", committee), + ) + } + + return result +} + +// String provides a string representation of the descriptor. It may be useful for logging. 
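+// Example output (all values are made up; the exact string forms depend on the
+// spec types' and ssvmessage helpers' String implementations):
+//
+//	validator PK: 8b3e…, role: ATTESTER, ssv message type: consensus, slot: 100,
+//	round: 1, qbft message type: proposal, signers: [1], committee: [1 2 3 4]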
+func (d Descriptor) String() string { + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("validator PK: %v, role: %v, ssv message type: %v, slot: %v", + hex.EncodeToString(d.ValidatorPK), + d.Role.String(), + ssvmessage.MsgTypeToString(d.SSVMessageType), + d.Slot, + )) + + if d.Consensus != nil { + var committee []spectypes.OperatorID + for _, o := range d.Consensus.Committee { + committee = append(committee, o.OperatorID) + } + + sb.WriteString(fmt.Sprintf(", round: %v, qbft message type: %v, signers: %v, committee: %v", + d.Consensus.Round, + ssvmessage.QBFTMsgTypeToString(d.Consensus.QBFTMessageType), + d.Consensus.Signers, + committee, + )) + } + + return sb.String() +} + +// ValidatorForTopic returns a validation function for the given topic. +// This function can be used to validate messages within the libp2p pubsub framework. +func (mv *messageValidator) ValidatorForTopic(_ string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + return mv.ValidatePubsubMessage +} + +// ValidatePubsubMessage validates the given pubsub message. +// Depending on the outcome, it will return one of the pubsub validation results (Accept, Ignore, or Reject). +func (mv *messageValidator) ValidatePubsubMessage(_ context.Context, _ peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { + start := time.Now() + var validationDurationLabels []string // TODO: implement + + defer func() { + sinceStart := time.Since(start) + mv.metrics.MessageValidationDuration(sinceStart, validationDurationLabels...) + }() + + decodedMessage, descriptor, err := mv.validateP2PMessage(pmsg, time.Now()) + round := specqbft.Round(0) + if descriptor.Consensus != nil { + round = descriptor.Consensus.Round + } + + if err != nil { + var valErr Error + if errors.As(err, &valErr) { + if valErr.Reject() { + if !valErr.Silent() { + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("rejecting invalid message", f...) + } + + mv.metrics.MessageRejected(valErr.Text(), descriptor.Role, round) + return pubsub.ValidationReject + } + + if !valErr.Silent() { + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("ignoring invalid message", f...) + } + mv.metrics.MessageIgnored(valErr.Text(), descriptor.Role, round) + return pubsub.ValidationIgnore + } + + mv.metrics.MessageIgnored(err.Error(), descriptor.Role, round) + f := append(descriptor.Fields(), zap.Error(err)) + mv.logger.Debug("ignoring invalid message", f...) + return pubsub.ValidationIgnore + } + + pmsg.ValidatorData = decodedMessage + + mv.metrics.MessageAccepted(descriptor.Role, round) + + return pubsub.ValidationAccept +} + +// ValidateSSVMessage validates the given SSV message. +// If successful, it returns the decoded message and its descriptor. Otherwise, it returns an error. 
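+// Callers that need a pubsub-style verdict can unwrap the returned validation
+// Error the same way ValidatePubsubMessage does above:
+//
+//	var valErr Error
+//	if errors.As(err, &valErr) && valErr.Reject() {
+//		// drop the message (and optionally penalize the peer); otherwise ignore it
+//	}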
+func (mv *messageValidator) ValidateSSVMessage(ssvMessage *spectypes.SSVMessage) (*queue.DecodedSSVMessage, Descriptor, error) { + return mv.validateSSVMessage(ssvMessage, time.Now()) +} + +func (mv *messageValidator) validateP2PMessage(pMsg *pubsub.Message, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { + topic := pMsg.GetTopic() + + mv.metrics.ActiveMsgValidation(topic) + defer mv.metrics.ActiveMsgValidationDone(topic) + + messageData := pMsg.GetData() + if len(messageData) == 0 { + return nil, Descriptor{}, ErrPubSubMessageHasNoData + } + + mv.metrics.MessageSize(len(messageData)) + + // Max possible MsgType + MsgID + Data plus 10% for encoding overhead + const maxMsgSize = 4 + 56 + 8388668 + const maxEncodedMsgSize = maxMsgSize + maxMsgSize/10 + if len(messageData) > maxEncodedMsgSize { + e := ErrPubSubDataTooBig + e.got = len(messageData) + return nil, Descriptor{}, e + } + + msg, err := commons.DecodeNetworkMsg(messageData) + if err != nil { + e := ErrMalformedPubSubMessage + e.innerErr = err + return nil, Descriptor{}, e + } + + if msg == nil { + return nil, Descriptor{}, ErrEmptyPubSubMessage + } + + // Check if the message was sent on the right topic. + currentTopic := pMsg.GetTopic() + currentTopicBaseName := commons.GetTopicBaseName(currentTopic) + topics := commons.ValidatorTopicID(msg.GetID().GetPubKey()) + + topicFound := false + for _, tp := range topics { + if tp == currentTopicBaseName { + topicFound = true + break + } + } + if !topicFound { + return nil, Descriptor{}, ErrTopicNotFound + } + + mv.metrics.SSVMessageType(msg.MsgType) + + return mv.validateSSVMessage(msg, receivedAt) +} + +func (mv *messageValidator) validateSSVMessage(ssvMessage *spectypes.SSVMessage, receivedAt time.Time) (*queue.DecodedSSVMessage, Descriptor, error) { + var descriptor Descriptor + + if len(ssvMessage.Data) == 0 { + return nil, descriptor, ErrEmptyData + } + + if len(ssvMessage.Data) > maxMessageSize { + err := ErrSSVDataTooBig + err.got = len(ssvMessage.Data) + err.want = maxMessageSize + return nil, descriptor, err + } + + if !bytes.Equal(ssvMessage.MsgID.GetDomain(), mv.netCfg.Domain[:]) { + err := ErrWrongDomain + err.got = hex.EncodeToString(ssvMessage.MsgID.GetDomain()) + err.want = hex.EncodeToString(mv.netCfg.Domain[:]) + return nil, descriptor, err + } + + validatorPK := ssvMessage.GetID().GetPubKey() + role := ssvMessage.GetID().GetRoleType() + descriptor.Role = role + descriptor.ValidatorPK = validatorPK + + if !mv.validRole(role) { + return nil, descriptor, ErrInvalidRole + } + + publicKey, err := ssvtypes.DeserializeBLSPublicKey(validatorPK) + if err != nil { + e := ErrDeserializePublicKey + e.innerErr = err + return nil, descriptor, e + } + + var share *ssvtypes.SSVShare + if mv.shareStorage != nil { + share = mv.shareStorage.Get(nil, publicKey.Serialize()) + if share == nil { + e := ErrUnknownValidator + e.got = publicKey.SerializeToHexStr() + return nil, descriptor, e + } + + if share.Liquidated { + return nil, descriptor, ErrValidatorLiquidated + } + + if share.BeaconMetadata == nil { + return nil, descriptor, ErrNoShareMetadata + } + + if !share.BeaconMetadata.IsAttesting() { + err := ErrValidatorNotAttesting + err.got = share.BeaconMetadata.Status.String() + return nil, descriptor, err + } + } + + msg, err := queue.DecodeSSVMessage(ssvMessage) + if err != nil { + if errors.Is(err, queue.ErrUnknownMessageType) { + e := ErrUnknownSSVMessageType + e.got = ssvMessage.GetType() + return nil, descriptor, e + } + + e := ErrMalformedMessage + e.innerErr = 
err + return nil, descriptor, e + } + + descriptor.SSVMessageType = ssvMessage.MsgType + + if mv.shareStorage != nil { + switch ssvMessage.MsgType { + case spectypes.SSVConsensusMsgType: + if len(msg.Data) > maxConsensusMsgSize { + e := ErrSSVDataTooBig + e.got = len(ssvMessage.Data) + e.want = maxConsensusMsgSize + return nil, descriptor, e + } + + consensusDescriptor, slot, err := mv.validateConsensusMessage(share, msg.Body.(*specqbft.SignedMessage), msg.GetID(), receivedAt) + descriptor.Consensus = &consensusDescriptor + descriptor.Slot = slot + if err != nil { + return nil, descriptor, err + } + + case spectypes.SSVPartialSignatureMsgType: + if len(msg.Data) > maxPartialSignatureMsgSize { + e := ErrSSVDataTooBig + e.got = len(ssvMessage.Data) + e.want = maxPartialSignatureMsgSize + return nil, descriptor, e + } + + slot, err := mv.validatePartialSignatureMessage(share, msg.Body.(*spectypes.SignedPartialSignatureMessage), msg.GetID()) + descriptor.Slot = slot + if err != nil { + return nil, descriptor, err + } + + case ssvmessage.SSVEventMsgType: + return nil, descriptor, ErrEventMessage + + case spectypes.DKGMsgType: + return nil, descriptor, ErrDKGMessage + } + } + + return msg, descriptor, nil +} + +func (mv *messageValidator) containsSignerFunc(signer spectypes.OperatorID) func(operator *spectypes.Operator) bool { + return func(operator *spectypes.Operator) bool { + return operator.OperatorID == signer + } +} + +func (mv *messageValidator) validateSignatureFormat(signature []byte) error { + if len(signature) != signatureSize { + e := ErrWrongSignatureSize + e.got = len(signature) + return e + } + + if [signatureSize]byte(signature) == [signatureSize]byte{} { + return ErrZeroSignature + } + return nil +} + +func (mv *messageValidator) commonSignerValidation(signer spectypes.OperatorID, share *ssvtypes.SSVShare) error { + if signer == 0 { + return ErrZeroSigner + } + + if !slices.ContainsFunc(share.Committee, mv.containsSignerFunc(signer)) { + return ErrSignerNotInCommittee + } + + return nil +} + +func (mv *messageValidator) validateSlotTime(messageSlot phase0.Slot, role spectypes.BeaconRole, receivedAt time.Time) error { + if mv.earlyMessage(messageSlot, receivedAt) { + return ErrEarlyMessage + } + + if lateness := mv.lateMessage(messageSlot, role, receivedAt); lateness > 0 { + e := ErrLateMessage + e.got = fmt.Sprintf("late by %v", lateness) + return e + } + + return nil +} + +func (mv *messageValidator) earlyMessage(slot phase0.Slot, receivedAt time.Time) bool { + return mv.netCfg.Beacon.GetSlotEndTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())). + Add(-clockErrorTolerance).Before(mv.netCfg.Beacon.GetSlotStartTime(slot)) +} + +func (mv *messageValidator) lateMessage(slot phase0.Slot, role spectypes.BeaconRole, receivedAt time.Time) time.Duration { + var ttl phase0.Slot + switch role { + case spectypes.BNRoleProposer, spectypes.BNRoleSyncCommittee, spectypes.BNRoleSyncCommitteeContribution: + ttl = 1 + lateSlotAllowance + case spectypes.BNRoleAttester, spectypes.BNRoleAggregator: + ttl = 32 + lateSlotAllowance + case spectypes.BNRoleValidatorRegistration: + return 0 + } + + deadline := mv.netCfg.Beacon.GetSlotStartTime(slot + ttl). + Add(lateMessageMargin).Add(clockErrorTolerance) + + return mv.netCfg.Beacon.GetSlotStartTime(mv.netCfg.Beacon.EstimatedSlotAtTime(receivedAt.Unix())). 
+ Sub(deadline) +} + +func (mv *messageValidator) consensusState(messageID spectypes.MessageID) *ConsensusState { + id := ConsensusID{ + PubKey: phase0.BLSPubKey(messageID.GetPubKey()), + Role: messageID.GetRoleType(), + } + + if _, ok := mv.index.Load(id); !ok { + cs := &ConsensusState{ + Signers: hashmap.New[spectypes.OperatorID, *SignerState](), + } + mv.index.Store(id, cs) + } + + cs, _ := mv.index.Load(id) + return cs.(*ConsensusState) +} diff --git a/message/validation/validation_test.go b/message/validation/validation_test.go new file mode 100644 index 0000000000..b307e05049 --- /dev/null +++ b/message/validation/validation_test.go @@ -0,0 +1,1774 @@ +package validation + +import ( + "bytes" + "encoding/hex" + "math" + "testing" + "time" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/herumi/bls-eth-go-binary/bls" + pubsub "github.com/libp2p/go-libp2p-pubsub" + pspb "github.com/libp2p/go-libp2p-pubsub/pb" + "github.com/stretchr/testify/require" + eth2types "github.com/wealdtech/go-eth2-types/v2" + "go.uber.org/zap/zaptest" + + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/storage" + beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" +) + +func Test_ValidateSSVMessage(t *testing.T) { + logger := zaptest.NewLogger(t) + db, err := kv.NewInMemory(logger, basedb.Options{}) + require.NoError(t, err) + + ns, err := storage.NewNodeStorage(logger, db) + require.NoError(t, err) + + const validatorIndex = 123 + + ks := spectestingutils.Testing4SharesSet() + share := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + Index: validatorIndex, + }, + Liquidated: false, + }, + } + require.NoError(t, ns.Shares().Save(nil, share)) + + netCfg := networkconfig.TestNetwork + + roleAttester := spectypes.BNRoleAttester + + // Message validation happy flow, messages are not ignored or rejected and there are no errors + t.Run("happy flow", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + }) + + // Make sure messages are incremented and throw an ignore message if more than 1 for a commit + t.Run("message counts", func(t *testing.T) { + validator 
:= NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + state := validator.consensusState(msgID) + for i := spectypes.OperatorID(1); i <= 4; i++ { + signerState := state.GetSignerState(i) + require.Nil(t, signerState) + } + + signedMsg := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedMsg, err := signedMsg.Encode() + require.NoError(t, err) + + ssvMsg := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMsg, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.NoError(t, err) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + state1 := state.GetSignerState(1) + require.NotNil(t, state1) + require.EqualValues(t, height, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Proposal: 1}, state1.MessageCounts) + for i := spectypes.OperatorID(2); i <= 4; i++ { + signerState := state.GetSignerState(i) + require.Nil(t, signerState) + } + + signedMsg = spectestingutils.TestingPrepareMessageWithParams(ks.Shares[1], 1, 2, height, spectestingutils.TestingIdentifier, spectestingutils.TestingQBFTRootData) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.NoError(t, err) + + require.NotNil(t, state1) + require.EqualValues(t, height, state1.Slot) + require.EqualValues(t, 2, state1.Round) + require.EqualValues(t, MessageCounts{Prepare: 1}, state1.MessageCounts) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + signedMsg = spectestingutils.TestingCommitMessageWithHeight(ks.Shares[1], 1, height+1) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.NoError(t, err) + require.NotNil(t, state1) + require.EqualValues(t, height+1, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Commit: 1}, state1.MessageCounts) + + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.ErrorContains(t, err, ErrTooManySameTypeMessagesPerRound.Error()) + + signedMsg = spectestingutils.TestingCommitMultiSignerMessageWithHeight([]*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, height+1) + encodedMsg, err = signedMsg.Encode() + require.NoError(t, err) + + ssvMsg.Data = encodedMsg + _, _, err = validator.validateSSVMessage(ssvMsg, receivedAt.Add(netCfg.Beacon.SlotDurationSec())) + require.NoError(t, err) + require.NotNil(t, state1) + require.EqualValues(t, height+1, state1.Slot) + require.EqualValues(t, 1, state1.Round) + require.EqualValues(t, MessageCounts{Commit: 1, Decided: 1}, state1.MessageCounts) + }) + + // Send a pubsub message with no data should cause an error + t.Run("pubsub message has no data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, 
WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + pmsg := &pubsub.Message{} + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err := validator.validateP2PMessage(pmsg, receivedAt) + + require.ErrorIs(t, err, ErrPubSubMessageHasNoData) + }) + + // Send a pubsub message where there is too much data should cause an error + t.Run("pubsub data too big", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + topic := commons.GetTopicFullName(commons.ValidatorTopicID(share.ValidatorPubKey)[0]) + pmsg := &pubsub.Message{ + Message: &pspb.Message{ + Data: bytes.Repeat([]byte{1}, 10_000_000), + Topic: &topic, + From: []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r"), + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) + + e := ErrPubSubDataTooBig + e.got = 10_000_000 + require.ErrorIs(t, err, e) + }) + + // Send a malformed pubsub message (empty message) should return an error + t.Run("empty pubsub message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + topic := commons.GetTopicFullName(commons.ValidatorTopicID(share.ValidatorPubKey)[0]) + pmsg := &pubsub.Message{ + Message: &pspb.Message{ + Data: []byte{1}, + Topic: &topic, + From: []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r"), + }, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateP2PMessage(pmsg, receivedAt) + + require.ErrorContains(t, err, ErrMalformedPubSubMessage.Error()) + }) + + // Send a message with incorrect data (unable to decode incorrect message type) + t.Run("bad data format", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{1}, 500), + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Send a message with no data should return an error + t.Run("no data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: []byte{}, + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + require.ErrorIs(t, err, ErrEmptyData) + + message = &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: nil, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorIs(t, err, 
ErrEmptyData) + }) + + // Send a message where there is too much data should cause an error + t.Run("data too big", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + const tooBigMsgSize = maxMessageSize * 2 + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{0x1}, tooBigMsgSize), + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrSSVDataTooBig + expectedErr.got = tooBigMsgSize + expectedErr.want = maxMessageSize + require.ErrorIs(t, err, expectedErr) + }) + + // Send exact allowed data size amount but with invalid data (fails to decode) + t.Run("data size borderline / malformed message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: bytes.Repeat([]byte{0x1}, maxMessageSize), + } + + _, _, err := validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Send an invalid SSV message type returns an error + t.Run("invalid SSV message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := &spectypes.SSVMessage{ + MsgType: math.MaxUint64, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: []byte{0x1}, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrUnknownSSVMessageType.Error()) + }) + + // Empty validator public key returns an error + t.Run("empty validator public key", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, spectypes.ValidatorPK{}, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + require.ErrorContains(t, err, ErrDeserializePublicKey.Error()) + }) + + // Generate random validator and validate it is unknown to the network + t.Run("unknown validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + sk, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, sk.PublicKey().Marshal(), roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrUnknownValidator + expectedErr.got = hex.EncodeToString(sk.PublicKey().Marshal()) + 
require.ErrorIs(t, err, expectedErr) + }) + + // Make sure messages are dropped if on the incorrect network + t.Run("wrong domain", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + wrongDomain := spectypes.DomainType{math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8} + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(wrongDomain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrWrongDomain + expectedErr.got = hex.EncodeToString(wrongDomain[:]) + expectedErr.want = hex.EncodeToString(netCfg.Domain[:]) + require.ErrorIs(t, err, expectedErr) + }) + + // Send message with a value that refers to a non-existent role + t.Run("invalid role", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, math.MaxUint64), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrInvalidRole) + }) + + // Perform validator registration with a consensus type message will give an error + t.Run("consensus validator registration", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, spectypes.BNRoleValidatorRegistration), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrConsensusValidatorRegistration) + }) + + // Ignore messages related to a validator that is liquidated + t.Run("liquidated validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + liquidatedSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + liquidatedShare := &ssvtypes.SSVShare{ + Share: 
*spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + }, + Liquidated: true, + }, + } + liquidatedShare.ValidatorPubKey = liquidatedSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, liquidatedShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, liquidatedShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, time.Now()) + expectedErr := ErrValidatorLiquidated + require.ErrorIs(t, err, expectedErr) + + require.NoError(t, ns.Shares().Delete(nil, liquidatedShare.ValidatorPubKey)) + }) + + // Ignore messages related to a validator that is not active + t.Run("inactive validator", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + inactiveSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + inactiveShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateUnknown, + }, + Liquidated: false, + }, + } + inactiveShare.ValidatorPubKey = inactiveSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, inactiveShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, inactiveShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrValidatorNotAttesting + expectedErr.got = eth2apiv1.ValidatorStateUnknown.String() + require.ErrorIs(t, err, expectedErr) + + require.NoError(t, ns.Shares().Delete(nil, inactiveShare.ValidatorPubKey)) + }) + + // Unable to process a message with a validator that is not on the network + t.Run("no share metadata", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + noMetadataSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + noMetadataShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: nil, + Liquidated: false, + }, + } + noMetadataShare.ValidatorPubKey = noMetadataSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, noMetadataShare)) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, noMetadataShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + 
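// waitAfterSlotStart (a validator method defined elsewhere in this PR) offsets
+		// receivedAt so the message lands inside the allowed time window for the role.
+		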
receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrNoShareMetadata) + + require.NoError(t, ns.Shares().Delete(nil, noMetadataShare.ValidatorPubKey)) + }) + + // Receive error if more than 2 attestation duties in an epoch + t.Run("too many duties", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + require.NoError(t, err) + + validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+4) + encodedValidSignedMessage, err = validSignedMessage.Encode() + require.NoError(t, err) + + message.Data = encodedValidSignedMessage + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+4).Add(validator.waitAfterSlotStart(roleAttester))) + require.NoError(t, err) + + validSignedMessage = spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height+8) + encodedValidSignedMessage, err = validSignedMessage.Encode() + require.NoError(t, err) + + message.Data = encodedValidSignedMessage + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot+8).Add(validator.waitAfterSlotStart(roleAttester))) + require.ErrorContains(t, err, ErrTooManyDutiesPerEpoch.Error()) + }) + + // Throw error if getting a message for proposal and see there is no message from beacon + t.Run("no proposal duties", func(t *testing.T) { + const epoch = 1 + slot := netCfg.Beacon.FirstSlotAtEpoch(epoch) + height := specqbft.Height(slot) + + dutyStore := dutystore.New() + dutyStore.Proposer.Add(epoch, slot, validatorIndex+1, ð2apiv1.ProposerDuty{}, true) + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithDutyStore(dutyStore), WithSignatureVerification(true)).(*messageValidator) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, spectypes.BNRoleProposer), + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer))) + require.ErrorContains(t, err, ErrNoDuty.Error()) + + dutyStore = dutystore.New() + dutyStore.Proposer.Add(epoch, slot, validatorIndex, ð2apiv1.ProposerDuty{}, true) + validator = NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithDutyStore(dutyStore), WithSignatureVerification(true)).(*messageValidator) + _, _, err = validator.validateSSVMessage(message, 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer))) + require.NoError(t, err) + }) + + // Get error when receiving a message with over 13 partial signatures + t.Run("partial message too big", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + for i := 0; i < 13; i++ { + msg.Message.Messages = append(msg.Message.Messages, msg.Message.Messages[0]) + } + + _, err := msg.Encode() + require.ErrorContains(t, err, "max expected 13 and 14 found") + }) + + // Get error when receiving message from operator who is not affiliated with the validator + t.Run("signer ID not in committee", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 5, specqbft.Height(slot)) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrSignerNotInCommittee) + }) + + // Get error when receiving message from operator who is non-existent (operator id 0) + t.Run("partial zero signer ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 0, specqbft.Height(slot)) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + // Get error when receiving partial signature message from operator who is the incorrect signer + t.Run("partial inconsistent signer ID", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages[0].Signer = 2 + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrUnexpectedSigner + expectedErr.got = spectypes.OperatorID(2) + expectedErr.want = spectypes.OperatorID(1) + require.ErrorIs(t, err, expectedErr) + }) + + // Receive error when receiving a duplicated partial signature message + t.Run("partial duplicated message", func(t 
*testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages = append(msg.Message.Messages, msg.Message.Messages[0]) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrDuplicatedPartialSignatureMessage) + }) + + // Receive error when "partialSignatureMessages" does not contain any "partialSignatureMessage" + t.Run("no partial signature messages", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Message.Messages = []*spectypes.PartialSignatureMessage{} + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrNoPartialMessages) + }) + + // Receive error when the partial signature message is not enough bytes + t.Run("partial wrong signature size", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Signature = []byte{1} + + encoded, err := msg.Encode() + require.ErrorContains(t, err, "bytes array does not have the correct length") + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrMalformedMessage.Error()) + }) + + // Get error when receiving a partial signature message with an invalid signature + t.Run("partial wrong signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, specqbft.Height(slot)) + msg.Signature = bytes.Repeat([]byte{1}, 96) + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := 
netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrInvalidSignature.Error()) + }) + + // Run partial message type validation tests + t.Run("partial message type validation", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(162304) + + // Check happy flow of a duty for each role + t.Run("valid", func(t *testing.T) { + tests := map[spectypes.BeaconRole][]spectypes.PartialSigMsgType{ + spectypes.BNRoleAttester: {spectypes.PostConsensusPartialSig}, + spectypes.BNRoleAggregator: {spectypes.PostConsensusPartialSig, spectypes.SelectionProofPartialSig}, + spectypes.BNRoleProposer: {spectypes.PostConsensusPartialSig, spectypes.RandaoPartialSig}, + spectypes.BNRoleSyncCommittee: {spectypes.PostConsensusPartialSig}, + spectypes.BNRoleSyncCommitteeContribution: {spectypes.PostConsensusPartialSig, spectypes.ContributionProofs}, + spectypes.BNRoleValidatorRegistration: {spectypes.ValidatorRegistrationPartialSig}, + } + + for role, msgTypes := range tests { + for _, msgType := range msgTypes { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + innerSig, r, err := spectestingutils.NewTestingKeyManager().SignBeaconObject(spectypes.SSZUint64(spectestingutils.TestingDutyEpoch), phase0.Domain{}, ks.Shares[1].GetPublicKey().Serialize(), phase0.DomainType{}) + require.NoError(t, err) + + innerMsg := spectypes.PartialSignatureMessages{ + Type: msgType, + Messages: []*spectypes.PartialSignatureMessage{ + { + PartialSignature: innerSig, + SigningRoot: r, + Signer: 1, + }, + }, + } + + sig, err := spectestingutils.NewTestingKeyManager().SignRoot(innerMsg, spectypes.PartialSignatureType, ks.Shares[1].GetPublicKey().Serialize()) + require.NoError(t, err) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: innerMsg, + Signature: sig, + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + } + } + }) + + // Get error when receiving a message with an incorrect message type + t.Run("invalid message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: spectypes.PartialSignatureMessages{ + Type: math.MaxUint64, + }, + Signature: make([]byte, 96), + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrUnknownPartialMessageType.Error()) + }) + + // Get error when sending an unexpected message type for the required duty (sending randao for attestor duty) + t.Run("mismatch", func(t *testing.T) { + tests := map[spectypes.BeaconRole][]spectypes.PartialSigMsgType{ + spectypes.BNRoleAttester: 
{spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleAggregator: {spectypes.RandaoPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleProposer: {spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleSyncCommittee: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleSyncCommitteeContribution: {spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ValidatorRegistrationPartialSig}, + spectypes.BNRoleValidatorRegistration: {spectypes.PostConsensusPartialSig, spectypes.RandaoPartialSig, spectypes.SelectionProofPartialSig, spectypes.ContributionProofs}, + } + + for role, msgTypes := range tests { + for _, msgType := range msgTypes { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + msg := &spectypes.SignedPartialSignatureMessage{ + Message: spectypes.PartialSignatureMessages{ + Type: msgType, + }, + Signature: make([]byte, 96), + Signer: 1, + } + + encoded, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrPartialSignatureTypeRoleMismatch.Error()) + } + } + }) + }) + + // Get error when receiving QBFT message with an invalid type + t.Run("invalid QBFT message type", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + msg := &specqbft.Message{ + MsgType: math.MaxUint64, + Height: height, + Round: specqbft.FirstRound, + Identifier: spectestingutils.TestingIdentifier, + Root: spectestingutils.TestingQBFTRootData, + } + signedMsg := spectestingutils.SignQBFTMsg(ks.Shares[1], 1, msg) + + encodedValidSignedMessage, err := signedMsg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrUnknownQBFTMessageType + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving an incorrect signature size (too small) + t.Run("wrong signature size", func(t *testing.T) { + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Signature = []byte{0x1} + + _, err := validSignedMessage.Encode() + require.Error(t, err) + }) + + // Initialize signature tests + t.Run("zero signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := 
netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + // Get error when receiving a consensus message with a zero signature + t.Run("consensus message", func(t *testing.T) { + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + zeroSignature := [signatureSize]byte{} + validSignedMessage.Signature = zeroSignature[:] + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSignature) + }) + + // Get error when receiving a partial signature message with a zero signature + t.Run("partial signature message", func(t *testing.T) { + partialSigMessage := spectestingutils.PostConsensusAttestationMsg(ks.Shares[1], 1, height) + zeroSignature := [signatureSize]byte{} + partialSigMessage.Signature = zeroSignature[:] + + encoded, err := partialSigMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorIs(t, err, ErrZeroSignature) + }) + }) + + // Get error when receiving a message with an empty list of signers + t.Run("no signers", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Signers = []spectypes.OperatorID{} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrNoSigners) + }) + + // Initialize zero signer tests + t.Run("zero signer", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + inactiveSK, err := eth2types.GenerateBLSPrivateKey() + require.NoError(t, err) + + zeroSignerKS := spectestingutils.Testing7SharesSet() + zeroSignerShare := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(zeroSignerKS), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: eth2apiv1.ValidatorStateActiveOngoing, + }, + Liquidated: false, + }, + } + zeroSignerShare.Committee[0].OperatorID = 0 + zeroSignerShare.ValidatorPubKey = inactiveSK.PublicKey().Marshal() + + require.NoError(t, ns.Shares().Save(nil, zeroSignerShare)) + + // Get error when receiving a consensus message with a zero signer + t.Run("consensus message", func(t 
*testing.T) { + validSignedMessage := spectestingutils.TestingProposalMessage(zeroSignerKS.Shares[1], 1) + validSignedMessage.Signers = []spectypes.OperatorID{0} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, zeroSignerShare.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + // Get error when receiving a partial message with a zero signer + t.Run("partial signature message", func(t *testing.T) { + partialSignatureMessage := spectestingutils.PostConsensusAttestationMsg(zeroSignerKS.Shares[1], 1, specqbft.Height(slot)) + partialSignatureMessage.Signer = 0 + + encoded, err := partialSignatureMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, zeroSignerShare.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrZeroSigner) + }) + + require.NoError(t, ns.Shares().Delete(nil, zeroSignerShare.ValidatorPubKey)) + }) + + // Get error when receiving a message with duplicated signers + t.Run("non unique signer", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{1, 2, 2} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrDuplicatedSigner) + }) + + // Get error when receiving a message with non-sorted signers + t.Run("signers not sorted", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{3, 2, 1} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrSignersNotSorted) + }) + + // Get 
an error when receiving a message whose number of signers does not form a quorum + t.Run("wrong signers length", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingCommitMultiSignerMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + validSignedMessage.Signers = []spectypes.OperatorID{1, 2} + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrWrongSignersLength + expectedErr.got = 2 + expectedErr.want = "between 3 and 4" + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving a non decided message with multiple signers + t.Run("non decided with multiple signers", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingMultiSignerProposalMessage( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrNonDecidedWithMultipleSigners + expectedErr.got = 3 + require.ErrorIs(t, err, expectedErr) + }) + + // Get error when receiving a proposal message with an invalid signature (random bytes) + t.Run("wrong signed signature", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + validSignedMessage := spectestingutils.TestingProposalMessage(ks.Shares[1], 1) + validSignedMessage.Signature = bytes.Repeat([]byte{1}, 96) + + encoded, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrInvalidSignature.Error()) + }) + + // Send late message for all roles and receive late message error + t.Run("late message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := 
validSignedMessage.Encode() + require.NoError(t, err) + + tests := map[spectypes.BeaconRole]time.Time{ + spectypes.BNRoleAttester: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAttester)), + spectypes.BNRoleAggregator: netCfg.Beacon.GetSlotStartTime(slot + 35).Add(validator.waitAfterSlotStart(spectypes.BNRoleAggregator)), + spectypes.BNRoleProposer: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleProposer)), + spectypes.BNRoleSyncCommittee: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommittee)), + spectypes.BNRoleSyncCommitteeContribution: netCfg.Beacon.GetSlotStartTime(slot + 4).Add(validator.waitAfterSlotStart(spectypes.BNRoleSyncCommitteeContribution)), + } + + for role, receivedAt := range tests { + role, receivedAt := role, receivedAt + t.Run(role.String(), func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedValidSignedMessage, + } + + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorContains(t, err, ErrLateMessage.Error()) + }) + } + }) + + // Send early message for all roles before the duty start and receive early message error + t.Run("early message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot - 1) + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.ErrorIs(t, err, ErrEarlyMessage) + }) + + // Send message from non-leader acting as a leader should receive an error + t.Run("not a leader", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[2], 2, height) + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrSignerNotLeader + expectedErr.got = spectypes.OperatorID(2) + expectedErr.want = spectypes.OperatorID(1) + require.ErrorIs(t, err, expectedErr) + }) + + // Send wrong size of data (8 bytes) for a prepare justification message should receive an error + t.Run("malformed prepare justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := 
netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Message.PrepareJustification = [][]byte{{1}} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedPrepareJustifications.Error()) + }) + + // Send prepare justification message without a proposal message should receive an error + t.Run("non-proposal with prepare justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.TestingProposalMessageWithParams( + ks.Shares[1], spectypes.OperatorID(1), specqbft.FirstRound, specqbft.FirstHeight, spectestingutils.TestingQBFTRootData, + nil, + spectestingutils.MarshalJustifications([]*specqbft.SignedMessage{ + spectestingutils.TestingRoundChangeMessage(ks.Shares[1], spectypes.OperatorID(1)), + })) + msg.Message.MsgType = specqbft.PrepareMsgType + + encodedValidSignedMessage, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrUnexpectedPrepareJustifications + expectedErr.got = specqbft.PrepareMsgType + require.ErrorIs(t, err, expectedErr) + }) + + // Send round change justification message without a proposal message should receive an error + t.Run("non-proposal with round change justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msg := spectestingutils.TestingProposalMessageWithParams( + ks.Shares[1], spectypes.OperatorID(1), specqbft.FirstRound, specqbft.FirstHeight, spectestingutils.TestingQBFTRootData, + spectestingutils.MarshalJustifications([]*specqbft.SignedMessage{ + spectestingutils.TestingPrepareMessage(ks.Shares[1], spectypes.OperatorID(1)), + }), + nil, + ) + msg.Message.MsgType = specqbft.PrepareMsgType + + encodedValidSignedMessage, err := msg.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrUnexpectedRoundChangeJustifications + expectedErr.got = specqbft.PrepareMsgType + require.ErrorIs(t, err, expectedErr) + }) + + // Send round change justification message with a malformed message (1 byte) should receive an error + 
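// The one-byte justification below is not a decodable signed message, so validation should fail with ErrMalformedRoundChangeJustifications +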
t.Run("malformed round change justification", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.Message.RoundChangeJustification = [][]byte{{1}} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + require.ErrorContains(t, err, ErrMalformedRoundChangeJustifications.Error()) + }) + + // Send a message whose root hash doesn't match the expected root hash should receive an error + t.Run("wrong root hash", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + validSignedMessage := spectestingutils.TestingProposalMessageWithHeight(ks.Shares[1], 1, height) + validSignedMessage.FullData = []byte{1} + + encodedValidSignedMessage, err := validSignedMessage.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedValidSignedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message, receivedAt) + + expectedErr := ErrInvalidHash + require.ErrorIs(t, err, expectedErr) + }) + + // Receive proposal from same operator twice with different messages (same round) should receive an error + t.Run("double proposal with different data", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingProposalMessageWithRound(ks.Shares[1], 1, 1) + signed2.FullData = []byte{1} + signed2.Message.Root, err = specqbft.HashDataRoot(signed2.FullData) + require.NoError(t, err) + + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrDuplicatedProposalWithDifferentData + require.ErrorIs(t, err, expectedErr) + }) + + // Receive prepare from same operator twice in the same round should receive an error + 
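// The second prepare below is identical to the first; the validator limits same-type messages per round, so it should be rejected with ErrTooManySameTypeMessagesPerRound +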
t.Run("double prepare", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingPrepareMessage(ks.Shares[1], 1) + require.NoError(t, err) + + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "prepare, having pre-consensus: 0, proposal: 0, prepare: 1, commit: 0, decided: 0, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive commit from same operator twice in the same round should receive an error + t.Run("double commit", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingCommitMessage(ks.Shares[1], 1) + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "commit, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 1, decided: 0, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive round change from same operator twice in the same round should receive an error + t.Run("double round change", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signed1 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) + encodedSigned1, err := signed1.Encode() + require.NoError(t, err) + + message1 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: 
spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned1, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(message1, receivedAt) + require.NoError(t, err) + + signed2 := spectestingutils.TestingRoundChangeMessage(ks.Shares[1], 1) + encodedSigned2, err := signed2.Encode() + require.NoError(t, err) + + message2 := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester), + Data: encodedSigned2, + } + + _, _, err = validator.validateSSVMessage(message2, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "round change, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 0, round change: 1, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive too many decided messages should receive an error + t.Run("too many decided", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + + signed := spectestingutils.TestingCommitMultiSignerMessageWithRound( + []*bls.SecretKey{ks.Shares[1], ks.Shares[2], ks.Shares[3]}, []spectypes.OperatorID{1, 2, 3}, 1) + encodedSigned, err := signed.Encode() + require.NoError(t, err) + + message := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedSigned, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + + for i := 0; i < maxDecidedCount(len(share.Committee)); i++ { + _, _, err = validator.validateSSVMessage(message, receivedAt) + require.NoError(t, err) + } + + _, _, err = validator.validateSSVMessage(message, receivedAt) + expectedErr := ErrTooManySameTypeMessagesPerRound + expectedErr.got = "decided, having pre-consensus: 0, proposal: 0, prepare: 0, commit: 0, decided: 8, round change: 0, post-consensus: 0" + require.ErrorIs(t, err, expectedErr) + }) + + // Receive message from a round that is too high for that epoch should receive an error + t.Run("round too high", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + tests := map[spectypes.BeaconRole]specqbft.Round{ + spectypes.BNRoleAttester: 13, + spectypes.BNRoleAggregator: 13, + spectypes.BNRoleProposer: 7, + spectypes.BNRoleSyncCommittee: 7, + spectypes.BNRoleSyncCommitteeContribution: 7, + } + + for role, round := range tests { + role, round := role, round + t.Run(role.String(), func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, role) + + signedMessage := spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, round) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(0).Add(validator.waitAfterSlotStart(role)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorContains(t, err, ErrRoundTooHigh.Error()) + }) + } + }) + + // Receive message from a round that is incorrect for current epoch should receive an error + t.Run("round 
already advanced", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + signedMessage := spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 2) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.NoError(t, err) + + signedMessage = spectestingutils.TestingPrepareMessageWithRound(ks.Shares[1], 1, 1) + encodedMessage, err = signedMessage.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorContains(t, err, ErrRoundAlreadyAdvanced.Error()) + }) + + // Initialize tests for testing when sending a message with a slot before the current one + t.Run("slot already advanced", func(t *testing.T) { + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + height := specqbft.Height(slot) + + // Send a consensus message with a slot before the current one should cause an error + t.Run("consensus message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + signedMessage := spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height+1) + encodedMessage, err := signedMessage.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester))) + require.NoError(t, err) + + signedMessage = spectestingutils.TestingPrepareMessageWithHeight(ks.Shares[1], 1, height) + encodedMessage, err = signedMessage.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + + // Send a partial signature message with a slot before the current one should cause an error + t.Run("partial signature message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + message := spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height+1) + message.Message.Slot = phase0.Slot(height) + 1 + sig, err := spectestingutils.NewTestingKeyManager().SignRoot(message.Message, spectypes.PartialSignatureType, ks.Shares[2].GetPublicKey().Serialize()) + require.NoError(t, err) + message.Signature = sig + + encodedMessage, err := message.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: msgID, + Data: encodedMessage, + } + + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot+1).Add(validator.waitAfterSlotStart(roleAttester))) + 
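// The slot+1 message must pass validation first so the validator records the signer's latest slot; the earlier-slot message below should then be rejected with ErrSlotAlreadyAdvanced +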
require.NoError(t, err) + + message = spectestingutils.PostConsensusAttestationMsg(ks.Shares[2], 2, height) + message.Message.Slot = phase0.Slot(height) + sig, err = spectestingutils.NewTestingKeyManager().SignRoot(message.Message, spectypes.PartialSignatureType, ks.Shares[2].GetPublicKey().Serialize()) + require.NoError(t, err) + message.Signature = sig + + encodedMessage, err = message.Encode() + require.NoError(t, err) + + ssvMessage.Data = encodedMessage + _, _, err = validator.validateSSVMessage(ssvMessage, netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester))) + require.ErrorContains(t, err, ErrSlotAlreadyAdvanced.Error()) + }) + }) + + // Receive an event message from an operator that is not myself should receive an error + t.Run("event message", func(t *testing.T) { + validator := NewMessageValidator(netCfg, WithShareStorage(ns.Shares()), WithSignatureVerification(true)).(*messageValidator) + + msgID := spectypes.NewMsgID(netCfg.Domain, share.ValidatorPubKey, roleAttester) + slot := netCfg.Beacon.FirstSlotAtEpoch(1) + + eventMsg := &ssvtypes.EventMsg{} + encoded, err := eventMsg.Encode() + require.NoError(t, err) + + ssvMessage := &spectypes.SSVMessage{ + MsgType: ssvmessage.SSVEventMsgType, + MsgID: msgID, + Data: encoded, + } + + receivedAt := netCfg.Beacon.GetSlotStartTime(slot).Add(validator.waitAfterSlotStart(roleAttester)) + _, _, err = validator.validateSSVMessage(ssvMessage, receivedAt) + require.ErrorIs(t, err, ErrEventMessage) + }) +} diff --git a/migrations/migration_2_encrypt_shares.go b/migrations/migration_2_encrypt_shares.go index 4ca0eb62c7..03c40a301d 100644 --- a/migrations/migration_2_encrypt_shares.go +++ b/migrations/migration_2_encrypt_shares.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "crypto/x509" "fmt" + "github.com/bloxapp/ssv/utils/rsaencryption" "github.com/bloxapp/ssv/storage/basedb" diff --git a/monitoring/grafana/dashboard_msg_validation.json b/monitoring/grafana/dashboard_msg_validation.json new file mode 100644 index 0000000000..8ea0bd8f08 --- /dev/null +++ b/monitoring/grafana/dashboard_msg_validation.json @@ -0,0 +1,2175 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 144, + "iteration": 1695134055974, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 12, + "interval": "5m", + "options": { + "legend": { + "calcs": 
[], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Total", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Ignored", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Rejected", + "refId": "C" + } + ], + "title": "Message RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "#F2495C", + "mode": "palette-classic", + "seriesBy": "last" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 0, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 3, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 3, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max", + "mean" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval]))\n/\nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval]))", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Rejected", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval]))\n/\nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval]))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "Ignored", + "refId": "B" + } + ], + "title": "Ignore/Reject Rate", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + 
"fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 20, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval])) by (role) / sum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (role)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Ignored by Role", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 22, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval])) by (role) / sum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (role)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Rejected by Role", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": 
"bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 23, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"ignored\"}[$__interval])) by (round) \n/ \nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (round)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Ignored by Round", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 24, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_validation{instance=~\"$instance.*\", status=\"rejected\"}[$__interval])) by (round) \n/ \nsum(rate(ssv_message_validation{instance=~\"$instance.*\"}[$__interval])) by (round)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{role}}", + "refId": "A" + } + ], + "title": "Rejected by Round", + "transformations": [ + { + "id": "convertFieldType", + "options": { + "conversions": [ + { + "destinationType": "number", + "targetField": "Value" + } + ], + "fields": {} + } + } + ], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "duplicated proposal with different data", + "late message", + "message round is too far from estimated", + "no duty for this epoch", + "round is too high for this role", + "signer has already advanced to a later slot", + "too many messages of same type per round", + "unknown validator", + "validator is not attesting" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 4, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right", + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation{instance=~\"$instance.*\", reason!=\"\"}[$__interval])) by (reason)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{reason}}", + "refId": "A" + } + ], + "title": "Validation Failure Reason", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 5, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_ssv_type{instance=~\"$instance.*\", type!=\"\"}[$__interval])) by (type)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "Messages by SSV type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 6, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type!=\"\"}[$__interval])) by (type)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "Messages by QBFT type", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 48 + }, + "id": 7, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type=\"commit\", signers=\"1\"}[$__interval])) by (signers)", + "format": "time_series", + "instant": false, + "interval": "", + "legendFormat": "Commit", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(sum(increase(ssv_message_validation_consensus_type{instance=~\"$instance.*\", type=\"commit\", signers!=\"1\"}[$__interval])) by (signers))", + "hide": false, + "interval": "", + "legendFormat": "Decided", + "refId": "B" + } + ], + "title": "Commit messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + 
"lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 56 + }, + "id": 19, + "interval": "5m", + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_in_committee{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "in committee", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "rate(ssv_message_non_committee{instance=~\"$instance.*\", decided=\"decided\"}[$__interval])", + "hide": false, + "interval": "", + "legendFormat": "non-committee decided", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_non_committee{instance=~\"$instance.*\", decided=\"non-decided\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "non-committee non-decided", + "refId": "C" + } + ], + "title": "Committee belonging RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 64 + }, + "id": 9, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { 
+ "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_message_size_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message size (bytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "Over panel interval", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 72 + }, + "id": 13, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_size_sum{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "Bytes", + "refId": "A" + } + ], + "title": "Total bytes received RPS (incoming messages)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 80 + }, + "id": 14, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": 
"sum(rate(ssv_message_validation_duration_seconds_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_message_validation_duration_seconds_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_message_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message validation duration (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 88 + }, + "id": 15, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_signature_validation_duration_seconds_sum{instance=~\"$instance.*\"}[$__interval])) / sum(rate(ssv_signature_validation_duration_seconds_count{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, 
rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))", + "hide": false, + "interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, rate(ssv_signature_validation_duration_seconds_bucket{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Signature validation duration (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 96 + }, + "id": 17, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_incoming{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Incoming, RPS", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_outgoing{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Outgoing, RPS", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_queue_drops{instance=~\"$instance.*\"}[$__interval]))\n", + "hide": false, + "interval": "", + "legendFormat": "Dropped, RPS", + "refId": "C" + } + ], + "title": "Queue message RPS", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 104 + }, + "id": 18, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "ssv_message_queue_size{instance=~\"$instance.*\"}", + "hide": false, + "interval": "", + "legendFormat": "Size", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "ssv_message_queue_capacity{instance=~\"$instance.*\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "Capacity", + "refId": "G" + } + ], + "title": "Queue size/capacity", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 112 + }, + "id": 16, + "interval": "5m", + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(rate(ssv_message_time_in_queue_seconds_sum{instance=~\"$instance.*\"}[$__interval])) by (instance)\n/\nsum(rate(ssv_message_size_count{instance=~\"$instance.*\"}[$__interval])) by (instance)\n", + "hide": false, + "interval": "", + "legendFormat": "Average", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.01, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "1st", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.05, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + 
"interval": "", + "legendFormat": "5th", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.5, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "50th", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.95, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "95th", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "histogram_quantile(0.99, sum(rate(ssv_message_time_in_queue_seconds_bucket{instance=~\"$instance.*\"}[$__interval])) by (le, instance))\n", + "hide": false, + "interval": "", + "legendFormat": "99th", + "refId": "E" + } + ], + "title": "Message time in queue (seconds)", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 34, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "ssv-node-v2-4", + "value": "ssv-node-v2-4" + }, + "hide": 1, + "includeAll": false, + "multi": false, + "name": "instance", + "options": [ + { + "selected": false, + "text": "ssv-node-v2-1", + "value": "ssv-node-v2-1" + }, + { + "selected": false, + "text": "ssv-node-v2-2", + "value": "ssv-node-v2-2" + }, + { + "selected": false, + "text": "ssv-node-v2-3", + "value": "ssv-node-v2-3" + }, + { + "selected": true, + "text": "ssv-node-v2-4", + "value": "ssv-node-v2-4" + }, + { + "selected": false, + "text": "ssv-node-v2-5", + "value": "ssv-node-v2-5" + }, + { + "selected": false, + "text": "ssv-node-v2-6", + "value": "ssv-node-v2-6" + }, + { + "selected": false, + "text": "ssv-node-v2-7", + "value": "ssv-node-v2-7" + }, + { + "selected": false, + "text": "ssv-node-v2-8", + "value": "ssv-node-v2-8" + } + ], + "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Message Validation", + "uid": "DppaYPgSk", + "version": 42, + "weekStart": "" +} \ No newline at end of file diff --git a/monitoring/grafana/dashboard_ssv_node.json b/monitoring/grafana/dashboard_ssv_node.json index d5568f9de2..47150acf91 100644 --- a/monitoring/grafana/dashboard_ssv_node.json +++ b/monitoring/grafana/dashboard_ssv_node.json @@ -22,7 +22,7 @@ "fiscalYearStartMonth": 0, "graphTooltip": 0, "id": 115, - "iteration": 1676023992743, + "iteration": 1696933836051, "links": [], "liveNow": false, "panels": [ @@ -2685,8 +2685,245 @@ "title": "Stream Protocols (time-series)", "transformations": [], "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 81 + }, + "id": 67, + "panels": [], + "title": "Vitals", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "description": "Rate per second of requests, responses and active requests.\nResponses are outgoing, for incoming requests.\nSuccessful Requests are outgoing.\nActive are outgoing requests.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": 
"", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "log": 2, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#73BF69", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "10ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "20ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "100ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-orange", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "5000ms" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 82 + }, + "id": 65, + "maxDataPoints": 25, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.3.4", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5ms", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"10.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "10ms", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"20.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"10.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "20ms", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"100.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"20.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "100ms", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"500.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"100.0\"}[5m]))", + 
"format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "500ms", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "eXfXfqH7z" + }, + "exemplar": true, + "expr": "sum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"5000.0\"}[5m])) -\nsum(increase(slot_ticker_delay_milliseconds_bucket{instance=~\"$instance.*\", le=\"500.0\"}[5m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "5000ms", + "refId": "F" + } + ], + "title": "Duty Execution Latency (5m)", + "transformations": [], + "type": "timeseries" } ], + "refresh": "", "schemaVersion": 34, "style": "dark", "tags": [], @@ -2695,8 +2932,8 @@ { "current": { "selected": false, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" + "text": "ssv-node-v2-6", + "value": "ssv-node-v2-6" }, "description": "", "hide": 1, @@ -2730,7 +2967,7 @@ "value": "ssv-node-v2-5" }, { - "selected": false, + "selected": true, "text": "ssv-node-v2-6", "value": "ssv-node-v2-6" }, @@ -2744,26 +2981,6 @@ "text": "ssv-node-v2-8", "value": "ssv-node-v2-8" }, - { - "selected": false, - "text": "ssv-node-9", - "value": "ssv-node-9" - }, - { - "selected": false, - "text": "ssv-node-10", - "value": "ssv-node-10" - }, - { - "selected": false, - "text": "ssv-node-11", - "value": "ssv-node-11" - }, - { - "selected": false, - "text": "ssv-node-12", - "value": "ssv-node-12" - }, { "selected": false, "text": "ssv-exporter", @@ -2773,29 +2990,9 @@ "selected": false, "text": "ssv-exporter-v2", "value": "ssv-exporter-v2" - }, - { - "selected": true, - "text": "ssv-node-v3-1", - "value": "ssv-node-v3-1" - }, - { - "selected": false, - "text": "ssv-node-v3-2", - "value": "ssv-node-v3-2" - }, - { - "selected": false, - "text": "ssv-node-v3-3", - "value": "ssv-node-v3-3" - }, - { - "selected": false, - "text": "ssv-node-v3-4", - "value": "ssv-node-v3-4" } ], - "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-node-9,ssv-node-10,ssv-node-11,ssv-node-12,ssv-exporter,ssv-exporter-v2,ssv-node-v3-1,ssv-node-v3-2,ssv-node-v3-3,ssv-node-v3-4", + "query": "ssv-node-v2-1,ssv-node-v2-2,ssv-node-v2-3,ssv-node-v2-4,ssv-node-v2-5,ssv-node-v2-6,ssv-node-v2-7,ssv-node-v2-8,ssv-exporter,ssv-exporter-v2", "queryValue": "", "skipUrlSync": false, "type": "custom" @@ -2803,13 +3000,13 @@ ] }, "time": { - "from": "now-6h", + "from": "now-1h", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Node Dashboard", "uid": "QNiMrdoVz", - "version": 59, + "version": 70, "weekStart": "" } \ No newline at end of file diff --git a/monitoring/grafana/dashboard_ssv_operator_performance.json b/monitoring/grafana/dashboard_ssv_operator_performance.json index 1ba7c2714f..ce769ee03d 100644 --- a/monitoring/grafana/dashboard_ssv_operator_performance.json +++ b/monitoring/grafana/dashboard_ssv_operator_performance.json @@ -229,7 +229,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -268,7 +268,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"ATTESTER\"} > 
0)", "hide": false, "instant": true, "interval": "", @@ -2033,7 +2033,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -2072,7 +2072,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"PROPOSER\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -3405,7 +3405,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -3444,7 +3444,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"AGGREGATOR\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -4777,7 +4777,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -4816,7 +4816,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -5969,7 +5969,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"})", + "expr": "count(ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", "hide": false, "instant": true, "interval": "", @@ -6008,7 +6008,7 @@ "uid": "eXfXfqH7z" }, "exemplar": false, - "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"})", + "expr": "count_values(\"round\", ssv_qbft_instance_round{instance=~\"$instance.*\", roleType=\"SYNC_COMMITTEE_CONTRIBUTION\"} > 0)", "hide": false, "instant": true, "interval": "", diff --git a/monitoring/metricsreporter/metrics_reporter.go b/monitoring/metricsreporter/metrics_reporter.go index 859d46e518..01227e94c6 100644 --- a/monitoring/metricsreporter/metrics_reporter.go +++ b/monitoring/metricsreporter/metrics_reporter.go @@ -4,12 +4,16 @@ import ( "crypto/sha256" "fmt" "strconv" + "time" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" ethcommon "github.com/ethereum/go-ethereum/common" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/zap" + + ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ) // TODO: implement all methods @@ -33,6 +37,10 @@ const ( validatorPending = float64(8) validatorRemoved = float64(9) validatorUnknown = float64(10) + + messageAccepted = 
"accepted" + messageIgnored = "ignored" + messageRejected = "rejected" ) var ( @@ -65,6 +73,70 @@ var ( Name: "ssv:exporter:operator_index", Help: "operator footprint", }, []string{"pubKey", "index"}) + messageValidationResult = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation", + Help: "Message validation result", + }, []string{"status", "reason", "role", "round"}) + messageValidationSSVType = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation_ssv_type", + Help: "SSV message type", + }, []string{"type"}) + messageValidationConsensusType = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_validation_consensus_type", + Help: "Consensus message type", + }, []string{"type", "signers"}) + messageValidationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_validation_duration_seconds", + Help: "Message validation duration (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.020, 0.050}, + }, []string{}) + signatureValidationDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_signature_validation_duration_seconds", + Help: "Signature validation duration (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.020, 0.050}, + }, []string{}) + messageSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_size", + Help: "Message size", + Buckets: []float64{100, 500, 1_000, 5_000, 10_000, 50_000, 100_000, 500_000, 1_000_000, 5_000_000}, + }, []string{}) + activeMsgValidation = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv:p2p:pubsub:msg:val:active", + Help: "Count active message validation", + }, []string{"topic"}) + incomingQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_incoming", + Help: "The amount of message incoming to the validator's msg queue", + }, []string{"msg_id"}) + outgoingQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_outgoing", + Help: "The amount of message outgoing from the validator's msg queue", + }, []string{"msg_id"}) + droppedQueueMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_queue_drops", + Help: "The amount of message dropped from the validator's msg queue", + }, []string{"msg_id"}) + messageQueueSize = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv_message_queue_size", + Help: "Size of message queue", + }, []string{}) + messageQueueCapacity = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Name: "ssv_message_queue_capacity", + Help: "Capacity of message queue", + }, []string{}) + messageTimeInQueue = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "ssv_message_time_in_queue_seconds", + Help: "Time message spent in queue (seconds)", + Buckets: []float64{0.001, 0.005, 0.010, 0.050, 0.100, 0.500, 1, 5, 10, 60}, + }, []string{"msg_id"}) + inCommitteeMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_in_committee", + Help: "The amount of messages in committee", + }, []string{"ssv_msg_type", "decided"}) + nonCommitteeMessages = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "ssv_message_non_committee", + Help: "The amount of messages not in committee", + }, []string{"ssv_msg_type", "decided"}) ) type MetricsReporter struct { @@ -84,9 +156,26 @@ func New(opts ...Option) *MetricsReporter { allMetrics := []prometheus.Collector{ ssvNodeStatus, executionClientStatus, + executionClientLastFetchedBlock, validatorStatus, eventProcessed, 
eventProcessingFailed, + operatorIndex, + messageValidationResult, + messageValidationSSVType, + messageValidationConsensusType, + messageValidationDuration, + signatureValidationDuration, + messageSize, + activeMsgValidation, + incomingQueueMessages, + outgoingQueueMessages, + droppedQueueMessages, + messageQueueSize, + messageQueueCapacity, + messageTimeInQueue, + inCommitteeMessages, + nonCommitteeMessages, } for i, c := range allMetrics { @@ -102,77 +191,183 @@ func New(opts ...Option) *MetricsReporter { return &MetricsReporter{} } -func (m MetricsReporter) SSVNodeHealthy() { +func (m *MetricsReporter) SSVNodeHealthy() { ssvNodeStatus.Set(ssvNodeHealthy) } -func (m MetricsReporter) SSVNodeNotHealthy() { +func (m *MetricsReporter) SSVNodeNotHealthy() { ssvNodeStatus.Set(ssvNodeNotHealthy) } -func (m MetricsReporter) ExecutionClientReady() { +func (m *MetricsReporter) ExecutionClientReady() { executionClientStatus.Set(executionClientOK) } -func (m MetricsReporter) ExecutionClientSyncing() { +func (m *MetricsReporter) ExecutionClientSyncing() { executionClientStatus.Set(executionClientSyncing) } -func (m MetricsReporter) ExecutionClientFailure() { +func (m *MetricsReporter) ExecutionClientFailure() { executionClientStatus.Set(executionClientFailure) } -func (m MetricsReporter) ExecutionClientLastFetchedBlock(block uint64) { +func (m *MetricsReporter) ExecutionClientLastFetchedBlock(block uint64) { executionClientLastFetchedBlock.Set(float64(block)) } -func (m MetricsReporter) OperatorPublicKey(operatorID spectypes.OperatorID, publicKey []byte) { +func (m *MetricsReporter) OperatorPublicKey(operatorID spectypes.OperatorID, publicKey []byte) { pkHash := fmt.Sprintf("%x", sha256.Sum256(publicKey)) operatorIndex.WithLabelValues(pkHash, strconv.FormatUint(operatorID, 10)).Set(float64(operatorID)) } -func (m MetricsReporter) ValidatorInactive(publicKey []byte) { +func (m *MetricsReporter) ValidatorInactive(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorInactive) } -func (m MetricsReporter) ValidatorNoIndex(publicKey []byte) { +func (m *MetricsReporter) ValidatorNoIndex(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNoIndex) } -func (m MetricsReporter) ValidatorError(publicKey []byte) { +func (m *MetricsReporter) ValidatorError(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorError) } -func (m MetricsReporter) ValidatorReady(publicKey []byte) { +func (m *MetricsReporter) ValidatorReady(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorReady) } -func (m MetricsReporter) ValidatorNotActivated(publicKey []byte) { +func (m *MetricsReporter) ValidatorNotActivated(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNotActivated) } -func (m MetricsReporter) ValidatorExiting(publicKey []byte) { +func (m *MetricsReporter) ValidatorExiting(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorExiting) } -func (m MetricsReporter) ValidatorSlashed(publicKey []byte) { +func (m *MetricsReporter) ValidatorSlashed(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorSlashed) } -func (m MetricsReporter) ValidatorNotFound(publicKey []byte) { +func (m *MetricsReporter) ValidatorNotFound(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorNotFound) } 
-func (m MetricsReporter) ValidatorPending(publicKey []byte) { +func (m *MetricsReporter) ValidatorPending(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorPending) } -func (m MetricsReporter) ValidatorRemoved(publicKey []byte) { +func (m *MetricsReporter) ValidatorRemoved(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorRemoved) } -func (m MetricsReporter) ValidatorUnknown(publicKey []byte) { +func (m *MetricsReporter) ValidatorUnknown(publicKey []byte) { validatorStatus.WithLabelValues(ethcommon.Bytes2Hex(publicKey)).Set(validatorUnknown) } -func (m MetricsReporter) EventProcessed(eventName string) { +func (m *MetricsReporter) EventProcessed(eventName string) { eventProcessed.WithLabelValues(eventName).Inc() } -func (m MetricsReporter) EventProcessingFailed(eventName string) { +func (m *MetricsReporter) EventProcessingFailed(eventName string) { eventProcessingFailed.WithLabelValues(eventName).Inc() } // TODO implement -func (m MetricsReporter) LastBlockProcessed(uint64) {} -func (m MetricsReporter) LogsProcessingError(error) {} +func (m *MetricsReporter) LastBlockProcessed(uint64) {} +func (m *MetricsReporter) LogsProcessingError(error) {} + +func (m *MetricsReporter) MessageAccepted( + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageAccepted, + "", + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) MessageIgnored( + reason string, + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageIgnored, + reason, + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) MessageRejected( + reason string, + role spectypes.BeaconRole, + round specqbft.Round, +) { + messageValidationResult.WithLabelValues( + messageRejected, + reason, + role.String(), + strconv.FormatUint(uint64(round), 10), + ).Inc() +} + +func (m *MetricsReporter) SSVMessageType(msgType spectypes.MsgType) { + messageValidationSSVType.WithLabelValues(ssvmessage.MsgTypeToString(msgType)).Inc() +} + +func (m *MetricsReporter) ConsensusMsgType(msgType specqbft.MessageType, signers int) { + messageValidationConsensusType.WithLabelValues(ssvmessage.QBFTMsgTypeToString(msgType), strconv.Itoa(signers)).Inc() +} + +func (m *MetricsReporter) MessageValidationDuration(duration time.Duration, labels ...string) { + messageValidationDuration.WithLabelValues(labels...).Observe(duration.Seconds()) +} + +func (m *MetricsReporter) SignatureValidationDuration(duration time.Duration, labels ...string) { + signatureValidationDuration.WithLabelValues(labels...).Observe(duration.Seconds()) +} + +func (m *MetricsReporter) MessageSize(size int) { + messageSize.WithLabelValues().Observe(float64(size)) +} + +func (m *MetricsReporter) ActiveMsgValidation(topic string) { + activeMsgValidation.WithLabelValues(topic).Inc() +} + +func (m *MetricsReporter) ActiveMsgValidationDone(topic string) { + activeMsgValidation.WithLabelValues(topic).Dec() +} + +func (m *MetricsReporter) IncomingQueueMessage(messageID spectypes.MessageID) { + incomingQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) OutgoingQueueMessage(messageID spectypes.MessageID) { + outgoingQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) DroppedQueueMessage(messageID spectypes.MessageID) { + 
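The receiver change from `MetricsReporter` to `*MetricsReporter` on every method unifies the type on pointer receivers, avoiding struct copies and the pitfalls of mixing receiver kinds. A small demonstration of why mixing them is error-prone:

```go
package main

import "fmt"

type counter struct{ n int }

// Value receiver: operates on a copy, so the mutation is lost.
func (c counter) IncByValue() { c.n++ }

// Pointer receiver: mutates the caller's value; this is why mixing the
// two styles on one type is discouraged and the diff unifies on pointers.
func (c *counter) IncByPointer() { c.n++ }

func main() {
	c := counter{}
	c.IncByValue()
	c.IncByPointer()
	fmt.Println(c.n) // 1: only the pointer-receiver call stuck
}
```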
droppedQueueMessages.WithLabelValues(messageID.String()).Inc() +} + +func (m *MetricsReporter) MessageQueueSize(size int) { + messageQueueSize.WithLabelValues().Set(float64(size)) +} + +func (m *MetricsReporter) MessageQueueCapacity(size int) { + messageQueueCapacity.WithLabelValues().Set(float64(size)) +} + +func (m *MetricsReporter) MessageTimeInQueue(messageID spectypes.MessageID, d time.Duration) { + messageTimeInQueue.WithLabelValues(messageID.String()).Observe(d.Seconds()) +} + +func (m *MetricsReporter) InCommitteeMessage(msgType spectypes.MsgType, decided bool) { + str := "non-decided" + if decided { + str = "decided" + } + inCommitteeMessages.WithLabelValues(ssvmessage.MsgTypeToString(msgType), str).Inc() +} + +func (m *MetricsReporter) NonCommitteeMessage(msgType spectypes.MsgType, decided bool) { + str := "non-decided" + if decided { + str = "decided" + } + nonCommitteeMessages.WithLabelValues(ssvmessage.MsgTypeToString(msgType), str).Inc() +} diff --git a/network/network.go b/network/network.go index 67af7476fb..f40678892c 100644 --- a/network/network.go +++ b/network/network.go @@ -1,19 +1,19 @@ package network import ( + "context" "io" "go.uber.org/zap" - spectypes "github.com/bloxapp/ssv-spec/types" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) // MessageRouter is accepting network messages and route them to the corresponding (internal) components type MessageRouter interface { // Route routes the given message, this function MUST NOT block - Route(logger *zap.Logger, message spectypes.SSVMessage) + Route(ctx context.Context, message *queue.DecodedSSVMessage) } // MessageRouting allows to register a MessageRouter diff --git a/network/p2p/config.go b/network/p2p/config.go index 77f1e599b5..935eaa4c2a 100644 --- a/network/p2p/config.go +++ b/network/p2p/config.go @@ -14,6 +14,8 @@ import ( "github.com/pkg/errors" "go.uber.org/zap" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/monitoring/metricsreporter" "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/networkconfig" @@ -62,6 +64,10 @@ type Config struct { NodeStorage storage.Storage // Network defines a network configuration. Network networkconfig.NetworkConfig + // MessageValidator validates incoming messages. + MessageValidator validation.MessageValidator + // Metrics report metrics. 
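`MessageRouter.Route` now takes a `context.Context` and an already-decoded `*queue.DecodedSSVMessage` instead of a logger and a raw `spectypes.SSVMessage`, so routing can honor cancellation and downstream consumers skip re-decoding. A sketch of a conforming implementation; the type and its behavior are hypothetical:

```go
package example

import (
	"context"

	"github.com/bloxapp/ssv/protocol/v2/ssv/queue"
)

// nopRouter is hypothetical; it shows the shape of the updated interface,
// not the node's real routing logic.
type nopRouter struct{}

// Route MUST NOT block, per the interface comment in this diff.
func (nopRouter) Route(ctx context.Context, msg *queue.DecodedSSVMessage) {
	select {
	case <-ctx.Done():
		// Drop on cancellation rather than block the pubsub pipeline.
	default:
		// A real router would dispatch msg to the owning validator here.
	}
}
```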
+ Metrics *metricsreporter.MetricsReporter PubsubMsgCacheTTL time.Duration `yaml:"PubsubMsgCacheTTL" env:"PUBSUB_MSG_CACHE_TTL" env-description:"How long a message ID will be remembered as seen"` PubsubOutQueueSize int `yaml:"PubsubOutQueueSize" env:"PUBSUB_OUT_Q_SIZE" env-description:"The size that we assign to the outbound pubsub message queue"` diff --git a/network/p2p/p2p.go b/network/p2p/p2p.go index 4f27098061..768d583042 100644 --- a/network/p2p/p2p.go +++ b/network/p2p/p2p.go @@ -7,24 +7,22 @@ import ( "time" "github.com/cornelk/hashmap" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/network/commons" - connmgrcore "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/peer" libp2pdiscbackoff "github.com/libp2p/go-libp2p/p2p/discovery/backoff" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" "github.com/bloxapp/ssv/network/peers" "github.com/bloxapp/ssv/network/peers/connections" "github.com/bloxapp/ssv/network/records" "github.com/bloxapp/ssv/network/streams" - "github.com/bloxapp/ssv/network/syncing" "github.com/bloxapp/ssv/network/topics" operatorstorage "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/utils/async" @@ -56,14 +54,15 @@ type p2pNetwork struct { interfaceLogger *zap.Logger // struct logger to log in interface methods that do not accept a logger cfg *Config - host host.Host - streamCtrl streams.StreamController - idx peers.Index - disc discovery.Service - topicsCtrl topics.Controller - msgRouter network.MessageRouter - msgResolver topics.MsgPeersResolver - connHandler connections.ConnHandler + host host.Host + streamCtrl streams.StreamController + idx peers.Index + disc discovery.Service + topicsCtrl topics.Controller + msgRouter network.MessageRouter + msgResolver topics.MsgPeersResolver + msgValidator validation.MessageValidator + connHandler connections.ConnHandler state int32 @@ -72,7 +71,6 @@ type p2pNetwork struct { backoffConnector *libp2pdiscbackoff.BackoffConnector subnets []byte libConnManager connmgrcore.ConnManager - syncer syncing.Syncer nodeStorage operatorstorage.Storage operatorPKCache sync.Map } @@ -90,6 +88,7 @@ func New(logger *zap.Logger, cfg *Config) network.P2PNetwork { interfaceLogger: logger, cfg: cfg, msgRouter: cfg.Router, + msgValidator: cfg.MessageValidator, state: stateClosed, activeValidators: hashmap.New[string, validatorStatus](), nodeStorage: cfg.NodeStorage, @@ -171,11 +170,6 @@ func (n *p2pNetwork) Start(logger *zap.Logger) error { return err } - // Create & start ConcurrentSyncer. 
- syncer := syncing.NewConcurrent(n.ctx, syncing.New(n), 16, syncing.DefaultTimeouts, nil) - go syncer.Run(logger) - n.syncer = syncer - return nil } diff --git a/network/p2p/p2p_pubsub.go b/network/p2p/p2p_pubsub.go index 708deb79d3..d88be4af21 100644 --- a/network/p2p/p2p_pubsub.go +++ b/network/p2p/p2p_pubsub.go @@ -1,6 +1,7 @@ package p2pv1 import ( + "context" "encoding/hex" "fmt" @@ -11,12 +12,12 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/records" - - "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) type validatorStatus int @@ -133,8 +134,8 @@ func (n *p2pNetwork) subscribe(logger *zap.Logger, pk spectypes.ValidatorPK) err } // handleIncomingMessages reads messages from the given channel and calls the router, note that this function blocks. -func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(topic string, msg *pubsub.Message) error { - return func(topic string, msg *pubsub.Message) error { +func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(ctx context.Context, topic string, msg *pubsub.Message) error { + return func(ctx context.Context, topic string, msg *pubsub.Message) error { if n.msgRouter == nil { logger.Debug("msg router is not configured") return nil @@ -143,26 +144,28 @@ func (n *p2pNetwork) handlePubsubMessages(logger *zap.Logger) func(topic string, return nil } - var ssvMsg *spectypes.SSVMessage + var decodedMsg *queue.DecodedSSVMessage if msg.ValidatorData != nil { - m, ok := msg.ValidatorData.(spectypes.SSVMessage) + m, ok := msg.ValidatorData.(*queue.DecodedSSVMessage) if ok { - ssvMsg = &m + decodedMsg = m } } - if ssvMsg == nil { + if decodedMsg == nil { return errors.New("message was not decoded") } - p2pID := ssvMsg.GetID().String() + p2pID := decodedMsg.GetID().String() // logger.With( // zap.String("pubKey", hex.EncodeToString(ssvMsg.MsgID.GetPubKey())), // zap.String("role", ssvMsg.MsgID.GetRoleType().String()), // ).Debug("handlePubsubMessages") - metricsRouterIncoming.WithLabelValues(p2pID, message.MsgTypeToString(ssvMsg.MsgType)).Inc() - n.msgRouter.Route(logger, *ssvMsg) + metricsRouterIncoming.WithLabelValues(p2pID, message.MsgTypeToString(decodedMsg.MsgType)).Inc() + + n.msgRouter.Route(ctx, decodedMsg) + return nil } } diff --git a/network/p2p/p2p_setup.go b/network/p2p/p2p_setup.go index 8ffe70656b..10a0e7cbc3 100644 --- a/network/p2p/p2p_setup.go +++ b/network/p2p/p2p_setup.go @@ -276,14 +276,12 @@ func (n *p2pNetwork) setupDiscovery(logger *zap.Logger) error { } func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { - cfg := &topics.PububConfig{ - Host: n.host, - TraceLog: n.cfg.PubSubTrace, - MsgValidatorFactory: func(s string) topics.MsgValidatorFunc { - return topics.NewSSVMsgValidator() - }, - MsgHandler: n.handlePubsubMessages(logger), - ScoreIndex: n.idx, + cfg := &topics.PubSubConfig{ + Host: n.host, + TraceLog: n.cfg.PubSubTrace, + MsgValidator: n.msgValidator, + MsgHandler: n.handlePubsubMessages(logger), + ScoreIndex: n.idx, //Discovery: n.disc, OutboundQueueSize: n.cfg.PubsubOutQueueSize, ValidationQueueSize: n.cfg.PubsubValidationQueueSize, @@ -302,10 +300,12 @@ func (n *p2pNetwork) setupPubsub(logger *zap.Logger) error { go cfg.MsgIDHandler.Start() // run GC every 3 minutes to clear old messages async.RunEvery(n.ctx, time.Minute*3, 
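`handlePubsubMessages` remains a factory that closes over the logger, while the handler it returns now also receives the per-message context. The same pattern in isolation; all names here are illustrative:

```go
package example

import (
	"context"
	"errors"

	"go.uber.org/zap"
)

// handlerFactory mirrors the closure shape used in this diff: configuration
// is captured once, per-message state arrives via the returned function.
func handlerFactory(logger *zap.Logger) func(ctx context.Context, topic string, data []byte) error {
	return func(ctx context.Context, topic string, data []byte) error {
		if len(data) == 0 {
			return errors.New("message was not decoded")
		}
		logger.Debug("handling pubsub message", zap.String("topic", topic))
		return nil
	}
}
```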
midHandler.GC) - _, tc, err := topics.NewPubsub(n.ctx, logger, cfg) + + _, tc, err := topics.NewPubSub(n.ctx, logger, cfg) if err != nil { return errors.Wrap(err, "could not setup pubsub") } + n.topicsCtrl = tc logger.Debug("topics controller is ready") return nil diff --git a/network/p2p/p2p_sync.go b/network/p2p/p2p_sync.go index 6b810c7d41..74ac3a4e14 100644 --- a/network/p2p/p2p_sync.go +++ b/network/p2p/p2p_sync.go @@ -1,143 +1,25 @@ package p2pv1 import ( - "context" "encoding/hex" "fmt" "math/rand" "time" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/network/commons" - - "github.com/multiformats/go-multistream" - - "github.com/bloxapp/ssv-spec/qbft" - specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" libp2p_protocol "github.com/libp2p/go-libp2p/core/protocol" + "github.com/multiformats/go-multistream" "github.com/pkg/errors" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" ) -func (n *p2pNetwork) SyncHighestDecided(mid spectypes.MessageID) error { - return n.syncer.SyncHighestDecided(context.Background(), n.interfaceLogger, mid, func(msg spectypes.SSVMessage) { - n.msgRouter.Route(n.interfaceLogger, msg) - }) -} - -func (n *p2pNetwork) SyncDecidedByRange(mid spectypes.MessageID, from, to qbft.Height) { - if !n.cfg.FullNode { - return - } - // TODO: uncomment to fix syncing bug! - // if from < to { - // n.logger.Warn("failed to sync decided by range: from is greater than to", - // zap.String("pubkey", hex.EncodeToString(mid.GetPubKey())), - // zap.String("role", mid.GetRoleType().String()), - // zap.Uint64("from", uint64(from)), - // zap.Uint64("to", uint64(to))) - // return - // } - if to > from { - n.interfaceLogger.Warn("failed to sync decided by range: to is higher than from", - zap.Uint64("from", uint64(from)), - zap.Uint64("to", uint64(to))) - return - } - - // TODO: this is a temporary solution to prevent syncing already decided heights. - // Example: Say we received a decided at height 99, and right after we received a decided at height 100 - // before we could advance the controller's height. This would cause the controller to call SyncDecidedByRange. - // However, height 99 is already synced, so temporarily we reject such requests here. - // Note: This isn't ideal because sometimes you do want to sync gaps of 1. 
- const minGap = 2 - if to-from < minGap { - return - } - - err := n.syncer.SyncDecidedByRange(context.Background(), n.interfaceLogger, mid, from, to, func(msg spectypes.SSVMessage) { - n.msgRouter.Route(n.interfaceLogger, msg) - }) - if err != nil { - n.interfaceLogger.Error("failed to sync decided by range", zap.Error(err)) - } -} - -// LastDecided fetches last decided from a random set of peers -func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]p2pprotocol.SyncResult, error) { - const ( - minPeers = 3 - waitTime = time.Second * 24 - ) - if !n.isReady() { - return nil, p2pprotocol.ErrNetworkIsNotReady - } - pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) - peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) - if err != nil { - return nil, errors.Wrap(err, "could not get subset of peers") - } - return n.makeSyncRequest(logger, peers, mid, pid, &message.SyncMessage{ - Params: &message.SyncParams{ - Identifier: mid, - }, - Protocol: message.LastDecidedType, - }) -} - -// GetHistory sync the given range from a set of peers that supports history for the given identifier -func (n *p2pNetwork) GetHistory(logger *zap.Logger, mid spectypes.MessageID, from, to specqbft.Height, targets ...string) ([]p2pprotocol.SyncResult, specqbft.Height, error) { - if from >= to { - return nil, 0, nil - } - - if !n.isReady() { - return nil, 0, p2pprotocol.ErrNetworkIsNotReady - } - protocolID, peerCount := commons.ProtocolID(p2pprotocol.DecidedHistoryProtocol) - peers := make([]peer.ID, 0) - for _, t := range targets { - p, err := peer.Decode(t) - if err != nil { - continue - } - peers = append(peers, p) - } - // if no peers were provided -> select a random set of peers - if len(peers) == 0 { - random, err := n.getSubsetOfPeers(logger, mid.GetPubKey(), peerCount, n.peersWithProtocolsFilter(protocolID)) - if err != nil { - return nil, 0, errors.Wrap(err, "could not get subset of peers") - } - peers = random - } - maxBatchRes := specqbft.Height(n.cfg.MaxBatchResponse) - - var results []p2pprotocol.SyncResult - var err error - currentEnd := to - if to-from > maxBatchRes { - currentEnd = from + maxBatchRes - } - results, err = n.makeSyncRequest(logger, peers, mid, protocolID, &message.SyncMessage{ - Params: &message.SyncParams{ - Height: []specqbft.Height{from, currentEnd}, - Identifier: mid, - }, - Protocol: message.DecidedHistoryType, - }) - if err != nil { - return results, 0, err - } - return results, currentEnd, nil -} - // RegisterHandlers registers the given handlers func (n *p2pNetwork) RegisterHandlers(logger *zap.Logger, handlers ...*p2pprotocol.SyncHandler) { m := make(map[libp2p_protocol.ID][]p2pprotocol.RequestHandler) @@ -274,6 +156,8 @@ func (n *p2pNetwork) makeSyncRequest(logger *zap.Logger, peers []peer.ID, mid sp } // peersWithProtocolsFilter is used to accept peers that supports the given protocols +// +//nolint:unused func (n *p2pNetwork) peersWithProtocolsFilter(protocols ...libp2p_protocol.ID) func(peer.ID) bool { return func(id peer.ID) bool { supported, err := n.host.Network().Peerstore().SupportsProtocols(id, protocols...) 
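Taken together, the p2p_pubsub.go changes above move decoding out of the router: the new validation layer is expected to stash a ready *queue.DecodedSSVMessage in msg.ValidatorData, and Route now receives the listen loop's context instead of a logger. A minimal standalone sketch of the new routing contract; everything here except the interface shape is an illustrative stand-in, not the repo's actual types:

package main

import (
	"context"
	"fmt"
)

// DecodedMessage stands in for queue.DecodedSSVMessage: by the time a message
// reaches the router, the pubsub validator has already decoded it.
type DecodedMessage struct {
	Topic string
	Data  []byte
}

// MessageRouter mirrors the updated contract: context-aware, no logger argument.
type MessageRouter interface {
	Route(ctx context.Context, msg *DecodedMessage)
}

type countingRouter struct{ count uint64 }

func (r *countingRouter) Route(ctx context.Context, msg *DecodedMessage) {
	if ctx.Err() != nil {
		return // the listen loop owns the context and may cancel mid-flight
	}
	r.count++
	fmt.Printf("routed message #%d from topic %s\n", r.count, msg.Topic)
}

func main() {
	var r MessageRouter = &countingRouter{}
	r.Route(context.Background(), &DecodedMessage{Topic: "ssv.v2.example", Data: []byte{0x01}})
}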
diff --git a/network/p2p/p2p_test.go b/network/p2p/p2p_test.go index 9fc132d0ff..d2152c049e 100644 --- a/network/p2p/p2p_test.go +++ b/network/p2p/p2p_test.go @@ -9,6 +9,10 @@ import ( "time" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" @@ -18,8 +22,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/network" - protcolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/protocol/v2/types" + p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" ) func TestGetMaxPeers(t *testing.T) { @@ -118,7 +121,7 @@ func TestP2pNetwork_Stream(t *testing.T) { pk, err := hex.DecodeString(pkHex) require.NoError(t, err) - mid := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) + mid := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) rounds := []specqbft.Round{ 1, 1, 1, 1, 2, 2, @@ -140,7 +143,7 @@ func TestP2pNetwork_Stream(t *testing.T) { <-time.After(time.Second) node := ln.Nodes[0] - res, err := node.LastDecided(logger, mid) + res, err := node.(*p2pNetwork).LastDecided(logger, mid) require.NoError(t, err) select { case err := <-errors: @@ -205,9 +208,30 @@ func TestWaitSubsetOfPeers(t *testing.T) { } } +func (n *p2pNetwork) LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]p2pprotocol.SyncResult, error) { + const ( + minPeers = 3 + waitTime = time.Second * 24 + ) + if !n.isReady() { + return nil, p2pprotocol.ErrNetworkIsNotReady + } + pid, maxPeers := commons.ProtocolID(p2pprotocol.LastDecidedProtocol) + peers, err := waitSubsetOfPeers(logger, n.getSubsetOfPeers, mid.GetPubKey(), minPeers, maxPeers, waitTime, allPeersFilter) + if err != nil { + return nil, errors.Wrap(err, "could not get subset of peers") + } + return n.makeSyncRequest(logger, peers, mid, pid, &message.SyncMessage{ + Params: &message.SyncParams{ + Identifier: mid, + }, + Protocol: message.LastDecidedType, + }) +} + func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes.MessageID, height specqbft.Height, round specqbft.Round, counter *int64, errors chan<- error) { - node.RegisterHandlers(logger, &protcolp2p.SyncHandler{ - Protocol: protcolp2p.LastDecidedProtocol, + node.RegisterHandlers(logger, &p2pprotocol.SyncHandler{ + Protocol: p2pprotocol.LastDecidedProtocol, Handler: func(message *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { atomic.AddInt64(counter, 1) sm := specqbft.SignedMessage{ @@ -235,21 +259,23 @@ func registerHandler(logger *zap.Logger, node network.P2PNetwork, mid spectypes. 
}) } -func createNetworkAndSubscribe(t *testing.T, ctx context.Context, n int, pks ...string) (*LocalNet, []*dummyRouter, error) { +func createNetworkAndSubscribe(t *testing.T, ctx context.Context, nodes int, pks ...string) (*LocalNet, []*dummyRouter, error) { logger := logging.TestLogger(t) - ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), n, n/2-1, false) + ln, err := CreateAndStartLocalNet(ctx, logger.Named("createNetworkAndSubscribe"), nodes, nodes/2-1, false) if err != nil { return nil, nil, err } - if len(ln.Nodes) != n { - return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), n) + if len(ln.Nodes) != nodes { + return nil, nil, errors.Errorf("only %d peers created, expected %d", len(ln.Nodes), nodes) } logger.Debug("created local network") - routers := make([]*dummyRouter, n) + routers := make([]*dummyRouter, nodes) for i, node := range ln.Nodes { - routers[i] = &dummyRouter{i: i} + routers[i] = &dummyRouter{ + i: i, + } node.UseMessageRouter(routers[i]) } @@ -299,9 +325,8 @@ type dummyRouter struct { i int } -func (r *dummyRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { - c := atomic.AddUint64(&r.count, 1) - logger.Debug("got message", zap.Uint64("count", c)) +func (r *dummyRouter) Route(_ context.Context, _ *queue.DecodedSSVMessage) { + atomic.AddUint64(&r.count, 1) } func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { @@ -309,7 +334,7 @@ func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) signedMsg := &specqbft.SignedMessage{ Message: specqbft.Message{ MsgType: specqbft.CommitMsgType, diff --git a/network/p2p/test_utils.go b/network/p2p/test_utils.go index bcfa9ad311..70e862aaa7 100644 --- a/network/p2p/test_utils.go +++ b/network/p2p/test_utils.go @@ -12,12 +12,14 @@ import ( "go.uber.org/zap" "golang.org/x/sync/errgroup" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" "github.com/bloxapp/ssv/network/peers" "github.com/bloxapp/ssv/network/peers/connections/mock" "github.com/bloxapp/ssv/network/testing" + "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/utils/format" "github.com/bloxapp/ssv/utils/rsaencryption" ) @@ -136,6 +138,7 @@ func (ln *LocalNet) NewTestP2pNetwork(ctx context.Context, keys testing.NodeKeys MockGetPrivateKey: keys.OperatorKey, RegisteredOperatorPublicKeyPEMs: []string{}, } + cfg.MessageValidator = validation.NewMessageValidator(networkconfig.TestNetwork) p := New(logger, cfg) err = p.Setup(logger) diff --git a/network/syncing/concurrent.go b/network/syncing/concurrent.go deleted file mode 100644 index d3ddcd2ec1..0000000000 --- a/network/syncing/concurrent.go +++ /dev/null @@ -1,189 +0,0 @@ -package syncing - -import ( - "context" - "fmt" - "sync" - "time" - - "go.uber.org/zap" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" -) - -// Error describes an error that occurred during a syncing operation. -type Error struct { - Operation Operation - Err error -} - -func (e Error) Error() string { - return fmt.Sprintf("%s: %v", e.Operation, e.Err) -} - -// Timeouts is a set of timeouts for each syncing operation. 
-type Timeouts struct { - // SyncHighestDecided is the timeout for SyncHighestDecided. - // Leave zero to not timeout. - SyncHighestDecided time.Duration - - // SyncDecidedByRange is the timeout for SyncDecidedByRange. - // Leave zero to not timeout. - SyncDecidedByRange time.Duration -} - -var DefaultTimeouts = Timeouts{ - SyncHighestDecided: 12 * time.Second, - SyncDecidedByRange: 30 * time.Minute, -} - -// Operation is a syncing operation that has been queued for execution. -type Operation interface { - run(context.Context, *zap.Logger, Syncer) error - timeout(Timeouts) time.Duration -} - -type OperationSyncHighestDecided struct { - ID spectypes.MessageID - Handler MessageHandler -} - -func (o OperationSyncHighestDecided) run(ctx context.Context, logger *zap.Logger, s Syncer) error { - return s.SyncHighestDecided(ctx, logger, o.ID, o.Handler) -} - -func (o OperationSyncHighestDecided) timeout(t Timeouts) time.Duration { - return t.SyncHighestDecided -} - -func (o OperationSyncHighestDecided) String() string { - return fmt.Sprintf("SyncHighestDecided(%s)", o.ID) -} - -type OperationSyncDecidedByRange struct { - ID spectypes.MessageID - From specqbft.Height - To specqbft.Height - Handler MessageHandler -} - -func (o OperationSyncDecidedByRange) run(ctx context.Context, logger *zap.Logger, s Syncer) error { - return s.SyncDecidedByRange(ctx, logger, o.ID, o.From, o.To, o.Handler) -} - -func (o OperationSyncDecidedByRange) timeout(t Timeouts) time.Duration { - return t.SyncDecidedByRange -} - -func (o OperationSyncDecidedByRange) String() string { - return fmt.Sprintf("SyncDecidedByRange(%s, %d, %d)", o.ID, o.From, o.To) -} - -// ConcurrentSyncer is a Syncer that runs the given Syncer's methods concurrently. -type ConcurrentSyncer struct { - syncer Syncer - ctx context.Context - jobs chan Operation - errors chan<- Error - concurrency int - timeouts Timeouts -} - -// NewConcurrent returns a new Syncer that runs the given Syncer's methods concurrently. -// Unlike the standard syncer, syncing methods are non-blocking and return immediately without error. -// concurrency is the number of worker goroutines to spawn. -// errors is a channel to which any errors are sent. Pass nil to discard errors. -func NewConcurrent( - ctx context.Context, - syncer Syncer, - concurrency int, - timeouts Timeouts, - errors chan<- Error, -) *ConcurrentSyncer { - return &ConcurrentSyncer{ - syncer: syncer, - ctx: ctx, - // TODO: make the buffer size configurable or better-yet unbounded? - jobs: make(chan Operation, 128*1024), - errors: errors, - concurrency: concurrency, - timeouts: timeouts, - } -} - -// Run starts the worker goroutines and blocks until the context is done -// and any remaining jobs are finished. -func (s *ConcurrentSyncer) Run(logger *zap.Logger) { - // Spawn worker goroutines. - var wg sync.WaitGroup - for i := 0; i < s.concurrency; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for job := range s.jobs { - s.do(logger, job) - } - }() - } - - // Close the jobs channel when the context is done. - <-s.ctx.Done() - close(s.jobs) - - // Wait for workers to finish their current jobs. - wg.Wait() -} - -func (s *ConcurrentSyncer) do(logger *zap.Logger, job Operation) { - ctx, cancel := context.WithTimeout(s.ctx, job.timeout(s.timeouts)) - defer cancel() - err := job.run(ctx, logger, s.syncer) - if err != nil && s.errors != nil { - s.errors <- Error{ - Operation: job, - Err: err, - } - } -} - -// Queued returns the number of jobs that are queued but not yet started. 
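Run and do above are the core of the deleted type: a buffered jobs channel drained by a fixed number of goroutines, each job bounded by context.WithTimeout. A condensed, self-contained sketch of that pattern, with an illustrative Job type standing in for the Operation interface and the wrapped Syncer:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// Job stands in for Operation: a unit of work that may fail.
type Job func(ctx context.Context) error

// runPool mirrors the shape of ConcurrentSyncer.Run and do: spawn workers,
// drain the buffered channel, bound each job with a timeout, and on parent
// cancellation close the channel and wait for in-flight jobs to finish.
func runPool(ctx context.Context, jobs chan Job, workers int, timeout time.Duration) {
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobs {
				jobCtx, cancel := context.WithTimeout(ctx, timeout)
				if err := job(jobCtx); err != nil {
					fmt.Println("job failed:", err)
				}
				cancel()
			}
		}()
	}
	<-ctx.Done()
	close(jobs) // as in the original, senders must stop before this point
	wg.Wait()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
	defer cancel()
	jobs := make(chan Job, 16)
	jobs <- func(context.Context) error { return nil }
	runPool(ctx, jobs, 2, 100*time.Millisecond)
}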
-func (s *ConcurrentSyncer) Queued() int { - return len(s.jobs) -} - -// Capacity returns the maximum number of jobs that can be queued. -// When Queued() == Capacity(), then the next call will block -// until a job is finished. -func (s *ConcurrentSyncer) Capacity() int { - return cap(s.jobs) -} - -func (s *ConcurrentSyncer) SyncHighestDecided( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - handler MessageHandler, -) error { - s.jobs <- OperationSyncHighestDecided{ - ID: id, - Handler: handler, - } - return nil -} - -func (s *ConcurrentSyncer) SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, -) error { - s.jobs <- OperationSyncDecidedByRange{ - ID: id, - From: from, - To: to, - Handler: handler, - } - return nil -} diff --git a/network/syncing/concurrent_test.go b/network/syncing/concurrent_test.go deleted file mode 100644 index ace426f6a2..0000000000 --- a/network/syncing/concurrent_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package syncing_test - -import ( - "context" - "fmt" - "runtime" - "testing" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/network/syncing" - "github.com/bloxapp/ssv/network/syncing/mocks" -) - -func TestConcurrentSyncer(t *testing.T) { - logger := logging.TestLogger(t) - - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - // Test setup - syncer := mocks.NewMockSyncer(ctrl) - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - syncer.EXPECT().SyncHighestDecided(gomock.Any(), gomock.Any(), id, gomock.Any()).Return(nil) - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - syncer.EXPECT().SyncDecidedByRange(gomock.Any(), gomock.Any(), id, from, to, gomock.Any()).Return(nil) - require.NoError(t, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - - // Test error handling - syncer.EXPECT().SyncHighestDecided(gomock.Any(), gomock.Any(), id, gomock.Any()).Return(fmt.Errorf("test error")) - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Wait for the syncer to finish - cancel() - - // Verify errors. 
- select { - case err := <-errors: - require.IsType(t, syncing.OperationSyncHighestDecided{}, err.Operation) - require.Equal(t, id, err.Operation.(syncing.OperationSyncHighestDecided).ID) - require.Equal(t, "test error", err.Err.Error()) - case <-done: - t.Fatal("error channel should have received an error") - } - <-done -} - -func TestConcurrentSyncerMemoryUsage(t *testing.T) { - logger := logging.TestLogger(t) - - for i := 0; i < 4; i++ { - var before runtime.MemStats - runtime.ReadMemStats(&before) - - // Test setup - syncer := &mockSyncer{} - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - for i := 0; i < 1024*128; i++ { - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - require.NoError(t, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - require.NoError(t, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - } - - // Wait for the syncer to finish - cancel() - <-done - - var after runtime.MemStats - runtime.ReadMemStats(&after) - t.Logf("Allocated: %.2f MB", float64(after.TotalAlloc-before.TotalAlloc)/1024/1024) - } -} - -func BenchmarkConcurrentSyncer(b *testing.B) { - logger := logging.BenchLogger(b) - - for i := 0; i < b.N; i++ { - // Test setup - syncer := &mockSyncer{} - errors := make(chan syncing.Error) - ctx, cancel := context.WithCancel(context.Background()) - concurrency := 2 - s := syncing.NewConcurrent(ctx, syncer, concurrency, syncing.DefaultTimeouts, errors) - - // Run the syncer - done := make(chan struct{}) - go func() { - s.Run(logger) - close(done) - }() - - for i := 0; i < 1024*128; i++ { - // Test SyncHighestDecided - id := spectypes.MessageID{} - handler := newMockMessageHandler() - require.NoError(b, s.SyncHighestDecided(ctx, logger, id, handler.handler)) - - // Test SyncDecidedByRange - from := specqbft.Height(1) - to := specqbft.Height(10) - require.NoError(b, s.SyncDecidedByRange(ctx, logger, id, from, to, handler.handler)) - } - - // Wait for the syncer to finish - cancel() - <-done - } -} diff --git a/network/syncing/mocks/syncer.go b/network/syncing/mocks/syncer.go deleted file mode 100644 index 1aa3a3d55d..0000000000 --- a/network/syncing/mocks/syncer.go +++ /dev/null @@ -1,127 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./syncer.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - reflect "reflect" - - qbft "github.com/bloxapp/ssv-spec/qbft" - types "github.com/bloxapp/ssv-spec/types" - syncing "github.com/bloxapp/ssv/network/syncing" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - gomock "github.com/golang/mock/gomock" - zap "go.uber.org/zap" -) - -// MockSyncer is a mock of Syncer interface. -type MockSyncer struct { - ctrl *gomock.Controller - recorder *MockSyncerMockRecorder -} - -// MockSyncerMockRecorder is the mock recorder for MockSyncer. -type MockSyncerMockRecorder struct { - mock *MockSyncer -} - -// NewMockSyncer creates a new mock instance. 
-func NewMockSyncer(ctrl *gomock.Controller) *MockSyncer { - mock := &MockSyncer{ctrl: ctrl} - mock.recorder = &MockSyncerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSyncer) EXPECT() *MockSyncerMockRecorder { - return m.recorder -} - -// SyncDecidedByRange mocks base method. -func (m *MockSyncer) SyncDecidedByRange(ctx context.Context, logger *zap.Logger, id types.MessageID, from, to qbft.Height, handler syncing.MessageHandler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncDecidedByRange", ctx, logger, id, from, to, handler) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncDecidedByRange indicates an expected call of SyncDecidedByRange. -func (mr *MockSyncerMockRecorder) SyncDecidedByRange(ctx, logger, id, from, to, handler interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncDecidedByRange", reflect.TypeOf((*MockSyncer)(nil).SyncDecidedByRange), ctx, logger, id, from, to, handler) -} - -// SyncHighestDecided mocks base method. -func (m *MockSyncer) SyncHighestDecided(ctx context.Context, logger *zap.Logger, id types.MessageID, handler syncing.MessageHandler) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncHighestDecided", ctx, logger, id, handler) - ret0, _ := ret[0].(error) - return ret0 -} - -// SyncHighestDecided indicates an expected call of SyncHighestDecided. -func (mr *MockSyncerMockRecorder) SyncHighestDecided(ctx, logger, id, handler interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncHighestDecided", reflect.TypeOf((*MockSyncer)(nil).SyncHighestDecided), ctx, logger, id, handler) -} - -// MockNetwork is a mock of Network interface. -type MockNetwork struct { - ctrl *gomock.Controller - recorder *MockNetworkMockRecorder -} - -// MockNetworkMockRecorder is the mock recorder for MockNetwork. -type MockNetworkMockRecorder struct { - mock *MockNetwork -} - -// NewMockNetwork creates a new mock instance. -func NewMockNetwork(ctrl *gomock.Controller) *MockNetwork { - mock := &MockNetwork{ctrl: ctrl} - mock.recorder = &MockNetworkMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockNetwork) EXPECT() *MockNetworkMockRecorder { - return m.recorder -} - -// GetHistory mocks base method. -func (m *MockNetwork) GetHistory(logger *zap.Logger, id types.MessageID, from, to qbft.Height, targets ...string) ([]protocolp2p.SyncResult, qbft.Height, error) { - m.ctrl.T.Helper() - varargs := []interface{}{logger, id, from, to} - for _, a := range targets { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetHistory", varargs...) - ret0, _ := ret[0].([]protocolp2p.SyncResult) - ret1, _ := ret[1].(qbft.Height) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetHistory indicates an expected call of GetHistory. -func (mr *MockNetworkMockRecorder) GetHistory(logger, id, from, to interface{}, targets ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{logger, id, from, to}, targets...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistory", reflect.TypeOf((*MockNetwork)(nil).GetHistory), varargs...) -} - -// LastDecided mocks base method. 
-func (m *MockNetwork) LastDecided(logger *zap.Logger, id types.MessageID) ([]protocolp2p.SyncResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastDecided", logger, id) - ret0, _ := ret[0].([]protocolp2p.SyncResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LastDecided indicates an expected call of LastDecided. -func (mr *MockNetworkMockRecorder) LastDecided(logger, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastDecided", reflect.TypeOf((*MockNetwork)(nil).LastDecided), logger, id) -} diff --git a/network/syncing/syncer.go b/network/syncing/syncer.go deleted file mode 100644 index db36a94028..0000000000 --- a/network/syncing/syncer.go +++ /dev/null @@ -1,207 +0,0 @@ -package syncing - -import ( - "context" - "time" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/logging/fields" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" - "github.com/bloxapp/ssv/utils/tasks" -) - -//go:generate mockgen -package=mocks -destination=./mocks/syncer.go -source=./syncer.go - -// MessageHandler reacts to a message received from Syncer. -type MessageHandler func(msg spectypes.SSVMessage) - -// Syncer handles the syncing of decided messages. -type Syncer interface { - SyncHighestDecided(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, handler MessageHandler) error - SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, - ) error -} - -// Network is a subset of protocolp2p.Syncer, required by Syncer to retrieve messages from peers. -type Network interface { - LastDecided(logger *zap.Logger, id spectypes.MessageID) ([]protocolp2p.SyncResult, error) - GetHistory( - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - targets ...string, - ) ([]protocolp2p.SyncResult, specqbft.Height, error) -} - -type syncer struct { - network Network -} - -// New returns a standard implementation of Syncer. 
-func New(network Network) Syncer { - return &syncer{ - network: network, - } -} - -func (s *syncer) SyncHighestDecided( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - handler MessageHandler, -) error { - if ctx.Err() != nil { - return ctx.Err() - } - - logger = logger.With( - zap.String("what", "SyncHighestDecided"), - fields.PubKey(id.GetPubKey()), - fields.Role(id.GetRoleType())) - - lastDecided, err := s.network.LastDecided(logger, id) - if err != nil { - logger.Debug("last decided sync failed", zap.Error(err)) - return errors.Wrap(err, "could not sync last decided") - } - if len(lastDecided) == 0 { - logger.Debug("no messages were synced") - return nil - } - - results := protocolp2p.SyncResults(lastDecided) - var maxHeight specqbft.Height - results.ForEachSignedMessage(func(m *specqbft.SignedMessage) (stop bool) { - if ctx.Err() != nil { - return true - } - if m.Message.Height > maxHeight { - maxHeight = m.Message.Height - } - raw, err := m.Encode() - if err != nil { - logger.Debug("could not encode signed message", zap.Error(err)) - return false - } - handler(spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: id, - Data: raw, - }) - return false - }) - logger.Debug("synced last decided", zap.Uint64("highest_height", uint64(maxHeight)), zap.Int("messages", len(lastDecided))) - return nil -} - -func (s *syncer) SyncDecidedByRange( - ctx context.Context, - logger *zap.Logger, - id spectypes.MessageID, - from, to specqbft.Height, - handler MessageHandler, -) error { - if ctx.Err() != nil { - return ctx.Err() - } - - logger = logger.With( - zap.String("what", "SyncDecidedByRange"), - fields.PubKey(id.GetPubKey()), - fields.Role(id.GetRoleType()), - zap.Uint64("from", uint64(from)), - zap.Uint64("to", uint64(to))) - logger.Debug("syncing decided by range") - - err := s.getDecidedByRange( - context.Background(), - logger, - id, - from, - to, - func(sm *specqbft.SignedMessage) error { - raw, err := sm.Encode() - if err != nil { - logger.Debug("could not encode signed message", zap.Error(err)) - return nil - } - handler(spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: id, - Data: raw, - }) - return nil - }, - ) - if err != nil { - logger.Debug("sync failed", zap.Error(err)) - } - return err -} - -// getDecidedByRange calls GetHistory in batches to retrieve all decided messages in the given range. 
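A stripped-down sketch of just that control flow: advance a tail cursor through the range, let each batch report how far it got, and skip heights an earlier batch already delivered. Heights are plain ints and the fetcher is stubbed; the retries and logging of the real (now deleted) implementation, which follows below, are omitted:

package main

import "fmt"

// fetchRange drives a GetHistory-style fetcher in batches over [from, to).
func fetchRange(from, to int, getBatch func(tail, to int) ([]int, int, error)) error {
	visited := make(map[int]struct{})
	tail := from
	for tail < to {
		heights, newTail, err := getBatch(tail, to)
		if err != nil {
			return err
		}
		for _, h := range heights {
			if _, ok := visited[h]; ok {
				continue // already handled by a previous batch
			}
			visited[h] = struct{}{}
			fmt.Println("handled height", h)
		}
		tail = newTail
	}
	return nil
}

func main() {
	// Stub fetcher: serves at most two heights per call and advances the tail.
	_ = fetchRange(0, 5, func(tail, to int) ([]int, int, error) {
		end := tail + 2
		if end > to {
			end = to
		}
		var hs []int
		for h := tail; h < end; h++ {
			hs = append(hs, h)
		}
		return hs, end, nil
	})
}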
-func (s *syncer) getDecidedByRange( - ctx context.Context, - logger *zap.Logger, - mid spectypes.MessageID, - from, to specqbft.Height, - handler func(*specqbft.SignedMessage) error, -) error { - const maxRetries = 2 - - var ( - visited = make(map[specqbft.Height]struct{}) - msgs []protocolp2p.SyncResult - ) - - tail := from - var err error - for tail < to { - if ctx.Err() != nil { - return ctx.Err() - } - err := tasks.RetryWithContext(ctx, func() error { - start := time.Now() - msgs, tail, err = s.network.GetHistory(logger, mid, tail, to) - if err != nil { - return err - } - handled := 0 - protocolp2p.SyncResults(msgs).ForEachSignedMessage(func(m *specqbft.SignedMessage) (stop bool) { - if ctx.Err() != nil { - return true - } - if _, ok := visited[m.Message.Height]; ok { - return false - } - if err := handler(m); err != nil { - logger.Warn("could not handle signed message") - } - handled++ - visited[m.Message.Height] = struct{}{} - return false - }) - logger.Debug("received and processed history batch", - zap.Int64("tail", int64(tail)), - fields.Duration(start), - zap.Int("results_count", len(msgs)), - fields.SyncResults(msgs), - zap.Int("handled", handled)) - return nil - }, maxRetries) - if err != nil { - return err - } - } - - return nil -} diff --git a/network/syncing/syncer_test.go b/network/syncing/syncer_test.go deleted file mode 100644 index e0f99c3fb4..0000000000 --- a/network/syncing/syncer_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package syncing_test - -import ( - "context" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/network/syncing" -) - -type mockSyncer struct{} - -func (m *mockSyncer) SyncHighestDecided(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, handler syncing.MessageHandler) error { - return nil -} - -func (m *mockSyncer) SyncDecidedByRange(ctx context.Context, logger *zap.Logger, id spectypes.MessageID, from specqbft.Height, to specqbft.Height, handler syncing.MessageHandler) error { - return nil -} - -type mockMessageHandler struct { - calls int - handler syncing.MessageHandler -} - -func newMockMessageHandler() *mockMessageHandler { - m := &mockMessageHandler{} - m.handler = func(msg spectypes.SSVMessage) { - m.calls++ - } - return m -} diff --git a/network/topics/controller.go b/network/topics/controller.go index 3ac1dea7e6..bbc9e3f821 100644 --- a/network/topics/controller.go +++ b/network/topics/controller.go @@ -37,7 +37,11 @@ type Controller interface { } // PubsubMessageHandler handles incoming messages -type PubsubMessageHandler func(string, *pubsub.Message) error +type PubsubMessageHandler func(context.Context, string, *pubsub.Message) error + +type messageValidator interface { + ValidatorForTopic(topic string) func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult +} // topicsCtrl implements Controller type topicsCtrl struct { @@ -45,25 +49,31 @@ type topicsCtrl struct { logger *zap.Logger // struct logger to implement i.Closer ps *pubsub.PubSub // scoreParamsFactory is a function that helps to set scoring params on topics - scoreParamsFactory func(string) *pubsub.TopicScoreParams - msgValidatorFactory func(string) MsgValidatorFunc - msgHandler PubsubMessageHandler - subFilter SubFilter + scoreParamsFactory func(string) *pubsub.TopicScoreParams + msgValidator messageValidator + msgHandler PubsubMessageHandler + subFilter SubFilter container *topicsContainer } // NewTopicsController creates an instance of 
Controller -func NewTopicsController(ctx context.Context, logger *zap.Logger, msgHandler PubsubMessageHandler, - msgValidatorFactory func(string) MsgValidatorFunc, subFilter SubFilter, pubSub *pubsub.PubSub, - scoreParams func(string) *pubsub.TopicScoreParams) Controller { +func NewTopicsController( + ctx context.Context, + logger *zap.Logger, + msgHandler PubsubMessageHandler, + msgValidator messageValidator, + subFilter SubFilter, + pubSub *pubsub.PubSub, + scoreParams func(string) *pubsub.TopicScoreParams, +) Controller { ctrl := &topicsCtrl{ - ctx: ctx, - logger: logger, - ps: pubSub, - scoreParamsFactory: scoreParams, - msgValidatorFactory: msgValidatorFactory, - msgHandler: msgHandler, + ctx: ctx, + logger: logger, + ps: pubSub, + scoreParamsFactory: scoreParams, + msgValidator: msgValidator, + msgHandler: msgHandler, subFilter: subFilter, } @@ -171,7 +181,7 @@ func (ctrl *topicsCtrl) Broadcast(name string, data []byte, timeout time.Duratio func (ctrl *topicsCtrl) Unsubscribe(logger *zap.Logger, name string, hard bool) error { ctrl.container.Unsubscribe(name) - if ctrl.msgValidatorFactory != nil { + if ctrl.msgValidator != nil { err := ctrl.ps.UnregisterTopicValidator(name) if err != nil { logger.Debug("could not unregister msg validator", zap.String("topic", name), zap.Error(err)) @@ -207,7 +217,9 @@ func (ctrl *topicsCtrl) start(logger *zap.Logger, name string, sub *pubsub.Subsc func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) error { ctx, cancel := context.WithCancel(ctrl.ctx) defer cancel() + topicName := sub.Topic() + logger = logger.With(zap.String("topic", topicName)) logger.Debug("start listening to topic") for ctx.Err() == nil { @@ -235,7 +247,7 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err ).Inc() } - if err := ctrl.msgHandler(topicName, msg); err != nil { + if err := ctrl.msgHandler(ctx, topicName, msg); err != nil { logger.Debug("could not handle msg", zap.Error(err)) } } @@ -244,7 +256,7 @@ func (ctrl *topicsCtrl) listen(logger *zap.Logger, sub *pubsub.Subscription) err // setupTopicValidator registers the topic validator func (ctrl *topicsCtrl) setupTopicValidator(name string) error { - if ctrl.msgValidatorFactory != nil { + if ctrl.msgValidator != nil { // first try to unregister in case there is already a msg validator for that topic (e.g. fork scenario) _ = ctrl.ps.UnregisterTopicValidator(name) @@ -252,7 +264,7 @@ func (ctrl *topicsCtrl) setupTopicValidator(name string) error { // Optional: set a timeout for message validation // opts = append(opts, pubsub.WithValidatorTimeout(time.Second)) - err := ctrl.ps.RegisterTopicValidator(name, ctrl.msgValidatorFactory(name), opts...) + err := ctrl.ps.RegisterTopicValidator(name, ctrl.msgValidator.ValidatorForTopic(name), opts...) 
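	// Note: this call is now the single integration point between the
	// topics controller and message validation. libp2p pubsub runs the
	// per-topic validator on every message for this topic before it is
	// delivered or forwarded, so the ValidatorForTopic result decides
	// accept, ignore, or reject per topic.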
if err != nil { return errors.Wrap(err, "could not register topic validator") } diff --git a/network/topics/controller_test.go b/network/topics/controller_test.go index bc1e028cc4..4a09584cfb 100644 --- a/network/topics/controller_test.go +++ b/network/topics/controller_test.go @@ -2,61 +2,94 @@ package topics import ( "context" + "encoding/base64" "encoding/hex" - "fmt" + "encoding/json" + "math" "sync" "sync/atomic" "testing" "time" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/network/commons" - - "github.com/bloxapp/ssv/protocol/v2/types" - "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/host" libp2pnetwork "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/network/commons" "github.com/bloxapp/ssv/network/discovery" + "github.com/bloxapp/ssv/networkconfig" ) func TestTopicManager(t *testing.T) { logger := logging.TestLogger(t) - nPeers := 4 - - pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", - "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", - "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", - "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", - "a1169bd8407279d9e56b8cefafa37449afd6751f94d1da6bc8145b96d7ad2940184d506971291cd55ae152f9fc65b146", - "80ff2cfb8fd80ceafbb3c331f271a9f9ce0ed3e360087e314d0a8775e86fa7cd19c999b821372ab6419cde376e032ff6", - "a01909aac48337bab37c0dba395fb7495b600a53c58059a251d00b4160b9da74c62f9c4e9671125c59932e7bb864fd3d", - "a4fc8c859ed5c10d7a1ff9fb111b76df3f2e0a6cbe7d0c58d3c98973c0ff160978bc9754a964b24929fff486ebccb629"} - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - peers := newPeers(ctx, logger, t, nPeers, false, true) - baseTest(t, ctx, logger, peers, pks, 1, 2) + + t.Run("happy flow", func(t *testing.T) { + nPeers := 4 + + pks := []string{"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", + "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", + "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", + "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", + "a1169bd8407279d9e56b8cefafa37449afd6751f94d1da6bc8145b96d7ad2940184d506971291cd55ae152f9fc65b146", + "80ff2cfb8fd80ceafbb3c331f271a9f9ce0ed3e360087e314d0a8775e86fa7cd19c999b821372ab6419cde376e032ff6", + "a01909aac48337bab37c0dba395fb7495b600a53c58059a251d00b4160b9da74c62f9c4e9671125c59932e7bb864fd3d", + "a4fc8c859ed5c10d7a1ff9fb111b76df3f2e0a6cbe7d0c58d3c98973c0ff160978bc9754a964b24929fff486ebccb629"} + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validator := validation.NewMessageValidator(networkconfig.TestNetwork) + + peers := newPeers(ctx, logger, t, nPeers, validator, true, nil) + baseTest(t, ctx, logger, peers, pks, 1, 2) + }) + + t.Run("banning peer", func(t *testing.T) { + t.Skip() // TODO: finish the test + + pks := []string{ + 
"b768cdc2b2e0a859052bf04d1cd66383c96d95096a5287d08151494ce709556ba39c1300fbb902a0e2ebb7c31dc4e400", + "824b9024767a01b56790a72afb5f18bb0f97d5bddb946a7bd8dd35cc607c35a4d76be21f24f484d0d478b99dc63ed170", + "9340b7b80983a412bbb42cad6f992e06983d53deb41166ed5978dcbfa3761f347b237ad446d7cb4a4d0a5cca78c2ce8a", + "a5abb232568fc869765da01688387738153f3ad6cc4e635ab282c5d5cfce2bba2351f03367103090804c5243dc8e229b", + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + validator := validation.NewMessageValidator(networkconfig.TestNetwork) + + scoreMap := map[peer.ID]*pubsub.PeerScoreSnapshot{} + var scoreMapMu sync.Mutex + + scoreInspector := func(m map[peer.ID]*pubsub.PeerScoreSnapshot) { + b, _ := json.Marshal(m) + t.Logf("peer scores: %v", string(b)) + + scoreMapMu.Lock() + defer scoreMapMu.Unlock() + + scoreMap = m + } + + const nPeers = 4 + peers := newPeers(ctx, logger, t, nPeers, validator, true, scoreInspector) + banningTest(t, ctx, logger, peers, pks, scoreMap, &scoreMapMu) + }) } func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, pks []string, minMsgCount, maxMsgCount int) { nValidators := len(pks) // nPeers := len(peers) - validatorTopic := func(pkhex string) string { - pk, err := hex.DecodeString(pkhex) - if err != nil { - return "invalid" - } - return commons.ValidatorTopicID(pk)[0] - } - t.Log("subscribing to topics") // listen to topics for _, pk := range pks { @@ -85,7 +118,7 @@ func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, wg.Add(1) go func(p *P, pk string, pi int) { defer wg.Done() - msg, err := dummyMsg(pk, pi%4) + msg, err := dummyMsg(pk, pi%4, false) require.NoError(t, err) raw, err := msg.Encode() require.NoError(t, err) @@ -146,6 +179,109 @@ func baseTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, wg.Wait() } +func banningTest(t *testing.T, ctx context.Context, logger *zap.Logger, peers []*P, pks []string, scoreMap map[peer.ID]*pubsub.PeerScoreSnapshot, scoreMapMu *sync.Mutex) { + t.Log("subscribing to topics") + + for _, pk := range pks { + for _, p := range peers { + require.NoError(t, p.tm.Subscribe(logger, validatorTopic(pk))) + } + } + + // wait for the peers to join topics + <-time.After(3 * time.Second) + + t.Log("checking initial scores") + for _, pk := range pks { + for _, p := range peers { + peerList, err := p.tm.Peers(pk) + require.NoError(t, err) + + for _, pid := range peerList { + scoreMapMu.Lock() + v, ok := scoreMap[pid] + scoreMapMu.Unlock() + + require.True(t, ok) + require.Equal(t, 0, v.Score) + } + } + } + + t.Log("broadcasting messages") + + const invalidMessagesCount = 10 + + // TODO: get current default score, send an invalid rejected message, check the score; then run 10 of them and check the score; then check valid message + + invalidMessages, err := msgSequence(pks[0], invalidMessagesCount, len(pks), true) + require.NoError(t, err) + + var wg sync.WaitGroup + // publish some messages + for i, msg := range invalidMessages { + wg.Add(1) + go func(p *P, pk string, msg *spectypes.SSVMessage) { + defer wg.Done() + + raw, err := msg.Encode() + require.NoError(t, err) + + require.NoError(t, p.tm.Broadcast(validatorTopic(pk), raw, time.Second*10)) + + <-time.After(time.Second * 5) + }(peers[0], pks[i%len(pks)], msg) + } + wg.Wait() + + <-time.After(5 * time.Second) + + t.Log("checking final scores") + for _, pk := range pks { + for _, p := range peers { + peerList, err := p.tm.Peers(pk) + require.NoError(t, err) + + for _, pid := range peerList 
{ + scoreMapMu.Lock() + v, ok := scoreMap[pid] + scoreMapMu.Unlock() + + require.True(t, ok) + require.Equal(t, 0, v.Score) // TODO: score should change + } + } + } + + //t.Log("unsubscribing") + //// unsubscribing multiple times for each topic + //wg.Add(1) + //go func(p *P, pk string) { + // defer wg.Done() + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // go func(p *P) { + // <-time.After(time.Millisecond) + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // }(p) + // wg.Add(1) + // go func(p *P) { + // defer wg.Done() + // <-time.After(time.Millisecond * 50) + // require.NoError(t, p.tm.Unsubscribe(logger, validatorTopic(pk), false)) + // }(p) + //}(peer, pk) + // + //wg.Wait() +} + +func validatorTopic(pkhex string) string { + pk, err := hex.DecodeString(pkhex) + if err != nil { + return "invalid" + } + return commons.ValidatorTopicID(pk)[0] +} + type P struct { host host.Host ps *pubsub.PubSub @@ -181,10 +317,10 @@ func (p *P) saveMsg(t string, msg *pubsub.Message) { } // TODO: use p2p/testing -func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgValidator, msgID bool) []*P { +func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgValidator validation.MessageValidator, msgID bool, scoreInspector pubsub.ExtendedPeerScoreInspectFn) []*P { peers := make([]*P, n) for i := 0; i < n; i++ { - peers[i] = newPeer(ctx, logger, t, msgValidator, msgID) + peers[i] = newPeer(ctx, logger, t, msgValidator, msgID, scoreInspector) } t.Logf("%d peers were created", n) th := uint64(n/2) + uint64(n/4) @@ -203,7 +339,7 @@ func newPeers(ctx context.Context, logger *zap.Logger, t *testing.T, n int, msgV return peers } -func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator, msgID bool) *P { +func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator validation.MessageValidator, msgID bool, scoreInspector pubsub.ExtendedPeerScoreInspectFn) *P { h, err := libp2p.New(libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/0")) require.NoError(t, err) ds, err := discovery.NewLocalDiscovery(ctx, logger, h) @@ -215,11 +351,11 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator midHandler = NewMsgIDHandler(ctx, 2*time.Minute) go midHandler.Start() } - cfg := &PububConfig{ + cfg := &PubSubConfig{ Host: h, TraceLog: false, MsgIDHandler: midHandler, - MsgHandler: func(topic string, msg *pubsub.Message) error { + MsgHandler: func(_ context.Context, topic string, msg *pubsub.Message) error { p.saveMsg(topic, msg) return nil }, @@ -228,15 +364,13 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator IPColocationWeight: 0, OneEpochDuration: time.Minute, }, + MsgValidator: msgValidator, + ScoreInspector: scoreInspector, + ScoreInspectorInterval: 100 * time.Millisecond, // TODO: add mock for peers.ScoreIndex } - // - if msgValidator { - cfg.MsgValidatorFactory = func(s string) MsgValidatorFunc { - return NewSSVMsgValidator() - } - } - ps, tm, err := NewPubsub(ctx, logger, cfg) + + ps, tm, err := NewPubSub(ctx, logger, cfg) require.NoError(t, err) p = &P{ @@ -258,28 +392,63 @@ func newPeer(ctx context.Context, logger *zap.Logger, t *testing.T, msgValidator return p } -func dummyMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { +func msgSequence(pkHex string, n, committeeSize int, malformed bool) ([]*spectypes.SSVMessage, error) { + var messages []*spectypes.SSVMessage + + for i := 0; i < n; i++ { + height 
:= i * committeeSize + msg, err := dummyMsg(pkHex, height, malformed) + if err != nil { + return nil, err + } + + messages = append(messages, msg) + } + + return messages, nil +} + +func dummyMsg(pkHex string, height int, malformed bool) (*spectypes.SSVMessage, error) { pk, err := hex.DecodeString(pkHex) if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) - msgData := fmt.Sprintf(`{ - "message": { - "type": 3, - "round": 2, - "identifier": "%s", - "height": %d, - "value": "bk0iAAAAAAACAAAAAAAAAAbYXFSt2H7SQd5q5u+N0bp6PbbPTQjU25H1QnkbzTECahIBAAAAAADmi+NJfvXZ3iXp2cfs0vYVW+EgGD7DTTvr5EkLtiWq8WsSAQAAAAAAIC8dZTEdD3EvE38B9kDVWkSLy40j0T+TtSrrrBqVjo4=" - }, - "signature": "sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN", - "signer_ids": [1,3,4] - }`, id, height) - return &spectypes.SSVMessage{ + + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + signature, err := base64.StdEncoding.DecodeString("sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN") + if err != nil { + return nil, err + } + + signedMessage := specqbft.SignedMessage{ + Signature: signature, + Signers: []spectypes.OperatorID{1, 3, 4}, + Message: specqbft.Message{ + MsgType: specqbft.RoundChangeMsgType, + Height: specqbft.Height(height), + Round: 2, + Identifier: id[:], + Root: [32]byte{}, + }, + FullData: nil, + } + + msgData, err := signedMessage.Encode() + if err != nil { + return nil, err + } + + ssvMsg := &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, - Data: []byte(msgData), - }, nil + Data: msgData, + } + + if malformed { + ssvMsg.MsgType = math.MaxUint64 + } + + return ssvMsg, nil } // diff --git a/network/topics/metrics.go b/network/topics/metrics.go index 53c651967e..7df570090a 100644 --- a/network/topics/metrics.go +++ b/network/topics/metrics.go @@ -6,15 +6,12 @@ import ( "go.uber.org/zap" ) +// TODO: replace with new metrics var ( metricPubsubTrace = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:network:pubsub:trace", Help: "Traces of pubsub messages", }, []string{"type"}) - metricPubsubMsgValidationResults = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "ssv:network:pubsub:msg:validation", - Help: "Traces of pubsub message validation results", - }, []string{"type"}) metricPubsubOutbound = promauto.NewCounterVec(prometheus.CounterOpts{ Name: "ssv:p2p:pubsub:msg:out", Help: "Count broadcasted messages", @@ -23,10 +20,6 @@ var ( Name: "ssv:p2p:pubsub:msg:in", Help: "Count incoming messages", }, []string{"topic", "msg_type"}) - metricPubsubActiveMsgValidation = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Name: "ssv:p2p:pubsub:msg:val:active", - Help: "Count active message validation", - }, []string{"topic"}) metricPubsubPeerScoreInspect = promauto.NewGaugeVec(prometheus.GaugeOpts{ Name: "ssv:p2p:pubsub:score:inspect", Help: "Gauge for negative peer scores", @@ -38,30 +31,13 @@ func init() { if err := prometheus.Register(metricPubsubTrace); err != nil { logger.Debug("could not register prometheus collector") } - if err := prometheus.Register(metricPubsubMsgValidationResults); err != nil { - logger.Debug("could not register prometheus collector") - } if err := prometheus.Register(metricPubsubOutbound); err != nil { logger.Debug("could not register prometheus collector") } if err := 
prometheus.Register(metricPubsubInbound); err != nil { logger.Debug("could not register prometheus collector") } - if err := prometheus.Register(metricPubsubActiveMsgValidation); err != nil { - logger.Debug("could not register prometheus collector") - } if err := prometheus.Register(metricPubsubPeerScoreInspect); err != nil { logger.Debug("could not register prometheus collector") } } - -type msgValidationResult string - -var ( - validationResultNoData msgValidationResult = "no_data" - validationResultEncoding msgValidationResult = "encoding" -) - -func reportValidationResult(result msgValidationResult) { - metricPubsubMsgValidationResults.WithLabelValues(string(result)).Inc() -} diff --git a/network/topics/msg_validator.go b/network/topics/msg_validator.go deleted file mode 100644 index f1329fa698..0000000000 --- a/network/topics/msg_validator.go +++ /dev/null @@ -1,67 +0,0 @@ -package topics - -import ( - "context" - - pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/bloxapp/ssv/network/commons" -) - -// MsgValidatorFunc represents a message validator -type MsgValidatorFunc = func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult - -// NewSSVMsgValidator creates a new msg validator that validates message structure, -// and checks that the message was sent on the right topic. -// TODO: enable post SSZ change, remove logs, break into smaller validators? -func NewSSVMsgValidator() func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - return func(ctx context.Context, p peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult { - topic := pmsg.GetTopic() - metricPubsubActiveMsgValidation.WithLabelValues(topic).Inc() - defer metricPubsubActiveMsgValidation.WithLabelValues(topic).Dec() - if len(pmsg.GetData()) == 0 { - reportValidationResult(validationResultNoData) - return pubsub.ValidationReject - } - msg, err := commons.DecodeNetworkMsg(pmsg.GetData()) - if err != nil { - // can't decode message - // logger.Debug("invalid: can't decode message", zap.Error(err)) - reportValidationResult(validationResultEncoding) - return pubsub.ValidationReject - } - if msg == nil { - reportValidationResult(validationResultEncoding) - return pubsub.ValidationReject - } - pmsg.ValidatorData = *msg - return pubsub.ValidationAccept - - // Check if the message was sent on the right topic. 
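The decode-and-stash idiom above (populate pmsg.ValidatorData on accept so handlers never decode twice) appears to survive the move into the validation package, which now stashes the decoded message for reuse. A generic sketch of the idiom against the go-libp2p-pubsub API; the decode parameter is an abstract stand-in for commons.DecodeNetworkMsg or the new package's decoding step:

package sketch

import (
	"context"

	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/peer"
)

// decodeValidator returns a topic validator that rejects empty or
// undecodable messages and stashes the decoded value on the message.
func decodeValidator(decode func([]byte) (any, error)) func(context.Context, peer.ID, *pubsub.Message) pubsub.ValidationResult {
	return func(_ context.Context, _ peer.ID, pmsg *pubsub.Message) pubsub.ValidationResult {
		if len(pmsg.GetData()) == 0 {
			return pubsub.ValidationReject
		}
		m, err := decode(pmsg.GetData())
		if err != nil || m == nil {
			return pubsub.ValidationReject
		}
		pmsg.ValidatorData = m // handlers read this instead of re-decoding
		return pubsub.ValidationAccept
	}
}

The rest of the deleted validator, below, is the commented-out topic check that was never enabled.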
- // currentTopic := pmsg.GetTopic() - // currentTopicBaseName := fork.GetTopicBaseName(currentTopic) - // topics := fork.ValidatorTopicID(msg.GetID().GetPubKey()) - // for _, tp := range topics { - // if tp == currentTopicBaseName { - // reportValidationResult(validationResultValid) - // return pubsub.ValidationAccept - // } - //} - // reportValidationResult(validationResultTopic) - // return pubsub.ValidationReject - } -} - -//// CombineMsgValidators executes multiple validators -// func CombineMsgValidators(validators ...MsgValidatorFunc) MsgValidatorFunc { -// return func(ctx context.Context, p peer.ID, msg *pubsub.Message) pubsub.ValidationResult { -// res := pubsub.ValidationAccept -// for _, v := range validators { -// if res = v(ctx, p, msg); res == pubsub.ValidationReject { -// break -// } -// } -// return res -// } -//} diff --git a/network/topics/msg_validator_test.go b/network/topics/msg_validator_test.go index 3a4f6b2081..dd66fb8312 100644 --- a/network/topics/msg_validator_test.go +++ b/network/topics/msg_validator_test.go @@ -2,44 +2,69 @@ package topics import ( "context" - "encoding/hex" - "fmt" "testing" + v1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/herumi/bls-eth-go-binary/bls" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" pubsub "github.com/libp2p/go-libp2p-pubsub" ps_pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network/commons" - "github.com/bloxapp/ssv/protocol/v2/types" - "github.com/bloxapp/ssv/utils/threshold" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/storage" + beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/storage/basedb" + "github.com/bloxapp/ssv/storage/kv" ) func TestMsgValidator(t *testing.T) { - pks := createSharePublicKeys(4) - mv := NewSSVMsgValidator() + logger := zaptest.NewLogger(t) + db, err := kv.NewInMemory(logger, basedb.Options{}) + require.NoError(t, err) + + ns, err := storage.NewNodeStorage(logger, db) + require.NoError(t, err) + + ks := spectestingutils.Testing4SharesSet() + share := &ssvtypes.SSVShare{ + Share: *spectestingutils.TestingShare(ks), + Metadata: ssvtypes.Metadata{ + BeaconMetadata: &beaconprotocol.ValidatorMetadata{ + Status: v1.ValidatorStateActiveOngoing, + }, + Liquidated: false, + }, + } + require.NoError(t, ns.Shares().Save(nil, share)) + + mv := validation.NewMessageValidator(networkconfig.TestNetwork, validation.WithShareStorage(ns.Shares())) require.NotNil(t, mv) + slot := networkconfig.TestNetwork.Beacon.GetBeaconNetwork().EstimatedCurrentSlot() + t.Run("valid consensus msg", func(t *testing.T) { - pkHex := pks[0] - msg, err := dummySSVConsensusMsg(pkHex, 15160) + msg, err := dummySSVConsensusMsg(share.ValidatorPubKey, qbft.Height(slot)) require.NoError(t, err) + raw, err := msg.Encode() require.NoError(t, err) - pk, err := hex.DecodeString(pkHex) - require.NoError(t, err) - topics := commons.ValidatorTopicID(pk) + + topics := commons.ValidatorTopicID(share.ValidatorPubKey) pmsg := newPBMsg(raw, commons.GetTopicFullName(topics[0]), []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r")) - res := mv(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) - require.Equal(t, res, 
pubsub.ValidationAccept) + res := mv.ValidatePubsubMessage(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) + require.Equal(t, pubsub.ValidationAccept, res) }) // TODO: enable once topic validation is in place - // t.Run("wrong topic", func(t *testing.T) { + //t.Run("wrong topic", func(t *testing.T) { // pkHex := "b5de683dbcb3febe8320cc741948b9282d59b75a6970ed55d6f389da59f26325331b7ea0e71a2552373d0debb6048b8a" - // msg, err := dummySSVConsensusMsg(pkHex, 15160) + // msg, err := dummySSVConsensusMsg(share.ValidatorPubKey, 15160) // require.NoError(t, err) // raw, err := msg.Encode() // require.NoError(t, err) @@ -47,40 +72,26 @@ func TestMsgValidator(t *testing.T) { // require.NoError(t, err) // topics := commons.ValidatorTopicID(pk) // pmsg := newPBMsg(raw, topics[0], []byte("16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r")) - // res := mv(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) + // res := mv.ValidateP2PMessage(context.Background(), "16Uiu2HAkyWQyCb6reWXGQeBUt9EXArk6h3aq3PsFMwLNq3pPGH1r", pmsg) // require.Equal(t, res, pubsub.ValidationReject) - // }) + //}) t.Run("empty message", func(t *testing.T) { pmsg := newPBMsg([]byte{}, "xxx", []byte{}) - res := mv(context.Background(), "xxxx", pmsg) - require.Equal(t, res, pubsub.ValidationReject) + res := mv.ValidatePubsubMessage(context.Background(), "xxxx", pmsg) + require.Equal(t, pubsub.ValidationReject, res) }) // TODO: enable once topic validation is in place - // t.Run("invalid validator public key", func(t *testing.T) { + //t.Run("invalid validator public key", func(t *testing.T) { // msg, err := dummySSVConsensusMsg("10101011", 1) // require.NoError(t, err) // raw, err := msg.Encode() // require.NoError(t, err) // pmsg := newPBMsg(raw, "xxx", []byte{}) - // res := mv(context.Background(), "xxxx", pmsg) + // res := mv.ValidateP2PMessage(context.Background(), "xxxx", pmsg) // require.Equal(t, res, pubsub.ValidationReject) - // }) - -} - -func createSharePublicKeys(n int) []string { - threshold.Init() - - var res []string - for i := 0; i < n; i++ { - sk := bls.SecretKey{} - sk.SetByCSPRNG() - pk := sk.GetPublicKey().SerializeToHexStr() - res = append(res, pk) - } - return res + //}) } func newPBMsg(data []byte, topic string, from []byte) *pubsub.Message { @@ -93,26 +104,19 @@ func newPBMsg(data []byte, topic string, from []byte) *pubsub.Message { return pmsg } -func dummySSVConsensusMsg(pkHex string, height int) (*spectypes.SSVMessage, error) { - pk, err := hex.DecodeString(pkHex) +func dummySSVConsensusMsg(pk spectypes.ValidatorPK, height qbft.Height) (*spectypes.SSVMessage, error) { + id := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, pk, spectypes.BNRoleAttester) + ks := spectestingutils.Testing4SharesSet() + validSignedMessage := spectestingutils.TestingRoundChangeMessageWithHeightAndIdentifier(ks.Shares[1], 1, height, id[:]) + + encodedSignedMessage, err := validSignedMessage.Encode() if err != nil { return nil, err } - id := spectypes.NewMsgID(types.GetDefaultDomain(), pk, spectypes.BNRoleAttester) - msgData := fmt.Sprintf(`{ - "message": { - "type": 3, - "round": 2, - "identifier": "%s", - "height": %d, - "value": "bk0iAAAAAAACAAAAAAAAAAbYXFSt2H7SQd5q5u+N0bp6PbbPTQjU25H1QnkbzTECahIBAAAAAADmi+NJfvXZ3iXp2cfs0vYVW+EgGD7DTTvr5EkLtiWq8WsSAQAAAAAAIC8dZTEdD3EvE38B9kDVWkSLy40j0T+TtSrrrBqVjo4=" - }, - "signature": "sVV0fsvqQlqliKv/ussGIatxpe8LDWhc9uoaM5WpjbiYvvxUr1eCpz0ja7UT1PGNDdmoGi6xbMC1g/ozhAt4uCdpy0Xdfqbv2hMf2iRL5ZPKOSmMifHbd8yg4PeeceyN", - 
"signer_ids": [1,3,4] - }`, id, height) + return &spectypes.SSVMessage{ MsgType: spectypes.SSVConsensusMsgType, MsgID: id, - Data: []byte(msgData), + Data: encodedSignedMessage, }, nil } diff --git a/network/topics/params/gossipsub.go b/network/topics/params/gossipsub.go index 5e7945768d..c7d51ba8a1 100644 --- a/network/topics/params/gossipsub.go +++ b/network/topics/params/gossipsub.go @@ -6,7 +6,7 @@ import ( pubsub "github.com/libp2p/go-libp2p-pubsub" ) -var ( +const ( // gsD topic stable mesh target count gsD = 8 // gsDlo topic stable mesh low watermark diff --git a/network/topics/params/topic_score.go b/network/topics/params/topic_score.go index b7b19fc8ef..a7b0942f34 100644 --- a/network/topics/params/topic_score.go +++ b/network/topics/params/topic_score.go @@ -22,7 +22,8 @@ const ( // using value of 50 (prysm changed to 90) dampeningFactor = 50 - subnetTopicsWeight = 4.0 + subnetTopicsWeight = 4.0 + invalidMeshDeliveriesWeight = -800 ) const ( @@ -167,7 +168,7 @@ func TopicParams(opts Options) (*pubsub.TopicScoreParams, error) { } if opts.Topic.InvalidMsgDecayTime > 0 { - params.InvalidMessageDeliveriesWeight = -opts.maxScore() / opts.Topic.TopicWeight + params.InvalidMessageDeliveriesWeight = invalidMeshDeliveriesWeight params.InvalidMessageDeliveriesDecay = scoreDecay(opts.Topic.InvalidMsgDecayTime*opts.Network.OneEpochDuration, opts.Network.OneEpochDuration) } else { params.InvalidMessageDeliveriesDecay = 0.1 diff --git a/network/topics/pubsub.go b/network/topics/pubsub.go index b4b67b4833..2422422e2b 100644 --- a/network/topics/pubsub.go +++ b/network/topics/pubsub.go @@ -26,7 +26,7 @@ const ( ) // the following are kept in vars to allow flexibility (e.g. in tests) -var ( +const ( // validationQueueSize is the size that we assign to the validation queue validationQueueSize = 512 // outboundQueueSize is the size that we assign to the outbound message queue @@ -34,32 +34,34 @@ var ( // validateThrottle is the amount of goroutines used for pubsub msg validation validateThrottle = 8192 // scoreInspectInterval is the interval for performing score inspect, which goes over all peers scores - scoreInspectInterval = time.Minute + defaultScoreInspectInterval = time.Minute // msgIDCacheTTL specifies how long a message ID will be remembered as seen, 6.4m (as ETH 2.0) msgIDCacheTTL = params.HeartbeatInterval * 550 ) -// PububConfig is the needed config to instantiate pubsub -type PububConfig struct { +// PubSubConfig is the needed config to instantiate pubsub +type PubSubConfig struct { Host host.Host TraceLog bool StaticPeers []peer.AddrInfo MsgHandler PubsubMessageHandler - // MsgValidatorFactory accepts the topic name and returns the corresponding msg validator + // MsgValidator accepts the topic name and returns the corresponding msg validator // in case we need different validators for specific topics, // this should be the place to map a validator to topic - MsgValidatorFactory func(string) MsgValidatorFunc - ScoreIndex peers.ScoreIndex - Scoring *ScoringConfig - MsgIDHandler MsgIDHandler - Discovery discovery.Discovery + MsgValidator messageValidator + ScoreIndex peers.ScoreIndex + Scoring *ScoringConfig + MsgIDHandler MsgIDHandler + Discovery discovery.Discovery ValidateThrottle int ValidationQueueSize int OutboundQueueSize int MsgIDCacheTTL time.Duration - GetValidatorStats network.GetValidatorStats + GetValidatorStats network.GetValidatorStats + ScoreInspector pubsub.ExtendedPeerScoreInspectFn + ScoreInspectorInterval time.Duration } // ScoringConfig is the configuration for peer 
scoring @@ -76,7 +78,7 @@ type PubsubBundle struct { Resolver MsgPeersResolver } -func (cfg *PububConfig) init() error { +func (cfg *PubSubConfig) init() error { if cfg.Host == nil { return errors.New("bad args: missing host") } @@ -96,14 +98,14 @@ func (cfg *PububConfig) init() error { } // initScoring initializes scoring config -func (cfg *PububConfig) initScoring() { +func (cfg *PubSubConfig) initScoring() { if cfg.Scoring == nil { cfg.Scoring = DefaultScoringConfig() } } -// NewPubsub creates a new pubsub router and the necessary components -func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubsub.PubSub, Controller, error) { +// NewPubSub creates a new pubsub router and the necessary components +func NewPubSub(ctx context.Context, logger *zap.Logger, cfg *PubSubConfig) (*pubsub.PubSub, Controller, error) { if err := cfg.init(); err != nil { return nil, nil, err } @@ -133,12 +135,23 @@ func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubs } var topicScoreFactory func(string) *pubsub.TopicScoreParams - if cfg.ScoreIndex != nil { + + inspector := cfg.ScoreInspector + inspectInterval := cfg.ScoreInspectorInterval + if cfg.ScoreIndex != nil || inspector != nil { cfg.initScoring() - inspector := scoreInspector(logger, cfg.ScoreIndex) + + if inspector == nil { + inspector = scoreInspector(logger, cfg.ScoreIndex) + } + + if inspectInterval == 0 { + inspectInterval = defaultScoreInspectInterval + } + peerScoreParams := params.PeerScoreParams(cfg.Scoring.OneEpochDuration, cfg.MsgIDCacheTTL, cfg.Scoring.IPColocationWeight, 0, cfg.Scoring.IPWhilelist...) psOpts = append(psOpts, pubsub.WithPeerScore(peerScoreParams, params.PeerScoreThresholds()), - pubsub.WithPeerScoreInspect(inspector, scoreInspectInterval)) + pubsub.WithPeerScoreInspect(inspector, inspectInterval)) async.Interval(ctx, time.Hour, func() { // reset peer scores metric every hour because it has a label for peer ID which can grow infinitely metricPubsubPeerScoreInspect.Reset() @@ -169,7 +182,7 @@ func NewPubsub(ctx context.Context, logger *zap.Logger, cfg *PububConfig) (*pubs return nil, nil, err } - ctrl := NewTopicsController(ctx, logger, cfg.MsgHandler, cfg.MsgValidatorFactory, sf, ps, topicScoreFactory) + ctrl := NewTopicsController(ctx, logger, cfg.MsgHandler, cfg.MsgValidator, sf, ps, topicScoreFactory) return ps, ctrl, nil } diff --git a/network/topics/scoring.go b/network/topics/scoring.go index ee0360364a..9e47514262 100644 --- a/network/topics/scoring.go +++ b/network/topics/scoring.go @@ -54,7 +54,7 @@ func scoreInspector(logger *zap.Logger, scoreIdx peers.ScoreIndex) pubsub.Extend } // topicScoreParams factory for creating scoring params for topics -func topicScoreParams(logger *zap.Logger, cfg *PububConfig) func(string) *pubsub.TopicScoreParams { +func topicScoreParams(logger *zap.Logger, cfg *PubSubConfig) func(string) *pubsub.TopicScoreParams { return func(t string) *pubsub.TopicScoreParams { totalValidators, activeValidators, myValidators, err := cfg.GetValidatorStats() if err != nil { diff --git a/networkconfig/config.go b/networkconfig/config.go index de65d48fe4..a4791e878e 100644 --- a/networkconfig/config.go +++ b/networkconfig/config.go @@ -14,6 +14,7 @@ import ( var SupportedConfigs = map[string]NetworkConfig{ Mainnet.Name: Mainnet, + HoleskyStage.Name: HoleskyStage, JatoV2Stage.Name: JatoV2Stage, JatoV2.Name: JatoV2, LocalTestnet.Name: LocalTestnet, @@ -61,3 +62,8 @@ func (n NetworkConfig) SlotDurationSec() time.Duration { func (n NetworkConfig) 
SlotsPerEpoch() uint64 { return n.Beacon.SlotsPerEpoch() } + +// GetGenesisTime returns the genesis time in unix time. +func (n NetworkConfig) GetGenesisTime() time.Time { + return time.Unix(int64(n.Beacon.MinGenesisTime()), 0) +} diff --git a/networkconfig/holesky-stage.go b/networkconfig/holesky-stage.go new file mode 100644 index 0000000000..c3e9d1aa8a --- /dev/null +++ b/networkconfig/holesky-stage.go @@ -0,0 +1,22 @@ +package networkconfig + +import ( + "math/big" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" +) + +var HoleskyStage = NetworkConfig{ + Name: "holesky-stage", + Beacon: beacon.NewNetwork(spectypes.HoleskyNetwork), + Domain: [4]byte{0x00, 0x00, 0x31, 0x12}, + GenesisEpoch: 1, + RegistrySyncOffset: new(big.Int).SetInt64(84599), + RegistryContractAddr: "0x0d33801785340072C452b994496B19f196b7eE15", + Bootnodes: []string{ + "enr:-Li4QNUN0RdeoHjI4Np18-PX1VXrJ2rJMo2OarRz0wCAxiYlD3s_E4zsmXi1LHv62ULLBT-AQfZIjYefEoEsMDkaEKCGAYtCguORh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhKfrtCyJc2VjcDI1NmsxoQP2e508AoA0B-KH-IaAd3nVCfI9q16lNztV-oTpcH72tIN0Y3CCE4mDdWRwgg-h", + }, + WhitelistedOperatorKeys: []string{}, +} diff --git a/operator/duties/attester.go b/operator/duties/attester.go index 6af6f4abd1..f89cbaf867 100644 --- a/operator/duties/attester.go +++ b/operator/duties/attester.go @@ -11,19 +11,20 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) type AttesterHandler struct { baseHandler - duties *Duties[*eth2apiv1.AttesterDuty] + duties *dutystore.Duties[eth2apiv1.AttesterDuty] fetchCurrentEpoch bool fetchNextEpoch bool } -func NewAttesterHandler() *AttesterHandler { +func NewAttesterHandler(duties *dutystore.Duties[eth2apiv1.AttesterDuty]) *AttesterHandler { h := &AttesterHandler{ - duties: NewDuties[*eth2apiv1.AttesterDuty](), + duties: duties, } h.fetchCurrentEpoch = true h.fetchFirst = true @@ -52,7 +53,7 @@ func (h *AttesterHandler) Name() string { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current epoch. +// 2. ResetEpoch duties for the current epoch. // 3. Fetch duties for the current epoch. // 4. If necessary, fetch duties for the next epoch. 
// @@ -69,7 +70,8 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_seq", buildStr)) @@ -82,7 +84,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { } else { h.processExecution(currentEpoch, slot) if h.indicesChanged { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.indicesChanged = false } h.processFetching(ctx, currentEpoch, slot) @@ -98,7 +100,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // last slot of epoch if uint64(slot)%slotsPerEpoch == slotsPerEpoch-1 { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) } case reorgEvent := <-h.reorg: @@ -108,18 +110,18 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // reset current epoch duties if reorgEvent.Previous { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.fetchFirst = true h.fetchCurrentEpoch = true if h.shouldFetchNexEpoch(reorgEvent.Slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } else if reorgEvent.Current { // reset & re-fetch next epoch duties if in appropriate slot range, // otherwise they will be fetched by the appropriate slot tick. if h.shouldFetchNexEpoch(reorgEvent.Slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } @@ -135,7 +137,7 @@ func (h *AttesterHandler) HandleDuties(ctx context.Context) { // reset next epoch duties if in appropriate slot range if h.shouldFetchNexEpoch(slot) { - h.duties.Reset(currentEpoch + 1) + h.duties.ResetEpoch(currentEpoch + 1) h.fetchNextEpoch = true } } @@ -164,24 +166,26 @@ func (h *AttesterHandler) processFetching(ctx context.Context, epoch phase0.Epoc } func (h *AttesterHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) { + duties := h.duties.CommitteeSlotDuties(epoch, slot) + if duties == nil { + return + } + // range over duties and execute - if slotMap, ok := h.duties.m[epoch]; ok { - if duties, ok := slotMap[slot]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)*2) - for _, d := range duties { - if h.shouldExecute(d) { - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester)) - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator)) - } - } - h.executeDuties(h.logger, toExecute) + toExecute := make([]*spectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d) { + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAttester)) + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleAggregator)) } } + + h.executeDuties(h.logger, toExecute) } func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error { start := time.Now() - indices := h.validatorController.ActiveValidatorIndices(epoch) + indices := h.validatorController.CommitteeActiveIndices(epoch) if len(indices) == 0 { return nil @@ -194,7 +198,7 @@ func (h *AttesterHandler) fetchAndProcessDuties(ctx context.Context, epoch phase specDuties := make([]*spectypes.Duty, 0, len(duties)) for _, d := range duties { - h.duties.Add(epoch, d.Slot, d) + h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, true) specDuties = append(specDuties, h.toSpecDuty(d, 
spectypes.BNRoleAttester)) } @@ -245,8 +249,7 @@ func (h *AttesterHandler) shouldExecute(duty *eth2apiv1.AttesterDuty) bool { return true } if currentSlot+1 == duty.Slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/attester_test.go b/operator/duties/attester_test.go index e0927c1f0a..4292ddf395 100644 --- a/operator/duties/attester_test.go +++ b/operator/duties/attester_test.go @@ -5,50 +5,52 @@ import ( "testing" "time" - v1 "github.com/attestantio/go-eth2-client/api/v1" + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" ) -func setupAttesterDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*v1.AttesterDuty]) (chan struct{}, chan []*spectypes.Duty) { +func setupAttesterDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.AttesterDuty]) (chan struct{}, chan []*spectypes.Duty) { fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.Duty) s.beaconNode.(*mocks.MockBeaconNode).EXPECT().AttesterDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.AttesterDuty, error) { + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.AttesterDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getIndices := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - duties, _ := dutiesMap.Get(epoch) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitBeaconCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() return fetchDutiesCall, executeDutiesCall } -func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*v1.AttesterDuty) []*spectypes.Duty { +func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*eth2apiv1.AttesterDuty) []*spectypes.Duty { expectedDuties := make([]*spectypes.Duty, 0) for _, d := range duties { 
expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleAttester)) @@ -59,15 +61,15 @@ func expectedExecutedAttesterDuties(handler *AttesterHandler, duties []*v1.Attes func TestScheduler_Attester_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(1)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(1), @@ -95,15 +97,15 @@ func TestScheduler_Attester_Same_Slot(t *testing.T) { func TestScheduler_Attester_Diff_Slots(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -136,9 +138,9 @@ func TestScheduler_Attester_Diff_Slots(t *testing.T) { func TestScheduler_Attester_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -152,7 +154,7 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { scheduler.indicesChg <- struct{}{} // no execution should happen in slot 0 waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(0), @@ -180,7 +182,7 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { // STEP 4: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(2)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[2]}) + expected := expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -193,9 +195,9 @@ func TestScheduler_Attester_Indices_Changed(t *testing.T) { func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + 
dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -213,7 +215,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 3: trigger a change in active indices scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(3), ValidatorIndex: phase0.ValidatorIndex(1), @@ -223,7 +225,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 4: trigger a change in active indices in the same slot scheduler.indicesChg <- struct{}{} duties, _ = dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(4), ValidatorIndex: phase0.ValidatorIndex(2), @@ -238,7 +240,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 6: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[0]}) + expected := expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -247,7 +249,7 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 7: wait for attester duties to be executed currentSlot.SetSlot(phase0.Slot(4)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedAttesterDuties(handler, []*v1.AttesterDuty{duties[1]}) + expected = expectedExecutedAttesterDuties(handler, []*eth2apiv1.AttesterDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) mockTicker.Send(currentSlot.GetSlot()) @@ -261,15 +263,15 @@ func TestScheduler_Attester_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // reorg previous dependent root changed func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(63)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(66), @@ -282,8 +284,8 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, PreviousDutyDependentRoot: phase0.Root{0x01}, @@ -298,13 +300,13 @@ func
TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg on epoch transition - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(67), @@ -341,15 +343,15 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition(t *testing.T) { // reorg previous dependent root changed and the indices changed as well func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(63)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(66), @@ -363,8 +365,8 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, PreviousDutyDependentRoot: phase0.Root{0x01}, @@ -379,13 +381,13 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg on epoch transition - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(67), @@ -398,7 +400,7 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t // STEP 5: trigger indices change scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(2)) - dutiesMap.Set(phase0.Epoch(2), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), append(duties, &eth2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(67), ValidatorIndex: phase0.ValidatorIndex(2), @@ -432,15 +434,15 @@ func TestScheduler_Attester_Reorg_Previous_Epoch_Transition_Indices_Changed(t *t // reorg previous dependent root changed func TestScheduler_Attester_Reorg_Previous(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(32)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall,
executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(35), @@ -453,8 +455,8 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x01}, }, @@ -468,13 +470,13 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -511,15 +513,15 @@ func TestScheduler_Attester_Reorg_Previous(t *testing.T) { // reorg previous dependent root changed and the indices changed the same slot func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(32)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(35), @@ -532,8 +534,8 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x01}, }, @@ -547,13 +549,13 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), PreviousDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -566,7 +568,7 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T // STEP 5: trigger indices change scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(1)) - dutiesMap.Set(phase0.Epoch(1), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), append(duties, &eth2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(36), ValidatorIndex: phase0.ValidatorIndex(2), @@ -600,15 +602,15 @@ func TestScheduler_Attester_Reorg_Previous_Indices_Change_Same_Slot(t *testing.T // reorg current dependent root changed func
TestScheduler_Attester_Reorg_Current(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(47)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(64), @@ -621,8 +623,8 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -636,13 +638,13 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(65), @@ -687,15 +689,15 @@ func TestScheduler_Attester_Reorg_Current(t *testing.T) { // reorg current dependent root changed including indices change in the same slot func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(47)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(64), @@ -708,8 +710,8 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -723,13 +725,13 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(2), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(65), @@ -742,7 +744,7 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 5: trigger indices change
scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(2)) - dutiesMap.Set(phase0.Epoch(2), append(duties, &v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(2), append(duties, &eth2apiv1.AttesterDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(65), ValidatorIndex: phase0.ValidatorIndex(2), @@ -783,15 +785,15 @@ func TestScheduler_Attester_Reorg_Current_Indices_Changed(t *testing.T) { func TestScheduler_Attester_Early_Block(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -817,8 +819,8 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) // STEP 4: trigger head event (block arrival) - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), }, } @@ -833,15 +835,15 @@ func TestScheduler_Attester_Early_Block(t *testing.T) { func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(31)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(32), @@ -869,15 +871,15 @@ func TestScheduler_Attester_Start_In_The_End_Of_The_Epoch(t *testing.T) { func TestScheduler_Attester_Fetch_Execute_Next_Epoch_Duty(t *testing.T) { var ( - handler = NewAttesterHandler() + handler = NewAttesterHandler(dutystore.NewDuties[eth2apiv1.AttesterDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.AttesterDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.AttesterDuty]() ) currentSlot.SetSlot(phase0.Slot(13)) scheduler, logger, mockTicker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupAttesterDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.AttesterDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.AttesterDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(32), diff --git a/operator/duties/base_handler.go b/operator/duties/base_handler.go index 15303fef68..c3c22ebbe2 100644 --- a/operator/duties/base_handler.go +++ b/operator/duties/base_handler.go @@ -3,11 +3,11 @@ package duties import ( "context" - "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "go.uber.org/zap" "github.com/bloxapp/ssv/networkconfig" +
"github.com/bloxapp/ssv/operator/slotticker" ) //go:generate mockgen -package=duties -destination=./base_handler_mock.go -source=./base_handler.go @@ -16,7 +16,7 @@ import ( type ExecuteDutiesFunc func(logger *zap.Logger, duties []*spectypes.Duty) type dutyHandler interface { - Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, chan phase0.Slot, chan ReorgEvent, chan struct{}) + Setup(string, *zap.Logger, BeaconNode, networkconfig.NetworkConfig, ValidatorController, ExecuteDutiesFunc, slotticker.Provider, chan ReorgEvent, chan struct{}) HandleDuties(context.Context) Name() string } @@ -27,7 +27,7 @@ type baseHandler struct { network networkconfig.NetworkConfig validatorController ValidatorController executeDuties ExecuteDutiesFunc - ticker chan phase0.Slot + ticker slotticker.SlotTicker reorg chan ReorgEvent indicesChange chan struct{} @@ -43,7 +43,7 @@ func (h *baseHandler) Setup( network networkconfig.NetworkConfig, validatorController ValidatorController, executeDuties ExecuteDutiesFunc, - ticker chan phase0.Slot, + slotTickerProvider slotticker.Provider, reorgEvents chan ReorgEvent, indicesChange chan struct{}, ) { @@ -52,28 +52,12 @@ func (h *baseHandler) Setup( h.network = network h.validatorController = validatorController h.executeDuties = executeDuties - h.ticker = ticker + h.ticker = slotTickerProvider() h.reorg = reorgEvents h.indicesChange = indicesChange } -type Duties[D any] struct { - m map[phase0.Epoch]map[phase0.Slot][]D -} - -func NewDuties[D any]() *Duties[D] { - return &Duties[D]{ - m: make(map[phase0.Epoch]map[phase0.Slot][]D), - } -} - -func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, duty D) { - if _, ok := d.m[epoch]; !ok { - d.m[epoch] = make(map[phase0.Slot][]D) - } - d.m[epoch][slot] = append(d.m[epoch][slot], duty) -} - -func (d *Duties[D]) Reset(epoch phase0.Epoch) { - delete(d.m, epoch) +func (h *baseHandler) warnMisalignedSlotAndDuty(dutyType string) { + h.logger.Debug("current slot and duty slot are not aligned, "+ + "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", dutyType)) } diff --git a/operator/duties/base_handler_mock.go b/operator/duties/base_handler_mock.go index 801ca2dc8c..6177f369f3 100644 --- a/operator/duties/base_handler_mock.go +++ b/operator/duties/base_handler_mock.go @@ -8,8 +8,8 @@ import ( context "context" reflect "reflect" - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" networkconfig "github.com/bloxapp/ssv/networkconfig" + slotticker "github.com/bloxapp/ssv/operator/slotticker" gomock "github.com/golang/mock/gomock" zap "go.uber.org/zap" ) @@ -64,7 +64,7 @@ func (mr *MockdutyHandlerMockRecorder) Name() *gomock.Call { } // Setup mocks base method. 
-func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 networkconfig.NetworkConfig, arg4 ValidatorController, arg5 ExecuteDutiesFunc, arg6 chan phase0.Slot, arg7 chan ReorgEvent, arg8 chan struct{}) { +func (m *MockdutyHandler) Setup(arg0 string, arg1 *zap.Logger, arg2 BeaconNode, arg3 networkconfig.NetworkConfig, arg4 ValidatorController, arg5 ExecuteDutiesFunc, arg6 slotticker.Provider, arg7 chan ReorgEvent, arg8 chan struct{}) { m.ctrl.T.Helper() m.ctrl.Call(m, "Setup", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } diff --git a/operator/duties/dutystore/duties.go b/operator/duties/dutystore/duties.go new file mode 100644 index 0000000000..50fd0d7e22 --- /dev/null +++ b/operator/duties/dutystore/duties.go @@ -0,0 +1,97 @@ +package dutystore + +import ( + "sync" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +type Duty interface { + eth2apiv1.AttesterDuty | eth2apiv1.ProposerDuty | eth2apiv1.SyncCommitteeDuty +} + +type dutyDescriptor[D Duty] struct { + duty *D + inCommittee bool +} + +type Duties[D Duty] struct { + mu sync.RWMutex + m map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D] +} + +func NewDuties[D Duty]() *Duties[D] { + return &Duties[D]{ + m: make(map[phase0.Epoch]map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]), + } +} + +func (d *Duties[D]) CommitteeSlotDuties(epoch phase0.Epoch, slot phase0.Slot) []*D { + d.mu.RLock() + defer d.mu.RUnlock() + + slotMap, ok := d.m[epoch] + if !ok { + return nil + } + + descriptorMap, ok := slotMap[slot] + if !ok { + return nil + } + + var duties []*D + for _, descriptor := range descriptorMap { + if descriptor.inCommittee { + duties = append(duties, descriptor.duty) + } + } + + return duties +} + +func (d *Duties[D]) ValidatorDuty(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex) *D { + d.mu.RLock() + defer d.mu.RUnlock() + + slotMap, ok := d.m[epoch] + if !ok { + return nil + } + + descriptorMap, ok := slotMap[slot] + if !ok { + return nil + } + + descriptor, ok := descriptorMap[validatorIndex] + if !ok { + return nil + } + + return descriptor.duty +} + +func (d *Duties[D]) Add(epoch phase0.Epoch, slot phase0.Slot, validatorIndex phase0.ValidatorIndex, duty *D, inCommittee bool) { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.m[epoch]; !ok { + d.m[epoch] = make(map[phase0.Slot]map[phase0.ValidatorIndex]dutyDescriptor[D]) + } + if _, ok := d.m[epoch][slot]; !ok { + d.m[epoch][slot] = make(map[phase0.ValidatorIndex]dutyDescriptor[D]) + } + d.m[epoch][slot][validatorIndex] = dutyDescriptor[D]{ + duty: duty, + inCommittee: inCommittee, + } +} + +func (d *Duties[D]) ResetEpoch(epoch phase0.Epoch) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.m, epoch) +} diff --git a/operator/duties/dutystore/store.go b/operator/duties/dutystore/store.go new file mode 100644 index 0000000000..53dbfaefcc --- /dev/null +++ b/operator/duties/dutystore/store.go @@ -0,0 +1,19 @@ +package dutystore + +import ( + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" +) + +type Store struct { + Attester *Duties[eth2apiv1.AttesterDuty] + Proposer *Duties[eth2apiv1.ProposerDuty] + SyncCommittee *SyncCommitteeDuties +} + +func New() *Store { + return &Store{ + Attester: NewDuties[eth2apiv1.AttesterDuty](), + Proposer: NewDuties[eth2apiv1.ProposerDuty](), + SyncCommittee: NewSyncCommitteeDuties(), + } +} diff --git a/operator/duties/dutystore/sync_committee.go 
b/operator/duties/dutystore/sync_committee.go new file mode 100644 index 0000000000..0ae13041c7 --- /dev/null +++ b/operator/duties/dutystore/sync_committee.go @@ -0,0 +1,76 @@ +package dutystore + +import ( + "sync" + + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +type SyncCommitteeDuties struct { + mu sync.RWMutex + m map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty] +} + +func NewSyncCommitteeDuties() *SyncCommitteeDuties { + return &SyncCommitteeDuties{ + m: make(map[uint64]map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]), + } +} + +func (d *SyncCommitteeDuties) CommitteePeriodDuties(period uint64) []*eth2apiv1.SyncCommitteeDuty { + d.mu.RLock() + defer d.mu.RUnlock() + + descriptorMap, ok := d.m[period] + if !ok { + return nil + } + + var duties []*eth2apiv1.SyncCommitteeDuty + for _, descriptor := range descriptorMap { + if descriptor.inCommittee { + duties = append(duties, descriptor.duty) + } + } + + return duties +} + +func (d *SyncCommitteeDuties) Duty(period uint64, validatorIndex phase0.ValidatorIndex) *eth2apiv1.SyncCommitteeDuty { + d.mu.RLock() + defer d.mu.RUnlock() + + duties, ok := d.m[period] + if !ok { + return nil + } + + descriptor, ok := duties[validatorIndex] + if !ok { + return nil + } + + return descriptor.duty +} + +func (d *SyncCommitteeDuties) Add(period uint64, validatorIndex phase0.ValidatorIndex, duty *eth2apiv1.SyncCommitteeDuty, inCommittee bool) { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.m[period]; !ok { + d.m[period] = make(map[phase0.ValidatorIndex]dutyDescriptor[eth2apiv1.SyncCommitteeDuty]) + } + + d.m[period][validatorIndex] = dutyDescriptor[eth2apiv1.SyncCommitteeDuty]{ + duty: duty, + inCommittee: inCommittee, + } +} + +func (d *SyncCommitteeDuties) Reset(period uint64) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.m, period) +} diff --git a/operator/duties/mocks/scheduler.go b/operator/duties/mocks/scheduler.go index 00cd929622..7195d58dcd 100644 --- a/operator/duties/mocks/scheduler.go +++ b/operator/duties/mocks/scheduler.go @@ -7,13 +7,13 @@ package mocks import ( context "context" reflect "reflect" + time "time" client "github.com/attestantio/go-eth2-client" v1 "github.com/attestantio/go-eth2-client/api/v1" phase0 "github.com/attestantio/go-eth2-client/spec/phase0" types "github.com/bloxapp/ssv/protocol/v2/types" gomock "github.com/golang/mock/gomock" - event "github.com/prysmaticlabs/prysm/v4/async/event" ) // MockSlotTicker is a mock of SlotTicker interface. @@ -39,18 +39,32 @@ func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { return m.recorder } -// Subscribe mocks base method. -func (m *MockSlotTicker) Subscribe(subscription chan phase0.Slot) event.Subscription { +// Next mocks base method. +func (m *MockSlotTicker) Next() <-chan time.Time { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subscribe", subscription) - ret0, _ := ret[0].(event.Subscription) + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(<-chan time.Time) return ret0 } -// Subscribe indicates an expected call of Subscribe. -func (mr *MockSlotTickerMockRecorder) Subscribe(subscription interface{}) *gomock.Call { +// Next indicates an expected call of Next. 
+func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockSlotTicker)(nil).Subscribe), subscription) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) +} + +// Slot mocks base method. +func (m *MockSlotTicker) Slot() phase0.Slot { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Slot") + ret0, _ := ret[0].(phase0.Slot) + return ret0 +} + +// Slot indicates an expected call of Slot. +func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) } // MockBeaconNode is a mock of BeaconNode interface. @@ -186,18 +200,32 @@ func (m *MockValidatorController) EXPECT() *MockValidatorControllerMockRecorder return m.recorder } -// ActiveValidatorIndices mocks base method. -func (m *MockValidatorController) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { +// AllActiveIndices mocks base method. +func (m *MockValidatorController) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// AllActiveIndices indicates an expected call of AllActiveIndices. +func (mr *MockValidatorControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).AllActiveIndices), epoch) +} + +// CommitteeActiveIndices mocks base method. +func (m *MockValidatorController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveValidatorIndices", epoch) + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) ret0, _ := ret[0].([]phase0.ValidatorIndex) return ret0 } -// ActiveValidatorIndices indicates an expected call of ActiveValidatorIndices. -func (mr *MockValidatorControllerMockRecorder) ActiveValidatorIndices(epoch interface{}) *gomock.Call { +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. +func (mr *MockValidatorControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveValidatorIndices", reflect.TypeOf((*MockValidatorController)(nil).ActiveValidatorIndices), epoch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockValidatorController)(nil).CommitteeActiveIndices), epoch) } // GetOperatorShares mocks base method. 
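For orientation before the proposer changes below, here is a minimal sketch of how the new dutystore package introduced in this diff is consumed: duties are indexed by (epoch, slot, validator index), the inCommittee flag passed to Add decides what CommitteeSlotDuties later returns for execution, and ResetEpoch drops an epoch wholesale (e.g. after a dependent-root reorg). The types and package paths are taken from the diff itself; the standalone main wrapper is illustrative only, not part of the change.

    package main

    import (
        "fmt"

        eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1"
        "github.com/attestantio/go-eth2-client/spec/phase0"

        "github.com/bloxapp/ssv/operator/duties/dutystore"
    )

    func main() {
        // One shared store per scheduler; each handler receives its per-role sub-store.
        store := dutystore.New()

        duty := &eth2apiv1.AttesterDuty{
            PubKey:         phase0.BLSPubKey{1, 2, 3},
            Slot:           phase0.Slot(2),
            ValidatorIndex: phase0.ValidatorIndex(1),
        }

        // Add indexes the duty by (epoch, slot, validator index); the final argument
        // marks whether the validator belongs to one of our committees.
        store.Attester.Add(phase0.Epoch(0), duty.Slot, duty.ValidatorIndex, duty, true)

        // CommitteeSlotDuties returns only in-committee duties, which is what the
        // handlers turn into spec duties on each ticker event.
        for _, d := range store.Attester.CommitteeSlotDuties(phase0.Epoch(0), phase0.Slot(2)) {
            fmt.Printf("execute attester duty: validator %d at slot %d\n", d.ValidatorIndex, d.Slot)
        }

        // ResetEpoch drops all duties for an epoch, e.g. on reorg or at an epoch transition.
        store.Attester.ResetEpoch(phase0.Epoch(0))
    }
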
diff --git a/operator/duties/proposer.go b/operator/duties/proposer.go index 60fde29186..d65b25b0e1 100644 --- a/operator/duties/proposer.go +++ b/operator/duties/proposer.go @@ -11,17 +11,18 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) type ProposerHandler struct { baseHandler - duties *Duties[*eth2apiv1.ProposerDuty] + duties *dutystore.Duties[eth2apiv1.ProposerDuty] } -func NewProposerHandler() *ProposerHandler { +func NewProposerHandler(duties *dutystore.Duties[eth2apiv1.ProposerDuty]) *ProposerHandler { return &ProposerHandler{ - duties: NewDuties[*eth2apiv1.ProposerDuty](), + duties: duties, baseHandler: baseHandler{ fetchFirst: true, }, @@ -44,7 +45,7 @@ func (h *ProposerHandler) Name() string { // // On Indices Change: // 1. Execute duties. -// 2. Reset duties for the current epoch. +// 2. ResetEpoch duties for the current epoch. // 3. Fetch duties for the current epoch. // // On Ticker event: @@ -58,7 +59,8 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() currentEpoch := h.network.Beacon.EstimatedEpochAtSlot(slot) buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, slot, slot%32+1) h.logger.Debug("🛠 ticker event", zap.String("epoch_slot_seq", buildStr)) @@ -71,7 +73,6 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { } else { h.processExecution(currentEpoch, slot) if h.indicesChanged { - h.duties.Reset(currentEpoch) h.indicesChanged = false h.processFetching(ctx, currentEpoch, slot) } @@ -79,7 +80,7 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { // last slot of epoch if uint64(slot)%h.network.Beacon.SlotsPerEpoch() == h.network.Beacon.SlotsPerEpoch()-1 { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch - 1) h.fetchFirst = true } @@ -90,7 +91,7 @@ func (h *ProposerHandler) HandleDuties(ctx context.Context) { // reset current epoch duties if reorgEvent.Current { - h.duties.Reset(currentEpoch) + h.duties.ResetEpoch(currentEpoch) h.fetchFirst = true } @@ -116,36 +117,46 @@ func (h *ProposerHandler) processFetching(ctx context.Context, epoch phase0.Epoc } func (h *ProposerHandler) processExecution(epoch phase0.Epoch, slot phase0.Slot) { + duties := h.duties.CommitteeSlotDuties(epoch, slot) + if duties == nil { + return + } + // range over duties and execute - if slotMap, ok := h.duties.m[epoch]; ok { - if duties, ok := slotMap[slot]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)) - for _, d := range duties { - if h.shouldExecute(d) { - toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer)) - } - } - h.executeDuties(h.logger, toExecute) + toExecute := make([]*spectypes.Duty, 0, len(duties)) + for _, d := range duties { + if h.shouldExecute(d) { + toExecute = append(toExecute, h.toSpecDuty(d, spectypes.BNRoleProposer)) } } + h.executeDuties(h.logger, toExecute) } func (h *ProposerHandler) fetchAndProcessDuties(ctx context.Context, epoch phase0.Epoch) error { start := time.Now() - indices := h.validatorController.ActiveValidatorIndices(epoch) - if len(indices) == 0 { + allIndices := h.validatorController.AllActiveIndices(epoch) + if len(allIndices) == 0 { return nil } - duties, err := h.beaconNode.ProposerDuties(ctx, epoch, indices) + inCommitteeIndices := h.validatorController.CommitteeActiveIndices(epoch) + inCommitteeIndicesSet := map[phase0.ValidatorIndex]struct{}{} + for _, idx := range inCommitteeIndices 
{ + inCommitteeIndicesSet[idx] = struct{}{} + } + + duties, err := h.beaconNode.ProposerDuties(ctx, epoch, allIndices) if err != nil { return fmt.Errorf("failed to fetch proposer duties: %w", err) } + h.duties.ResetEpoch(epoch) + specDuties := make([]*spectypes.Duty, 0, len(duties)) for _, d := range duties { - h.duties.Add(epoch, d.Slot, d) + _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] + h.duties.Add(epoch, d.Slot, d.ValidatorIndex, d, inCommitteeDuty) specDuties = append(specDuties, h.toSpecDuty(d, spectypes.BNRoleProposer)) } @@ -174,8 +185,7 @@ func (h *ProposerHandler) shouldExecute(duty *eth2apiv1.ProposerDuty) bool { return true } if currentSlot+1 == duty.Slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/proposer_test.go b/operator/duties/proposer_test.go index 8df730b6d3..56860c3c0e 100644 --- a/operator/duties/proposer_test.go +++ b/operator/duties/proposer_test.go @@ -4,48 +4,50 @@ import ( "context" "testing" - v1 "github.com/attestantio/go-eth2-client/api/v1" + eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/cornelk/hashmap" "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" ) -func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*v1.ProposerDuty]) (chan struct{}, chan []*spectypes.Duty) { +func setupProposerDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[phase0.Epoch, []*eth2apiv1.ProposerDuty]) (chan struct{}, chan []*spectypes.Duty) { fetchDutiesCall := make(chan struct{}) executeDutiesCall := make(chan []*spectypes.Duty) s.beaconNode.(*mocks.MockBeaconNode).EXPECT().ProposerDuties(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.ProposerDuty, error) { + func(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*eth2apiv1.ProposerDuty, error) { fetchDutiesCall <- struct{}{} duties, _ := dutiesMap.Get(epoch) return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getIndices := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - duties, _ := dutiesMap.Get(epoch) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + duties, _ := dutiesMap.Get(epoch) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() + 
s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getIndices).AnyTimes() return fetchDutiesCall, executeDutiesCall } -func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*v1.ProposerDuty) []*spectypes.Duty { +func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*eth2apiv1.ProposerDuty) []*spectypes.Duty { expectedDuties := make([]*spectypes.Duty, 0) for _, d := range duties { expectedDuties = append(expectedDuties, handler.toSpecDuty(d, spectypes.BNRoleProposer)) @@ -55,15 +57,15 @@ func expectedExecutedProposerDuties(handler *ProposerHandler, duties []*v1.Propo func TestScheduler_Proposer_Same_Slot(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(0), @@ -87,15 +89,15 @@ func TestScheduler_Proposer_Same_Slot(t *testing.T) { func TestScheduler_Proposer_Diff_Slots(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -129,9 +131,9 @@ func TestScheduler_Proposer_Diff_Slots(t *testing.T) { // execute duty after two slots after the indices changed func TestScheduler_Proposer_Indices_Changed(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) @@ -148,7 +150,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { // STEP 3: trigger a change in active indices scheduler.indicesChg <- struct{}{} - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(1), @@ -178,7 +180,7 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { // STEP 4: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[2]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[2]}) 
setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -191,15 +193,15 @@ func TestScheduler_Proposer_Indices_Changed(t *testing.T) { func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(0)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(0), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(2), @@ -215,7 +217,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { scheduler.indicesChg <- struct{}{} waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) duties, _ := dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(3), ValidatorIndex: phase0.ValidatorIndex(2), @@ -225,7 +227,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { scheduler.indicesChg <- struct{}{} waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - dutiesMap.Set(phase0.Epoch(0), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(0), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 5}, Slot: phase0.Slot(4), ValidatorIndex: phase0.ValidatorIndex(3), @@ -239,7 +241,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 5: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(2)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[0]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -248,7 +250,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 6: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(3)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[1]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -257,7 +259,7 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // STEP 7: wait for proposer duties to be executed currentSlot.SetSlot(phase0.Slot(4)) duties, _ = dutiesMap.Get(phase0.Epoch(0)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[2]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[2]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -271,15 +273,15 @@ func TestScheduler_Proposer_Multiple_Indices_Changed_Same_Slot(t *testing.T) { // reorg current dependent root 
changed func TestScheduler_Proposer_Reorg_Current(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(34)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -292,8 +294,8 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -307,13 +309,13 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(37), @@ -346,15 +348,15 @@ func TestScheduler_Proposer_Reorg_Current(t *testing.T) { // reorg current dependent root changed func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewProposerHandler() + handler = NewProposerHandler(dutystore.NewDuties[eth2apiv1.ProposerDuty]()) currentSlot = &SlotValue{} - dutiesMap = hashmap.New[phase0.Epoch, []*v1.ProposerDuty]() + dutiesMap = hashmap.New[phase0.Epoch, []*eth2apiv1.ProposerDuty]() ) currentSlot.SetSlot(phase0.Slot(34)) scheduler, logger, ticker, timeout, cancel, schedulerPool := setupSchedulerAndMocks(t, handler, currentSlot) fetchDutiesCall, executeDutiesCall := setupProposerDutiesMock(scheduler, dutiesMap) - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(36), @@ -367,8 +369,8 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { waitForDutiesFetch(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 2: trigger head event - e := &v1.Event{ - Data: &v1.HeadEvent{ + e := &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x01}, }, @@ -382,13 +384,13 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { waitForNoAction(t, logger, fetchDutiesCall, executeDutiesCall, timeout) // STEP 4: trigger reorg - e = &v1.Event{ - Data: &v1.HeadEvent{ + e = &eth2apiv1.Event{ + Data: &eth2apiv1.HeadEvent{ Slot: currentSlot.GetSlot(), CurrentDutyDependentRoot: phase0.Root{0x02}, }, } - dutiesMap.Set(phase0.Epoch(1), []*v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), []*eth2apiv1.ProposerDuty{ { PubKey: phase0.BLSPubKey{1, 2, 3}, Slot: phase0.Slot(37), @@ -401,7 +403,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 5: trigger a change in active indices in the same slot 
scheduler.indicesChg <- struct{}{} duties, _ := dutiesMap.Get(phase0.Epoch(1)) - dutiesMap.Set(phase0.Epoch(1), append(duties, &v1.ProposerDuty{ + dutiesMap.Set(phase0.Epoch(1), append(duties, &eth2apiv1.ProposerDuty{ PubKey: phase0.BLSPubKey{1, 2, 4}, Slot: phase0.Slot(38), ValidatorIndex: phase0.ValidatorIndex(2), @@ -417,7 +419,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 7: The second assigned duty should be executed currentSlot.SetSlot(phase0.Slot(37)) duties, _ = dutiesMap.Get(phase0.Epoch(1)) - expected := expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[0]}) + expected := expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[0]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) @@ -426,7 +428,7 @@ func TestScheduler_Proposer_Reorg_Current_Indices_Changed(t *testing.T) { // STEP 8: The second assigned duty should be executed currentSlot.SetSlot(phase0.Slot(38)) duties, _ = dutiesMap.Get(phase0.Epoch(1)) - expected = expectedExecutedProposerDuties(handler, []*v1.ProposerDuty{duties[1]}) + expected = expectedExecutedProposerDuties(handler, []*eth2apiv1.ProposerDuty{duties[1]}) setExecuteDutyFunc(scheduler, executeDutiesCall, len(expected)) ticker.Send(currentSlot.GetSlot()) diff --git a/operator/duties/scheduler.go b/operator/duties/scheduler.go index cb1f5861c6..0ee6979ff8 100644 --- a/operator/duties/scheduler.go +++ b/operator/duties/scheduler.go @@ -11,6 +11,8 @@ import ( eth2apiv1 "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/sourcegraph/conc/pool" "go.uber.org/zap" @@ -19,11 +21,26 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/duties/dutystore" + "github.com/bloxapp/ssv/operator/slotticker" "github.com/bloxapp/ssv/protocol/v2/types" ) //go:generate mockgen -package=mocks -destination=./mocks/scheduler.go -source=./scheduler.go +var slotDelayHistogram = promauto.NewHistogram(prometheus.HistogramOpts{ + Name: "slot_ticker_delay_milliseconds", + Help: "The delay in milliseconds of the slot ticker", + Buckets: []float64{5, 10, 20, 100, 500, 5000}, // Buckets in milliseconds. Adjust as per your needs. +}) + +func init() { + logger := zap.L() + if err := prometheus.Register(slotDelayHistogram); err != nil { + logger.Debug("could not register prometheus collector") + } +} + const ( // blockPropagationDelay time to propagate around the nodes // before kicking off duties for the block's slot. 
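The hunks that follow swap the push-style `Subscribe(chan phase0.Slot)` ticker for a pull-style interface, and every handler loop in this diff converges on the same consumption pattern: block on `Next()`, then read `Slot()`. A minimal sketch of that pattern (`runHandler` and `handleSlot` are illustrative names, not identifiers from this diff):

```go
package example

import (
	"context"

	"github.com/attestantio/go-eth2-client/spec/phase0"

	"github.com/bloxapp/ssv/operator/slotticker"
)

// runHandler mirrors the loops in proposer.go, sync_committee.go and
// validatorregistration.go: wait for a tick, then pull the slot number.
func runHandler(ctx context.Context, ticker slotticker.SlotTicker, handleSlot func(phase0.Slot)) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.Next():
			// Slot() is only meaningful after Next() has fired, and the
			// ticker is documented as not thread-safe, so Next/Slot must be
			// called from a single goroutine.
			handleSlot(ticker.Slot())
		}
	}
}
```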
@@ -31,7 +48,8 @@ const ( ) type SlotTicker interface { - Subscribe(subscription chan phase0.Slot) event.Subscription + Next() <-chan time.Time + Slot() phase0.Slot } type BeaconNode interface { @@ -45,7 +63,8 @@ type BeaconNode interface { // ValidatorController represents the component that controls validators via the scheduler type ValidatorController interface { - ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex GetOperatorShares() []*types.SSVShare } @@ -58,15 +77,16 @@ type SchedulerOptions struct { ValidatorController ValidatorController ExecuteDuty ExecuteDutyFunc IndicesChg chan struct{} - Ticker SlotTicker + SlotTickerProvider slotticker.Provider BuilderProposals bool + DutyStore *dutystore.Store } type Scheduler struct { beaconNode BeaconNode network networkconfig.NetworkConfig validatorController ValidatorController - slotTicker SlotTicker + slotTickerProvider slotticker.Provider executeDuty ExecuteDutyFunc builderProposals bool @@ -75,7 +95,7 @@ type Scheduler struct { reorg chan ReorgEvent indicesChg chan struct{} - ticker chan phase0.Slot + ticker slotticker.SlotTicker waitCond *sync.Cond pool *pool.ContextPool @@ -86,10 +106,15 @@ type Scheduler struct { } func NewScheduler(opts *SchedulerOptions) *Scheduler { + dutyStore := opts.DutyStore + if dutyStore == nil { + dutyStore = dutystore.New() + } + s := &Scheduler{ beaconNode: opts.BeaconNode, network: opts.Network, - slotTicker: opts.Ticker, + slotTickerProvider: opts.SlotTickerProvider, executeDuty: opts.ExecuteDuty, validatorController: opts.ValidatorController, builderProposals: opts.BuilderProposals, @@ -97,12 +122,12 @@ func NewScheduler(opts *SchedulerOptions) *Scheduler { blockPropagateDelay: blockPropagationDelay, handlers: []dutyHandler{ - NewAttesterHandler(), - NewProposerHandler(), - NewSyncCommitteeHandler(), + NewAttesterHandler(dutyStore.Attester), + NewProposerHandler(dutyStore.Proposer), + NewSyncCommitteeHandler(dutyStore.SyncCommittee), }, - ticker: make(chan phase0.Slot), + ticker: opts.SlotTickerProvider(), reorg: make(chan ReorgEvent), waitCond: sync.NewCond(&sync.Mutex{}), } @@ -135,8 +160,6 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { for _, handler := range s.handlers { handler := handler - slotTicker := make(chan phase0.Slot) - s.slotTicker.Subscribe(slotTicker) indicesChangeCh := make(chan struct{}) indicesChangeFeed.Subscribe(indicesChangeCh) @@ -150,7 +173,7 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { s.network, s.validatorController, s.ExecuteDuties, - slotTicker, + s.slotTickerProvider, reorgCh, indicesChangeCh, ) @@ -162,7 +185,6 @@ func (s *Scheduler) Start(ctx context.Context, logger *zap.Logger) error { }) } - s.slotTicker.Subscribe(s.ticker) go s.SlotTicker(ctx) go indicesChangeFeed.FanOut(ctx, s.indicesChg) @@ -214,7 +236,9 @@ func (s *Scheduler) SlotTicker(ctx context.Context) { select { case <-ctx.Done(): return - case slot := <-s.ticker: + case <-s.ticker.Next(): + slot := s.ticker.Slot() + delay := s.network.SlotDurationSec() / time.Duration(goclient.IntervalsPerSlot) /* a third of the slot duration */ finalTime := s.network.Beacon.GetSlotStartTime(slot).Add(delay) waitDuration := time.Until(finalTime) @@ -322,6 +346,11 @@ func (s *Scheduler) ExecuteDuties(logger *zap.Logger, duties []*spectypes.Duty) for _, duty := range duties { duty := duty logger := 
s.loggerWithDutyContext(logger, duty) + slotDelay := time.Since(s.network.Beacon.GetSlotStartTime(duty.Slot)) + if slotDelay >= 100*time.Millisecond { + logger.Debug("⚠️ late duty execution", zap.Int64("slot_delay", slotDelay.Milliseconds())) + } + slotDelayHistogram.Observe(float64(slotDelay.Milliseconds())) go func() { if duty.Type == spectypes.BNRoleAttester || duty.Type == spectypes.BNRoleSyncCommittee { s.waitOneThirdOrValidBlock(duty.Slot) diff --git a/operator/duties/scheduler_test.go b/operator/duties/scheduler_test.go index 342ba9e0cd..3a98de7e7c 100644 --- a/operator/duties/scheduler_test.go +++ b/operator/duties/scheduler_test.go @@ -17,35 +17,80 @@ import ( "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties/mocks" - mockslotticker "github.com/bloxapp/ssv/operator/slot_ticker/mocks" + "github.com/bloxapp/ssv/operator/slotticker" + mockslotticker "github.com/bloxapp/ssv/operator/slotticker/mocks" mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" ) +type MockSlotTicker interface { + Next() <-chan time.Time + Slot() phase0.Slot + Subscribe() chan phase0.Slot +} + type mockSlotTicker struct { - event.Feed + slotChan chan phase0.Slot + timeChan chan time.Time + slot phase0.Slot + mu sync.Mutex +} + +func NewMockSlotTicker() MockSlotTicker { + ticker := &mockSlotTicker{ + slotChan: make(chan phase0.Slot), + timeChan: make(chan time.Time), + } + ticker.start() + return ticker +} + +func (m *mockSlotTicker) start() { + go func() { + for slot := range m.slotChan { + m.mu.Lock() + m.slot = slot + m.mu.Unlock() + m.timeChan <- time.Now() + } + }() +} + +func (m *mockSlotTicker) Next() <-chan time.Time { + return m.timeChan +} + +func (m *mockSlotTicker) Slot() phase0.Slot { + m.mu.Lock() + defer m.mu.Unlock() + return m.slot +} + +func (m *mockSlotTicker) Subscribe() chan phase0.Slot { + return m.slotChan } -func (m *mockSlotTicker) Subscribe(subscriber chan phase0.Slot) event.Subscription { - return m.Feed.Subscribe(subscriber) +type mockSlotTickerService struct { + event.Feed } func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *SlotValue) ( *Scheduler, *zap.Logger, - *mockSlotTicker, + *mockSlotTickerService, time.Duration, context.CancelFunc, *pool.ContextPool, ) { ctrl := gomock.NewController(t) - timeout := 100 * time.Millisecond + // A 200ms timeout ensures the test passes, even with mockSlotTicker overhead. 
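That overhead comes from the forwarding goroutine in `mockSlotTicker`: a test pushes a slot onto the `Subscribe()` channel, `start()` records it under the mutex, and only then signals `Next()`. A hypothetical self-test of that handshake (same package as the mock, using the names defined above and this file's existing `testing`, `phase0` and `require` imports):

```go
func TestMockSlotTickerHandshake(t *testing.T) {
	ticker := NewMockSlotTicker()

	// The send blocks until start() has picked the slot up...
	go func() { ticker.Subscribe() <- phase0.Slot(5) }()

	// ...and Next() fires only after the slot was stored, so Slot() is
	// guaranteed to observe 5 here.
	<-ticker.Next()
	require.Equal(t, phase0.Slot(5), ticker.Slot())
}
```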
+ timeout := 200 * time.Millisecond ctx, cancel := context.WithCancel(context.Background()) logger := logging.TestLogger(t) mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := &mockSlotTicker{} + mockSlotService := &mockSlotTickerService{} mockNetworkConfig := networkconfig.NetworkConfig{ Beacon: mocknetwork.NewMockBeaconNetwork(ctrl), } @@ -55,8 +100,12 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot BeaconNode: mockBeaconNode, Network: mockNetworkConfig, ValidatorController: mockValidatorController, - Ticker: mockTicker, - BuilderProposals: false, + SlotTickerProvider: func() slotticker.SlotTicker { + ticker := NewMockSlotTicker() + mockSlotService.Subscribe(ticker.Subscribe()) + return ticker + }, + BuilderProposals: false, } s := NewScheduler(opts) @@ -103,7 +152,7 @@ func setupSchedulerAndMocks(t *testing.T, handler dutyHandler, currentSlot *Slot return s.Wait() }) - return s, logger, mockTicker, timeout, cancel, schedulerPool + return s, logger, mockSlotService, timeout, cancel, schedulerPool } func setExecuteDutyFunc(s *Scheduler, executeDutiesCall chan []*spectypes.Duty, executeDutiesCallSize int) { @@ -199,7 +248,7 @@ func TestScheduler_Run(t *testing.T) { mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := mockslotticker.NewMockTicker(ctrl) + mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers mockDutyHandler1 := NewMockdutyHandler(ctrl) mockDutyHandler2 := NewMockdutyHandler(ctrl) @@ -209,8 +258,10 @@ func TestScheduler_Run(t *testing.T) { BeaconNode: mockBeaconNode, Network: networkconfig.TestNetwork, ValidatorController: mockValidatorController, - Ticker: mockTicker, BuilderProposals: false, + SlotTickerProvider: func() slotticker.SlotTicker { + return mockTicker + }, } s := NewScheduler(opts) @@ -218,7 +269,7 @@ func TestScheduler_Run(t *testing.T) { s.handlers = []dutyHandler{mockDutyHandler1, mockDutyHandler2} mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + mockTicker.EXPECT().Next().Return(nil).AnyTimes() // setup mock duty handler expectations for _, mockDutyHandler := range s.handlers { @@ -248,7 +299,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { mockBeaconNode := mocks.NewMockBeaconNode(ctrl) mockValidatorController := mocks.NewMockValidatorController(ctrl) - mockTicker := mockslotticker.NewMockTicker(ctrl) + mockTicker := mockslotticker.NewMockSlotTicker(ctrl) // create multiple mock duty handlers opts := &SchedulerOptions{ @@ -256,8 +307,10 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { BeaconNode: mockBeaconNode, Network: networkconfig.TestNetwork, ValidatorController: mockValidatorController, - Ticker: mockTicker, - IndicesChg: make(chan struct{}), + SlotTickerProvider: func() slotticker.SlotTicker { + return mockTicker + }, + IndicesChg: make(chan struct{}), BuilderProposals: true, } @@ -267,7 +320,7 @@ func TestScheduler_Regression_IndiciesChangeStuck(t *testing.T) { // add multiple mock duty handlers s.handlers = []dutyHandler{NewValidatorRegistrationHandler()} mockBeaconNode.EXPECT().Events(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - mockTicker.EXPECT().Subscribe(gomock.Any()).Return(nil).Times(len(s.handlers) + 1) + 
mockTicker.EXPECT().Next().Return(nil).AnyTimes() err := s.Start(ctx, logger) require.NoError(t, err) diff --git a/operator/duties/synccommittee.go b/operator/duties/sync_committee.go similarity index 84% rename from operator/duties/synccommittee.go rename to operator/duties/sync_committee.go index 0569d7cbfd..03c2e60037 100644 --- a/operator/duties/synccommittee.go +++ b/operator/duties/sync_committee.go @@ -12,6 +12,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/operator/duties/dutystore" ) // syncCommitteePreparationEpochs is the number of epochs ahead of the sync committee @@ -21,14 +22,14 @@ var syncCommitteePreparationEpochs = uint64(2) type SyncCommitteeHandler struct { baseHandler - duties *SyncCommitteeDuties + duties *dutystore.SyncCommitteeDuties fetchCurrentPeriod bool fetchNextPeriod bool } -func NewSyncCommitteeHandler() *SyncCommitteeHandler { +func NewSyncCommitteeHandler(duties *dutystore.SyncCommitteeDuties) *SyncCommitteeHandler { h := &SyncCommitteeHandler{ - duties: NewSyncCommitteeDuties(), + duties: duties, } h.fetchCurrentPeriod = true h.fetchFirst = true @@ -39,27 +40,6 @@ func (h *SyncCommitteeHandler) Name() string { return spectypes.BNRoleSyncCommittee.String() } -type SyncCommitteeDuties struct { - m map[uint64][]*eth2apiv1.SyncCommitteeDuty -} - -func NewSyncCommitteeDuties() *SyncCommitteeDuties { - return &SyncCommitteeDuties{ - m: make(map[uint64][]*eth2apiv1.SyncCommitteeDuty), - } -} - -func (d *SyncCommitteeDuties) Add(period uint64, duty *eth2apiv1.SyncCommitteeDuty) { - if _, ok := d.m[period]; !ok { - d.m[period] = []*eth2apiv1.SyncCommitteeDuty{} - } - d.m[period] = append(d.m[period], duty) -} - -func (d *SyncCommitteeDuties) Reset(period uint64) { - delete(d.m, period) -} - // HandleDuties manages the duty lifecycle, handling different cases: // // On First Run: @@ -73,7 +53,7 @@ func (d *SyncCommitteeDuties) Reset(period uint64) { // // On Indices Change: // 1. Execute duties. // 2. Reset duties for the current period. // 3. Fetch duties for the current period. // 4. If necessary, fetch duties for the next period. 
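Since this handler keys everything by sync committee period rather than epoch, the arithmetic behind these steps may be worth spelling out. A sketch assuming mainnet's 256 epochs per sync committee period (the handler itself goes through `h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch`; the constant name here is illustrative):

```go
package main

import (
	"fmt"

	"github.com/attestantio/go-eth2-client/spec/phase0"
)

// Assumed mainnet preset: EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256.
const epochsPerSyncCommitteePeriod = 256

func syncCommitteePeriodAt(epoch phase0.Epoch) uint64 {
	return uint64(epoch) / epochsPerSyncCommitteePeriod
}

func main() {
	// With syncCommitteePreparationEpochs = 2, duties for period 2 start
	// being fetched at epoch 510, two epochs ahead of the boundary at 512.
	fmt.Println(syncCommitteePeriodAt(510)) // 1
	fmt.Println(syncCommitteePeriodAt(512)) // 2
}
```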
// @@ -92,7 +72,8 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() epoch := h.network.Beacon.EstimatedEpochAtSlot(slot) period := h.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) buildStr := fmt.Sprintf("p%v-%v-s%v-#%v", period, epoch, slot, slot%32+1) @@ -100,15 +81,10 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { if h.fetchFirst { h.fetchFirst = false - h.indicesChanged = false h.processFetching(ctx, period, slot) h.processExecution(period, slot) } else { h.processExecution(period, slot) - if h.indicesChanged { - h.duties.Reset(period) - h.indicesChanged = false - } h.processFetching(ctx, period, slot) } @@ -123,7 +99,7 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { // last slot of period if slot == h.network.Beacon.LastSlotOfSyncPeriod(period) { - h.duties.Reset(period) + h.duties.Reset(period - 1) } case reorgEvent := <-h.reorg: @@ -146,12 +122,10 @@ func (h *SyncCommitteeHandler) HandleDuties(ctx context.Context) { buildStr := fmt.Sprintf("p%v-e%v-s%v-#%v", period, epoch, slot, slot%32+1) h.logger.Info("🔁 indices change received", zap.String("period_epoch_slot_seq", buildStr)) - h.indicesChanged = true h.fetchCurrentPeriod = true // reset next period duties if in appropriate slot range if h.shouldFetchNextPeriod(slot) { - h.duties.Reset(period + 1) h.fetchNextPeriod = true } } @@ -181,16 +155,19 @@ func (h *SyncCommitteeHandler) processFetching(ctx context.Context, period uint6 func (h *SyncCommitteeHandler) processExecution(period uint64, slot phase0.Slot) { // range over duties and execute - if duties, ok := h.duties.m[period]; ok { - toExecute := make([]*spectypes.Duty, 0, len(duties)*2) - for _, d := range duties { - if h.shouldExecute(d, slot) { - toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) - toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) - } + duties := h.duties.CommitteePeriodDuties(period) + if duties == nil { + return + } + + toExecute := make([]*spectypes.Duty, 0, len(duties)*2) + for _, d := range duties { + if h.shouldExecute(d, slot) { + toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommittee)) + toExecute = append(toExecute, h.toSpecDuty(d, slot, spectypes.BNRoleSyncCommitteeContribution)) } - h.executeDuties(h.logger, toExecute) } + h.executeDuties(h.logger, toExecute) } func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period uint64) error { @@ -202,19 +179,26 @@ func (h *SyncCommitteeHandler) fetchAndProcessDuties(ctx context.Context, period } lastEpoch := h.network.Beacon.FirstEpochOfSyncPeriod(period+1) - 1 - indices := h.validatorController.ActiveValidatorIndices(firstEpoch) - - if len(indices) == 0 { + allActiveIndices := h.validatorController.AllActiveIndices(firstEpoch) + if len(allActiveIndices) == 0 { return nil } - duties, err := h.beaconNode.SyncCommitteeDuties(ctx, firstEpoch, indices) + inCommitteeIndices := h.validatorController.CommitteeActiveIndices(firstEpoch) + inCommitteeIndicesSet := map[phase0.ValidatorIndex]struct{}{} + for _, idx := range inCommitteeIndices { + inCommitteeIndicesSet[idx] = struct{}{} + } + + duties, err := h.beaconNode.SyncCommitteeDuties(ctx, firstEpoch, allActiveIndices) if err != nil { return fmt.Errorf("failed to fetch sync committee duties: %w", err) } + h.duties.Reset(period) for _, d := range duties { - 
h.duties.Add(period, d) + _, inCommitteeDuty := inCommitteeIndicesSet[d.ValidatorIndex] + h.duties.Add(period, d.ValidatorIndex, d, inCommitteeDuty) } h.prepareDutiesResultLog(period, duties, start) @@ -276,8 +260,7 @@ func (h *SyncCommitteeHandler) shouldExecute(duty *eth2apiv1.SyncCommitteeDuty, return true } if currentSlot+1 == slot { - h.logger.Debug("current slot and duty slot are not aligned, "+ - "assuming diff caused by a time drift - ignoring and executing duty", zap.String("type", duty.String())) + h.warnMisalignedSlotAndDuty(duty.String()) return true } return false diff --git a/operator/duties/synccommittee_test.go b/operator/duties/sync_committee_test.go similarity index 94% rename from operator/duties/synccommittee_test.go rename to operator/duties/sync_committee_test.go index 774cc2c2a5..b2ec6d5d8b 100644 --- a/operator/duties/synccommittee_test.go +++ b/operator/duties/sync_committee_test.go @@ -12,6 +12,7 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/duties/mocks" mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks" ) @@ -55,23 +56,24 @@ func setupSyncCommitteeDutiesMock(s *Scheduler, dutiesMap *hashmap.Map[uint64, [ return duties, nil }).AnyTimes() - s.validatorController.(*mocks.MockValidatorController).EXPECT().ActiveValidatorIndices(gomock.Any()).DoAndReturn( - func(epoch phase0.Epoch) []phase0.ValidatorIndex { - uniqueIndices := make(map[phase0.ValidatorIndex]bool) + getDuties := func(epoch phase0.Epoch) []phase0.ValidatorIndex { + uniqueIndices := make(map[phase0.ValidatorIndex]bool) - period := s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) - duties, _ := dutiesMap.Get(period) - for _, d := range duties { - uniqueIndices[d.ValidatorIndex] = true - } + period := s.network.Beacon.EstimatedSyncCommitteePeriodAtEpoch(epoch) + duties, _ := dutiesMap.Get(period) + for _, d := range duties { + uniqueIndices[d.ValidatorIndex] = true + } - indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) - for index := range uniqueIndices { - indices = append(indices, index) - } + indices := make([]phase0.ValidatorIndex, 0, len(uniqueIndices)) + for index := range uniqueIndices { + indices = append(indices, index) + } - return indices - }).AnyTimes() + return indices + } + s.validatorController.(*mocks.MockValidatorController).EXPECT().CommitteeActiveIndices(gomock.Any()).DoAndReturn(getDuties).AnyTimes() + s.validatorController.(*mocks.MockValidatorController).EXPECT().AllActiveIndices(gomock.Any()).DoAndReturn(getDuties).AnyTimes() s.beaconNode.(*mocks.MockBeaconNode).EXPECT().SubmitSyncCommitteeSubscriptions(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() @@ -89,7 +91,7 @@ func expectedExecutedSyncCommitteeDuties(handler *SyncCommitteeHandler, duties [ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -148,7 +150,7 @@ func TestScheduler_SyncCommittee_Same_Period(t *testing.T) { func TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -215,7 +217,7 @@ func 
TestScheduler_SyncCommittee_Current_Next_Periods(t *testing.T) { func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -269,7 +271,7 @@ func TestScheduler_SyncCommittee_Indices_Changed(t *testing.T) { func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -327,7 +329,7 @@ func TestScheduler_SyncCommittee_Multiple_Indices_Changed_Same_Slot(t *testing.T // reorg current dependent root changed func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -399,7 +401,7 @@ func TestScheduler_SyncCommittee_Reorg_Current(t *testing.T) { // reorg current dependent root changed including indices change in the same slot func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) @@ -479,7 +481,7 @@ func TestScheduler_SyncCommittee_Reorg_Current_Indices_Changed(t *testing.T) { func TestScheduler_SyncCommittee_Early_Block(t *testing.T) { var ( - handler = NewSyncCommitteeHandler() + handler = NewSyncCommitteeHandler(dutystore.NewSyncCommitteeDuties()) currentSlot = &SlotValue{} dutiesMap = hashmap.New[uint64, []*v1.SyncCommitteeDuty]() ) diff --git a/operator/duties/validatorregistration.go b/operator/duties/validatorregistration.go index 2ac3a49ea3..e8b6b79210 100644 --- a/operator/duties/validatorregistration.go +++ b/operator/duties/validatorregistration.go @@ -6,22 +6,16 @@ import ( "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" "go.uber.org/zap" - - "github.com/bloxapp/ssv/logging/fields" ) const validatorRegistrationEpochInterval = uint64(10) type ValidatorRegistrationHandler struct { baseHandler - - validatorsPassedFirstRegistration map[string]struct{} } func NewValidatorRegistrationHandler() *ValidatorRegistrationHandler { - return &ValidatorRegistrationHandler{ - validatorsPassedFirstRegistration: map[string]struct{}{}, - } + return &ValidatorRegistrationHandler{} } func (h *ValidatorRegistrationHandler) Name() string { @@ -36,21 +30,19 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { case <-ctx.Done(): return - case slot := <-h.ticker: + case <-h.ticker.Next(): + slot := h.ticker.Slot() shares := h.validatorController.GetOperatorShares() - sent := 0 + validators := []phase0.ValidatorIndex{} for _, share := range shares { - if !share.HasBeaconMetadata() { + if !share.HasBeaconMetadata() || !share.BeaconMetadata.IsAttesting() { continue } // if not passed first registration, should be registered within one epoch time in a corresponding slot // if passed first registration, should be registered within validatorRegistrationEpochInterval epochs time in a corresponding slot - registrationSlotInterval := h.network.SlotsPerEpoch() - if _, ok := 
h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)]; ok { - registrationSlotInterval *= validatorRegistrationEpochInterval - } + registrationSlotInterval := h.network.SlotsPerEpoch() * validatorRegistrationEpochInterval if uint64(share.BeaconMetadata.Index)%registrationSlotInterval != uint64(slot)%registrationSlotInterval { continue @@ -66,10 +58,11 @@ func (h *ValidatorRegistrationHandler) HandleDuties(ctx context.Context) { // no need for other params }}) - sent++ - h.validatorsPassedFirstRegistration[string(share.ValidatorPubKey)] = struct{}{} + validators = append(validators, share.BeaconMetadata.Index) } - h.logger.Debug("validator registration duties sent", zap.Uint64("slot", uint64(slot)), fields.Count(sent)) + h.logger.Debug("validator registration duties sent", + zap.Uint64("slot", uint64(slot)), + zap.Any("validators", validators)) case <-h.indicesChange: continue diff --git a/operator/fee_recipient/controller.go b/operator/fee_recipient/controller.go index 477b40eed1..d44f20caca 100644 --- a/operator/fee_recipient/controller.go +++ b/operator/fee_recipient/controller.go @@ -10,7 +10,7 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/bloxapp/ssv/registry/storage" @@ -25,42 +25,40 @@ type RecipientController interface { // ControllerOptions holds the needed dependencies type ControllerOptions struct { - Ctx context.Context - BeaconClient beaconprotocol.BeaconNode - Network networkconfig.NetworkConfig - ShareStorage storage.Shares - RecipientStorage storage.Recipients - Ticker slot_ticker.Ticker - OperatorData *storage.OperatorData + Ctx context.Context + BeaconClient beaconprotocol.BeaconNode + Network networkconfig.NetworkConfig + ShareStorage storage.Shares + RecipientStorage storage.Recipients + SlotTickerProvider slotticker.Provider + OperatorData *storage.OperatorData } // recipientController implementation of RecipientController type recipientController struct { - ctx context.Context - beaconClient beaconprotocol.BeaconNode - network networkconfig.NetworkConfig - shareStorage storage.Shares - recipientStorage storage.Recipients - ticker slot_ticker.Ticker - operatorData *storage.OperatorData + ctx context.Context + beaconClient beaconprotocol.BeaconNode + network networkconfig.NetworkConfig + shareStorage storage.Shares + recipientStorage storage.Recipients + slotTickerProvider slotticker.Provider + operatorData *storage.OperatorData } func NewController(opts *ControllerOptions) *recipientController { return &recipientController{ - ctx: opts.Ctx, - beaconClient: opts.BeaconClient, - network: opts.Network, - shareStorage: opts.ShareStorage, - recipientStorage: opts.RecipientStorage, - ticker: opts.Ticker, - operatorData: opts.OperatorData, + ctx: opts.Ctx, + beaconClient: opts.BeaconClient, + network: opts.Network, + shareStorage: opts.ShareStorage, + recipientStorage: opts.RecipientStorage, + slotTickerProvider: opts.SlotTickerProvider, + operatorData: opts.OperatorData, } } func (rc *recipientController) Start(logger *zap.Logger) { - tickerChan := make(chan phase0.Slot, 32) - rc.ticker.Subscribe(tickerChan) - rc.listenToTicker(logger, tickerChan) + rc.listenToTicker(logger) } // listenToTicker loop over the given slot channel @@ -68,16 +66,19 @@ func (rc *recipientController) Start(logger *zap.Logger) { // in addition, 
submitting "same data" every slot is not efficient and can overload beacon node // instead we can subscribe to beacon node events and submit only when there is // a new fee recipient event (or new validator) was handled or when there is a syncing issue with beacon node -func (rc *recipientController) listenToTicker(logger *zap.Logger, slots chan phase0.Slot) { +func (rc *recipientController) listenToTicker(logger *zap.Logger) { firstTimeSubmitted := false - for currentSlot := range slots { + ticker := rc.slotTickerProvider() + for { + <-ticker.Next() + slot := ticker.Slot() // submit if first time or if first slot in epoch - if firstTimeSubmitted && uint64(currentSlot)%rc.network.SlotsPerEpoch() != (rc.network.SlotsPerEpoch()/2) { + if firstTimeSubmitted && uint64(slot)%rc.network.SlotsPerEpoch() != (rc.network.SlotsPerEpoch()/2) { continue } firstTimeSubmitted = true - err := rc.prepareAndSubmit(logger, currentSlot) + err := rc.prepareAndSubmit(logger, slot) if err != nil { logger.Warn("could not submit proposal preparations", zap.Error(err)) } diff --git a/operator/fee_recipient/controller_test.go b/operator/fee_recipient/controller_test.go index 02bf4144dd..6e1718afd6 100644 --- a/operator/fee_recipient/controller_test.go +++ b/operator/fee_recipient/controller_test.go @@ -13,13 +13,13 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/golang/mock/gomock" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/v4/async/event" "github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/networkconfig" - "github.com/bloxapp/ssv/operator/slot_ticker/mocks" + "github.com/bloxapp/ssv/operator/slotticker" + "github.com/bloxapp/ssv/operator/slotticker/mocks" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -52,32 +52,47 @@ func TestSubmitProposal(t *testing.T) { t.Run("submit first time or halfway through epoch", func(t *testing.T) { numberOfRequests := 4 var wg sync.WaitGroup + wg.Add(numberOfRequests) // Set up the wait group before starting goroutines + client := beacon.NewMockBeaconNode(ctrl) client.EXPECT().SubmitProposalPreparation(gomock.Any()).DoAndReturn(func(feeRecipients map[phase0.ValidatorIndex]bellatrix.ExecutionAddress) error { wg.Done() return nil - }).MinTimes(numberOfRequests).MaxTimes(numberOfRequests) // call first time and on the halfway through epoch. 
each time should be 2 request as we have two batches + }).Times(numberOfRequests) - ticker := mocks.NewMockTicker(ctrl) - ticker.EXPECT().Subscribe(gomock.Any()).DoAndReturn(func(subscription chan phase0.Slot) event.Subscription { - subscription <- 1 // first time - time.Sleep(time.Millisecond * 500) - subscription <- 2 // should not call submit - time.Sleep(time.Millisecond * 500) - subscription <- 20 // should not call submit - time.Sleep(time.Millisecond * 500) - subscription <- phase0.Slot(network.SlotsPerEpoch()) / 2 // halfway through epoch - time.Sleep(time.Millisecond * 500) - subscription <- 63 // should not call submit - return nil - }) + ticker := mocks.NewMockSlotTicker(ctrl) + mockTimeChan := make(chan time.Time) + mockSlotChan := make(chan phase0.Slot) + ticker.EXPECT().Next().Return(mockTimeChan).AnyTimes() + ticker.EXPECT().Slot().DoAndReturn(func() phase0.Slot { + return <-mockSlotChan + }).AnyTimes() frCtrl.beaconClient = client - frCtrl.ticker = ticker + frCtrl.slotTickerProvider = func() slotticker.SlotTicker { + return ticker + } go frCtrl.Start(logger) - wg.Add(numberOfRequests) + + slots := []phase0.Slot{ + 1, // first time + 2, // should not call submit + 20, // should not call submit + phase0.Slot(network.SlotsPerEpoch()) / 2, // halfway through epoch + 63, // should not call submit + } + + for _, s := range slots { + mockTimeChan <- time.Now() + mockSlotChan <- s + time.Sleep(time.Millisecond * 500) + } + wg.Wait() + + close(mockTimeChan) // Close the channel after test + close(mockSlotChan) }) t.Run("error handling", func(t *testing.T) { @@ -88,18 +103,21 @@ func TestSubmitProposal(t *testing.T) { return errors.New("failed to submit") }).MinTimes(2).MaxTimes(2) - ticker := mocks.NewMockTicker(ctrl) - ticker.EXPECT().Subscribe(gomock.Any()).DoAndReturn(func(subscription chan phase0.Slot) event.Subscription { - subscription <- 100 // first time - return nil - }) + ticker := mocks.NewMockSlotTicker(ctrl) + mockTimeChan := make(chan time.Time, 1) + ticker.EXPECT().Next().Return(mockTimeChan).AnyTimes() + ticker.EXPECT().Slot().Return(phase0.Slot(100)).AnyTimes() frCtrl.beaconClient = client - frCtrl.ticker = ticker + frCtrl.slotTickerProvider = func() slotticker.SlotTicker { + return ticker + } go frCtrl.Start(logger) + mockTimeChan <- time.Now() wg.Add(2) wg.Wait() + close(mockTimeChan) }) } diff --git a/operator/node.go b/operator/node.go index 3dc3589349..746f2ae494 100644 --- a/operator/node.go +++ b/operator/node.go @@ -15,8 +15,9 @@ import ( "github.com/bloxapp/ssv/network" "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/operator/duties" + "github.com/bloxapp/ssv/operator/duties/dutystore" "github.com/bloxapp/ssv/operator/fee_recipient" - "github.com/bloxapp/ssv/operator/slot_ticker" + "github.com/bloxapp/ssv/operator/slotticker" "github.com/bloxapp/ssv/operator/storage" "github.com/bloxapp/ssv/operator/validator" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" @@ -40,18 +41,16 @@ type Options struct { DB basedb.Database ValidatorController validator.Controller ValidatorOptions validator.ControllerOptions `yaml:"ValidatorOptions"` - - WS api.WebSocketServer - WsAPIPort int - - Metrics nodeMetrics + DutyStore *dutystore.Store + WS api.WebSocketServer + WsAPIPort int + Metrics nodeMetrics } // operatorNode implements Node interface type operatorNode struct { network networkconfig.NetworkConfig context context.Context - ticker slot_ticker.Ticker validatorsCtrl validator.Controller consensusClient beaconprotocol.BeaconNode 
executionClient *executionclient.ExecutionClient @@ -68,7 +67,7 @@ type operatorNode struct { } // New is the constructor of operatorNode -func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { +func New(logger *zap.Logger, opts Options, slotTickerProvider slotticker.Provider) Node { storageMap := qbftstorage.NewStores() roles := []spectypes.BeaconRole{ @@ -85,7 +84,6 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { node := &operatorNode{ context: opts.Context, - ticker: slotTicker, validatorsCtrl: opts.ValidatorController, network: opts.Network, consensusClient: opts.BeaconNode, @@ -100,17 +98,18 @@ func New(logger *zap.Logger, opts Options, slotTicker slot_ticker.Ticker) Node { ValidatorController: opts.ValidatorController, IndicesChg: opts.ValidatorController.IndicesChangeChan(), ExecuteDuty: opts.ValidatorController.ExecuteDuty, - Ticker: slotTicker, BuilderProposals: opts.ValidatorOptions.BuilderProposals, + DutyStore: opts.DutyStore, + SlotTickerProvider: slotTickerProvider, }), feeRecipientCtrl: fee_recipient.NewController(&fee_recipient.ControllerOptions{ - Ctx: opts.Context, - BeaconClient: opts.BeaconNode, - Network: opts.Network, - ShareStorage: opts.ValidatorOptions.RegistryStorage.Shares(), - RecipientStorage: opts.ValidatorOptions.RegistryStorage, - Ticker: slotTicker, - OperatorData: opts.ValidatorOptions.OperatorData, + Ctx: opts.Context, + BeaconClient: opts.BeaconNode, + Network: opts.Network, + ShareStorage: opts.ValidatorOptions.RegistryStorage.Shares(), + RecipientStorage: opts.ValidatorOptions.RegistryStorage, + OperatorData: opts.ValidatorOptions.OperatorData, + SlotTickerProvider: slotTickerProvider, }), ws: opts.WS, @@ -140,7 +139,6 @@ func (n *operatorNode) Start(logger *zap.Logger) error { } }() - go n.ticker.Start(logger) n.validatorsCtrl.StartNetworkHandlers() n.validatorsCtrl.StartValidators() go n.net.UpdateSubnets(logger) diff --git a/operator/slot_ticker/mocks/ticker.go b/operator/slot_ticker/mocks/ticker.go deleted file mode 100644 index 2ed11c9fb9..0000000000 --- a/operator/slot_ticker/mocks/ticker.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: ./ticker.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - phase0 "github.com/attestantio/go-eth2-client/spec/phase0" - gomock "github.com/golang/mock/gomock" - event "github.com/prysmaticlabs/prysm/v4/async/event" - zap "go.uber.org/zap" -) - -// MockTicker is a mock of Ticker interface. -type MockTicker struct { - ctrl *gomock.Controller - recorder *MockTickerMockRecorder -} - -// MockTickerMockRecorder is the mock recorder for MockTicker. -type MockTickerMockRecorder struct { - mock *MockTicker -} - -// NewMockTicker creates a new mock instance. -func NewMockTicker(ctrl *gomock.Controller) *MockTicker { - mock := &MockTicker{ctrl: ctrl} - mock.recorder = &MockTickerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockTicker) EXPECT() *MockTickerMockRecorder { - return m.recorder -} - -// Start mocks base method. -func (m *MockTicker) Start(logger *zap.Logger) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Start", logger) -} - -// Start indicates an expected call of Start. 
-func (mr *MockTickerMockRecorder) Start(logger interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockTicker)(nil).Start), logger) -} - -// Subscribe mocks base method. -func (m *MockTicker) Subscribe(subscription chan phase0.Slot) event.Subscription { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Subscribe", subscription) - ret0, _ := ret[0].(event.Subscription) - return ret0 -} - -// Subscribe indicates an expected call of Subscribe. -func (mr *MockTickerMockRecorder) Subscribe(subscription interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Subscribe", reflect.TypeOf((*MockTicker)(nil).Subscribe), subscription) -} diff --git a/operator/slot_ticker/slotticker.go b/operator/slot_ticker/slotticker.go deleted file mode 100644 index dbb1fc033e..0000000000 --- a/operator/slot_ticker/slotticker.go +++ /dev/null @@ -1,88 +0,0 @@ -package slot_ticker - -import ( - "time" - - "github.com/attestantio/go-eth2-client/spec/phase0" -) - -// The TTicker interface defines a type which can expose a -// receive-only channel firing slot events. -type TTicker interface { - C() <-chan phase0.Slot - Done() -} - -// SlotTicker is a special ticker for the beacon chain block. -// The channel emits over the slot interval, and ensures that -// the ticks are in line with the genesis time. This means that -// the duration between the ticks and the genesis time are always a -// multiple of the slot duration. -// In addition, the channel returns the new slot number. -type SlotTicker struct { - c chan phase0.Slot - done chan struct{} -} - -// C returns the ticker channel. Call Cancel afterwards to ensure -// that the goroutine exits cleanly. -func (s *SlotTicker) C() <-chan phase0.Slot { - return s.c -} - -// Done should be called to clean up the ticker. -func (s *SlotTicker) Done() { - go func() { - s.done <- struct{}{} - }() -} - -// NewSlotTicker starts and returns a new SlotTicker instance. -func NewSlotTicker(genesisTime time.Time, secondsPerSlot uint64) *SlotTicker { - if genesisTime.IsZero() { - panic("zero genesis time") - } - ticker := &SlotTicker{ - c: make(chan phase0.Slot), - done: make(chan struct{}), - } - ticker.start(genesisTime, secondsPerSlot, time.Since, time.Until, time.After) - return ticker -} - -func (s *SlotTicker) start( - genesisTime time.Time, - secondsPerSlot uint64, - since, until func(time.Time) time.Duration, - after func(time.Duration) <-chan time.Time) { - - d := time.Duration(secondsPerSlot) * time.Second - - go func() { - sinceGenesis := since(genesisTime) - - var nextTickTime time.Time - var slot phase0.Slot - if sinceGenesis < d { - // Handle when the current time is before the genesis time. 
- nextTickTime = genesisTime - slot = 0 - } else { - nextTick := sinceGenesis.Truncate(d) + d - nextTickTime = genesisTime.Add(nextTick) - slot = phase0.Slot(nextTick / d) - } - - for { - waitTime := until(nextTickTime) - select { - case <-after(waitTime): - s.c <- slot - slot++ - nextTickTime = nextTickTime.Add(d) - case <-s.done: - return - } - } - }() -} diff --git a/operator/slot_ticker/ticker.go b/operator/slot_ticker/ticker.go deleted file mode 100644 index 06cbe39604..0000000000 --- a/operator/slot_ticker/ticker.go +++ /dev/null @@ -1,84 +0,0 @@ -package slot_ticker - -import ( - "context" - "fmt" - "time" - - "github.com/attestantio/go-eth2-client/spec/phase0" - "github.com/prysmaticlabs/prysm/v4/async/event" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/networkconfig" -) - -//go:generate mockgen -package=mocks -destination=./mocks/ticker.go -source=./ticker.go - -type Ticker interface { - // Start ticker process - Start(logger *zap.Logger) - // Subscribe to ticker chan - Subscribe(subscription chan phase0.Slot) event.Subscription -} - -type ticker struct { - ctx context.Context - network networkconfig.NetworkConfig - - // chan - feed *event.Feed -} - -// NewTicker returns Ticker struct pointer -func NewTicker(ctx context.Context, network networkconfig.NetworkConfig) Ticker { - return &ticker{ - ctx: ctx, - network: network, - feed: &event.Feed{}, - } -} - -// Start slot ticker -func (t *ticker) Start(logger *zap.Logger) { - genesisTime := time.Unix(int64(t.network.Beacon.MinGenesisTime()), 0) - slotTicker := NewSlotTicker(genesisTime, uint64(t.network.SlotDurationSec().Seconds())) - t.listenToTicker(logger, slotTicker.C()) -} - -// Subscribe will trigger every slot -func (t *ticker) Subscribe(subscription chan phase0.Slot) event.Subscription { - return t.feed.Subscribe(subscription) -} - -// listenToTicker loop over the given slot channel -func (t *ticker) listenToTicker(logger *zap.Logger, slots <-chan phase0.Slot) { - for currentSlot := range slots { - currentEpoch := t.network.Beacon.EstimatedEpochAtSlot(currentSlot) - buildStr := fmt.Sprintf("e%v-s%v-#%v", currentEpoch, currentSlot, currentSlot%32+1) - logger.Debug("📅 slot ticker", zap.String("epoch_slot_seq", buildStr)) - if !t.genesisEpochEffective(logger) { - continue - } - // notify current slot to channel - _ = t.feed.Send(currentSlot) - } -} - -func (t *ticker) genesisEpochEffective(logger *zap.Logger) bool { - curSlot := t.network.Beacon.EstimatedCurrentSlot() - genSlot := t.network.Beacon.GetEpochFirstSlot(t.network.GenesisEpoch) - if curSlot < genSlot { - if t.network.Beacon.IsFirstSlotOfEpoch(curSlot) { - // wait until genesis epoch starts - curEpoch := t.network.Beacon.EstimatedCurrentEpoch() - gnsTime := t.network.Beacon.GetSlotStartTime(genSlot) - logger.Info("duties paused, will resume duties on genesis epoch", - zap.Uint64("genesis_epoch", uint64(t.network.GenesisEpoch)), - zap.Uint64("current_epoch", uint64(curEpoch)), - zap.String("genesis_time", gnsTime.Format(time.UnixDate))) - } - return false - } - - return true -} diff --git a/operator/slotticker/mocks/slotticker.go b/operator/slotticker/mocks/slotticker.go new file mode 100644 index 0000000000..f8e56df5b1 --- /dev/null +++ b/operator/slotticker/mocks/slotticker.go @@ -0,0 +1,115 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./slotticker.go + +// Package mocks is a generated GoMock package. 
+package mocks + +import ( + reflect "reflect" + time "time" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + gomock "github.com/golang/mock/gomock" +) + +// MockSlotTicker is a mock of SlotTicker interface. +type MockSlotTicker struct { + ctrl *gomock.Controller + recorder *MockSlotTickerMockRecorder +} + +// MockSlotTickerMockRecorder is the mock recorder for MockSlotTicker. +type MockSlotTickerMockRecorder struct { + mock *MockSlotTicker +} + +// NewMockSlotTicker creates a new mock instance. +func NewMockSlotTicker(ctrl *gomock.Controller) *MockSlotTicker { + mock := &MockSlotTicker{ctrl: ctrl} + mock.recorder = &MockSlotTickerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSlotTicker) EXPECT() *MockSlotTickerMockRecorder { + return m.recorder +} + +// Next mocks base method. +func (m *MockSlotTicker) Next() <-chan time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Next") + ret0, _ := ret[0].(<-chan time.Time) + return ret0 +} + +// Next indicates an expected call of Next. +func (mr *MockSlotTickerMockRecorder) Next() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSlotTicker)(nil).Next)) +} + +// Slot mocks base method. +func (m *MockSlotTicker) Slot() phase0.Slot { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Slot") + ret0, _ := ret[0].(phase0.Slot) + return ret0 +} + +// Slot indicates an expected call of Slot. +func (mr *MockSlotTickerMockRecorder) Slot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Slot", reflect.TypeOf((*MockSlotTicker)(nil).Slot)) +} + +// MockConfigProvider is a mock of ConfigProvider interface. +type MockConfigProvider struct { + ctrl *gomock.Controller + recorder *MockConfigProviderMockRecorder +} + +// MockConfigProviderMockRecorder is the mock recorder for MockConfigProvider. +type MockConfigProviderMockRecorder struct { + mock *MockConfigProvider +} + +// NewMockConfigProvider creates a new mock instance. +func NewMockConfigProvider(ctrl *gomock.Controller) *MockConfigProvider { + mock := &MockConfigProvider{ctrl: ctrl} + mock.recorder = &MockConfigProviderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockConfigProvider) EXPECT() *MockConfigProviderMockRecorder { + return m.recorder +} + +// GetGenesisTime mocks base method. +func (m *MockConfigProvider) GetGenesisTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGenesisTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetGenesisTime indicates an expected call of GetGenesisTime. +func (mr *MockConfigProviderMockRecorder) GetGenesisTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGenesisTime", reflect.TypeOf((*MockConfigProvider)(nil).GetGenesisTime)) +} + +// SlotDurationSec mocks base method. +func (m *MockConfigProvider) SlotDurationSec() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SlotDurationSec") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// SlotDurationSec indicates an expected call of SlotDurationSec. 
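These generated mocks compose with the concrete ticker constructor that follows: a test can satisfy ConfigProvider entirely through gomock expectations. A hypothetical helper (newTestTicker is not from this diff; `slotticker.New` and the interfaces appear in slotticker.go below):

```go
package example

import (
	"testing"
	"time"

	"github.com/golang/mock/gomock"

	"github.com/bloxapp/ssv/operator/slotticker"
	"github.com/bloxapp/ssv/operator/slotticker/mocks"
)

// newTestTicker builds a real slot ticker from a mocked config: 12-second
// slots starting at "now". New() reads both values once at construction.
func newTestTicker(t *testing.T) slotticker.SlotTicker {
	ctrl := gomock.NewController(t)
	cfg := mocks.NewMockConfigProvider(ctrl)
	cfg.EXPECT().GetGenesisTime().Return(time.Now()).AnyTimes()
	cfg.EXPECT().SlotDurationSec().Return(12 * time.Second).AnyTimes()
	return slotticker.New(cfg)
}
```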
+func (mr *MockConfigProviderMockRecorder) SlotDurationSec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SlotDurationSec", reflect.TypeOf((*MockConfigProvider)(nil).SlotDurationSec)) +} diff --git a/operator/slotticker/slotticker.go b/operator/slotticker/slotticker.go new file mode 100644 index 0000000000..74e6511092 --- /dev/null +++ b/operator/slotticker/slotticker.go @@ -0,0 +1,96 @@ +package slotticker + +import ( + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" +) + +//go:generate mockgen -package=mocks -destination=./mocks/slotticker.go -source=./slotticker.go + +type Provider func() SlotTicker + +type SlotTicker interface { + Next() <-chan time.Time + Slot() phase0.Slot +} + +type ConfigProvider interface { + SlotDurationSec() time.Duration + GetGenesisTime() time.Time +} + +type Config struct { + slotDuration time.Duration + genesisTime time.Time +} + +func (cfg Config) SlotDurationSec() time.Duration { + return cfg.slotDuration +} + +func (cfg Config) GetGenesisTime() time.Time { + return cfg.genesisTime +} + +type slotTicker struct { + timer *time.Timer + slotDuration time.Duration + genesisTime time.Time + slot phase0.Slot +} + +// New returns a goroutine-free SlotTicker implementation which is not thread-safe. +func New(cfgProvider ConfigProvider) *slotTicker { + genesisTime := cfgProvider.GetGenesisTime() + slotDuration := cfgProvider.SlotDurationSec() + + now := time.Now() + timeSinceGenesis := now.Sub(genesisTime) + + var initialDelay time.Duration + if timeSinceGenesis < 0 { + // Genesis time is in the future + initialDelay = -timeSinceGenesis // Wait until the genesis time + } else { + slotsSinceGenesis := timeSinceGenesis / slotDuration + nextSlotStartTime := genesisTime.Add((slotsSinceGenesis + 1) * slotDuration) + initialDelay = time.Until(nextSlotStartTime) + } + + return &slotTicker{ + timer: time.NewTimer(initialDelay), + slotDuration: slotDuration, + genesisTime: genesisTime, + slot: 0, + } +} + +// Next returns a channel that signals when the next slot should start. +// Note: This function is not thread-safe and should be called in a serialized fashion. +// Make sure no concurrent calls happen, as it can result in unexpected behavior. +func (s *slotTicker) Next() <-chan time.Time { + timeSinceGenesis := time.Since(s.genesisTime) + if timeSinceGenesis < 0 { + return s.timer.C + } + if !s.timer.Stop() { + // try to drain the channel, but don't block if there's no value + select { + case <-s.timer.C: + default: + } + } + slotNumber := uint64(timeSinceGenesis / s.slotDuration) + nextSlotStartTime := s.genesisTime.Add(time.Duration(slotNumber+1) * s.slotDuration) + s.timer.Reset(time.Until(nextSlotStartTime)) + s.slot = phase0.Slot(slotNumber + 1) + return s.timer.C +} + +// Slot returns the current slot number. +// Note: Like the Next function, this method is also not thread-safe. +// It should be called in a serialized manner after calling Next. 
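Putting the new API together, a minimal in-package sketch of the intended single-goroutine call pattern (receive from Next, then read Slot), matching the thread-safety notes in the code above and below; the durations are illustrative:

```go
package slotticker

import (
	"fmt"
	"time"
)

// exampleLoop is a sketch only: Config's fields are unexported, so a
// literal like this works in-package, mirroring the package's own tests.
func exampleLoop() {
	ticker := New(Config{slotDuration: 200 * time.Millisecond, genesisTime: time.Now()})
	for i := 0; i < 3; i++ {
		<-ticker.Next()            // blocks until the next slot boundary
		fmt.Println(ticker.Slot()) // valid only after the corresponding Next
	}
}
```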
+func (s *slotTicker) Slot() phase0.Slot { + return s.slot +} diff --git a/operator/slotticker/slotticker_test.go b/operator/slotticker/slotticker_test.go new file mode 100644 index 0000000000..612e61d492 --- /dev/null +++ b/operator/slotticker/slotticker_test.go @@ -0,0 +1,179 @@ +package slotticker + +import ( + "sync" + "testing" + "time" + + "github.com/attestantio/go-eth2-client/spec/phase0" + "github.com/cornelk/hashmap/assert" + "github.com/stretchr/testify/require" +) + +func TestSlotTicker(t *testing.T) { + const numTicks = 3 + slotDuration := 200 * time.Millisecond + // Set the genesis time such that we start from slot 1 + genesisTime := time.Now().Truncate(slotDuration).Add(-slotDuration) + + // Calculate the expected starting slot based on genesisTime + timeSinceGenesis := time.Since(genesisTime) + expectedSlot := phase0.Slot(timeSinceGenesis/slotDuration) + 1 + + ticker := New(Config{slotDuration, genesisTime}) + + for i := 0; i < numTicks; i++ { + <-ticker.Next() + slot := ticker.Slot() + + require.Equal(t, expectedSlot, slot) + expectedSlot++ + } +} + +func TestTickerInitialization(t *testing.T) { + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + ticker := New(Config{slotDuration, genesisTime}) + + start := time.Now() + <-ticker.Next() + slot := ticker.Slot() + + // Allow a small buffer (e.g., 10ms) due to code execution overhead + buffer := 10 * time.Millisecond + + elapsed := time.Since(start) + assert.True(t, elapsed+buffer >= slotDuration, "First tick occurred too soon: %v", elapsed.String()) + require.Equal(t, phase0.Slot(1), slot) +} + +func TestSlotNumberConsistency(t *testing.T) { + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + + ticker := New(Config{slotDuration, genesisTime}) + var lastSlot phase0.Slot + + for i := 0; i < 10; i++ { + <-ticker.Next() + slot := ticker.Slot() + + require.Equal(t, lastSlot+1, slot) + lastSlot = slot + } +} + +func TestGenesisInFuture(t *testing.T) { + slotDuration := 200 * time.Millisecond + genesisTime := time.Now().Add(1 * time.Second) // Setting genesis time 1s in the future + + ticker := New(Config{slotDuration, genesisTime}) + start := time.Now() + + <-ticker.Next() + + // The first tick should occur after the genesis time + expectedFirstTickDuration := genesisTime.Sub(start) + actualFirstTickDuration := time.Since(start) + + // Allow a small buffer (e.g., 10ms) due to code execution overhead + buffer := 10 * time.Millisecond + + assert.True(t, actualFirstTickDuration+buffer >= expectedFirstTickDuration, "First tick occurred too soon. Expected at least: %v, but got: %v", expectedFirstTickDuration.String(), actualFirstTickDuration.String()) +} + +func TestBoundedDrift(t *testing.T) { + slotDuration := 20 * time.Millisecond + genesisTime := time.Now() + + ticker := New(Config{slotDuration, genesisTime}) + ticks := 100 + + start := time.Now() + for i := 0; i < ticks; i++ { + <-ticker.Next() + } + expectedDuration := time.Duration(ticks) * slotDuration + elapsed := time.Since(start) + + // We'll allow a small buffer for drift, say 1% + buffer := expectedDuration * 1 / 100 + assert.True(t, elapsed >= expectedDuration-buffer && elapsed <= expectedDuration+buffer, "Drifted too far from expected time. 
Expected: %v, Actual: %v", expectedDuration.String(), elapsed.String()) +} + +func TestMultipleSlotTickers(t *testing.T) { + const ( + numTickers = 1000 + ticksPerTimer = 3 + ) + + slotDuration := 200 * time.Millisecond + genesisTime := time.Now() + + // Start the clock to time the full execution of all tickers + start := time.Now() + + var wg sync.WaitGroup + wg.Add(numTickers) + + for i := 0; i < numTickers; i++ { + go func() { + defer wg.Done() + ticker := New(Config{slotDuration, genesisTime}) + for j := 0; j < ticksPerTimer; j++ { + <-ticker.Next() + } + }() + } + + wg.Wait() + + // Calculate the total time taken for all tickers to complete their ticks + elapsed := time.Since(start) + expectedDuration := slotDuration * ticksPerTimer + + // We'll allow a small buffer for drift, say 1% + buffer := expectedDuration * 1 / 100 + assert.True(t, elapsed <= expectedDuration+buffer, "Expected all tickers to complete within %v but took %v", expectedDuration.String(), elapsed.String()) +} + +func TestSlotSkipping(t *testing.T) { + const ( + numTicks = 100 + skipInterval = 10 // Introduce a delay every 10 ticks + slotDuration = 20 * time.Millisecond + ) + + genesisTime := time.Now() + ticker := New(Config{slotDuration, genesisTime}) + + var lastSlot phase0.Slot + for i := 1; i <= numTicks; i++ { // Starting loop from 1 for ease of skipInterval check + select { + case <-ticker.Next(): + slot := ticker.Slot() + + // Ensure we never receive slots out of order or repeatedly + require.Equal(t, slot, lastSlot+1, "Expected slot %d to be one more than the last slot %d", slot, lastSlot) + lastSlot = slot + + // If it's the 10th tick or any multiple thereof + if i%skipInterval == 0 { + // Introduce delay to skip a slot + time.Sleep(slotDuration) + + // Ensure the next slot we receive is exactly 2 slots ahead of the previous slot + <-ticker.Next() + slotAfterDelay := ticker.Slot() + require.Equal(t, lastSlot+2, slotAfterDelay, "Expected to skip a slot after introducing a delay") + + // Update the slot variable to use this new slot for further iterations + lastSlot = slotAfterDelay + } + + case <-time.After(2 * slotDuration): // Fail if we don't get a tick within a reasonable time + t.Fatalf("Did not receive expected tick for iteration %d", i) + } + } +} diff --git a/operator/validator/controller.go b/operator/validator/controller.go index 17dcfddc82..604e1fbb55 100644 --- a/operator/validator/controller.go +++ b/operator/validator/controller.go @@ -5,6 +5,7 @@ import ( "crypto/rsa" "encoding/hex" "encoding/json" + "fmt" "sync" "time" @@ -22,8 +23,10 @@ import ( "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/network" nodestorage "github.com/bloxapp/ssv/operator/storage" + "github.com/bloxapp/ssv/operator/validatorsmap" beaconprotocol "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/message" p2pprotocol "github.com/bloxapp/ssv/protocol/v2/p2p" @@ -34,7 +37,6 @@ import ( "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - "github.com/bloxapp/ssv/protocol/v2/sync/handlers" "github.com/bloxapp/ssv/protocol/v2/types" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" registrystorage "github.com/bloxapp/ssv/registry/storage" @@ -76,7 +78,10 @@ type ControllerOptions struct { NewDecidedHandler qbftcontroller.NewDecidedHandler DutyRoles 
[]spectypes.BeaconRole StorageMap *storage.QBFTStores - Metrics validatorMetrics + Metrics validator.Metrics + MessageValidator validation.MessageValidator + ValidatorsMap *validatorsmap.ValidatorsMap + VerifySignatures bool // worker flags WorkersCount int `yaml:"MsgWorkersCount" env:"MSG_WORKERS_COUNT" env-default:"256" env-description:"Number of goroutines to use for message workers"` @@ -88,7 +93,8 @@ type ControllerOptions struct { // it takes care of bootstrapping, updating and managing existing validators and their shares type Controller interface { StartValidators() - ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex + AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex GetValidator(pubKey string) (*validator.Validator, bool) ExecuteDuty(logger *zap.Logger, duty *spectypes.Duty) UpdateValidatorMetaDataLoop() @@ -104,7 +110,7 @@ type Controller interface { IndicesChangeChan() chan struct{} StartValidator(share *ssvtypes.SSVShare) error - StopValidator(publicKey []byte) error + StopValidator(pubKey spectypes.ValidatorPK) error LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error UpdateFeeRecipient(owner, recipient common.Address) error @@ -120,7 +126,7 @@ type controller struct { context context.Context logger *zap.Logger - metrics validatorMetrics + metrics validator.Metrics sharesStorage registrystorage.Shares operatorsStorage registrystorage.Operators @@ -134,8 +140,8 @@ type controller struct { operatorData *registrystorage.OperatorData operatorDataMutex sync.RWMutex - validatorsMap *validatorsMap - validatorOptions *validator.Options + validatorsMap *validatorsmap.ValidatorsMap + validatorOptions validator.Options metadataUpdateInterval time.Duration @@ -144,6 +150,7 @@ type controller struct { messageRouter *messageRouter messageWorker *worker.Worker historySyncBatchSize int + messageValidator validation.MessageValidator // nonCommittees is a cache of initialized nonCommitteeValidator instances nonCommitteeValidators *ttlcache.Cache[spectypes.MessageID, *nonCommitteeValidator] @@ -156,7 +163,7 @@ type controller struct { // NewController creates a new validator controller instance func NewController(logger *zap.Logger, options ControllerOptions) Controller { - logger.Debug("setting validator controller") + logger.Debug("setting up validator controller", zap.Bool("message_validation_verify_signatures", options.VerifySignatures)) // lookup in a map that holds all relevant operators operatorsIDs := &sync.Map{} @@ -167,10 +174,10 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { Buffer: options.QueueBufferSize, } - validatorOptions := &validator.Options{ //TODO add vars + validatorOptions := validator.Options{ //TODO add vars Network: options.Network, Beacon: options.Beacon, - BeaconNetwork: options.BeaconNetwork.BeaconNetwork, + BeaconNetwork: options.BeaconNetwork.GetNetwork(), Storage: options.StorageMap, //Share: nil, // set per validator Signer: options.KeyManager, @@ -181,6 +188,9 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { Exporter: options.Exporter, BuilderProposals: options.BuilderProposals, GasLimit: options.GasLimit, + MessageValidator: options.MessageValidator, + Metrics: options.Metrics, + VerifySignatures: options.VerifySignatures, } // If full node, 
increase queue size to make enough room @@ -192,13 +202,14 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { } } - if options.Metrics == nil { - options.Metrics = nopMetrics{} + metrics := validator.Metrics(validator.NopMetrics{}) + if options.Metrics != nil { + metrics = options.Metrics } ctrl := controller{ logger: logger.Named(logging.NameController), - metrics: options.Metrics, + metrics: metrics, sharesStorage: options.RegistryStorage.Shares(), operatorsStorage: options.RegistryStorage, recipientsStorage: options.RegistryStorage, @@ -210,14 +221,14 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { keyManager: options.KeyManager, network: options.Network, - validatorsMap: newValidatorsMap(options.Context, validatorOptions), + validatorsMap: options.ValidatorsMap, validatorOptions: validatorOptions, metadataUpdateInterval: options.MetadataUpdateInterval, operatorsIDs: operatorsIDs, - messageRouter: newMessageRouter(), + messageRouter: newMessageRouter(logger), messageWorker: worker.NewWorker(logger, workerCfg), historySyncBatchSize: options.HistorySyncBatchSize, @@ -226,6 +237,8 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { ), metadataLastUpdated: make(map[string]time.Time), indicesChange: make(chan struct{}), + + messageValidator: options.MessageValidator, } // Start automatic expired item deletion in nonCommitteeValidators. @@ -236,22 +249,7 @@ func NewController(logger *zap.Logger, options ControllerOptions) Controller { // setupNetworkHandlers registers all the required handlers for sync protocols func (c *controller) setupNetworkHandlers() error { - syncHandlers := []*p2pprotocol.SyncHandler{ - p2pprotocol.WithHandler( - p2pprotocol.LastDecidedProtocol, - handlers.LastDecidedHandler(c.logger, c.ibftStorageMap, c.network), - ), - } - if c.validatorOptions.FullNode { - syncHandlers = append( - syncHandlers, - p2pprotocol.WithHandler( - p2pprotocol.DecidedHistoryProtocol, - // TODO: extract maxBatch to config - handlers.HistoryHandler(c.logger, c.ibftStorageMap, c.network, c.historySyncBatchSize), - ), - ) - } + syncHandlers := []*p2pprotocol.SyncHandler{} c.logger.Debug("setting up network handlers", zap.Int("count", len(syncHandlers)), zap.Bool("full_node", c.validatorOptions.FullNode), @@ -315,12 +313,12 @@ func (c *controller) handleRouterMessages() { pk := msg.GetID().GetPubKey() hexPK := hex.EncodeToString(pk) if v, ok := c.validatorsMap.GetValidator(hexPK); ok { - v.HandleMessage(c.logger, &msg) + v.HandleMessage(c.logger, msg) } else { if msg.MsgType != spectypes.SSVConsensusMsgType { continue // not supporting other types } - if !c.messageWorker.TryEnqueue(&msg) { // start to save non committee decided messages only post fork + if !c.messageWorker.TryEnqueue(msg) { // start to save non committee decided messages only post fork c.logger.Warn("Failed to enqueue post consensus message: buffer is full") } } @@ -336,7 +334,7 @@ var nonCommitteeValidatorTTLs = map[spectypes.BeaconRole]phase0.Slot{ spectypes.BNRoleSyncCommitteeContribution: 4, } -func (c *controller) handleWorkerMessages(msg *spectypes.SSVMessage) error { +func (c *controller) handleWorkerMessages(msg *queue.DecodedSSVMessage) error { // Get or create a nonCommitteeValidator for this MessageID, and lock it to prevent // other handlers from processing var ncv *nonCommitteeValidator @@ -354,7 +352,7 @@ func (c *controller) handleWorkerMessages(msg *spectypes.SSVMessage) error { return errors.Errorf("could not find 
validator [%s]", hex.EncodeToString(msg.GetID().GetPubKey())) } - opts := *c.validatorOptions + opts := c.validatorOptions opts.SSVShare = share ncv = &nonCommitteeValidator{ NonCommitteeValidator: validator.NewNonCommitteeValidator(c.logger, msg.GetID(), opts), @@ -459,25 +457,7 @@ func (c *controller) setupNonCommitteeValidators() { pubKeys := make([][]byte, 0, len(nonCommitteeShares)) for _, validatorShare := range nonCommitteeShares { pubKeys = append(pubKeys, validatorShare.ValidatorPubKey) - - opts := *c.validatorOptions - opts.SSVShare = validatorShare - allRoles := []spectypes.BeaconRole{ - spectypes.BNRoleAttester, - spectypes.BNRoleAggregator, - spectypes.BNRoleProposer, - spectypes.BNRoleSyncCommittee, - spectypes.BNRoleSyncCommitteeContribution, - } - for _, role := range allRoles { - messageID := spectypes.NewMsgID(ssvtypes.GetDefaultDomain(), validatorShare.ValidatorPubKey, role) - err := c.network.SyncHighestDecided(messageID) - if err != nil { - c.logger.Error("failed to sync highest decided", zap.Error(err)) - } - } } - if len(pubKeys) > 0 { c.logger.Debug("updating metadata for non-committee validators", zap.Int("count", len(pubKeys))) if err := beaconprotocol.UpdateValidatorsMetadata(c.logger, pubKeys, c, c.beacon, c.onMetadataUpdated); err != nil { @@ -548,7 +528,7 @@ func (c *controller) UpdateValidatorMetadata(pk string, metadata *beaconprotocol return nil } -// GetValidator returns a validator instance from validatorsMap +// GetValidator returns a validator instance from ValidatorsMap func (c *controller) GetValidator(pubKey string) (*validator.Validator, bool) { return c.validatorsMap.GetValidator(pubKey) } @@ -565,7 +545,7 @@ func (c *controller) ExecuteDuty(logger *zap.Logger, duty *spectypes.Duty) { logger.Error("could not create duty execute msg", zap.Error(err)) return } - dec, err := queue.DecodeSSVMessage(logger, ssvMsg) + dec, err := queue.DecodeSSVMessage(ssvMsg) if err != nil { logger.Error("could not decode duty execute msg", zap.Error(err)) return @@ -601,25 +581,36 @@ func CreateDutyExecuteMsg(duty *spectypes.Duty, pubKey phase0.BLSPubKey, domain }, nil } -// ActiveValidatorIndices fetches indices of validators who are either attesting or queued and +// CommitteeActiveIndices fetches indices of in-committee validators who are either attesting or queued and // whose activation epoch is not greater than the passed epoch. It logs a warning if an error occurs. -func (c *controller) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { - indices := make([]phase0.ValidatorIndex, 0, len(c.validatorsMap.validatorsMap)) - err := c.validatorsMap.ForEach(func(v *validator.Validator) error { - // Beacon node throws error when trying to fetch duties for non-existing validators. 
- if (v.Share.BeaconMetadata.IsAttesting() || v.Share.BeaconMetadata.Status == v1.ValidatorStatePendingQueued) && - v.Share.BeaconMetadata.ActivationEpoch <= epoch { +func (c *controller) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + validators := c.validatorsMap.GetAll() + indices := make([]phase0.ValidatorIndex, 0, len(validators)) + for _, v := range validators { + if isShareActive(epoch)(v.Share) { indices = append(indices, v.Share.BeaconMetadata.Index) } - return nil - }) - if err != nil { - c.logger.Warn("failed to get all validators public keys", zap.Error(err)) } + return indices +} +func (c *controller) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + shares := c.sharesStorage.List(nil, isShareActive(epoch)) + indices := make([]phase0.ValidatorIndex, len(shares)) + for i, share := range shares { + indices[i] = share.BeaconMetadata.Index + } return indices } +func isShareActive(epoch phase0.Epoch) func(share *ssvtypes.SSVShare) bool { + return func(share *ssvtypes.SSVShare) bool { + return share != nil && share.BeaconMetadata != nil && + (share.BeaconMetadata.IsAttesting() || share.BeaconMetadata.Status == v1.ValidatorStatePendingQueued) && + share.BeaconMetadata.ActivationEpoch <= epoch + } +} + // onMetadataUpdated is called when validator's metadata was updated func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.ValidatorMetadata) { if meta == nil { @@ -645,24 +636,15 @@ func (c *controller) onMetadataUpdated(pk string, meta *beaconprotocol.Validator } } -// onShareRemove is called when a validator was removed -// TODO: think how we can make this function atomic (i.e. failing wouldn't stop the removal of the share) -func (c *controller) onShareRemove(pk string, removeSecret bool) error { - // remove from validatorsMap - v := c.validatorsMap.RemoveValidator(pk) +// onShareStop is called when a validator was removed or liquidated +func (c *controller) onShareStop(pubKey spectypes.ValidatorPK) { + // remove from ValidatorsMap + v := c.validatorsMap.RemoveValidator(hex.EncodeToString(pubKey)) // stop instance if v != nil { v.Stop() } - // remove the share secret from key-manager - if removeSecret { - if err := c.keyManager.RemoveShare(pk); err != nil { - return errors.Wrap(err, "could not remove share secret from key manager") - } - } - - return nil } func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { @@ -672,23 +654,56 @@ func (c *controller) onShareStart(share *ssvtypes.SSVShare) (bool, error) { } if err := c.setShareFeeRecipient(share, c.recipientsStorage.GetRecipientData); err != nil { - return false, errors.Wrap(err, "could not set share fee recipient") + return false, fmt.Errorf("could not set share fee recipient: %w", err) } // Start a committee validator. - v, err := c.validatorsMap.GetOrCreateValidator(c.logger.Named("validatorsMap"), share) - if err != nil { - return false, errors.Wrap(err, "could not get or create validator") + v, found := c.validatorsMap.GetValidator(hex.EncodeToString(share.ValidatorPubKey)) + if !found { + if !share.HasBeaconMetadata() { + return false, fmt.Errorf("beacon metadata is missing") + } + + // Share context with both the validator and the runners, + // so that when the validator is stopped, the runners are stopped as well. 
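The curried isShareActive predicate introduced above is what lets the two index getters share one filter: CommitteeActiveIndices applies it per running validator, while AllActiveIndices passes the same closure to sharesStorage.List. A hedged in-package fragment; the share and epoch are hypothetical:

```go
// In-package fragment (operator/validator): one curried predicate, two call sites.
var share *ssvtypes.SSVShare // assume BeaconMetadata is populated
epoch := phase0.Epoch(100)

active := isShareActive(epoch)
_ = active(share) // true only if attesting/pending-queued and ActivationEpoch <= 100

// The same closure doubles as a storage filter, as AllActiveIndices does:
// shares := c.sharesStorage.List(nil, active)
```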
+ ctx, cancel := context.WithCancel(c.context) + + opts := c.validatorOptions + opts.SSVShare = share + opts.DutyRunners = SetupRunners(ctx, c.logger, opts) + + v = validator.NewValidator(ctx, cancel, opts) + c.validatorsMap.CreateValidator(hex.EncodeToString(share.ValidatorPubKey), v) + + c.printShare(share, "setup validator done") + + } else { + c.printShare(v.Share, "get validator") } + return c.startValidator(v) } +func (c *controller) printShare(s *ssvtypes.SSVShare, msg string) { + committee := make([]string, len(s.Committee)) + for i, c := range s.Committee { + committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.OperatorID, c.PubKey) + } + c.logger.Debug(msg, + fields.PubKey(s.ValidatorPubKey), + zap.Uint64("node_id", s.OperatorID), + zap.Strings("committee", committee), + fields.FeeRecipient(s.FeeRecipientAddress[:]), + ) +} + func (c *controller) setShareFeeRecipient(share *ssvtypes.SSVShare, getRecipientData GetRecipientDataFunc) error { - var feeRecipient bellatrix.ExecutionAddress data, found, err := getRecipientData(nil, share.OwnerAddress) if err != nil { return errors.Wrap(err, "could not get recipient data") } + + var feeRecipient bellatrix.ExecutionAddress if !found { c.logger.Debug("setting fee recipient to owner address", fields.Validator(share.ValidatorPubKey), fields.FeeRecipient(share.OwnerAddress.Bytes())) @@ -727,11 +742,6 @@ func (c *controller) UpdateValidatorMetaDataLoop() { // Prepare share filters. filters := []registrystorage.SharesFilter{} - // Filter for validators who belong to our operator. - if !c.validatorOptions.Exporter { - filters = append(filters, registrystorage.ByOperatorID(c.GetOperatorData().ID)) - } - // Filter for validators who are not liquidated. filters = append(filters, registrystorage.ByNotLiquidated()) @@ -807,9 +817,10 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt //logger.Debug("leader", zap.Int("operator_id", int(leader))) return leader }, - Storage: options.Storage.Get(role), - Network: options.Network, - Timer: roundtimer.New(ctx, nil), + Storage: options.Storage.Get(role), + Network: options.Network, + Timer: roundtimer.New(ctx, options.BeaconNetwork, role, nil), + SignatureVerification: options.VerifySignatures, } config.ValueCheckF = valueCheckF @@ -823,29 +834,29 @@ func SetupRunners(ctx context.Context, logger *zap.Logger, options validator.Opt for _, role := range runnersType { switch role { case spectypes.BNRoleAttester: - valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) + valCheck := specssv.AttesterValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.BNRoleAttester, valCheck) - runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) + runners[role] = runner.NewAttesterRunnner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, valCheck, 0) case spectypes.BNRoleProposer: - proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey, options.BuilderProposals) + 
proposedValueCheck := specssv.ProposerValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index, options.SSVShare.SharePubKey) qbftCtrl := buildController(spectypes.BNRoleProposer, proposedValueCheck) - runners[role] = runner.NewProposerRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) + runners[role] = runner.NewProposerRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, proposedValueCheck, 0) runners[role].(*runner.ProposerRunner).ProducesBlindedBlocks = options.BuilderProposals // apply blinded block flag case spectypes.BNRoleAggregator: - aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + aggregatorValueCheckF := specssv.AggregatorValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleAggregator, aggregatorValueCheckF) - runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, aggregatorValueCheckF, 0) + runners[role] = runner.NewAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, aggregatorValueCheckF, 0) case spectypes.BNRoleSyncCommittee: - syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeValueCheckF := specssv.SyncCommitteeValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleSyncCommittee, syncCommitteeValueCheckF) - runners[role] = runner.NewSyncCommitteeRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeValueCheckF, 0) case spectypes.BNRoleSyncCommitteeContribution: - syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork, options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) + syncCommitteeContributionValueCheckF := specssv.SyncCommitteeContributionValueCheckF(options.Signer, options.BeaconNetwork.GetBeaconNetwork(), options.SSVShare.Share.ValidatorPubKey, options.SSVShare.BeaconMetadata.Index) qbftCtrl := buildController(spectypes.BNRoleSyncCommitteeContribution, syncCommitteeContributionValueCheckF) - runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) + runners[role] = runner.NewSyncCommitteeAggregatorRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer, syncCommitteeContributionValueCheckF, 0) case 
spectypes.BNRoleValidatorRegistration: qbftCtrl := buildController(spectypes.BNRoleValidatorRegistration, nil) - runners[role] = runner.NewValidatorRegistrationRunner(spectypes.PraterNetwork, &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) + runners[role] = runner.NewValidatorRegistrationRunner(options.BeaconNetwork.GetBeaconNetwork(), &options.SSVShare.Share, qbftCtrl, options.Beacon, options.Network, options.Signer) } } return runners diff --git a/operator/validator/controller_test.go b/operator/validator/controller_test.go index 6a06733db2..2135d24ff3 100644 --- a/operator/validator/controller_test.go +++ b/operator/validator/controller_test.go @@ -7,17 +7,18 @@ import ( "time" "github.com/attestantio/go-eth2-client/spec/phase0" - - "github.com/bloxapp/ssv/logging" - specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" "go.uber.org/zap" + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/operator/validatorsmap" "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/queue/worker" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -32,37 +33,45 @@ func TestHandleNonCommitteeMessages(t *testing.T) { var wg sync.WaitGroup - ctr.messageWorker.UseHandler(func(msg *spectypes.SSVMessage) error { + ctr.messageWorker.UseHandler(func(msg *queue.DecodedSSVMessage) error { wg.Done() return nil }) wg.Add(2) - identifier := spectypes.NewMsgID(types.GetDefaultDomain(), []byte("pk"), spectypes.BNRoleAttester) + identifier := spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte("pk"), spectypes.BNRoleAttester) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: identifier, - Data: generateDecidedMessage(t, identifier), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: identifier, + Data: generateDecidedMessage(t, identifier), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ - MsgType: spectypes.SSVConsensusMsgType, - MsgID: identifier, - Data: generateChangeRoundMsg(t, identifier), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.SSVConsensusMsgType, + MsgID: identifier, + Data: generateChangeRoundMsg(t, identifier), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ // checks that not process unnecessary message - MsgType: message.SSVSyncMsgType, - MsgID: identifier, - Data: []byte("data"), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ // checks that not process unnecessary message + MsgType: message.SSVSyncMsgType, + MsgID: identifier, + Data: []byte("data"), + }, }) - ctr.messageRouter.Route(logger, spectypes.SSVMessage{ // checks that not process unnecessary message - MsgType: spectypes.SSVPartialSignatureMsgType, - MsgID: identifier, - Data: []byte("data"), + ctr.messageRouter.Route(context.TODO(), &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ // checks that not process unnecessary message + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: identifier, + Data: []byte("data"), + }, }) go func() { @@ -144,27 +153,25 @@ 
func TestGetIndices(t *testing.T) { logger := logging.TestLogger(t) ctr := setupController(logger, validators) - activeIndicesForCurrentEpoch := ctr.ActiveValidatorIndices(currentEpoch) + activeIndicesForCurrentEpoch := ctr.CommitteeActiveIndices(currentEpoch) require.Equal(t, 2, len(activeIndicesForCurrentEpoch)) // should return only active indices - activeIndicesForNextEpoch := ctr.ActiveValidatorIndices(currentEpoch + 1) + activeIndicesForNextEpoch := ctr.CommitteeActiveIndices(currentEpoch + 1) require.Equal(t, 3, len(activeIndicesForNextEpoch)) // should return including ValidatorStatePendingQueued } func setupController(logger *zap.Logger, validators map[string]*validator.Validator) controller { + validatorsMap := validatorsmap.New(context.TODO(), validatorsmap.WithInitialState(validators)) + return controller{ context: context.Background(), sharesStorage: nil, beacon: nil, keyManager: nil, shareEncryptionKeyProvider: nil, - validatorsMap: &validatorsMap{ - ctx: context.Background(), - lock: sync.RWMutex{}, - validatorsMap: validators, - }, - metadataUpdateInterval: 0, - messageRouter: newMessageRouter(), + validatorsMap: validatorsMap, + metadataUpdateInterval: 0, + messageRouter: newMessageRouter(logger), messageWorker: worker.NewWorker(logger, &worker.Config{ Ctx: context.Background(), WorkersCount: 1, diff --git a/operator/validator/metrics.go b/operator/validator/metrics.go index 2ab82cbfc4..d9cb36e817 100644 --- a/operator/validator/metrics.go +++ b/operator/validator/metrics.go @@ -33,31 +33,3 @@ func (c *controller) reportValidatorStatus(pk []byte, meta *beacon.ValidatorMeta c.metrics.ValidatorUnknown(pk) } } - -type validatorMetrics interface { - ValidatorInactive(publicKey []byte) - ValidatorNoIndex(publicKey []byte) - ValidatorError(publicKey []byte) - ValidatorReady(publicKey []byte) - ValidatorNotActivated(publicKey []byte) - ValidatorExiting(publicKey []byte) - ValidatorSlashed(publicKey []byte) - ValidatorNotFound(publicKey []byte) - ValidatorPending(publicKey []byte) - ValidatorRemoved(publicKey []byte) - ValidatorUnknown(publicKey []byte) -} - -type nopMetrics struct{} - -func (n nopMetrics) ValidatorInactive([]byte) {} -func (n nopMetrics) ValidatorNoIndex([]byte) {} -func (n nopMetrics) ValidatorError([]byte) {} -func (n nopMetrics) ValidatorReady([]byte) {} -func (n nopMetrics) ValidatorNotActivated([]byte) {} -func (n nopMetrics) ValidatorExiting([]byte) {} -func (n nopMetrics) ValidatorSlashed([]byte) {} -func (n nopMetrics) ValidatorNotFound([]byte) {} -func (n nopMetrics) ValidatorPending([]byte) {} -func (n nopMetrics) ValidatorRemoved([]byte) {} -func (n nopMetrics) ValidatorUnknown([]byte) {} diff --git a/operator/validator/mocks/controller.go b/operator/validator/mocks/controller.go index 6b743f6747..e7bad286b0 100644 --- a/operator/validator/mocks/controller.go +++ b/operator/validator/mocks/controller.go @@ -40,18 +40,32 @@ func (m *MockController) EXPECT() *MockControllerMockRecorder { return m.recorder } -// ActiveValidatorIndices mocks base method. -func (m *MockController) ActiveValidatorIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { +// AllActiveIndices mocks base method. +func (m *MockController) AllActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ActiveValidatorIndices", epoch) + ret := m.ctrl.Call(m, "AllActiveIndices", epoch) ret0, _ := ret[0].([]phase0.ValidatorIndex) return ret0 } -// ActiveValidatorIndices indicates an expected call of ActiveValidatorIndices. 
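For callers' tests, the regenerated mock (continuing below) splits the old expectation in two. A hedged sketch; the test name and returned indices are illustrative:

```go
package validator_test

import (
	"testing"

	"github.com/attestantio/go-eth2-client/spec/phase0"
	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"github.com/bloxapp/ssv/operator/validator/mocks"
)

func TestIndicesSplit(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	vc := mocks.NewMockController(ctrl)
	vc.EXPECT().CommitteeActiveIndices(phase0.Epoch(1)).Return([]phase0.ValidatorIndex{1, 2})
	vc.EXPECT().AllActiveIndices(phase0.Epoch(1)).Return([]phase0.ValidatorIndex{1, 2, 3})

	require.Len(t, vc.CommitteeActiveIndices(phase0.Epoch(1)), 2)
	require.Len(t, vc.AllActiveIndices(phase0.Epoch(1)), 3)
}
```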
-func (mr *MockControllerMockRecorder) ActiveValidatorIndices(epoch interface{}) *gomock.Call { +// AllActiveIndices indicates an expected call of AllActiveIndices. +func (mr *MockControllerMockRecorder) AllActiveIndices(epoch interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActiveValidatorIndices", reflect.TypeOf((*MockController)(nil).ActiveValidatorIndices), epoch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllActiveIndices", reflect.TypeOf((*MockController)(nil).AllActiveIndices), epoch) +} + +// CommitteeActiveIndices mocks base method. +func (m *MockController) CommitteeActiveIndices(epoch phase0.Epoch) []phase0.ValidatorIndex { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitteeActiveIndices", epoch) + ret0, _ := ret[0].([]phase0.ValidatorIndex) + return ret0 +} + +// CommitteeActiveIndices indicates an expected call of CommitteeActiveIndices. +func (mr *MockControllerMockRecorder) CommitteeActiveIndices(epoch interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitteeActiveIndices", reflect.TypeOf((*MockController)(nil).CommitteeActiveIndices), epoch) } // ExecuteDuty mocks base method. @@ -219,17 +233,17 @@ func (mr *MockControllerMockRecorder) StartValidators() *gomock.Call { } // StopValidator mocks base method. -func (m *MockController) StopValidator(publicKey []byte) error { +func (m *MockController) StopValidator(pubKey types.ValidatorPK) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StopValidator", publicKey) + ret := m.ctrl.Call(m, "StopValidator", pubKey) ret0, _ := ret[0].(error) return ret0 } // StopValidator indicates an expected call of StopValidator. -func (mr *MockControllerMockRecorder) StopValidator(publicKey interface{}) *gomock.Call { +func (mr *MockControllerMockRecorder) StopValidator(pubKey interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), publicKey) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopValidator", reflect.TypeOf((*MockController)(nil).StopValidator), pubKey) } // UpdateFeeRecipient mocks base method. diff --git a/operator/validator/router.go b/operator/validator/router.go index 67ef8860a9..e090cff3bc 100644 --- a/operator/validator/router.go +++ b/operator/validator/router.go @@ -1,34 +1,40 @@ package validator import ( - spectypes "github.com/bloxapp/ssv-spec/types" + "context" + "go.uber.org/zap" "github.com/bloxapp/ssv/network/commons" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) const bufSize = 1024 -func newMessageRouter() *messageRouter { +func newMessageRouter(logger *zap.Logger) *messageRouter { return &messageRouter{ - ch: make(chan spectypes.SSVMessage, bufSize), - msgID: commons.MsgID(), + logger: logger, + ch: make(chan *queue.DecodedSSVMessage, bufSize), + msgID: commons.MsgID(), } } type messageRouter struct { - ch chan spectypes.SSVMessage - msgID commons.MsgIDFunc + logger *zap.Logger + ch chan *queue.DecodedSSVMessage + msgID commons.MsgIDFunc } -func (r *messageRouter) Route(logger *zap.Logger, message spectypes.SSVMessage) { +func (r *messageRouter) Route(ctx context.Context, message *queue.DecodedSSVMessage) { select { + case <-ctx.Done(): + r.logger.Warn("context canceled, dropping message") case r.ch <- message: default: - logger.Warn("message router buffer is full. 
dropping message") + r.logger.Warn("message router buffer is full, dropping message") } } -func (r *messageRouter) GetMessageChan() <-chan spectypes.SSVMessage { +func (r *messageRouter) GetMessageChan() <-chan *queue.DecodedSSVMessage { return r.ch } diff --git a/operator/validator/router_test.go b/operator/validator/router_test.go index 787e2b988d..44b3798cac 100644 --- a/operator/validator/router_test.go +++ b/operator/validator/router_test.go @@ -10,7 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/networkconfig" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func TestRouter(t *testing.T) { @@ -19,7 +20,7 @@ func TestRouter(t *testing.T) { logger := logging.TestLogger(t) - router := newMessageRouter() + router := newMessageRouter(logger) expectedCount := 1000 count := 0 @@ -40,14 +41,17 @@ func TestRouter(t *testing.T) { }() for i := 0; i < expectedCount; i++ { - msg := spectypes.SSVMessage{ - MsgType: spectypes.MsgType(i % 3), - MsgID: spectypes.NewMsgID(types.GetDefaultDomain(), []byte{1, 1, 1, 1, 1}, spectypes.BNRoleAttester), - Data: []byte(fmt.Sprintf("data-%d", i)), + msg := &queue.DecodedSSVMessage{ + SSVMessage: &spectypes.SSVMessage{ + MsgType: spectypes.MsgType(i % 3), + MsgID: spectypes.NewMsgID(networkconfig.TestNetwork.Domain, []byte{1, 1, 1, 1, 1}, spectypes.BNRoleAttester), + Data: []byte(fmt.Sprintf("data-%d", i)), + }, } - router.Route(logger, msg) + + router.Route(context.TODO(), msg) if i%2 == 0 { - go router.Route(logger, msg) + go router.Route(context.TODO(), msg) } } diff --git a/operator/validator/task_executor.go b/operator/validator/task_executor.go index 0ea2191716..f3b967b5b3 100644 --- a/operator/validator/task_executor.go +++ b/operator/validator/task_executor.go @@ -1,17 +1,16 @@ package validator import ( - "encoding/hex" - "fmt" "time" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/ethereum/go-ethereum/common" "go.uber.org/multierr" "go.uber.org/zap" "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" + "github.com/bloxapp/ssv/protocol/v2/types" ) func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logger { @@ -20,7 +19,7 @@ func (c *controller) taskLogger(taskName string, fields ...zap.Field) *zap.Logge With(fields...) 
} -func (c *controller) StartValidator(share *ssvtypes.SSVShare) error { +func (c *controller) StartValidator(share *types.SSVShare) error { // logger := c.taskLogger("StartValidator", fields.PubKey(share.ValidatorPubKey)) // Since we don't yet have the Beacon metadata for this validator, @@ -30,41 +29,30 @@ func (c *controller) StartValidator(share *ssvtypes.SSVShare) error { return nil } -func (c *controller) StopValidator(publicKey []byte) error { - logger := c.taskLogger("StopValidator", fields.PubKey(publicKey)) +func (c *controller) StopValidator(pubKey spectypes.ValidatorPK) error { + logger := c.taskLogger("StopValidator", fields.PubKey(pubKey)) - c.metrics.ValidatorRemoved(publicKey) - if err := c.onShareRemove(hex.EncodeToString(publicKey), true); err != nil { - return err - } + c.metrics.ValidatorRemoved(pubKey) + c.onShareStop(pubKey) logger.Info("removed validator") return nil } -func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []uint64, toLiquidate []*ssvtypes.SSVShare) error { - logger := c.taskLogger("LiquidateCluster", - zap.String("owner", owner.String()), - zap.Uint64s("operator_ids", operatorIDs)) +func (c *controller) LiquidateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toLiquidate []*types.SSVShare) error { + logger := c.taskLogger("LiquidateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs)) for _, share := range toLiquidate { - // we can't remove the share secret from key-manager - // due to the fact that after activating the validators (ClusterReactivated) - // we don't have the encrypted keys to decrypt the secret, but only the owner address - if err := c.onShareRemove(hex.EncodeToString(share.ValidatorPubKey), false); err != nil { - return err - } - logger.With(fields.PubKey(share.ValidatorPubKey)).Debug("removed share") + c.onShareStop(share.ValidatorPubKey) + logger.With(fields.PubKey(share.ValidatorPubKey)).Debug("liquidated share") } return nil } -func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []uint64, toReactivate []*ssvtypes.SSVShare) error { - logger := c.taskLogger("ReactivateCluster", - zap.String("owner", owner.String()), - zap.Uint64s("operator_ids", operatorIDs)) +func (c *controller) ReactivateCluster(owner common.Address, operatorIDs []spectypes.OperatorID, toReactivate []*types.SSVShare) error { + logger := c.taskLogger("ReactivateCluster", fields.Owner(owner), fields.OperatorIDs(operatorIDs)) var startedValidators int var errs error @@ -100,17 +88,14 @@ func (c *controller) UpdateFeeRecipient(owner, recipient common.Address) error { zap.String("owner", owner.String()), zap.String("fee_recipient", recipient.String())) - err := c.validatorsMap.ForEach(func(v *validator.Validator) error { + c.validatorsMap.ForEach(func(v *validator.Validator) bool { if v.Share.OwnerAddress == owner { v.Share.FeeRecipientAddress = recipient logger.Debug("updated recipient address") } - return nil + return true }) - if err != nil { - return fmt.Errorf("update validators map: %w", err) - } return nil } diff --git a/operator/validator/validators_map.go b/operator/validator/validators_map.go deleted file mode 100644 index 02d351f39c..0000000000 --- a/operator/validator/validators_map.go +++ /dev/null @@ -1,126 +0,0 @@ -package validator - -// TODO(nkryuchkov): remove old validator interface(s) -import ( - "context" - "encoding/hex" - "fmt" - "sync" - - "github.com/bloxapp/ssv/logging/fields" - - "go.uber.org/zap" - - "github.com/bloxapp/ssv/protocol/v2/ssv/validator" - 
"github.com/bloxapp/ssv/protocol/v2/types" -) - -// validatorIterator is the function used to iterate over existing validators -type validatorIterator func(validator *validator.Validator) error - -// validatorsMap manages a collection of running validators -type validatorsMap struct { - ctx context.Context - - optsTemplate *validator.Options - - lock sync.RWMutex - validatorsMap map[string]*validator.Validator -} - -func newValidatorsMap(ctx context.Context, optsTemplate *validator.Options) *validatorsMap { - vm := validatorsMap{ - ctx: ctx, - lock: sync.RWMutex{}, - validatorsMap: make(map[string]*validator.Validator), - optsTemplate: optsTemplate, - } - - return &vm -} - -// ForEach loops over validators -func (vm *validatorsMap) ForEach(iterator validatorIterator) error { - vm.lock.RLock() - defer vm.lock.RUnlock() - - for _, val := range vm.validatorsMap { - if err := iterator(val); err != nil { - return err - } - } - return nil -} - -// GetValidator returns a validator -func (vm *validatorsMap) GetValidator(pubKey string) (*validator.Validator, bool) { - // main lock - vm.lock.RLock() - defer vm.lock.RUnlock() - - v, ok := vm.validatorsMap[pubKey] - - return v, ok -} - -// GetOrCreateValidator creates a new validator instance if not exist -func (vm *validatorsMap) GetOrCreateValidator(logger *zap.Logger, share *types.SSVShare) (*validator.Validator, error) { - // main lock - vm.lock.Lock() - defer vm.lock.Unlock() - - pubKey := hex.EncodeToString(share.ValidatorPubKey) - if v, ok := vm.validatorsMap[pubKey]; !ok { - if !share.HasBeaconMetadata() { - return nil, fmt.Errorf("beacon metadata is missing") - } - opts := *vm.optsTemplate - opts.SSVShare = share - - // Share context with both the validator and the runners, - // so that when the validator is stopped, the runners are stopped as well. 
- ctx, cancel := context.WithCancel(vm.ctx) - opts.DutyRunners = SetupRunners(ctx, logger, opts) - vm.validatorsMap[pubKey] = validator.NewValidator(ctx, cancel, opts) - - printShare(share, logger, "setup validator done") - opts.SSVShare = nil - } else { - printShare(v.Share, logger, "get validator") - } - - return vm.validatorsMap[pubKey], nil -} - -// RemoveValidator removes a validator instance from the map -func (vm *validatorsMap) RemoveValidator(pubKey string) *validator.Validator { - if v, found := vm.GetValidator(pubKey); found { - vm.lock.Lock() - defer vm.lock.Unlock() - - delete(vm.validatorsMap, pubKey) - return v - } - return nil -} - -// Size returns the number of validators in the map -func (vm *validatorsMap) Size() int { - vm.lock.RLock() - defer vm.lock.RUnlock() - - return len(vm.validatorsMap) -} - -func printShare(s *types.SSVShare, logger *zap.Logger, msg string) { - committee := make([]string, len(s.Committee)) - for i, c := range s.Committee { - committee[i] = fmt.Sprintf(`[OperatorID=%d, PubKey=%x]`, c.OperatorID, c.PubKey) - } - logger.Debug(msg, - fields.PubKey(s.ValidatorPubKey), - zap.Uint64("node_id", s.OperatorID), - zap.Strings("committee", committee), - fields.FeeRecipient(s.FeeRecipientAddress[:]), - ) -} diff --git a/operator/validatorsmap/validators_map.go b/operator/validatorsmap/validators_map.go new file mode 100644 index 0000000000..badc404b1c --- /dev/null +++ b/operator/validatorsmap/validators_map.go @@ -0,0 +1,110 @@ +package validatorsmap + +// TODO(nkryuchkov): remove old validator interface(s) +import ( + "context" + "sync" + + "github.com/bloxapp/ssv/protocol/v2/ssv/validator" +) + +// validatorIterator is the function used to iterate over existing validators +type validatorIterator func(validator *validator.Validator) bool + +// ValidatorsMap manages a collection of running validators +type ValidatorsMap struct { + ctx context.Context + lock sync.RWMutex + validatorsMap map[string]*validator.Validator +} + +func New(ctx context.Context, opts ...Option) *ValidatorsMap { + vm := &ValidatorsMap{ + ctx: ctx, + lock: sync.RWMutex{}, + validatorsMap: make(map[string]*validator.Validator), + } + + for _, opt := range opts { + opt(vm) + } + + return vm } + +// Option defines a ValidatorsMap configuration option. +type Option func(*ValidatorsMap) + +// WithInitialState sets the initial state +func WithInitialState(state map[string]*validator.Validator) Option { + return func(vm *ValidatorsMap) { + vm.validatorsMap = state + } +} + +// ForEach loops over validators +func (vm *ValidatorsMap) ForEach(iterator validatorIterator) bool { + vm.lock.RLock() + defer vm.lock.RUnlock() + + for _, val := range vm.validatorsMap { + if !iterator(val) { + return false + } + } + return true +} + +// GetAll returns all validators. 
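The exported ValidatorsMap keeps the old locking behavior but changes ForEach's contract from an error-returning iterator to a bool iterator above: returning false now stops the loop, replacing the old error-based early exit. A hedged sketch; findByOwner is a hypothetical helper:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"

	"github.com/bloxapp/ssv/operator/validatorsmap"
	"github.com/bloxapp/ssv/protocol/v2/ssv/validator"
)

// findByOwner returns the first running validator owned by the given address.
func findByOwner(vm *validatorsmap.ValidatorsMap, owner common.Address) *validator.Validator {
	var found *validator.Validator
	vm.ForEach(func(v *validator.Validator) bool {
		if v.Share.OwnerAddress == owner {
			found = v
			return false // first match: stop iterating
		}
		return true // keep going
	})
	return found
}
```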
+func (vm *ValidatorsMap) GetAll() []*validator.Validator { + vm.lock.RLock() + defer vm.lock.RUnlock() + + var validators []*validator.Validator + for _, val := range vm.validatorsMap { + validators = append(validators, val) + } + + return validators +} + +// GetValidator returns a validator +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) GetValidator(pubKey string) (*validator.Validator, bool) { + vm.lock.RLock() + defer vm.lock.RUnlock() + + v, ok := vm.validatorsMap[pubKey] + + return v, ok +} + +// CreateValidator creates a new validator instance +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) CreateValidator(pubKey string, v *validator.Validator) { + vm.lock.Lock() + defer vm.lock.Unlock() + + vm.validatorsMap[pubKey] = v +} + +// RemoveValidator removes a validator instance from the map +// TODO: pass spectypes.ValidatorPK instead of string +func (vm *ValidatorsMap) RemoveValidator(pubKey string) *validator.Validator { + if v, found := vm.GetValidator(pubKey); found { + vm.lock.Lock() + defer vm.lock.Unlock() + + delete(vm.validatorsMap, pubKey) + return v + } + return nil +} + +// Size returns the number of validators in the map +func (vm *ValidatorsMap) Size() int { + vm.lock.RLock() + defer vm.lock.RUnlock() + + return len(vm.validatorsMap) +} diff --git a/protocol/v2/blockchain/beacon/mock_client.go b/protocol/v2/blockchain/beacon/mock_client.go index 7360109bd1..2c8fa64f4d 100644 --- a/protocol/v2/blockchain/beacon/mock_client.go +++ b/protocol/v2/blockchain/beacon/mock_client.go @@ -643,6 +643,20 @@ func (mr *MockBeaconNodeMockRecorder) SubmitValidatorRegistration(pubkey, feeRec return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitValidatorRegistration", reflect.TypeOf((*MockBeaconNode)(nil).SubmitValidatorRegistration), pubkey, feeRecipient, sig) } +// SubmitVoluntaryExit mocks base method. +func (m *MockBeaconNode) SubmitVoluntaryExit(voluntaryExit *phase0.SignedVoluntaryExit, sig phase0.BLSSignature) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitVoluntaryExit", voluntaryExit, sig) + ret0, _ := ret[0].(error) + return ret0 +} + +// SubmitVoluntaryExit indicates an expected call of SubmitVoluntaryExit. +func (mr *MockBeaconNodeMockRecorder) SubmitVoluntaryExit(voluntaryExit, sig interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitVoluntaryExit", reflect.TypeOf((*MockBeaconNode)(nil).SubmitVoluntaryExit), voluntaryExit, sig) +} + // SyncCommitteeDuties mocks base method. func (m *MockBeaconNode) SyncCommitteeDuties(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) ([]*v1.SyncCommitteeDuty, error) { m.ctrl.T.Helper() diff --git a/protocol/v2/blockchain/beacon/mocks/network.go b/protocol/v2/blockchain/beacon/mocks/network.go index 0a129035f2..65c124cbf1 100644 --- a/protocol/v2/blockchain/beacon/mocks/network.go +++ b/protocol/v2/blockchain/beacon/mocks/network.go @@ -233,6 +233,20 @@ func (mr *MockBeaconNetworkMockRecorder) GetNetwork() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetwork", reflect.TypeOf((*MockBeaconNetwork)(nil).GetNetwork)) } +// GetSlotEndTime mocks base method. +func (m *MockBeaconNetwork) GetSlotEndTime(slot phase0.Slot) time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSlotEndTime", slot) + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetSlotEndTime indicates an expected call of GetSlotEndTime. 
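With GetSlotEndTime defined below as simply the start of slot+1, callers can compute the time remaining in a slot with a single call. A minimal sketch; timeLeftInSlot is a hypothetical helper:

```go
package example

import (
	"time"

	spectypes "github.com/bloxapp/ssv-spec/types"

	"github.com/bloxapp/ssv/protocol/v2/blockchain/beacon"
)

// timeLeftInSlot reports how long until the current slot ends. A slot's
// end is, by the definition below, the next slot's start.
func timeLeftInSlot() time.Duration {
	n := beacon.NewNetwork(spectypes.PraterNetwork)
	return time.Until(n.GetSlotEndTime(n.EstimatedCurrentSlot()))
}
```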
+func (mr *MockBeaconNetworkMockRecorder) GetSlotEndTime(slot interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSlotEndTime", reflect.TypeOf((*MockBeaconNetwork)(nil).GetSlotEndTime), slot) +} + // GetSlotStartTime mocks base method. func (m *MockBeaconNetwork) GetSlotStartTime(slot phase0.Slot) time.Time { m.ctrl.T.Helper() diff --git a/protocol/v2/blockchain/beacon/network.go b/protocol/v2/blockchain/beacon/network.go index e9f0c240c4..965890540f 100644 --- a/protocol/v2/blockchain/beacon/network.go +++ b/protocol/v2/blockchain/beacon/network.go @@ -29,6 +29,7 @@ type BeaconNetwork interface { EpochStartTime(epoch phase0.Epoch) time.Time GetSlotStartTime(slot phase0.Slot) time.Time + GetSlotEndTime(slot phase0.Slot) time.Time IsFirstSlotOfEpoch(slot phase0.Slot) bool GetEpochFirstSlot(epoch phase0.Epoch) phase0.Slot @@ -82,6 +83,11 @@ func (n Network) GetSlotStartTime(slot phase0.Slot) time.Time { return start } +// GetSlotEndTime returns the end time for the given slot +func (n Network) GetSlotEndTime(slot phase0.Slot) time.Time { + return n.GetSlotStartTime(slot + 1) +} + // EstimatedCurrentSlot returns the estimation of the current slot func (n Network) EstimatedCurrentSlot() phase0.Slot { return n.EstimatedSlotAtTime(time.Now().Unix()) diff --git a/protocol/v2/blockchain/beacon/network_test.go b/protocol/v2/blockchain/beacon/network_test.go new file mode 100644 index 0000000000..a5646bf36a --- /dev/null +++ b/protocol/v2/blockchain/beacon/network_test.go @@ -0,0 +1,19 @@ +package beacon + +import ( + "testing" + + "github.com/attestantio/go-eth2-client/spec/phase0" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/stretchr/testify/require" +) + +func TestNetwork_GetSlotEndTime(t *testing.T) { + slot := phase0.Slot(1) + + n := NewNetwork(spectypes.PraterNetwork) + slotStart := n.GetSlotStartTime(slot) + slotEnd := n.GetSlotEndTime(slot) + + require.Equal(t, n.SlotDurationSec(), slotEnd.Sub(slotStart)) +} diff --git a/protocol/v2/p2p/network.go b/protocol/v2/p2p/network.go index 8e9f99a78d..bd201dddda 100644 --- a/protocol/v2/p2p/network.go +++ b/protocol/v2/p2p/network.go @@ -132,21 +132,6 @@ func WithHandler(protocol SyncProtocol, handler RequestHandler) *SyncHandler { } } -// Syncer holds the interface for syncing data from other peers -type Syncer interface { - specqbft.Syncer - // GetHistory sync the given range from a set of peers that supports history for the given identifier - // it accepts a list of targets for the request. 
- GetHistory(logger *zap.Logger, mid spectypes.MessageID, from, to specqbft.Height, targets ...string) ([]SyncResult, specqbft.Height, error) - - // RegisterHandlers registers handler for the given protocol - RegisterHandlers(logger *zap.Logger, handlers ...*SyncHandler) - - // LastDecided fetches last decided from a random set of peers - // TODO: replace with specqbft.SyncHighestDecided - LastDecided(logger *zap.Logger, mid spectypes.MessageID) ([]SyncResult, error) -} - // MsgValidationResult helps other components to report message validation with a generic results scheme type MsgValidationResult int32 @@ -173,6 +158,8 @@ type ValidationReporting interface { type Network interface { Subscriber Broadcaster - Syncer ValidationReporting + + // RegisterHandlers registers handler for the given protocol + RegisterHandlers(logger *zap.Logger, handlers ...*SyncHandler) } diff --git a/protocol/v2/qbft/config.go b/protocol/v2/qbft/config.go index 580b3b03e2..21aae3df6b 100644 --- a/protocol/v2/qbft/config.go +++ b/protocol/v2/qbft/config.go @@ -3,6 +3,8 @@ package qbft import ( specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbftstorage "github.com/bloxapp/ssv/protocol/v2/qbft/storage" ) @@ -24,18 +26,21 @@ type IConfig interface { // GetStorage returns a storage instance GetStorage() qbftstorage.QBFTStore // GetTimer returns round timer - GetTimer() specqbft.Timer + GetTimer() roundtimer.Timer + // VerifySignatures returns if signature is checked + VerifySignatures() bool } type Config struct { - Signer spectypes.SSVSigner - SigningPK []byte - Domain spectypes.DomainType - ValueCheckF specqbft.ProposedValueCheckF - ProposerF specqbft.ProposerF - Storage qbftstorage.QBFTStore - Network specqbft.Network - Timer specqbft.Timer + Signer spectypes.SSVSigner + SigningPK []byte + Domain spectypes.DomainType + ValueCheckF specqbft.ProposedValueCheckF + ProposerF specqbft.ProposerF + Storage qbftstorage.QBFTStore + Network specqbft.Network + Timer roundtimer.Timer + SignatureVerification bool } // GetSigner returns a Signer instance @@ -74,6 +79,10 @@ func (c *Config) GetStorage() qbftstorage.QBFTStore { } // GetTimer returns round timer -func (c *Config) GetTimer() specqbft.Timer { +func (c *Config) GetTimer() roundtimer.Timer { return c.Timer } + +func (c *Config) VerifySignatures() bool { + return c.SignatureVerification +} diff --git a/protocol/v2/qbft/controller/controller.go b/protocol/v2/qbft/controller/controller.go index 84abc6600f..dd786dc993 100644 --- a/protocol/v2/qbft/controller/controller.go +++ b/protocol/v2/qbft/controller/controller.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/sha256" "encoding/json" + "fmt" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" @@ -24,14 +25,12 @@ type Controller struct { Identifier []byte Height specqbft.Height // incremental Height for InstanceContainer // StoredInstances stores the last HistoricalInstanceCapacity in an array for message processing purposes. 
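The reworked qbft.Config above swaps specqbft.Timer for the package-local roundtimer.Timer and makes signature verification an explicit switch. A hedged construction sketch; every value here is a placeholder, not the production wiring:

cfg := &qbft.Config{
	Signer:                signer, // spectypes.SSVSigner
	Domain:                domain, // spectypes.DomainType
	Storage:               store,  // qbftstorage.QBFTStore
	Network:               net,    // specqbft.Network
	Timer:                 timer,  // roundtimer.Timer
	SignatureVerification: true,   // consulted via cfg.VerifySignatures()
}

Setting SignatureVerification to false lets spec tests and local runs skip the comparatively expensive types.VerifyByOperators calls that commit.go, prepare.go, proposal.go, and round_change.go now gate on VerifySignatures(), as shown further down in this diff.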
- StoredInstances InstanceContainer - // FutureMsgsContainer holds all msgs from a higher height - FutureMsgsContainer map[spectypes.OperatorID]specqbft.Height // maps msg signer to height of higher height received msgs - Domain spectypes.DomainType - Share *spectypes.Share - NewDecidedHandler NewDecidedHandler `json:"-"` - config qbft.IConfig - fullNode bool + StoredInstances InstanceContainer + Domain spectypes.DomainType + Share *spectypes.Share + NewDecidedHandler NewDecidedHandler `json:"-"` + config qbft.IConfig + fullNode bool } func NewController( @@ -42,14 +41,13 @@ func NewController( fullNode bool, ) *Controller { return &Controller{ - Identifier: identifier, - Height: specqbft.FirstHeight, - Domain: domain, - Share: share, - StoredInstances: make(InstanceContainer, 0, InstanceContainerDefaultCapacity), - FutureMsgsContainer: make(map[spectypes.OperatorID]specqbft.Height), - config: config, - fullNode: fullNode, + Identifier: identifier, + Height: specqbft.FirstHeight, + Domain: domain, + Share: share, + StoredInstances: make(InstanceContainer, 0, InstanceContainerDefaultCapacity), + config: config, + fullNode: fullNode, } } @@ -100,10 +98,9 @@ func (c *Controller) ProcessMsg(logger *zap.Logger, msg *specqbft.SignedMessage) if IsDecidedMsg(c.Share, msg) { return c.UponDecided(logger, msg) } else if c.isFutureMessage(msg) { - return c.UponFutureMsg(logger, msg) - } else { - return c.UponExistingInstanceMsg(logger, msg) + return nil, fmt.Errorf("future msg from height, could not process") } + return c.UponExistingInstanceMsg(logger, msg) } func (c *Controller) UponExistingInstanceMsg(logger *zap.Logger, msg *specqbft.SignedMessage) (*specqbft.SignedMessage, error) { diff --git a/protocol/v2/qbft/controller/controller_test.go b/protocol/v2/qbft/controller/controller_test.go index 35c7a39d31..cd119c9d86 100644 --- a/protocol/v2/qbft/controller/controller_test.go +++ b/protocol/v2/qbft/controller/controller_test.go @@ -1,11 +1,18 @@ package controller import ( + "encoding/json" "testing" - "github.com/bloxapp/ssv/protocol/v2/qbft" - + specqbft "github.com/bloxapp/ssv-spec/qbft" + spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/protocol/v2/qbft" + "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + "github.com/bloxapp/ssv/protocol/v2/types" ) func TestController_Marshaling(t *testing.T) { @@ -25,3 +32,60 @@ func TestController_Marshaling(t *testing.T) { require.NoError(t, err) require.EqualValues(t, byts, bytsDecoded) } + +func TestController_OnTimeoutWithRoundCheck(t *testing.T) { + // Initialize logger + logger := logging.TestLogger(t) + + testConfig := &qbft.Config{ + Signer: spectestingutils.NewTestingKeyManager(), + Network: spectestingutils.NewTestingNetwork(), + Timer: roundtimer.NewTestingTimer(), + } + + share := spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()) + inst := instance.NewInstance( + testConfig, + share, + []byte{1, 2, 3, 4}, + specqbft.FirstHeight, + ) + + // Initialize Controller + contr := &Controller{} + + // Initialize EventMsg for the test + timeoutData := types.TimeoutData{ + Height: specqbft.FirstHeight, + Round: specqbft.FirstRound, + } + + data, err := json.Marshal(timeoutData) + require.NoError(t, err) + + msg := &types.EventMsg{ + Type: types.Timeout, + Data: data, + } + + // Simulate a scenario where the instance is at a higher round + inst.State.Round = 
specqbft.Round(2) + contr.StoredInstances.addNewInstance(inst) + + // Call OnTimeout and capture the error + err = contr.OnTimeout(logger, *msg) + + // Assert that the error is nil and the round did not bump + require.NoError(t, err) + require.Equal(t, specqbft.Round(2), inst.State.Round, "Round should not bump") + + // Simulate a scenario where the instance is at the same or lower round + inst.State.Round = specqbft.FirstRound + + // Call OnTimeout and capture the error + err = contr.OnTimeout(logger, *msg) + + // Assert that the error is nil and the round did bump + require.NoError(t, err) + require.Equal(t, specqbft.Round(2), inst.State.Round, "Round should bump") +} diff --git a/protocol/v2/qbft/controller/decided.go b/protocol/v2/qbft/controller/decided.go index 6c239a5a90..f9b694bc8e 100644 --- a/protocol/v2/qbft/controller/decided.go +++ b/protocol/v2/qbft/controller/decided.go @@ -67,8 +67,6 @@ func (c *Controller) UponDecided(logger *zap.Logger, msg *specqbft.SignedMessage } if isFutureDecided { - // sync gap - c.GetConfig().GetNetwork().SyncDecidedByRange(spectypes.MessageIDFromBytes(c.Identifier), c.Height, msg.Message.Height) // bump height c.Height = msg.Message.Height } diff --git a/protocol/v2/qbft/controller/future_msg.go b/protocol/v2/qbft/controller/future_msg.go deleted file mode 100644 index 30a205ff6e..0000000000 --- a/protocol/v2/qbft/controller/future_msg.go +++ /dev/null @@ -1,76 +0,0 @@ -package controller - -import ( - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/types" -) - -func (c *Controller) UponFutureMsg(logger *zap.Logger, msg *specqbft.SignedMessage) (*specqbft.SignedMessage, error) { - if err := ValidateFutureMsg(c.GetConfig(), msg, c.Share.Committee); err != nil { - return nil, errors.Wrap(err, "invalid future msg") - } - if !c.addHigherHeightMsg(msg) { - return nil, errors.New("discarded future msg") - } - if c.f1SyncTrigger() { - logger.Debug("🔀 triggered f+1 sync", - zap.Uint64("ctrl_height", uint64(c.Height)), - zap.Uint64("msg_height", uint64(msg.Message.Height))) - return nil, c.GetConfig().GetNetwork().SyncHighestDecided(spectypes.MessageIDFromBytes(c.Identifier)) - } - return nil, nil -} - -func ValidateFutureMsg( - config qbft.IConfig, - msg *specqbft.SignedMessage, - operators []*spectypes.Operator, -) error { - if err := msg.Validate(); err != nil { - return errors.Wrap(err, "invalid decided msg") - } - - if len(msg.GetSigners()) != 1 { - return errors.New("allows 1 signer") - } - - // verify signature - if err := types.VerifyByOperators(msg.Signature, msg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") - } - - return nil -} - -// addHigherHeightMsg verifies msg, cleanup queue and adds the message if unique signer -func (c *Controller) addHigherHeightMsg(msg *specqbft.SignedMessage) bool { - // cleanup lower height msgs - cleanedQueue := make(map[spectypes.OperatorID]specqbft.Height) - signerExists := false - for signer, height := range c.FutureMsgsContainer { - if height <= c.Height { - continue - } - - if signer == msg.GetSigners()[0] { - signerExists = true - } - cleanedQueue[signer] = height - } - - if !signerExists { - cleanedQueue[msg.GetSigners()[0]] = msg.Message.Height - } - c.FutureMsgsContainer = cleanedQueue - return !signerExists -} - -// f1SyncTrigger returns true if 
received f+1 higher height messages from unique signers -func (c *Controller) f1SyncTrigger() bool { - return c.Share.HasPartialQuorum(len(c.FutureMsgsContainer)) -} diff --git a/protocol/v2/qbft/controller/timer.go b/protocol/v2/qbft/controller/timer.go index f073fa813c..fa3ff1e4db 100644 --- a/protocol/v2/qbft/controller/timer.go +++ b/protocol/v2/qbft/controller/timer.go @@ -19,8 +19,13 @@ func (c *Controller) OnTimeout(logger *zap.Logger, msg types.EventMsg) error { if instance == nil { return errors.New("instance is nil") } - decided, _ := instance.IsDecided() - if decided { + + if timeoutData.Round < instance.State.Round { + logger.Debug("timeout for old round", zap.Uint64("timeout round", uint64(timeoutData.Round)), zap.Uint64("instance round", uint64(instance.State.Round))) + return nil + } + + if decided, _ := instance.IsDecided(); decided { return nil } return instance.UponRoundTimeout(logger) diff --git a/protocol/v2/qbft/instance/commit.go b/protocol/v2/qbft/instance/commit.go index 5620602ea6..53d4f5855e 100644 --- a/protocol/v2/qbft/instance/commit.go +++ b/protocol/v2/qbft/instance/commit.go @@ -158,9 +158,10 @@ func BaseCommitValidation( return errors.Wrap(err, "signed commit invalid") } - // verify signature - if err := types.VerifyByOperators(signedCommit.Signature, signedCommit, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedCommit.Signature, signedCommit, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/instance.go b/protocol/v2/qbft/instance/instance.go index 2513268e25..f0d99e92cd 100644 --- a/protocol/v2/qbft/instance/instance.go +++ b/protocol/v2/qbft/instance/instance.go @@ -66,7 +66,7 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh i.State.Height = height i.metrics.StartStage() - i.config.GetTimer().TimeoutForRound(specqbft.FirstRound) + i.config.GetTimer().TimeoutForRound(height, specqbft.FirstRound) logger = logger.With( fields.Round(i.State.Round), @@ -95,13 +95,9 @@ func (i *Instance) Start(logger *zap.Logger, value []byte, height specqbft.Heigh } func (i *Instance) Broadcast(logger *zap.Logger, msg *specqbft.SignedMessage) error { - // logger.Debug("Broadcast", - // zap.Any("MsgType", msg.Message.MsgType), - // fields.Round(msg.Message.Round), - // zap.Any("DataRound", msg.Message.DataRound), - // fields.Height(msg.Message.Height), - // ) - + if !i.CanProcessMessages() { + return errors.New("instance stopped processing messages") + } byts, err := msg.Encode() if err != nil { return errors.Wrap(err, "could not encode message") diff --git a/protocol/v2/qbft/instance/marshalutils.go b/protocol/v2/qbft/instance/marshalutils.go new file mode 100644 index 0000000000..ba76e75453 --- /dev/null +++ b/protocol/v2/qbft/instance/marshalutils.go @@ -0,0 +1,47 @@ +package instance + +import "encoding/json" + +/////////////////////// JSON Marshalling for Tests /////////////////////// + +// region: JSON Marshalling for Instance + +// MarshalJSON is a custom JSON marshaller for Instance +func (i *Instance) MarshalJSON() ([]byte, error) { + type Alias Instance + if i.forceStop { + return json.Marshal(&struct { + ForceStop bool `json:"forceStop"` + *Alias + }{ + ForceStop: i.forceStop, + Alias: (*Alias)(i), + }) + } else { + return 
json.Marshal(&struct { + *Alias + }{ + Alias: (*Alias)(i), + }) + } +} + +// UnmarshalJSON is a custom JSON unmarshaller for Instance +func (i *Instance) UnmarshalJSON(data []byte) error { + type Alias Instance + aux := &struct { + ForceStop *bool `json:"forceStop,omitempty"` + *Alias + }{ + Alias: (*Alias)(i), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ForceStop != nil { + i.forceStop = *aux.ForceStop + } + return nil +} + +// endregion: JSON Marshalling for Instance diff --git a/protocol/v2/qbft/instance/metrics.go b/protocol/v2/qbft/instance/metrics.go index e2598671ad..e32e49a872 100644 --- a/protocol/v2/qbft/instance/metrics.go +++ b/protocol/v2/qbft/instance/metrics.go @@ -2,13 +2,13 @@ package instance import ( "encoding/hex" - "go.uber.org/zap" "time" specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "go.uber.org/zap" ) var ( @@ -50,7 +50,7 @@ func newMetrics(msgID spectypes.MessageID) *metrics { proposalDuration: metricsStageDuration.WithLabelValues("proposal", hexPubKey), prepareDuration: metricsStageDuration.WithLabelValues("prepare", hexPubKey), commitDuration: metricsStageDuration.WithLabelValues("commit", hexPubKey), - round: metricsRound.WithLabelValues("validator", hexPubKey), + round: metricsRound.WithLabelValues(msgID.GetRoleType().String(), hexPubKey), } } diff --git a/protocol/v2/qbft/instance/prepare.go b/protocol/v2/qbft/instance/prepare.go index 7714771b88..55748b33c2 100644 --- a/protocol/v2/qbft/instance/prepare.go +++ b/protocol/v2/qbft/instance/prepare.go @@ -159,8 +159,10 @@ func validSignedPrepareForHeightRoundAndRoot( return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedPrepare.Signature, signedPrepare, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedPrepare.Signature, signedPrepare, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } return nil diff --git a/protocol/v2/qbft/instance/proposal.go b/protocol/v2/qbft/instance/proposal.go index a417c04fc4..a4b5303ada 100644 --- a/protocol/v2/qbft/instance/proposal.go +++ b/protocol/v2/qbft/instance/proposal.go @@ -10,7 +10,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/types" + ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" ) // uponProposal process proposal message @@ -33,7 +33,7 @@ func (i *Instance) uponProposal(logger *zap.Logger, signedProposal *specqbft.Sig // A future justified proposal should bump us into future round and reset timer if signedProposal.Message.Round > i.State.Round { - i.config.GetTimer().TimeoutForRound(signedProposal.Message.Round) + i.config.GetTimer().TimeoutForRound(signedProposal.Message.Height, signedProposal.Message.Round) } i.bumpToRound(newRound) @@ -77,8 +77,10 @@ func isValidProposal( if len(signedProposal.GetSigners()) != 1 { return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedProposal.Signature, signedProposal, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if 
err := ssvtypes.VerifyByOperators(signedProposal.Signature, signedProposal, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, operators); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } if !signedProposal.MatchedSigners([]spectypes.OperatorID{proposer(state, config, signedProposal.Message.Round)}) { return errors.New("proposal leader invalid") @@ -121,6 +123,30 @@ func isValidProposal( return errors.New("proposal is not valid with current state") } +func IsProposalJustification( + config qbft.IConfig, + share *ssvtypes.SSVShare, + roundChangeMsgs []*specqbft.SignedMessage, + prepareMsgs []*specqbft.SignedMessage, + height specqbft.Height, + round specqbft.Round, + fullData []byte, +) error { + return isProposalJustification( + &specqbft.State{ + Share: &share.Share, + Height: height, + }, + config, + roundChangeMsgs, + prepareMsgs, + height, + round, + fullData, + func(data []byte) error { return nil }, + ) +} + // isProposalJustification returns nil if the proposal and round change messages are valid and justify a proposal message for the provided round, value and leader func isProposalJustification( state *specqbft.State, @@ -256,7 +282,7 @@ func CreateProposal(state *specqbft.State, config qbft.IConfig, fullData []byte, } sig, err := config.GetSigner().SignRoot(msg, spectypes.QBFTSignatureType, state.Share.SharePubKey) if err != nil { - return nil, errors.Wrap(err, "failed signing prepare msg") + return nil, errors.Wrap(err, "failed signing proposal msg") } signedMsg := &specqbft.SignedMessage{ diff --git a/protocol/v2/qbft/instance/round_change.go b/protocol/v2/qbft/instance/round_change.go index 5b0de2e3c9..00cd676b3d 100644 --- a/protocol/v2/qbft/instance/round_change.go +++ b/protocol/v2/qbft/instance/round_change.go @@ -30,8 +30,11 @@ func (i *Instance) uponRoundChange( return nil // UponCommit was already called } - logger = logger.With(fields.Round(i.State.Round), - fields.Height(i.State.Height)) + logger = logger.With( + fields.Round(i.State.Round), + fields.Height(i.State.Height), + zap.Uint64("msg_round", uint64(signedRoundChange.Message.Round)), + ) logger.Debug("🔄 got round change", fields.Root(signedRoundChange.Message.Root), @@ -85,7 +88,9 @@ func (i *Instance) uponRoundChange( func (i *Instance) uponChangeRoundPartialQuorum(logger *zap.Logger, newRound specqbft.Round, instanceStartValue []byte) error { i.bumpToRound(newRound) i.State.ProposalAcceptedForCurrentRound = nil - i.config.GetTimer().TimeoutForRound(i.State.Round) + + i.config.GetTimer().TimeoutForRound(i.State.Height, i.State.Round) + roundChange, err := CreateRoundChange(i.State, i.config, newRound, instanceStartValue) if err != nil { return errors.Wrap(err, "failed to create round change message") @@ -247,8 +252,10 @@ func validRoundChangeForData( return errors.New("msg allows 1 signer") } - if err := types.VerifyByOperators(signedMsg.Signature, signedMsg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, state.Share.Committee); err != nil { - return errors.Wrap(err, "msg signature invalid") + if config.VerifySignatures() { + if err := types.VerifyByOperators(signedMsg.Signature, signedMsg, config.GetSignatureDomainType(), spectypes.QBFTSignatureType, state.Share.Committee); err != nil { + return errors.Wrap(err, "msg signature invalid") + } } if err := signedMsg.Message.Validate(); err != nil { @@ -377,7 +384,7 @@ func CreateRoundChange(state *specqbft.State, config qbft.IConfig, newRound spec } sig, err := config.GetSigner().SignRoot(msg, 
spectypes.QBFTSignatureType, state.Share.SharePubKey) if err != nil { - return nil, errors.Wrap(err, "failed signing prepare msg") + return nil, errors.Wrap(err, "failed signing round change msg") } signedMsg := &specqbft.SignedMessage{ diff --git a/protocol/v2/qbft/instance/timeout.go b/protocol/v2/qbft/instance/timeout.go index ee8e9248b7..62ae4c784c 100644 --- a/protocol/v2/qbft/instance/timeout.go +++ b/protocol/v2/qbft/instance/timeout.go @@ -1,9 +1,10 @@ package instance import ( - "github.com/bloxapp/ssv/logging/fields" "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/logging/fields" ) var CutoffRound = 15 // stop processing instances after 8*2+120*6 = 14.2 min (~ 2 epochs) @@ -22,7 +23,7 @@ func (i *Instance) UponRoundTimeout(logger *zap.Logger) error { defer func() { i.bumpToRound(newRound) i.State.ProposalAcceptedForCurrentRound = nil - i.config.GetTimer().TimeoutForRound(i.State.Round) + i.config.GetTimer().TimeoutForRound(i.State.Height, i.State.Round) }() roundChange, err := CreateRoundChange(i.State, i.config, newRound, i.StartValue) diff --git a/protocol/v2/qbft/roundtimer/mocks/timer.go b/protocol/v2/qbft/roundtimer/mocks/timer.go new file mode 100644 index 0000000000..2a691f9ab6 --- /dev/null +++ b/protocol/v2/qbft/roundtimer/mocks/timer.go @@ -0,0 +1,100 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ./timer.go + +// Package mocks is a generated GoMock package. +package mocks + +import ( + reflect "reflect" + time "time" + + phase0 "github.com/attestantio/go-eth2-client/spec/phase0" + qbft "github.com/bloxapp/ssv-spec/qbft" + gomock "github.com/golang/mock/gomock" +) + +// MockTimer is a mock of Timer interface. +type MockTimer struct { + ctrl *gomock.Controller + recorder *MockTimerMockRecorder +} + +// MockTimerMockRecorder is the mock recorder for MockTimer. +type MockTimerMockRecorder struct { + mock *MockTimer +} + +// NewMockTimer creates a new mock instance. +func NewMockTimer(ctrl *gomock.Controller) *MockTimer { + mock := &MockTimer{ctrl: ctrl} + mock.recorder = &MockTimerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTimer) EXPECT() *MockTimerMockRecorder { + return m.recorder +} + +// TimeoutForRound mocks base method. +func (m *MockTimer) TimeoutForRound(height qbft.Height, round qbft.Round) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "TimeoutForRound", height, round) +} + +// TimeoutForRound indicates an expected call of TimeoutForRound. +func (mr *MockTimerMockRecorder) TimeoutForRound(height, round interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeoutForRound", reflect.TypeOf((*MockTimer)(nil).TimeoutForRound), height, round) +} + +// MockBeaconNetwork is a mock of BeaconNetwork interface. +type MockBeaconNetwork struct { + ctrl *gomock.Controller + recorder *MockBeaconNetworkMockRecorder +} + +// MockBeaconNetworkMockRecorder is the mock recorder for MockBeaconNetwork. +type MockBeaconNetworkMockRecorder struct { + mock *MockBeaconNetwork +} + +// NewMockBeaconNetwork creates a new mock instance. +func NewMockBeaconNetwork(ctrl *gomock.Controller) *MockBeaconNetwork { + mock := &MockBeaconNetwork{ctrl: ctrl} + mock.recorder = &MockBeaconNetworkMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
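The generated MockTimer below makes it straightforward to assert that consensus code arms the timer with the new (height, round) pair. A usage sketch with standard gomock (the test body is illustrative):

ctrl := gomock.NewController(t)
timer := mocks.NewMockTimer(ctrl)
// expect exactly one reset for the first round of the first height
timer.EXPECT().TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound).Times(1)
// wire `timer` into qbft.Config{Timer: timer, ...} and run the code under test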
+func (m *MockBeaconNetwork) EXPECT() *MockBeaconNetworkMockRecorder { + return m.recorder +} + +// GetSlotStartTime mocks base method. +func (m *MockBeaconNetwork) GetSlotStartTime(slot phase0.Slot) time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSlotStartTime", slot) + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetSlotStartTime indicates an expected call of GetSlotStartTime. +func (mr *MockBeaconNetworkMockRecorder) GetSlotStartTime(slot interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSlotStartTime", reflect.TypeOf((*MockBeaconNetwork)(nil).GetSlotStartTime), slot) +} + +// SlotDurationSec mocks base method. +func (m *MockBeaconNetwork) SlotDurationSec() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SlotDurationSec") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// SlotDurationSec indicates an expected call of SlotDurationSec. +func (mr *MockBeaconNetworkMockRecorder) SlotDurationSec() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SlotDurationSec", reflect.TypeOf((*MockBeaconNetwork)(nil).SlotDurationSec)) +} diff --git a/protocol/v2/qbft/roundtimer/testing_timer.go b/protocol/v2/qbft/roundtimer/testing_timer.go new file mode 100644 index 0000000000..310a072aa3 --- /dev/null +++ b/protocol/v2/qbft/roundtimer/testing_timer.go @@ -0,0 +1,23 @@ +package roundtimer + +import specqbft "github.com/bloxapp/ssv-spec/qbft" + +type TimerState struct { + Timeouts int + Round specqbft.Round +} + +type TestQBFTTimer struct { + State TimerState +} + +func NewTestingTimer() Timer { + return &TestQBFTTimer{ + State: TimerState{}, + } +} + +func (t *TestQBFTTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { + t.State.Timeouts++ + t.State.Round = round +} diff --git a/protocol/v2/qbft/roundtimer/timer.go b/protocol/v2/qbft/roundtimer/timer.go index df0463e695..fde166f3dc 100644 --- a/protocol/v2/qbft/roundtimer/timer.go +++ b/protocol/v2/qbft/roundtimer/timer.go @@ -6,25 +6,36 @@ import ( "sync/atomic" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" ) -type RoundTimeoutFunc func(specqbft.Round) time.Duration +//go:generate mockgen -package=mocks -destination=./mocks/timer.go -source=./timer.go -var ( - quickTimeoutThreshold = specqbft.Round(8) - quickTimeout = 2 * time.Second - slowTimeout = 2 * time.Minute +type OnRoundTimeoutF func(round specqbft.Round) + +const ( + QuickTimeoutThreshold = specqbft.Round(8) + QuickTimeout = 2 * time.Second + SlowTimeout = 2 * time.Minute ) -// RoundTimeout returns the number of seconds until next timeout for a give round. 
-// if the round is smaller than 8 -> 2s; otherwise -> 2m -// see SIP https://github.com/bloxapp/SIPs/pull/22 -func RoundTimeout(r specqbft.Round) time.Duration { - if r <= quickTimeoutThreshold { - return quickTimeout - } - return slowTimeout +// Timer is an interface for a round timer, calling the UponRoundTimeout when times out +type Timer interface { + // TimeoutForRound will reset running timer if exists and will start a new timer for a specific round + TimeoutForRound(height specqbft.Height, round specqbft.Round) +} + +type BeaconNetwork interface { + GetSlotStartTime(slot phase0.Slot) time.Time + SlotDurationSec() time.Duration +} + +type TimeoutOptions struct { + quickThreshold specqbft.Round + quick time.Duration + slow time.Duration } // RoundTimer helps to manage current instance rounds. @@ -36,28 +47,98 @@ type RoundTimer struct { // timer is the underlying time.Timer timer *time.Timer // result holds the result of the timer - done func() + done OnRoundTimeoutF // round is the current round of the timer round int64 - - roundTimeout RoundTimeoutFunc + // timeoutOptions holds the timeoutOptions for the timer + timeoutOptions TimeoutOptions + // role is the role of the instance + role spectypes.BeaconRole + // beaconNetwork is the beacon network + beaconNetwork BeaconNetwork } // New creates a new instance of RoundTimer. -func New(pctx context.Context, done func()) *RoundTimer { +func New(pctx context.Context, beaconNetwork BeaconNetwork, role spectypes.BeaconRole, done OnRoundTimeoutF) *RoundTimer { ctx, cancelCtx := context.WithCancel(pctx) return &RoundTimer{ - mtx: &sync.RWMutex{}, - ctx: ctx, - cancelCtx: cancelCtx, - timer: nil, - done: done, - roundTimeout: RoundTimeout, + mtx: &sync.RWMutex{}, + ctx: ctx, + cancelCtx: cancelCtx, + timer: nil, + done: done, + role: role, + beaconNetwork: beaconNetwork, + timeoutOptions: TimeoutOptions{ + quickThreshold: QuickTimeoutThreshold, + quick: QuickTimeout, + slow: SlowTimeout, + }, } } +// RoundTimeout calculates the timeout duration for a specific role, height, and round. +// +// Timeout Rules: +// - For roles BNRoleAttester and BNRoleSyncCommittee, the base timeout is 1/3 of the slot duration. +// - For roles BNRoleAggregator and BNRoleSyncCommitteeContribution, the base timeout is 2/3 of the slot duration. +// - For role BNRoleProposer, the timeout is either quickTimeout or slowTimeout, depending on the round. +// +// Additional Timeout: +// - For rounds less than or equal to quickThreshold, the additional timeout is 'quick' seconds. +// - For rounds greater than quickThreshold, the additional timeout is 'slow' seconds. +// +// SIP Reference: +// For more details, see SIP at https://github.com/bloxapp/SIPs/pull/22 +// +// TODO: Update SIP for Deterministic Round Timeout +// TODO: Decide if to make the proposer timeout deterministic +// +// Synchronization Note: +// To ensure synchronized timeouts across instances, the timeout is based on the duty start time, +// which is calculated from the slot height. The base timeout is set based on the role, +// and the additional timeout is added based on the round number. 
+func (t *RoundTimer) RoundTimeout(height specqbft.Height, round specqbft.Round) time.Duration { + // Initialize duration to zero + var baseDuration time.Duration + + // Set base duration based on role + switch t.role { + case spectypes.BNRoleAttester, spectypes.BNRoleSyncCommittee: + // one third of the slot time + baseDuration = t.beaconNetwork.SlotDurationSec() / 3 + case spectypes.BNRoleAggregator, spectypes.BNRoleSyncCommitteeContribution: + // two thirds of the slot time + baseDuration = t.beaconNetwork.SlotDurationSec() / 3 * 2 + default: + if round <= t.timeoutOptions.quickThreshold { + return t.timeoutOptions.quick + } + return t.timeoutOptions.slow + } + + // Calculate additional timeout based on round + var additionalTimeout time.Duration + if round <= t.timeoutOptions.quickThreshold { + additionalTimeout = time.Duration(int(round)) * t.timeoutOptions.quick + } else { + quickPortion := time.Duration(t.timeoutOptions.quickThreshold) * t.timeoutOptions.quick + slowPortion := time.Duration(int(round-t.timeoutOptions.quickThreshold)) * t.timeoutOptions.slow + additionalTimeout = quickPortion + slowPortion + } + + // Combine base duration and additional timeout + timeoutDuration := baseDuration + additionalTimeout + + // Get the start time of the duty + dutyStartTime := t.beaconNetwork.GetSlotStartTime(phase0.Slot(height)) + + // Calculate the time until the duty should start plus the timeout duration + return time.Until(dutyStartTime.Add(timeoutDuration)) +} + // OnTimeout sets a function called on timeout. -func (t *RoundTimer) OnTimeout(done func()) { +func (t *RoundTimer) OnTimeout(done OnRoundTimeoutF) { t.mtx.Lock() // write to t.done defer t.mtx.Unlock() @@ -70,9 +151,10 @@ func (t *RoundTimer) Round() specqbft.Round { } // TimeoutForRound times out for a given round.
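Worked example, assuming mainnet's 12-second slots and the defaults above (quickThreshold 8, quick 2s, slow 2m): an attester at round 2 gets baseDuration = 12s/3 = 4s plus additionalTimeout = 2 × 2s = 4s, so its timer fires 8 seconds after the duty's slot start; at round 9 the additional part becomes 8 × 2s + 1 × 2m = 136s. Because the deadline is anchored to GetSlotStartTime(height) rather than to the moment TimeoutForRound happens to be called, operators that start an instance late still expire at the same wall-clock instant, which is what the multi-timer test further below relies on.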
-func (t *RoundTimer) TimeoutForRound(round specqbft.Round) { +func (t *RoundTimer) TimeoutForRound(height specqbft.Height, round specqbft.Round) { atomic.StoreInt64(&t.round, int64(round)) - timeout := t.roundTimeout(round) + timeout := t.RoundTimeout(height, round) + // preparing the underlying timer timer := t.timer if timer == nil { @@ -101,7 +183,7 @@ func (t *RoundTimer) waitForRound(round specqbft.Round, timeout <-chan time.Time t.mtx.RLock() // read t.done defer t.mtx.RUnlock() if done := t.done; done != nil { - done() + done(round) } }() } diff --git a/protocol/v2/qbft/roundtimer/timer_test.go b/protocol/v2/qbft/roundtimer/timer_test.go index 8c41410db1..25ce776631 100644 --- a/protocol/v2/qbft/roundtimer/timer_test.go +++ b/protocol/v2/qbft/roundtimer/timer_test.go @@ -2,45 +2,167 @@ package roundtimer import ( "context" + "fmt" + "sync" "sync/atomic" "testing" "time" + "github.com/attestantio/go-eth2-client/spec/phase0" specqbft "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" + + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer/mocks" ) -func TestRoundTimer_TimeoutForRound(t *testing.T) { - t.Run("TimeoutForRound", func(t *testing.T) { - count := int32(0) - onTimeout := func() { - atomic.AddInt32(&count, 1) - } - timer := New(context.Background(), onTimeout) - timer.roundTimeout = func(round specqbft.Round) time.Duration { - return 1100 * time.Millisecond - } - timer.TimeoutForRound(specqbft.Round(1)) - require.Equal(t, int32(0), atomic.LoadInt32(&count)) - <-time.After(timer.roundTimeout(specqbft.Round(1)) + time.Millisecond*10) - require.Equal(t, int32(1), atomic.LoadInt32(&count)) - }) - - t.Run("timeout round before elapsed", func(t *testing.T) { - count := int32(0) - onTimeout := func() { - atomic.AddInt32(&count, 1) - } - timer := New(context.Background(), onTimeout) - timer.roundTimeout = func(round specqbft.Round) time.Duration { - return 1100 * time.Millisecond +func TestTimeoutForRound(t *testing.T) { + roles := []spectypes.BeaconRole{ + spectypes.BNRoleAttester, + spectypes.BNRoleAggregator, + spectypes.BNRoleProposer, + spectypes.BNRoleSyncCommittee, + spectypes.BNRoleSyncCommitteeContribution, + } + + for _, role := range roles { + t.Run(fmt.Sprintf("TimeoutForRound - %s: <= quickTimeoutThreshold", role), func(t *testing.T) { + testTimeoutForRound(t, role, specqbft.Round(1)) + }) + + t.Run(fmt.Sprintf("TimeoutForRound - %s: > quickTimeoutThreshold", role), func(t *testing.T) { + testTimeoutForRound(t, role, specqbft.Round(2)) + }) + + t.Run(fmt.Sprintf("TimeoutForRound - %s: before elapsed", role), func(t *testing.T) { + testTimeoutForRoundElapsed(t, role, specqbft.Round(2)) + }) + + // TODO: Decide if to make the proposer timeout deterministic + // Proposer role is not tested for multiple synchronized timers since it's not deterministic + if role == spectypes.BNRoleProposer { + continue } - timer.TimeoutForRound(specqbft.Round(1)) - <-time.After(timer.roundTimeout(specqbft.Round(1)) / 2) - timer.TimeoutForRound(specqbft.Round(2)) // reset before elapsed - require.Equal(t, int32(0), atomic.LoadInt32(&count)) - <-time.After(timer.roundTimeout(specqbft.Round(2)) + time.Millisecond*10) - require.Equal(t, int32(1), atomic.LoadInt32(&count)) - }) + t.Run(fmt.Sprintf("TimeoutForRound - %s: multiple synchronized timers", role), func(t *testing.T) { + testTimeoutForRoundMulti(t, role, specqbft.Round(1)) + }) + } +} + +func setupMockBeaconNetwork(t *testing.T) 
*mocks.MockBeaconNetwork { + ctrl := gomock.NewController(t) + mockBeaconNetwork := mocks.NewMockBeaconNetwork(ctrl) + + mockBeaconNetwork.EXPECT().SlotDurationSec().Return(120 * time.Millisecond).AnyTimes() + mockBeaconNetwork.EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) time.Time { + return time.Now() + }, + ).AnyTimes() + return mockBeaconNetwork +} + +func setupTimer(mockBeaconNetwork *mocks.MockBeaconNetwork, onTimeout OnRoundTimeoutF, role spectypes.BeaconRole, round specqbft.Round) *RoundTimer { + timer := New(context.Background(), mockBeaconNetwork, role, onTimeout) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: round, + quick: 100 * time.Millisecond, + slow: 200 * time.Millisecond, + } + + return timer +} + +func testTimeoutForRound(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + mockBeaconNetwork := setupMockBeaconNetwork(t) + + count := int32(0) + onTimeout := func(round specqbft.Round) { + atomic.AddInt32(&count, 1) + } + + timer := setupTimer(mockBeaconNetwork, onTimeout, role, threshold) + + timer.TimeoutForRound(specqbft.FirstHeight, threshold) + require.Equal(t, int32(0), atomic.LoadInt32(&count)) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, threshold) + time.Millisecond*10) + require.Equal(t, int32(1), atomic.LoadInt32(&count)) +} + +func testTimeoutForRoundElapsed(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + mockBeaconNetwork := setupMockBeaconNetwork(t) + + count := int32(0) + onTimeout := func(round specqbft.Round) { + atomic.AddInt32(&count, 1) + } + + timer := setupTimer(mockBeaconNetwork, onTimeout, role, threshold) + + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.FirstRound) / 2) + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.Round(2)) // reset before elapsed + require.Equal(t, int32(0), atomic.LoadInt32(&count)) + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.Round(2)) + time.Millisecond*10) + require.Equal(t, int32(1), atomic.LoadInt32(&count)) +} + +func testTimeoutForRoundMulti(t *testing.T, role spectypes.BeaconRole, threshold specqbft.Round) { + ctrl := gomock.NewController(t) + mockBeaconNetwork := mocks.NewMockBeaconNetwork(ctrl) + + var count int32 + var timestamps = make([]int64, 4) + var mu sync.Mutex + + onTimeout := func(index int) { + atomic.AddInt32(&count, 1) + mu.Lock() + timestamps[index] = time.Now().UnixNano() + mu.Unlock() + } + + timeNow := time.Now() + mockBeaconNetwork.EXPECT().SlotDurationSec().Return(100 * time.Millisecond).AnyTimes() + mockBeaconNetwork.EXPECT().GetSlotStartTime(gomock.Any()).DoAndReturn( + func(slot phase0.Slot) time.Time { + return timeNow + }, + ).AnyTimes() + + var wg sync.WaitGroup + for i := 0; i < 4; i++ { + wg.Add(1) + go func(index int) { + timer := New(context.Background(), mockBeaconNetwork, role, func(round specqbft.Round) { onTimeout(index) }) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: threshold, + quick: 100 * time.Millisecond, + } + timer.TimeoutForRound(specqbft.FirstHeight, specqbft.FirstRound) + wg.Done() + }(i) + time.Sleep(time.Millisecond * 10) // Introduce a sleep between creating timers + } + + wg.Wait() // Wait for all go-routines to finish + + timer := New(context.Background(), mockBeaconNetwork, role, nil) + timer.timeoutOptions = TimeoutOptions{ + quickThreshold: specqbft.Round(1), + quick: 100 * time.Millisecond, + } + + // Wait a bit more than the expected timeout to 
ensure all timers have triggered + <-time.After(timer.RoundTimeout(specqbft.FirstHeight, specqbft.FirstRound) + time.Millisecond*100) + + require.Equal(t, int32(4), atomic.LoadInt32(&count), "All four timers should have triggered") + + mu.Lock() + for i := 1; i < 4; i++ { + require.InDelta(t, timestamps[0], timestamps[i], float64(time.Millisecond*10), "All four timers should expire nearly at the same time") + } + mu.Unlock() } diff --git a/protocol/v2/qbft/spectest/controller_sync_type.go b/protocol/v2/qbft/spectest/controller_sync_type.go deleted file mode 100644 index 08fc7b2332..0000000000 --- a/protocol/v2/qbft/spectest/controller_sync_type.go +++ /dev/null @@ -1,55 +0,0 @@ -package qbft - -import ( - "encoding/hex" - "testing" - - qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" - "github.com/bloxapp/ssv/protocol/v2/types" - - "github.com/bloxapp/ssv-spec/qbft/spectest/tests/controller/futuremsg" - spectypes "github.com/bloxapp/ssv-spec/types" - spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" - "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/logging/fields" - "github.com/stretchr/testify/require" -) - -func RunControllerSync(t *testing.T, test *futuremsg.ControllerSyncSpecTest) { - logger := logging.TestLogger(t) - identifier := spectypes.NewMsgID(types.GetDefaultDomain(), spectestingutils.TestingValidatorPubKey[:], spectypes.BNRoleAttester) - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), identifier.GetRoleType()) - contr := qbfttesting.NewTestingQBFTController( - identifier[:], - spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), - config, - false, - ) - - err := contr.StartNewInstance(logger, 0, []byte{1, 2, 3, 4}) - if err != nil { - t.Fatalf(err.Error()) - } - - var lastErr error - for _, msg := range test.InputMessages { - logger = logger.With(fields.Height(msg.Message.Height)) - _, err := contr.ProcessMsg(logger, msg) - if err != nil { - lastErr = err - } - } - - syncedDecidedCnt := config.GetNetwork().(*spectestingutils.TestingNetwork).SyncHighestDecidedCnt - require.EqualValues(t, test.SyncDecidedCalledCnt, syncedDecidedCnt) - - r, err := contr.GetRoot() - require.NoError(t, err) - require.EqualValues(t, test.ControllerPostRoot, hex.EncodeToString(r[:])) - - if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) - } else { - require.NoError(t, lastErr) - } -} diff --git a/protocol/v2/qbft/spectest/controller_type.go b/protocol/v2/qbft/spectest/controller_type.go index 0d32a545c2..a919cc104b 100644 --- a/protocol/v2/qbft/spectest/controller_type.go +++ b/protocol/v2/qbft/spectest/controller_type.go @@ -3,6 +3,10 @@ package qbft import ( "bytes" "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" "reflect" "testing" @@ -10,29 +14,32 @@ import ( spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/stretchr/testify/require" "go.uber.org/zap" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { + //temporary 
to override state comparisons from file not inputted one + overrideStateComparisonForControllerSpecTest(t, test) + logger := logging.TestLogger(t) - identifier := []byte{1, 2, 3, 4} - config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.BNRoleAttester) - contr := qbfttesting.NewTestingQBFTController( - identifier[:], - spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), - config, - false, - ) + contr := generateController(logger) var lastErr error for i, runData := range test.RunInstanceData { - if err := runInstanceWithData(t, logger, specqbft.Height(i), contr, config, identifier, runData); err != nil { + height := specqbft.Height(i) + if runData.Height != nil { + height = *runData.Height + } + if err := runInstanceWithData(t, logger, height, contr, runData); err != nil { lastErr = err } } @@ -44,13 +51,24 @@ func RunControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { } } +func generateController(logger *zap.Logger) *controller.Controller { + identifier := []byte{1, 2, 3, 4} + config := qbfttesting.TestingConfig(logger, spectestingutils.Testing4SharesSet(), spectypes.BNRoleAttester) + return qbfttesting.NewTestingQBFTController( + identifier[:], + spectestingutils.TestingShare(spectestingutils.Testing4SharesSet()), + config, + false, + ) +} + func testTimer( t *testing.T, config *qbft.Config, runData *spectests.RunInstanceData, ) { if runData.ExpectedTimerState != nil { - if timer, ok := config.GetTimer().(*spectestingutils.TestQBFTTimer); ok { + if timer, ok := config.GetTimer().(*roundtimer.TestQBFTTimer); ok { require.Equal(t, runData.ExpectedTimerState.Timeouts, timer.State.Timeouts) require.Equal(t, runData.ExpectedTimerState.Round, timer.State.Round) } @@ -79,13 +97,6 @@ func testProcessMsg( } require.EqualValues(t, runData.ExpectedDecidedState.DecidedCnt, decidedCnt, lastErr) - // verify sync decided by range calls - if runData.ExpectedDecidedState.CalledSyncDecidedByRange { - require.EqualValues(t, runData.ExpectedDecidedState.DecidedByRangeValues, config.GetNetwork().(*spectestingutils.TestingNetwork).DecidedByRange) - } else { - require.EqualValues(t, [2]specqbft.Height{0, 0}, config.GetNetwork().(*spectestingutils.TestingNetwork).DecidedByRange) - } - return lastErr } @@ -129,20 +140,20 @@ func testBroadcastedDecided( } } -func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Height, contr *controller.Controller, config *qbft.Config, identifier []byte, runData *spectests.RunInstanceData) error { +func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Height, contr *controller.Controller, runData *spectests.RunInstanceData) error { err := contr.StartNewInstance(logger, height, runData.InputValue) var lastErr error if err != nil { lastErr = err } - testTimer(t, config, runData) + testTimer(t, contr.GetConfig().(*qbft.Config), runData) - if err := testProcessMsg(t, logger, contr, config, runData); err != nil { + if err := testProcessMsg(t, logger, contr, contr.GetConfig().(*qbft.Config), runData); err != nil { lastErr = err } - testBroadcastedDecided(t, config, identifier, runData) + testBroadcastedDecided(t, contr.GetConfig().(*qbft.Config), contr.Identifier, runData) // test root r, err := contr.GetRoot() @@ -151,3 +162,24 @@ func runInstanceWithData(t *testing.T, logger *zap.Logger, height specqbft.Heigh return lastErr } + +func overrideStateComparisonForControllerSpecTest(t *testing.T, test *spectests.ControllerSpecTest) { + specDir, err := 
protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest")) + require.NoError(t, err) + specDir = filepath.Join(specDir, "generate") + dir := typescomparable.GetSCDir(specDir, reflect.TypeOf(test).String()) + path := filepath.Join(dir, fmt.Sprintf("%s.json", test.TestName())) + byteValue, err := os.ReadFile(filepath.Clean(path)) + require.NoError(t, err) + sc := make([]*controller.Controller, len(test.RunInstanceData)) + require.NoError(t, json.Unmarshal(byteValue, &sc)) + + for i, runData := range test.RunInstanceData { + runData.ControllerPostState = sc[i] + + r, err := sc[i].GetRoot() + require.NoError(t, err) + + runData.ControllerPostRoot = hex.EncodeToString(r[:]) + } +} diff --git a/protocol/v2/qbft/spectest/msg_processing_type.go b/protocol/v2/qbft/spectest/msg_processing_type.go index 63c8922862..15606c2ece 100644 --- a/protocol/v2/qbft/spectest/msg_processing_type.go +++ b/protocol/v2/qbft/spectest/msg_processing_type.go @@ -3,6 +3,8 @@ package qbft import ( "encoding/hex" "fmt" + "path/filepath" + "reflect" "testing" "time" @@ -10,15 +12,19 @@ import ( specqbft "github.com/bloxapp/ssv-spec/qbft" spectests "github.com/bloxapp/ssv-spec/qbft/spectest/tests" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" "github.com/stretchr/testify/require" ) // RunMsgProcessing processes MsgProcessingSpecTest. It probably may be removed. func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { + overrideStateComparisonForMsgProcessingSpecTest(t, test) + // a little trick we do to instantiate all the internal instance params preByts, _ := test.Pre.Encode() msgId := specqbft.ControllerIdToMessageID(test.Pre.State.ID) @@ -49,7 +55,7 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { } if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) + require.EqualError(t, lastErr, test.ExpectedError, "expected %v, but got %v", test.ExpectedError, lastErr) } else { require.NoError(t, lastErr) } @@ -78,3 +84,22 @@ func RunMsgProcessing(t *testing.T, test *spectests.MsgProcessingSpecTest) { require.EqualValues(t, test.PostRoot, hex.EncodeToString(postRoot[:]), "post root not valid") } + +func overrideStateComparisonForMsgProcessingSpecTest(t *testing.T, test *spectests.MsgProcessingSpecTest) { + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("qbft", "spectest")) + require.NoError(t, err) + test.PostState, err = typescomparable.UnmarshalStateComparison(specDir, test.TestName(), + reflect.TypeOf(test).String(), + &specqbft.State{}) + require.NoError(t, err) + + r, err := test.PostState.GetRoot() + require.NoError(t, err) + + // backwards compatibility test: the hard-coded post root must equal the one loaded from file + if len(test.PostRoot) > 0 { + require.EqualValues(t, test.PostRoot, hex.EncodeToString(r[:])) + } + + test.PostRoot = hex.EncodeToString(r[:]) +} diff --git a/protocol/v2/qbft/spectest/qbft_mapping_test.go b/protocol/v2/qbft/spectest/qbft_mapping_test.go index d771e98d1f..00903a0adc 100644 --- a/protocol/v2/qbft/spectest/qbft_mapping_test.go +++ b/protocol/v2/qbft/spectest/qbft_mapping_test.go @@ -8,13 +8,13 @@ import ( "testing" spectests
"github.com/bloxapp/ssv-spec/qbft/spectest/tests" - "github.com/bloxapp/ssv-spec/qbft/spectest/tests/controller/futuremsg" "github.com/bloxapp/ssv-spec/qbft/spectest/tests/timeout" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/stretchr/testify/require" + "github.com/bloxapp/ssv/logging" testing2 "github.com/bloxapp/ssv/protocol/v2/qbft/testing" - "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" @@ -31,18 +31,12 @@ func TestQBFTMapping(t *testing.T) { panic(err.Error()) } - origDomain := types.GetDefaultDomain() types.SetDefaultDomain(testingutils.TestingSSVDomainType) - defer func() { - types.SetDefaultDomain(origDomain) - }() for name, test := range untypedTests { name, test := name, test - testName := strings.Split(name, "_")[1] testType := strings.Split(name, "_")[0] - switch testType { case reflect.TypeOf(&spectests.MsgProcessingSpecTest{}).String(): byts, err := json.Marshal(test) @@ -51,6 +45,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunMsgProcessing(t, typedTest) }) case reflect.TypeOf(&spectests.MsgSpecTest{}).String(): @@ -60,6 +55,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunMsg(t, typedTest) }) case reflect.TypeOf(&spectests.ControllerSpecTest{}).String(): @@ -69,6 +65,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunControllerSpecTest(t, typedTest) }) case reflect.TypeOf(&spectests.CreateMsgSpecTest{}).String(): @@ -78,6 +75,7 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { + t.Parallel() RunCreateMsg(t, typedTest) }) case reflect.TypeOf(&spectests.RoundRobinSpecTest{}).String(): @@ -87,21 +85,12 @@ func TestQBFTMapping(t *testing.T) { require.NoError(t, json.Unmarshal(byts, &typedTest)) t.Run(typedTest.TestName(), func(t *testing.T) { // using only spec struct so no need to run our version (TODO: check how we choose leader) + t.Parallel() typedTest.Run(t) }) /*t.Run(typedTest.TestName(), func(t *testing.T) { RunMsg(t, typedTest) })*/ - - case reflect.TypeOf(&futuremsg.ControllerSyncSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &futuremsg.ControllerSyncSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { - RunControllerSync(t, typedTest) - }) case reflect.TypeOf(&timeout.SpecTest{}).String(): byts, err := json.Marshal(test) require.NoError(t, err) diff --git a/protocol/v2/qbft/spectest/timeout_type.go b/protocol/v2/qbft/spectest/timeout_type.go index 637e1dd374..73b3fe7cde 100644 --- a/protocol/v2/qbft/spectest/timeout_type.go +++ b/protocol/v2/qbft/spectest/timeout_type.go @@ -7,8 +7,11 @@ import ( "github.com/bloxapp/ssv-spec/qbft" "github.com/bloxapp/ssv-spec/types/testingutils" + "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" + "github.com/stretchr/testify/require" ) @@ -32,7 +35,7 @@ func RunTimeout(t *testing.T, test *SpecTest) { } // test calling timeout - timer, ok := 
test.Pre.GetConfig().GetTimer().(*testingutils.TestQBFTTimer) + timer, ok := test.Pre.GetConfig().GetTimer().(*roundtimer.TestQBFTTimer) require.True(t, ok) require.Equal(t, test.ExpectedTimerState.Timeouts, timer.State.Timeouts) require.Equal(t, test.ExpectedTimerState.Round, timer.State.Round) diff --git a/protocol/v2/qbft/testing/utils.go b/protocol/v2/qbft/testing/utils.go index 35291f0acc..c6741925ce 100644 --- a/protocol/v2/qbft/testing/utils.go +++ b/protocol/v2/qbft/testing/utils.go @@ -6,10 +6,13 @@ import ( specqbft "github.com/bloxapp/ssv-spec/qbft" "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv-spec/types/testingutils" - "github.com/bloxapp/ssv/protocol/v2/qbft" - "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/pkg/errors" "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/qbft" + "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" ) var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, role types.BeaconRole) *qbft.Config { @@ -31,9 +34,10 @@ var TestingConfig = func(logger *zap.Logger, keySet *testingutils.TestKeySet, ro ProposerF: func(state *specqbft.State, round specqbft.Round) types.OperatorID { return 1 }, - Storage: TestingStores(logger).Get(role), - Network: testingutils.NewTestingNetwork(), - Timer: testingutils.NewTestingTimer(), + Storage: TestingStores(logger).Get(role), + Network: testingutils.NewTestingNetwork(), + Timer: roundtimer.NewTestingTimer(), + SignatureVerification: true, } } diff --git a/protocol/v2/queue/worker/message_worker.go b/protocol/v2/queue/worker/message_worker.go index ee96301870..5c9f2b3f97 100644 --- a/protocol/v2/queue/worker/message_worker.go +++ b/protocol/v2/queue/worker/message_worker.go @@ -2,11 +2,12 @@ package worker import ( "context" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "go.uber.org/zap" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) var ( @@ -24,12 +25,12 @@ func init() { } // MsgHandler func that receive message.SSVMessage to handle -type MsgHandler func(msg *spectypes.SSVMessage) error +type MsgHandler func(msg *queue.DecodedSSVMessage) error // ErrorHandler func that handles an error for a specific message -type ErrorHandler func(msg *spectypes.SSVMessage, err error) error +type ErrorHandler func(msg *queue.DecodedSSVMessage, err error) error -func defaultErrHandler(msg *spectypes.SSVMessage, err error) error { +func defaultErrHandler(msg *queue.DecodedSSVMessage, err error) error { return err } @@ -46,7 +47,7 @@ type Worker struct { ctx context.Context cancel context.CancelFunc workersCount int - queue chan *spectypes.SSVMessage + queue chan *queue.DecodedSSVMessage handler MsgHandler errHandler ErrorHandler metricsPrefix string @@ -60,7 +61,7 @@ func NewWorker(logger *zap.Logger, cfg *Config) *Worker { ctx: ctx, cancel: cancel, workersCount: cfg.WorkersCount, - queue: make(chan *spectypes.SSVMessage, cfg.Buffer), + queue: make(chan *queue.DecodedSSVMessage, cfg.Buffer), errHandler: defaultErrHandler, metricsPrefix: cfg.MetrixPrefix, } @@ -78,7 +79,7 @@ func (w *Worker) init(logger *zap.Logger) { } // startWorker process functionality -func (w *Worker) startWorker(logger *zap.Logger, ch <-chan *spectypes.SSVMessage) { +func (w *Worker) startWorker(logger *zap.Logger, ch <-chan *queue.DecodedSSVMessage) { ctx, cancel := context.WithCancel(w.ctx) defer cancel() for { @@ -104,7 +105,7 @@ 
func (w *Worker) UseErrorHandler(errHandler ErrorHandler) { // TryEnqueue tries to enqueue a job to the given job channel. Returns true if // the operation was successful, and false if enqueuing would not have been // possible without blocking. Job is not enqueued in the latter case. -func (w *Worker) TryEnqueue(msg *spectypes.SSVMessage) bool { +func (w *Worker) TryEnqueue(msg *queue.DecodedSSVMessage) bool { select { case w.queue <- msg: return true @@ -125,7 +126,7 @@ func (w *Worker) Size() int { } // process the msg's from queue -func (w *Worker) process(logger *zap.Logger, msg *spectypes.SSVMessage) { +func (w *Worker) process(logger *zap.Logger, msg *queue.DecodedSSVMessage) { if w.handler == nil { logger.Warn("❗ no handler for worker") return diff --git a/protocol/v2/queue/worker/message_worker_test.go b/protocol/v2/queue/worker/message_worker_test.go index b5cec21317..adbf5032d0 100644 --- a/protocol/v2/queue/worker/message_worker_test.go +++ b/protocol/v2/queue/worker/message_worker_test.go @@ -6,10 +6,10 @@ import ( "testing" "time" - spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" "github.com/bloxapp/ssv/logging" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" ) func TestWorker(t *testing.T) { @@ -20,12 +20,12 @@ func TestWorker(t *testing.T) { Buffer: 2, }) - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) return nil }) for i := 0; i < 5; i++ { - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) time.Sleep(time.Second * 1) } } @@ -41,7 +41,7 @@ func TestManyWorkers(t *testing.T) { }) time.Sleep(time.Millisecond * 100) // wait for worker to start listen - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) wg.Done() return nil @@ -49,7 +49,7 @@ func TestManyWorkers(t *testing.T) { for i := 0; i < 10; i++ { wg.Add(1) - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) } wg.Wait() } @@ -65,7 +65,7 @@ func TestBuffer(t *testing.T) { }) time.Sleep(time.Millisecond * 100) // wait for worker to start listen - worker.UseHandler(func(msg *spectypes.SSVMessage) error { + worker.UseHandler(func(msg *queue.DecodedSSVMessage) error { require.NotNil(t, msg) wg.Done() time.Sleep(time.Millisecond * 100) @@ -74,7 +74,7 @@ func TestBuffer(t *testing.T) { for i := 0; i < 11; i++ { // should buffer 10 msgs wg.Add(1) - require.True(t, worker.TryEnqueue(&spectypes.SSVMessage{})) + require.True(t, worker.TryEnqueue(&queue.DecodedSSVMessage{})) } wg.Wait() } diff --git a/protocol/v2/ssv/queue/message_prioritizer_test.go b/protocol/v2/ssv/queue/message_prioritizer_test.go index f07e5e2691..deb3654b45 100644 --- a/protocol/v2/ssv/queue/message_prioritizer_test.go +++ b/protocol/v2/ssv/queue/message_prioritizer_test.go @@ -17,7 +17,6 @@ import ( "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/types" "github.com/stretchr/testify/require" - "go.uber.org/zap" ) var messagePriorityTests = []struct { @@ -125,7 +124,7 @@ func TestMessagePrioritizer(t *testing.T) { messages := make(messageSlice, len(test.messages)) for i, m := range test.messages { var err error - messages[i], err = DecodeSSVMessage(zap.L(), m.ssvMessage(test.state)) + messages[i], err = DecodeSSVMessage(m.ssvMessage(test.state)) 
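With the worker now carrying pre-decoded messages, handlers can type-switch on Body instead of re-decoding the payload on every dequeue. A minimal sketch; the Config literal here is partial and the real struct may require additional fields (such as a context) beyond the WorkersCount and Buffer shown in this diff:

w := worker.NewWorker(logger, &worker.Config{
	WorkersCount: 1,
	Buffer:       16,
})
w.UseHandler(func(msg *queue.DecodedSSVMessage) error {
	switch body := msg.Body.(type) {
	case *specqbft.SignedMessage:
		_ = body // consensus message, already decoded by DecodeSSVMessage
	}
	return nil
})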
require.NoError(t, err) } diff --git a/protocol/v2/ssv/queue/messages.go b/protocol/v2/ssv/queue/messages.go index 01c6fb945c..f69644eee7 100644 --- a/protocol/v2/ssv/queue/messages.go +++ b/protocol/v2/ssv/queue/messages.go @@ -1,25 +1,31 @@ package queue import ( + "fmt" + specqbft "github.com/bloxapp/ssv-spec/qbft" spectypes "github.com/bloxapp/ssv-spec/types" "github.com/pkg/errors" - "go.uber.org/zap" ssvmessage "github.com/bloxapp/ssv/protocol/v2/message" ssvtypes "github.com/bloxapp/ssv/protocol/v2/types" ) +var ( + ErrUnknownMessageType = fmt.Errorf("unknown message type") +) + // DecodedSSVMessage is a bundle of SSVMessage and it's decoding. +// TODO: try to make it generic type DecodedSSVMessage struct { *spectypes.SSVMessage // Body is the decoded Data. - Body interface{} // *SignedMessage | *SignedPartialSignatureMessage + Body interface{} // *SignedMessage | *SignedPartialSignatureMessage | *EventMsg } // DecodeSSVMessage decodes an SSVMessage and returns a DecodedSSVMessage. -func DecodeSSVMessage(logger *zap.Logger, m *spectypes.SSVMessage) (*DecodedSSVMessage, error) { +func DecodeSSVMessage(m *spectypes.SSVMessage) (*DecodedSSVMessage, error) { var body interface{} switch m.MsgType { case spectypes.SSVConsensusMsgType: // TODO: Or message.SSVDecidedMsgType? @@ -40,6 +46,8 @@ func DecodeSSVMessage(logger *zap.Logger, m *spectypes.SSVMessage) (*DecodedSSVM return nil, errors.Wrap(err, "failed to decode EventMsg") } body = msg + default: + return nil, ErrUnknownMessageType } return &DecodedSSVMessage{ SSVMessage: m, diff --git a/protocol/v2/ssv/queue/metrics.go b/protocol/v2/ssv/queue/metrics.go index 99d3c30ad3..36206704cc 100644 --- a/protocol/v2/ssv/queue/metrics.go +++ b/protocol/v2/ssv/queue/metrics.go @@ -1,14 +1,12 @@ package queue import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" + spectypes "github.com/bloxapp/ssv-spec/types" ) // Metrics records metrics about the Queue. type Metrics interface { - // Dropped increments the number of messages dropped from the Queue. - Dropped() + DroppedQueueMessage(messageID spectypes.MessageID) } type queueWithMetrics struct { @@ -27,35 +25,8 @@ func WithMetrics(q Queue, metrics Metrics) Queue { func (q *queueWithMetrics) TryPush(msg *DecodedSSVMessage) bool { pushed := q.Queue.TryPush(msg) if !pushed { - q.metrics.Dropped() + q.metrics.DroppedQueueMessage(msg.GetID()) } - return pushed -} - -// TODO: move to metrics/prometheus package -type prometheusMetrics struct { - dropped prometheus.Counter -} - -// NewPrometheusMetrics returns a Prometheus implementation of Metrics. -func NewPrometheusMetrics(messageID string) Metrics { - return &prometheusMetrics{ - dropped: metricMessageDropped.WithLabelValues(messageID), - } -} - -func (m *prometheusMetrics) Dropped() { - m.dropped.Inc() -} -// Register Prometheus metrics. 
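
DecodeSSVMessage now fails closed: a message whose MsgType matches no case yields the ErrUnknownMessageType sentinel instead of passing through undecoded, and callers can classify the failure with errors.Is rather than string matching. A minimal sketch of that shape, with hypothetical numeric type tags standing in for the spec message types:

    package main

    import (
        "errors"
        "fmt"
    )

    var ErrUnknownMessageType = errors.New("unknown message type")

    type decoded struct {
        kind byte
        body interface{}
    }

    // decode dispatches on the type tag, mirroring DecodeSSVMessage's switch;
    // kinds 1 and 2 are hypothetical stand-ins for consensus/partial-sig types.
    func decode(kind byte, data []byte) (*decoded, error) {
        switch kind {
        case 1:
            return &decoded{kind: kind, body: string(data)}, nil
        case 2:
            return &decoded{kind: kind, body: len(data)}, nil
        default:
            return nil, ErrUnknownMessageType
        }
    }

    func main() {
        if _, err := decode(9, nil); errors.Is(err, ErrUnknownMessageType) {
            fmt.Println("dropped unroutable message:", err)
        }
    }
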
-var ( - metricMessageDropped = promauto.NewCounterVec(prometheus.CounterOpts{ - Name: "ssv:ibft:msgq:drops", - Help: "The amount of message dropped from the validator's msg queue", - }, []string{"msg_id"}) -) - -func init() { - _ = prometheus.Register(metricMessageDropped) + return pushed } diff --git a/protocol/v2/ssv/queue/queue_test.go b/protocol/v2/ssv/queue/queue_test.go index a835779566..4b46c0e045 100644 --- a/protocol/v2/ssv/queue/queue_test.go +++ b/protocol/v2/ssv/queue/queue_test.go @@ -10,8 +10,8 @@ import ( "time" "github.com/bloxapp/ssv-spec/qbft" + spectypes "github.com/bloxapp/ssv-spec/types" "github.com/stretchr/testify/require" - "go.uber.org/zap" "golang.org/x/text/language" "golang.org/x/text/message" ) @@ -109,7 +109,7 @@ func TestPriorityQueue_Pop(t *testing.T) { queue := New(capacity) require.True(t, queue.Empty()) - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(t, err) // Push messages. @@ -163,7 +163,7 @@ func TestPriorityQueue_Order(t *testing.T) { // Decode messages. messages := make(messageSlice, len(test.messages)) for i, m := range test.messages { - mm, err := DecodeSSVMessage(zap.L(), m.ssvMessage(test.state)) + mm, err := DecodeSSVMessage(m.ssvMessage(test.state)) require.NoError(t, err) messages[i] = mm } @@ -184,30 +184,32 @@ func TestPriorityQueue_Order(t *testing.T) { } } -type mockMetrics struct { - dropped int +type testMetrics struct { + dropped atomic.Uint64 } -func (m *mockMetrics) Dropped() { m.dropped++ } +func (n *testMetrics) DroppedQueueMessage(messageID spectypes.MessageID) { + n.dropped.Add(1) +} func TestWithMetrics(t *testing.T) { - var metrics mockMetrics - queue := WithMetrics(New(1), &metrics) + metrics := &testMetrics{} + queue := WithMetrics(New(1), metrics) require.True(t, queue.Empty()) // Push 1 message. - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: 100, Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(t, err) pushed := queue.TryPush(msg) require.True(t, pushed) require.False(t, queue.Empty()) - require.Equal(t, 0, metrics.dropped) + require.EqualValues(t, 0, metrics.dropped.Load()) // Push above capacity. 
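
The replacement test double counts drops with an atomic.Uint64, so the decorated queue can be exercised from several goroutines at once. The decorator itself is the interesting part: queueWithMetrics forwards TryPush to the wrapped Queue and only touches metrics on the failure path. A sketch of that wrapping under toy types:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type pusher interface {
        TryPush(v int) bool
    }

    type boundedQueue struct{ ch chan int }

    func (q *boundedQueue) TryPush(v int) bool {
        select {
        case q.ch <- v:
            return true
        default:
            return false
        }
    }

    // withDrops wraps any pusher and records rejected pushes, mirroring how
    // queueWithMetrics delegates to the inner Queue and counts only drops.
    type withDrops struct {
        pusher
        dropped atomic.Uint64
    }

    func (w *withDrops) TryPush(v int) bool {
        ok := w.pusher.TryPush(v)
        if !ok {
            w.dropped.Add(1)
        }
        return ok
    }

    func main() {
        q := &withDrops{pusher: &boundedQueue{ch: make(chan int, 1)}}
        q.TryPush(1)
        q.TryPush(2) // capacity exceeded, counted as a drop
        fmt.Println("drops:", q.dropped.Load())
    }
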
pushed = queue.TryPush(msg) require.False(t, pushed) require.False(t, queue.Empty()) - require.Equal(t, 1, metrics.dropped) + require.EqualValues(t, 1, metrics.dropped.Load()) } func BenchmarkPriorityQueue_Parallel(b *testing.B) { @@ -234,7 +236,7 @@ func benchmarkPriorityQueueParallel(b *testing.B, factory func() Queue, lossy bo messages := make([]*DecodedSSVMessage, messageCount) for i := range messages { var err error - msg, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: qbft.Height(rand.Intn(messageCount)), Type: qbft.PrepareMsgType}.ssvMessage(mockState)) + msg, err := DecodeSSVMessage(mockConsensusMessage{Height: qbft.Height(rand.Intn(messageCount)), Type: qbft.PrepareMsgType}.ssvMessage(mockState)) require.NoError(b, err) messages[i] = msg } @@ -359,7 +361,7 @@ func BenchmarkPriorityQueue_Concurrent(b *testing.B) { for _, i := range rand.Perm(messageCount) { height := qbft.FirstHeight + qbft.Height(i) for _, t := range types { - decoded, err := DecodeSSVMessage(zap.L(), mockConsensusMessage{Height: height, Type: t}.ssvMessage(mockState)) + decoded, err := DecodeSSVMessage(mockConsensusMessage{Height: height, Type: t}.ssvMessage(mockState)) require.NoError(b, err) msgs <- decoded } @@ -412,7 +414,7 @@ func BenchmarkPriorityQueue_Concurrent(b *testing.B) { } func decodeAndPush(t require.TestingT, queue Queue, msg mockMessage, state *State) *DecodedSSVMessage { - decoded, err := DecodeSSVMessage(zap.L(), msg.ssvMessage(state)) + decoded, err := DecodeSSVMessage(msg.ssvMessage(state)) require.NoError(t, err) queue.Push(decoded) return decoded diff --git a/protocol/v2/ssv/runner/runner.go b/protocol/v2/ssv/runner/runner.go index b59d404907..1fc2225e15 100644 --- a/protocol/v2/ssv/runner/runner.go +++ b/protocol/v2/ssv/runner/runner.go @@ -54,7 +54,8 @@ type BaseRunner struct { BeaconRoleType spectypes.BeaconRole // implementation vars - TimeoutF TimeoutF `json:"-"` + TimeoutF TimeoutF `json:"-"` + VerifySignatures bool `json:"-"` // highestDecidedSlot holds the highest decided duty slot and gets updated after each decided is reached highestDecidedSlot spec.Slot @@ -96,6 +97,9 @@ func NewBaseRunner( // baseStartNewDuty is a base func that all runner implementation can call to start a duty func (b *BaseRunner) baseStartNewDuty(logger *zap.Logger, runner Runner, duty *spectypes.Duty) error { + if err := b.ShouldProcessDuty(duty); err != nil { + return errors.Wrap(err, "can't start duty") + } b.baseSetupForNewDuty(duty) return runner.executeDuty(logger, duty) } @@ -265,3 +269,11 @@ func (b *BaseRunner) hasRunningDuty() bool { } return !b.State.Finished } + +func (b *BaseRunner) ShouldProcessDuty(duty *spectypes.Duty) error { + if b.QBFTController.Height >= specqbft.Height(duty.Slot) { + return errors.Errorf("duty for slot %d already passed. 
Current height is %d", duty.Slot, + b.QBFTController.Height) + } + return nil +} diff --git a/protocol/v2/ssv/runner/runner_signatures.go b/protocol/v2/ssv/runner/runner_signatures.go index 96b2a723f5..edfc608ea7 100644 --- a/protocol/v2/ssv/runner/runner_signatures.go +++ b/protocol/v2/ssv/runner/runner_signatures.go @@ -3,10 +3,11 @@ package runner import ( spec "github.com/attestantio/go-eth2-client/spec/phase0" spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/bloxapp/ssv/protocol/v2/types" ssz "github.com/ferranbt/fastssz" "github.com/herumi/bls-eth-go-binary/bls" "github.com/pkg/errors" + + "github.com/bloxapp/ssv/protocol/v2/types" ) func (b *BaseRunner) signBeaconObject( @@ -57,13 +58,15 @@ func (b *BaseRunner) validatePartialSigMsgForSlot( return errors.New("invalid partial sig slot") } - if err := types.VerifyByOperators(signedMsg.GetSignature(), signedMsg, b.Share.DomainType, spectypes.PartialSignatureType, b.Share.Committee); err != nil { - return errors.Wrap(err, "failed to verify PartialSignature") - } + if b.VerifySignatures { + if err := types.VerifyByOperators(signedMsg.GetSignature(), signedMsg, b.Share.DomainType, spectypes.PartialSignatureType, b.Share.Committee); err != nil { + return errors.Wrap(err, "failed to verify PartialSignature") + } - for _, msg := range signedMsg.Message.Messages { - if err := b.verifyBeaconPartialSignature(msg); err != nil { - return errors.Wrap(err, "could not verify Beacon partial Signature") + for _, msg := range signedMsg.Message.Messages { + if err := b.verifyBeaconPartialSignature(msg); err != nil { + return errors.Wrap(err, "could not verify Beacon partial Signature") + } } } diff --git a/protocol/v2/ssv/runner/timer.go b/protocol/v2/ssv/runner/timer.go index 9d8e4a315f..51e25ccbf6 100644 --- a/protocol/v2/ssv/runner/timer.go +++ b/protocol/v2/ssv/runner/timer.go @@ -9,7 +9,7 @@ import ( "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" ) -type TimeoutF func(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) func() +type TimeoutF func(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) roundtimer.OnRoundTimeoutF func (b *BaseRunner) registerTimeoutHandler(logger *zap.Logger, instance *instance.Instance, height specqbft.Height) { identifier := spectypes.MessageIDFromBytes(instance.State.ID) diff --git a/protocol/v2/ssv/runner/validator_registration.go b/protocol/v2/ssv/runner/validator_registration.go index 10bf6a39fe..68bc4351b8 100644 --- a/protocol/v2/ssv/runner/validator_registration.go +++ b/protocol/v2/ssv/runner/validator_registration.go @@ -2,6 +2,7 @@ package runner import ( "crypto/sha256" + "encoding/hex" "encoding/json" v1 "github.com/attestantio/go-eth2-client/api/v1" @@ -53,7 +54,13 @@ func NewValidatorRegistrationRunner( } func (r *ValidatorRegistrationRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { - return r.BaseRunner.baseStartNewDuty(logger, r, duty) + // Note: Unlike the other runners, this doesn't call BaseRunner.baseStartNewDuty because + // that requires a QBFTController which ValidatorRegistrationRunner doesn't have. 
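
baseStartNewDuty now refuses duties whose slot does not advance past the QBFT controller's height, so a stale or replayed duty fails fast instead of racing an already-decided instance; runners without a QBFTController (validator registration above, voluntary exit below) therefore set up the duty directly. A sketch of the guard, with plain integers standing in for specqbft.Height and the duty slot:

    package main

    import "fmt"

    type controller struct{ height uint64 }

    // shouldProcessDuty mirrors BaseRunner.ShouldProcessDuty: a duty is only
    // runnable if its slot is strictly above the height already decided.
    func (c *controller) shouldProcessDuty(slot uint64) error {
        if c.height >= slot {
            return fmt.Errorf("duty for slot %d already passed, current height is %d", slot, c.height)
        }
        return nil
    }

    func main() {
        c := &controller{height: 100}
        fmt.Println(c.shouldProcessDuty(100)) // rejected: not above current height
        fmt.Println(c.shouldProcessDuty(101)) // <nil>: accepted
    }
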
+ if r.HasRunningDuty() { + return errors.New("already running duty") + } + r.BaseRunner.baseSetupForNewDuty(duty) + return r.executeDuty(logger, duty) } // HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) @@ -85,7 +92,9 @@ func (r *ValidatorRegistrationRunner) ProcessPreConsensus(logger *zap.Logger, si return errors.Wrap(err, "could not submit validator registration") } - logger.Debug("validator registration submitted successfully", fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:])) + logger.Debug("validator registration submitted successfully", + fields.FeeRecipient(r.BaseRunner.Share.FeeRecipientAddress[:]), + zap.String("signature", hex.EncodeToString(specSig[:]))) r.GetState().Finished = true return nil diff --git a/protocol/v2/ssv/runner/voluntary_exit.go b/protocol/v2/ssv/runner/voluntary_exit.go new file mode 100644 index 0000000000..7eba30c616 --- /dev/null +++ b/protocol/v2/ssv/runner/voluntary_exit.go @@ -0,0 +1,232 @@ +package runner + +import ( + "crypto/sha256" + "encoding/json" + + "github.com/attestantio/go-eth2-client/spec/phase0" + specqbft "github.com/bloxapp/ssv-spec/qbft" + specssv "github.com/bloxapp/ssv-spec/ssv" + spectypes "github.com/bloxapp/ssv-spec/types" + "github.com/bloxapp/ssv/protocol/v2/ssv/runner/metrics" + ssz "github.com/ferranbt/fastssz" + "github.com/pkg/errors" + "go.uber.org/zap" +) + +// Duty runner for validator voluntary exit duty +type VoluntaryExitRunner struct { + BaseRunner *BaseRunner + + beacon specssv.BeaconNode + network specssv.Network + signer spectypes.KeyManager + valCheck specqbft.ProposedValueCheckF + + voluntaryExit *phase0.VoluntaryExit + + metrics metrics.ConsensusMetrics +} + +func NewVoluntaryExitRunner( + beaconNetwork spectypes.BeaconNetwork, + share *spectypes.Share, + beacon specssv.BeaconNode, + network specssv.Network, + signer spectypes.KeyManager, +) Runner { + return &VoluntaryExitRunner{ + BaseRunner: &BaseRunner{ + BeaconRoleType: spectypes.BNRoleVoluntaryExit, + BeaconNetwork: beaconNetwork, + Share: share, + }, + + beacon: beacon, + network: network, + signer: signer, + metrics: metrics.NewConsensusMetrics(spectypes.BNRoleVoluntaryExit), + } +} + +func (r *VoluntaryExitRunner) StartNewDuty(logger *zap.Logger, duty *spectypes.Duty) error { + // Note: Unlike the other runners, this doesn't call BaseRunner.baseStartNewDuty because + // that requires a QBFTController which VoluntaryExitRunner doesn't have.
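
VoluntaryExitRunner is a pre-consensus-only runner: it signs the VoluntaryExit object locally, broadcasts its partial signature, and once a quorum of operator shares arrives it reconstructs the full BLS signature and submits the SignedVoluntaryExit. The quorum-fires-once contract that ProcessPreConsensus depends on can be pictured with a toy collector (hypothetical names, not the real pre-consensus container):

    package main

    import "fmt"

    // partialSigCollector mirrors the pre-consensus container's role: it
    // gathers operator shares for one signing root and reports quorum
    // exactly once.
    type partialSigCollector struct {
        threshold int
        sigs      map[uint64][]byte // operator ID -> partial signature
        reported  bool
    }

    func (c *partialSigCollector) add(operator uint64, sig []byte) (quorum bool) {
        c.sigs[operator] = sig
        if !c.reported && len(c.sigs) >= c.threshold {
            c.reported = true
            return true // caller reconstructs the full signature and submits
        }
        return false
    }

    func main() {
        c := &partialSigCollector{threshold: 3, sigs: map[uint64][]byte{}}
        for op := uint64(1); op <= 4; op++ {
            if c.add(op, []byte{byte(op)}) {
                fmt.Println("quorum at operator", op, "- reconstruct and submit SignedVoluntaryExit")
            }
        }
    }
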
+ if r.HasRunningDuty() { + return errors.New("already running duty") + } + r.BaseRunner.baseSetupForNewDuty(duty) + return r.executeDuty(logger, duty) +} + +// HasRunningDuty returns true if a duty is already running (StartNewDuty called and returned nil) +func (r *VoluntaryExitRunner) HasRunningDuty() bool { + return r.BaseRunner.hasRunningDuty() +} + +// Check for quorum of partial signatures over VoluntaryExit and, +// if has quorum, constructs SignedVoluntaryExit and submits to BeaconNode +func (r *VoluntaryExitRunner) ProcessPreConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { + quorum, roots, err := r.BaseRunner.basePreConsensusMsgProcessing(r, signedMsg) + if err != nil { + return errors.Wrap(err, "failed processing voluntary exit message") + } + + // quorum returns true only once (first time quorum achieved) + if !quorum { + return nil + } + + // only 1 root, verified in basePreConsensusMsgProcessing + root := roots[0] + fullSig, err := r.GetState().ReconstructBeaconSig(r.GetState().PreConsensusContainer, root, r.GetShare().ValidatorPubKey) + if err != nil { + return errors.Wrap(err, "could not reconstruct voluntary exit sig") + } + specSig := phase0.BLSSignature{} + copy(specSig[:], fullSig) + + // create SignedVoluntaryExit using VoluntaryExit created on r.executeDuty() and reconstructed signature + signedVoluntaryExit := &phase0.SignedVoluntaryExit{ + Message: r.voluntaryExit, + Signature: specSig, + } + + if err := r.beacon.SubmitVoluntaryExit(signedVoluntaryExit, specSig); err != nil { + return errors.Wrap(err, "could not submit voluntary exit") + } + + r.GetState().Finished = true + return nil +} + +func (r *VoluntaryExitRunner) ProcessConsensus(logger *zap.Logger, signedMsg *specqbft.SignedMessage) error { + return errors.New("no consensus phase for voluntary exit") +} + +func (r *VoluntaryExitRunner) ProcessPostConsensus(logger *zap.Logger, signedMsg *spectypes.SignedPartialSignatureMessage) error { + return errors.New("no post consensus phase for voluntary exit") +} + +func (r *VoluntaryExitRunner) expectedPreConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { + vr, err := r.calculateVoluntaryExit() + if err != nil { + return nil, spectypes.DomainError, errors.Wrap(err, "could not calculate voluntary exit") + } + return []ssz.HashRoot{vr}, spectypes.DomainVoluntaryExit, nil +} + +// expectedPostConsensusRootsAndDomain an INTERNAL function, returns the expected post-consensus roots to sign +func (r *VoluntaryExitRunner) expectedPostConsensusRootsAndDomain() ([]ssz.HashRoot, phase0.DomainType, error) { + return nil, [4]byte{}, errors.New("no post consensus roots for voluntary exit") +} + +// Validator voluntary exit duty doesn't need consensus nor post-consensus. 
+// It just performs pre-consensus with VoluntaryExitPartialSig over +// a VoluntaryExit object to create a SignedVoluntaryExit +func (r *VoluntaryExitRunner) executeDuty(logger *zap.Logger, duty *spectypes.Duty) error { + voluntaryExit, err := r.calculateVoluntaryExit() + if err != nil { + return errors.Wrap(err, "could not calculate voluntary exit") + } + + // get PartialSignatureMessage with voluntaryExit root and signature + msg, err := r.BaseRunner.signBeaconObject(r, voluntaryExit, duty.Slot, spectypes.DomainVoluntaryExit) + if err != nil { + return errors.Wrap(err, "could not sign VoluntaryExit object") + } + + msgs := spectypes.PartialSignatureMessages{ + Type: spectypes.VoluntaryExitPartialSig, + Slot: duty.Slot, + Messages: []*spectypes.PartialSignatureMessage{msg}, + } + + // sign PartialSignatureMessages object + signature, err := r.GetSigner().SignRoot(msgs, spectypes.PartialSignatureType, r.GetShare().SharePubKey) + if err != nil { + return errors.Wrap(err, "could not sign PartialSignatureMessages object") + } + signedPartialMsg := &spectypes.SignedPartialSignatureMessage{ + Message: msgs, + Signature: signature, + Signer: r.GetShare().OperatorID, + } + + // broadcast + data, err := signedPartialMsg.Encode() + if err != nil { + return errors.Wrap(err, "failed to encode signedPartialMsg with VoluntaryExit") + } + msgToBroadcast := &spectypes.SSVMessage{ + MsgType: spectypes.SSVPartialSignatureMsgType, + MsgID: spectypes.NewMsgID(r.GetShare().DomainType, r.GetShare().ValidatorPubKey, r.BaseRunner.BeaconRoleType), + Data: data, + } + if err := r.GetNetwork().Broadcast(msgToBroadcast); err != nil { + return errors.Wrap(err, "can't broadcast signedPartialMsg with VoluntaryExit") + } + + // store the value for later use in ProcessPreConsensus + r.voluntaryExit = voluntaryExit + + return nil +} + +// Returns *phase0.VoluntaryExit object with current epoch and own validator index +func (r *VoluntaryExitRunner) calculateVoluntaryExit() (*phase0.VoluntaryExit, error) { + epoch := r.BaseRunner.BeaconNetwork.EstimatedEpochAtSlot(r.BaseRunner.State.StartingDuty.Slot) + validatorIndex := r.GetState().StartingDuty.ValidatorIndex + return &phase0.VoluntaryExit{ + Epoch: epoch, + ValidatorIndex: validatorIndex, + }, nil +} + +func (r *VoluntaryExitRunner) GetBaseRunner() *BaseRunner { + return r.BaseRunner +} + +func (r *VoluntaryExitRunner) GetNetwork() specssv.Network { + return r.network +} + +func (r *VoluntaryExitRunner) GetBeaconNode() specssv.BeaconNode { + return r.beacon +} + +func (r *VoluntaryExitRunner) GetShare() *spectypes.Share { + return r.BaseRunner.Share +} + +func (r *VoluntaryExitRunner) GetState() *State { + return r.BaseRunner.State +} + +func (r *VoluntaryExitRunner) GetValCheckF() specqbft.ProposedValueCheckF { + return r.valCheck +} + +func (r *VoluntaryExitRunner) GetSigner() spectypes.KeyManager { + return r.signer +} + +// Encode returns the encoded struct in bytes or error +func (r *VoluntaryExitRunner) Encode() ([]byte, error) { + return json.Marshal(r) +} + +// Decode returns error if decoding failed +func (r *VoluntaryExitRunner) Decode(data []byte) error { + return json.Unmarshal(data, &r) +} + +// GetRoot returns the root used for signing and verification +func (r *VoluntaryExitRunner) GetRoot() ([32]byte, error) { + marshaledRoot, err := r.Encode() + if err != nil { + return [32]byte{}, errors.Wrap(err, "could not encode DutyRunnerState") + } + ret := sha256.Sum256(marshaledRoot) + return ret, nil +} diff --git a/protocol/v2/ssv/spectest/msg_processing_type.go
b/protocol/v2/ssv/spectest/msg_processing_type.go index 19fd0c71c8..412b92b8da 100644 --- a/protocol/v2/ssv/spectest/msg_processing_type.go +++ b/protocol/v2/ssv/spectest/msg_processing_type.go @@ -2,6 +2,9 @@ package spectest import ( "encoding/hex" + "path/filepath" + "reflect" + "strings" "testing" specqbft "github.com/bloxapp/ssv-spec/qbft" @@ -9,12 +12,15 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "github.com/stretchr/testify/require" + "go.uber.org/zap" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/bloxapp/ssv/logging" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" ssvtesting "github.com/bloxapp/ssv/protocol/v2/ssv/testing" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) type MsgProcessingSpecTest struct { @@ -23,6 +29,7 @@ type MsgProcessingSpecTest struct { Duty *spectypes.Duty Messages []*spectypes.SSVMessage PostDutyRunnerStateRoot string + PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json // OutputMessages compares pre/ post signed partial sigs to output. We exclude consensus msgs as it's tested in consensus OutputMessages []*spectypes.SignedPartialSignatureMessage BeaconBroadcastedRoots []string @@ -36,6 +43,13 @@ func (test *MsgProcessingSpecTest) TestName() string { func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { logger := logging.TestLogger(t) + test.overrideStateComparison(t) + test.RunAsPartOfMultiTest(t, logger) +} + +func (test *MsgProcessingSpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { + test.Runner.GetBaseRunner().VerifySignatures = true + v := ssvtesting.BaseValidator(logger, spectestingutils.KeySetForShare(test.Runner.GetBaseRunner().Share)) v.DutyRunners[test.Runner.GetBaseRunner().BeaconRoleType] = test.Runner v.Network = test.Runner.GetNetwork().(specqbft.Network) // TODO need to align @@ -45,7 +59,7 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { lastErr = v.StartDuty(logger, test.Duty) } for _, msg := range test.Messages { - dmsg, err := queue.DecodeSSVMessage(logger, msg) + dmsg, err := queue.DecodeSSVMessage(msg) if err != nil { lastErr = err continue @@ -57,7 +71,7 @@ func RunMsgProcessing(t *testing.T, test *MsgProcessingSpecTest) { } if len(test.ExpectedError) != 0 { - require.EqualError(t, lastErr, test.ExpectedError) + require.EqualError(t, lastErr, test.ExpectedError, "expected: %v", test.ExpectedError) } else { require.NoError(t, lastErr) } @@ -143,3 +157,43 @@ func (test *MsgProcessingSpecTest) compareOutputMsgs(t *testing.T, v *validator. 
index++ } } + +func (test *MsgProcessingSpecTest) overrideStateComparison(t *testing.T) { + testType := reflect.TypeOf(test).String() + testType = strings.Replace(testType, "spectest.", "tests.", 1) + overrideStateComparison(t, test, test.Name, testType) +} + +func overrideStateComparison(t *testing.T, test *MsgProcessingSpecTest, name string, testType string) { + var r runner.Runner + switch test.Runner.(type) { + case *runner.AttesterRunner: + r = &runner.AttesterRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case *runner.SyncCommitteeRunner: + r = &runner.SyncCommitteeRunner{} + case *runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + // override + test.PostDutyRunnerState = r + + root, err := r.GetRoot() + require.NoError(t, err) + + test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) +} diff --git a/protocol/v2/ssv/spectest/multi_msg_processing_type.go b/protocol/v2/ssv/spectest/multi_msg_processing_type.go index 0b4b926f6e..4d040782e2 100644 --- a/protocol/v2/ssv/spectest/multi_msg_processing_type.go +++ b/protocol/v2/ssv/spectest/multi_msg_processing_type.go @@ -1,10 +1,20 @@ package spectest -import "testing" +import ( + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/bloxapp/ssv/logging" + "go.uber.org/zap" +) type MultiMsgProcessingSpecTest struct { Name string Tests []*MsgProcessingSpecTest + + logger *zap.Logger } func (tests *MultiMsgProcessingSpecTest) TestName() string { @@ -12,10 +22,23 @@ func (tests *MultiMsgProcessingSpecTest) TestName() string { } func (tests *MultiMsgProcessingSpecTest) Run(t *testing.T) { + tests.logger = logging.TestLogger(t) + tests.overrideStateComparison(t) + for _, test := range tests.Tests { - test := test t.Run(test.TestName(), func(t *testing.T) { - RunMsgProcessing(t, test) + test.RunAsPartOfMultiTest(t, tests.logger) }) } } + +// overrideStateComparison overrides the post state comparison for all tests in the multi test +func (tests *MultiMsgProcessingSpecTest) overrideStateComparison(t *testing.T) { + testsName := strings.ReplaceAll(tests.TestName(), " ", "_") + for _, test := range tests.Tests { + path := filepath.Join(testsName, test.TestName()) + testType := reflect.TypeOf(tests).String() + testType = strings.Replace(testType, "spectest.", "tests.", 1) + overrideStateComparison(t, test, path, testType) + } +} diff --git a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go index c8bf0cae80..cfac13ec9d 100644 --- a/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go +++ b/protocol/v2/ssv/spectest/multi_start_new_runner_duty_type.go @@ -2,14 +2,19 @@ package spectest import ( "encoding/hex" + "path/filepath" + "reflect" + "strings" "testing" spectypes "github.com/bloxapp/ssv-spec/types" spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" + typescomparable "github.com/bloxapp/ssv-spec/types/testingutils/comparable" "github.com/stretchr/testify/require" "go.uber.org/zap" 
"github.com/bloxapp/ssv/protocol/v2/ssv/runner" + protocoltesting "github.com/bloxapp/ssv/protocol/v2/testing" ) type StartNewRunnerDutySpecTest struct { @@ -17,6 +22,7 @@ type StartNewRunnerDutySpecTest struct { Runner runner.Runner Duty *spectypes.Duty PostDutyRunnerStateRoot string + PostDutyRunnerState spectypes.Root `json:"-"` // Field is ignored by encoding/json OutputMessages []*spectypes.SignedPartialSignatureMessage ExpectedError string } @@ -25,7 +31,14 @@ func (test *StartNewRunnerDutySpecTest) TestName() string { return test.Name } -func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { +// overrideStateComparison overrides the state comparison to compare the runner state +func (test *StartNewRunnerDutySpecTest) overrideStateComparison(t *testing.T) { + testType := reflect.TypeOf(test).String() + testType = strings.Replace(testType, "spectest.", "newduty.", 1) + overrideStateComparisonForStartNewRunnerDutySpecTest(t, test, test.Name, testType) +} + +func (test *StartNewRunnerDutySpecTest) RunAsPartOfMultiTest(t *testing.T, logger *zap.Logger) { err := test.Runner.StartNewDuty(logger, test.Duty) if len(test.ExpectedError) > 0 { require.EqualError(t, err, test.ExpectedError) @@ -84,6 +97,11 @@ func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { require.EqualValues(t, test.PostDutyRunnerStateRoot, hex.EncodeToString(postRoot[:])) } +func (test *StartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { + test.overrideStateComparison(t) + test.RunAsPartOfMultiTest(t, logger) +} + type MultiStartNewRunnerDutySpecTest struct { Name string Tests []*StartNewRunnerDutySpecTest @@ -94,10 +112,56 @@ func (tests *MultiStartNewRunnerDutySpecTest) TestName() string { } func (tests *MultiStartNewRunnerDutySpecTest) Run(t *testing.T, logger *zap.Logger) { + tests.overrideStateComparison(t) + for _, test := range tests.Tests { - test := test t.Run(test.TestName(), func(t *testing.T) { - test.Run(t, logger) + test.RunAsPartOfMultiTest(t, logger) }) } } + +// overrideStateComparison overrides the post state comparison for all tests in the multi test +func (tests *MultiStartNewRunnerDutySpecTest) overrideStateComparison(t *testing.T) { + testsName := strings.ReplaceAll(tests.TestName(), " ", "_") + for _, test := range tests.Tests { + path := filepath.Join(testsName, test.TestName()) + testType := reflect.TypeOf(tests).String() + testType = strings.Replace(testType, "spectest.", "newduty.", 1) + overrideStateComparisonForStartNewRunnerDutySpecTest(t, test, path, testType) + } +} + +func overrideStateComparisonForStartNewRunnerDutySpecTest(t *testing.T, test *StartNewRunnerDutySpecTest, name string, testType string) { + var r runner.Runner + switch test.Runner.(type) { + case *runner.AttesterRunner: + r = &runner.AttesterRunner{} + case *runner.AggregatorRunner: + r = &runner.AggregatorRunner{} + case *runner.ProposerRunner: + r = &runner.ProposerRunner{} + case *runner.SyncCommitteeRunner: + r = &runner.SyncCommitteeRunner{} + case *runner.SyncCommitteeAggregatorRunner: + r = &runner.SyncCommitteeAggregatorRunner{} + case *runner.ValidatorRegistrationRunner: + r = &runner.ValidatorRegistrationRunner{} + case *runner.VoluntaryExitRunner: + r = &runner.VoluntaryExitRunner{} + default: + t.Fatalf("unknown runner type") + } + specDir, err := protocoltesting.GetSpecDir("", filepath.Join("ssv", "spectest")) + require.NoError(t, err) + r, err = typescomparable.UnmarshalStateComparison(specDir, name, testType, r) + require.NoError(t, err) + + 
// override + test.PostDutyRunnerState = r + + root, err := r.GetRoot() + require.NoError(t, err) + + test.PostDutyRunnerStateRoot = hex.EncodeToString(root[:]) +} diff --git a/protocol/v2/ssv/spectest/ssv_mapping_test.go b/protocol/v2/ssv/spectest/ssv_mapping_test.go index ccb15f0285..14fac24b35 100644 --- a/protocol/v2/ssv/spectest/ssv_mapping_test.go +++ b/protocol/v2/ssv/spectest/ssv_mapping_test.go @@ -2,7 +2,6 @@ package spectest import ( "encoding/json" - "fmt" "os" "reflect" "strings" @@ -19,7 +18,6 @@ import ( "go.uber.org/zap" "github.com/bloxapp/ssv/logging" - "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/qbft/instance" qbfttesting "github.com/bloxapp/ssv/protocol/v2/qbft/testing" @@ -41,105 +39,130 @@ func TestSSVMapping(t *testing.T) { panic(err.Error()) } - origDomain := types.GetDefaultDomain() types.SetDefaultDomain(testingutils.TestingSSVDomainType) - defer func() { - types.SetDefaultDomain(origDomain) - }() for name, test := range untypedTests { name, test := name, test + r := prepareTest(t, logger, name, test) + if r != nil { + t.Run(r.name, func(t *testing.T) { + t.Parallel() + r.test(t) + }) + } + } +} - testName := strings.Split(name, "_")[1] - testType := strings.Split(name, "_")[0] +type runnable struct { + name string + test func(t *testing.T) +} - fmt.Printf("--------- %s - %s \n", testType, testName) +func prepareTest(t *testing.T, logger *zap.Logger, name string, test interface{}) *runnable { + testName := strings.Split(name, "_")[1] + testType := strings.Split(name, "_")[0] - switch testType { - case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &MsgProcessingSpecTest{ - Runner: &runner.AttesterRunner{}, - } - // TODO fix blinded test - if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { - continue - } - require.NoError(t, json.Unmarshal(byts, &typedTest)) + switch testType { + case reflect.TypeOf(&tests.MsgProcessingSpecTest{}).String(): + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &MsgProcessingSpecTest{ + Runner: &runner.AttesterRunner{}, + } + // TODO: fix blinded test + if strings.Contains(testName, "propose regular decide blinded") || strings.Contains(testName, "propose blinded decide regular") { + logger.Info("skipping blinded block test", zap.String("test", testName)) + return nil + } + require.NoError(t, json.Unmarshal(byts, &typedTest)) - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { RunMsgProcessing(t, typedTest) - }) - case reflect.TypeOf(&tests.MultiMsgProcessingSpecTest{}).String(): - subtests := test.(map[string]interface{})["Tests"].([]interface{}) - typedTests := make([]*MsgProcessingSpecTest, 0) - for _, subtest := range subtests { - typedTests = append(typedTests, msgProcessingSpecTestFromMap(t, subtest.(map[string]interface{}))) - } - - typedTest := &MultiMsgProcessingSpecTest{ - Name: test.(map[string]interface{})["Name"].(string), - Tests: typedTests, - } + }, + } + case reflect.TypeOf(&tests.MultiMsgProcessingSpecTest{}).String(): + typedTest := &MultiMsgProcessingSpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + } + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + for _, subtest := range subtests { + typedTest.Tests = append(typedTest.Tests, msgProcessingSpecTestFromMap(t, 
subtest.(map[string]interface{}))) + } - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&messages.MsgSpecTest{}).String(): // no use of internal structs so can run as spec test runs - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &messages.MsgSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&messages.MsgSpecTest{}).String(): // no use of internal structs so can run as spec test runs + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &messages.MsgSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&valcheck.SpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &valcheck.SpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&valcheck.SpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &valcheck.SpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&valcheck.MultiSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &valcheck.MultiSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&valcheck.MultiSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &valcheck.MultiSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { typedTest.Run(t) - }) - case reflect.TypeOf(&synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer - byts, err := json.Marshal(test) - require.NoError(t, err) - typedTest := &synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{} - require.NoError(t, json.Unmarshal(byts, &typedTest)) - - t.Run(typedTest.TestName(), func(t *testing.T) { + }, + } + case reflect.TypeOf(&synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{}).String(): // no use of internal structs so can run as spec test runs TODO: need to use internal signer + byts, err := json.Marshal(test) + require.NoError(t, err) + typedTest := &synccommitteeaggregator.SyncCommitteeAggregatorProofSpecTest{} + require.NoError(t, json.Unmarshal(byts, &typedTest)) + + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { RunSyncCommitteeAggProof(t, typedTest) - }) - case reflect.TypeOf(&newduty.MultiStartNewRunnerDutySpecTest{}).String(): - subtests := test.(map[string]interface{})["Tests"].([]interface{}) - typedTests := 
make([]*StartNewRunnerDutySpecTest, 0) - for _, subtest := range subtests { - typedTests = append(typedTests, newRunnerDutySpecTestFromMap(t, subtest.(map[string]interface{}))) - } - - typedTest := &MultiStartNewRunnerDutySpecTest{ - Name: test.(map[string]interface{})["Name"].(string), - Tests: typedTests, - } + }, + } + case reflect.TypeOf(&newduty.MultiStartNewRunnerDutySpecTest{}).String(): + typedTest := &MultiStartNewRunnerDutySpecTest{ + Name: test.(map[string]interface{})["Name"].(string), + } - t.Run(typedTest.TestName(), func(t *testing.T) { + return &runnable{ + name: typedTest.TestName(), + test: func(t *testing.T) { + subtests := test.(map[string]interface{})["Tests"].([]interface{}) + for _, subtest := range subtests { + typedTest.Tests = append(typedTest.Tests, newRunnerDutySpecTestFromMap(t, subtest.(map[string]interface{}))) + } typedTest.Run(t, logger) - }) - default: - t.Fatalf("unsupported test type %s [%s]", testType, testName) + }, } + default: + t.Fatalf("unsupported test type %s [%s]", testType, testName) + return nil } } @@ -324,6 +347,10 @@ func baseRunnerForRole(logger *zap.Logger, role spectypes.BeaconRole, base *runn ret := ssvtesting.ValidatorRegistrationRunner(logger, ks) ret.(*runner.ValidatorRegistrationRunner).BaseRunner = base return ret + case spectypes.BNRoleVoluntaryExit: + ret := ssvtesting.VoluntaryExitRunner(logger, ks) + ret.(*runner.VoluntaryExitRunner).BaseRunner = base + return ret case testingutils.UnknownDutyType: ret := ssvtesting.UnknownDutyTypeRunner(logger, ks) ret.(*runner.AttesterRunner).BaseRunner = base diff --git a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go index 9e12cab157..2fd4091732 100644 --- a/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go +++ b/protocol/v2/ssv/spectest/sync_committee_aggregator_proof_type.go @@ -24,7 +24,7 @@ func RunSyncCommitteeAggProof(t *testing.T, test *synccommitteeaggregator.SyncCo lastErr := v.StartDuty(logger, &testingutils.TestingSyncCommitteeContributionDuty) for _, msg := range test.Messages { - dmsg, err := queue.DecodeSSVMessage(logger, msg) + dmsg, err := queue.DecodeSSVMessage(msg) if err != nil { lastErr = err continue diff --git a/protocol/v2/ssv/testing/runner.go b/protocol/v2/ssv/testing/runner.go index 2d8fcc8095..7689d10073 100644 --- a/protocol/v2/ssv/testing/runner.go +++ b/protocol/v2/ssv/testing/runner.go @@ -23,14 +23,14 @@ var AttesterRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySe //} var ProposerRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { - return baseRunner(logger, spectypes.BNRoleProposer, specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil, true), keySet) + return baseRunner(logger, spectypes.BNRoleProposer, specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil), keySet) } var ProposerBlindedBlockRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { ret := baseRunner( logger, spectypes.BNRoleProposer, - specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil, true), + 
specssv.ProposerValueCheckF(spectestingutils.NewTestingKeyManager(), spectypes.BeaconTestNetwork, spectestingutils.TestingValidatorPubKey[:], spectestingutils.TestingValidatorIndex, nil), keySet, ) ret.(*runner.ProposerRunner).ProducesBlindedBlocks = true @@ -54,6 +54,10 @@ var ValidatorRegistrationRunner = func(logger *zap.Logger, keySet *spectestingut return ret } +var VoluntaryExitRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { + return baseRunner(logger, spectypes.BNRoleVoluntaryExit, nil, keySet) +} + var UnknownDutyTypeRunner = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet) runner.Runner { return baseRunner(logger, spectestingutils.UnknownDutyType, spectestingutils.UnknownDutyValueCheck(), keySet) } @@ -144,6 +148,14 @@ var baseRunner = func(logger *zap.Logger, role spectypes.BeaconRole, valCheck sp net, km, ) + case spectypes.BNRoleVoluntaryExit: + return runner.NewVoluntaryExitRunner( + spectypes.BeaconTestNetwork, + share, + spectestingutils.NewTestingBeaconNode(), + net, + km, + ) case spectestingutils.UnknownDutyType: ret := runner.NewAttesterRunnner( spectypes.BeaconTestNetwork, diff --git a/protocol/v2/ssv/testing/validator.go b/protocol/v2/ssv/testing/validator.go index 844145bd8c..d006111c2b 100644 --- a/protocol/v2/ssv/testing/validator.go +++ b/protocol/v2/ssv/testing/validator.go @@ -7,6 +7,7 @@ import ( spectestingutils "github.com/bloxapp/ssv-spec/types/testingutils" "go.uber.org/zap" + "github.com/bloxapp/ssv/networkconfig" "github.com/bloxapp/ssv/protocol/v2/qbft/testing" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/ssv/validator" @@ -22,7 +23,7 @@ var BaseValidator = func(logger *zap.Logger, keySet *spectestingutils.TestKeySet validator.Options{ Network: spectestingutils.NewTestingNetwork(), Beacon: spectestingutils.NewTestingBeaconNode(), - BeaconNetwork: spectypes.BeaconTestNetwork, + BeaconNetwork: networkconfig.TestNetwork.Beacon, Storage: testing.TestingStores(logger), SSVShare: &types.SSVShare{ Share: *spectestingutils.TestingShare(keySet), diff --git a/protocol/v2/ssv/validator/metrics.go b/protocol/v2/ssv/validator/metrics.go new file mode 100644 index 0000000000..ce1840736b --- /dev/null +++ b/protocol/v2/ssv/validator/metrics.go @@ -0,0 +1,45 @@ +package validator + +import ( + "time" + + spectypes "github.com/bloxapp/ssv-spec/types" + + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" +) + +type Metrics interface { + ValidatorInactive(publicKey []byte) + ValidatorNoIndex(publicKey []byte) + ValidatorError(publicKey []byte) + ValidatorReady(publicKey []byte) + ValidatorNotActivated(publicKey []byte) + ValidatorExiting(publicKey []byte) + ValidatorSlashed(publicKey []byte) + ValidatorNotFound(publicKey []byte) + ValidatorPending(publicKey []byte) + ValidatorRemoved(publicKey []byte) + ValidatorUnknown(publicKey []byte) + + queue.Metrics +} + +type NopMetrics struct{} + +func (n NopMetrics) ValidatorInactive([]byte) {} +func (n NopMetrics) ValidatorNoIndex([]byte) {} +func (n NopMetrics) ValidatorError([]byte) {} +func (n NopMetrics) ValidatorReady([]byte) {} +func (n NopMetrics) ValidatorNotActivated([]byte) {} +func (n NopMetrics) ValidatorExiting([]byte) {} +func (n NopMetrics) ValidatorSlashed([]byte) {} +func (n NopMetrics) ValidatorNotFound([]byte) {} +func (n NopMetrics) ValidatorPending([]byte) {} +func (n NopMetrics) ValidatorRemoved([]byte) {} +func (n NopMetrics) ValidatorUnknown([]byte) {} +func (n NopMetrics) IncomingQueueMessage(spectypes.MessageID) {} 
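
NewValidator (in the validator.go diff further below) defaults a nil Metrics option to NopMetrics, so every instrumentation hook is always callable without nil checks; the remaining no-op methods of NopMetrics continue just below. The pattern in miniature, with a much smaller hypothetical interface:

    package main

    import "fmt"

    type metrics interface {
        ValidatorReady(pubKey []byte)
        DroppedQueueMessage(id string)
    }

    // nopMetrics satisfies the interface with empty methods so callers can
    // invoke hooks unconditionally.
    type nopMetrics struct{}

    func (nopMetrics) ValidatorReady([]byte)      {}
    func (nopMetrics) DroppedQueueMessage(string) {}

    // defaultMetrics performs the same nil-defaulting NewValidator does.
    func defaultMetrics(m metrics) metrics {
        if m == nil {
            m = nopMetrics{}
        }
        return m
    }

    func main() {
        m := defaultMetrics(nil)
        m.ValidatorReady(nil) // safe: no nil-interface panic
        fmt.Println("hooks are no-ops by default")
    }
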
+func (n NopMetrics) OutgoingQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) DroppedQueueMessage(spectypes.MessageID) {} +func (n NopMetrics) MessageQueueSize(int) {} +func (n NopMetrics) MessageQueueCapacity(int) {} +func (n NopMetrics) MessageTimeInQueue(spectypes.MessageID, time.Duration) {} diff --git a/protocol/v2/ssv/validator/msgqueue_consumer.go b/protocol/v2/ssv/validator/msgqueue_consumer.go index 7ba5efb119..ba82efa396 100644 --- a/protocol/v2/ssv/validator/msgqueue_consumer.go +++ b/protocol/v2/ssv/validator/msgqueue_consumer.go @@ -28,7 +28,8 @@ type queueContainer struct { // HandleMessage handles a spectypes.SSVMessage. // TODO: accept DecodedSSVMessage once p2p is upgraded to decode messages during validation. -func (v *Validator) HandleMessage(logger *zap.Logger, msg *spectypes.SSVMessage) { +// TODO: get rid of logger, add context +func (v *Validator) HandleMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) { v.mtx.RLock() // read v.Queues defer v.mtx.RUnlock() @@ -37,22 +38,13 @@ func (v *Validator) HandleMessage(logger *zap.Logger, msg *spectypes.SSVMessage) // fields.Role(msg.MsgID.GetRoleType())) if q, ok := v.Queues[msg.MsgID.GetRoleType()]; ok { - decodedMsg, err := queue.DecodeSSVMessage(logger, msg) - if err != nil { - logger.Warn("❗ failed to decode message", - zap.Error(err), - zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), - zap.String("msg_id", msg.MsgID.String()), - ) - return - } - if pushed := q.Q.TryPush(decodedMsg); !pushed { + if pushed := q.Q.TryPush(msg); !pushed { msgID := msg.MsgID.String() logger.Warn("❗ dropping message because the queue is full", zap.String("msg_type", message.MsgTypeToString(msg.MsgType)), zap.String("msg_id", msgID)) } - // logger.Debug("📬 queue: pushed message", fields.MessageID(decodedMsg.MsgID), fields.MessageType(decodedMsg.MsgType)) + // logger.Debug("📬 queue: pushed message", fields.MessageID(msg.MsgID), fields.MessageType(msg.MsgType)) } else { logger.Error("❌ missing queue for role type", fields.Role(msg.MsgID.GetRoleType())) } diff --git a/protocol/v2/ssv/validator/non_committee_validator.go b/protocol/v2/ssv/validator/non_committee_validator.go index 3d03a44d4e..e1bcf47df7 100644 --- a/protocol/v2/ssv/validator/non_committee_validator.go +++ b/protocol/v2/ssv/validator/non_committee_validator.go @@ -9,6 +9,7 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/qbft" qbftcontroller "github.com/bloxapp/ssv/protocol/v2/qbft/controller" + "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/types" ) @@ -21,9 +22,10 @@ type NonCommitteeValidator struct { func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID, opts Options) *NonCommitteeValidator { // currently, only need domain & storage config := &qbft.Config{ - Domain: types.GetDefaultDomain(), - Storage: opts.Storage.Get(identifier.GetRoleType()), - Network: opts.Network, + Domain: types.GetDefaultDomain(), + Storage: opts.Storage.Get(identifier.GetRoleType()), + Network: opts.Network, + SignatureVerification: opts.VerifySignatures, } ctrl := qbftcontroller.NewController(identifier[:], &opts.SSVShare.Share, types.GetDefaultDomain(), config, opts.FullNode) ctrl.StoredInstances = make(qbftcontroller.InstanceContainer, 0, nonCommitteeInstanceContainerCapacity(opts.FullNode)) @@ -39,7 +41,7 @@ func NewNonCommitteeValidator(logger *zap.Logger, identifier spectypes.MessageID } } -func (ncv *NonCommitteeValidator) ProcessMessage(logger *zap.Logger, msg 
*spectypes.SSVMessage) { +func (ncv *NonCommitteeValidator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMessage) { logger = logger.With(fields.PubKey(msg.MsgID.GetPubKey()), fields.Role(msg.MsgID.GetRoleType())) if err := validateMessage(ncv.Share.Share, msg); err != nil { diff --git a/protocol/v2/ssv/validator/opts.go b/protocol/v2/ssv/validator/opts.go index e1085dead6..9c2e0d81a7 100644 --- a/protocol/v2/ssv/validator/opts.go +++ b/protocol/v2/ssv/validator/opts.go @@ -6,6 +6,8 @@ import ( spectypes "github.com/bloxapp/ssv-spec/types" "github.com/bloxapp/ssv/ibft/storage" + "github.com/bloxapp/ssv/message/validation" + "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon" qbftctrl "github.com/bloxapp/ssv/protocol/v2/qbft/controller" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" "github.com/bloxapp/ssv/protocol/v2/types" @@ -19,7 +21,7 @@ const ( type Options struct { Network specqbft.Network Beacon specssv.BeaconNode - BeaconNetwork spectypes.BeaconNetwork + BeaconNetwork beacon.BeaconNetwork Storage *storage.QBFTStores SSVShare *types.SSVShare Signer spectypes.KeyManager @@ -30,6 +32,9 @@ type Options struct { BuilderProposals bool QueueSize int GasLimit uint64 + MessageValidator validation.MessageValidator + Metrics Metrics + VerifySignatures bool } func (o *Options) defaults() { diff --git a/protocol/v2/ssv/validator/startup.go b/protocol/v2/ssv/validator/startup.go index 4ca2c8acea..b316e8c9f2 100644 --- a/protocol/v2/ssv/validator/startup.go +++ b/protocol/v2/ssv/validator/startup.go @@ -1,9 +1,7 @@ package validator import ( - "context" "sync/atomic" - "time" "github.com/bloxapp/ssv-spec/p2p" spectypes "github.com/bloxapp/ssv-spec/types" @@ -56,7 +54,6 @@ func (v *Validator) Start(logger *zap.Logger) (started bool, err error) { return true, err } go v.StartQueueConsumer(logger, identifier, v.ProcessMessage) - go v.sync(logger, identifier) } return true, nil } @@ -73,27 +70,3 @@ func (v *Validator) Stop() { v.Queues = make(map[spectypes.BeaconRole]queueContainer) } } - -// sync performs highest decided sync -func (v *Validator) sync(logger *zap.Logger, mid spectypes.MessageID) { - ctx, cancel := context.WithCancel(v.ctx) - defer cancel() - - // TODO: config? 
- interval := time.Second - retries := 3 - - for ctx.Err() == nil { - err := v.Network.SyncHighestDecided(mid) - if err != nil { - logger.Debug("❌ failed to sync highest decided", zap.Error(err)) - retries-- - if retries > 0 { - interval *= 2 - time.Sleep(interval) - continue - } - } - return - } -} diff --git a/protocol/v2/ssv/validator/timer.go b/protocol/v2/ssv/validator/timer.go index 87013bd5dd..6b819b992b 100644 --- a/protocol/v2/ssv/validator/timer.go +++ b/protocol/v2/ssv/validator/timer.go @@ -10,12 +10,13 @@ import ( "github.com/bloxapp/ssv/logging/fields" "github.com/bloxapp/ssv/protocol/v2/message" + "github.com/bloxapp/ssv/protocol/v2/qbft/roundtimer" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/types" ) -func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) func() { - return func() { +func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID, height specqbft.Height) roundtimer.OnRoundTimeoutF { + return func(round specqbft.Round) { v.mtx.RLock() // read-lock for v.Queues, v.state defer v.mtx.RUnlock() @@ -30,12 +31,12 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID return } - msg, err := v.createTimerMessage(identifier, height) + msg, err := v.createTimerMessage(identifier, height, round) if err != nil { logger.Debug("❗ failed to create timer msg", zap.Error(err)) return } - dec, err := queue.DecodeSSVMessage(logger, msg) + dec, err := queue.DecodeSSVMessage(msg) if err != nil { logger.Debug("❌ failed to decode timer msg", zap.Error(err)) return @@ -49,8 +50,11 @@ func (v *Validator) onTimeout(logger *zap.Logger, identifier spectypes.MessageID } } -func (v *Validator) createTimerMessage(identifier spectypes.MessageID, height specqbft.Height) (*spectypes.SSVMessage, error) { - td := types.TimeoutData{Height: height} +func (v *Validator) createTimerMessage(identifier spectypes.MessageID, height specqbft.Height, round specqbft.Round) (*spectypes.SSVMessage, error) { + td := types.TimeoutData{ + Height: height, + Round: round, + } data, err := json.Marshal(td) if err != nil { return nil, errors.Wrap(err, "failed to marshal timeout data") diff --git a/protocol/v2/ssv/validator/validator.go b/protocol/v2/ssv/validator/validator.go index 7f1dd80d2e..0fa54de66a 100644 --- a/protocol/v2/ssv/validator/validator.go +++ b/protocol/v2/ssv/validator/validator.go @@ -13,6 +13,7 @@ import ( "github.com/bloxapp/ssv/ibft/storage" "github.com/bloxapp/ssv/logging/fields" + "github.com/bloxapp/ssv/message/validation" "github.com/bloxapp/ssv/protocol/v2/message" "github.com/bloxapp/ssv/protocol/v2/ssv/queue" "github.com/bloxapp/ssv/protocol/v2/ssv/runner" @@ -39,24 +40,31 @@ type Validator struct { dutyIDs *hashmap.Map[spectypes.BeaconRole, string] state uint32 + + messageValidator validation.MessageValidator } // NewValidator creates a new instance of Validator. 
func NewValidator(pctx context.Context, cancel func(), options Options) *Validator { options.defaults() + if options.Metrics == nil { + options.Metrics = &NopMetrics{} + } + v := &Validator{ - mtx: &sync.RWMutex{}, - ctx: pctx, - cancel: cancel, - DutyRunners: options.DutyRunners, - Network: options.Network, - Storage: options.Storage, - Share: options.SSVShare, - Signer: options.Signer, - Queues: make(map[spectypes.BeaconRole]queueContainer), - state: uint32(NotStarted), - dutyIDs: hashmap.New[spectypes.BeaconRole, string](), + mtx: &sync.RWMutex{}, + ctx: pctx, + cancel: cancel, + DutyRunners: options.DutyRunners, + Network: options.Network, + Storage: options.Storage, + Share: options.SSVShare, + Signer: options.Signer, + Queues: make(map[spectypes.BeaconRole]queueContainer), + state: uint32(NotStarted), + dutyIDs: hashmap.New[spectypes.BeaconRole, string](), + messageValidator: options.MessageValidator, } for _, dutyRunner := range options.DutyRunners { @@ -65,10 +73,9 @@ func NewValidator(pctx context.Context, cancel func(), options Options) *Validat // Setup the queue. role := dutyRunner.GetBaseRunner().BeaconRoleType - msgID := spectypes.NewMsgID(types.GetDefaultDomain(), options.SSVShare.ValidatorPubKey, role).String() v.Queues[role] = queueContainer{ - Q: queue.WithMetrics(queue.New(options.QueueSize), queue.NewPrometheusMetrics(msgID)), + Q: queue.WithMetrics(queue.New(options.QueueSize), options.Metrics), queueState: &queue.State{ HasRunningInstance: false, Height: 0, @@ -111,7 +118,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess return fmt.Errorf("could not get duty runner for msg ID %v", messageID) } - if err := validateMessage(v.Share.Share, msg.SSVMessage); err != nil { + if err := validateMessage(v.Share.Share, msg); err != nil { return fmt.Errorf("message invalid for msg ID %v: %w", messageID, err) } @@ -143,7 +150,7 @@ func (v *Validator) ProcessMessage(logger *zap.Logger, msg *queue.DecodedSSVMess } } -func validateMessage(share spectypes.Share, msg *spectypes.SSVMessage) error { +func validateMessage(share spectypes.Share, msg *queue.DecodedSSVMessage) error { if !share.ValidatorPubKey.MessageIDBelongs(msg.GetID()) { return errors.New("msg ID doesn't match validator ID") } diff --git a/protocol/v2/sync/handlers/decided_history.go b/protocol/v2/sync/handlers/decided_history.go deleted file mode 100644 index 3dc960cfcb..0000000000 --- a/protocol/v2/sync/handlers/decided_history.go +++ /dev/null @@ -1,57 +0,0 @@ -package handlers - -import ( - "fmt" - - specqbft "github.com/bloxapp/ssv-spec/qbft" - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/ibft/storage" - "github.com/bloxapp/ssv/protocol/v2/message" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" -) - -// HistoryHandler handler for decided history protocol -// TODO: add msg validation and report scores -func HistoryHandler(logger *zap.Logger, storeMap *storage.QBFTStores, reporting protocolp2p.ValidationReporting, maxBatchSize int) protocolp2p.RequestHandler { - return func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { - logger := logger.With(zap.String("msg_id", fmt.Sprintf("%x", msg.MsgID))) - sm := &message.SyncMessage{} - err := sm.Decode(msg.Data) - if err != nil { - logger.Debug("❌ failed to decode message data", zap.Error(err)) - reporting.ReportValidation(logger, msg, protocolp2p.ValidationRejectLow) - sm.Status = message.StatusBadRequest - } else if sm.Protocol != 
message.DecidedHistoryType { - // not this protocol - // TODO: remove after v0 - return nil, nil - } else { - items := int(sm.Params.Height[1] - sm.Params.Height[0]) - if items > maxBatchSize { - sm.Params.Height[1] = sm.Params.Height[0] + specqbft.Height(maxBatchSize) - } - msgID := msg.GetID() - store := storeMap.Get(msgID.GetRoleType()) - if store == nil { - return nil, errors.New(fmt.Sprintf("not storage found for type %s", msgID.GetRoleType().String())) - } - instances, err := store.GetInstancesInRange(msgID[:], sm.Params.Height[0], sm.Params.Height[1]) - results := make([]*specqbft.SignedMessage, 0, len(instances)) - for _, instance := range instances { - results = append(results, instance.DecidedMessage) - } - sm.UpdateResults(err, results...) - } - - data, err := sm.Encode() - if err != nil { - return nil, errors.Wrap(err, "could not encode result data") - } - msg.Data = data - - return msg, nil - } -} diff --git a/protocol/v2/sync/handlers/last_decided.go b/protocol/v2/sync/handlers/last_decided.go deleted file mode 100644 index 6b33579b0f..0000000000 --- a/protocol/v2/sync/handlers/last_decided.go +++ /dev/null @@ -1,53 +0,0 @@ -package handlers - -import ( - "fmt" - - spectypes "github.com/bloxapp/ssv-spec/types" - "github.com/pkg/errors" - "go.uber.org/zap" - - "github.com/bloxapp/ssv/ibft/storage" - "github.com/bloxapp/ssv/logging/fields" - "github.com/bloxapp/ssv/protocol/v2/message" - protocolp2p "github.com/bloxapp/ssv/protocol/v2/p2p" -) - -// LastDecidedHandler handler for last-decided protocol -// TODO: add msg validation and report scores -func LastDecidedHandler(plogger *zap.Logger, storeMap *storage.QBFTStores, reporting protocolp2p.ValidationReporting) protocolp2p.RequestHandler { - return func(msg *spectypes.SSVMessage) (*spectypes.SSVMessage, error) { - logger := plogger.With(fields.PubKey(msg.MsgID.GetPubKey())) - sm := &message.SyncMessage{} - err := sm.Decode(msg.Data) - if err != nil { - logger.Debug("❌ failed to decode message data", zap.Error(err)) - reporting.ReportValidation(logger, msg, protocolp2p.ValidationRejectLow) - sm.Status = message.StatusBadRequest - } else if sm.Protocol != message.LastDecidedType { - // not this protocol - // TODO: remove after v0 - return nil, nil - } else { - msgID := msg.GetID() - store := storeMap.Get(msgID.GetRoleType()) - if store == nil { - return nil, errors.New(fmt.Sprintf("not storage found for type %s", msgID.GetRoleType().String())) - } - instance, err := store.GetHighestInstance(msgID[:]) - if err != nil { - logger.Debug("❗ failed to get highest instance", zap.Error(err)) - } else if instance != nil { - sm.UpdateResults(err, instance.DecidedMessage) - } - } - - data, err := sm.Encode() - if err != nil { - return nil, errors.Wrap(err, "could not encode result data") - } - msg.Data = data - - return msg, nil - } -} diff --git a/protocol/v2/testing/test_utils.go b/protocol/v2/testing/test_utils.go index 2b2f79e4c1..7994e60361 100644 --- a/protocol/v2/testing/test_utils.go +++ b/protocol/v2/testing/test_utils.go @@ -1,6 +1,7 @@ package testing import ( + "fmt" "os" "path" "path/filepath" @@ -145,9 +146,25 @@ func AggregateInvalidSign(t *testing.T, sks map[spectypes.OperatorID]*bls.Secret } func GetSpecTestJSON(path string, module string) ([]byte, error) { + p, err := GetSpecDir(path, module) + if err != nil { + return nil, fmt.Errorf("could not get spec test dir: %w", err) + } + return os.ReadFile(filepath.Join(filepath.Clean(p), filepath.Clean(specTestPath))) +} + +// GetSpecDir returns the path to the ssv-spec module. 
diff --git a/protocol/v2/types/bls.go b/protocol/v2/types/bls.go
index 70d2b7cb0e..d4e2b39fb9 100644
--- a/protocol/v2/types/bls.go
+++ b/protocol/v2/types/bls.go
@@ -9,7 +9,7 @@ var blsPublicKeyCache *lru.Cache[string, bls.PublicKey]
 
 func init() {
 	var err error
-	blsPublicKeyCache, err = lru.New[string, bls.PublicKey](10_000)
+	blsPublicKeyCache, err = lru.New[string, bls.PublicKey](128_000)
 	if err != nil {
 		panic(err)
 	}
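The LRU above caches deserialized BLS public keys keyed by their serialized bytes; the capacity is raised from 10,000 to 128,000 entries so far more keys stay resident. A sketch of the cache-aside pattern such a cache serves, assuming a hypothetical helper (the package's real lookup function may differ):

package types

import (
	"github.com/herumi/bls-eth-go-binary/bls"
)

// deserializePublicKey is a sketch: return the cached key when present,
// otherwise pay for deserialization once and cache the result.
func deserializePublicKey(raw []byte) (bls.PublicKey, error) {
	key := string(raw)
	if pk, ok := blsPublicKeyCache.Get(key); ok {
		return pk, nil // cache hit: skip the expensive deserialization
	}
	var pk bls.PublicKey
	if err := pk.Deserialize(raw); err != nil {
		return bls.PublicKey{}, err
	}
	blsPublicKeyCache.Add(key, pk)
	return pk, nil
}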
diff --git a/protocol/v2/types/crypto.go b/protocol/v2/types/crypto.go
index 24863a64cc..3f08b7ee5b 100644
--- a/protocol/v2/types/crypto.go
+++ b/protocol/v2/types/crypto.go
@@ -15,13 +15,11 @@ import (
 //
 // TODO: rethink this function and consider moving/refactoring it.
 func VerifyByOperators(s spectypes.Signature, data spectypes.MessageSignature, domain spectypes.DomainType, sigType spectypes.SignatureType, operators []*spectypes.Operator) error {
-	// decode sig
 	sign := &bls.Sign{}
 	if err := sign.Deserialize(s); err != nil {
 		return errors.Wrap(err, "failed to deserialize signature")
 	}
 
-	// find operators
 	pks := make([]bls.PublicKey, 0)
 	for _, id := range data.GetSigners() {
 		found := false
@@ -41,13 +39,11 @@ func VerifyByOperators(s spectypes.Signature, data spectypes.MessageSignature, d
 		}
 	}
 
-	// compute root
 	computedRoot, err := spectypes.ComputeSigningRoot(data, spectypes.ComputeSignatureDomain(domain, sigType))
 	if err != nil {
 		return errors.Wrap(err, "could not compute signing root")
 	}
 
-	// verify
 	if res := sign.FastAggregateVerify(pks, computedRoot[:]); !res {
 		return errors.New("failed to verify signature")
 	}
@@ -72,7 +68,6 @@ func VerifyReconstructedSignature(sig *bls.Sign, validatorPubKey []byte, root [3
 		return errors.Wrap(err, "could not deserialize validator pk")
 	}
 
-	// verify reconstructed sig
 	if res := sig.VerifyByte(&pk, root[:]); !res {
 		return errors.New("could not reconstruct a valid signature")
 	}
diff --git a/protocol/v2/types/messages.go b/protocol/v2/types/messages.go
index 121194142d..529b2ab821 100644
--- a/protocol/v2/types/messages.go
+++ b/protocol/v2/types/messages.go
@@ -34,6 +34,7 @@ type EventMsg struct {
 
 type TimeoutData struct {
 	Height qbft.Height
+	Round  qbft.Round
 }
 
 type ExecuteDutyData struct {
@@ -57,11 +58,11 @@ func (m *EventMsg) GetExecuteDutyData() (*ExecuteDutyData, error) {
 }
 
 // Encode returns a msg encoded bytes or error
-func (msg *EventMsg) Encode() ([]byte, error) {
-	return json.Marshal(msg)
+func (m *EventMsg) Encode() ([]byte, error) {
+	return json.Marshal(m)
 }
 
 // Decode returns error if decoding failed
-func (msg *EventMsg) Decode(data []byte) error {
-	return json.Unmarshal(data, &msg)
+func (m *EventMsg) Decode(data []byte) error {
+	return json.Unmarshal(data, &m)
 }
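With the new Round field, a timeout event can report which QBFT round expired rather than only the height. A round-trip sketch with illustrative values (TimeoutData has no JSON tags, so field names serialize as-is):

package main

import (
	"encoding/json"
	"fmt"

	specqbft "github.com/bloxapp/ssv-spec/qbft"
	"github.com/bloxapp/ssv/protocol/v2/types"
)

func main() {
	// Illustrative values: a timeout for round 2 at height 10.
	td := types.TimeoutData{
		Height: specqbft.Height(10),
		Round:  specqbft.Round(2),
	}
	data, err := json.Marshal(td)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"Height":10,"Round":2}
}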
diff --git a/registry/storage/shares.go b/registry/storage/shares.go
index 17572f0257..321bcd15c8 100644
--- a/registry/storage/shares.go
+++ b/registry/storage/shares.go
@@ -206,6 +206,13 @@ func ByActiveValidator() SharesFilter {
 	}
 }
 
+// ByAttesting filters for attesting validators.
+func ByAttesting() SharesFilter {
+	return func(share *types.SSVShare) bool {
+		return share.HasBeaconMetadata() && share.BeaconMetadata.IsAttesting()
+	}
+}
+
 // ByClusterID filters by cluster id.
 func ByClusterID(clusterID []byte) SharesFilter {
 	return func(share *types.SSVShare) bool {
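ByAttesting mirrors the surrounding filters: a SharesFilter is just a predicate over *types.SSVShare. A sketch of applying it to an in-memory slice; the helper below is illustrative, and in the node the filter would instead be passed to the shares storage alongside the other filters:

package storageexample

import (
	"github.com/bloxapp/ssv/protocol/v2/types"
	registrystorage "github.com/bloxapp/ssv/registry/storage"
)

// filterAttesting keeps only shares whose beacon metadata marks them
// as attesting, using the new ByAttesting predicate.
func filterAttesting(shares []*types.SSVShare) []*types.SSVShare {
	attesting := registrystorage.ByAttesting()
	out := make([]*types.SSVShare, 0, len(shares))
	for _, share := range shares {
		if attesting(share) {
			out = append(out, share)
		}
	}
	return out
}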
diff --git a/scripts/spec-alignment/differ.config.yaml b/scripts/spec-alignment/differ.config.yaml
index 641ad31360..2440971fe0 100644
--- a/scripts/spec-alignment/differ.config.yaml
+++ b/scripts/spec-alignment/differ.config.yaml
@@ -8,7 +8,11 @@ ApprovedChanges: ["256a3dc0f1eb7abf","22b66e9a63ba145b","12c1c3a1622fb7cc","1c44
   "db32f358b6e8e2bb","f372e174e1f34c3b","bc47b3d202e8cd0d","86a6abca1a1c16d6","1655d21d5a4cad4","ac4e427097fc5533","6b4d5a114f8066ff",
   "9482fb9b6a953c48","5778a05e0976a6eb","24e2c7f54d5dd1d","2a8937e50d20faa9","587c629a67ef07ed","9d06d8e0ee4e1113","e624ec802068e711",
   "943be3ce709a99d3","5b3bb2d2262fe8be","c20c4c7ed8d1711d","b10c6fc7dd9eee7","c121cdaab6c1c698","e12b17f3910be26b","e47bf52e962c90af",
-  "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f"]
+  "90b8a0c8d2c30e95","e8292a58d2eb08ab","17cf3119ac6879f2","3f31546191c9e6b2","29c96f90edc2458d","f29db2624fd63635","dff6fea2c2d32a5f",
+  "ae1b53fc580ce346","c117bd5db3eeabd6","d06552d71b9ca4cd","4cb333a88af66575","2a580187c312c79a","bf8cf93c55c1eadb","6d877e24991465e4",
+  "b1c8e0148a4a755","2c25abb7c776bd54","a1754e08473bd1fa","4dbab14670fa155d","2a3667a499a23b16","930379d323dd95e8","65efe31656e8814f",
+  "1270cef2e573f846","aeafb38ca9114f12","2a83e3384b45f2d7","91fbb874b3ce2570","74ad51ca63526e1e","defd8406641d53a5"]
+
 IgnoredIdentifiers:
   - logger
 ReducedPackageNames:
diff --git a/utils/rsaencryption/testingspace/vars.go b/utils/rsaencryption/testingspace/vars.go
index 27a90cc0de..f94a8da859 100644
--- a/utils/rsaencryption/testingspace/vars.go
+++ b/utils/rsaencryption/testingspace/vars.go
@@ -2,6 +2,7 @@ package testing
 
 var (
 	// SkPem is a operator private key
+	// #nosec G101 (Potential hardcoded credentials: RSA private key)
 	SkPem = "-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEAowE7OEbwyLkvrZ0TU4jjooyIFxNvgrY8Fj+WslyZTlyj8UDf\nFrYh5Un2u4YMdAe+cPf1XK+A/P9XX7OB4nf1OoGVB6wrC/jhLbvOH650ryUYopeY\nhlSXxGnD4vcvTvcqLLB+ue2/iySxQLpZR/6VsT3fFrEonzFTqnFCwCF28iPnJVBj\nX6T/HcTJ55IDkbtotarU6cwwNOHnHkzWrv7ityPkR4Ge11hmVG9QjROt56ehXfFs\nFo5MqSvqpYplXkI/zUNm8j/lqEdU0RXUr41L2hyKY/pVjsgmeTsN7/ZqACkHye9F\nbkV9V/VbTh7hWVLTqGSh7BY/D7gwOwfuKiq2TwIDAQABAoIBADjO3Qyn7JKHt44S\nCAI82thzkZo5M8uiJx652pMeom8k6h3SNe18XCPEuzBvbzeg20YTpHdA0vtZIeJA\ndSuwEs7pCj86SWZKvm9p3FQ+QHwpuYQwwP9Py/Svx4z6CIrEqPYaLJAvw2mCyCN+\nzk7A8vpqTa1i4H1ae4YTIuhCwWlxe1ttD6rVUYfC2rVaFJ+b8JlzFRq4bnAR8yme\nrE4iAlfgTOj9zL814qRlYQeeZhMvA8T0qWUohbr1imo5XzIJZayLocvqhZEbk0dj\nq9qKWdIpAATRjWvb+7PkjmlwNjLOhJ1phtCkc/S4j2cvo9gcS7WafxaqCl/ix4Yt\n5KvPJ8ECgYEA0Em4nMMEFXbuSM/l5UCzv3kT6H/TYO7FVh071G7QAFoloxJBZDFV\n7fHsc+uCimlG2Xt3CrGo9tsOnF/ZgDKNmtDvvjxmlPnAb5g4uhXgYNMsKQShpeRW\n/ay8CmWbsRqXZaLoI5br2kCTLwsVz2hpabAzBOr2YV3vMRB5i7COYSMCgYEAyFgL\n3DkKwsTTyVyplenoAZaS/o0mKxZnffRnHNP5QgRfT4pQkuogk+MYAeBuGsc4cTi7\nrTtytUMBABXEKGIJkAbNoASHQMUcO1vvcwhBW7Ay+oxuc0JSlnaXjowS0C0o/4qr\nQ/rpUneir+Vu/N8+6edETRkNj+5unmePEe9NBuUCgYEAgtUr31woHot8FcRxNdW0\nkpstRCe20PZqgjMOt9t7UB1P8uSuqo7K2RHTYuUWNHb4h/ejyNXbumPTA6q5Zmta\nw1pmnWo3TXCrze0iBNFlBazf2kwMdbW+Zs2vuCAm8dIwMylnA6PzNj7FtRETfBqr\nzDVfdsFYTcTBUGJ21qXqaV0CgYEAmuMPEEv9WMTo43VDGsaCeq/Zpvii+I7SphsM\nmMn8m6Bbu1e4oUxmsU7RoanMFeHNbiMpXW1namGJ5XHufDYHJJVN5Zd6pYV+JRoX\njjxkoyke0Hs/bNZqmS7ITwlWBiHT33Rqohzaw8oAObLMUq2ZqyYDtQNYa90vIkH3\n5yq1x00CgYEAs4ztQhGRbeUlqnW6Z6yfRJ6XXYqdMPhxuBxvNn/dxJ10T4W2DUuC\njSdpGXrY+ECYyXUwlXBqbaKx1K5AQD7nmu9J3l0oMkX6tSBj1OE5MabATrsW6wvT\nhkTPJZMyPUYhoBkivPUKyQXswrQV/nUQAsAcLeJShTW4gSs0M6weQAc=\n-----END RSA PRIVATE KEY-----\n"
 	// EncryptedKeyBase64 SkPem in base64 format
 	EncryptedKeyBase64 = "NW/6N5Ubo5T+oiT9My2wXFH5TWT7iQnN8YKUlcoFeg00OzL1S4yKrIPemdr7SM3EbPeHlBtOAM3z+06EmaNlwVdBiexSRJmgnknqwt/Ught4pKZK/WdJAEhMRwjZ3nx1Qi1TYcw7oZBaOdeTdm65QEAnsqOHk1htnUTXqsqYxVF750u8JWq3Mzr3oCN65ydSJRQoSa+lo3DikIDrXSYe1LRY5epMRrOq3cujuykuAVZQWp1vzv4w4V6mffmxaDbPpln/w28FKCxYkxG/WhwGuXR1GK6IWr3xpXPKcG+lzfvlmh4UiK1Lad/YD460oMXOKZT8apn4HL4tl9HOb6RyWQ=="
diff --git a/utils/testutils.go b/utils/testutils.go
new file mode 100644
index 0000000000..bfd9290b25
--- /dev/null
+++ b/utils/testutils.go
@@ -0,0 +1,55 @@
+package utils
+
+import (
+	"sync"
+	"testing"
+
+	"github.com/attestantio/go-eth2-client/spec/phase0"
+	"github.com/golang/mock/gomock"
+
+	"github.com/bloxapp/ssv/networkconfig"
+	mocknetwork "github.com/bloxapp/ssv/protocol/v2/blockchain/beacon/mocks"
+)
+
+type SlotValue struct {
+	mu   sync.Mutex
+	slot phase0.Slot
+}
+
+func (sv *SlotValue) SetSlot(s phase0.Slot) {
+	sv.mu.Lock()
+	defer sv.mu.Unlock()
+	sv.slot = s
+}
+
+func (sv *SlotValue) GetSlot() phase0.Slot {
+	sv.mu.Lock()
+	defer sv.mu.Unlock()
+	return sv.slot
+}
+
+func SetupMockBeaconNetwork(t *testing.T, currentSlot *SlotValue) *mocknetwork.MockBeaconNetwork {
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+
+	if currentSlot == nil {
+		currentSlot = &SlotValue{}
+		currentSlot.SetSlot(32)
+	}
+
+	mockBeaconNetwork := mocknetwork.NewMockBeaconNetwork(ctrl)
+	mockBeaconNetwork.EXPECT().GetBeaconNetwork().Return(networkconfig.TestNetwork.Beacon.GetBeaconNetwork()).AnyTimes()
+
+	mockBeaconNetwork.EXPECT().EstimatedCurrentSlot().DoAndReturn(
+		func() phase0.Slot {
+			return currentSlot.GetSlot()
+		},
+	).AnyTimes()
+	mockBeaconNetwork.EXPECT().EstimatedEpochAtSlot(gomock.Any()).DoAndReturn(
+		func(slot phase0.Slot) phase0.Epoch {
+			return phase0.Epoch(slot / 32)
+		},
+	).AnyTimes()
+
+	return mockBeaconNetwork
+}
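A usage sketch for the new helper: because the mock reads the shared SlotValue on every call, a test can advance the beacon clock mid-test. The test body is illustrative:

package utils_test

import (
	"testing"

	"github.com/bloxapp/ssv/utils"
)

func TestMockBeaconNetworkClock(t *testing.T) {
	slot := &utils.SlotValue{}
	slot.SetSlot(64)

	bn := utils.SetupMockBeaconNetwork(t, slot)

	if got := bn.EstimatedCurrentSlot(); got != 64 {
		t.Fatalf("expected slot 64, got %d", got)
	}

	// Advance the clock; the mock reads the shared value on each call.
	slot.SetSlot(65)
	if got := bn.EstimatedEpochAtSlot(bn.EstimatedCurrentSlot()); got != 2 {
		t.Fatalf("expected epoch 2, got %d", got)
	}
}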