diff --git a/Earthfile b/Earthfile index 95f6e0ee2c..306dcf3452 100644 --- a/Earthfile +++ b/Earthfile @@ -1,8 +1,8 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/mdlint:v3.2.03 AS mdlint-ci -IMPORT github.com/input-output-hk/catalyst-ci/earthly/cspell:v3.2.03 AS cspell-ci -IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.03 AS postgresql-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/mdlint:v3.2.07 AS mdlint-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/cspell:v3.2.07 AS cspell-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.07 AS postgresql-ci FROM debian:stable-slim diff --git a/catalyst-gateway/Earthfile b/catalyst-gateway/Earthfile index 0795a93f6f..8ec606226d 100644 --- a/catalyst-gateway/Earthfile +++ b/catalyst-gateway/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.2.03 AS rust-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/rust:v3.2.07 AS rust-ci #cspell: words rustfmt toolsets USERARCH stdcfgs @@ -62,7 +62,7 @@ all-hosts-build: package-cat-gateway: ARG tag="latest" - FROM alpine:3.19 + FROM alpine:3.20.3 WORKDIR /cat-gateway RUN apk add --no-cache gcc bash diff --git a/catalyst-gateway/Justfile b/catalyst-gateway/Justfile index a07d9b0c0e..724c79f5bc 100644 --- a/catalyst-gateway/Justfile +++ b/catalyst-gateway/Justfile @@ -44,12 +44,12 @@ build-cat-gateway: code-format code-lint # Run cat-gateway natively on preprod run-cat-gateway: build-cat-gateway CHAIN_FOLLOWER_SYNC_TASKS="16" \ - RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \ + RUST_LOG="error,cat_gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \ CHAIN_NETWORK="Preprod" \ ./target/release/cat-gateway run --log-level debug # Run cat-gateway natively on mainnet run-cat-gateway-mainnet: build-cat-gateway CHAIN_FOLLOWER_SYNC_TASKS="1" \ - 
RUST_LOG="error,cat-gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \ + RUST_LOG="error,cat_gateway=debug,cardano_chain_follower=debug,mithril-client=debug" \ ./target/release/cat-gateway run --log-level debug diff --git a/catalyst-gateway/bin/src/cardano/mod.rs b/catalyst-gateway/bin/src/cardano/mod.rs index 08e1c9015b..60ac5ae2ab 100644 --- a/catalyst-gateway/bin/src/cardano/mod.rs +++ b/catalyst-gateway/bin/src/cardano/mod.rs @@ -12,7 +12,7 @@ use tracing::{error, info, warn}; use crate::{ db::index::{block::index_block, session::CassandraSession}, - settings::Settings, + settings::{chain_follower, Settings}, }; // pub(crate) mod cip36_registration_obsolete; @@ -26,9 +26,13 @@ const MAX_BLOCKS_BATCH_LEN: usize = 1024; const INDEXING_DB_READY_WAIT_INTERVAL: Duration = Duration::from_secs(1); /// Start syncing a particular network -async fn start_sync_for(chain: Network) -> anyhow::Result<()> { - let cfg = ChainSyncConfig::default_for(chain); - info!(chain = %cfg.chain, "Starting Blockchain Sync"); +async fn start_sync_for(cfg: &chain_follower::EnvVars) -> anyhow::Result<()> { + let chain = cfg.chain; + let dl_config = cfg.dl_config.clone(); + + let mut cfg = ChainSyncConfig::default_for(chain); + cfg.mithril_cfg = cfg.mithril_cfg.with_dl_config(dl_config); + info!(chain = %chain, "Starting Blockchain Sync"); if let Err(error) = cfg.run().await { error!(chain=%chain, error=%error, "Failed to start chain sync task"); @@ -275,7 +279,7 @@ pub(crate) async fn start_followers() -> anyhow::Result<()> { cfg.log(); // Start Syncing the blockchain, so we can consume its data as required. 
- start_sync_for(cfg.chain).await?; + start_sync_for(&cfg).await?; info!(chain=%cfg.chain,"Chain Sync is started."); tokio::spawn(async move { diff --git a/catalyst-gateway/bin/src/db/index/block/certs.rs b/catalyst-gateway/bin/src/db/index/block/certs.rs index 86240df12b..1a8ecbf570 100644 --- a/catalyst-gateway/bin/src/db/index/block/certs.rs +++ b/catalyst-gateway/bin/src/db/index/block/certs.rs @@ -13,7 +13,7 @@ use crate::{ session::CassandraSession, }, service::utilities::convert::from_saturating, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Insert TXI Query and Parameters @@ -109,7 +109,7 @@ impl StakeRegistrationInsertQuery { /// Prepare Batch of Insert TXI Index Data Queries pub(crate) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let insert_queries = PreparedQueries::prepare_batch( session.clone(), @@ -145,7 +145,7 @@ impl CertInsertQuery { /// Prepare Batch of Insert TXI Index Data Queries pub(crate) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { // Note: for now we have one query, but there are many certs, and later we may have more // to add here. 
diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs index 771cb9b5d2..d0a94fe7d5 100644 --- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs +++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36.rs @@ -8,7 +8,7 @@ use tracing::error; use crate::{ db::index::queries::{PreparedQueries, SizedBatch}, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Index Registration by Stake Address @@ -82,7 +82,7 @@ impl Params { /// Prepare Batch of Insert CIP-36 Registration Index Data Queries pub(super) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let insert_queries = PreparedQueries::prepare_batch( session.clone(), diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs index b7f0d48d83..e990443e30 100644 --- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs +++ b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_for_vote_key.rs @@ -8,7 +8,7 @@ use tracing::error; use crate::{ db::index::queries::{PreparedQueries, SizedBatch}, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Index Registration by Vote Key @@ -49,7 +49,7 @@ impl Params { /// Prepare Batch of Insert CIP-36 Registration Index Data Queries pub(super) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let insert_queries = PreparedQueries::prepare_batch( session.clone(), diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs index 0ab3fd8122..a5a82e1d26 100644 --- a/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs +++ 
b/catalyst-gateway/bin/src/db/index/block/cip36/insert_cip36_invalid.rs @@ -8,7 +8,7 @@ use tracing::error; use crate::{ db::index::queries::{PreparedQueries, SizedBatch}, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Index Registration by Stake Address (Invalid Registrations) @@ -99,7 +99,7 @@ impl Params { /// Prepare Batch of Insert CIP-36 Registration Index Data Queries pub(super) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let insert_queries = PreparedQueries::prepare_batch( session.clone(), diff --git a/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs index 13d4c70b46..a03dca54e2 100644 --- a/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs +++ b/catalyst-gateway/bin/src/db/index/block/cip36/mod.rs @@ -14,7 +14,7 @@ use crate::{ queries::{FallibleQueryTasks, PreparedQuery, SizedBatch}, session::CassandraSession, }, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Insert CIP-36 Registration Queries @@ -39,7 +39,7 @@ impl Cip36InsertQuery { /// Prepare Batch of Insert Cip36 Registration Data Queries pub(crate) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result<(SizedBatch, SizedBatch, SizedBatch)> { let insert_cip36_batch = insert_cip36::Params::prepare_batch(session, cfg).await; let insert_cip36_invalid_batch = diff --git a/catalyst-gateway/bin/src/db/index/block/mod.rs b/catalyst-gateway/bin/src/db/index/block/mod.rs index 775b55d502..71969ef913 100644 --- a/catalyst-gateway/bin/src/db/index/block/mod.rs +++ b/catalyst-gateway/bin/src/db/index/block/mod.rs @@ -9,7 +9,7 @@ pub(crate) mod txo; use cardano_chain_follower::MultiEraBlock; use certs::CertInsertQuery; use cip36::Cip36InsertQuery; -use tracing::{debug, error}; +use tracing::error; use txi::TxiInsertQuery; use txo::TxoInsertQuery; @@ -75,7 
+75,7 @@ pub(crate) async fn index_block(block: &MultiEraBlock) -> anyhow::Result<()> { match handle.await { Ok(join_res) => { match join_res { - Ok(res) => debug!(res=?res,"Query OK"), + Ok(_res) => {}, // debug!(res=?res,"Query OK") Err(error) => { // IF a query fails, assume everything else is broken. error!(error=%error,"Query Failed"); diff --git a/catalyst-gateway/bin/src/db/index/block/txi.rs b/catalyst-gateway/bin/src/db/index/block/txi.rs index 9dd4e0c8f9..f8ff02da8a 100644 --- a/catalyst-gateway/bin/src/db/index/block/txi.rs +++ b/catalyst-gateway/bin/src/db/index/block/txi.rs @@ -10,7 +10,7 @@ use crate::{ queries::{FallibleQueryTasks, PreparedQueries, PreparedQuery, SizedBatch}, session::CassandraSession, }, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Insert TXI Query and Parameters @@ -54,7 +54,7 @@ impl TxiInsertQuery { /// Prepare Batch of Insert TXI Index Data Queries pub(crate) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let txi_insert_queries = PreparedQueries::prepare_batch( session.clone(), diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs index 94837b5093..d6c8b7702c 100644 --- a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo.rs @@ -9,7 +9,7 @@ use tracing::error; use crate::{ db::index::queries::{PreparedQueries, SizedBatch}, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// TXO by Stake Address Indexing query @@ -54,7 +54,7 @@ impl Params { /// Prepare Batch of Staked Insert TXO Asset Index Data Queries pub(super) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let txo_insert_queries = PreparedQueries::prepare_batch( session.clone(), diff --git 
a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs index a42ea5b61e..ba7bbde7c4 100644 --- a/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_txo_asset.rs @@ -7,7 +7,7 @@ use tracing::error; use crate::{ db::index::queries::{PreparedQueries, SizedBatch}, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// TXO Asset by Stake Address Indexing Query @@ -56,7 +56,7 @@ impl Params { /// Prepare Batch of Staked Insert TXO Asset Index Data Queries pub(super) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let txo_insert_queries = PreparedQueries::prepare_batch( session.clone(), diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs index 24957e92b3..7f68823af2 100644 --- a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs +++ b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo.rs @@ -6,7 +6,7 @@ use tracing::error; use crate::{ db::index::queries::{PreparedQueries, SizedBatch}, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Unstaked TXO by Stake Address Indexing query @@ -47,7 +47,7 @@ impl Params { /// Prepare Batch of Staked Insert TXO Asset Index Data Queries pub(super) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let txo_insert_queries = PreparedQueries::prepare_batch( session.clone(), diff --git a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs index 78605f92ae..250ca8ae1c 100644 --- a/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs +++ 
b/catalyst-gateway/bin/src/db/index/block/txo/insert_unstaked_txo_asset.rs @@ -7,7 +7,7 @@ use tracing::error; use crate::{ db::index::queries::{PreparedQueries, SizedBatch}, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Unstaked TXO Asset by Stake Address Indexing Query @@ -56,7 +56,7 @@ impl Params { /// Prepare Batch of Staked Insert TXO Asset Index Data Queries pub(super) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let txo_insert_queries = PreparedQueries::prepare_batch( session.clone(), diff --git a/catalyst-gateway/bin/src/db/index/block/txo/mod.rs b/catalyst-gateway/bin/src/db/index/block/txo/mod.rs index 9b4029fc3b..66bd950822 100644 --- a/catalyst-gateway/bin/src/db/index/block/txo/mod.rs +++ b/catalyst-gateway/bin/src/db/index/block/txo/mod.rs @@ -18,7 +18,7 @@ use crate::{ session::CassandraSession, }, service::utilities::convert::from_saturating, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// This is used to indicate that there is no stake address. 
@@ -52,7 +52,7 @@ impl TxoInsertQuery { /// Prepare Batch of Insert TXI Index Data Queries pub(crate) async fn prepare_batch( - session: &Arc, cfg: &CassandraEnvVars, + session: &Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result<(SizedBatch, SizedBatch, SizedBatch, SizedBatch)> { let txo_staked_insert_batch = insert_txo::Params::prepare_batch(session, cfg).await; let txo_unstaked_insert_batch = diff --git a/catalyst-gateway/bin/src/db/index/queries/mod.rs b/catalyst-gateway/bin/src/db/index/queries/mod.rs index 505918b58f..e34db0647e 100644 --- a/catalyst-gateway/bin/src/db/index/queries/mod.rs +++ b/catalyst-gateway/bin/src/db/index/queries/mod.rs @@ -20,7 +20,7 @@ use staked_ada::{ use super::block::{ certs::CertInsertQuery, cip36::Cip36InsertQuery, txi::TxiInsertQuery, txo::TxoInsertQuery, }; -use crate::settings::{CassandraEnvVars, CASSANDRA_MIN_BATCH_SIZE}; +use crate::settings::cassandra_db; /// Batches of different sizes, prepared and ready for use. pub(crate) type SizedBatch = SkipMap>; @@ -98,7 +98,9 @@ pub(crate) type FallibleQueryTasks = Vec, cfg: &CassandraEnvVars) -> anyhow::Result { + pub(crate) async fn new( + session: Arc, cfg: &cassandra_db::EnvVars, + ) -> anyhow::Result { // We initialize like this, so that all errors preparing querys get shown before aborting. let txi_insert_queries = TxiInsertQuery::prepare_batch(&session, cfg).await; let all_txo_queries = TxoInsertQuery::prepare_batch(&session, cfg).await; @@ -154,7 +156,7 @@ impl PreparedQueries { /// It is necessary to do this because batches are pre-sized, they can not be dynamic. /// Preparing the batches in advance is a very larger performance increase. 
pub(crate) async fn prepare_batch( - session: Arc, query: &str, cfg: &CassandraEnvVars, + session: Arc, query: &str, cfg: &cassandra_db::EnvVars, consistency: scylla::statement::Consistency, idempotent: bool, logged: bool, ) -> anyhow::Result { let sized_batches: SizedBatch = SkipMap::new(); @@ -163,7 +165,7 @@ impl PreparedQueries { // same. let prepared = Self::prepare(session, query, consistency, idempotent).await?; - for batch_size in CASSANDRA_MIN_BATCH_SIZE..=cfg.max_batch_size { + for batch_size in cassandra_db::MIN_BATCH_SIZE..=cfg.max_batch_size { let mut batch: Batch = Batch::new(if logged { scylla::batch::BatchType::Logged } else { @@ -171,7 +173,7 @@ impl PreparedQueries { }); batch.set_consistency(consistency); batch.set_is_idempotent(idempotent); - for _ in CASSANDRA_MIN_BATCH_SIZE..=batch_size { + for _ in cassandra_db::MIN_BATCH_SIZE..=batch_size { batch.append_statement(prepared.clone()); } @@ -208,7 +210,7 @@ impl PreparedQueries { /// This will divide the batch into optimal sized chunks and execute them until all /// values have been executed or the first error is encountered. pub(crate) async fn execute_batch( - &self, session: Arc, cfg: Arc, query: PreparedQuery, + &self, session: Arc, cfg: Arc, query: PreparedQuery, values: Vec, ) -> FallibleQueryResults { let query_map = match query { diff --git a/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs b/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs index 0fe0a60bcf..3c99869030 100644 --- a/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs +++ b/catalyst-gateway/bin/src/db/index/queries/staked_ada/update_txo_spent.rs @@ -10,7 +10,7 @@ use crate::{ queries::{FallibleQueryResults, PreparedQueries, PreparedQuery, SizedBatch}, session::CassandraSession, }, - settings::CassandraEnvVars, + settings::cassandra_db, }; /// Update TXO spent query string. 
@@ -37,7 +37,7 @@ pub(crate) struct UpdateTxoSpentQuery; impl UpdateTxoSpentQuery { /// Prepare a batch of update TXO spent queries. pub(crate) async fn prepare_batch( - session: Arc, cfg: &CassandraEnvVars, + session: Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result { let update_txo_spent_queries = PreparedQueries::prepare_batch( session.clone(), diff --git a/catalyst-gateway/bin/src/db/index/schema/mod.rs b/catalyst-gateway/bin/src/db/index/schema/mod.rs index 4bfd4725db..89cba358da 100644 --- a/catalyst-gateway/bin/src/db/index/schema/mod.rs +++ b/catalyst-gateway/bin/src/db/index/schema/mod.rs @@ -8,7 +8,7 @@ use scylla::Session; use serde_json::json; use tracing::error; -use crate::settings::CassandraEnvVars; +use crate::settings::cassandra_db; /// Keyspace Create (Templated) const CREATE_NAMESPACE_CQL: &str = include_str!("./cql/namespace.cql"); @@ -67,7 +67,7 @@ const SCHEMAS: &[(&str, &str)] = &[ ]; /// Get the namespace for a particular db configuration -pub(crate) fn namespace(cfg: &CassandraEnvVars) -> String { +pub(crate) fn namespace(cfg: &cassandra_db::EnvVars) -> String { // Build and set the Keyspace to use. format!("{}_V{}", cfg.namespace.as_str(), SCHEMA_VERSION) } @@ -75,7 +75,7 @@ pub(crate) fn namespace(cfg: &CassandraEnvVars) -> String { /// Create the namespace we will use for this session /// Ok to run this if the namespace already exists. 
async fn create_namespace( - session: &mut Arc, cfg: &CassandraEnvVars, + session: &mut Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result<()> { let keyspace = namespace(cfg); @@ -102,7 +102,7 @@ async fn create_namespace( /// Create the Schema on the connected Cassandra DB pub(crate) async fn create_schema( - session: &mut Arc, cfg: &CassandraEnvVars, + session: &mut Arc, cfg: &cassandra_db::EnvVars, ) -> anyhow::Result<()> { create_namespace(session, cfg).await?; diff --git a/catalyst-gateway/bin/src/db/index/session.rs b/catalyst-gateway/bin/src/db/index/session.rs index 300bc9d92a..41c447634c 100644 --- a/catalyst-gateway/bin/src/db/index/session.rs +++ b/catalyst-gateway/bin/src/db/index/session.rs @@ -21,7 +21,7 @@ use super::{ }; use crate::{ db::index::queries, - settings::{CassandraEnvVars, Settings}, + settings::{cassandra_db, Settings}, }; /// Configuration Choices for compression @@ -55,7 +55,7 @@ pub(crate) struct CassandraSession { #[allow(dead_code)] persistent: bool, /// Configuration for this session. - cfg: Arc, + cfg: Arc, /// The actual session. session: Arc, /// All prepared queries we can use on this session. @@ -138,7 +138,7 @@ impl CassandraSession { /// /// The intention here is that we should be able to tune this based on configuration, /// but for now we don't so the `cfg` is not used yet. -fn make_execution_profile(_cfg: &CassandraEnvVars) -> ExecutionProfile { +fn make_execution_profile(_cfg: &cassandra_db::EnvVars) -> ExecutionProfile { ExecutionProfile::builder() .consistency(scylla::statement::Consistency::LocalQuorum) .serial_consistency(Some(scylla::statement::SerialConsistency::LocalSerial)) @@ -158,7 +158,7 @@ fn make_execution_profile(_cfg: &CassandraEnvVars) -> ExecutionProfile { } /// Construct a session based on the given configuration. 
-async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result> { +async fn make_session(cfg: &cassandra_db::EnvVars) -> anyhow::Result> { let cluster_urls: Vec<&str> = cfg.url.as_str().split(',').collect(); let mut sb = SessionBuilder::new() @@ -208,7 +208,7 @@ async fn make_session(cfg: &CassandraEnvVars) -> anyhow::Result> { /// Continuously try and init the DB, if it fails, backoff. /// /// Display reasonable logs to help diagnose DB connection issues. -async fn retry_init(cfg: CassandraEnvVars, persistent: bool) { +async fn retry_init(cfg: cassandra_db::EnvVars, persistent: bool) { let mut retry_delay = Duration::from_secs(0); let db_type = if persistent { "Persistent" } else { "Volatile" }; diff --git a/catalyst-gateway/bin/src/settings/cassandra_db.rs b/catalyst-gateway/bin/src/settings/cassandra_db.rs new file mode 100644 index 0000000000..6ce6e220a2 --- /dev/null +++ b/catalyst-gateway/bin/src/settings/cassandra_db.rs @@ -0,0 +1,121 @@ +//! Command line and environment variable settings for the service + +use tracing::info; + +use super::str_env_var::StringEnvVar; +use crate::db::{ + self, + index::session::{CompressionChoice, TlsChoice}, +}; + +/// Default Cassandra DB URL for the Persistent DB. +pub(super) const PERSISTENT_URL_DEFAULT: &str = "127.0.0.1:9042"; + +/// Default Cassandra DB Namespace for the Persistent DB. +pub(super) const PERSISTENT_NAMESPACE_DEFAULT: &str = "persistent"; + +/// Default Cassandra DB URL for the Volatile DB. +pub(super) const VOLATILE_URL_DEFAULT: &str = "127.0.0.1:9042"; + +/// Default Cassandra DB Namespace for the Volatile DB. +pub(super) const VOLATILE_NAMESPACE_DEFAULT: &str = "volatile"; + +/// Default maximum batch size. +/// This comes from: +/// +/// Scylla may support larger batches for better performance. +/// Larger batches will incur more memory overhead to store the prepared batches. +const MAX_BATCH_SIZE_DEFAULT: i64 = 30; + +/// Minimum possible batch size.
+pub(crate) const MIN_BATCH_SIZE: i64 = 1; + +/// Maximum possible batch size. +const MAX_BATCH_SIZE: i64 = 256; + +/// Configuration for an individual cassandra cluster. +#[derive(Clone)] +pub(crate) struct EnvVars { + /// The Address/s of the DB. + pub(crate) url: StringEnvVar, + + /// The Namespace of Cassandra DB. + pub(crate) namespace: StringEnvVar, + + /// The `UserName` to use for the Cassandra DB. + pub(crate) username: Option, + + /// The Password to use for the Cassandra DB. + pub(crate) password: Option, + + /// Use TLS for the connection? + pub(crate) tls: TlsChoice, + + /// TLS Certificate to use for the connection (if any). + pub(crate) tls_cert: Option, + + /// Compression to use. + pub(crate) compression: CompressionChoice, + + /// Maximum Configured Batch size. + pub(crate) max_batch_size: i64, +} + +impl EnvVars { + /// Create a config for a cassandra cluster, identified by a default namespace. + pub(super) fn new(url: &str, namespace: &str) -> Self { + let name = namespace.to_uppercase(); + + // We can actually change the namespace, but can't change the name used for env vars.
+ let namespace = StringEnvVar::new(&format!("CASSANDRA_{name}_NAMESPACE"), namespace.into()); + + let tls = + StringEnvVar::new_as_enum(&format!("CASSANDRA_{name}_TLS"), TlsChoice::Disabled, false); + let compression = StringEnvVar::new_as_enum( + &format!("CASSANDRA_{name}_COMPRESSION"), + CompressionChoice::Lz4, + false, + ); + + Self { + url: StringEnvVar::new(&format!("CASSANDRA_{name}_URL"), url.into()), + namespace, + username: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_USERNAME"), false), + password: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_PASSWORD"), true), + tls, + tls_cert: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_TLS_CERT"), false), + compression, + max_batch_size: StringEnvVar::new_as( + &format!("CASSANDRA_{name}_BATCH_SIZE"), + MAX_BATCH_SIZE_DEFAULT, + MIN_BATCH_SIZE, + MAX_BATCH_SIZE, + ), + } + } + + /// Log the configuration of this Cassandra DB + pub(crate) fn log(&self, persistent: bool) { + let db_type = if persistent { "Persistent" } else { "Volatile" }; + + let auth = match (&self.username, &self.password) { + (Some(u), Some(_)) => format!("Username: {} Password: REDACTED", u.as_str()), + _ => "No Authentication".to_string(), + }; + + let tls_cert = match &self.tls_cert { + None => "No TLS Certificate Defined".to_string(), + Some(cert) => cert.as_string(), + }; + + info!( + url = self.url.as_str(), + namespace = db::index::schema::namespace(self), + auth = auth, + tls = self.tls.to_string(), + cert = tls_cert, + compression = self.compression.to_string(), + "Cassandra {db_type} DB Configuration" + ); + } +} diff --git a/catalyst-gateway/bin/src/settings/chain_follower.rs b/catalyst-gateway/bin/src/settings/chain_follower.rs new file mode 100644 index 0000000000..9311f71cbc --- /dev/null +++ b/catalyst-gateway/bin/src/settings/chain_follower.rs @@ -0,0 +1,135 @@ +//! 
Command line and environment variable settings for the service + +use std::{cmp::max, time::Duration}; + +use cardano_chain_follower::{turbo_downloader::DlConfig, ChainSyncConfig, Network}; +use tracing::info; + +use super::str_env_var::StringEnvVar; + +/// Default chain to follow. +const DEFAULT_NETWORK: Network = Network::Mainnet; + +/// Default number of sync tasks (must be in the range 1 to 256 inclusive.) +const DEFAULT_SYNC_TASKS: u16 = 16; + +/// Maximum number of sync tasks (must be in the range 1 to 256 inclusive.) +const MAX_SYNC_TASKS: u16 = 256; + +/// Maximum number of DL Connections (must be in the range 1 to 256 inclusive.) +const MAX_DL_CONNECTIONS: usize = 256; + +/// Maximum DL Chunk Size in MB (must be in the range 1 to 256 inclusive.) +const MAX_DL_CHUNK_SIZE: usize = 256; + +/// Maximum DL Chunk Queue Ahead (must be in the range 1 to 256 inclusive.) +const MAX_DL_CHUNK_QUEUE_AHEAD: usize = 256; + +/// Maximum DL Chunk Connect/Data Timeout in seconds (0 = Disabled). +const MAX_DL_TIMEOUT: u64 = 300; + +/// Number of bytes in a Megabyte +const ONE_MEGABYTE: usize = 1_048_576; + +/// Configuration for the chain follower. +#[derive(Clone)] +pub(crate) struct EnvVars { + /// The Blockchain we sync from. + pub(crate) chain: Network, + + /// The maximum number of sync tasks. + pub(crate) sync_tasks: u16, + + /// The Mithril Downloader Configuration. + pub(crate) dl_config: DlConfig, +} + +impl EnvVars { + /// Create a config for the chain follower from environment variables.
+ pub(super) fn new() -> Self { + let chain = StringEnvVar::new_as_enum("CHAIN_NETWORK", DEFAULT_NETWORK, false); + let sync_tasks: u16 = StringEnvVar::new_as( + "CHAIN_FOLLOWER_SYNC_TASKS", + DEFAULT_SYNC_TASKS, + 1, + MAX_SYNC_TASKS, + ); + + let cfg = ChainSyncConfig::default_for(chain); + let mut dl_config = cfg.mithril_cfg.dl_config.clone().unwrap_or_default(); + + let workers = StringEnvVar::new_as( + "CHAIN_FOLLOWER_DL_CONNECTIONS", + dl_config.workers, + 1, + MAX_DL_CONNECTIONS, + ); + dl_config = dl_config.with_workers(workers); + + let default_dl_chunk_size = max(1, dl_config.chunk_size / ONE_MEGABYTE); + + let chunk_size = StringEnvVar::new_as( + "CHAIN_FOLLOWER_DL_CHUNK_SIZE", + default_dl_chunk_size, + 1, + MAX_DL_CHUNK_SIZE, + ) * ONE_MEGABYTE; + dl_config = dl_config.with_chunk_size(chunk_size); + + let queue_ahead = StringEnvVar::new_as( + "CHAIN_FOLLOWER_DL_QUEUE_AHEAD", + dl_config.queue_ahead, + 1, + MAX_DL_CHUNK_QUEUE_AHEAD, + ); + dl_config = dl_config.with_queue_ahead(queue_ahead); + + let default_dl_connect_timeout = match dl_config.connection_timeout { + Some(timeout) => timeout.as_secs(), + None => 0, + }; + let dl_connect_timeout = StringEnvVar::new_as( + "CHAIN_FOLLOWER_DL_CONNECT_TIMEOUT", + default_dl_connect_timeout, + 0, + MAX_DL_TIMEOUT, + ); + if dl_connect_timeout == 0 { + dl_config.connection_timeout = None; + } else { + dl_config = dl_config.with_connection_timeout(Duration::from_secs(dl_connect_timeout)); + } + + let default_dl_data_timeout = match dl_config.data_read_timeout { + Some(timeout) => timeout.as_secs(), + None => 0, + }; + let dl_data_timeout = StringEnvVar::new_as( + "CHAIN_FOLLOWER_DL_DATA_TIMEOUT", + default_dl_data_timeout, + 0, + MAX_DL_TIMEOUT, + ); + if dl_data_timeout == 0 { + dl_config.data_read_timeout = None; + } else { + dl_config = dl_config.with_data_read_timeout(Duration::from_secs(dl_data_timeout)); + } + + Self { + chain, + sync_tasks, + dl_config, + } + } + + /// Log the configuration of this
Chain Follower + pub(crate) fn log(&self) { + info!( + chain = self.chain.to_string(), + sync_tasks = self.sync_tasks, + dl_config = ?self.dl_config, + "Chain Follower Configuration" + ); + } +} diff --git a/catalyst-gateway/bin/src/settings.rs b/catalyst-gateway/bin/src/settings/mod.rs similarity index 52% rename from catalyst-gateway/bin/src/settings.rs rename to catalyst-gateway/bin/src/settings/mod.rs index b6d2d99b8f..fa96587296 100644 --- a/catalyst-gateway/bin/src/settings.rs +++ b/catalyst-gateway/bin/src/settings/mod.rs @@ -1,7 +1,5 @@ //! Command line and environment variable settings for the service use std::{ - env::{self, VarError}, - fmt::{self, Display}, net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf, str::FromStr, @@ -10,25 +8,24 @@ use std::{ }; use anyhow::anyhow; -use cardano_chain_follower::Network; use clap::Args; use cryptoxide::{blake2b::Blake2b, mac::Mac}; use dotenvy::dotenv; use duration_string::DurationString; -use strum::VariantNames; -use tracing::{error, info}; +use str_env_var::StringEnvVar; +use tracing::error; use url::Url; use crate::{ build_info::{log_build_info, BUILD_INFO}, - db::{ - self, - index::session::{CompressionChoice, TlsChoice}, - }, logger::{self, LogLevel, LOG_LEVEL_DEFAULT}, service::utilities::net::{get_public_ipv4, get_public_ipv6}, }; +pub(crate) mod cassandra_db; +pub(crate) mod chain_follower; +mod str_env_var; + /// Default address to start service on. const ADDRESS_DEFAULT: &str = "0.0.0.0:3030"; @@ -58,37 +55,6 @@ const CHECK_CONFIG_TICK_DEFAULT: &str = "5s"; const EVENT_DB_URL_DEFAULT: &str = "postgresql://postgres:postgres@localhost/catalyst_events?sslmode=disable"; -/// Default Cassandra DB URL for the Persistent DB. -const CASSANDRA_PERSISTENT_DB_URL_DEFAULT: &str = "127.0.0.1:9042"; - -/// Default Cassandra DB URL for the Persistent DB. -const CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT: &str = "persistent"; - -/// Default Cassandra DB URL for the Persistent DB. 
-const CASSANDRA_VOLATILE_DB_URL_DEFAULT: &str = "127.0.0.1:9042"; - -/// Default Cassandra DB URL for the Persistent DB. -const CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT: &str = "volatile"; - -/// Default maximum batch size. -/// This comes from: -/// -/// Scylla may support larger batches for better performance. -/// Larger batches will incur more memory overhead to store the prepared batches. -const CASSANDRA_MAX_BATCH_SIZE_DEFAULT: i64 = 30; - -/// Minimum possible batch size. -pub(crate) const CASSANDRA_MIN_BATCH_SIZE: i64 = 1; - -/// Maximum possible batch size. -const CASSANDRA_MAX_BATCH_SIZE: i64 = 256; - -/// Default chain to follow. -const CHAIN_FOLLOWER_DEFAULT: Network = Network::Mainnet; - -/// Default number of sync tasks (must be in the range 1 to 255 inclusive.) -const CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT: u16 = 16; - /// Hash the Public IPv4 and IPv6 address of the machine, and convert to a 128 bit V4 /// UUID. fn calculate_service_uuid() -> String { @@ -146,389 +112,6 @@ pub(crate) struct DocsSettings { pub(crate) server_name: Option, } -/// An environment variable read as a string. -#[derive(Clone)] -pub(crate) struct StringEnvVar { - /// Value of the env var. - value: String, - /// Whether the env var is displayed redacted or not. - redacted: bool, -} - -/// Ergonomic way of specifying if a env var needs to be redacted or not. -enum StringEnvVarParams { - /// The env var is plain and should not be redacted. - Plain(String, Option), - /// The env var is redacted and should be redacted. 
- Redacted(String, Option), -} - -impl From<&str> for StringEnvVarParams { - fn from(s: &str) -> Self { - StringEnvVarParams::Plain(String::from(s), None) - } -} - -impl From for StringEnvVarParams { - fn from(s: String) -> Self { - StringEnvVarParams::Plain(s, None) - } -} - -impl From<(&str, bool)> for StringEnvVarParams { - fn from((s, r): (&str, bool)) -> Self { - if r { - StringEnvVarParams::Redacted(String::from(s), None) - } else { - StringEnvVarParams::Plain(String::from(s), None) - } - } -} - -impl From<(&str, bool, &str)> for StringEnvVarParams { - fn from((s, r, c): (&str, bool, &str)) -> Self { - if r { - StringEnvVarParams::Redacted(String::from(s), Some(String::from(c))) - } else { - StringEnvVarParams::Plain(String::from(s), Some(String::from(c))) - } - } -} - -/// An environment variable read as a string. -impl StringEnvVar { - /// Read the env var from the environment. - /// - /// If not defined, read from a .env file. - /// If still not defined, use the default. - /// - /// # Arguments - /// - /// * `var_name`: &str - the name of the env var - /// * `default_value`: &str - the default value - /// - /// # Returns - /// - /// * Self - the value - /// - /// # Example - /// - /// ```rust,no_run - /// #use cat_data_service::settings::StringEnvVar; - /// - /// let var = StringEnvVar::new("MY_VAR", "default"); - /// assert_eq!(var.as_str(), "default"); - /// ``` - fn new(var_name: &str, param: StringEnvVarParams) -> Self { - let (default_value, redacted, choices) = match param { - StringEnvVarParams::Plain(s, c) => (s, false, c), - StringEnvVarParams::Redacted(s, c) => (s, true, c), - }; - - match env::var(var_name) { - Ok(value) => { - if redacted { - info!(env = var_name, value = "Redacted", "Env Var Defined"); - } else { - info!(env = var_name, value = value, "Env Var Defined"); - } - Self { value, redacted } - }, - Err(VarError::NotPresent) => { - if let Some(choices) = choices { - if redacted { - info!( - env = var_name, - default = "Default 
Redacted", - choices = choices, - "Env Var Defaulted" - ); - } else { - info!( - env = var_name, - default = default_value, - choices = choices, - "Env Var Defaulted" - ); - }; - } else if redacted { - info!( - env = var_name, - default = "Default Redacted", - "Env Var Defined" - ); - } else { - info!(env = var_name, default = default_value, "Env Var Defaulted"); - } - - Self { - value: default_value, - redacted, - } - }, - Err(error) => { - error!( - env = var_name, - default = default_value, - error = ?error, - "Env Var Error" - ); - Self { - value: default_value, - redacted, - } - }, - } - } - - /// New Env Var that is optional. - fn new_optional(var_name: &str, redacted: bool) -> Option { - match env::var(var_name) { - Ok(value) => { - if redacted { - info!(env = var_name, value = "Redacted", "Env Var Defined"); - } else { - info!(env = var_name, value = value, "Env Var Defined"); - } - Some(Self { value, redacted }) - }, - Err(VarError::NotPresent) => { - info!(env = var_name, "Env Var Not Set"); - None - }, - Err(error) => { - error!( - env = var_name, - error = ?error, - "Env Var Error" - ); - None - }, - } - } - - /// Convert an Envvar into the required Enum Type. - fn new_as_enum( - var_name: &str, default: T, redacted: bool, - ) -> T - where ::Err: std::fmt::Display { - let mut choices = String::new(); - for name in T::VARIANTS { - if choices.is_empty() { - choices.push('['); - } else { - choices.push(','); - } - choices.push_str(name); - } - choices.push(']'); - - let choice = StringEnvVar::new( - var_name, - (default.to_string().as_str(), redacted, choices.as_str()).into(), - ); - - let value = match T::from_str(choice.as_str()) { - Ok(var) => var, - Err(error) => { - error!(error=%error, default=%default, choices=choices, choice=%choice, "Invalid choice. Using Default."); - default - }, - }; - - value - } - - /// Convert an Envvar into an integer in the bounded range. 
- fn new_as_i64(var_name: &str, default: i64, min: i64, max: i64) -> i64 -where { - let choices = format!("A value in the range {min} to {max} inclusive"); - - let raw_value = StringEnvVar::new( - var_name, - (default.to_string().as_str(), false, choices.as_str()).into(), - ) - .as_string(); - - match raw_value.parse::() { - Ok(value) => { - if value < min { - error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {min}"); - min - } else if value > max { - error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {max}"); - max - } else { - value - } - }, - Err(error) => { - error!(error=%error, default=default, "{var_name} not an integer. Range = {min} to {max} inclusive. Defaulted"); - default - }, - } - } - - /// Get the read env var as a str. - /// - /// # Returns - /// - /// * &str - the value - pub(crate) fn as_str(&self) -> &str { - &self.value - } - - /// Get the read env var as a str. - /// - /// # Returns - /// - /// * &str - the value - pub(crate) fn as_string(&self) -> String { - self.value.clone() - } -} - -impl fmt::Display for StringEnvVar { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.redacted { - return write!(f, "REDACTED"); - } - write!(f, "{}", self.value) - } -} - -impl fmt::Debug for StringEnvVar { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.redacted { - return write!(f, "REDACTED"); - } - write!(f, "env: {}", self.value) - } -} - -/// Configuration for an individual cassandra cluster. -#[derive(Clone)] -pub(crate) struct CassandraEnvVars { - /// The Address/s of the DB. - pub(crate) url: StringEnvVar, - - /// The Namespace of Cassandra DB. - pub(crate) namespace: StringEnvVar, - - /// The `UserName` to use for the Cassandra DB. - pub(crate) username: Option, - - /// The Password to use for the Cassandra DB.. - pub(crate) password: Option, - - /// Use TLS for the connection? - pub(crate) tls: TlsChoice, - - /// Use TLS for the connection? 
- pub(crate) tls_cert: Option, - - /// Compression to use. - pub(crate) compression: CompressionChoice, - - /// Maximum Configured Batch size. - pub(crate) max_batch_size: i64, -} - -impl CassandraEnvVars { - /// Create a config for a cassandra cluster, identified by a default namespace. - fn new(url: &str, namespace: &str) -> Self { - let name = namespace.to_uppercase(); - - // We can actually change the namespace, but can't change the name used for env vars. - let namespace = StringEnvVar::new(&format!("CASSANDRA_{name}_NAMESPACE"), namespace.into()); - - let tls = - StringEnvVar::new_as_enum(&format!("CASSANDRA_{name}_TLS"), TlsChoice::Disabled, false); - let compression = StringEnvVar::new_as_enum( - &format!("CASSANDRA_{name}_COMPRESSION"), - CompressionChoice::Lz4, - false, - ); - - Self { - url: StringEnvVar::new(&format!("CASSANDRA_{name}_URL"), url.into()), - namespace, - username: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_USERNAME"), false), - password: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_PASSWORD"), true), - tls, - tls_cert: StringEnvVar::new_optional(&format!("CASSANDRA_{name}_TLS_CERT"), false), - compression, - max_batch_size: StringEnvVar::new_as_i64( - &format!("CASSANDRA_{name}_BATCH_SIZE"), - CASSANDRA_MAX_BATCH_SIZE_DEFAULT, - CASSANDRA_MIN_BATCH_SIZE, - CASSANDRA_MAX_BATCH_SIZE, - ), - } - } - - /// Log the configuration of this Cassandra DB - pub(crate) fn log(&self, persistent: bool) { - let db_type = if persistent { "Persistent" } else { "Volatile" }; - - let auth = match (&self.username, &self.password) { - (Some(u), Some(_)) => format!("Username: {} Password: REDACTED", u.as_str()), - _ => "No Authentication".to_string(), - }; - - let tls_cert = match &self.tls_cert { - None => "No TLS Certificate Defined".to_string(), - Some(cert) => cert.as_string(), - }; - - info!( - url = self.url.as_str(), - namespace = db::index::schema::namespace(self), - auth = auth, - tls = self.tls.to_string(), - cert = tls_cert, - 
compression = self.compression.to_string(), - "Cassandra {db_type} DB Configuration" - ); - } -} - -/// Configuration for the chain follower. -#[derive(Clone)] -pub(crate) struct ChainFollowerEnvVars { - /// The Blockchain we sync from. - pub(crate) chain: Network, - - /// The maximum number of sync tasks. - pub(crate) sync_tasks: u16, -} - -impl ChainFollowerEnvVars { - /// Create a config for a cassandra cluster, identified by a default namespace. - fn new() -> Self { - let chain = StringEnvVar::new_as_enum("CHAIN_NETWORK", CHAIN_FOLLOWER_DEFAULT, false); - let sync_tasks: u16 = StringEnvVar::new_as_i64( - "CHAIN_FOLLOWER_SYNC_TASKS", - CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT.into(), - 1, - u16::MAX.into(), - ) - .try_into() - .unwrap_or(CHAIN_FOLLOWER_SYNC_TASKS_DEFAULT); - - Self { chain, sync_tasks } - } - - /// Log the configuration of this Chain Follower - pub(crate) fn log(&self) { - info!( - chain = self.chain.to_string(), - sync_tasks = self.sync_tasks, - "Chain Follower Configuration" - ); - } -} - /// All the `EnvVars` used by the service. struct EnvVars { /// The github repo owner @@ -562,13 +145,13 @@ struct EnvVars { event_db_password: Option, /// The Config of the Persistent Cassandra DB. - cassandra_persistent_db: CassandraEnvVars, + cassandra_persistent_db: cassandra_db::EnvVars, /// The Config of the Volatile Cassandra DB. 
- cassandra_volatile_db: CassandraEnvVars, + cassandra_volatile_db: cassandra_db::EnvVars, /// The Chain Follower configuration - chain_follower: ChainFollowerEnvVars, + chain_follower: chain_follower::EnvVars, /// Tick every N seconds until config exists in db #[allow(unused)] @@ -613,15 +196,15 @@ static ENV_VARS: LazyLock = LazyLock::new(|| { event_db_url: StringEnvVar::new("EVENT_DB_URL", EVENT_DB_URL_DEFAULT.into()), event_db_username: StringEnvVar::new_optional("EVENT_DB_USERNAME", false), event_db_password: StringEnvVar::new_optional("EVENT_DB_PASSWORD", true), - cassandra_persistent_db: CassandraEnvVars::new( - CASSANDRA_PERSISTENT_DB_URL_DEFAULT, - CASSANDRA_PERSISTENT_DB_NAMESPACE_DEFAULT, + cassandra_persistent_db: cassandra_db::EnvVars::new( + cassandra_db::PERSISTENT_URL_DEFAULT, + cassandra_db::PERSISTENT_NAMESPACE_DEFAULT, ), - cassandra_volatile_db: CassandraEnvVars::new( - CASSANDRA_VOLATILE_DB_URL_DEFAULT, - CASSANDRA_VOLATILE_DB_NAMESPACE_DEFAULT, + cassandra_volatile_db: cassandra_db::EnvVars::new( + cassandra_db::VOLATILE_URL_DEFAULT, + cassandra_db::VOLATILE_NAMESPACE_DEFAULT, ), - chain_follower: ChainFollowerEnvVars::new(), + chain_follower: chain_follower::EnvVars::new(), check_config_tick, } }); @@ -683,7 +266,7 @@ impl Settings { } /// Get the Persistent & Volatile Cassandra DB config for this service. - pub(crate) fn cassandra_db_cfg() -> (CassandraEnvVars, CassandraEnvVars) { + pub(crate) fn cassandra_db_cfg() -> (cassandra_db::EnvVars, cassandra_db::EnvVars) { ( ENV_VARS.cassandra_persistent_db.clone(), ENV_VARS.cassandra_volatile_db.clone(), @@ -691,7 +274,7 @@ impl Settings { } /// Get the configuration of the chain follower. 
- pub(crate) fn follower_cfg() -> ChainFollowerEnvVars { + pub(crate) fn follower_cfg() -> chain_follower::EnvVars { ENV_VARS.chain_follower.clone() } diff --git a/catalyst-gateway/bin/src/settings/str_env_var.rs b/catalyst-gateway/bin/src/settings/str_env_var.rs new file mode 100644 index 0000000000..18e8a6414b --- /dev/null +++ b/catalyst-gateway/bin/src/settings/str_env_var.rs @@ -0,0 +1,239 @@ +//! Processing for String Environment Variables +use std::{ + env::{self, VarError}, + fmt::{self, Display}, + str::FromStr, +}; + +use strum::VariantNames; +use tracing::{error, info}; + +/// An environment variable read as a string. +#[derive(Clone)] +pub(crate) struct StringEnvVar { + /// Value of the env var. + value: String, + /// Whether the env var is displayed redacted or not. + redacted: bool, +} + +/// Ergonomic way of specifying if a env var needs to be redacted or not. +pub(super) enum StringEnvVarParams { + /// The env var is plain and should not be redacted. + Plain(String, Option), + /// The env var is redacted and should be redacted. + Redacted(String, Option), +} + +impl From<&str> for StringEnvVarParams { + fn from(s: &str) -> Self { + StringEnvVarParams::Plain(String::from(s), None) + } +} + +impl From for StringEnvVarParams { + fn from(s: String) -> Self { + StringEnvVarParams::Plain(s, None) + } +} + +impl From<(&str, bool)> for StringEnvVarParams { + fn from((s, r): (&str, bool)) -> Self { + if r { + StringEnvVarParams::Redacted(String::from(s), None) + } else { + StringEnvVarParams::Plain(String::from(s), None) + } + } +} + +impl From<(&str, bool, &str)> for StringEnvVarParams { + fn from((s, r, c): (&str, bool, &str)) -> Self { + if r { + StringEnvVarParams::Redacted(String::from(s), Some(String::from(c))) + } else { + StringEnvVarParams::Plain(String::from(s), Some(String::from(c))) + } + } +} + +/// An environment variable read as a string. +impl StringEnvVar { + /// Read the env var from the environment. 
+ /// + /// If not defined, read from a .env file. + /// If still not defined, use the default. + /// + /// # Arguments + /// + /// * `var_name`: &str - the name of the env var + /// * `default_value`: &str - the default value + /// + /// # Returns + /// + /// * Self - the value + /// + /// # Example + /// + /// ```rust,no_run + /// #use cat_data_service::settings::StringEnvVar; + /// + /// let var = StringEnvVar::new("MY_VAR", "default"); + /// assert_eq!(var.as_str(), "default"); + /// ``` + pub(super) fn new(var_name: &str, param: StringEnvVarParams) -> Self { + let (default_value, redacted, choices) = match param { + StringEnvVarParams::Plain(s, c) => (s, false, c), + StringEnvVarParams::Redacted(s, c) => (s, true, c), + }; + + match env::var(var_name) { + Ok(value) => { + let value = Self { value, redacted }; + info!(env=var_name, value=%value, "Env Var Defined"); + value + }, + Err(err) => { + let value = Self { + value: default_value, + redacted, + }; + if err == VarError::NotPresent { + if let Some(choices) = choices { + info!(env=var_name, default=%value, choices=choices, "Env Var Defaulted"); + } else { + info!(env=var_name, default=%value, "Env Var Defaulted"); + } + } else if let Some(choices) = choices { + info!(env=var_name, default=%value, choices=choices, error=?err, + "Env Var Error"); + } else { + info!(env=var_name, default=%value, error=?err, "Env Var Error"); + } + + value + }, + } + } + + /// New Env Var that is optional. + pub(super) fn new_optional(var_name: &str, redacted: bool) -> Option { + match env::var(var_name) { + Ok(value) => { + let value = Self { value, redacted }; + info!(env = var_name, value = %value, "Env Var Defined"); + Some(value) + }, + Err(VarError::NotPresent) => { + info!(env = var_name, "Env Var Not Set"); + None + }, + Err(error) => { + error!(env = var_name, error = ?error, "Env Var Error"); + None + }, + } + } + + /// Convert an Envvar into the required Enum Type. 
+ pub(super) fn new_as_enum( + var_name: &str, default: T, redacted: bool, + ) -> T + where ::Err: std::fmt::Display { + let mut choices = String::new(); + for name in T::VARIANTS { + if choices.is_empty() { + choices.push('['); + } else { + choices.push(','); + } + choices.push_str(name); + } + choices.push(']'); + + let choice = StringEnvVar::new( + var_name, + (default.to_string().as_str(), redacted, choices.as_str()).into(), + ); + + let value = match T::from_str(choice.as_str()) { + Ok(var) => var, + Err(error) => { + error!(error=%error, default=%default, choices=choices, choice=%choice, + "Invalid choice. Using Default."); + default + }, + }; + + value + } + + /// Convert an Envvar into an integer in the bounded range. + pub(super) fn new_as(var_name: &str, default: T, min: T, max: T) -> T + where + T: FromStr + Display + PartialOrd + tracing::Value, + ::Err: std::fmt::Display, + { + let choices = format!("A value in the range {min} to {max} inclusive"); + + let raw_value = StringEnvVar::new( + var_name, + (default.to_string().as_str(), false, choices.as_str()).into(), + ) + .as_string(); + + match raw_value.parse::() { + Ok(value) => { + if value < min { + error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {min}"); + min + } else if value > max { + error!("{var_name} out of range. Range = {min} to {max} inclusive. Clamped to {max}"); + max + } else { + value + } + }, + Err(error) => { + error!(error=%error, default=default, "{var_name} not an integer. Range = {min} to {max} inclusive. Defaulted"); + default + }, + } + } + + /// Get the read env var as a str. + /// + /// # Returns + /// + /// * &str - the value + pub(crate) fn as_str(&self) -> &str { + &self.value + } + + /// Get the read env var as a str. 
+ /// + /// # Returns + /// + /// * &str - the value + pub(crate) fn as_string(&self) -> String { + self.value.clone() + } +} + +impl fmt::Display for StringEnvVar { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.redacted { + return write!(f, "REDACTED"); + } + write!(f, "{}", self.value) + } +} + +impl fmt::Debug for StringEnvVar { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.redacted { + return write!(f, "REDACTED"); + } + write!(f, "env: {}", self.value) + } +} diff --git a/catalyst-gateway/event-db/Earthfile b/catalyst-gateway/event-db/Earthfile index 823d60a0c8..79c7917b10 100644 --- a/catalyst-gateway/event-db/Earthfile +++ b/catalyst-gateway/event-db/Earthfile @@ -3,7 +3,7 @@ # the database and its associated software. VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.03 AS postgresql-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/postgresql:v3.2.07 AS postgresql-ci # cspell: words diff --git a/catalyst-gateway/tests/Earthfile b/catalyst-gateway/tests/Earthfile index 41d344b024..d3e0b7df81 100644 --- a/catalyst-gateway/tests/Earthfile +++ b/catalyst-gateway/tests/Earthfile @@ -1,5 +1,5 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:v3.2.03 AS spectral-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/spectral:v3.2.07 AS spectral-ci # test-lint-openapi - OpenAPI linting from an artifact # testing whether the OpenAPI generated during build stage follows good practice. 
diff --git a/catalyst-gateway/tests/api_tests/Earthfile b/catalyst-gateway/tests/api_tests/Earthfile index 0aac1a2e78..3ebea8e0f8 100644 --- a/catalyst-gateway/tests/api_tests/Earthfile +++ b/catalyst-gateway/tests/api_tests/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.2.03 AS python-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/python:v3.2.07 AS python-ci builder: FROM python-ci+python-base diff --git a/catalyst-gateway/tests/schemathesis_tests/Earthfile b/catalyst-gateway/tests/schemathesis_tests/Earthfile index ded02e5a58..a10bd3e110 100644 --- a/catalyst-gateway/tests/schemathesis_tests/Earthfile +++ b/catalyst-gateway/tests/schemathesis_tests/Earthfile @@ -1,7 +1,7 @@ VERSION 0.8 package-schemathesis: - FROM python:3.12-alpine3.19 + FROM python:3.12-alpine3.20 # TODO: https://github.com/input-output-hk/catalyst-voices/issues/465 RUN apk add --no-cache gcc musl-dev RUN python -m pip install schemathesis==3.27.1 @@ -29,7 +29,7 @@ package-schemathesis: # test-fuzzer-api - Fuzzy test cat-gateway using openapi specs. # Disabled in CI, to enable it rename to `test-fuzzer-api`. fuzzer-api: - FROM earthly/dind:alpine-3.19 + FROM earthly/dind:alpine-3.20-docker-26.1.5-r0 RUN apk update && apk add iptables-legacy # workaround for https://github.com/earthly/earthly/issues/3784 RUN apk add yq zstd COPY schemathesis-docker-compose.yml . 
diff --git a/catalyst_voices/Earthfile b/catalyst_voices/Earthfile index ff6b0d1c25..abbf261917 100644 --- a/catalyst_voices/Earthfile +++ b/catalyst_voices/Earthfile @@ -1,7 +1,7 @@ VERSION 0.8 IMPORT ../catalyst-gateway AS catalyst-gateway -IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.06 AS flutter-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.07 AS flutter-ci # Copy all the necessary files and running bootstrap builder: @@ -77,7 +77,7 @@ build-web: END package: - FROM nginx:alpine3.18 + FROM nginx:alpine3.20-slim ARG tag='latest' COPY +build-web/web /app diff --git a/catalyst_voices/uikit_example/Earthfile b/catalyst_voices/uikit_example/Earthfile index c5e6381e6d..dd7eec8dda 100644 --- a/catalyst_voices/uikit_example/Earthfile +++ b/catalyst_voices/uikit_example/Earthfile @@ -1,7 +1,7 @@ VERSION 0.8 IMPORT ../ AS catalyst-voices -IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.06 AS flutter-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.07 AS flutter-ci # local-build-web - build web version of UIKit example. 
# Prefixed by "local" to make sure it's not auto triggered, the target was diff --git a/catalyst_voices_packages/catalyst_cardano/catalyst_cardano/wallet-automation/Earthfile b/catalyst_voices_packages/catalyst_cardano/catalyst_cardano/wallet-automation/Earthfile index 37f8843afa..68ae4dc003 100644 --- a/catalyst_voices_packages/catalyst_cardano/catalyst_cardano/wallet-automation/Earthfile +++ b/catalyst_voices_packages/catalyst_cardano/catalyst_cardano/wallet-automation/Earthfile @@ -1,5 +1,5 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.06 AS flutter-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/flutter:v3.2.07 AS flutter-ci deps: FROM mcr.microsoft.com/playwright:v1.45.2-jammy @@ -45,13 +45,13 @@ build-web: SAVE ARTIFACT web package-app: - FROM nginx:alpine3.18 + FROM nginx:alpine3.20-slim ARG tag='latest' COPY +build-web/web /usr/share/nginx/html/ SAVE IMAGE test-app:$tag nightly-test: - FROM earthly/dind:alpine-3.19 + FROM earthly/dind:alpine-3.20-docker-26.1.5-r0 COPY compose.yml . WITH DOCKER \ --compose compose.yml \ diff --git a/docs/Earthfile b/docs/Earthfile index 79eb3c40d2..741d0f4867 100644 --- a/docs/Earthfile +++ b/docs/Earthfile @@ -1,6 +1,6 @@ VERSION 0.8 -IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:v3.2.03 AS docs-ci +IMPORT github.com/input-output-hk/catalyst-ci/earthly/docs:v3.2.07 AS docs-ci IMPORT .. AS repo IMPORT ../catalyst-gateway AS catalyst-gateway diff --git a/utilities/local-cluster/Earthfile b/utilities/local-cluster/Earthfile index d046ba9a5d..85a5885dde 100644 --- a/utilities/local-cluster/Earthfile +++ b/utilities/local-cluster/Earthfile @@ -30,7 +30,7 @@ cluster-test: # kubernetes-base : base container with tooling set up for local access kubernetes-base: - FROM alpine:3.19 + FROM alpine:3.20.3 # Install kubectl RUN apk update && \