Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Write Integration tests #5

Merged
merged 22 commits into from
May 24, 2024
Merged
Show file tree
Hide file tree
Changes from 17 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
599 changes: 595 additions & 4 deletions Cargo.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ tracing = { version = "0.1.40" }
tracing-subscriber = { version = "0.3.18" }
url = { version = "2.5.0" }
uuid = { version = "1.7.0" }
httpmock = { version = "0.7.0" }
da-client-interface = { path = "crates/da_clients/da-client-interface" }
ethereum-da-client = { path = "crates/da_clients/ethereum" }
utils = { path = "crates/utils" }
1 change: 1 addition & 0 deletions crates/da_clients/da-client-interface/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,5 @@ edition.workspace = true
async-trait = { workspace = true }
axum = { workspace = true }
color-eyre = { workspace = true }
mockall = "0.12.1"
starknet = { workspace = true }
2 changes: 2 additions & 0 deletions crates/da_clients/da-client-interface/src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use async_trait::async_trait;
use color_eyre::Result;
use starknet::core::types::FieldElement;
use mockall::{automock, predicate::*};

#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum DaVerificationStatus {
Expand All @@ -13,6 +14,7 @@ pub enum DaVerificationStatus {
}

/// Trait for every new DaClient to implement
#[automock]
#[async_trait]
pub trait DaClient: Send + Sync {
/// Should publish the state diff to the DA layer and return an external id
Expand Down
16 changes: 15 additions & 1 deletion crates/orchestrator/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,14 @@ name = "orchestrator"
version.workspace = true
edition.workspace = true

[lib]
name = "orchestrator"
path = "src/lib.rs"

[[bin]]
name = "orchestrator"
path = "src/main.rs"

[dependencies]
async-trait = { workspace = true }
axum = { workspace = true, features = ["macros"] }
Expand All @@ -12,12 +20,13 @@ da-client-interface = { workspace = true }
dotenvy = { workspace = true }
ethereum-da-client = { workspace = true, optional = true }
futures = { workspace = true }
mockall = "0.12.1"
mongodb = { workspace = true, features = ["bson-uuid-1"], optional = true }
omniqueue = { workspace = true, optional = true }
rstest = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
starknet = { workspace = true }
starknet-core = "0.9.0"
thiserror = { workspace = true }
tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] }
tracing = { workspace = true }
Expand All @@ -30,3 +39,8 @@ default = ["ethereum", "with_mongodb", "with_sqs"]
ethereum = ["ethereum-da-client"]
with_mongodb = ["mongodb"]
with_sqs = ["omniqueue"]

[dev-dependencies]
hyper = { version = "0.14", features = ["full"] }
rstest = { workspace = true }
httpmock = { workspace = true, features = ["remote"] }
49 changes: 25 additions & 24 deletions crates/orchestrator/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,10 @@ use crate::database::mongodb::config::MongoDbConfig;
use crate::database::mongodb::MongoDb;
use crate::database::{Database, DatabaseConfig};
use crate::queue::sqs::SqsQueue;
use crate::queue::QueueProvider;
use crate::queue::{QueueProvider};
use crate::utils::env_utils::get_env_var_or_panic;
use da_client_interface::{DaClient, DaConfig};
use da_client_interface::{DaClient};
use da_client_interface::DaConfig;
use dotenvy::dotenv;
use ethereum_da_client::config::EthereumDaConfig;
use ethereum_da_client::EthereumDaClient;
Expand All @@ -17,13 +18,31 @@ use tokio::sync::OnceCell;
/// by calling `config` function.
pub struct Config {
/// The starknet client to get data from the node
starknet_client: Arc<JsonRpcClient<HttpTransport>>,
pub starknet_client: Arc<JsonRpcClient<HttpTransport>>,
/// The DA client to interact with the DA layer
da_client: Box<dyn DaClient>,
pub da_client: Box<dyn DaClient>,
/// The database client
database: Box<dyn Database>,
pub database: Box<dyn Database>,
/// The queue provider
queue: Box<dyn QueueProvider>,
pub queue: Box<dyn QueueProvider>,
}

/// Initializes the app config
pub async fn init_config() -> Config {
    dotenv().ok();

    // Starknet JSON-RPC client pointed at the Madara node given by MADARA_RPC_URL.
    let rpc_url = get_env_var_or_panic("MADARA_RPC_URL");
    let starknet_client = JsonRpcClient::new(HttpTransport::new(
        Url::parse(rpc_url.as_str()).expect("Failed to parse URL"),
    ));

    // MongoDB-backed database client, configured from the environment.
    let database = Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await);

    // SQS-backed queue provider.
    let queue = Box::new(SqsQueue {});

    Config {
        starknet_client: Arc::new(starknet_client),
        da_client: build_da_client(),
        database,
        queue,
    }
}

impl Config {
Expand Down Expand Up @@ -52,24 +71,6 @@ impl Config {
/// It's initialized only once.
pub static CONFIG: OnceCell<Config> = OnceCell::const_new();

/// Initializes the app config
///
/// NOTE(review): this is the pre-refactor, crate-private version shown on the
/// deletion side of the diff; the PR moves an identical, `pub` version of this
/// function earlier in config.rs so integration tests can call it.
async fn init_config() -> Config {
    dotenv().ok();

    // init starknet client — panics if MADARA_RPC_URL is unset or unparseable
    let provider = JsonRpcClient::new(HttpTransport::new(
        Url::parse(get_env_var_or_panic("MADARA_RPC_URL").as_str()).expect("Failed to parse URL"),
    ));

    // init database (MongoDB, configured from the environment)
    let database = Box::new(MongoDb::new(MongoDbConfig::new_from_env()).await);

    // init the queue (SQS)
    let queue = Box::new(SqsQueue {});

    Config { starknet_client: Arc::new(provider), da_client: build_da_client(), database, queue }
}

/// Returns the app config. Initializes if not already done.
pub async fn config() -> &'static Config {
CONFIG.get_or_init(init_config).await
Expand Down
2 changes: 2 additions & 0 deletions crates/orchestrator/src/database/mod.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use crate::jobs::types::{JobItem, JobStatus, JobType};
use async_trait::async_trait;
use color_eyre::Result;
use mockall::automock;
use std::collections::HashMap;
use uuid::Uuid;

Expand All @@ -16,6 +17,7 @@ pub mod mongodb;
A and B both read the same Job entry J at nearly the same time. If A updates J at
time T1 and then B updates J at time T2 (T2>T1), then B's update should fail because
its version of J is outdated.
#[automock]
#[async_trait]
pub trait Database: Send + Sync {
async fn create_job(&self, job: JobItem) -> Result<JobItem>;
Expand Down
33 changes: 33 additions & 0 deletions crates/orchestrator/src/jobs/da_job/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -179,4 +179,37 @@ mod tests {
let expected = FieldElement::from_dec_str(expected.as_str()).unwrap();
assert_eq!(da_word, expected);
}

mod test_state_update_to_blob_data {
use super::*;
use serde_json::Error;
use std::fs::{read_to_string, File};
use std::io::{self, BufRead};

#[test]
#[ignore]
fn state_update_to_blob_data_works() {
let state_update_path = "test-utils/stateUpdate.json".to_owned();
apoorvsadana marked this conversation as resolved.
Show resolved Hide resolved
let contents = read_to_string(state_update_path).expect("Couldn't find or load that file.");

let v: Result<StateUpdate, Error> = serde_json::from_str(contents.as_str());

let state_update: StateUpdate = match v {
Ok(state_update) => state_update,
Err(e) => panic!("Couldn't parse the JSON file: {}", e),
};

let blob_data = state_update_to_blob_data(630872, state_update);
assert_eq!(blob_data.len(), 4906, "Blob data length must be 4906"); // //! Length was 2375

let file = File::open("test-utils/blobStateDiffs.txt").expect("Failed to open file");
let reader = io::BufReader::new(file);

// Iterate over both the file lines and the vector simultaneously, with index for comparison
for (index, line_result) in reader.lines().enumerate() {
let line = line_result.expect("Failed to read line");
assert_eq!(blob_data[index].to_string().as_str(), line, "Line {} does not match", index + 1);
}
}
}
}
79 changes: 77 additions & 2 deletions crates/orchestrator/src/jobs/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -180,11 +180,86 @@ async fn get_job(id: Uuid) -> Result<JobItem> {

/// Returns a copy of `metadata` with the `u64` value stored under `key`
/// incremented by one. A missing key is treated as 0 (so the result is "1").
///
/// # Errors
/// - the existing value cannot be parsed as `u64`
/// - the increment would overflow `u64::MAX`
fn increment_key_in_metadata(metadata: &HashMap<String, String>, key: &str) -> Result<HashMap<String, String>> {
    let mut new_metadata = metadata.clone();
    let attempt = get_u64_from_metadata(metadata, key)?;
    // checked_add + ok_or_else: reject overflow instead of wrapping.
    let incremented_value = attempt
        .checked_add(1)
        .ok_or_else(|| eyre!("Incrementing key {} in metadata would exceed u64::MAX", key))?;
    new_metadata.insert(key.to_string(), incremented_value.to_string());
    Ok(new_metadata)
}

/// Reads the value stored under `key` in `metadata` as a `u64`.
/// A missing key defaults to 0; a present but unparseable value is an error.
fn get_u64_from_metadata(metadata: &HashMap<String, String>, key: &str) -> Result<u64> {
    // map_or avoids allocating a fresh "0" String on every call
    // (clippy::or_fun_call) while keeping the missing-key default of 0.
    metadata.get(key).map_or(Ok(0), |value| Ok(value.parse::<u64>()?))
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Tests for `increment_key_in_metadata`.
    mod test_increment_key_in_metadata {
        use super::*;

        #[test]
        fn key_does_not_exist() {
            // Missing keys are treated as 0, so the first increment yields "1".
            let metadata = HashMap::new();
            let key = "test_key";
            let updated_metadata = increment_key_in_metadata(&metadata, key).unwrap();
            assert_eq!(updated_metadata.get(key), Some(&"1".to_string()));
        }

        #[test]
        fn key_exists_with_numeric_value() {
            let mut metadata = HashMap::new();
            metadata.insert("test_key".to_string(), "41".to_string());
            let key = "test_key";
            let updated_metadata = increment_key_in_metadata(&metadata, key).unwrap();
            assert_eq!(updated_metadata.get(key), Some(&"42".to_string()));
        }

        #[test]
        fn key_exists_with_non_numeric_value() {
            // An unparseable value must surface an error, not silently reset.
            let mut metadata = HashMap::new();
            metadata.insert("test_key".to_string(), "not_a_number".to_string());
            let key = "test_key";
            let result = increment_key_in_metadata(&metadata, key);
            assert!(result.is_err());
        }

        #[test]
        fn key_exists_with_max_u64_value() {
            // Incrementing u64::MAX must fail instead of wrapping around.
            let mut metadata = HashMap::new();
            metadata.insert("test_key".to_string(), u64::MAX.to_string());
            let key = "test_key";
            let result = increment_key_in_metadata(&metadata, key);
            assert!(result.is_err());
        }
    }

    /// Tests for `get_u64_from_metadata`.
    mod test_get_u64_from_metadata {
        use super::*;

        #[test]
        fn key_exists_with_valid_u64_value() {
            let mut metadata = HashMap::new();
            metadata.insert("key1".to_string(), "12345".to_string());
            let result = get_u64_from_metadata(&metadata, "key1").unwrap();
            assert_eq!(result, 12345);
        }

        #[test]
        fn key_exists_with_invalid_value() {
            let mut metadata = HashMap::new();
            metadata.insert("key2".to_string(), "not_a_number".to_string());
            let result = get_u64_from_metadata(&metadata, "key2");
            assert!(result.is_err());
        }

        #[test]
        fn key_does_not_exist() {
            // Absent keys default to 0.
            let metadata = HashMap::<String, String>::new();
            let result = get_u64_from_metadata(&metadata, "key3").unwrap();
            assert_eq!(result, 0);
        }
    }
}
4 changes: 2 additions & 2 deletions crates/orchestrator/src/jobs/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ fn unwrap_external_id_failed(expected: &str, got: &ExternalId) -> color_eyre::ey
eyre!("wrong ExternalId type: expected {}, got {:?}", expected, got)
}

#[derive(Serialize, Deserialize, Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub enum JobType {
/// Submitting DA data to the DA layer
DataSubmission,
Expand All @@ -78,7 +78,7 @@ pub enum JobType {
StateUpdation,
}

#[derive(Serialize, Deserialize, Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, PartialOrd)]
pub enum JobStatus {
/// An acknowledgement that the job has been received by the
/// orchestrator and is waiting to be processed
Expand Down
19 changes: 19 additions & 0 deletions crates/orchestrator/src/lib.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
/// Config of the service. Contains configurations for DB, Queues and other services.
pub mod config;
/// Controllers for the routes
pub mod controllers;
/// Contains the trait that all database clients must implement
pub mod database;
/// Contains the trait that all jobs must implement. Also
/// contains the root-level functions which detect the job
/// type and call the corresponding job.
pub mod jobs;
/// Contains the trait that all queues must implement
pub mod queue;
/// Contains the routes for the service
pub mod routes;
/// Contains the utils
pub mod utils;

#[cfg(test)]
mod tests;
25 changes: 4 additions & 21 deletions crates/orchestrator/src/main.rs
Original file line number Diff line number Diff line change
@@ -1,24 +1,7 @@
/// Config of the service. Contains configurations for DB, Queues and other services.
mod config;
/// Controllers for the routes
mod controllers;
/// Contains the trait that all database clients must implement
mod database;
/// Contains the trait that all jobs must implement. Also
/// contains the root level functions for which detect the job
/// type and call the corresponding job
mod jobs;
/// Contains the trait that all queues must implement
mod queue;
/// Contains the routes for the service
mod routes;
/// Contains the utils
mod utils;

use crate::config::config;
use crate::queue::init_consumers;
use crate::routes::app_router;
use crate::utils::env_utils::get_env_var_or_default;
use orchestrator::config::config;
use orchestrator::queue::init_consumers;
use orchestrator::routes::app_router;
use orchestrator::utils::env_utils::get_env_var_or_default;
use dotenvy::dotenv;

/// Start the server
Expand Down
2 changes: 2 additions & 0 deletions crates/orchestrator/src/queue/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,13 @@ use async_trait::async_trait;
use color_eyre::Result;
use omniqueue::{Delivery, QueueError};

use mockall::automock;
use std::time::Duration;

/// The QueueProvider trait is used to define the methods that a queue
/// should implement to be used as a queue for the orchestrator. The
/// purpose of this trait is to allow developers to use any queue of their choice.
#[automock]
#[async_trait]
pub trait QueueProvider: Send + Sync {
async fn send_message_to_queue(&self, queue: String, payload: String, delay: Option<Duration>) -> Result<()>;
Expand Down
1 change: 1 addition & 0 deletions crates/orchestrator/src/tests/common/constants.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
/// RPC endpoint of a locally running Madara node, shared by the test suite.
pub const MADARA_RPC_URL: &str = "http://localhost:9944";
Loading
Loading