Skip to content

Commit

Permalink
fix: removed all printlns and fixed annoying stuff like unused import…
Browse files Browse the repository at this point in the history
…s or unhandled errors
  • Loading branch information
sebasti810 committed Jan 17, 2024
1 parent 3c3494a commit 9751daf
Show file tree
Hide file tree
Showing 8 changed files with 65 additions and 474 deletions.
435 changes: 15 additions & 420 deletions Cargo.lock

Large diffs are not rendered by default.

10 changes: 5 additions & 5 deletions src/da.rs
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ impl DataAvailabilityLayer for CelestiaConnection {
"Could not serialize epoch json: {}",
e
)))})?;
let blob = Blob::new(self.namespace_id.clone(), data.into_bytes()).map_err(|e| {
let blob = Blob::new(self.namespace_id.clone(), data.into_bytes()).map_err(|_| {
DataAvailabilityError::GeneralError(GeneralError::BlobCreationError)
})?;
debug!("blob: {:?}", serde_json::to_string(&blob));
Expand Down Expand Up @@ -398,7 +398,7 @@ mod da_tests {
#[tokio::test]
async fn test_sequencer_and_light_client() {
if let Err(e) = clear_file("data.json") {
println!("Fehler beim Löschen der Datei: {}", e);
debug!("Fehler beim Löschen der Datei: {}", e);
}

// simulate sequencer start
Expand Down Expand Up @@ -454,13 +454,13 @@ mod da_tests {
});

let light_client = tokio::spawn(async {
println!("light client started");
debug!("light client started");
let light_client_layer = LocalDataAvailabilityLayer::new();
loop {
let epoch = light_client_layer.get(1).await.unwrap();
// verify proofs
verify_epoch_json(epoch);
println!("light client verified epoch 1");
debug!("light client verified epoch 1");

            // light_client checks time etc. — to be discussed with distractedm1nd
tokio::time::sleep(tokio::time::Duration::from_secs(70)).await;
Expand All @@ -469,7 +469,7 @@ mod da_tests {
let epoch = light_client_layer.get(2).await.unwrap();
// verify proofs
verify_epoch_json(epoch);
println!("light client verified epoch 2");
debug!("light client verified epoch 2");
}

});
Expand Down
52 changes: 29 additions & 23 deletions src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ pub mod zk_snark;

use clap::{Parser, Subcommand};
use config::{builder::DefaultState, ConfigBuilder, File, FileFormat};
#[allow(unused_imports)]
use da::{LocalDataAvailabilityLayer, DataAvailabilityLayer, CelestiaConnection};
use serde::Deserialize;

Expand Down Expand Up @@ -127,7 +128,7 @@ fn load_config(args: CommandLineArgs) -> Result<Config, config::ConfigError> {
))
.build()?;

println!("{}", settings.get_string("log_level").unwrap_or_default());
info!("{}", settings.get_string("log_level").unwrap_or_default());

let default_config = Config::default();

Expand Down Expand Up @@ -167,6 +168,32 @@ fn load_config(args: CommandLineArgs) -> Result<Config, config::ConfigError> {
})
}

#[cfg(not(test))]
async fn initialize_da_layer(config: &Config) -> Option<Arc<dyn DataAvailabilityLayer + 'static>> {
match &config.da_layer {
DALayerOption::Celestia => {
let celestia_conf = config.clone().celestia_config.unwrap();
match CelestiaConnection::new(
&celestia_conf.connection_string,
None,
&celestia_conf.namespace_id,
).await {
Ok(da) => Some(Arc::new(da) as Arc<dyn DataAvailabilityLayer + 'static>),
Err(e) => {
error!("Failed to connect to Celestia: {}", e);
None
}
}
}
DALayerOption::None => None,
}
}

#[cfg(test)]
/// Test builds always get the local, file-backed DA layer so tests never
/// depend on a running Celestia node; the config argument is ignored.
async fn initialize_da_layer(_config: &Config) -> Option<Arc<dyn DataAvailabilityLayer + 'static>> {
    let local_layer: Arc<dyn DataAvailabilityLayer + 'static> =
        Arc::new(LocalDataAvailabilityLayer::new());
    Some(local_layer)
}

/// The main function that initializes and runs the Actix web server.
///
/// # Behavior
Expand All @@ -186,28 +213,7 @@ async fn main() -> std::io::Result<()> {
pretty_env_logger::init();
dotenv().ok();

#[cfg(test)]
let da = Some(Arc::new(LocalDataAvailabilityLayer::new()) as Arc<dyn DataAvailabilityLayer + 'static>);

#[cfg(not(test))]
let da = match &config.da_layer {
DALayerOption::Celestia => {
let celestia_conf = config.clone().celestia_config.unwrap();
match CelestiaConnection::new(
&celestia_conf.connection_string,
None,
&celestia_conf.namespace_id,
).await {
Ok(da) => Some(Arc::new(da) as Arc<dyn DataAvailabilityLayer + 'static>),
Err(e) => {
error!("Failed to connect to Celestia: {}", e);
None
}
}
},
DALayerOption::None => None,
};

let da = initialize_da_layer(&config).await;

let node: Arc<dyn NodeType> = match args.command {
// LightClients need a DA layer, so we can unwrap here
Expand Down
12 changes: 6 additions & 6 deletions src/node_types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,10 @@ use bls12_381::Bls12;
use crypto_hash::{hex_digest, Algorithm};
use ed25519_dalek::{Signature, Verifier};
use std::{self, sync::Arc, time::Duration, io::ErrorKind};
use tokio::{time::sleep, task::{spawn, JoinError}};
use tokio::{time::sleep, task::spawn};
use indexed_merkle_tree::{IndexedMerkleTree, Node, error::MerkleTreeError};


use crate::{
da::{DataAvailabilityLayer, EpochJson},
storage::{ChainEntry, Database, IncomingEntry, Operation, UpdateEntryJson},
Expand Down Expand Up @@ -168,13 +169,13 @@ impl Sequencer {
Err(_) => 0,
};

self.db.set_epoch(&epoch);
self.db.reset_epoch_operation_counter();
self.db.set_epoch(&epoch).map_err(DeimosError::Database)?;
self.db.reset_epoch_operation_counter().map_err(DeimosError::Database)?;

// add the commitment for the operations ran since the last epoch
let current_commitment = self.create_tree().map_err( DeimosError::MerkleTree)?.get_commitment().map_err(DeimosError::MerkleTree)?;

self.db.add_commitment(&epoch, &current_commitment);
self.db.add_commitment(&epoch, &current_commitment).map_err(DeimosError::Database)?;

let proofs = if epoch > 0 {
let prev_epoch = epoch - 1;
Expand Down Expand Up @@ -390,7 +391,6 @@ impl Sequencer {
}
#[cfg(not(feature = "key_transparency"))]
{
println!("Key transparency is not enabled");
self.verify_signature_with_given_key(signature_with_key)
}
}
Expand All @@ -405,7 +405,7 @@ impl Sequencer {
/// Returns false if there is no public key for the id or if no public key can verify the signature
///
/// ONLY FOR KEY TRANSPARENCY APPLICATION
fn verify_signature(
fn _verify_signature(
&self,
signature_with_key: &UpdateEntryJson,
) -> Result<IncomingEntry, &'static str> {
Expand Down
4 changes: 0 additions & 4 deletions src/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -507,8 +507,6 @@ mod tests {
// In addition, it should not be possible to write keys exclusively directly into the derived dict, right?
#[test]
fn test_get_hashed_keys() {
println!("test_get_hashed_keys");

let redis_connections = setup();

let incoming_entry1 = create_incoming_entry_with_test_value("test_key1");
Expand All @@ -520,8 +518,6 @@ mod tests {
redis_connections.set_derived_entry(&incoming_entry3, &create_mock_chain_entry(), true).unwrap();

let keys = redis_connections.get_derived_keys_in_order().unwrap();

println!("keys: {:?}", keys);

// check if the returned keys are correct
let expected_keys: Vec<String> = vec![sha256(&"test_key1".to_string()), sha256(&"test_key2".to_string()), sha256(&"test_key3".to_string())];
Expand Down
9 changes: 2 additions & 7 deletions src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use crate::{
Operation, error::{ProofError, DeimosError, GeneralError},
};
use indexed_merkle_tree::{IndexedMerkleTree, MerkleProof, ProofVariant, UpdateProof};
use bellman::groth16::{self, VerifyingKey, PreparedVerifyingKey};
use bellman::groth16::{self, VerifyingKey};
use bls12_381::{Bls12, Scalar};
use rand::rngs::OsRng;
use ed25519_dalek::VerifyingKey as Ed25519VerifyingKey;
Expand Down Expand Up @@ -150,7 +150,7 @@ pub fn validate_epoch_from_proof_variants(
) {
Ok(circuit) => circuit,
Err(e) => {
return Err(DeimosError::Proof(ProofError::GenerationError));
return Err(e);
}
};

Expand All @@ -162,12 +162,9 @@ pub fn validate_epoch_from_proof_variants(
debug!("validate_epoch: creating proof for zkSNARK");
let proof = groth16::create_random_proof(circuit.clone(), &params, rng).map_err(|_| DeimosError::Proof(ProofError::GenerationError))?;

// println!("{}: {:?}", "PROOF".red(), proof);

debug!("validate_epoch: preparing verifying key for zkSNARK");
let pvk = groth16::prepare_verifying_key(&params.vk);

// println!("{}", "Extracting public parameters for zkSNARK...".yellow());
// let public_parameters = extract_public_parameters(&parsed_proofs);

let scalars: Result<Vec<Scalar>, _> = vec![
Expand Down Expand Up @@ -294,8 +291,6 @@ mod tests {
params.vk,
);

println!("{:?}", result);

assert!(result.is_ok());
assert_eq!(result.unwrap(), proof);
}
Expand Down
12 changes: 8 additions & 4 deletions src/webserver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -132,10 +132,14 @@ async fn update_entry(
format!(r#"{{"Insert":{}}}"#, pre_processed_string)
};

session
.db
.add_merkle_proof(&epoch, &epoch_operation, &tree.get_commitment().unwrap(), &proofs);
session.db.increment_epoch_operation();
if let Err(err) = session.db.add_merkle_proof(&epoch, &epoch_operation, &tree.get_commitment().unwrap(), &proofs) {
return HttpResponse::InternalServerError().json(format!("Error adding merkle proof: {}", err));
}

if let Err(err) = session.db.increment_epoch_operation() {
return HttpResponse::InternalServerError().json(format!("Error incrementing epoch operation: {}", err));
}

HttpResponse::Ok().body("Updated entry successfully")
} else {
HttpResponse::BadRequest().body("Could not update entry")
Expand Down
5 changes: 0 additions & 5 deletions src/zk_snark/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,11 +51,6 @@ pub struct VerifyingKey {
pub ic: String,
}

enum AffineType {
G1,
G2,
}

// TODO: think about to refactor this to use a generic function, because they are very similar
// but probably something for a different PR
pub fn decode_and_convert_to_g1affine(encoded_data: &String) -> Result<G1Affine, DeimosError> {
Expand Down

0 comments on commit 9751daf

Please sign in to comment.