Merge pull request #63 from threefoldtech/development_add_config_command
Add config command to modify FL
rawdaGastan authored Sep 9, 2024
2 parents 1fcb3a9 + feacee0 commit 94287ff
Showing 7 changed files with 244 additions and 17 deletions.
2 changes: 1 addition & 1 deletion docker2fl/src/main.rs
@@ -84,7 +84,7 @@ async fn main() -> Result<()> {
     });

     let fl_name = docker_image.replace([':', '/'], "-") + ".fl";
-    let meta = fungi::Writer::new(&fl_name).await?;
+    let meta = fungi::Writer::new(&fl_name, true).await?;
     let store = parse_router(&opts.store).await?;

     let res = docker2fl::convert(meta, store, &docker_image, credentials).await;
4 changes: 2 additions & 2 deletions docs/README.md
@@ -64,10 +64,10 @@ the `block` table is used to associate data file blocks with files. An `id` field

 the route table holds routing information for the blobs. It basically describes where to find `blobs` with certain `ids`. The routing is done as follows (a lookup sketch follows this hunk):

-> Note routing table is loaded one time when `rfs` is started and
+> Note routing table is loaded one time when `rfs` is started.
 - We use the first byte of the blob `id` as the `route key`
-- The `route key`` is then consulted against the routing table
+- The `route key` is then consulted against the routing table
 - While building an `FL` all matching stores are updated with the new blob. This is how the system does replication
 - On `getting` an object, the list of matching routes is tried in random order; the first one to return a value is used
 - Note that the same range and overlapping ranges are allowed; this is how sharding and replication are done.
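The routing rules above boil down to a range check on the first byte of the blob id. A minimal sketch of the lookup — the `Route` struct here is illustrative, not the actual rfs type:

```rust
/// Illustrative route entry; the real rfs route type lives in fungi::meta.
struct Route {
    start: u8,   // inclusive start of the key range
    end: u8,     // inclusive end of the key range
    url: String, // store URL, e.g. "dir:///tmp/store0"
}

/// Return every store whose range covers the blob's route key.
/// Overlapping ranges are allowed, so several stores may match one key;
/// writes go to all of them (replication), reads try them in random order.
fn matching_stores<'a>(routes: &'a [Route], blob_id: &[u8]) -> Vec<&'a str> {
    let key = blob_id[0]; // the route key is the first byte of the blob id
    routes
        .iter()
        .filter(|r| r.start <= key && key <= r.end)
        .map(|r| r.url.as_str())
        .collect()
}
```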
2 changes: 1 addition & 1 deletion rfs/README.md
@@ -48,7 +48,7 @@ If the `start-end` range is not provided a `00-FF` range is assumed, basically a catch-all

 This is only useful because `rfs` can accept multiple stores on the command line with different and/or overlapping ranges.

-For example `-s 00-80=dir:///tmp/store0 -s 81-ff=dir://tmp/store1` means all keys that have a prefix byte in range `[00-80]` will be written to /tmp/store0; all other keys `00-ff` will be written to store1.
+For example `-s 00-80=dir:///tmp/store0 -s 81-ff=dir://tmp/store1` means all keys that have a prefix byte in range `[00-80]` will be written to /tmp/store0; all other keys `[81-ff]` will be written to store1.

 The same range can appear multiple times, which means the blob will be replicated to all the stores that match its key prefix.
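As a concrete illustration of that last point, repeating the full range replicates every blob (the flag shape is taken from the `-s` example above; store paths are illustrative):

```
-s 00-ff=dir:///tmp/store0 -s 00-ff=dir:///tmp/store1
```

Every key falls in `[00-ff]`, so each blob is written to both /tmp/store0 and /tmp/store1.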
72 changes: 72 additions & 0 deletions rfs/src/config.rs
@@ -0,0 +1,72 @@
+use crate::{
+    fungi::{meta::Tag, Reader, Result, Writer},
+    store::{self, Store},
+};
+
+pub async fn tag_list(reader: Reader) -> Result<()> {
+    let tags = reader.tags().await?;
+    if !tags.is_empty() {
+        println!("tags:");
+    }
+    for (key, value) in tags {
+        println!("\t{}={}", key, value);
+    }
+    Ok(())
+}
+
+pub async fn tag_add(writer: Writer, tags: Vec<(String, String)>) -> Result<()> {
+    for (key, value) in tags {
+        writer.tag(Tag::Custom(key.as_str()), value).await?;
+    }
+    Ok(())
+}
+
+pub async fn tag_delete(writer: Writer, keys: Vec<String>, all: bool) -> Result<()> {
+    if all {
+        writer.delete_tags().await?;
+        return Ok(());
+    }
+    for key in keys {
+        writer.delete_tag(Tag::Custom(key.as_str())).await?;
+    }
+    Ok(())
+}
+
+pub async fn store_list(reader: Reader) -> Result<()> {
+    let routes = reader.routes().await?;
+    if !routes.is_empty() {
+        println!("routes:");
+    }
+    for route in routes {
+        println!(
+            "\trange:[{}-{}] store:{}",
+            route.start, route.end, route.url
+        );
+    }
+    Ok(())
+}
+
+pub async fn store_add(writer: Writer, stores: Vec<String>) -> Result<()> {
+    let store = store::parse_router(stores.as_slice()).await?;
+    for route in store.routes() {
+        writer
+            .route(
+                route.start.unwrap_or(u8::MIN),
+                route.end.unwrap_or(u8::MAX),
+                route.url,
+            )
+            .await?;
+    }
+    Ok(())
+}
+
+pub async fn store_delete(writer: Writer, stores: Vec<String>, all: bool) -> Result<()> {
+    if all {
+        writer.delete_routes().await?;
+        return Ok(());
+    }
+    for store in stores {
+        writer.delete_route(store).await?;
+    }
+    Ok(())
+}
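These helpers are what the new `config` subcommand drives. A minimal, hypothetical wiring of them — assuming a `Reader::new(path)` constructor exists alongside the `Writer` changes below, and that the modules are exported as the lib.rs hunk further down suggests; the file name and tag values are illustrative:

```rust
use rfs::config;
use rfs::fungi::{Reader, Writer};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Open an existing flist in place: remove = false, so the file is NOT
    // deleted and recreated (see the Writer::new change below).
    let writer = Writer::new("example.fl", false).await?;
    config::tag_add(writer, vec![("version".to_string(), "0.2".to_string())]).await?;

    // Read the tags back out.
    let reader = Reader::new("example.fl").await?; // assumed constructor
    config::tag_list(reader).await?;
    Ok(())
}
```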
54 changes: 45 additions & 9 deletions rfs/src/fungi/meta.rs
@@ -277,6 +277,14 @@ impl Reader {
         Ok(value.map(|v| v.0))
     }

+    pub async fn tags(&self) -> Result<Vec<(String, String)>> {
+        let tags: Vec<(String, String)> = sqlx::query_as("select key, value from tag;")
+            .fetch_all(&self.pool)
+            .await?;
+
+        Ok(tags)
+    }
+
     pub async fn routes(&self) -> Result<Vec<Route>> {
         let results: Vec<Route> = sqlx::query_as("select start, end, url from route;")
             .fetch_all(&self.pool)
@@ -340,8 +348,10 @@ pub struct Writer {

 impl Writer {
     /// create a new mkondo writer
-    pub async fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
-        let _ = tokio::fs::remove_file(&path).await;
+    pub async fn new<P: AsRef<Path>>(path: P, remove: bool) -> Result<Self> {
+        if remove {
+            let _ = tokio::fs::remove_file(&path).await;
+        }

         let opts = SqliteConnectOptions::new()
             .create_if_missing(true)
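The new `remove` flag is the pivot of this change: `true` keeps the old truncate-and-recreate behaviour, while `false` opens an existing flist for in-place edits, which is what the config command needs. Hypothetical call sites (paths are illustrative):

```rust
// Recreate the flist from scratch (old behaviour, used by pack/docker2fl):
let meta = fungi::Writer::new("out.fl", true).await?;

// Open an existing flist without wiping it (what `rfs config` relies on):
let meta = fungi::Writer::new("existing.fl", false).await?;
```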
@@ -409,13 +419,39 @@ impl Writer {
     }

     pub async fn tag<V: AsRef<str>>(&self, tag: Tag<'_>, value: V) -> Result<()> {
-        sqlx::query("insert into tag (key, value) values (?, ?);")
+        sqlx::query("insert or replace into tag (key, value) values (?, ?);")
             .bind(tag.key())
             .bind(value.as_ref())
             .execute(&self.pool)
             .await?;
         Ok(())
     }
+    pub async fn delete_tag(&self, tag: Tag<'_>) -> Result<()> {
+        sqlx::query("delete from tag where key = ?;")
+            .bind(tag.key())
+            .execute(&self.pool)
+            .await?;
+        Ok(())
+    }
+
+    pub async fn delete_route<U: AsRef<str>>(&self, url: U) -> Result<()> {
+        sqlx::query("delete from route where url = ?;")
+            .bind(url.as_ref())
+            .execute(&self.pool)
+            .await?;
+        Ok(())
+    }
+
+    pub async fn delete_tags(&self) -> Result<()> {
+        sqlx::query("delete from tag;").execute(&self.pool).await?;
+        Ok(())
+    }
+    pub async fn delete_routes(&self) -> Result<()> {
+        sqlx::query("delete from route;")
+            .execute(&self.pool)
+            .await?;
+        Ok(())
+    }
 }

 #[cfg(test)]
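Switching `tag` from plain `insert` to `insert or replace` makes re-tagging idempotent: writing the same key twice overwrites the old value instead of tripping SQLite's unique-key constraint. A hypothetical sequence (the reader-side getter shape is assumed from the `impl Reader` hunk above, not taken from this diff):

```rust
meta.tag(Tag::Version, "0.1").await?;
meta.tag(Tag::Version, "0.2").await?; // second write replaces the first

// Assumed reader-side check:
// reader.tag(Tag::Version).await? == Some("0.2".to_string())
```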
@@ -425,7 +461,7 @@ mod test {
     #[tokio::test]
     async fn test_inode() {
         const PATH: &str = "/tmp/inode.fl";
-        let meta = Writer::new(PATH).await.unwrap();
+        let meta = Writer::new(PATH, true).await.unwrap();

         let ino = meta
             .inode(Inode {
@@ -449,7 +485,7 @@
     #[tokio::test]
     async fn test_get_children() {
         const PATH: &str = "/tmp/children.fl";
-        let meta = Writer::new(PATH).await.unwrap();
+        let meta = Writer::new(PATH, true).await.unwrap();

         let ino = meta
             .inode(Inode {
@@ -486,7 +522,7 @@
     #[tokio::test]
     async fn test_get_block() {
         const PATH: &str = "/tmp/block.fl";
-        let meta = Writer::new(PATH).await.unwrap();
+        let meta = Writer::new(PATH, true).await.unwrap();
         let hash: [u8; ID_LEN] = [
             1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
             25, 26, 27, 28, 29, 30, 31, 32,
@@ -509,7 +545,7 @@
     #[tokio::test]
     async fn test_get_tag() {
         const PATH: &str = "/tmp/tag.fl";
-        let meta = Writer::new(PATH).await.unwrap();
+        let meta = Writer::new(PATH, true).await.unwrap();
         meta.tag(Tag::Version, "0.1").await.unwrap();
         meta.tag(Tag::Author, "azmy").await.unwrap();
         meta.tag(Tag::Custom("custom"), "value").await.unwrap();
@@ -535,7 +571,7 @@
     #[tokio::test]
     async fn test_get_routes() {
         const PATH: &str = "/tmp/route.fl";
-        let meta = Writer::new(PATH).await.unwrap();
+        let meta = Writer::new(PATH, true).await.unwrap();

         meta.route(0, 128, "zdb://hub1.grid.tf").await.unwrap();
         meta.route(129, 255, "zdb://hub2.grid.tf").await.unwrap();
@@ -560,7 +596,7 @@
     #[tokio::test]
     async fn test_walk() {
         const PATH: &str = "/tmp/walk.fl";
-        let meta = Writer::new(PATH).await.unwrap();
+        let meta = Writer::new(PATH, true).await.unwrap();

         let parent = meta
             .inode(Inode {
3 changes: 2 additions & 1 deletion rfs/src/lib.rs
@@ -9,6 +9,7 @@ mod pack;
 pub use pack::pack;
 mod unpack;
 pub use unpack::unpack;
+pub mod config;

 const PARALLEL_UPLOAD: usize = 10; // number of files we can upload in parallel
@@ -53,7 +54,7 @@ mod test {
         }

         println!("file generation complete");
-        let writer = meta::Writer::new(root.join("meta.fl")).await.unwrap();
+        let writer = meta::Writer::new(root.join("meta.fl"), true).await.unwrap();

         // while we at it we can already create 2 stores and create a router store on top
         // of that.
