Skip to content

Commit

Permalink
Merge #1692
Browse files Browse the repository at this point in the history
1692: fix: use auto-detected sector size for blockdev r=dsharma-dc a=dsharma-dc

This fixes the behaviour where we passed 512 as the sector size whenever the disk URI did not contain a blk_size parameter. That caused pool creation to fail if the underlying disk had a different sector size, e.g. 4096. Instead of passing 512, we now pass 0, which lets SPDK detect the device's sector size and use that value.

Co-authored-by: Diwakar Sharma <[email protected]>
  • Loading branch information
mayastor-bors and dsharma-dc committed Jul 18, 2024
2 parents a7e00d7 + 7455d9a commit 5224752
Show file tree
Hide file tree
Showing 5 changed files with 195 additions and 28 deletions.
22 changes: 22 additions & 0 deletions io-engine-tests/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,28 @@ pub fn truncate_file_bytes(path: &str, size: u64) {
assert!(output.status.success());
}

/// Attach a free loop device to `path`, optionally forcing a logical
/// sector size (defaults to 512 bytes), and return the device node
/// that was assigned (e.g. `/dev/loop3`).
pub fn setup_loopdev_file(path: &str, sector_size: Option<u64>) -> String {
    let sector = sector_size.unwrap_or(512).to_string();

    let output = Command::new("losetup")
        .args(["-f", "--show", "-b", sector.as_str(), path])
        .output()
        .expect("failed exec losetup");
    assert!(output.status.success());
    // `losetup --show` prints the chosen device on stdout; hand it back
    // trimmed of the trailing newline.
    String::from_utf8(output.stdout).unwrap().trim().to_string()
}

/// Detach the provided loop device via `losetup -d`.
pub fn detach_loopdev(dev: &str) {
    let result = Command::new("losetup")
        .arg("-d")
        .arg(dev)
        .output()
        .expect("failed exec losetup");
    assert!(result.status.success());
}

pub fn fscheck(device: &str) {
let output = Command::new("fsck")
.args([device, "-n"])
Expand Down
16 changes: 13 additions & 3 deletions io-engine/src/bdev/aio.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ use std::{
convert::TryFrom,
ffi::CString,
fmt::{Debug, Formatter},
os::unix::fs::FileTypeExt,
};

use async_trait::async_trait;
Expand All @@ -29,7 +30,7 @@ pub(super) struct Aio {

impl Debug for Aio {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "Aio '{}'", self.name)
write!(f, "Aio '{}', 'blk_size: {}'", self.name, self.blk_size)
}
}

Expand All @@ -47,6 +48,10 @@ impl TryFrom<&Url> for Aio {
});
}

let path_is_blockdev = std::fs::metadata(url.path())
.ok()
.map_or(false, |meta| meta.file_type().is_block_device());

let mut parameters: HashMap<String, String> =
url.query_pairs().into_owned().collect();

Expand All @@ -58,9 +63,14 @@ impl TryFrom<&Url> for Aio {
value: value.clone(),
})?
}
None => 512,
None => {
if path_is_blockdev {
0
} else {
512
}
}
};

let uuid = uri::uuid(parameters.remove("uuid")).context(
bdev_api::UuidParamParseFailed {
uri: url.to_string(),
Expand Down
19 changes: 17 additions & 2 deletions io-engine/src/bdev/uring.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,9 @@
use std::{collections::HashMap, convert::TryFrom, ffi::CString};
use std::{
collections::HashMap,
convert::TryFrom,
ffi::CString,
os::unix::fs::FileTypeExt,
};

use async_trait::async_trait;
use futures::channel::oneshot;
Expand Down Expand Up @@ -36,6 +41,10 @@ impl TryFrom<&Url> for Uring {
});
}

let path_is_blockdev = std::fs::metadata(url.path())
.ok()
.map_or(false, |meta| meta.file_type().is_block_device());

let mut parameters: HashMap<String, String> =
url.query_pairs().into_owned().collect();

Expand All @@ -47,7 +56,13 @@ impl TryFrom<&Url> for Uring {
value: value.clone(),
})?
}
None => 512,
None => {
if path_is_blockdev {
0
} else {
512
}
}
};

let uuid = uri::uuid(parameters.remove("uuid")).context(
Expand Down
147 changes: 124 additions & 23 deletions io-engine/tests/lvs_pool.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,33 @@ use std::pin::Pin;

pub mod common;

static DISKNAME1: &str = "/tmp/disk1.img";
static DISKNAME2: &str = "/tmp/disk2.img";
static TESTDIR: &str = "/tmp/io-engine-tests";
static DISKNAME1: &str = "/tmp/io-engine-tests/disk1.img";
static DISKNAME2: &str = "/tmp/io-engine-tests/disk2.img";
static DISKNAME3: &str = "/tmp/io-engine-tests/disk3.img";

#[tokio::test]
async fn lvs_pool_test() {
common::delete_file(&[DISKNAME1.into(), DISKNAME2.into()]);
// Create directory for placing test disk files
// todo: Create this from some common place and use for all other tests too.
let _ = std::process::Command::new("mkdir")
.args(["-p"])
.args([TESTDIR])
.output()
.expect("failed to execute mkdir");

common::delete_file(&[
DISKNAME1.into(),
DISKNAME2.into(),
DISKNAME3.into(),
]);
common::truncate_file(DISKNAME1, 128 * 1024);
common::truncate_file(DISKNAME2, 128 * 1024);
common::truncate_file(DISKNAME3, 128 * 1024);

//setup disk3 via loop device using a sector size of 4096.
let ldev = common::setup_loopdev_file(DISKNAME3, Some(4096));

let args = MayastorCliArgs {
reactor_mask: "0x3".into(),
..Default::default()
Expand All @@ -32,15 +51,17 @@ async fn lvs_pool_test() {

// should fail to import a pool that does not exist on disk
ms.spawn(async {
assert!(Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err())
assert!(Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
.await
.is_err())
})
.await;

// should succeed to create a pool we can not import
ms.spawn(async {
Lvs::create_or_import(PoolArgs {
name: "tpool".into(),
disks: vec!["aio:///tmp/disk1.img".into()],
disks: vec![format!("aio://{DISKNAME1}")],
uuid: None,
cluster_size: None,
backend: PoolBackend::Lvs,
Expand All @@ -55,16 +76,23 @@ async fn lvs_pool_test() {
// have an idempotent snafu, we dont crash and
// burn
ms.spawn(async {
assert!(Lvs::create("tpool", "aio:///tmp/disk1.img", None, None)
.await
.is_err())
assert!(Lvs::create(
"tpool",
format!("aio://{DISKNAME1}").as_str(),
None,
None
)
.await
.is_err())
})
.await;

// should fail to import the pool that is already imported
// similar to above, we use the import directly
ms.spawn(async {
assert!(Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err())
assert!(Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
.await
.is_err())
})
.await;

Expand All @@ -75,7 +103,7 @@ async fn lvs_pool_test() {
assert_eq!(pool.name(), "tpool");
assert_eq!(pool.used(), 0);
dbg!(pool.uuid());
assert_eq!(pool.base_bdev().name(), "/tmp/disk1.img");
assert_eq!(pool.base_bdev().name(), DISKNAME1);
})
.await;

Expand All @@ -90,9 +118,13 @@ async fn lvs_pool_test() {
// import and export implicitly destroy the base_bdev, for
// testing import and create we
// sometimes create the base_bdev manually
bdev_create("aio:///tmp/disk1.img").await.unwrap();
bdev_create(format!("aio://{DISKNAME1}").as_str())
.await
.unwrap();

assert!(Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_ok());
assert!(Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
.await
.is_ok());

let pool = Lvs::lookup("tpool").unwrap();
assert_eq!(pool.uuid(), uuid);
Expand All @@ -107,13 +139,22 @@ async fn lvs_pool_test() {
let uuid = pool.uuid();
pool.destroy().await.unwrap();

bdev_create("aio:///tmp/disk1.img").await.unwrap();
assert!(Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err());
bdev_create(format!("aio://{DISKNAME1}").as_str())
.await
.unwrap();
assert!(Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
.await
.is_err());

assert_eq!(Lvs::iter().count(), 0);
assert!(Lvs::create("tpool", "aio:///tmp/disk1.img", None, None)
.await
.is_ok());
assert!(Lvs::create(
"tpool",
format!("aio://{DISKNAME1}").as_str(),
None,
None
)
.await
.is_ok());

let pool = Lvs::lookup("tpool").unwrap();
assert_ne!(uuid, pool.uuid());
Expand Down Expand Up @@ -181,7 +222,7 @@ async fn lvs_pool_test() {
pool.export().await.unwrap();
let pool = Lvs::create_or_import(PoolArgs {
name: "tpool".to_string(),
disks: vec!["aio:///tmp/disk1.img".to_string()],
disks: vec![format!("aio://{DISKNAME1}")],
uuid: None,
cluster_size: None,
backend: PoolBackend::Lvs,
Expand Down Expand Up @@ -297,8 +338,12 @@ async fn lvs_pool_test() {
// import the pool all shares should be there, but also validate
// the share that not shared to be -- not shared
ms.spawn(async {
bdev_create("aio:///tmp/disk1.img").await.unwrap();
let pool = Lvs::import("tpool", "aio:///tmp/disk1.img").await.unwrap();
bdev_create(format!("aio://{DISKNAME1}").as_str())
.await
.unwrap();
let pool = Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
.await
.unwrap();

for l in pool.lvols().unwrap() {
if l.name() == "notshared" {
Expand All @@ -321,7 +366,7 @@ async fn lvs_pool_test() {

let pool = Lvs::create_or_import(PoolArgs {
name: "tpool".into(),
disks: vec!["aio:///tmp/disk1.img".into()],
disks: vec![format!("aio://{DISKNAME1}")],
uuid: None,
cluster_size: None,
backend: PoolBackend::Lvs,
Expand All @@ -336,6 +381,60 @@ async fn lvs_pool_test() {
})
.await;

let pool_dev_aio = ldev.clone();
// should succeed to create an aio bdev pool on a loop blockdev of 4096
// bytes sector size.
ms.spawn(async move {
Lvs::create_or_import(PoolArgs {
name: "tpool_4k_aio".into(),
disks: vec![format!("aio://{pool_dev_aio}")],
uuid: None,
cluster_size: None,
backend: PoolBackend::Lvs,
})
.await
.unwrap();
})
.await;

// should be able to find our new LVS created on loopdev, and subsequently
// destroy it.
ms.spawn(async {
let pool = Lvs::lookup("tpool_4k_aio").unwrap();
assert_eq!(pool.name(), "tpool_4k_aio");
assert_eq!(pool.used(), 0);
dbg!(pool.uuid());
pool.destroy().await.unwrap();
})
.await;

let pool_dev_uring = ldev.clone();
// should succeed to create an uring pool on a loop blockdev of 4096 bytes
// sector size.
ms.spawn(async move {
Lvs::create_or_import(PoolArgs {
name: "tpool_4k_uring".into(),
disks: vec![format!("uring://{pool_dev_uring}")],
uuid: None,
cluster_size: None,
backend: PoolBackend::Lvs,
})
.await
.unwrap();
})
.await;

// should be able to find our new LVS created on loopdev, and subsequently
// destroy it.
ms.spawn(async {
let pool = Lvs::lookup("tpool_4k_uring").unwrap();
assert_eq!(pool.name(), "tpool_4k_uring");
assert_eq!(pool.used(), 0);
dbg!(pool.uuid());
pool.destroy().await.unwrap();
})
.await;

// validate the expected state of mayastor
ms.spawn(async {
// no shares left except for the discovery controller
Expand All @@ -352,7 +451,7 @@ async fn lvs_pool_test() {
// importing a pool with the wrong name should fail
Lvs::create_or_import(PoolArgs {
name: "jpool".into(),
disks: vec!["aio:///tmp/disk1.img".into()],
disks: vec![format!("aio://{DISKNAME1}")],
uuid: None,
cluster_size: None,
backend: PoolBackend::Lvs,
Expand All @@ -369,7 +468,7 @@ async fn lvs_pool_test() {
ms.spawn(async {
let pool = Lvs::create_or_import(PoolArgs {
name: "tpool2".into(),
disks: vec!["/tmp/disk2.img".into()],
disks: vec![format!("aio://{DISKNAME2}")],
uuid: None,
cluster_size: None,
backend: PoolBackend::Lvs,
Expand All @@ -381,4 +480,6 @@ async fn lvs_pool_test() {
.await;

common::delete_file(&[DISKNAME2.into()]);
common::detach_loopdev(ldev.as_str());
common::delete_file(&[DISKNAME3.into()]);
}
19 changes: 19 additions & 0 deletions scripts/clean-cargo-tests.sh
Original file line number Diff line number Diff line change
@@ -1,8 +1,27 @@
#!/usr/bin/env bash
# Cleanup for cargo-test leftovers: NVMe connections, test loop devices,
# and the shared test-disk directory.

SCRIPT_DIR="$(dirname "$0")"
ROOT_DIR=$(realpath "$SCRIPT_DIR/..")
TEST_DIR="/tmp/io-engine-tests"

sudo nvme disconnect-all

# Detach any loop devices created for test purposes
for back_file in "$TEST_DIR"/*; do
    # If the directory is empty or missing, the glob stays literal;
    # skip so losetup is not invoked on a non-existent path.
    [ -e "$back_file" ] || continue

    # Find loop devices associated with the disk image
    devices=$(losetup -j "$back_file" -O NAME --noheadings)

    # Detach each loop device found
    while IFS= read -r device; do
        if [ -n "$device" ]; then
            echo "Detaching loop device: $device"
            losetup -d "$device"
        fi
    done <<< "$devices"
done
# Delete the directory too (left in place if anything remains inside)
[ -d "$TEST_DIR" ] && rmdir --ignore-fail-on-non-empty "$TEST_DIR"


for c in $(docker ps -a --filter "label=io.composer.test.name" --format '{{.ID}}') ; do
docker kill "$c"
docker rm "$c"
Expand Down

0 comments on commit 5224752

Please sign in to comment.