From 66f6298d526df807d5416d10e7cc07616a8de35a Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Sat, 3 Apr 2021 14:00:27 +0000
Subject: [PATCH 001/775] Initial code split from
 https://github.com/cgwalters/ostree-container

The core tar import/export is independent of OCI bits.
---
 .gitignore                              |   7 +
 Cargo.toml                              |  49 +++
 LICENSE-APACHE                          | 202 +++++++++++
 LICENSE-MIT                             |  19 ++
 src/.gitignore                          |   1 +
 src/import.rs                           |   2 +
 src/lib.rs                              |  18 +
 src/ostree_ext.rs                       |  43 +++
 src/tar/export.rs                       | 312 +++++++++++++++++
 src/tar/import.rs                       | 428 ++++++++++++++++++++++++
 src/tar/mod.rs                          |  14 +
 src/tests/it/fixtures/exampleos.tar.zst | Bin 0 -> 1052 bytes
 src/tests/it/main.rs                    | 114 +++++++
 src/variant_utils.rs                    |  38 +++
 14 files changed, 1247 insertions(+)
 create mode 100644 .gitignore
 create mode 100644 Cargo.toml
 create mode 100644 LICENSE-APACHE
 create mode 100644 LICENSE-MIT
 create mode 100644 src/.gitignore
 create mode 100644 src/import.rs
 create mode 100644 src/lib.rs
 create mode 100644 src/ostree_ext.rs
 create mode 100644 src/tar/export.rs
 create mode 100644 src/tar/import.rs
 create mode 100644 src/tar/mod.rs
 create mode 100644 src/tests/it/fixtures/exampleos.tar.zst
 create mode 100644 src/tests/it/main.rs
 create mode 100644 src/variant_utils.rs

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000..b59902fdd
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+example
+
+
+# Added by cargo
+
+/target
+Cargo.lock
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 000000000..01dd8a12c
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,49 @@
+[package]
+authors = ["Colin Walters <walters@verbum.org>"]
+edition = "2018"
+license = "MIT OR Apache-2.0"
+name = "ostree-ext"
+readme = "README.md"
+repository = "https://github.com/cgwalters/ostree-ext"
+version = "0.1.0"
+
+[dependencies]
+anyhow = "1.0"
+camino = "1.0.4"
+cjson = "0.1.1"
+crossbeam = "0.8.0"
+flate2 = "1.0.20"
+fn-error-context = "0.1.1"
+futures = "0.3.13"
+gio = "0.9.1"
+glib = "0.10.3"
+glib-sys = "0.10.1"
+gvariant = "0.4.0"
+hex = "0.4.3"
+libc = "0.2.92"
+nix = "0.20.0"
+openat = "0.1.20"
+openat-ext = "0.1.13"
+openssl = "0.10.33"
+os_pipe = "*"
+ostree-sys = "0.7.2"
+serde = "1.0.125"
+serde_json = "1.0.64"
+tar = "0.4.33"
+
+[dependencies.ostree]
+features = ["v2021_1"]
+version = "0.10.0"
+
+[dependencies.phf]
+features = ["macros"]
+version = "0.8.0"
+
+[dev-dependencies]
+clap = "2.33.3"
+indoc = "1.0.3"
+sh-inline = "0.1.0"
+
+[dev-dependencies.tokio]
+features = ["full"]
+version = "1"
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644
index 000000000..8f71f43fe
--- /dev/null
+++ b/LICENSE-APACHE
@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/LICENSE-MIT b/LICENSE-MIT
new file mode 100644
index 000000000..dbd7f6572
--- /dev/null
+++ b/LICENSE-MIT
@@ -0,0 +1,19 @@
+Copyright (c) 2016 The openat Developers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/.gitignore b/src/.gitignore
new file mode 100644
index 000000000..eb5a316cb
--- /dev/null
+++ b/src/.gitignore
@@ -0,0 +1 @@
+target
diff --git a/src/import.rs b/src/import.rs
new file mode 100644
index 000000000..e63f3c0ee
--- /dev/null
+++ b/src/import.rs
@@ -0,0 +1,2 @@
+use super::Result;
+
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 000000000..937e1518c
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,18 @@
+//! # Extension APIs for ostree
+//!
+//! This crate builds on top of the core ostree C library
+//! and the Rust bindings to it, adding new functionality
+//! written in Rust.
+
+//#![deny(missing_docs)]
+// Good defaults
+#![forbid(unused_must_use)]
+#![deny(unsafe_code)]
+
+/// Our generic catchall fatal error, expected to be converted
+/// to a string to output to a terminal or logs.
+type Result<T> = anyhow::Result<T>;
+
+pub mod tar;
+pub mod bindingext;
+pub mod variant_utils;
diff --git a/src/ostree_ext.rs b/src/ostree_ext.rs
new file mode 100644
index 000000000..2fbe0efcb
--- /dev/null
+++ b/src/ostree_ext.rs
@@ -0,0 +1,43 @@
+//! Extension traits fixing incorrectly bound things in ostree-rs
+//! by defining a new function with an `x_` prefix.
+
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use glib::translate::*;
+use std::ptr;
+
+/// Extension functions which fix incorrectly bound APIs.
+pub trait RepoExt {
+    fn x_load_variant_if_exists(
+        &self,
+        objtype: ostree::ObjectType,
+        checksum: &str,
+    ) -> Result<Option<glib::Variant>, glib::Error>;
+}
+
+impl RepoExt for ostree::Repo {
+    #[allow(unsafe_code)]
+    fn x_load_variant_if_exists(
+        &self,
+        objtype: ostree::ObjectType,
+        checksum: &str,
+    ) -> Result<Option<glib::Variant>, glib::Error> {
+        unsafe {
+            let mut out_v = ptr::null_mut();
+            let mut error = ptr::null_mut();
+            let checksum = checksum.to_glib_none();
+            let _ = ostree_sys::ostree_repo_load_variant_if_exists(
+                self.to_glib_none().0,
+                objtype.to_glib(),
+                checksum.0,
+                &mut out_v,
+                &mut error,
+            );
+            if error.is_null() {
+                Ok(from_glib_full(out_v))
+            } else {
+                Err(from_glib_full(error))
+            }
+        }
+    }
+}
diff --git a/src/tar/export.rs b/src/tar/export.rs
new file mode 100644
index 000000000..0c4476963
--- /dev/null
+++ b/src/tar/export.rs
@@ -0,0 +1,312 @@
+//! APIs for creating container images from OSTree commits
+
+use super::Result;
+
+use crate::oci;
+use crate::ostree_ext::*;
+use anyhow::Context;
+use camino::{Utf8Path, Utf8PathBuf};
+use fn_error_context::context;
+use gio::prelude::*;
+use gvariant::aligned_bytes::TryAsAligned;
+use gvariant::{gv, Marker, Structure};
+
+use std::{borrow::Cow, collections::HashSet, path::Path};
+
+// This way the default ostree -> sysroot/ostree symlink works.
+const OSTREEDIR: &str = "sysroot/ostree";
+
+/// The location to store the generated image
+pub enum Target<'a> {
+    /// Generate an Open Containers image directory layout
+    OciDir(&'a Path),
+}
+
+/// Convert /usr/etc back to /etc
+fn map_path(p: &Utf8Path) -> std::borrow::Cow<Utf8Path> {
+    match p.strip_prefix("./usr/etc") {
+        Ok(r) => Cow::Owned(Utf8Path::new("./etc").join(r)),
+        _ => Cow::Borrowed(p),
+    }
+}
+
+struct OstreeMetadataWriter<'a, W: std::io::Write> {
+    repo: &'a ostree::Repo,
+    out: &'a mut tar::Builder<W>,
+    wrote_dirtree: HashSet<String>,
+    wrote_dirmeta: HashSet<String>,
+    wrote_content: HashSet<String>,
+    wrote_xattrs: HashSet<String>,
+}
+
+fn object_path(objtype: ostree::ObjectType, checksum: &str) -> Utf8PathBuf {
+    let suffix = match objtype {
+        ostree::ObjectType::Commit => "commit",
+        ostree::ObjectType::CommitMeta => "commitmeta",
+        ostree::ObjectType::DirTree => "dirtree",
+        ostree::ObjectType::DirMeta => "dirmeta",
+        ostree::ObjectType::File => "file",
+        o => panic!("Unexpected object type: {:?}", o),
+    };
+    let (first, rest) = checksum.split_at(2);
+    format!("{}/repo/objects/{}/{}.{}", OSTREEDIR, first, rest, suffix).into()
+}
+
+fn xattrs_path(checksum: &str) -> Utf8PathBuf {
+    format!("{}/repo/xattrs/{}", OSTREEDIR, checksum).into()
+}
+
+impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> {
+    fn append(
+        &mut self,
+        objtype: ostree::ObjectType,
+        checksum: &str,
+        v: &glib::Variant,
+    ) -> Result<()> {
+        let set = match objtype {
+            ostree::ObjectType::Commit => None,
+            ostree::ObjectType::DirTree => Some(&mut self.wrote_dirtree),
+            ostree::ObjectType::DirMeta => Some(&mut self.wrote_dirmeta),
+            o => panic!("Unexpected object type: {:?}", o),
+        };
+        if let Some(set) = set {
+            if set.contains(checksum) {
+                return Ok(());
+            }
+            let inserted = set.insert(checksum.to_string());
+            debug_assert!(inserted);
+        }
+
+        let mut h = tar::Header::new_gnu();
+        h.set_uid(0);
+        h.set_gid(0);
+        h.set_mode(0o644);
+        let data = v.get_data_as_bytes();
+        let data = data.as_ref();
+        h.set_size(data.len() as u64);
+        self.out
+            .append_data(&mut h, &object_path(objtype, checksum), data)?;
+        Ok(())
+    }
+
+    fn append_xattrs(
+        &mut self,
+        xattrs: &glib::Variant,
+    ) -> Result<Option<(Utf8PathBuf, tar::Header)>> {
+        let xattrs_data = xattrs.get_data_as_bytes();
+        let xattrs_data = xattrs_data.as_ref();
+        if xattrs_data.is_empty() {
+            return Ok(None);
+        }
+
+        let mut h = tar::Header::new_gnu();
+        h.set_mode(0o644);
+        h.set_size(0);
+        let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), xattrs_data)?;
+        let mut hexbuf = [0u8; 64];
+        hex::encode_to_slice(digest, &mut hexbuf)?;
+        let checksum = std::str::from_utf8(&hexbuf)?;
+        let path = xattrs_path(checksum);
+
+        if !self.wrote_xattrs.contains(checksum) {
+            let inserted = self.wrote_xattrs.insert(checksum.to_string());
+            debug_assert!(inserted);
+            let mut target_header = h.clone();
+            target_header.set_size(xattrs_data.len() as u64);
+            self.out
+                .append_data(&mut target_header, &path, xattrs_data)?;
+        }
+        Ok(Some((path, h)))
+    }
+
+    /// Write a content object, returning the path/header that should be used
+    /// as a hard link to it in the target path. This matches how ostree checkouts work.
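+    ///
+    /// For illustration (assumed layout, with a hypothetical content object
+    /// whose checksum begins `ab12`): the object body is written once to
+    /// `sysroot/ostree/repo/objects/ab/12….file`, and every occurrence of
+    /// that file in the checkout is emitted as a tar hardlink entry pointing
+    /// back at that object path.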
+    fn append_content(&mut self, checksum: &str) -> Result<(Utf8PathBuf, tar::Header)> {
+        let path = object_path(ostree::ObjectType::File, checksum);
+
+        let (instream, meta, xattrs) = self.repo.load_file(checksum, gio::NONE_CANCELLABLE)?;
+        let meta = meta.unwrap();
+        let xattrs = xattrs.unwrap();
+
+        let mut h = tar::Header::new_gnu();
+        h.set_uid(meta.get_attribute_uint32("unix::uid") as u64);
+        h.set_gid(meta.get_attribute_uint32("unix::gid") as u64);
+        let mode = meta.get_attribute_uint32("unix::mode");
+        h.set_mode(mode);
+        let mut target_header = h.clone();
+        target_header.set_size(0);
+
+        if !self.wrote_content.contains(checksum) {
+            let inserted = self.wrote_content.insert(checksum.to_string());
+            debug_assert!(inserted);
+
+            if let Some((xattrspath, mut xattrsheader)) = self.append_xattrs(&xattrs)? {
+                xattrsheader.set_entry_type(tar::EntryType::Link);
+                xattrsheader.set_link_name(xattrspath)?;
+                let subpath = format!("{}.xattrs", path);
+                self.out
+                    .append_data(&mut xattrsheader, subpath, &mut std::io::empty())?;
+            }
+
+            if let Some(instream) = instream {
+                h.set_entry_type(tar::EntryType::Regular);
+                h.set_size(meta.get_size() as u64);
+                let mut instream = instream.into_read();
+                self.out.append_data(&mut h, &path, &mut instream)?;
+            } else {
+                h.set_size(0);
+                h.set_entry_type(tar::EntryType::Symlink);
+                h.set_link_name(meta.get_symlink_target().unwrap().as_str())?;
+                self.out.append_data(&mut h, &path, &mut std::io::empty())?;
+            }
+        }
+
+        Ok((path, target_header))
+    }
+
+    /// Write a dirtree object.
+    fn append_dirtree<C: IsA<gio::Cancellable>>(
+        &mut self,
+        dirpath: &Utf8Path,
+        repo: &ostree::Repo,
+        checksum: &str,
+        cancellable: Option<&C>,
+    ) -> Result<()> {
+        let v = &repo.load_variant(ostree::ObjectType::DirTree, checksum)?;
+        self.append(ostree::ObjectType::DirTree, checksum, v)?;
+        let v = v.get_data_as_bytes();
+        let v = v.try_as_aligned()?;
+        let v = gv!("(a(say)a(sayay))").cast(v);
+        let (files, dirs) = v.to_tuple();
+
+        if let Some(c) = cancellable {
+            c.set_error_if_cancelled()?;
+        }
+
+        // A reusable buffer to avoid heap allocating these
+        let mut hexbuf = [0u8; 64];
+
+        for file in files {
+            let (name, csum) = file.to_tuple();
+            let name = name.to_str();
+            hex::encode_to_slice(csum, &mut hexbuf)?;
+            let checksum = std::str::from_utf8(&hexbuf)?;
+            let (objpath, mut h) = self.append_content(checksum)?;
+            h.set_entry_type(tar::EntryType::Link);
+            h.set_link_name(&objpath)?;
+            let subpath = &dirpath.join(name);
+            let subpath = map_path(subpath);
+            self.out
+                .append_data(&mut h, &*subpath, &mut std::io::empty())?;
+        }
+
+        for item in dirs {
+            let (name, contents_csum, meta_csum) = item.to_tuple();
+            let name = name.to_str();
+            {
+                hex::encode_to_slice(meta_csum, &mut hexbuf)?;
+                let meta_csum = std::str::from_utf8(&hexbuf)?;
+                let meta_v = &repo.load_variant(ostree::ObjectType::DirMeta, meta_csum)?;
+                self.append(ostree::ObjectType::DirMeta, meta_csum, meta_v)?;
+            }
+            hex::encode_to_slice(contents_csum, &mut hexbuf)?;
+            let dirtree_csum = std::str::from_utf8(&hexbuf)?;
+            let subpath = &dirpath.join(name);
+            let subpath = map_path(subpath);
+            self.append_dirtree(&*subpath, repo, dirtree_csum, cancellable)?;
+        }
+
+        Ok(())
+    }
+}
+
+/// Recursively walk an OSTree commit and generate data into a [`tar::Builder`]
+/// which contains all of the metadata objects, as well as a hardlinked
+/// stream that looks like a checkout. Extended attributes are stored specially out
+/// of band of tar so that they can be reliably retrieved.
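+///
+/// A minimal usage sketch (the commit digest here is a placeholder, and any
+/// writer implementing `std::io::Write` can back the builder):
+///
+/// ```ignore
+/// let mut builder = tar::Builder::new(Vec::new());
+/// impl_export(repo, "3a1d…", &mut builder)?;
+/// builder.finish()?;
+/// ```
+///
+/// The public entrypoint is `export_commit` below, which resolves a ref and
+/// then drives this function.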
+fn impl_export<W: std::io::Write>(
+    repo: &ostree::Repo,
+    commit_checksum: &str,
+    out: &mut tar::Builder<W>,
+) -> Result<()> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    // Pre-create the object directories
+    for d in 0..=0xFF {
+        let mut h = tar::Header::new_gnu();
+        h.set_entry_type(tar::EntryType::Directory);
+        h.set_uid(0);
+        h.set_gid(0);
+        h.set_mode(0o755);
+        h.set_size(0);
+        let path = format!("{}/repo/objects/{:02x}", OSTREEDIR, d);
+        out.append_data(&mut h, &path, &mut std::io::empty())?;
+    }
+
+    // Write out the xattrs directory
+    {
+        let mut h = tar::Header::new_gnu();
+        h.set_entry_type(tar::EntryType::Directory);
+        h.set_mode(0o755);
+        h.set_size(0);
+        let path = format!("{}/repo/xattrs", OSTREEDIR);
+        out.append_data(&mut h, &path, &mut std::io::empty())?;
+    }
+
+    let writer = &mut OstreeMetadataWriter {
+        repo,
+        out,
+        wrote_dirmeta: HashSet::new(),
+        wrote_dirtree: HashSet::new(),
+        wrote_content: HashSet::new(),
+        wrote_xattrs: HashSet::new(),
+    };
+    let (commit_v, _) = repo.load_commit(commit_checksum)?;
+    let commit_v = &commit_v;
+    writer.append(ostree::ObjectType::Commit, commit_checksum, commit_v)?;
+
+    if let Some(commitmeta) =
+        repo.x_load_variant_if_exists(ostree::ObjectType::CommitMeta, commit_checksum)?
+    {
+        writer.append(ostree::ObjectType::CommitMeta, commit_checksum, &commitmeta)?;
+    }
+
+    let commit_v = commit_v.get_data_as_bytes();
+    let commit_v = commit_v.try_as_aligned()?;
+    let commit = gv!("(a{sv}aya(say)sstayay)").cast(commit_v);
+    let commit = commit.to_tuple();
+    let contents = &hex::encode(commit.6);
+    let metadata_checksum = &hex::encode(commit.7);
+    let metadata_v = &repo.load_variant(ostree::ObjectType::DirMeta, metadata_checksum)?;
+    writer.append(ostree::ObjectType::DirMeta, metadata_checksum, metadata_v)?;
+
+    writer.append_dirtree(Utf8Path::new("./"), repo, contents, cancellable)?;
+    Ok(())
+}
+
+/// Export an ostree commit to an (uncompressed) tar archive stream.
+#[context("Exporting commit")]
+fn export_commit(
+    repo: &ostree::Repo,
+    rev: &str,
+    out: impl std::io::Write,
+) -> Result<()> {
+    let commit = repo.resolve_rev(rev, false)?;
+    let mut tar = tar::Builder::new(out);
+    impl_export(repo, commit.unwrap().as_str(), &mut tar)?;
+    tar.finish()?;
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_map_path() {
+        assert_eq!(map_path("/".into()), Utf8Path::new("/"));
+        assert_eq!(
+            map_path("./usr/etc/blah".into()),
+            Utf8Path::new("./etc/blah")
+        );
+    }
+}
diff --git a/src/tar/import.rs b/src/tar/import.rs
new file mode 100644
index 000000000..146b4b2f4
--- /dev/null
+++ b/src/tar/import.rs
@@ -0,0 +1,428 @@
+//! APIs for extracting OSTree commits from container images
+
+use super::Result;
+use crate::variant_utils::variant_new_from_bytes;
+use anyhow::anyhow;
+use camino::Utf8Path;
+use fn_error_context::context;
+use std::collections::HashMap;
+
+/// Arbitrary limit on xattrs to avoid RAM exhaustion attacks. The actual filesystem limits are often much smaller.
+/// See https://en.wikipedia.org/wiki/Extended_file_attributes
+/// For example, XFS limits to 614 KiB.
+const MAX_XATTR_SIZE: u32 = 1024 * 1024;
+/// Limit on metadata objects (dirtree/dirmeta); this is copied
+/// from ostree-core.h. TODO: Bind this in introspection
+const MAX_METADATA_SIZE: u32 = 10 * 1024 * 1024;
+
+// Variant formats, see ostree-core.h
+// TODO - expose these via introspection
+const OSTREE_COMMIT_FORMAT: &str = "(a{sv}aya(say)sstayay)";
+const OSTREE_DIRTREE_FORMAT: &str = "(a(say)a(sayay))";
+const OSTREE_DIRMETA_FORMAT: &str = "(uuua(ayay))";
+const OSTREE_XATTRS_FORMAT: &str = "a(ayay)";
+
+
+/// State tracker for the importer. The main goal is to reject multiple
+/// commit objects, as well as finding metadata/content before the commit.
+#[derive(Debug, PartialEq, Eq)]
+enum ImportState {
+    Initial,
+    Importing(String),
+}
+
+/// Importer machine.
+struct Importer<'a> {
+    state: ImportState,
+    repo: &'a ostree::Repo,
+    xattrs: HashMap<String, glib::Variant>,
+    next_xattrs: Option<(String, String)>,
+}
+
+impl<'a> Drop for Importer<'a> {
+    fn drop(&mut self) {
+        let _ = self.repo.abort_transaction(gio::NONE_CANCELLABLE);
+    }
+}
+
+/// Validate size/type of a tar header for OSTree metadata object.
+fn validate_metadata_header(header: &tar::Header, desc: &str) -> Result<usize> {
+    if header.entry_type() != tar::EntryType::Regular {
+        return Err(anyhow!("Invalid non-regular metadata object {}", desc));
+    }
+    let size = header.size()?;
+    let max_size = MAX_METADATA_SIZE as u64;
+    if size > max_size {
+        return Err(anyhow!(
+            "object of size {} exceeds {} bytes",
+            size,
+            max_size
+        ));
+    }
+    Ok(size as usize)
+}
+
+/// Convert a tar header to a gio::FileInfo. This only maps
+/// attributes that matter to ostree.
+fn header_to_gfileinfo(header: &tar::Header) -> Result<gio::FileInfo> {
+    let i = gio::FileInfo::new();
+    let t = match header.entry_type() {
+        tar::EntryType::Regular => gio::FileType::Regular,
+        tar::EntryType::Symlink => gio::FileType::SymbolicLink,
+        o => return Err(anyhow!("Invalid tar type: {:?}", o)),
+    };
+    i.set_file_type(t);
+    i.set_size(0);
+    let uid = header.uid()? as u32;
+    let gid = header.gid()? as u32;
+    let mode = header.mode()?;
+    i.set_attribute_uint32("unix::uid", uid);
+    i.set_attribute_uint32("unix::gid", gid);
+    i.set_attribute_uint32("unix::mode", mode);
+    if t == gio::FileType::Regular {
+        i.set_size(header.size()? as i64)
+    } else {
+        i.set_attribute_boolean("standard::is-symlink", true);
+        let target = header.link_name()?;
+        let target = target.ok_or_else(|| anyhow!("Invalid symlink"))?;
+        let target = target
+            .as_os_str()
+            .to_str()
+            .ok_or_else(|| anyhow!("Non-utf8 symlink"))?;
+        i.set_symlink_target(target);
+    }
+
+    Ok(i)
+}
+
+fn format_for_objtype(t: ostree::ObjectType) -> Option<&'static str> {
+    match t {
+        ostree::ObjectType::DirTree => Some(OSTREE_DIRTREE_FORMAT),
+        ostree::ObjectType::DirMeta => Some(OSTREE_DIRMETA_FORMAT),
+        ostree::ObjectType::Commit => Some(OSTREE_COMMIT_FORMAT),
+        _ => None,
+    }
+}
+
+/// The C function ostree_object_type_from_string aborts on
+/// unknown strings, so we have a safe version here.
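+///
+/// For example, `objtype_from_string("dirtree")` returns
+/// `Some(ostree::ObjectType::DirTree)`, while an unrecognized suffix (say, a
+/// hypothetical `"sizes"`) yields `None` rather than aborting the process.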
+fn objtype_from_string(t: &str) -> Option<ostree::ObjectType> {
+    Some(match t {
+        "commit" => ostree::ObjectType::Commit,
+        "dirtree" => ostree::ObjectType::DirTree,
+        "dirmeta" => ostree::ObjectType::DirMeta,
+        "file" => ostree::ObjectType::File,
+        _ => return None,
+    })
+}
+
+/// Given a tar entry, read it all into a GVariant
+fn entry_to_variant(
+    mut entry: tar::Entry<impl std::io::Read>,
+    vtype: &str,
+    desc: &str,
+) -> Result<glib::Variant> {
+    let header = entry.header();
+    let size = validate_metadata_header(header, desc)?;
+
+    let mut buf: Vec<u8> = Vec::with_capacity(size);
+    let n = std::io::copy(&mut entry, &mut buf)?;
+    assert_eq!(n as usize, size);
+    let v = glib::Bytes::from_owned(buf);
+    Ok(crate::variant_utils::variant_normal_from_bytes(vtype, v))
+}
+
+impl<'a> Importer<'a> {
+    /// Import a commit object. Must be in "initial" state. This transitions into the "importing" state.
+    fn import_commit(
+        &mut self,
+        entry: tar::Entry<impl std::io::Read>,
+        checksum: &str,
+    ) -> Result<()> {
+        assert_eq!(self.state, ImportState::Initial);
+        self.import_metadata(entry, checksum, ostree::ObjectType::Commit)?;
+        self.state = ImportState::Importing(checksum.to_string());
+        Ok(())
+    }
+
+    /// Import a metadata object.
+    fn import_metadata(
+        &mut self,
+        entry: tar::Entry<impl std::io::Read>,
+        checksum: &str,
+        objtype: ostree::ObjectType,
+    ) -> Result<()> {
+        let vtype =
+            format_for_objtype(objtype).ok_or_else(|| anyhow!("Unhandled objtype {}", objtype))?;
+        let v = entry_to_variant(entry, vtype, checksum)?;
+        // FIXME insert expected dirtree/dirmeta
+        let _ = self
+            .repo
+            .write_metadata(objtype, Some(checksum), &v, gio::NONE_CANCELLABLE)?;
+        Ok(())
+    }
+
+    /// Import a content object.
+    #[context("Processing content object {}", checksum)]
+    fn import_content_object(
+        &self,
+        mut entry: tar::Entry<impl std::io::Read>,
+        checksum: &str,
+        xattrs: Option<&glib::Variant>,
+    ) -> Result<()> {
+        let cancellable = gio::NONE_CANCELLABLE;
+        let (recv, mut send) = os_pipe::pipe()?;
+        let size = entry.header().size()?;
+        let header_copy = entry.header().clone();
+        let repo_clone = self.repo.clone();
+        crossbeam::thread::scope(move |s| -> Result<()> {
+            let j = s.spawn(move |_| -> Result<()> {
+                let i = header_to_gfileinfo(&header_copy)?;
+                let recv = gio::ReadInputStream::new(recv);
+                let (ostream, size) =
+                    ostree::raw_file_to_content_stream(&recv, &i, xattrs, cancellable)?;
+                repo_clone.write_content(Some(checksum), &ostream, size, cancellable)?;
+                Ok(())
+            });
+            let n = std::io::copy(&mut entry, &mut send)?;
+            drop(send);
+            assert_eq!(n, size);
+            j.join().unwrap()?;
+            Ok(())
+        })
+        .unwrap()?;
+
+        Ok(())
+    }
+
+    /// Given a tar entry that looks like an object (its path is under ostree/repo/objects/),
+    /// determine its type and import it.
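+    ///
+    /// For example (hypothetical digest), an entry named
+    /// `objects/ab/12….dirtree` is split into the 2-character parent
+    /// directory plus the 62-character file stem to recover the full
+    /// checksum, and the `dirtree` extension selects the object type.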
+    #[context("Importing object {}", path)]
+    fn import_object<'b, R: std::io::Read>(
+        &mut self,
+        entry: tar::Entry<'b, R>,
+        path: &Utf8Path,
+    ) -> Result<()> {
+        let parentname = path
+            .parent()
+            .map(|p| p.file_name())
+            .flatten()
+            .ok_or_else(|| anyhow!("Invalid path (no parent) {}", path))?;
+        if parentname.len() != 2 {
+            return Err(anyhow!("Invalid checksum parent {}", parentname));
+        }
+        let mut name = path
+            .file_name()
+            .map(Utf8Path::new)
+            .ok_or_else(|| anyhow!("Invalid path (dir) {}", path))?;
+        let mut objtype = name
+            .extension()
+            .ok_or_else(|| anyhow!("Invalid objpath {}", path))?;
+        let is_xattrs = objtype == "xattrs";
+        let xattrs = self.next_xattrs.take();
+        if is_xattrs {
+            if xattrs.is_some() {
+                return Err(anyhow!("Found multiple xattrs"));
+            }
+            name = name
+                .file_stem()
+                .map(Utf8Path::new)
+                .ok_or_else(|| anyhow!("Invalid xattrs {}", path))?;
+            objtype = name
+                .extension()
+                .ok_or_else(|| anyhow!("Invalid objpath {}", path))?;
+        }
+        let checksum_rest = name
+            .file_stem()
+            .ok_or_else(|| anyhow!("Invalid objpath {}", path))?;
+
+        if checksum_rest.len() != 62 {
+            return Err(anyhow!("Invalid checksum rest {}", name));
+        }
+        let checksum = format!("{}{}", parentname, checksum_rest);
+        validate_sha256(&checksum)?;
+        let xattr_ref = if let Some((xattr_target, xattr_objref)) = xattrs {
+            if xattr_target.as_str() != checksum.as_str() {
+                return Err(anyhow!(
+                    "Found object {} but previous xattr was {}",
+                    checksum,
+                    xattr_target
+                ));
+            }
+            let v = self
+                .xattrs
+                .get(&xattr_objref)
+                .ok_or_else(|| anyhow!("Failed to find xattr {}", xattr_objref))?;
+            Some(v)
+        } else {
+            None
+        };
+        let objtype = objtype_from_string(&objtype)
+            .ok_or_else(|| anyhow!("Invalid object type {}", objtype))?;
+        match (objtype, is_xattrs, &self.state) {
+            (ostree::ObjectType::Commit, _, ImportState::Initial) => {
+                self.import_commit(entry, &checksum)
+            }
+            (ostree::ObjectType::File, true, ImportState::Importing(_)) => {
+                self.import_xattr_ref(entry, checksum)
+            }
+            (ostree::ObjectType::File, false, ImportState::Importing(_)) => {
+                self.import_content_object(entry, &checksum, xattr_ref)
+            }
+            (objtype, false, ImportState::Importing(_)) => {
+                self.import_metadata(entry, &checksum, objtype)
+            }
+            (o, _, ImportState::Initial) => {
+                return Err(anyhow!("Found content object {} before commit", o))
+            }
+            (ostree::ObjectType::Commit, _, ImportState::Importing(c)) => {
+                return Err(anyhow!("Found multiple commit objects; original: {}", c))
+            }
+            (objtype, true, _) => {
+                return Err(anyhow!("Found xattrs for non-file object type {}", objtype))
+            }
+        }
+    }
+
+    /// Handle .xattrs hardlinks that contain extended attributes for
+    /// a content object.
+    #[context("Processing xattr ref")]
+    fn import_xattr_ref<'b, R: std::io::Read>(
+        &mut self,
+        entry: tar::Entry<'b, R>,
+        target: String,
+    ) -> Result<()> {
+        assert!(self.next_xattrs.is_none());
+        let header = entry.header();
+        if header.entry_type() != tar::EntryType::Link {
+            return Err(anyhow!("Non-hardlink xattr reference found for {}", target));
+        }
+        let xattr_target = entry
+            .link_name()?
+            .ok_or_else(|| anyhow!("No xattr link content for {}", target))?;
+        let xattr_target = Utf8Path::from_path(&*xattr_target)
+            .ok_or_else(|| anyhow!("Invalid non-UTF8 xattr link {}", target))?;
+        let xattr_target = xattr_target
+            .file_name()
+            .ok_or_else(|| anyhow!("Invalid xattr link {}", target))?;
+        validate_sha256(xattr_target)?;
+        self.next_xattrs = Some((target, xattr_target.to_string()));
+        Ok(())
+    }
+
+    /// Process a special /xattrs/ entry (sha256 of xattr values).
+    fn import_xattrs<'b, R: std::io::Read>(&mut self, mut entry: tar::Entry<'b, R>) -> Result<()> {
+        match &self.state {
+            ImportState::Initial => return Err(anyhow!("Found xattr object before commit")),
+            ImportState::Importing(_) => {}
+        }
+        let checksum = {
+            let path = entry.path()?;
+            let name = path
+                .file_name()
+                .ok_or_else(|| anyhow!("Invalid xattr dir: {:?}", path))?;
+            let name = name
+                .to_str()
+                .ok_or_else(|| anyhow!("Invalid non-UTF8 xattr name: {:?}", name))?;
+            validate_sha256(name)?;
+            name.to_string()
+        };
+        let header = entry.header();
+        if header.entry_type() != tar::EntryType::Regular {
+            return Err(anyhow!(
+                "Invalid xattr entry of type {:?}",
+                header.entry_type()
+            ));
+        }
+        let n = header.size()?;
+        if n > MAX_XATTR_SIZE as u64 {
+            return Err(anyhow!("Invalid xattr size {}", n));
+        }
+
+        let mut contents = Vec::with_capacity(n as usize);
+        let c = std::io::copy(&mut entry, &mut contents)?;
+        assert_eq!(c, n);
+        let contents: glib::Bytes = contents.as_slice().into();
+        let contents = variant_new_from_bytes(OSTREE_XATTRS_FORMAT, contents, false);
+
+        self.xattrs.insert(checksum, contents);
+        Ok(())
+    }
+
+    /// Consume this importer and return the imported OSTree commit checksum.
+    fn commit(mut self) -> Result<String> {
+        self.repo.commit_transaction(gio::NONE_CANCELLABLE)?;
+        match std::mem::replace(&mut self.state, ImportState::Initial) {
+            ImportState::Importing(c) => Ok(c),
+            ImportState::Initial => Err(anyhow!("Failed to find a commit object to import")),
+        }
+    }
+}
+
+fn validate_sha256(s: &str) -> Result<()> {
+    if s.len() != 64 {
+        return Err(anyhow!("Invalid sha256 checksum (len) {}", s));
+    }
+    if !s.chars().all(|c| matches!(c, '0'..='9' | 'a'..='f')) {
+        return Err(anyhow!("Invalid sha256 checksum {}", s));
+    }
+    Ok(())
+}
+
+/// Read the contents of a tarball and import the ostree commit inside. The sha256 of the imported commit will be returned.
+#[context("Importing")]
+pub fn import_tar(repo: &ostree::Repo, src: impl std::io::Read) -> Result<String> {
+    let mut importer = Importer {
+        state: ImportState::Initial,
+        repo,
+        xattrs: Default::default(),
+        next_xattrs: None,
+    };
+    repo.prepare_transaction(gio::NONE_CANCELLABLE)?;
+    let mut archive = tar::Archive::new(src);
+    for entry in archive.entries()? {
+        let entry = entry?;
+        if entry.header().entry_type() == tar::EntryType::Directory {
+            continue;
+        }
+        let path = entry.path()?;
+        let path = &*path;
+        let path =
+            Utf8Path::from_path(path).ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?;
+        let path = if let Ok(p) = path.strip_prefix("sysroot/ostree/repo/") {
+            p
+        } else {
+            continue;
+        };
+
+        if let Ok(p) = path.strip_prefix("objects/") {
+            // Need to clone here, otherwise we borrow from the moved entry
+            let p = &p.to_owned();
+            importer.import_object(entry, p)?;
+        } else if let Ok(_) = path.strip_prefix("xattrs/") {
+            importer.import_xattrs(entry)?;
+        }
+    }
+
+    importer.commit()
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_validate_sha256() -> Result<()> {
+        validate_sha256("a86d80a3e9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b964")?;
+        assert!(validate_sha256("").is_err());
+        assert!(validate_sha256(
+            "a86d80a3e9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b9644"
+        )
+        .is_err());
+        assert!(validate_sha256(
+            "a86d80a3E9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b964"
+        )
+        .is_err());
+        Ok(())
+    }
+}
diff --git a/src/tar/mod.rs b/src/tar/mod.rs
new file mode 100644
index 000000000..241c6a920
--- /dev/null
+++ b/src/tar/mod.rs
@@ -0,0 +1,14 @@
+//! # Losslessly export and import ostree commits as tar archives
+//!
+//! Convert an ostree commit into a tarball stream, and import
+//! it again.
+
+//#![deny(missing_docs)]
+// Good defaults
+#![forbid(unused_must_use)]
+#![deny(unsafe_code)]
+
+mod import;
+pub use import::*;
+mod export;
+pub use export::*;
diff --git a/src/tests/it/fixtures/exampleos.tar.zst b/src/tests/it/fixtures/exampleos.tar.zst
new file mode 100644
index 0000000000000000000000000000000000000000..8e8969d838ae96e1575fc2c9d3773325bed2bfc0
GIT binary patch
literal 1052
zcmV+%1mpWCwJ-eySbZP>X6<$%Kx@WBVHyI{ESN~3A!6F1JjwxTPP9-SWiU{1
@FFyCaXb;H%)0PPv3`-`i9lptK2?mD9`^AGD2)a+4u*en5a@{-j>N~ir$y$x%j=~>
{C`#c|D+OYQPLWe>ZevQ(T%*UHB}-*U54z+iXH2p
-c!FHWr+@&M$(q$O(bnO{x%+;tv#>8UYLsa^yE(3;ylsu=sZzEDUF7qaFi!HZpILi
thj95Qi$xS%y`Yt6G2Vm?2+*P@Am&KStt#PVp$Z%gN$If_;@8#dSBgU>{pDL_r&*P
JV?T{B+pDsb`x25H#3u2ZL@E8GcDQ1>#xOUr+6{;@rgB|(bR*`KpdzsZBZCUS@@AS
`)}MkhI*Ln>plcvfoaQugD~(&%iX=SdDqf@U$3iN+}}10g0?a(
QN&)UKa~s5{4=I{vO~;!LhRHu2#g0wzD$7YsL`2YMnptLnt~KEi~-;xkqJbmI0q6S
K^VqB6vQA51#uL{AOsYH2#qlULWqD60V4!KgB+g&{jc#k`)L}~`C$W6^aFS)Nbori
z|rO!8if*IIW87(@dNlE*#MCA0Af;zNZqht_5m|YqaMK@OEy3x3^1B02Yfw1@M|`p
&C}QP>%uo_cmQW1K%`_q$!sjOg0*E17$E~f0et`vF05w>%mXBQUE&140M`M#2PViDfCfCC(9fbzSs;1__3wE`za%d%_$TL#=jb_1$Nv2Oqk1qaWx0U>&T(*z!?
4hSQe6=g7tSrCEZTs3P(EIAYwLKTp@e??D9*5WyvZBkZIbOghj3
d_UmwaAUOP1(TlVrc&+?pqvr>f+V|54q(zgSIMRa1ZSNi@kT7vU_jw@syNp_@M-@6
5g8CV8XG`YceokAxz=566Hu$3TIDZTdp^#P}V57MmL!w-teIS_2281>MLIWb5`9N52LlD#&5MPgy
0Y`Bb0%H+q=Fc&a<|UBhBro~xf}o%0xwt2rMHEDuVuXnp3@{LpHWCU~4no7g0Za=P
fZZ2??i9!wrx#a1U^SI*D00

literal 0
HcmV?d00001

diff --git a/src/tests/it/main.rs b/src/tests/it/main.rs
new file mode 100644
index 000000000..fe3396cfc
--- /dev/null
+++ b/src/tests/it/main.rs
@@ -0,0 +1,114 @@
+use std::{fs::File, io::BufReader};
+
+use anyhow::{anyhow, Context, Result};
+use camino::{Utf8Path, Utf8PathBuf};
+use fn_error_context::context;
+use indoc::indoc;
+use sh_inline::bash;
+
+use ostree_container::oci as myoci;
+
+const EXAMPLEOS_TAR: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
+const TESTREF: &str = "exampleos/x86_64/stable";
+const CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
+
+#[context("Generating test OCI")]
+fn generate_test_oci(dir: &Utf8Path) -> Result<Utf8PathBuf> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    let path = Utf8Path::new(dir);
+    let tarpath = &path.join("exampleos.tar.zst");
+    std::fs::write(tarpath, EXAMPLEOS_TAR)?;
+    bash!(
+        indoc! {"
{" + cd {path} + ostree --repo=repo-archive init --mode=archive + ostree --repo=repo-archive commit -b {testref} --tree=tar=exampleos.tar.zst + ostree --repo=repo-archive show {testref} + ostree --repo=repo-archive ls -R -X -C {testref} + "}, + testref = TESTREF, + path = path.as_str() + )?; + std::fs::remove_file(tarpath)?; + let repopath = &path.join("repo-archive"); + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?; + let (_, rev) = repo.read_commit(TESTREF, cancellable)?; + let (commitv, _) = repo.load_commit(rev.as_str())?; + assert_eq!( + ostree::commit_get_content_checksum(&commitv) + .unwrap() + .as_str(), + CONTENT_CHECKSUM + ); + let ocipath = path.join("exampleos-oci"); + let ocitarget = ostree_container::buildoci::Target::OciDir(ocipath.as_ref()); + ostree_container::buildoci::build(repo, TESTREF, ocitarget)?; + bash!(r"skopeo inspect oci:{ocipath}", ocipath = ocipath.as_str())?; + Ok(ocipath) +} + +fn read_blob(ocidir: &Utf8Path, digest: &str) -> Result> { + let digest = digest + .strip_prefix("sha256:") + .ok_or_else(|| anyhow!("Unknown algorithim in digest {}", digest))?; + let f = File::open(ocidir.join("blobs/sha256").join(digest)) + .with_context(|| format!("Opening blob {}", digest))?; + Ok(std::io::BufReader::new(f)) +} + +#[context("Parsing OCI")] +fn find_layer_in_oci(ocidir: &Utf8Path) -> Result> { + let f = std::io::BufReader::new( + File::open(ocidir.join("index.json")).context("Opening index.json")?, + ); + let index: myoci::Index = serde_json::from_reader(f)?; + let manifest = index + .manifests + .get(0) + .ok_or_else(|| anyhow!("Missing manifest in index.json"))?; + let f = read_blob(ocidir, &manifest.digest)?; + let manifest: myoci::Manifest = serde_json::from_reader(f)?; + let layer = manifest + .layers + .iter() + .find(|layer| { + matches!( + layer.media_type.as_str(), + myoci::DOCKER_TYPE_LAYER | oci_distribution::manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE + ) + }) + .ok_or_else(|| anyhow!("Failed to find rootfs layer"))?; + Ok(read_blob(ocidir, &layer.digest)?) 
+}
+
+#[test]
+fn test_e2e() -> Result<()> {
+    let cancellable = gio::NONE_CANCELLABLE;
+
+    let tempdir = tempfile::tempdir()?;
+    let path = Utf8Path::from_path(tempdir.path()).unwrap();
+    let srcdir = &path.join("src");
+    std::fs::create_dir(srcdir)?;
+    let ocidir = &generate_test_oci(srcdir)?;
+    let destdir = &path.join("dest");
+    std::fs::create_dir(destdir)?;
+    let destrepodir = &destdir.join("repo");
+    let destrepo = ostree::Repo::new_for_path(destrepodir);
+    destrepo.create(ostree::RepoMode::Archive, cancellable)?;
+
+    let tarf = find_layer_in_oci(ocidir)?;
+    let imported_commit = ostree_container::client::import_tarball(&destrepo, tarf)?;
+    let (commitdata, _) = destrepo.load_commit(&imported_commit)?;
+    assert_eq!(
+        CONTENT_CHECKSUM,
+        ostree::commit_get_content_checksum(&commitdata)
+            .unwrap()
+            .as_str()
+    );
+    bash!(
+        "ostree --repo={destrepodir} ls -R {imported_commit}",
+        destrepodir = destrepodir.as_str(),
+        imported_commit = imported_commit.as_str()
+    )?;
+    Ok(())
+}
diff --git a/src/variant_utils.rs b/src/variant_utils.rs
new file mode 100644
index 000000000..0104e14b8
--- /dev/null
+++ b/src/variant_utils.rs
@@ -0,0 +1,38 @@
+use glib::translate::*;
+
+#[allow(unsafe_code)]
+pub(crate) fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> glib::Variant {
+    unsafe {
+        let ty = ty.to_glib_none();
+        let ty: *const libc::c_char = ty.0;
+        let ty = ty as *const glib_sys::GVariantType;
+        let bytes = bytes.to_glib_full();
+        let v = glib_sys::g_variant_new_from_bytes(ty, bytes, trusted.to_glib());
+        glib_sys::g_variant_ref_sink(v);
+        from_glib_full(v)
+    }
+}
+
+#[allow(unsafe_code)]
+pub(crate) fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant {
+    unsafe { from_glib_full(glib_sys::g_variant_get_normal_form(v.to_glib_none().0)) }
+}
+
+pub(crate) fn variant_normal_from_bytes(ty: &str, bytes: glib::Bytes) -> glib::Variant {
+    variant_get_normal_form(&variant_new_from_bytes(ty, bytes, false))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    const BUF: &[u8] = &[1u8; 4];
+
+    #[test]
+    fn test_variant_from_bytes() {
+        let bytes = glib::Bytes::from_static(BUF);
+        let v = variant_new_from_bytes("u", bytes, false);
+        let val: u32 = v.get().unwrap();
+        assert_eq!(val, 16843009);
+    }
+}

From d0289847a046f776b6866597f85494e01ab3977f Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Sat, 3 Apr 2021 21:03:28 +0000
Subject: [PATCH 002/775] Fix tests, clean up imports

---
 Cargo.toml                        |  17 +--
 src/lib.rs                        |   2 +-
 src/tar/export.rs                 |  13 +-
 src/tar/import.rs                 |   3 +-
 src/tests/it/main.rs              | 114 ------------------
 .../it/fixtures/exampleos.tar.zst | Bin
 tests/it/main.rs                  |  76 ++++++++++++
 7 files changed, 85 insertions(+), 140 deletions(-)
 delete mode 100644 src/tests/it/main.rs
 rename {src/tests => tests}/it/fixtures/exampleos.tar.zst (100%)
 create mode 100644 tests/it/main.rs

diff --git a/Cargo.toml b/Cargo.toml
index 01dd8a12c..bc1fa7c4a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,40 +10,31 @@
 [dependencies]
 anyhow = "1.0"
 camino = "1.0.4"
-cjson = "0.1.1"
 crossbeam = "0.8.0"
-flate2 = "1.0.20"
 fn-error-context = "0.1.1"
-futures = "0.3.13"
 gio = "0.9.1"
 glib = "0.10.3"
 glib-sys = "0.10.1"
 gvariant = "0.4.0"
 hex = "0.4.3"
 libc = "0.2.92"
-nix = "0.20.0"
 openat = "0.1.20"
 openat-ext = "0.1.13"
 openssl = "0.10.33"
 os_pipe = "*"
 ostree-sys = "0.7.2"
-serde = "1.0.125"
-serde_json = "1.0.64"
 tar = "0.4.33"
 
 [dependencies.ostree]
 features = ["v2021_1"]
 version = "0.10.0"
 
-[dependencies.phf]
-features = ["macros"]
-version = "0.8.0"
-
 [dev-dependencies]
 clap = "2.33.3"
 indoc = "1.0.3"
 sh-inline = "0.1.0"
+tempfile = "3.2.0"
 
-[dev-dependencies.tokio]
-features = ["full"]
-version = "1"
+[patch.crates-io]
+ostree = { path = '../../../gitlab/fkrull/ostree-rs' }
+ostree-sys = { path = '../../../gitlab/fkrull/ostree-rs/sys' }
diff --git a/src/lib.rs b/src/lib.rs
index 937e1518c..c3e3a97ab 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -13,6 +13,6 @@
 /// to a string to output to a terminal or logs.
 type Result<T> = anyhow::Result<T>;
 
+mod ostree_ext;
 pub mod tar;
-pub mod bindingext;
 pub mod variant_utils;
diff --git a/src/tar/export.rs b/src/tar/export.rs
index 0c4476963..309a7b786 100644
--- a/src/tar/export.rs
+++ b/src/tar/export.rs
@@ -1,16 +1,13 @@
 //! APIs for creating container images from OSTree commits
 
-use super::Result;
+use crate::Result;
 
-use crate::oci;
-use crate::ostree_ext::*;
-use anyhow::Context;
+use crate::ostree_ext::RepoExt;
 use camino::{Utf8Path, Utf8PathBuf};
 use fn_error_context::context;
 use gio::prelude::*;
 use gvariant::aligned_bytes::TryAsAligned;
 use gvariant::{gv, Marker, Structure};
-
 use std::{borrow::Cow, collections::HashSet, path::Path};
 
 // This way the default ostree -> sysroot/ostree symlink works.
@@ -285,11 +282,7 @@ fn impl_export<W: std::io::Write>(
 
 /// Export an ostree commit to an (uncompressed) tar archive stream.
 #[context("Exporting commit")]
-fn export_commit(
-    repo: &ostree::Repo,
-    rev: &str,
-    out: impl std::io::Write,
-) -> Result<()> {
+pub fn export_commit(repo: &ostree::Repo, rev: &str, out: impl std::io::Write) -> Result<()> {
     let commit = repo.resolve_rev(rev, false)?;
     let mut tar = tar::Builder::new(out);
     impl_export(repo, commit.unwrap().as_str(), &mut tar)?;
diff --git a/src/tar/import.rs b/src/tar/import.rs
index 146b4b2f4..a7af4114c 100644
--- a/src/tar/import.rs
+++ b/src/tar/import.rs
@@ -1,7 +1,7 @@
 //! APIs for extracting OSTree commits from container images
 
-use super::Result;
 use crate::variant_utils::variant_new_from_bytes;
+use crate::Result;
 use anyhow::anyhow;
 use camino::Utf8Path;
 use fn_error_context::context;
@@ -22,7 +22,6 @@ const OSTREE_DIRTREE_FORMAT: &str = "(a(say)a(sayay))";
 const OSTREE_DIRMETA_FORMAT: &str = "(uuua(ayay))";
 const OSTREE_XATTRS_FORMAT: &str = "a(ayay)";
 
-
 /// State tracker for the importer. The main goal is to reject multiple
 /// commit objects, as well as finding metadata/content before the commit.
 #[derive(Debug, PartialEq, Eq)]
diff --git a/src/tests/it/main.rs b/src/tests/it/main.rs
deleted file mode 100644
index fe3396cfc..000000000
--- a/src/tests/it/main.rs
+++ /dev/null
@@ -1,114 +0,0 @@
-use std::{fs::File, io::BufReader};
-
-use anyhow::{anyhow, Context, Result};
-use camino::{Utf8Path, Utf8PathBuf};
-use fn_error_context::context;
-use indoc::indoc;
-use sh_inline::bash;
-
-use ostree_container::oci as myoci;
-
-const EXAMPLEOS_TAR: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
-const TESTREF: &str = "exampleos/x86_64/stable";
-const CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
-
-#[context("Generating test OCI")]
-fn generate_test_oci(dir: &Utf8Path) -> Result<Utf8PathBuf> {
-    let cancellable = gio::NONE_CANCELLABLE;
-    let path = Utf8Path::new(dir);
-    let tarpath = &path.join("exampleos.tar.zst");
-    std::fs::write(tarpath, EXAMPLEOS_TAR)?;
-    bash!(
-        indoc! {"
{" - cd {path} - ostree --repo=repo-archive init --mode=archive - ostree --repo=repo-archive commit -b {testref} --tree=tar=exampleos.tar.zst - ostree --repo=repo-archive show {testref} - ostree --repo=repo-archive ls -R -X -C {testref} - "}, - testref = TESTREF, - path = path.as_str() - )?; - std::fs::remove_file(tarpath)?; - let repopath = &path.join("repo-archive"); - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?; - let (_, rev) = repo.read_commit(TESTREF, cancellable)?; - let (commitv, _) = repo.load_commit(rev.as_str())?; - assert_eq!( - ostree::commit_get_content_checksum(&commitv) - .unwrap() - .as_str(), - CONTENT_CHECKSUM - ); - let ocipath = path.join("exampleos-oci"); - let ocitarget = ostree_container::buildoci::Target::OciDir(ocipath.as_ref()); - ostree_container::buildoci::build(repo, TESTREF, ocitarget)?; - bash!(r"skopeo inspect oci:{ocipath}", ocipath = ocipath.as_str())?; - Ok(ocipath) -} - -fn read_blob(ocidir: &Utf8Path, digest: &str) -> Result> { - let digest = digest - .strip_prefix("sha256:") - .ok_or_else(|| anyhow!("Unknown algorithim in digest {}", digest))?; - let f = File::open(ocidir.join("blobs/sha256").join(digest)) - .with_context(|| format!("Opening blob {}", digest))?; - Ok(std::io::BufReader::new(f)) -} - -#[context("Parsing OCI")] -fn find_layer_in_oci(ocidir: &Utf8Path) -> Result> { - let f = std::io::BufReader::new( - File::open(ocidir.join("index.json")).context("Opening index.json")?, - ); - let index: myoci::Index = serde_json::from_reader(f)?; - let manifest = index - .manifests - .get(0) - .ok_or_else(|| anyhow!("Missing manifest in index.json"))?; - let f = read_blob(ocidir, &manifest.digest)?; - let manifest: myoci::Manifest = serde_json::from_reader(f)?; - let layer = manifest - .layers - .iter() - .find(|layer| { - matches!( - layer.media_type.as_str(), - myoci::DOCKER_TYPE_LAYER | oci_distribution::manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE - ) - }) - .ok_or_else(|| anyhow!("Failed to find rootfs layer"))?; - Ok(read_blob(ocidir, &layer.digest)?) 
-}
-
-#[test]
-fn test_e2e() -> Result<()> {
-    let cancellable = gio::NONE_CANCELLABLE;
-
-    let tempdir = tempfile::tempdir()?;
-    let path = Utf8Path::from_path(tempdir.path()).unwrap();
-    let srcdir = &path.join("src");
-    std::fs::create_dir(srcdir)?;
-    let ocidir = &generate_test_oci(srcdir)?;
-    let destdir = &path.join("dest");
-    std::fs::create_dir(destdir)?;
-    let destrepodir = &destdir.join("repo");
-    let destrepo = ostree::Repo::new_for_path(destrepodir);
-    destrepo.create(ostree::RepoMode::Archive, cancellable)?;
-
-    let tarf = find_layer_in_oci(ocidir)?;
-    let imported_commit = ostree_container::client::import_tarball(&destrepo, tarf)?;
-    let (commitdata, _) = destrepo.load_commit(&imported_commit)?;
-    assert_eq!(
-        CONTENT_CHECKSUM,
-        ostree::commit_get_content_checksum(&commitdata)
-            .unwrap()
-            .as_str()
-    );
-    bash!(
-        "ostree --repo={destrepodir} ls -R {imported_commit}",
-        destrepodir = destrepodir.as_str(),
-        imported_commit = imported_commit.as_str()
-    )?;
-    Ok(())
-}
diff --git a/src/tests/it/fixtures/exampleos.tar.zst b/tests/it/fixtures/exampleos.tar.zst
similarity index 100%
rename from src/tests/it/fixtures/exampleos.tar.zst
rename to tests/it/fixtures/exampleos.tar.zst
diff --git a/tests/it/main.rs b/tests/it/main.rs
new file mode 100644
index 000000000..2db095070
--- /dev/null
+++ b/tests/it/main.rs
@@ -0,0 +1,76 @@
+use anyhow::Result;
+use camino::{Utf8Path, Utf8PathBuf};
+use fn_error_context::context;
+use indoc::indoc;
+use sh_inline::bash;
+use std::io::Write;
+
+const EXAMPLEOS_TAR: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
+const TESTREF: &str = "exampleos/x86_64/stable";
+const CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
+
+#[context("Generating test OCI")]
+fn generate_test_tarball(dir: &Utf8Path) -> Result<Utf8PathBuf> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    let path = Utf8Path::new(dir);
+    let src_tarpath = &path.join("exampleos.tar.zst");
+    std::fs::write(src_tarpath, EXAMPLEOS_TAR)?;
+    bash!(
+        indoc! {"
{" + cd {path} + ostree --repo=repo-archive init --mode=archive + ostree --repo=repo-archive commit -b {testref} --tree=tar=exampleos.tar.zst + ostree --repo=repo-archive show {testref} + "}, + testref = TESTREF, + path = path.as_str() + )?; + std::fs::remove_file(src_tarpath)?; + let repopath = &path.join("repo-archive"); + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?; + let (_, rev) = repo.read_commit(TESTREF, cancellable)?; + let (commitv, _) = repo.load_commit(rev.as_str())?; + assert_eq!( + ostree::commit_get_content_checksum(&commitv) + .unwrap() + .as_str(), + CONTENT_CHECKSUM + ); + let destpath = path.join("exampleos-export.tar"); + let mut outf = std::io::BufWriter::new(std::fs::File::create(&destpath)?); + ostree_ext::tar::export_commit(repo, rev.as_str(), &mut outf)?; + outf.flush()?; + Ok(destpath) +} + +#[test] +fn test_e2e() -> Result<()> { + let cancellable = gio::NONE_CANCELLABLE; + + let tempdir = tempfile::tempdir()?; + let path = Utf8Path::from_path(tempdir.path()).unwrap(); + let srcdir = &path.join("src"); + std::fs::create_dir(srcdir)?; + let src_tar = + &mut std::io::BufReader::new(std::fs::File::open(&generate_test_tarball(srcdir)?)?); + let destdir = &path.join("dest"); + std::fs::create_dir(destdir)?; + let destrepodir = &destdir.join("repo"); + let destrepo = ostree::Repo::new_for_path(destrepodir); + destrepo.create(ostree::RepoMode::Archive, cancellable)?; + + let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, src_tar)?; + let (commitdata, _) = destrepo.load_commit(&imported_commit)?; + assert_eq!( + CONTENT_CHECKSUM, + ostree::commit_get_content_checksum(&commitdata) + .unwrap() + .as_str() + ); + bash!( + "ostree --repo={destrepodir} ls -R {imported_commit}", + destrepodir = destrepodir.as_str(), + imported_commit = imported_commit.as_str() + )?; + Ok(()) +} From 26cf018d058db08c3c2e32f86f69a4ad0fcb34c7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 3 Apr 2021 21:10:16 +0000 Subject: [PATCH 003/775] Add README.md and other misc docs --- README.md | 38 ++++++++++++++++++++++++++++++++++++++ src/lib.rs | 6 +++--- src/ostree_ext.rs | 2 ++ src/variant_utils.rs | 3 +++ 4 files changed, 46 insertions(+), 3 deletions(-) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..7edbed509 --- /dev/null +++ b/README.md @@ -0,0 +1,38 @@ +# ostree-ext + +Extension APIs for [ostree](https://github.com/ostreedev/ostree/) that are written in Rust, using the [Rust ostree bindings](https://crates.io/crates/ostree). + +## module "tar": tar export/import + +ostree's support for exporting to a tarball is lossy by default. This adds a new export +format that is effectively a new custom repository mode combined with a hardlinked checkout. + +This new export stream can be losslessly imported back into a different repository. + +### Filesystem layout + +``` +. 
+├── etc                # content is at traditional /etc, not /usr/etc
+│   └── passwd
+├── sysroot
+│   └── ostree         # ostree object store with hardlinks to destinations
+│       ├── repo
+│       │   └── objects
+│       │       ├── 00
+│       │       └── 8b
+│       │           ├── 7df143d91c716ecfa5fc1730022f6b421b05cedee8fd52b1fc65a96030ad52.file.xattrs
+│       │           └── 7df143d91c716ecfa5fc1730022f6b421b05cedee8fd52b1fc65a96030ad52.file
+│       └── xattrs  # A new directory with extended attributes, hardlinked with .xattr files
+│           └── 58d523efd29244331392770befa2f8bd55b3ef594532d3b8dbf94b70dc72e674
+└── usr
+    ├── bin
+    │   └── bash
+    └── lib64
+        └── libc.so
+```
+
+Think of this like a new ostree repository mode `tar-stream` or so, although right now it only holds a single commit.
+
+A major distinction is the addition of special `.xattr` files; tar variants and support libraries differ too much for us to rely on this making it through round trips.  And further, to support the webserver-in-container we need e.g. `security.selinux` to not be changed/overwritten by the container runtime.
+
diff --git a/src/lib.rs b/src/lib.rs
index c3e3a97ab..decb214f1 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,7 +4,7 @@
 //! and the Rust bindings to it, adding new functionality
 //! written in Rust.
 
-//#![deny(missing_docs)]
+#![deny(missing_docs)]
 // Good defaults
 #![forbid(unused_must_use)]
 #![deny(unsafe_code)]
@@ -13,6 +13,6 @@
 /// to a string to output to a terminal or logs.
 type Result<T> = anyhow::Result<T>;
 
-mod ostree_ext;
+pub mod ostree_ext;
 pub mod tar;
-pub mod variant_utils;
+mod variant_utils;
diff --git a/src/ostree_ext.rs b/src/ostree_ext.rs
index 2fbe0efcb..3a12c3025 100644
--- a/src/ostree_ext.rs
+++ b/src/ostree_ext.rs
@@ -8,6 +8,8 @@ use std::ptr;
 
 /// Extension functions which fix incorrectly bound APIs.
 pub trait RepoExt {
+    /// Version of [`ostree::Repo::load_variant_if_exists`] that correctly
+    /// returns an [`Option`].
     fn x_load_variant_if_exists(
         &self,
         objtype: ostree::ObjectType,
diff --git a/src/variant_utils.rs b/src/variant_utils.rs
index 0104e14b8..fcd1cd78a 100644
--- a/src/variant_utils.rs
+++ b/src/variant_utils.rs
@@ -1,3 +1,6 @@
+//! Extension APIs for working with GVariant. Not strictly
+//! related to ostree, but included here for convenience.
+
 use glib::translate::*;
 
 #[allow(unsafe_code)]
From ad2f5c69a8e9f51687c49884408f4264cc356bd4 Mon Sep 17 00:00:00 2001
From: Colin Walters 
Date: Sun, 4 Apr 2021 13:38:15 +0000
Subject: [PATCH 004/775] Update to merged ostree-rs git

---
 Cargo.toml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index bc1fa7c4a..38987ccf6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -36,5 +36,5 @@ sh-inline = "0.1.0"
 tempfile = "3.2.0"
 
 [patch.crates-io]
-ostree = { path = '../../../gitlab/fkrull/ostree-rs' }
-ostree-sys = { path = '../../../gitlab/fkrull/ostree-rs/sys' }
+ostree = { git = 'https://gitlab.com/fkrull/ostree-rs', rev = 'fd2b57864938e9b3c0fc0c4496da29a099ad4616' }
+ostree-sys = { git = 'https://gitlab.com/fkrull/ostree-rs', rev = 'fd2b57864938e9b3c0fc0c4496da29a099ad4616' }
From ab9126dc8653d8efee0219c6e9bf0501a2a1b291 Mon Sep 17 00:00:00 2001
From: Colin Walters 
Date: Sun, 4 Apr 2021 13:33:44 +0000
Subject: [PATCH 005/775] wip

---
 src/lib.rs       |  1 +
 tests/it/main.rs | 15 +++++++++------
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/lib.rs b/src/lib.rs
index decb214f1..8380f41e8 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -13,6 +13,7 @@
 /// to a string to output to a terminal or logs. 
type Result<T> = anyhow::Result<T>;
 
+pub mod diff;
 pub mod ostree_ext;
 pub mod tar;
 mod variant_utils;
diff --git a/tests/it/main.rs b/tests/it/main.rs
index 2db095070..203c8df31 100644
--- a/tests/it/main.rs
+++ b/tests/it/main.rs
@@ -9,11 +9,8 @@
 const EXAMPLEOS_TAR: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
 const TESTREF: &str = "exampleos/x86_64/stable";
 const CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
 
-#[context("Generating test OCI")]
-fn generate_test_tarball(dir: &Utf8Path) -> Result<Utf8PathBuf> {
-    let cancellable = gio::NONE_CANCELLABLE;
-    let path = Utf8Path::new(dir);
-    let src_tarpath = &path.join("exampleos.tar.zst");
+fn generate_test_repo(dir: &Utf8Path) -> Result<Utf8PathBuf> {
+    let src_tarpath = &dir.join("exampleos.tar.zst");
     std::fs::write(src_tarpath, EXAMPLEOS_TAR)?;
     bash!(
         indoc! {"
         cd {path}
         ostree --repo=repo-archive init --mode=archive
         ostree --repo=repo-archive commit -b {testref} --tree=tar=exampleos.tar.zst
         ostree --repo=repo-archive show {testref}
     "},
         testref = TESTREF,
         path = path.as_str()
     )?;
     std::fs::remove_file(src_tarpath)?;
-    let repopath = &path.join("repo-archive");
+    Ok(dir.join("repo"))
+}
+
+#[context("Generating test OCI")]
+fn generate_test_tarball(dir: &Utf8Path) -> Result<Utf8PathBuf> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    let repopath = generate_test_repo(dir)?;
     let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?;
     let (_, rev) = repo.read_commit(TESTREF, cancellable)?;
     let (commitv, _) = repo.load_commit(rev.as_str())?;
From 4f7f43c97b38deeacc815111c869243d149aee5e Mon Sep 17 00:00:00 2001
From: Colin Walters 
Date: Sun, 4 Apr 2021 18:00:29 +0000
Subject: [PATCH 006/775] Add diff module

Taken from
https://github.com/coreos/rpm-ostree/blob/master/rust/src/ostree_diff.rs

---
 src/diff.rs                            | 182 +++++++++++++++++++++++++
 tests/it/fixtures/exampleos-v1.tar.zst | Bin 0 -> 492 bytes
 tests/it/main.rs                       |  75 ++++++++--
 3 files changed, 244 insertions(+), 13 deletions(-)
 create mode 100644 src/diff.rs
 create mode 100644 tests/it/fixtures/exampleos-v1.tar.zst

diff --git a/src/diff.rs b/src/diff.rs
new file mode 100644
index 000000000..d5c3ac627
--- /dev/null
+++ b/src/diff.rs
@@ -0,0 +1,182 @@
+//! Compute the difference between two OSTree commits.
+
+/*
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * SPDX-License-Identifier: Apache-2.0 OR MIT
+ */
+
+use anyhow::{Context, Result};
+use fn_error_context::context;
+use gio::prelude::*;
+use ostree::RepoFileExt;
+use std::collections::BTreeSet;
+use std::fmt;
+
+/// Like `g_file_query_info()`, but return None if the target doesn't exist.
+fn query_info_optional(
+    f: &gio::File,
+    queryattrs: &str,
+    queryflags: gio::FileQueryInfoFlags,
+) -> Result<Option<gio::FileInfo>> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    match f.query_info(queryattrs, queryflags, cancellable) {
+        Ok(i) => Ok(Some(i)),
+        Err(e) => {
+            if let Some(ref e2) = e.kind::<gio::IOErrorEnum>() {
+                match e2 {
+                    gio::IOErrorEnum::NotFound => Ok(None),
+                    _ => Err(e.into()),
+                }
+            } else {
+                Err(e.into())
+            }
+        }
+    }
+}
+
+/// A set of file paths.
+pub type FileSet = BTreeSet<String>;
+
+/// Diff between two ostree commits.
+#[derive(Debug, Default)]
+pub struct FileTreeDiff {
+    /// The prefix passed for diffing, e.g. 
/usr
+    pub subdir: Option<String>,
+    /// Files that are new in an existing directory
+    pub added_files: FileSet,
+    /// New directories
+    pub added_dirs: FileSet,
+    /// Files removed
+    pub removed_files: FileSet,
+    /// Directories removed (recursively)
+    pub removed_dirs: FileSet,
+    /// Files that changed (in any way, metadata or content)
+    pub changed_files: FileSet,
+    /// Directories that changed mode/permissions
+    pub changed_dirs: FileSet,
+}
+
+impl fmt::Display for FileTreeDiff {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "files(added:{} removed:{} changed:{}) dirs(added:{} removed:{} changed:{})",
+            self.added_files.len(),
+            self.removed_files.len(),
+            self.changed_files.len(),
+            self.added_dirs.len(),
+            self.removed_dirs.len(),
+            self.changed_dirs.len()
+        )
+    }
+}
+
+fn diff_recurse(
+    prefix: &str,
+    diff: &mut FileTreeDiff,
+    from: &ostree::RepoFile,
+    to: &ostree::RepoFile,
+) -> Result<()> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    let queryattrs = "standard::name,standard::type";
+    let queryflags = gio::FileQueryInfoFlags::NOFOLLOW_SYMLINKS;
+    let from_iter = from.enumerate_children(queryattrs, queryflags, cancellable)?;
+
+    // Iterate over the source (from) directory, and compare with the
+    // target (to) directory. This generates removals and changes.
+    while let Some(from_info) = from_iter.next_file(cancellable)? {
+        let from_child = from_iter.get_child(&from_info).expect("file");
+        let name = from_info.get_name().expect("name");
+        let name = name.to_str().expect("UTF-8 ostree name");
+        let path = format!("{}{}", prefix, name);
+        let to_child = to.get_child(&name).expect("child");
+        let to_info = query_info_optional(&to_child, queryattrs, queryflags)
+            .context("querying optional to")?;
+        let is_dir = matches!(from_info.get_file_type(), gio::FileType::Directory);
+        if to_info.is_some() {
+            let to_child = to_child.downcast::<ostree::RepoFile>().expect("downcast");
+            to_child.ensure_resolved()?;
+            let from_child = from_child.downcast::<ostree::RepoFile>().expect("downcast");
+            from_child.ensure_resolved()?;
+
+            if is_dir {
+                let from_contents_checksum =
+                    from_child.tree_get_contents_checksum().expect("checksum");
+                let to_contents_checksum = to_child.tree_get_contents_checksum().expect("checksum");
+                if from_contents_checksum != to_contents_checksum {
+                    let subpath = format!("{}/", path);
+                    diff_recurse(&subpath, diff, &from_child, &to_child)?;
+                }
+                let from_meta_checksum = from_child.tree_get_metadata_checksum().expect("checksum");
+                let to_meta_checksum = to_child.tree_get_metadata_checksum().expect("checksum");
+                if from_meta_checksum != to_meta_checksum {
+                    diff.changed_dirs.insert(path);
+                }
+            } else {
+                let from_checksum = from_child.get_checksum().expect("checksum");
+                let to_checksum = to_child.get_checksum().expect("checksum");
+                if from_checksum != to_checksum {
+                    diff.changed_files.insert(path);
+                }
+            }
+        } else if is_dir {
+            diff.removed_dirs.insert(path);
+        } else {
+            diff.removed_files.insert(path);
+        }
+    }
+    // Iterate over the target (to) directory, and find any
+    // files/directories which were not present in the source.
+    let to_iter = to.enumerate_children(queryattrs, queryflags, cancellable)?;
+    while let Some(to_info) = to_iter.next_file(cancellable)? 
{
+        let name = to_info.get_name().expect("name");
+        let name = name.to_str().expect("UTF-8 ostree name");
+        let path = format!("{}{}", prefix, name);
+        let from_child = from.get_child(name).expect("child");
+        let from_info = query_info_optional(&from_child, queryattrs, queryflags)
+            .context("querying optional from")?;
+        if from_info.is_some() {
+            continue;
+        }
+        let is_dir = matches!(to_info.get_file_type(), gio::FileType::Directory);
+        if is_dir {
+            diff.added_dirs.insert(path);
+        } else {
+            diff.added_files.insert(path);
+        }
+    }
+    Ok(())
+}
+
+/// Given two ostree commits, compute the diff between them.
+#[context("Computing ostree diff")]
+pub fn diff<P: AsRef<str>>(
+    repo: &ostree::Repo,
+    from: &str,
+    to: &str,
+    subdir: Option<P>,
+) -> Result<FileTreeDiff> {
+    let subdir = subdir.as_ref();
+    let subdir = subdir.map(|s| s.as_ref());
+    let (fromroot, _) = repo.read_commit(from, gio::NONE_CANCELLABLE)?;
+    let (toroot, _) = repo.read_commit(to, gio::NONE_CANCELLABLE)?;
+    let (fromroot, toroot) = if let Some(subdir) = subdir {
+        (
+            fromroot.resolve_relative_path(subdir).expect("path"),
+            toroot.resolve_relative_path(subdir).expect("path"),
+        )
+    } else {
+        (fromroot, toroot)
+    };
+    let fromroot = fromroot.downcast::<ostree::RepoFile>().expect("downcast");
+    fromroot.ensure_resolved()?;
+    let toroot = toroot.downcast::<ostree::RepoFile>().expect("downcast");
+    toroot.ensure_resolved()?;
+    let mut diff = FileTreeDiff {
+        subdir: subdir.map(|s| s.to_string()),
+        ..Default::default()
+    };
+    diff_recurse("/", &mut diff, &fromroot, &toroot)?;
+    Ok(diff)
+}
diff --git a/tests/it/fixtures/exampleos-v1.tar.zst b/tests/it/fixtures/exampleos-v1.tar.zst
new file mode 100644
index 0000000000000000000000000000000000000000..de20d2dce4bf76cc51067a9267c79308817f9eda
GIT binary patch
literal 492
zcmV@cx#<->&%R&WL6bf2e;Z4h8cNNXy4iUcX($<{5})Kvrc$2%`LP}pNR-h6N6<=(tc(_P
zSngzlpp~AR=1>sQ2&Z$IN(Bc)SezdGNeHIa(vD?S@rT`7PsG*rt49$>~CTR>1
z1Cc-i8LK?o261cG1~@tXnuBLw#X@HI97xyl;{
zKme5-Y4>pe{Bt7?&~2z*D4+0E3c0O;vPWh0k%yBf&iN%*o!IPODzaQ=yXu)5N$|p
iC_g3*iJEGFCdmiLv`jsqT=56C`CgaE8Knsu(bNOSeaT?}

literal 0
HcmV?d00001

diff --git a/tests/it/main.rs b/tests/it/main.rs
index 203c8df31..8651e8f4c 100644
--- a/tests/it/main.rs
+++ b/tests/it/main.rs
@@ -5,28 +5,48 @@ use indoc::indoc;
 use sh_inline::bash;
 use std::io::Write;
 
-const EXAMPLEOS_TAR: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
+const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
+const EXAMPLEOS_V1: &[u8] = include_bytes!("fixtures/exampleos-v1.tar.zst");
 const TESTREF: &str = "exampleos/x86_64/stable";
-const CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
+const EXAMPLEOS_CONTENT_CHECKSUM: &str =
+    "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
 
 fn generate_test_repo(dir: &Utf8Path) -> Result<Utf8PathBuf> {
     let src_tarpath = &dir.join("exampleos.tar.zst");
-    std::fs::write(src_tarpath, EXAMPLEOS_TAR)?;
+    std::fs::write(src_tarpath, EXAMPLEOS_V0)?;
+
     bash!(
         indoc! 
{" - cd {path} - ostree --repo=repo-archive init --mode=archive - ostree --repo=repo-archive commit -b {testref} --tree=tar=exampleos.tar.zst - ostree --repo=repo-archive show {testref} + cd {dir} + ostree --repo=repo init --mode=archive + ostree --repo=repo commit -b {testref} --tree=tar=exampleos.tar.zst + ostree --repo=repo show {testref} "}, testref = TESTREF, - path = path.as_str() + dir = dir.as_str() )?; std::fs::remove_file(src_tarpath)?; Ok(dir.join("repo")) } -#[context("Generating test OCI")] +fn update_repo(repopath: &Utf8Path) -> Result<()> { + let repotmp = &repopath.join("tmp"); + let srcpath = &repotmp.join("exampleos-v1.tar.zst"); + std::fs::write(srcpath, EXAMPLEOS_V1)?; + let srcpath = srcpath.as_str(); + let repopath = repopath.as_str(); + let testref = TESTREF; + bash!( + "ostree --repo={repopath} commit -b {testref} --tree=tar={srcpath}", + testref, + repopath, + srcpath + )?; + std::fs::remove_file(srcpath)?; + Ok(()) +} + +#[context("Generating test tarball")] fn generate_test_tarball(dir: &Utf8Path) -> Result { let cancellable = gio::NONE_CANCELLABLE; let repopath = generate_test_repo(dir)?; @@ -37,9 +57,9 @@ fn generate_test_tarball(dir: &Utf8Path) -> Result { ostree::commit_get_content_checksum(&commitv) .unwrap() .as_str(), - CONTENT_CHECKSUM + EXAMPLEOS_CONTENT_CHECKSUM ); - let destpath = path.join("exampleos-export.tar"); + let destpath = dir.join("exampleos-export.tar"); let mut outf = std::io::BufWriter::new(std::fs::File::create(&destpath)?); ostree_ext::tar::export_commit(repo, rev.as_str(), &mut outf)?; outf.flush()?; @@ -47,7 +67,7 @@ fn generate_test_tarball(dir: &Utf8Path) -> Result { } #[test] -fn test_e2e() -> Result<()> { +fn test_tar_import_export() -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; let tempdir = tempfile::tempdir()?; @@ -65,7 +85,7 @@ fn test_e2e() -> Result<()> { let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, src_tar)?; let (commitdata, _) = destrepo.load_commit(&imported_commit)?; assert_eq!( - CONTENT_CHECKSUM, + EXAMPLEOS_CONTENT_CHECKSUM, ostree::commit_get_content_checksum(&commitdata) .unwrap() .as_str() @@ -77,3 +97,32 @@ fn test_e2e() -> Result<()> { )?; Ok(()) } + +#[test] +fn test_diff() -> Result<()> { + let cancellable = gio::NONE_CANCELLABLE; + let tempdir = tempfile::tempdir()?; + let tempdir = Utf8Path::from_path(tempdir.path()).unwrap(); + let repopath = &generate_test_repo(tempdir)?; + update_repo(repopath)?; + let from = &format!("{}^", TESTREF); + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?; + let subdir: Option<&str> = None; + let diff = ostree_ext::diff::diff(repo, from, TESTREF, subdir)?; + assert!(diff.subdir.is_none()); + assert_eq!(diff.added_dirs.len(), 1); + assert_eq!(diff.added_dirs.iter().nth(0).unwrap(), "/usr/share"); + assert_eq!(diff.added_files.len(), 1); + assert_eq!(diff.added_files.iter().nth(0).unwrap(), "/usr/bin/newbin"); + assert_eq!(diff.removed_files.len(), 1); + assert_eq!(diff.removed_files.iter().nth(0).unwrap(), "/usr/bin/foo"); + let diff = ostree_ext::diff::diff(repo, from, TESTREF, Some("/usr"))?; + assert_eq!(diff.subdir.as_ref().unwrap(), "/usr"); + assert_eq!(diff.added_dirs.len(), 1); + assert_eq!(diff.added_dirs.iter().nth(0).unwrap(), "/share"); + assert_eq!(diff.added_files.len(), 1); + assert_eq!(diff.added_files.iter().nth(0).unwrap(), "/bin/newbin"); + assert_eq!(diff.removed_files.len(), 1); + assert_eq!(diff.removed_files.iter().nth(0).unwrap(), "/bin/foo"); + Ok(()) +} From 
562e710933a08bf0a145c42518008a885c23771a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 5 Apr 2021 14:52:34 +0000 Subject: [PATCH 007/775] README.md: Add diff module --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 7edbed509..b43134f4a 100644 --- a/README.md +++ b/README.md @@ -36,3 +36,12 @@ Think of this like a new ostree repository mode `tar-stream` or so, although rig A major distinction is the addition of special `.xattr` files; tar variants and support library differ too much for us to rely on this making it through round trips. And further, to support the webserver-in-container we need e.g. `security.selinux` to not be changed/overwritten by the container runtime. +## module "diff": Compute the difference between two ostree commits + +```rust + let subdir: Option<&str> = None; + let refname = "fedora/coreos/x86_64/stable"; + let diff = ostree_ext::diff::diff(repo, &format!("{}^", refname), refname, subdir)?; +``` + +This is used by `rpm-ostree ex apply-live`. From e011ce29206964b7e2f1a2caa0c973951eb72ded Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 6 Apr 2021 00:47:34 +0000 Subject: [PATCH 008/775] Re-merge https://github.com/cgwalters/ostree-container I just split them out, but I think there's a decent argument for keeping them together. We'd likely just want the container bits under a feature flag. TODO: - feature flag - CLI `ostree-ext-cli container import|export` --- Cargo.toml | 41 +-- README.md | 121 ++++++++ cli/Cargo.toml | 18 ++ cli/src/main.rs | 99 ++++++ lib/Cargo.toml | 45 +++ lib/src/container/buildoci.rs | 54 ++++ lib/src/container/client.rs | 91 ++++++ {src => lib/src/container}/import.rs | 0 lib/src/container/mod.rs | 18 ++ lib/src/container/oci.rs | 290 ++++++++++++++++++ .../tests}/it/fixtures/exampleos.tar.zst | Bin lib/src/container/tests/it/main.rs | 117 +++++++ {src => lib/src}/diff.rs | 0 lib/src/import.rs | 2 + {src => lib/src}/lib.rs | 1 + {src => lib/src}/ostree_ext.rs | 0 {src => lib/src}/tar/export.rs | 0 {src => lib/src}/tar/import.rs | 0 {src => lib/src}/tar/mod.rs | 0 {src => lib/src}/variant_utils.rs | 0 .../tests}/it/fixtures/exampleos-v1.tar.zst | Bin lib/tests/it/fixtures/exampleos.tar.zst | Bin 0 -> 1052 bytes {tests => lib/tests}/it/main.rs | 0 src/.gitignore | 1 - 24 files changed, 862 insertions(+), 36 deletions(-) create mode 100644 cli/Cargo.toml create mode 100644 cli/src/main.rs create mode 100644 lib/Cargo.toml create mode 100644 lib/src/container/buildoci.rs create mode 100644 lib/src/container/client.rs rename {src => lib/src/container}/import.rs (100%) create mode 100644 lib/src/container/mod.rs create mode 100644 lib/src/container/oci.rs rename {tests => lib/src/container/tests}/it/fixtures/exampleos.tar.zst (100%) create mode 100644 lib/src/container/tests/it/main.rs rename {src => lib/src}/diff.rs (100%) create mode 100644 lib/src/import.rs rename {src => lib/src}/lib.rs (96%) rename {src => lib/src}/ostree_ext.rs (100%) rename {src => lib/src}/tar/export.rs (100%) rename {src => lib/src}/tar/import.rs (100%) rename {src => lib/src}/tar/mod.rs (100%) rename {src => lib/src}/variant_utils.rs (100%) rename {tests => lib/tests}/it/fixtures/exampleos-v1.tar.zst (100%) create mode 100644 lib/tests/it/fixtures/exampleos.tar.zst rename {tests => lib/tests}/it/main.rs (100%) delete mode 100644 src/.gitignore diff --git a/Cargo.toml b/Cargo.toml index 38987ccf6..20c82d151 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,40 +1,11 @@ -[package] -authors = ["Colin Walters "] 
-edition = "2018"
-license = "MIT OR Apache-2.0"
-name = "ostree-ext"
-readme = "README.md"
-repository = "https://github.com/cgwalters/ostree-ext"
-version = "0.1.0"
+[workspace]
+members = ["cli", "lib"]
 
-[dependencies]
-anyhow = "1.0"
-camino = "1.0.4"
-crossbeam = "0.8.0"
-fn-error-context = "0.1.1"
-gio = "0.9.1"
-glib = "0.10.3"
-glib-sys = "0.10.1"
-gvariant = "0.4.0"
-hex = "0.4.3"
-libc = "0.2.92"
-openat = "0.1.20"
-openat-ext = "0.1.13"
-openssl = "0.10.33"
-os_pipe = "*"
-ostree-sys = "0.7.2"
-tar = "0.4.33"
-
-[dependencies.ostree]
-features = ["v2021_1"]
-version = "0.10.0"
-
-[dev-dependencies]
-clap = "2.33.3"
-indoc = "1.0.3"
-sh-inline = "0.1.0"
-tempfile = "3.2.0"
+[profile.release]
+codegen-units = 1
+lto = "thin"
 
 [patch.crates-io]
+oci-distribution = { git = 'https://github.com/cgwalters/krustlet', branch = 'streaming-client' }
 ostree = { git = 'https://gitlab.com/fkrull/ostree-rs', rev = 'fd2b57864938e9b3c0fc0c4496da29a099ad4616' }
 ostree-sys = { git = 'https://gitlab.com/fkrull/ostree-rs', rev = 'fd2b57864938e9b3c0fc0c4496da29a099ad4616' }
diff --git a/README.md b/README.md
index b43134f4a..dbfc3ea10 100644
--- a/README.md
+++ b/README.md
@@ -45,3 +45,124 @@ A major distinction is the addition of special `.xattr` files; tar variants and
 ```
 
 This is used by `rpm-ostree ex apply-live`.
+
+## module "container": Encapsulate ostree commits in OCI/Docker images
+
+
+### Bundle an OSTree repository into a container
+
+Given an OSTree repository, running *outside* a container:
+
+```
+$ ostree-container build --repo=/path/to/repo --ref=exampleos/x86_64/stable --oci-dir=/output/exampleos
+```
+
+`--oci-dir` creates an [OpenContainers image](https://github.com/opencontainers/image-spec/blob/master/spec.md) layout.
+
+You can then e.g.
+
+```
+$ skopeo copy oci:/output/exampleos containers-storage:localhost/exampleos
+$ podman run --rm -ti --entrypoint bash localhost/exampleos
+```
+
+Another option is `--push quay.io/exampleos/exampleos:stable` which would push directly to a registry.  This is particularly intended to be usable inside a fully unprivileged container, just mounting in the secrets necessary to push to the target registry.
+
+### Take an arbitrary container and convert it to be OSTree ready
+
+There's nothing conceptually stopping us from having tooling that takes
+an arbitrary container image and just makes it "ostree ready".  Or even
+just dynamically accepting, on the client side, a container image that has a kernel.
+
+This *may* be in scope at some point in the future.
+
+#### ostree-containers and derivation
+
+For an ostree-based OS that is derived from Fedora,
+`ostree-container build --from=registry.fedoraproject.org/fedora:33` would cause the generated container image to derive from the parent; in particular we de-duplicate content in the ostree commit from the base.
+
+This would work equally well for a Debian+ostree OS to do `--from=docker.io/debian:stable`.
+
+(In fact we may *require* this; TBD)
+
+### Running an ostree-container as a webserver
+
+It also works to run the ostree-container as a webserver, which will expose a webserver that responds to `GET /repo`.
+
+The effect will be as if it was built from a `Dockerfile` that contains `EXPOSE 8080`; it will work to e.g.
+`kubectl run nginx --image=quay.io/exampleos/exampleos:latest --replicas=1`
+and then also create a service for it.
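+
+For comparison, a minimal (illustrative) sketch of the export flow via this
+crate's Rust API rather than the CLI; the repository path and ref below are
+examples, not fixed values:
+
+```rust
+// Illustrative sketch: export an ostree ref into an OCI directory layout
+// using the container::buildoci module. Paths and refs are examples only.
+fn export_to_oci() -> anyhow::Result<()> {
+    let repo = ostree::Repo::open_at(libc::AT_FDCWD, "/path/to/repo", gio::NONE_CANCELLABLE)?;
+    let target = ostree_ext::container::buildoci::Target::OciDir(std::path::Path::new("/output/exampleos"));
+    ostree_ext::container::buildoci::build(&repo, "exampleos/x86_64/stable", target)?;
+    Ok(())
+}
+```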
+
+### Pulling an ostree-container directly
+
+A primary goal of this effort is to make it fully native to an ostree-based operating system to pull a container image directly too.
+
+This project will hence provide a CLI tool and a Rust library which speaks the Docker/OCI protocols enough to directly pull the container image, extracting it into the system `/ostree/repo` repository.
+
+An important aspect of this is that the system will validate the GPG signature of the target OSTree commit, as well as validating the sha256 of the contained objects.
+
+```
+$ ostree-container pull --repo=/ostree/repo --ref=exampleos/x86_64/stable quay.io/exampleos/exampleos:stable
+```
+
+A project like rpm-ostree could hence support:
+
+```
+$ rpm-ostree rebase quay.io/exampleos/exampleos:stable
+```
+(Along with the usual `rpm-ostree upgrade` knowing to pull that container image)
+
+### Integrating with future container deltas
+
+See https://blogs.gnome.org/alexl/2020/05/13/putting-container-updates-on-a-diet/
+
+
+# ostree vs OCI/Docker
+
+Looking at this, one might ask: why even have ostree? Why not just have the operating system directly use something like the [containers/image](https://github.com/containers/image/) storage?
+
+The first answer to this is that it's a goal of this project to "hide" ostree usage; it should feel "native" to ship and manage the operating system "as if" it was just running a container.
+
+But, ostree has a *lot* of stuff built up around it and we can't just throw that away.
+
+## Understanding kernels
+
+ostree was designed from the start to manage bootable operating system trees - hence the name of the project.  For example, ostree understands bootloaders and kernels/initramfs images.  Container tools don't.
+
+## Signing
+
+ostree also quite early on gained an opinionated mechanism to sign images (commits) via GPG.  As of this time there are multiple competing mechanisms for container signing, and none is widely deployed.
+For running random containers from `docker.io`, it can be OK to just trust TLS or pin via `@sha256` - a core idea of Docker is that containers are isolated and it should be reasonably safe to
+at least try out random containers.  But the *operating system's* integrity is paramount, because it is ultimately trusted.
+
+## Deduplication
+
+ostree's hardlink store is designed around de-duplication.  Operating systems can get large and they are most natural as "base images" - which in the Docker container model
+are duplicated on disk.  Of course, storage systems like containers/image could learn to de-duplicate; but that would be a use case that *mostly* applies to just the operating system.
+
+## Being able to remove all container images
+
+In Kubernetes, the kubelet will prune the image storage periodically, removing images not backed by containers.  If we store the operating system itself as an image...well, we'd
+need to do something like teach the container storage to have the concept of an image that is "pinned" because it's actually the booted filesystem.  Or create a "fake" container
+representing the running operating system.
+
+Other projects in this space ended up having an "early docker" distinct from the main container runtime used for applications.
+
+## Independence of complexity of container storage
+
+This stuff could be done - but the container storage and tooling is already quite complex, and introducing a special case like this would be treading into new ground.
+
+Today for example, cri-o ships a `crio-wipe.service` which removes all container storage across major version upgrades.
+
+ostree is a fairly simple format and has been 100% stable throughout its life so far.
+
+## ostree format has per-file integrity
+
+More on this here: https://ostreedev.github.io/ostree/related-projects/#docker
+
+## Allow hiding ostree while not reinventing everything
+
+So, again the goal here is: make it feel "native" to ship and manage the operating system "as if" it was just running a container without throwing away everything in ostree today.
+
+
+
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
new file mode 100644
index 000000000..d3614e692
--- /dev/null
+++ b/cli/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "ostree-ext-cli"
+version = "0.1.0"
+authors = ["Colin Walters "]
+edition = "2018"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/cgwalters/ostree-container"
+readme = "README.md"
+
+[dependencies]
+anyhow = "1.0"
+ostree-ext = { path = "../lib" }
+clap = "2.33.3"
+structopt = "0.3.21"
+ostree = { version = "0.10.0", features = ["v2021_1"] }
+libc = "0.2.92"
+tokio = { version = "1", features = ["full"] }
+gio = "0.9.1"
diff --git a/cli/src/main.rs b/cli/src/main.rs
new file mode 100644
index 000000000..8d6c54321
--- /dev/null
+++ b/cli/src/main.rs
@@ -0,0 +1,99 @@
+use anyhow::Result;
+use structopt::StructOpt;
+
+#[derive(Debug, StructOpt)]
+struct BuildOpts {
+    #[structopt(long)]
+    repo: String,
+
+    #[structopt(long = "ref")]
+    ostree_ref: String,
+
+    #[structopt(long)]
+    oci_dir: String,
+}
+
+#[derive(Debug, StructOpt)]
+struct ImportOpts {
+    /// Path to the repository
+    #[structopt(long)]
+    repo: String,
+
+    /// Path to a tar archive; if unspecified, will be stdin.  Currently the tar archive must not be compressed.
+    path: Option<String>,
+}
+
+#[derive(Debug, StructOpt)]
+struct ExportOpts {
+    /// Path to the repository
+    #[structopt(long)]
+    repo: String,
+
+    /// The ostree ref or commit to export
+    rev: String,
+}
+
+#[derive(Debug, StructOpt)]
+enum TarOpts {
+    /// Import a tar archive (currently, must not be compressed)
+    Import(ImportOpts),
+
+    /// Write a tar archive to stdout
+    Export(ExportOpts),
+}
+
+// #[derive(Debug, StructOpt)]
+// enum ContainerOpts {
+//     /// Import an ostree commit embedded in a container image
+//     Import {
+//         /// Path to remote image, e.g. quay.io/exampleos/exampleos:latest
+//         imgref: String,
+//     },
+
+//     /// Export an ostree commit to an OCI layout
+//     Export(ExportOpts),
+// }
+
+#[derive(Debug, StructOpt)]
+#[structopt(name = "ostree-ext")]
+#[structopt(rename_all = "kebab-case")]
+enum Opt {
+    /// Import and export to tar
+    Tar(TarOpts),
+    // Container(ContainerOpts),
+}
+
+fn tar_import(opts: &ImportOpts) -> Result<()> {
+    let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?;
+    let imported = if let Some(path) = opts.path.as_ref() {
+        let instream = std::io::BufReader::new(std::fs::File::open(path)?);
+        ostree_ext::tar::import_tar(repo, instream)?
+    } else {
+        let stdin = std::io::stdin();
+        let stdin = stdin.lock();
+        ostree_ext::tar::import_tar(repo, stdin)? 
+ }; + println!("Imported: {}", imported); + Ok(()) +} + +fn tar_export(opts: &ExportOpts) -> Result<()> { + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; + ostree_ext::tar::export_commit(repo, opts.rev.as_str(), std::io::stdout())?; + Ok(()) +} + +fn run() -> Result<()> { + let opt = Opt::from_args(); + match opt { + Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt), + Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt), + } +} + +fn main() { + if let Err(e) = run() { + eprintln!("error: {:#}", e); + std::process::exit(1); + } +} diff --git a/lib/Cargo.toml b/lib/Cargo.toml new file mode 100644 index 000000000..3add4b9c0 --- /dev/null +++ b/lib/Cargo.toml @@ -0,0 +1,45 @@ +[package] +authors = ["Colin Walters "] +edition = "2018" +license = "MIT OR Apache-2.0" +name = "ostree-ext" +readme = "README.md" +repository = "https://github.com/cgwalters/ostree-ext" +version = "0.1.0" + +[dependencies] +anyhow = "1.0" +camino = "1.0.4" +crossbeam = "0.8.0" +fn-error-context = "0.1.1" +gio = "0.9.1" +glib = "0.10.3" +glib-sys = "0.10.1" +gvariant = "0.4.0" +hex = "0.4.3" +libc = "0.2.92" +openat = "0.1.20" +openat-ext = "0.1.13" +openssl = "0.10.33" +ostree = { version = "0.10.0", features = ["v2021_1" ]} +os_pipe = "0.9.2" +ostree-sys = "0.7.2" +tar = "0.4.33" + +#ostree-container deps +cjson = "0.1.1" +flate2 = "1.0.20" +futures = "0.3.13" +phf = { version = "0.8.0", features = ["macros"] } +nix = "0.20.0" +oci-distribution = "0.6.0" +tokio = { version = "1", features = ["full"] } +serde = "1.0.125" +serde_json = "1.0.64" + +[dev-dependencies] +clap = "2.33.3" +indoc = "1.0.3" +sh-inline = "0.1.0" +tempfile = "3.2.0" +structopt = "0.3.21" diff --git a/lib/src/container/buildoci.rs b/lib/src/container/buildoci.rs new file mode 100644 index 000000000..a706fb739 --- /dev/null +++ b/lib/src/container/buildoci.rs @@ -0,0 +1,54 @@ +//! 
APIs for creating container images from OSTree commits
+
+use super::oci;
+use super::Result;
+use crate::tar as ostree_tar;
+use anyhow::Context;
+use fn_error_context::context;
+use std::path::Path;
+
+/// The location to store the generated image
+pub enum Target<'a> {
+    /// Generate an Open Containers image directory layout
+    OciDir(&'a Path),
+}
+
+/// Write an ostree commit to an OCI blob
+#[context("Writing ostree root to blob")]
+fn export_ostree_ref_to_blobdir(
+    repo: &ostree::Repo,
+    rev: &str,
+    ocidir: &openat::Dir,
+) -> Result<oci::Layer> {
+    let commit = repo.resolve_rev(rev, false)?.unwrap();
+    let mut w = oci::LayerWriter::new(ocidir)?;
+    ostree_tar::export_commit(repo, commit.as_str(), &mut w)?;
+    w.complete()
+}
+
+/// Generate an OCI image from a given ostree root
+#[context("Building oci")]
+fn build_oci(repo: &ostree::Repo, commit: &str, ocidir: &Path) -> Result<()> {
+    // Explicitly error if the target exists
+    std::fs::create_dir(ocidir).context("Creating OCI dir")?;
+    let ocidir = &openat::Dir::open(ocidir)?;
+    let writer = &mut oci::OciWriter::new(ocidir)?;
+
+    let rootfs_blob = export_ostree_ref_to_blobdir(repo, commit, ocidir)?;
+    writer.set_root_layer(rootfs_blob);
+    writer.complete()?;
+
+    Ok(())
+}
+
+/// Helper for `build()` that avoids generics
+fn build_impl(repo: &ostree::Repo, ostree_ref: &str, target: Target) -> Result<()> {
+    match target {
+        Target::OciDir(d) => build_oci(repo, ostree_ref, d),
+    }
+}
+
+/// Given an OSTree repository and ref, generate a container image
+pub fn build<S: AsRef<str>>(repo: &ostree::Repo, ostree_ref: S, target: Target) -> Result<()> {
+    build_impl(repo, ostree_ref.as_ref(), target)
+}
diff --git a/lib/src/container/client.rs b/lib/src/container/client.rs
new file mode 100644
index 000000000..1e4f13c64
--- /dev/null
+++ b/lib/src/container/client.rs
@@ -0,0 +1,91 @@
+//! 
APIs for extracting OSTree commits from container images
+
+use std::io::Write;
+
+use super::Result;
+use anyhow::anyhow;
+use fn_error_context::context;
+use oci_distribution::manifest::OciDescriptor;
+
+/// The result of an import operation
+#[derive(Debug)]
+pub struct Import {
+    /// The ostree commit that was imported
+    pub ostree_commit: String,
+    /// The image digest retrieved
+    pub image_digest: String,
+}
+
+#[context("Fetching layer descriptor")]
+async fn fetch_layer_descriptor(
+    client: &mut oci_distribution::Client,
+    image_ref: &oci_distribution::Reference,
+) -> Result<(String, OciDescriptor)> {
+    let (manifest, digest) = client.pull_manifest(image_ref).await?;
+    let mut layers = manifest.layers;
+    let orig_layer_count = layers.len();
+    layers.retain(|layer| {
+        matches!(
+            layer.media_type.as_str(),
+            super::oci::DOCKER_TYPE_LAYER | oci_distribution::manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE
+        )
+    });
+    let n = layers.len();
+
+    if let Some(layer) = layers.into_iter().next() {
+        if n > 1 {
+            Err(anyhow!("Expected 1 layer, found {}", n))
+        } else {
+            Ok((digest, layer))
+        }
+    } else {
+        Err(anyhow!("No layers found (orig: {})", orig_layer_count))
+    }
+}
+
+#[allow(unsafe_code)]
+#[context("Importing {}", image_ref)]
+async fn import_impl(repo: &ostree::Repo, image_ref: &str) -> Result<Import> {
+    let image_ref: oci_distribution::Reference = image_ref.parse()?;
+    let client = &mut oci_distribution::Client::default();
+    let auth = &oci_distribution::secrets::RegistryAuth::Anonymous;
+    client
+        .auth(
+            &image_ref,
+            auth,
+            &oci_distribution::secrets::RegistryOperation::Pull,
+        )
+        .await?;
+    let (image_digest, layer) = fetch_layer_descriptor(client, &image_ref).await?;
+
+    let req = client
+        .request_layer(&image_ref, &layer.digest)
+        .await?
+        .bytes_stream();
+    let (pipein, mut pipeout) = os_pipe::pipe()?;
+    let copier = tokio::task::spawn_blocking(move || -> anyhow::Result<()> {
+        let req = futures::executor::block_on_stream(req);
+        for v in req {
+            let v = v?;
+            pipeout.write_all(&v)?;
+        }
+        Ok(())
+    });
+    let repo = repo.clone();
+    let import = tokio::task::spawn_blocking(move || {
+        let gz = flate2::read::GzDecoder::new(pipein);
+        crate::tar::import_tar(&repo, gz)
+    });
+    let ostree_commit = import.await??;
+    copier.await??;
+
+    Ok(Import {
+        ostree_commit,
+        image_digest,
+    })
+}
+
+/// Download and import the referenced container
+pub async fn import<I: AsRef<str>>(repo: &ostree::Repo, image_ref: I) -> Result<Import> {
+    Ok(import_impl(repo, image_ref.as_ref()).await?)
+}
diff --git a/src/import.rs b/lib/src/container/import.rs
similarity index 100%
rename from src/import.rs
rename to lib/src/container/import.rs
diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
new file mode 100644
index 000000000..613283e63
--- /dev/null
+++ b/lib/src/container/mod.rs
@@ -0,0 +1,18 @@
+//! # APIs bridging OSTree and container images
+//!
+//! This crate contains APIs to bidirectionally map
+//! between OSTree repositories and container images.
+
+//#![deny(missing_docs)]
+// Good defaults
+#![forbid(unused_must_use)]
+#![deny(unsafe_code)]
+
+/// Our generic catchall fatal error, expected to be converted
+/// to a string to output to a terminal or logs.
+type Result<T> = anyhow::Result<T>;
+
+pub mod buildoci;
+pub mod client;
+
+pub mod oci;
diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs
new file mode 100644
index 000000000..25cd2df56
--- /dev/null
+++ b/lib/src/container/oci.rs
@@ -0,0 +1,290 @@
+//! 
Unstable OCI API
+use anyhow::{anyhow, Result};
+use flate2::write::GzEncoder;
+use fn_error_context::context;
+use openat_ext::*;
+use openssl::hash::{Hasher, MessageDigest};
+use phf::phf_map;
+use serde::{Deserialize, Serialize};
+use std::io::prelude::*;
+
+/// Map the value from `uname -m` to the Go architecture.
+/// TODO find a more canonical home for this.
+static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! {
+    "x86_64" => "amd64",
+    "aarch64" => "arm64",
+};
+
+// OCI types, see https://github.com/opencontainers/image-spec/blob/master/media-types.md
+const OCI_TYPE_CONFIG_JSON: &str = "application/vnd.oci.image.config.v1+json";
+const OCI_TYPE_MANIFEST_JSON: &str = "application/vnd.oci.image.manifest.v1+json";
+const OCI_TYPE_LAYER: &str = "application/vnd.oci.image.layer.v1.tar+gzip";
+
+pub(crate) const DOCKER_TYPE_LAYER: &str = "application/vnd.docker.image.rootfs.diff.tar.gzip";
+
+// FIXME get rid of this after updating to https://github.com/coreos/openat-ext/pull/27
+const TMPBLOB: &str = ".tmpblob";
+/// Path inside an OCI directory to the blobs
+const BLOBDIR: &str = "blobs/sha256";
+
+fn default_schema_version() -> u32 {
+    2
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct IndexPlatform {
+    pub architecture: String,
+    pub os: String,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct IndexManifest {
+    pub media_type: String,
+    pub digest: String,
+    pub size: u64,
+
+    pub platform: Option<IndexPlatform>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct Index {
+    #[serde(default = "default_schema_version")]
+    pub schema_version: u32,
+
+    pub manifests: Vec<IndexManifest>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct ManifestLayer {
+    pub media_type: String,
+    pub digest: String,
+    pub size: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) struct Manifest {
+    #[serde(default = "default_schema_version")]
+    pub schema_version: u32,
+
+    pub layers: Vec<ManifestLayer>,
+}
+
+/// Completed blob metadata
+#[derive(Debug)]
+pub(crate) struct Blob {
+    pub(crate) sha256: String,
+    pub(crate) size: u64,
+}
+
+impl Blob {
+    pub(crate) fn digest_id(&self) -> String {
+        format!("sha256:{}", self.sha256)
+    }
+}
+
+/// Completed layer metadata
+#[derive(Debug)]
+pub(crate) struct Layer {
+    pub(crate) blob: Blob,
+    pub(crate) uncompressed_sha256: String,
+}
+
+/// Create an OCI blob.
+pub(crate) struct BlobWriter<'a> {
+    ocidir: &'a openat::Dir,
+    pub(crate) hash: Hasher,
+    pub(crate) target: Option<FileWriter<'a>>,
+    size: u64,
+}
+
+/// Create an OCI layer (also a blob).
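+/// The layer's tar stream is fed through a gzip compressor, and two digests
+/// are tracked along the way: the sha256 of the compressed blob (which names
+/// the blob in the OCI manifest) and the sha256 of the uncompressed content
+/// (recorded as the image config's `diff_ids` entry).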
+pub(crate) struct LayerWriter<'a> {
+    bw: BlobWriter<'a>,
+    uncompressed_hash: Hasher,
+    compressor: GzEncoder<Vec<u8>>,
+}
+
+pub(crate) struct OciWriter<'a> {
+    pub(crate) dir: &'a openat::Dir,
+
+    root_layer: Option<Layer>,
+}
+
+/// Write a serializable data (JSON) as an OCI blob
+#[context("Writing json blob")]
+fn write_json_blob<S: serde::Serialize>(ocidir: &openat::Dir, v: &S) -> Result<Blob> {
+    let mut w = BlobWriter::new(ocidir)?;
+    {
+        cjson::to_writer(&mut w, v).map_err(|e| anyhow!("{:?}", e))?;
+    }
+
+    w.complete()
+}
+
+impl<'a> OciWriter<'a> {
+    pub(crate) fn new(dir: &'a openat::Dir) -> Result<Self> {
+        dir.ensure_dir_all(BLOBDIR, 0o755)?;
+        dir.write_file_contents("oci-layout", 0o644, r#"{"imageLayoutVersion":"1.0.0"}"#)?;
+
+        Ok(Self {
+            dir,
+            root_layer: None,
+        })
+    }
+
+    pub(crate) fn set_root_layer(&mut self, layer: Layer) {
+        assert!(self.root_layer.replace(layer).is_none())
+    }
+
+    #[context("Writing OCI")]
+    pub(crate) fn complete(&mut self) -> Result<()> {
+        let utsname = nix::sys::utsname::uname();
+        let arch = MACHINE_TO_OCI[utsname.machine()];
+
+        let rootfs_blob = self.root_layer.as_ref().unwrap();
+        let root_layer_id = format!("sha256:{}", rootfs_blob.uncompressed_sha256);
+
+        let config = serde_json::json!({
+            "architecture": arch,
+            "os": "linux",
+            "rootfs": {
+                "type": "layers",
+                "diff_ids": [ root_layer_id ],
+            },
+            "history": [
+                {
+                    "commit": "created by ostree-container",
+                }
+            ]
+        });
+        let config_blob = write_json_blob(self.dir, &config)?;
+
+        let manifest_data = serde_json::json!({
+            "schemaVersion": 2,
+            "config": {
+                "mediaType": OCI_TYPE_CONFIG_JSON,
+                "size": config_blob.size,
+                "digest": config_blob.digest_id(),
+            },
+            "layers": [
+                { "mediaType": OCI_TYPE_LAYER,
+                  "size": rootfs_blob.blob.size,
+                  "digest": rootfs_blob.blob.digest_id(),
+                }
+            ],
+        });
+        let manifest_blob = write_json_blob(self.dir, &manifest_data)?;
+
+        let index_data = serde_json::json!({
+            "schemaVersion": 2,
+            "manifests": [
+                {
+                    "mediaType": OCI_TYPE_MANIFEST_JSON,
+                    "digest": manifest_blob.digest_id(),
+                    "size": manifest_blob.size,
+                    "platform": {
+                        "architecture": arch,
+                        "os": "linux"
+                    }
+                }
+            ]
+        });
+        self.dir
+            .write_file_with("index.json", 0o644, |w| -> Result<()> {
+                cjson::to_writer(w, &index_data).map_err(|e| anyhow::anyhow!("{:?}", e))?;
+                Ok(())
+            })?;
+
+        Ok(())
+    }
+}
+
+impl<'a> Drop for BlobWriter<'a> {
+    fn drop(&mut self) {
+        if let Some(t) = self.target.take() {
+            // Defuse
+            let _ = t.abandon();
+        }
+    }
+}
+
+impl<'a> BlobWriter<'a> {
+    #[context("Creating blob writer")]
+    pub(crate) fn new(ocidir: &'a openat::Dir) -> Result<Self> {
+        Ok(Self {
+            ocidir,
+            hash: Hasher::new(MessageDigest::sha256())?,
+            // FIXME add ability to choose filename after completion
+            target: Some(ocidir.new_file_writer(TMPBLOB, 0o644)?),
+            size: 0,
+        })
+    }
+
+    #[context("Completing blob")]
+    pub(crate) fn complete(mut self) -> Result<Blob> {
+        self.target.take().unwrap().complete()?;
+        let sha256 = hex::encode(self.hash.finish()?);
+        self.ocidir
+            .local_rename(TMPBLOB, &format!("{}/{}", BLOBDIR, sha256))?;
+        Ok(Blob {
+            sha256,
+            size: self.size,
+        })
+    }
+}
+
+impl<'a> std::io::Write for BlobWriter<'a> {
+    fn write(&mut self, srcbuf: &[u8]) -> std::io::Result<usize> {
+        self.hash.update(srcbuf)?;
+        self.target.as_mut().unwrap().writer.write_all(srcbuf)?;
+        self.size += srcbuf.len() as u64;
+        Ok(srcbuf.len())
+    }
+
+    fn flush(&mut self) -> std::io::Result<()> {
+        Ok(())
+    }
+}
+
+impl<'a> LayerWriter<'a> {
+    pub(crate) fn new(ocidir: &'a openat::Dir) -> Result<Self> {
+        let bw = BlobWriter::new(ocidir)?;
+        Ok(Self {
+            bw,
+            uncompressed_hash: Hasher::new(MessageDigest::sha256())?,
+            compressor: GzEncoder::new(Vec::with_capacity(8192), flate2::Compression::default()),
+        })
+    }
+
+    #[context("Completing layer")]
+    pub(crate) fn complete(mut self) -> Result<Layer> {
+        self.compressor.get_mut().clear();
+        let buf = self.compressor.finish()?;
+        self.bw.write_all(&buf)?;
+        let blob = self.bw.complete()?;
+        let uncompressed_sha256 = hex::encode(self.uncompressed_hash.finish()?);
+        Ok(Layer {
+            blob,
+            uncompressed_sha256,
+        })
+    }
+}
+
+impl<'a> std::io::Write for LayerWriter<'a> {
+    fn write(&mut self, srcbuf: &[u8]) -> std::io::Result<usize> {
+        self.compressor.get_mut().clear();
+        self.compressor.write_all(srcbuf).unwrap();
+        // Track the digest of the uncompressed input; complete() records it
+        // as the layer's uncompressed_sha256.
+        self.uncompressed_hash.update(srcbuf)?;
+        let compressed_buf = self.compressor.get_mut().as_slice();
+        self.bw.write_all(&compressed_buf)?;
+        Ok(srcbuf.len())
+    }
+
+    fn flush(&mut self) -> std::io::Result<()> {
+        Ok(())
+    }
+}
diff --git a/tests/it/fixtures/exampleos.tar.zst b/lib/src/container/tests/it/fixtures/exampleos.tar.zst
similarity index 100%
rename from tests/it/fixtures/exampleos.tar.zst
rename to lib/src/container/tests/it/fixtures/exampleos.tar.zst
diff --git a/lib/src/container/tests/it/main.rs b/lib/src/container/tests/it/main.rs
new file mode 100644
index 000000000..4591e5c86
--- /dev/null
+++ b/lib/src/container/tests/it/main.rs
@@ -0,0 +1,117 @@
+use anyhow::{anyhow, Context, Result};
+use camino::{Utf8Path, Utf8PathBuf};
+use flate2::read::GzDecoder;
+use fn_error_context::context;
+use indoc::indoc;
+use sh_inline::bash;
+use std::fs::File;
+use std::io::BufReader;
+
+use ostree_container::oci as myoci;
+
+const EXAMPLEOS_TAR: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
+const TESTREF: &str = "exampleos/x86_64/stable";
+const CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
+
+#[context("Generating test OCI")]
+fn generate_test_oci(dir: &Utf8Path) -> Result<Utf8PathBuf> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    let path = Utf8Path::new(dir);
+    let tarpath = &path.join("exampleos.tar.zst");
+    std::fs::write(tarpath, EXAMPLEOS_TAR)?;
+    bash!(
+        indoc! 
{"
+        cd {path}
+        ostree --repo=repo-archive init --mode=archive
+        ostree --repo=repo-archive commit -b {testref} --tree=tar=exampleos.tar.zst
+        ostree --repo=repo-archive show {testref}
+        ostree --repo=repo-archive ls -R -X -C {testref}
+    "},
+        testref = TESTREF,
+        path = path.as_str()
+    )?;
+    std::fs::remove_file(tarpath)?;
+    let repopath = &path.join("repo-archive");
+    let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?;
+    let (_, rev) = repo.read_commit(TESTREF, cancellable)?;
+    let (commitv, _) = repo.load_commit(rev.as_str())?;
+    assert_eq!(
+        ostree::commit_get_content_checksum(&commitv)
+            .unwrap()
+            .as_str(),
+        CONTENT_CHECKSUM
+    );
+    let ocipath = path.join("exampleos-oci");
+    let ocitarget = ostree_container::buildoci::Target::OciDir(ocipath.as_ref());
+    ostree_container::buildoci::build(repo, TESTREF, ocitarget)?;
+    bash!(r"skopeo inspect oci:{ocipath}", ocipath = ocipath.as_str())?;
+    Ok(ocipath)
+}
+
+fn read_blob(ocidir: &Utf8Path, digest: &str) -> Result<File> {
+    let digest = digest
+        .strip_prefix("sha256:")
+        .ok_or_else(|| anyhow!("Unknown algorithm in digest {}", digest))?;
+    let f = File::open(ocidir.join("blobs/sha256").join(digest))
+        .with_context(|| format!("Opening blob {}", digest))?;
+    Ok(f)
+}
+
+#[context("Parsing OCI")]
+fn find_layer_in_oci(ocidir: &Utf8Path) -> Result<GzDecoder<BufReader<File>>> {
+    let f = std::io::BufReader::new(
+        File::open(ocidir.join("index.json")).context("Opening index.json")?,
+    );
+    let index: myoci::Index = serde_json::from_reader(f)?;
+    let manifest = index
+        .manifests
+        .get(0)
+        .ok_or_else(|| anyhow!("Missing manifest in index.json"))?;
+    let f = read_blob(ocidir, &manifest.digest)?;
+    let manifest: myoci::Manifest = serde_json::from_reader(f)?;
+    let layer = manifest
+        .layers
+        .iter()
+        .find(|layer| {
+            matches!(
+                layer.media_type.as_str(),
+                myoci::DOCKER_TYPE_LAYER | oci_distribution::manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE
+            )
+        })
+        .ok_or_else(|| anyhow!("Failed to find rootfs layer"))?;
+    let blob = std::io::BufReader::new(read_blob(ocidir, &layer.digest)?);
+    let gz = flate2::read::GzDecoder::new(blob);
+    Ok(gz)
+}
+
+#[test]
+fn test_tar_e2e() -> Result<()> {
+    let cancellable = gio::NONE_CANCELLABLE;
+
+    let tempdir = tempfile::tempdir()?;
+    let path = Utf8Path::from_path(tempdir.path()).unwrap();
+    let srcdir = &path.join("src");
+    std::fs::create_dir(srcdir)?;
+    let ocidir = &generate_test_oci(srcdir)?;
+    let destdir = &path.join("dest");
+    std::fs::create_dir(destdir)?;
+    let destrepodir = &destdir.join("repo");
+    let destrepo = ostree::Repo::new_for_path(destrepodir);
+    destrepo.create(ostree::RepoMode::Archive, cancellable)?;
+
+    let tarf = find_layer_in_oci(ocidir)?;
+    let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, tarf)?;
+    let (commitdata, _) = destrepo.load_commit(&imported_commit)?;
+    assert_eq!(
+        CONTENT_CHECKSUM,
+        ostree::commit_get_content_checksum(&commitdata)
+            .unwrap()
+            .as_str()
+    );
+    bash!(
+        "ostree --repo={destrepodir} ls -R {imported_commit}",
+        destrepodir = destrepodir.as_str(),
+        imported_commit = imported_commit.as_str()
+    )?;
+    Ok(())
+}
diff --git a/src/diff.rs b/lib/src/diff.rs
similarity index 100%
rename from src/diff.rs
rename to lib/src/diff.rs
diff --git a/lib/src/import.rs b/lib/src/import.rs
new file mode 100644
index 000000000..e63f3c0ee
--- /dev/null
+++ b/lib/src/import.rs
@@ -0,0 +1,2 @@
+use super::Result;
+
diff --git a/src/lib.rs b/lib/src/lib.rs
similarity index 96%
rename from src/lib.rs
rename to lib/src/lib.rs
index 8380f41e8..899a61802 
100644 --- a/src/lib.rs +++ b/lib/src/lib.rs @@ -13,6 +13,7 @@ /// to a string to output to a terminal or logs. type Result = anyhow::Result; +pub mod container; pub mod diff; pub mod ostree_ext; pub mod tar; diff --git a/src/ostree_ext.rs b/lib/src/ostree_ext.rs similarity index 100% rename from src/ostree_ext.rs rename to lib/src/ostree_ext.rs diff --git a/src/tar/export.rs b/lib/src/tar/export.rs similarity index 100% rename from src/tar/export.rs rename to lib/src/tar/export.rs diff --git a/src/tar/import.rs b/lib/src/tar/import.rs similarity index 100% rename from src/tar/import.rs rename to lib/src/tar/import.rs diff --git a/src/tar/mod.rs b/lib/src/tar/mod.rs similarity index 100% rename from src/tar/mod.rs rename to lib/src/tar/mod.rs diff --git a/src/variant_utils.rs b/lib/src/variant_utils.rs similarity index 100% rename from src/variant_utils.rs rename to lib/src/variant_utils.rs diff --git a/tests/it/fixtures/exampleos-v1.tar.zst b/lib/tests/it/fixtures/exampleos-v1.tar.zst similarity index 100% rename from tests/it/fixtures/exampleos-v1.tar.zst rename to lib/tests/it/fixtures/exampleos-v1.tar.zst diff --git a/lib/tests/it/fixtures/exampleos.tar.zst b/lib/tests/it/fixtures/exampleos.tar.zst new file mode 100644 index 0000000000000000000000000000000000000000..8e8969d838ae96e1575fc2c9d3773325bed2bfc0 GIT binary patch literal 1052 zcmV+%1mpWCwJ-eySbZP>X6<$%Kx@WBVHyI{ESN~3A!6F1JjwxTPP9-SWiU{1 z@FFyCaXb;H%)0PPv3`-`i9lptK2?mD9`^AGD2)a+4u*en5a@{-j>N~ir$y$x%j=~> z{C`#c|D+OYQPLWe>ZevQ(T%*UHB}-*U54z+iXH2p z-c!FHWr+@&M$(q$O(bnO{x%+;tv#>8UYLsa^yE(3;ylsu=sZzEDUF7qaFi!HZpILi zthj95Qi$xS%y`Yt6G2Vm?2+*P@Am&KStt#PVp$Z%gN$If_;@8#dSBgU>{pDL_r&*P zJV?T{B+pDsb`x25H#3u2ZL@E8GcDQ1>#xOUr+6{;@rgB|(bR*`KpdzsZBZCUS@@AS z`)}MkhI*Ln>plcvfoaQugD~(&%iX=SdDqf@U$3iN+}}10g0?a( zQN&)UKa~s5{4=I{vO~;!LhRHu2#g0wzD$7YsL`2YMnptLnt~KEi~-;xkqJbmI0q6S zK^VqB6vQA51#uL{AOsYH2#qlULWqD60V4!KgB+g&{jc#k`)L}~`C$W6^aFS)Nbori zz|rO!8if*IIW87(@dNlE*#MCA0Af;zNZqht_5m|YqaMK@OEy3x3^1B02Yfw1@M|`p z&C}QP>%uo_cmQW1K%`_q$!sjOg0*E17$E~f0et`vF05w>%mXBQUE&140M`M#2PViDfCfCC(9fbzSs;1__3wE`za%d%_$TL#=jb_1$Nv2Oqk1qaWx0U>&T(*z!? 
z4hSQe6=g7tSrCEZTs3P(EIAYwLKTp@e??D9*5WyvZBkZIbOghj3 zd_UmwaAUOP1(TlVrc&+?pqvr>f+V|54q(zgSIMRa1ZSNi@kT7vU_jw@syNp_@M-@6 z5g8CV8XG`YceokAxz=566Hu$3TIDZTdp^#P}V57MmL!w-teIS_2281>MLIWb5`9N52LlD#&5MPgy z0Y`Bb0%H+q=Fc&a Date: Tue, 6 Apr 2021 15:08:54 +0000 Subject: [PATCH 009/775] ci: Add GH action for Rust Cargo culted from cgwalters/openat-ext --- .github/workflows/rust.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/rust.yml diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml new file mode 100644 index 000000000..3c13d1be2 --- /dev/null +++ b/.github/workflows/rust.yml @@ -0,0 +1,22 @@ +name: Rust + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +env: + CARGO_TERM_COLOR: always + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Build + run: cargo build --verbose + - name: Run tests + run: cargo test --verbose From c0ce36c81246e4d1dbea499a7f18b89628414ceb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 6 Apr 2021 15:35:02 +0000 Subject: [PATCH 010/775] lib: Update to openat-ext 0.2.0 --- lib/Cargo.toml | 2 +- lib/src/container/oci.rs | 20 +++----------------- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 3add4b9c0..2a5e10317 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -19,7 +19,7 @@ gvariant = "0.4.0" hex = "0.4.3" libc = "0.2.92" openat = "0.1.20" -openat-ext = "0.1.13" +openat-ext = "0.2.0" openssl = "0.10.33" ostree = { version = "0.10.0", features = ["v2021_1" ]} os_pipe = "0.9.2" diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 25cd2df56..045af1ea5 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -22,8 +22,6 @@ const OCI_TYPE_LAYER: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; pub(crate) const DOCKER_TYPE_LAYER: &str = "application/vnd.docker.image.rootfs.diff.tar.gzip"; -// FIXME get rid of this after updating to https://github.com/coreos/openat-ext/pull/27 -const TMPBLOB: &str = ".tmpblob"; /// Path inside an OCI directory to the blobs const BLOBDIR: &str = "blobs/sha256"; @@ -96,7 +94,6 @@ pub(crate) struct Layer { /// Create an OCI blob. 
pub(crate) struct BlobWriter<'a> {
-    ocidir: &'a openat::Dir,
     pub(crate) hash: Hasher,
     pub(crate) target: Option<FileWriter<'a>>,
     size: u64,
@@ -204,33 +201,22 @@ impl<'a> OciWriter<'a> {
     }
 }
 
-impl<'a> Drop for BlobWriter<'a> {
-    fn drop(&mut self) {
-        if let Some(t) = self.target.take() {
-            // Defuse
-            let _ = t.abandon();
-        }
-    }
-}
-
 impl<'a> BlobWriter<'a> {
     #[context("Creating blob writer")]
     pub(crate) fn new(ocidir: &'a openat::Dir) -> Result<Self> {
         Ok(Self {
-            ocidir,
             hash: Hasher::new(MessageDigest::sha256())?,
             // FIXME add ability to choose filename after completion
-            target: Some(ocidir.new_file_writer(TMPBLOB, 0o644)?),
+            target: Some(ocidir.new_file_writer(0o644)?),
             size: 0,
         })
     }
 
     #[context("Completing blob")]
     pub(crate) fn complete(mut self) -> Result<Blob> {
-        self.target.take().unwrap().complete()?;
         let sha256 = hex::encode(self.hash.finish()?);
-        self.ocidir
-            .local_rename(TMPBLOB, &format!("{}/{}", BLOBDIR, sha256))?;
+        let target = &format!("{}/{}", BLOBDIR, sha256);
+        self.target.take().unwrap().complete(target)?;
         Ok(Blob {
             sha256,
             size: self.size,
From ee627dd641b9c14b6c61fed1a2b00898d7706b2d Mon Sep 17 00:00:00 2001
From: Colin Walters 
Date: Tue, 6 Apr 2021 15:51:06 +0000
Subject: [PATCH 011/775] ci: Fix branch name

---
 .github/workflows/rust.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 3c13d1be2..7ae98f3b1 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -2,9 +2,9 @@ name: Rust
 
 on:
   push:
-    branches: [ master ]
+    branches: [ main ]
   pull_request:
-    branches: [ master ]
+    branches: [ main ]
 
 env:
   CARGO_TERM_COLOR: always
From b9240927867ba70b299be2a89800de34f8df610b Mon Sep 17 00:00:00 2001
From: Colin Walters 
Date: Tue, 6 Apr 2021 18:59:48 +0000
Subject: [PATCH 012/775] import: Don't crash if we already have the object

In this case ostree will just drop the reader, which
will break the pipe.

TODO: push rather than pull based file write API

---
 lib/src/tar/import.rs | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index a7af4114c..1b9a7278b 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -2,7 +2,7 @@
 use crate::variant_utils::variant_new_from_bytes;
 use crate::Result;
-use anyhow::anyhow;
+use anyhow::{anyhow, Context};
 use camino::Utf8Path;
 use fn_error_context::context;
 use std::collections::HashMap;
@@ -170,6 +170,12 @@ impl<'a> Importer<'a> {
         xattrs: Option<&glib::Variant>,
     ) -> Result<()> {
         let cancellable = gio::NONE_CANCELLABLE;
+        if self
+            .repo
+            .has_object(ostree::ObjectType::File, checksum, cancellable)?
+        {
+            return Ok(());
+        }
         let (recv, mut send) = os_pipe::pipe()?;
         let size = entry.header().size()?;
         let header_copy = entry.header().clone();
@@ -183,7 +189,7 @@ impl<'a> Importer<'a> {
             repo_clone.write_content(Some(checksum), &ostream, size, cancellable)?;
             Ok(())
         });
-        let n = std::io::copy(&mut entry, &mut send)?;
+        let n = std::io::copy(&mut entry, &mut send).context("Copying object content")?;
         drop(send);
         assert_eq!(n, size);
         j.join().unwrap()?;
@@ -196,7 +202,7 @@
     /// Given a tar entry that looks like an object (its path is under ostree/repo/objects/),
     /// determine its type and import it. 
- #[context("Importing object {}", path)] + #[context("object {}", path)] fn import_object<'b, R: std::io::Read>( &mut self, entry: tar::Entry<'b, R>, From 187c11ba0d9a6753e6e77be4707e9aaad30d21bb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 6 Apr 2021 19:00:31 +0000 Subject: [PATCH 013/775] container/client: More explicitly join futures So it's easy to debug errors. --- lib/src/container/client.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/lib/src/container/client.rs b/lib/src/container/client.rs index 1e4f13c64..f221110f4 100644 --- a/lib/src/container/client.rs +++ b/lib/src/container/client.rs @@ -3,7 +3,7 @@ use std::io::Write; use super::Result; -use anyhow::anyhow; +use anyhow::{anyhow, Context}; use fn_error_context::context; use oci_distribution::manifest::OciDescriptor; @@ -66,7 +66,7 @@ async fn import_impl(repo: &ostree::Repo, image_ref: &str) -> Result { let copier = tokio::task::spawn_blocking(move || -> anyhow::Result<()> { let req = futures::executor::block_on_stream(req); for v in req { - let v = v?; + let v = v.map_err(anyhow::Error::msg).context("Writing buf")?; pipeout.write_all(&v)?; } Ok(()) @@ -76,8 +76,9 @@ async fn import_impl(repo: &ostree::Repo, image_ref: &str) -> Result { let gz = flate2::read::GzDecoder::new(pipein); crate::tar::import_tar(&repo, gz) }); - let ostree_commit = import.await??; - copier.await??; + let (import_res, copy_res) = tokio::join!(import, copier); + copy_res??; + let ostree_commit = import_res??; Ok(Import { ostree_commit, From 1bd614398b9779ba2dd2950cd7b19c5815502c52 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 6 Apr 2021 19:00:52 +0000 Subject: [PATCH 014/775] cli: Expose `container` verb So one can do things from the CLI. --- cli/src/main.rs | 60 +++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 48 insertions(+), 12 deletions(-) diff --git a/cli/src/main.rs b/cli/src/main.rs index 8d6c54321..b251108a5 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,5 +1,6 @@ use anyhow::Result; use structopt::StructOpt; +use tokio::runtime::Runtime; #[derive(Debug, StructOpt)] struct BuildOpts { @@ -42,17 +43,31 @@ enum TarOpts { Export(ExportOpts), } -// #[derive(Debug, StructOpt)] -// enum ContainerOpts { -// /// Import an ostree commit embedded in a container image -// Import { -// /// Path to remote image, e.g. quay.io/exampleos/exampleos:latest -// imgref: String, -// }, - -// /// Export an ostree commit to an OCI layout -// Export(ExportOpts), -// } +#[derive(Debug, StructOpt)] +enum ContainerOpts { + /// Import an ostree commit embedded in a remote container image + Import { + /// Path to the repository + #[structopt(long)] + repo: String, + + /// Path to remote image, e.g. 
quay.io/exampleos/exampleos:latest + imgref: String, + }, + + /// Export an ostree commit to an OCI layout + ExportOCI { + /// Path to the repository + #[structopt(long)] + repo: String, + + /// The ostree ref or commit to export + rev: String, + + /// Export to an OCI image layout + path: String, + }, +} #[derive(Debug, StructOpt)] #[structopt(name = "ostree-ext")] @@ -60,7 +75,8 @@ enum TarOpts { enum Opt { /// Import and export to tar Tar(TarOpts), - // Container(ContainerOpts), + /// Import and export to a container image + Container(ContainerOpts), } fn tar_import(opts: &ImportOpts) -> Result<()> { @@ -83,11 +99,31 @@ fn tar_export(opts: &ExportOpts) -> Result<()> { Ok(()) } +fn container_import(repo: &str, imgref: &str) -> Result<()> { + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; + let rt = Runtime::new()?; + let res = + rt.block_on(async move { ostree_ext::container::client::import(repo, imgref).await })?; + println!("Imported: {}", res.ostree_commit); + Ok(()) +} + +fn container_export_oci(repo: &str, rev: &str, path: &str) -> Result<()> { + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; + let target = ostree_ext::container::buildoci::Target::OciDir(std::path::Path::new(path)); + ostree_ext::container::buildoci::build(repo, rev, target)?; + Ok(()) +} + fn run() -> Result<()> { let opt = Opt::from_args(); match opt { Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt), Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt), + Opt::Container(ContainerOpts::Import { repo, imgref }) => container_import(&repo, &imgref), + Opt::Container(ContainerOpts::ExportOCI { repo, rev, path }) => { + container_export_oci(&repo, &rev, &path) + } } } From 1d69adde3825d13e100734bc82883ac4bf3f5be9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 6 Apr 2021 19:31:12 +0000 Subject: [PATCH 015/775] Make `container` an optional feature The dependencies are nontrivial. Prep for adding more features, which should also be optional. 
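
For illustration only (this sketch is not part of the diff below): a downstream crate opts in to the container stack from its own manifest, while a consumer that only needs the core tar import/export can omit the feature and skip the heavier dependencies (tokio, oci-distribution, serde, etc.).

```toml
# Hypothetical consumer Cargo.toml; the version number is a placeholder.
[dependencies]
# Pull in the optional OCI/Docker container module:
ostree-ext = { version = "0.1", features = ["container"] }

# Or, core tar APIs only (no container-only dependencies compiled in):
# ostree-ext = "0.1"
```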
--- cli/Cargo.toml | 2 +- lib/Cargo.toml | 33 ++++++++++++++++++++++++--------- lib/src/lib.rs | 1 + 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index d3614e692..46b1e97c6 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -9,7 +9,7 @@ readme = "README.md" [dependencies] anyhow = "1.0" -ostree-ext = { path = "../lib" } +ostree-ext = { path = "../lib", features = ["container"] } clap = "2.33.3" structopt = "0.3.21" ostree = { version = "0.10.0", features = ["v2021_1"] } diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 2a5e10317..f99cc1bff 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -7,6 +7,21 @@ readme = "README.md" repository = "https://github.com/cgwalters/ostree-ext" version = "0.1.0" + +[features] +# Enable the container API +container = [ + "cjson", + "flate2", + "futures", + "phf", + "nix", + "oci-distribution", + "tokio", + "serde", + "serde_json", +] + [dependencies] anyhow = "1.0" camino = "1.0.4" @@ -27,15 +42,15 @@ ostree-sys = "0.7.2" tar = "0.4.33" #ostree-container deps -cjson = "0.1.1" -flate2 = "1.0.20" -futures = "0.3.13" -phf = { version = "0.8.0", features = ["macros"] } -nix = "0.20.0" -oci-distribution = "0.6.0" -tokio = { version = "1", features = ["full"] } -serde = "1.0.125" -serde_json = "1.0.64" +cjson = { version = "0.1.1", optional = true } +flate2 = {version = "1.0.20", optional = true } +futures = { version = "0.3.13", optional = true } +phf = { version = "0.8.0", features = ["macros"], optional = true } +nix = { version = "0.20.0", optional = true } +oci-distribution = { version = "0.6.0", optional = true } +tokio = { version = "1", features = ["full"], optional = true } +serde = { version = "1.0.125", optional = true } +serde_json = { version = "1.0.64", optional = true } [dev-dependencies] clap = "2.33.3" diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 899a61802..0ca962e94 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -13,6 +13,7 @@ /// to a string to output to a terminal or logs. type Result = anyhow::Result; +#[cfg(feature = "container")] pub mod container; pub mod diff; pub mod ostree_ext; From 147b0d52004a8451ef00282e646ce0f8ca25b479 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 6 Apr 2021 20:28:24 +0000 Subject: [PATCH 016/775] import: Require that we read exact length I was just reading this code and I think this is cleaner. --- lib/src/tar/import.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 1b9a7278b..81ab56cf8 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -6,6 +6,7 @@ use anyhow::{anyhow, Context}; use camino::Utf8Path; use fn_error_context::context; use std::collections::HashMap; +use std::io::prelude::*; /// Arbitrary limit on xattrs to avoid RAM exhaustion attacks. The actual filesystem limits are often much smaller. 
/// See https://en.wikipedia.org/wiki/Extended_file_attributes
@@ -344,9 +345,8 @@ impl<'a> Importer<'a> {
             return Err(anyhow!("Invalid xattr size {}", n));
         }
 
-        let mut contents = Vec::with_capacity(n as usize);
-        let c = std::io::copy(&mut entry, &mut contents)?;
-        assert_eq!(c, n);
+        let mut contents = vec![0u8; n as usize];
+        entry.read_exact(contents.as_mut_slice())?;
 
         let contents: glib::Bytes = contents.as_slice().into();
         let contents = variant_new_from_bytes(OSTREE_XATTRS_FORMAT, contents, false);

From 40186a8a06ea37461990633e913c34cebe64e361 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 6 Apr 2021 16:42:31 +0000
Subject: [PATCH 017/775] ci: Use fcos buildroot

So we have the latest libostree.

---
 .github/workflows/rust.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 7ae98f3b1..f6ae8fafb 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -13,6 +13,7 @@ jobs:
   build:
 
     runs-on: ubuntu-latest
+    container: quay.io/cgwalters/fcos-buildroot
 
     steps:
     - uses: actions/checkout@v2

From 6834c472dd0d720f28095bb1f5eb2201177dc1f2 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 13 Apr 2021 09:26:11 -0400
Subject: [PATCH 018/775] tar/export: Drop duplicate repo param

It's already a member of the exporter struct.

---
 lib/src/tar/export.rs | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 309a7b786..17aa0a0db 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -165,11 +165,12 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> {
     fn append_dirtree<C: IsA<gio::Cancellable>>(
         &mut self,
         dirpath: &Utf8Path,
-        repo: &ostree::Repo,
         checksum: &str,
         cancellable: Option<&C>,
     ) -> Result<()> {
-        let v = &repo.load_variant(ostree::ObjectType::DirTree, checksum)?;
+        let v = &self
+            .repo
+            .load_variant(ostree::ObjectType::DirTree, checksum)?;
         self.append(ostree::ObjectType::DirTree, checksum, v)?;
         let v = v.get_data_as_bytes();
         let v = v.try_as_aligned()?;
@@ -203,14 +204,16 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> {
         {
             hex::encode_to_slice(meta_csum, &mut hexbuf)?;
             let meta_csum = std::str::from_utf8(&hexbuf)?;
-            let meta_v = &repo.load_variant(ostree::ObjectType::DirMeta, meta_csum)?;
+            let meta_v = &self
+                .repo
+                .load_variant(ostree::ObjectType::DirMeta, meta_csum)?;
             self.append(ostree::ObjectType::DirMeta, meta_csum, meta_v)?;
         }
         hex::encode_to_slice(contents_csum, &mut hexbuf)?;
         let dirtree_csum = std::str::from_utf8(&hexbuf)?;
         let subpath = &dirpath.join(name);
         let subpath = map_path(subpath);
-        self.append_dirtree(&*subpath, repo, dirtree_csum, cancellable)?;
+        self.append_dirtree(&*subpath, dirtree_csum, cancellable)?;
     }
 
     Ok(())
@@ -276,7 +279,7 @@ fn impl_export(
     let metadata_v = &repo.load_variant(ostree::ObjectType::DirMeta, metadata_checksum)?;
     writer.append(ostree::ObjectType::DirMeta, metadata_checksum, metadata_v)?;
 
-    writer.append_dirtree(Utf8Path::new("./"), repo, contents, cancellable)?;
+    writer.append_dirtree(Utf8Path::new("./"), contents, cancellable)?;
     Ok(())
 }

From 997a029206d4dadc8bb04f451704c10680bc5112 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 14 Apr 2021 20:09:47 -0400
Subject: [PATCH 019/775] lib/import: Remove unused file

---
 lib/src/import.rs | 2 --
 1 file changed, 2 deletions(-)
 delete mode 100644 lib/src/import.rs

diff --git a/lib/src/import.rs b/lib/src/import.rs
deleted file mode 100644
index e63f3c0ee..000000000
--- a/lib/src/import.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-use super::Result; - From ad9d7bbc97c4a06c3e54e4a824deb2946626fbf5 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 14 Apr 2021 20:16:23 -0400 Subject: [PATCH 020/775] lib/container: Remove unused file --- lib/src/container/import.rs | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 lib/src/container/import.rs diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs deleted file mode 100644 index e63f3c0ee..000000000 --- a/lib/src/container/import.rs +++ /dev/null @@ -1,2 +0,0 @@ -use super::Result; - From e968875bb909cf63b183ca712e4c582543868e81 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 14 Apr 2021 20:15:44 -0400 Subject: [PATCH 021/775] README: Clarify various bits --- README.md | 32 ++++++-------------------------- 1 file changed, 6 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index dbfc3ea10..57b3529fb 100644 --- a/README.md +++ b/README.md @@ -49,16 +49,13 @@ This is used by `rpm-ostree ex apply-live`. ## module "container": Encapsulate ostree commits in OCI/Docker images -### Bundle an OSTree repository into a container +### Bundle an OSTree repository into an OCI container directory Given an OSTree repository, running *outside* a container: ``` -$ ostree-container build --repo=/path/to/repo --ref=exampleos/x86_64/stable --oci-dir=/output/exampleos +$ ostree-ext-cli container export-oci --repo=/path/to/repo exampleos/x86_64/stable /output/exampleos ``` - -`--oci-dir` creates an [OpenContainers image](https://github.com/opencontainers/image-spec/blob/master/spec.md) layout. - You can then e.g. ``` @@ -66,28 +63,11 @@ $ skopeo copy oci:/output/exampleos containers-storage:localhost/exampleos $ podman run --rm -ti --entrypoint bash localhost/exampleos ``` -Another option is `--push quay.io/exampleos/exampleos:stable` which would push directly to a registry. This would particularly be intended to be usable inside a fully unprivileged container, just mounting in the secrets necessary to push to the target registry. - -### Take an arbitrary container and convert it to be OSTree ready - -There's nothing conceptually stopping us from having tooling that takes -an arbitrary container image and just makes it "ostree ready". Or even -just dyanamically accepting a container image that has a kernel client side. - -This *may* be in scope at some point in the future. - -#### ostree-containers and derivation - -For an ostree-based OS that is derived from Fedora, -`ostree-container build --from=registry.fedoraproject.org/fedora:33` would cause the generated container image to derive from the parent; in particular we de-duplicate content in the ostree commit from the base. - -This would work equally well for a Debian+ostree OS to do `--from=docker.io/debian:stable`. - -(In fact we may *require* this; TBD) +You can also use e.g. `skopeo copy oci:/output/exampleos docker://quay.io/exampleos/exampleos:latest`. -### Running an ostree-container as a webserver +### Future: Running an ostree-container as a webserver -It also works to run the ostree-container as a webserver, which will expose a webserver that responds to `GET /repo`. +It also should work to run the ostree-container as a webserver, which will expose a webserver that responds to `GET /repo`. The effect will be as if it was built from a `Dockerfile` that contains `EXPOSE 8080`; it will work to e.g. 
`kubectl run nginx --image=quay.io/exampleos/exampleos:latest --replicas=1` @@ -102,7 +82,7 @@ This project will hence provide a CLI tool and a Rust library which speaks the D An important aspect of this is that the system will validate the GPG signature of the target OSTree commit, as well as validating the sha256 of the contained objects. ``` -$ ostree-container pull --repo=/ostree/repo --ref=exampleos/x86_64/stable quay.io/exampleos/exampleos:stable +$ ostree-ext-cli container import --repo=/ostree/repo quay.io/exampleos/exampleos:stable ``` A project like rpm-ostree could hence support: From 553408d1415e29ffb2dc7eda28e28eaa9fb8b582 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 9 Apr 2021 19:40:44 +0000 Subject: [PATCH 022/775] Update to ostree v2021.2/0.11, use new writing APIs This is a lot more efficient; before we were creating a thread per object, etc. --- Cargo.toml | 2 - cli/Cargo.toml | 2 +- lib/Cargo.toml | 2 +- lib/src/tar/import.rs | 194 +++++++++++++++++++++++++++++------------- lib/tests/it/main.rs | 4 +- 5 files changed, 141 insertions(+), 63 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 20c82d151..cd6724a5f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,5 +7,3 @@ lto = "thin" [patch.crates-io] oci-distribution = { git = 'https://github.com/cgwalters/krustlet', branch = 'streaming-client' } -ostree = { git = 'https://gitlab.com/fkrull/ostree-rs', rev = 'fd2b57864938e9b3c0fc0c4496da29a099ad4616' } -ostree-sys = { git = 'https://gitlab.com/fkrull/ostree-rs', rev = 'fd2b57864938e9b3c0fc0c4496da29a099ad4616' } diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 46b1e97c6..c75a0030a 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -12,7 +12,7 @@ anyhow = "1.0" ostree-ext = { path = "../lib", features = ["container"] } clap = "2.33.3" structopt = "0.3.21" -ostree = { version = "0.10.0", features = ["v2021_1"] } +ostree = { version = "0.11.0", features = ["v2021_2"] } libc = "0.2.92" tokio = { version = "1", features = ["full"] } gio = "0.9.1" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index f99cc1bff..0e2628296 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -36,7 +36,7 @@ libc = "0.2.92" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { version = "0.10.0", features = ["v2021_1" ]} +ostree = { version = "0.11.0", features = ["v2021_2" ]} os_pipe = "0.9.2" ostree-sys = "0.7.2" tar = "0.4.33" diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 81ab56cf8..631c64477 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -5,7 +5,11 @@ use crate::Result; use anyhow::{anyhow, Context}; use camino::Utf8Path; use fn_error_context::context; +use gio::prelude::*; +use glib::Cast; +use ostree::ContentWriterExt; use std::collections::HashMap; +use std::convert::TryInto; use std::io::prelude::*; /// Arbitrary limit on xattrs to avoid RAM exhaustion attacks. The actual filesystem limits are often much smaller. @@ -16,6 +20,9 @@ const MAX_XATTR_SIZE: u32 = 1024 * 1024; /// from ostree-core.h. 
TODO: Bind this in introspection
 const MAX_METADATA_SIZE: u32 = 10 * 1024 * 1024;
 
+/// https://stackoverflow.com/questions/258091/when-should-i-use-mmap-for-file-access
+const SMALL_REGFILE_SIZE: usize = 127 * 1024;
+
 // Variant formats, see ostree-core.h
 // TODO - expose these via introspection
 const OSTREE_COMMIT_FORMAT: &str = "(a{sv}aya(say)sstayay)";
@@ -31,12 +38,23 @@ enum ImportState {
     Importing(String),
 }
 
+#[derive(Debug, Default)]
+struct ImportStats {
+    dirtree: u32,
+    dirmeta: u32,
+    regfile_small: u32,
+    regfile_large: u32,
+    symlinks: u32,
+}
+
 /// Importer machine.
 struct Importer<'a> {
     state: ImportState,
    repo: &'a ostree::Repo,
     xattrs: HashMap<String, glib::Variant>,
     next_xattrs: Option<(String, String)>,
+
+    stats: ImportStats,
 }
 
 impl<'a> Drop for Importer<'a> {
@@ -62,37 +80,11 @@ fn validate_metadata_header(header: &tar::Header, desc: &str) -> Result<usize> {
     Ok(size as usize)
 }
 
-/// Convert a tar header to a gio::FileInfo. This only maps
-/// attributes that matter to ostree.
-fn header_to_gfileinfo(header: &tar::Header) -> Result<gio::FileInfo> {
-    let i = gio::FileInfo::new();
-    let t = match header.entry_type() {
-        tar::EntryType::Regular => gio::FileType::Regular,
-        tar::EntryType::Symlink => gio::FileType::SymbolicLink,
-        o => return Err(anyhow!("Invalid tar type: {:?}", o)),
-    };
-    i.set_file_type(t);
-    i.set_size(0);
-    let uid = header.uid()? as u32;
-    let gid = header.gid()? as u32;
-    let mode = header.mode()?;
-    i.set_attribute_uint32("unix::uid", uid);
-    i.set_attribute_uint32("unix::gid", gid);
-    i.set_attribute_uint32("unix::mode", mode);
-    if t == gio::FileType::Regular {
-        i.set_size(header.size()? as i64)
-    } else {
-        i.set_attribute_boolean("standard::is-symlink", true);
-        let target = header.link_name()?;
-        let target = target.ok_or_else(|| anyhow!("Invalid symlink"))?;
-        let target = target
-            .as_os_str()
-            .to_str()
-            .ok_or_else(|| anyhow!("Non-utf8 symlink"))?;
-        i.set_symlink_target(target);
-    }
-
-    Ok(i)
+fn header_attrs(header: &tar::Header) -> Result<(u32, u32, u32)> {
+    let uid: u32 = header.uid()?.try_into()?;
+    let gid: u32 = header.gid()?.try_into()?;
+    let mode: u32 = header.mode()?.try_into()?;
+    Ok((uid, gid, mode))
 }
 
 fn format_for_objtype(t: ostree::ObjectType) -> Option<&'static str> {
@@ -159,16 +151,113 @@ impl<'a> Importer<'a> {
         let _ = self
             .repo
             .write_metadata(objtype, Some(checksum), &v, gio::NONE_CANCELLABLE)?;
+        match objtype {
+            ostree::ObjectType::DirMeta => self.stats.dirmeta += 1,
+            ostree::ObjectType::DirTree => self.stats.dirtree += 1,
+            ostree::ObjectType::Commit => {}
+            _ => unreachable!(),
+        }
+        Ok(())
+    }
+
+    /// Import a content object.
+    fn import_large_regfile_object<R: std::io::Read>(
+        &mut self,
+        mut entry: tar::Entry<R>,
+        size: usize,
+        checksum: &str,
+        xattrs: Option<glib::Variant>,
+    ) -> Result<()> {
+        let cancellable = gio::NONE_CANCELLABLE;
+        let (uid, gid, mode) = header_attrs(entry.header())?;
+        let w = self.repo.write_regfile(
+            Some(checksum),
+            uid,
+            gid,
+            libc::S_IFREG | mode,
+            size as u64,
+            xattrs.as_ref(),
+        )?;
+        {
+            let w = w.clone().upcast::<gio::OutputStream>();
+            let mut buf = [0; 8192];
+            loop {
+                let n = entry.read(&mut buf[..]).context("Reading large regfile")?;
+                if n == 0 {
+                    break;
+                }
+                w.write(&buf[0..n], cancellable)
+                    .context("Writing large regfile")?;
+            }
+        }
+        let c = w.finish(cancellable)?;
+        debug_assert_eq!(c, checksum);
+        self.stats.regfile_large += 1;
+        Ok(())
+    }
+
+    /// Import a content object.
+    fn import_small_regfile_object<R: std::io::Read>(
+        &mut self,
+        mut entry: tar::Entry<R>,
+        size: usize,
+        checksum: &str,
+        xattrs: Option<glib::Variant>,
+    ) -> Result<()> {
+        let (uid, gid, mode) = header_attrs(entry.header())?;
+        assert!(size <= SMALL_REGFILE_SIZE);
+        let mut buf = vec![0u8; size];
+        entry.read_exact(&mut buf[..])?;
+        let c = self.repo.write_regfile_inline(
+            Some(checksum),
+            uid,
+            gid,
+            mode,
+            xattrs.as_ref(),
+            &buf,
+            gio::NONE_CANCELLABLE,
+        )?;
+        debug_assert_eq!(c.as_str(), checksum);
+        self.stats.regfile_small += 1;
+        Ok(())
+    }
+
+    /// Import a content object.
+    fn import_symlink_object<R: std::io::Read>(
+        &mut self,
+        entry: tar::Entry<R>,
+        checksum: &str,
+        xattrs: Option<glib::Variant>,
+    ) -> Result<()> {
+        let (uid, gid, _) = header_attrs(entry.header())?;
+        let target = entry
+            .header()
+            .link_name()?
+            .ok_or_else(|| anyhow!("Invalid symlink"))?;
+        let target = target
+            .as_os_str()
+            .to_str()
+            .ok_or_else(|| anyhow!("Non-utf8 symlink"))?;
+        let c = self.repo.write_symlink(
+            Some(checksum),
+            uid,
+            gid,
+            xattrs.as_ref(),
+            target,
+            gio::NONE_CANCELLABLE,
+        )?;
+        debug_assert_eq!(c.as_str(), checksum);
+        self.stats.symlinks += 1;
         Ok(())
     }
 
     /// Import a content object.
     #[context("Processing content object {}", checksum)]
     fn import_content_object<R: std::io::Read>(
-        &self,
-        mut entry: tar::Entry<R>,
+        &mut self,
+        entry: tar::Entry<R>,
         checksum: &str,
-        xattrs: Option<&glib::Variant>,
+        xattrs: Option<glib::Variant>,
     ) -> Result<()> {
         let cancellable = gio::NONE_CANCELLABLE;
         if self
@@ -177,28 +266,18 @@ impl<'a> Importer<'a> {
         {
             return Ok(());
         }
-        let (recv, mut send) = os_pipe::pipe()?;
-        let size = entry.header().size()?;
-        let header_copy = entry.header().clone();
-        let repo_clone = self.repo.clone();
-        crossbeam::thread::scope(move |s| -> Result<()> {
-            let j = s.spawn(move |_| -> Result<()> {
-                let i = header_to_gfileinfo(&header_copy)?;
-                let recv = gio::ReadInputStream::new(recv);
-                let (ostream, size) =
-                    ostree::raw_file_to_content_stream(&recv, &i, xattrs, cancellable)?;
-                repo_clone.write_content(Some(checksum), &ostream, size, cancellable)?;
-                Ok(())
-            });
-            let n = std::io::copy(&mut entry, &mut send).context("Copying object content")?;
-            drop(send);
-            assert_eq!(n, size);
-            j.join().unwrap()?;
-            Ok(())
-        })
-        .unwrap()?;
-
-        Ok(())
+        let size: usize = entry.header().size()?.try_into()?;
+        match entry.header().entry_type() {
+            tar::EntryType::Regular => {
+                if size > SMALL_REGFILE_SIZE {
+                    self.import_large_regfile_object(entry, size, checksum, xattrs)
+                } else {
+                    self.import_small_regfile_object(entry, size, checksum, xattrs)
+                }
+            }
+            tar::EntryType::Symlink => self.import_symlink_object(entry, checksum, xattrs),
+            o => return Err(anyhow!("Invalid tar entry of type {:?}", o)),
+        }
     }
 
     /// Given a tar entry that looks like an object (its path is under ostree/repo/objects/),
     /// determine its type and import it.
@@ -259,7 +338,7 @@ impl<'a> Importer<'a> {
                 .xattrs
                 .get(&xattr_objref)
                 .ok_or_else(|| anyhow!("Failed to find xattr {}", xattr_objref))?;
-            Some(v)
+            Some(v.clone())
         } else {
             None
         };
@@ -382,6 +461,7 @@ pub fn import_tar(repo: &ostree::Repo, src: impl std::io::Read) -> Result<String
         xattrs: Default::default(),
         next_xattrs: None,
+        stats: Default::default(),
     };

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -74,7 +74,7 @@ fn test_tar_import_export() -> Result<()> {
     let cancellable = gio::NONE_CANCELLABLE;
-    let tempdir = tempfile::tempdir()?;
+    let tempdir = tempfile::tempdir_in("/var/tmp")?;
     let path = Utf8Path::from_path(tempdir.path()).unwrap();
     let srcdir = &path.join("src");
     std::fs::create_dir(srcdir)?;
@@ -80,7 +80,7 @@ fn test_tar_import_export() -> Result<()> {
     std::fs::create_dir(destdir)?;
     let destrepodir = &destdir.join("repo");
     let destrepo = ostree::Repo::new_for_path(destrepodir);
-
destrepo.create(ostree::RepoMode::Archive, cancellable)?; + destrepo.create(ostree::RepoMode::BareUser, cancellable)?; let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, src_tar)?; let (commitdata, _) = destrepo.load_commit(&imported_commit)?; From c8b792d6c95fbcad4214a7d513f42a23e9d9b3f7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 15 Apr 2021 16:58:15 -0400 Subject: [PATCH 023/775] ci: Hack in updated ostree --- .github/workflows/rust.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index f6ae8fafb..415398c50 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -17,6 +17,8 @@ jobs: steps: - uses: actions/checkout@v2 + - name: Hack in updated ostree + run: rpm -Uvh https://kojipkgs.fedoraproject.org//packages/ostree/2021.2/2.fc33/x86_64/ostree-{,devel-,libs-}2021.2-2.fc33.x86_64.rpm - name: Build run: cargo build --verbose - name: Run tests From 229faf06cf4399b0484df432040bea799f3f89d7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 15 Apr 2021 17:50:55 -0400 Subject: [PATCH 024/775] Copy README.md --- lib/README.md | 1 + 1 file changed, 1 insertion(+) create mode 120000 lib/README.md diff --git a/lib/README.md b/lib/README.md new file mode 120000 index 000000000..32d46ee88 --- /dev/null +++ b/lib/README.md @@ -0,0 +1 @@ +../README.md \ No newline at end of file From 3b721fe6e66ab3135dbb7114567e8493578a87d5 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 15 Apr 2021 17:52:00 -0400 Subject: [PATCH 025/775] lib: Remove unused crossbeam dep --- lib/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 0e2628296..f4abc6fb6 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -25,7 +25,6 @@ container = [ [dependencies] anyhow = "1.0" camino = "1.0.4" -crossbeam = "0.8.0" fn-error-context = "0.1.1" gio = "0.9.1" glib = "0.10.3" From 16a5f2f365301002454c4684c13c90d9516e5b3f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 15 Apr 2021 17:59:43 -0400 Subject: [PATCH 026/775] lib: More Cargo.toml fixes --- lib/Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index f4abc6fb6..376227bc4 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -4,8 +4,9 @@ edition = "2018" license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" -repository = "https://github.com/cgwalters/ostree-ext" +repository = "https://github.com/ostreedev/ostree-ext" version = "0.1.0" +description = "Extension APIs for OSTree" [features] From 53575743f4df18137efd5fc19a7655ea2d5ce44a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 24 Apr 2021 08:36:12 -0400 Subject: [PATCH 027/775] actions: Set read-only flag See https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#permissions --- .github/workflows/rust.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 415398c50..fa8c6d106 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -1,5 +1,8 @@ name: Rust +permissions: + actions: read + on: push: branches: [ main ] From 9d953ec62ef5704ed14e6af761225674773d39be Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 18 Apr 2021 13:37:21 -0400 Subject: [PATCH 028/775] container: Use skopeo to fetch images Closes: https://github.com/ostreedev/ostree-rs-ext/issues/6 We want to honor things like mirroring set up in `/etc/containers` and ideally things like signature 
verification too.  Plus we need to support the Docker registry API, not pure OCI.

So depending on `oci-distribution` isn't a viable plan from that perspective.

We're not a Go project, so depending on github.com/containers/image directly is out; even if we were, vendoring all that is just a bad idea.

So let's use skopeo as a subprocess.

I originally wrote this to use skopeo via a pipe like:
`skopeo copy docker://quay.io/exampleos/exampleos oci-archive:///proc/self/fd/5`
where fd `5` is a pipe, but the first blocker is that containers/image
barfs if the destination is a pipe.

But even more importantly, the `oci-archive://` backend just spools
everything to a temporary directory and then tars it back up, entirely
obviating the point of streaming.

So here we do the tempdir dance ourselves for now.
---
 .github/workflows/rust.yml         |   2 +
 Cargo.toml                         |   3 -
 README.md                          |  39 ++++---
 cli/Cargo.toml                     |   5 +-
 cli/src/main.rs                    |  54 ++++++----
 lib/Cargo.toml                     |  67 +++++++-----
 lib/src/container/buildoci.rs      |  54 ----------
 lib/src/container/client.rs        |  92 -----------------
 lib/src/container/export.rs        |  99 ++++++++++++++++++
 lib/src/container/import.rs        | 151 +++++++++++++++++++++++++++
 lib/src/container/mod.rs           | 160 ++++++++++++++++++++++++++++-
 lib/src/container/oci.rs           |  45 ++++++--
 lib/src/container/skopeo.rs        |  19 ++++
 lib/src/container/tests/it/main.rs | 117 ---------------------
 lib/src/lib.rs                     |   2 +-
 lib/src/tar/export.rs              |   9 +-
 lib/src/tar/import.rs              |   1 +
 lib/src/variant_utils.rs           |  12 ++-
 lib/tests/it/main.rs               |  60 ++++++++++-
 19 files changed, 634 insertions(+), 357 deletions(-)
 delete mode 100644 lib/src/container/buildoci.rs
 delete mode 100644 lib/src/container/client.rs
 create mode 100644 lib/src/container/export.rs
 create mode 100644 lib/src/container/import.rs
 create mode 100644 lib/src/container/skopeo.rs
 delete mode 100644 lib/src/container/tests/it/main.rs

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index fa8c6d106..dd965dd6d 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -19,6 +19,8 @@ jobs:
     container: quay.io/cgwalters/fcos-buildroot
 
     steps:
+    - name: Install skopeo
+      run: yum -y install skopeo
    - uses: actions/checkout@v2
     - name: Hack in updated ostree
       run: rpm -Uvh https://kojipkgs.fedoraproject.org//packages/ostree/2021.2/2.fc33/x86_64/ostree-{,devel-,libs-}2021.2-2.fc33.x86_64.rpm

diff --git a/Cargo.toml b/Cargo.toml
index cd6724a5f..d3940e2cb 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,6 +4,3 @@ members = ["cli", "lib"]
 [profile.release]
 codegen-units = 1
 lto = "thin"
-
-[patch.crates-io]
-oci-distribution = { git = 'https://github.com/cgwalters/krustlet', branch = 'streaming-client' }

diff --git a/README.md b/README.md
index 57b3529fb..d776e20ea 100644
--- a/README.md
+++ b/README.md
@@ -49,49 +49,48 @@ This is used by `rpm-ostree ex apply-live`.
 
 ## module "container": Encapsulate ostree commits in OCI/Docker images
 
-### Bundle an OSTree repository into an OCI container directory
-
-Given an OSTree repository, running *outside* a container:
+### Export an OSTree commit into a container image
 
 ```
-$ ostree-ext-cli container export-oci --repo=/path/to/repo exampleos/x86_64/stable /output/exampleos
+$ ostree-ext-cli container export --repo=/path/to/repo exampleos/x86_64/stable docker://quay.io/exampleos/exampleos:stable
 ```
 
 You can then e.g.
``` -$ skopeo copy oci:/output/exampleos containers-storage:localhost/exampleos -$ podman run --rm -ti --entrypoint bash localhost/exampleos +$ podman run --rm -ti --entrypoint bash quay.io/exampleos/exampleos:stable ``` -You can also use e.g. `skopeo copy oci:/output/exampleos docker://quay.io/exampleos/exampleos:latest`. - -### Future: Running an ostree-container as a webserver - -It also should work to run the ostree-container as a webserver, which will expose a webserver that responds to `GET /repo`. - -The effect will be as if it was built from a `Dockerfile` that contains `EXPOSE 8080`; it will work to e.g. -`kubectl run nginx --image=quay.io/exampleos/exampleos:latest --replicas=1` -and then also create a service for it. +Running the container directly for e.g. CI testing is one use case. But more importantly, this container image +can be pushed to any registry, and used as part of ostree-based operating system release engineering. -### Pulling an ostree-container directly +### Importing an ostree-container directly A primary goal of this effort is to make it fully native to an ostree-based operating system to pull a container image directly too. -This project will hence provide a CLI tool and a Rust library which speaks the Docker/OCI protocols enough to directly pull the container image, extracting it into the system `/ostree/repo` repository. +FUTURE: An important aspect of this is that the system will validate the GPG signature of the target OSTree commit, as well as validating the sha256 of the contained objects. -An important aspect of this is that the system will validate the GPG signature of the target OSTree commit, as well as validating the sha256 of the contained objects. +The CLI offers a method to import the exported commit: ``` -$ ostree-ext-cli container import --repo=/ostree/repo quay.io/exampleos/exampleos:stable +$ ostree-ext-cli container import --repo=/ostree/repo docker://quay.io/exampleos/exampleos:stable ``` -A project like rpm-ostree could hence support: +But a project like rpm-ostree could hence support: ``` $ rpm-ostree rebase quay.io/exampleos/exampleos:stable ``` + (Along with the usual `rpm-ostree upgrade` knowing to pull that container image) +### Future: Running an ostree-container as a webserver + +It also should work to run the ostree-container as a webserver, which will expose a webserver that responds to `GET /repo`. + +The effect will be as if it was built from a `Dockerfile` that contains `EXPOSE 8080`; it will work to e.g. +`kubectl run nginx --image=quay.io/exampleos/exampleos:latest --replicas=1` +and then also create a service for it. 
+ ### Integrating with future container deltas See https://blogs.gnome.org/alexl/2020/05/13/putting-container-updates-on-a-diet/ diff --git a/cli/Cargo.toml b/cli/Cargo.toml index c75a0030a..b89793d18 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -9,10 +9,13 @@ readme = "README.md" [dependencies] anyhow = "1.0" -ostree-ext = { path = "../lib", features = ["container"] } +ostree-ext = { path = "../lib" } clap = "2.33.3" structopt = "0.3.21" ostree = { version = "0.11.0", features = ["v2021_2"] } libc = "0.2.92" tokio = { version = "1", features = ["full"] } gio = "0.9.1" +log = "0.4.0" +env_logger = "0.8.3" + diff --git a/cli/src/main.rs b/cli/src/main.rs index b251108a5..f5088f160 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,6 +1,6 @@ use anyhow::Result; +use std::convert::TryInto; use structopt::StructOpt; -use tokio::runtime::Runtime; #[derive(Debug, StructOpt)] struct BuildOpts { @@ -51,12 +51,18 @@ enum ContainerOpts { #[structopt(long)] repo: String, - /// Path to remote image, e.g. quay.io/exampleos/exampleos:latest + /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest + imgref: String, + }, + + /// Print information about an exported ostree-container image. + Info { + /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest imgref: String, }, /// Export an ostree commit to an OCI layout - ExportOCI { + Export { /// Path to the repository #[structopt(long)] repo: String, @@ -64,8 +70,8 @@ enum ContainerOpts { /// The ostree ref or commit to export rev: String, - /// Export to an OCI image layout - path: String, + /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest + imgref: String, }, } @@ -99,36 +105,48 @@ fn tar_export(opts: &ExportOpts) -> Result<()> { Ok(()) } -fn container_import(repo: &str, imgref: &str) -> Result<()> { +async fn container_import(repo: &str, imgref: &str) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let rt = Runtime::new()?; - let res = - rt.block_on(async move { ostree_ext::container::client::import(repo, imgref).await })?; + let imgref = imgref.try_into()?; + let res = ostree_ext::container::import(repo, &imgref).await?; println!("Imported: {}", res.ostree_commit); Ok(()) } -fn container_export_oci(repo: &str, rev: &str, path: &str) -> Result<()> { +async fn container_export(repo: &str, rev: &str, imgref: &str) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let target = ostree_ext::container::buildoci::Target::OciDir(std::path::Path::new(path)); - ostree_ext::container::buildoci::build(repo, rev, target)?; + let imgref = imgref.try_into()?; + let pushed = ostree_ext::container::export(repo, rev, &imgref).await?; + println!("{}", pushed); Ok(()) } -fn run() -> Result<()> { +async fn container_info(imgref: &str) -> Result<()> { + let imgref = imgref.try_into()?; + let info = ostree_ext::container::fetch_manifest_info(&imgref).await?; + println!("{} @{}", imgref, info.manifest_digest); + Ok(()) +} + +async fn run() -> Result<()> { + env_logger::init(); let opt = Opt::from_args(); match opt { Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt), Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt), - Opt::Container(ContainerOpts::Import { repo, imgref }) => container_import(&repo, &imgref), - Opt::Container(ContainerOpts::ExportOCI { repo, rev, path }) => { - container_export_oci(&repo, &rev, &path) + Opt::Container(ContainerOpts::Info { imgref }) => container_info(imgref.as_str()).await, + 
Opt::Container(ContainerOpts::Import { repo, imgref }) => { + container_import(&repo, &imgref).await + } + Opt::Container(ContainerOpts::Export { repo, rev, imgref }) => { + container_export(&repo, &rev, &imgref).await } } } -fn main() { - if let Err(e) = run() { +#[tokio::main] +async fn main() { + if let Err(e) = run().await { eprintln!("error: {:#}", e); std::process::exit(1); } diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 376227bc4..a6fdf9543 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -1,30 +1,16 @@ [package] authors = ["Colin Walters "] +description = "Extension APIs for OSTree" edition = "2018" license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-ext" version = "0.1.0" -description = "Extension APIs for OSTree" - - -[features] -# Enable the container API -container = [ - "cjson", - "flate2", - "futures", - "phf", - "nix", - "oci-distribution", - "tokio", - "serde", - "serde_json", -] [dependencies] anyhow = "1.0" +bytes = "1.0.1" camino = "1.0.4" fn-error-context = "0.1.1" gio = "0.9.1" @@ -36,25 +22,50 @@ libc = "0.2.92" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { version = "0.11.0", features = ["v2021_2" ]} os_pipe = "0.9.2" ostree-sys = "0.7.2" tar = "0.4.33" +tempfile = "3.2.0" +tracing = "0.1" +log = "0.4" + +[dependencies.cjson] +version = "0.1.1" + +[dependencies.flate2] +version = "1.0.20" + +[dependencies.futures] +version = "0.3.13" -#ostree-container deps -cjson = { version = "0.1.1", optional = true } -flate2 = {version = "1.0.20", optional = true } -futures = { version = "0.3.13", optional = true } -phf = { version = "0.8.0", features = ["macros"], optional = true } -nix = { version = "0.20.0", optional = true } -oci-distribution = { version = "0.6.0", optional = true } -tokio = { version = "1", features = ["full"], optional = true } -serde = { version = "1.0.125", optional = true } -serde_json = { version = "1.0.64", optional = true } +[dependencies.nix] +version = "0.20.0" + +[dependencies.ostree] +features = ["v2021_2"] +version = "0.11.0" + +[dependencies.phf] +features = ["macros"] +version = "0.8.0" + +[dependencies.serde] +features = ["derive"] +version = "1.0.125" + +[dependencies.serde_json] +version = "1.0.64" + +[dependencies.tokio] +features = ["full"] +version = "1" + +[dependencies.tokio-util] +features = ["io"] +version = "0.6" [dev-dependencies] clap = "2.33.3" indoc = "1.0.3" sh-inline = "0.1.0" -tempfile = "3.2.0" structopt = "0.3.21" diff --git a/lib/src/container/buildoci.rs b/lib/src/container/buildoci.rs deleted file mode 100644 index a706fb739..000000000 --- a/lib/src/container/buildoci.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! 
APIs for creating container images from OSTree commits
-
-use super::oci;
-use super::Result;
-use crate::tar as ostree_tar;
-use anyhow::Context;
-use fn_error_context::context;
-use std::path::Path;
-
-/// The location to store the generated image
-pub enum Target<'a> {
-    /// Generate an Open Containers image directory layout
-    OciDir(&'a Path),
-}
-
-/// Write an ostree commit to an OCI blob
-#[context("Writing ostree root to blob")]
-fn export_ostree_ref_to_blobdir(
-    repo: &ostree::Repo,
-    rev: &str,
-    ocidir: &openat::Dir,
-) -> Result<oci::Blob> {
-    let commit = repo.resolve_rev(rev, false)?.unwrap();
-    let mut w = oci::LayerWriter::new(ocidir)?;
-    ostree_tar::export_commit(repo, commit.as_str(), &mut w)?;
-    w.complete()
-}
-
-/// Generate an OCI image from a given ostree root
-#[context("Building oci")]
-fn build_oci(repo: &ostree::Repo, commit: &str, ocidir: &Path) -> Result<()> {
-    // Explicitly error if the target exists
-    std::fs::create_dir(ocidir).context("Creating OCI dir")?;
-    let ocidir = &openat::Dir::open(ocidir)?;
-    let writer = &mut oci::OciWriter::new(ocidir)?;
-
-    let rootfs_blob = export_ostree_ref_to_blobdir(repo, commit, ocidir)?;
-    writer.set_root_layer(rootfs_blob);
-    writer.complete()?;
-
-    Ok(())
-}
-
-/// Helper for `build()` that avoids generics
-fn build_impl(repo: &ostree::Repo, ostree_ref: &str, target: Target) -> Result<()> {
-    match target {
-        Target::OciDir(d) => build_oci(repo, ostree_ref, d),
-    }
-}
-
-/// Given an OSTree repository and ref, generate a container image
-pub fn build<S: AsRef<str>>(repo: &ostree::Repo, ostree_ref: S, target: Target) -> Result<()> {
-    build_impl(repo, ostree_ref.as_ref(), target)
-}

diff --git a/lib/src/container/client.rs b/lib/src/container/client.rs
deleted file mode 100644
index f221110f4..000000000
--- a/lib/src/container/client.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-//! APIs for extracting OSTree commits from container images
-
-use std::io::Write;
-
-use super::Result;
-use anyhow::{anyhow, Context};
-use fn_error_context::context;
-use oci_distribution::manifest::OciDescriptor;
-
-/// The result of an import operation
-#[derive(Debug)]
-pub struct Import {
-    /// The ostree commit that was imported
-    pub ostree_commit: String,
-    /// The image digest retrieved
-    pub image_digest: String,
-}
-
-#[context("Fetching layer descriptor")]
-async fn fetch_layer_descriptor(
-    client: &mut oci_distribution::Client,
-    image_ref: &oci_distribution::Reference,
-) -> Result<(String, OciDescriptor)> {
-    let (manifest, digest) = client.pull_manifest(image_ref).await?;
-    let mut layers = manifest.layers;
-    let orig_layer_count = layers.len();
-    layers.retain(|layer| {
-        matches!(
-            layer.media_type.as_str(),
-            super::oci::DOCKER_TYPE_LAYER | oci_distribution::manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE
-        )
-    });
-    let n = layers.len();
-
-    if let Some(layer) = layers.into_iter().next() {
-        if n > 1 {
-            Err(anyhow!("Expected 1 layer, found {}", n))
-        } else {
-            Ok((digest, layer))
-        }
-    } else {
-        Err(anyhow!("No layers found (orig: {})", orig_layer_count))
-    }
-}
-
-#[allow(unsafe_code)]
-#[context("Importing {}", image_ref)]
-async fn import_impl(repo: &ostree::Repo, image_ref: &str) -> Result<Import> {
-    let image_ref: oci_distribution::Reference = image_ref.parse()?;
-    let client = &mut oci_distribution::Client::default();
-    let auth = &oci_distribution::secrets::RegistryAuth::Anonymous;
-    client
-        .auth(
-            &image_ref,
-            auth,
-            &oci_distribution::secrets::RegistryOperation::Pull,
-        )
-        .await?;
-    let (image_digest, layer) = fetch_layer_descriptor(client, &image_ref).await?;
-
-    let req = client
-        .request_layer(&image_ref, &layer.digest)
-        .await?
-        .bytes_stream();
-    let (pipein, mut pipeout) = os_pipe::pipe()?;
-    let copier = tokio::task::spawn_blocking(move || -> anyhow::Result<()> {
-        let req = futures::executor::block_on_stream(req);
-        for v in req {
-            let v = v.map_err(anyhow::Error::msg).context("Writing buf")?;
-            pipeout.write_all(&v)?;
-        }
-        Ok(())
-    });
-    let repo = repo.clone();
-    let import = tokio::task::spawn_blocking(move || {
-        let gz = flate2::read::GzDecoder::new(pipein);
-        crate::tar::import_tar(&repo, gz)
-    });
-    let (import_res, copy_res) = tokio::join!(import, copier);
-    copy_res??;
-    let ostree_commit = import_res??;
-
-    Ok(Import {
-        ostree_commit,
-        image_digest,
-    })
-}
-
-/// Download and import the referenced container
-pub async fn import<I: AsRef<str>>(repo: &ostree::Repo, image_ref: I) -> Result<Import> {
-    Ok(import_impl(repo, image_ref.as_ref()).await?)
-}

diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs
new file mode 100644
index 000000000..0f617ba1c
--- /dev/null
+++ b/lib/src/container/export.rs
@@ -0,0 +1,99 @@
+//! APIs for creating container images from OSTree commits
+
+use super::*;
+use crate::{tar as ostree_tar, variant_utils};
+use anyhow::Context;
+use fn_error_context::context;
+use std::path::Path;
+
+/// Write an ostree commit to an OCI blob
+#[context("Writing ostree root to blob")]
+fn export_ostree_ref_to_blobdir(
+    repo: &ostree::Repo,
+    rev: &str,
+    ocidir: &openat::Dir,
+) -> Result<oci::Blob> {
+    let commit = repo.resolve_rev(rev, false)?.unwrap();
+    let mut w = oci::LayerWriter::new(ocidir)?;
+    ostree_tar::export_commit(repo, commit.as_str(), &mut w)?;
+    w.complete()
+}
+
+/// Generate an OCI image from a given ostree root
+#[context("Building oci")]
+fn build_oci(repo: &ostree::Repo, rev: &str, ocidir_path: &Path) -> Result<ImageReference> {
+    // Explicitly error if the target exists
+    std::fs::create_dir(ocidir_path).context("Creating OCI dir")?;
+    let ocidir = &openat::Dir::open(ocidir_path)?;
+    let writer = &mut oci::OciWriter::new(ocidir)?;
+
+    let commit = repo.resolve_rev(rev, false)?.unwrap();
+    let commit = commit.as_str();
+    let (commit_v, _) = repo.load_commit(commit)?;
+    let commit_meta = &variant_utils::variant_tuple_get(&commit_v, 0).unwrap();
+    let commit_meta = glib::VariantDict::new(Some(commit_meta));
+
+    if let Some(version) =
+        commit_meta.lookup_value("version", Some(glib::VariantTy::new("s").unwrap()))
+    {
+        let version = version.get_str().unwrap();
+        writer.add_config_annotation("version", version);
+        writer.add_manifest_annotation("ostree.version", version);
+    }
+
+    writer.add_config_annotation(OSTREE_COMMIT_LABEL, commit);
+    writer.add_manifest_annotation(OSTREE_COMMIT_LABEL, commit);
+
+    let rootfs_blob = export_ostree_ref_to_blobdir(repo, commit, ocidir)?;
+    writer.set_root_layer(rootfs_blob);
+    writer.complete()?;
+
+    Ok(ImageReference {
+        transport: Transport::OciDir,
+        name: ocidir_path.to_str().unwrap().to_string(),
+    })
+}
+
+/// Helper for `export()` that avoids generics
+async fn build_impl(
+    repo: &ostree::Repo,
+    ostree_ref: &str,
+    dest: &ImageReference,
+) -> Result<ImageReference> {
+    if dest.transport == Transport::OciDir {
+        let _copied: ImageReference = build_oci(repo, ostree_ref, Path::new(dest.name.as_str()))?;
+    } else {
+        let tempdir = tempfile::tempdir_in("/var/tmp")?;
+        let tempdest = tempdir.path().join("d");
+        let tempdest = tempdest.to_str().unwrap();
+        let src = build_oci(repo, ostree_ref, Path::new(tempdest))?;
+
+        let mut cmd = skopeo::new_cmd();
+        log::trace!("Copying {} to {}", src, dest);
+        cmd.stdout(std::process::Stdio::null())
+            .arg("copy")
+            .arg(src.to_string())
+            .arg(dest.to_string());
+        let proc = super::skopeo::spawn(cmd)?;
+        let output = proc.wait_with_output().await?;
+        if !output.status.success() {
+            let stderr = String::from_utf8_lossy(&output.stderr);
+            return Err(anyhow::anyhow!("skopeo failed: {}\n", stderr));
+        }
+    }
+    // FIXME - it's obviously broken to do this push -> inspect cycle because of the possibility
+    // of a race condition, but we need to patch skopeo to have the equivalent of `podman push --digestfile`.
+    let info = super::import::fetch_manifest_info(dest).await?;
+    Ok(dest.with_digest(info.manifest_digest.as_str()))
+}
+
+/// Given an OSTree repository and ref, generate a container image.
+///
+/// The returned `ImageReference` will contain a digested (e.g. `@sha256:`) version of the destination.
+pub async fn export<S: AsRef<str>>(
+    repo: &ostree::Repo,
+    ostree_ref: S,
+    dest: &ImageReference,
+) -> Result<ImageReference> {
+    build_impl(repo, ostree_ref.as_ref(), dest).await
+}

diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs
new file mode 100644
index 000000000..1bf2ba13d
--- /dev/null
+++ b/lib/src/container/import.rs
@@ -0,0 +1,151 @@
+//! APIs for extracting OSTree commits from container images
+
+use super::*;
+use anyhow::anyhow;
+use fn_error_context::context;
+use futures::prelude::*;
+use std::io::prelude::*;
+use std::process::Stdio;
+use tokio::io::AsyncRead;
+
+/// Download the manifest for a target image.
+#[context("Fetching manifest")]
+pub async fn fetch_manifest_info(imgref: &ImageReference) -> Result<OstreeContainerManifestInfo> {
+    let (_, manifest_digest) = fetch_manifest(imgref).await?;
+    // Sadly this seems to be lost when pushing to e.g. quay.io, which means we can't use it.
+    // let commit = manifest
+    //     .annotations
+    //     .as_ref()
+    //     .map(|a| a.get(OSTREE_COMMIT_LABEL))
+    //     .flatten()
+    //     .ok_or_else(|| anyhow!("Missing annotation {}", OSTREE_COMMIT_LABEL))?;
+    Ok(OstreeContainerManifestInfo { manifest_digest })
+}
+
+/// Download the manifest for a target image.
+#[context("Fetching manifest")]
+async fn fetch_manifest(imgref: &ImageReference) -> Result<(oci::Manifest, String)> {
+    let mut proc = skopeo::new_cmd();
+    proc.args(&["inspect", "--raw"]).arg(imgref.to_string());
+    proc.stdout(Stdio::piped());
+    let proc = skopeo::spawn(proc)?.wait_with_output().await?;
+    if !proc.status.success() {
+        let errbuf = String::from_utf8_lossy(&proc.stderr);
+        return Err(anyhow!("skopeo inspect failed\n{}", errbuf));
+    }
+    let raw_manifest = proc.stdout;
+    let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), &raw_manifest)?;
+    let digest = format!("sha256:{}", hex::encode(digest.as_ref()));
+    Ok((serde_json::from_slice(&raw_manifest)?, digest))
+}
+
+/// Bridge from AsyncRead to Read.
+///
+/// This creates a pipe and a "driver" future (which could be spawned or not).
+fn copy_async_read_to_sync_pipe<S: AsyncRead + Send + Unpin + 'static>(
+    s: S,
+) -> Result<(impl Read, impl Future<Output = Result<()>>)> {
+    let (pipein, mut pipeout) = os_pipe::pipe()?;
+
+    let copier = async move {
+        let mut input = tokio_util::io::ReaderStream::new(s).boxed();
+        while let Some(buf) = input.next().await {
+            let buf = buf?;
+            // TODO blocking executor
+            pipeout.write_all(&buf)?;
+        }
+        Ok::<_, anyhow::Error>(())
+    };
+
+    Ok((pipein, copier))
+}
+
+/// Fetch a remote docker/OCI image into a local tarball, extract a specific blob.
+async fn fetch_oci_archive_blob<'s>(
+    imgref: &ImageReference,
+    blobid: &str,
+) -> Result<impl AsyncRead> {
+    let mut proc = skopeo::new_cmd();
+    proc.stdout(Stdio::null());
+    let tempdir = tempfile::tempdir_in("/var/tmp")?;
+    let target = &tempdir.path().join("d");
+    tracing::trace!("skopeo pull starting to {:?}", target);
+    proc.arg("copy")
+        .arg(imgref.to_string())
+        .arg(format!("oci://{}", target.to_str().unwrap()));
+    skopeo::spawn(proc)?
+        .wait()
+        .err_into()
+        .and_then(|e| async move {
+            if !e.success() {
+                return Err(anyhow!("skopeo failed: {}", e));
+            }
+            Ok(())
+        })
+        .await?;
+    tracing::trace!("skopeo pull done");
+    Ok(tokio::fs::File::open(target.join("blobs/sha256/").join(blobid)).await?)
+}
+
+/// The result of an import operation
+#[derive(Debug)]
+pub struct Import {
+    /// The ostree commit that was imported
+    pub ostree_commit: String,
+    /// The image digest retrieved
+    pub image_digest: String,
+}
+
+fn find_layer_blobid(manifest: &oci::Manifest) -> Result<String> {
+    let layers: Vec<_> = manifest
+        .layers
+        .iter()
+        .filter(|&layer| {
+            matches!(
+                layer.media_type.as_str(),
+                super::oci::DOCKER_TYPE_LAYER | oci::OCI_TYPE_LAYER
+            )
+        })
+        .collect();
+
+    let n = layers.len();
+    if let Some(layer) = layers.into_iter().next() {
+        if n > 1 {
+            Err(anyhow!("Expected 1 layer, found {}", n))
+        } else {
+            let digest = layer.digest.as_str();
+            let hash = digest
+                .strip_prefix("sha256:")
+                .ok_or_else(|| anyhow!("Expected sha256: in digest: {}", digest))?;
+            Ok(hash.into())
+        }
+    } else {
+        Err(anyhow!("No layers found (orig: {})", manifest.layers.len()))
+    }
+}
+
+/// Fetch a container image and import its embedded OSTree commit.
+#[context("Importing {}", imgref)]
+pub async fn import(repo: &ostree::Repo, imgref: &ImageReference) -> Result<Import> {
+    let (manifest, image_digest) = fetch_manifest(imgref).await?;
+    let manifest = &manifest;
+    let layerid = find_layer_blobid(manifest)?;
+    tracing::trace!("target blob: {}", layerid);
+    let blob = fetch_oci_archive_blob(imgref, layerid.as_str()).await?;
+    tracing::trace!("reading blob");
+    let (pipein, copydriver) = copy_async_read_to_sync_pipe(blob)?;
+    let repo = repo.clone();
+    let import = tokio::task::spawn_blocking(move || {
+        // FIXME don't hardcode compression, we need to detect it
+        let gz = flate2::read::GzDecoder::new(pipein);
+        crate::tar::import_tar(&repo, gz)
+    })
+    .map_err(anyhow::Error::msg);
+    let (import, _copydriver) = tokio::try_join!(import, copydriver)?;
+    let ostree_commit = import?;
+    tracing::trace!("created commit {}", ostree_commit);
+    Ok(Import {
+        ostree_commit,
+        image_digest,
+    })
+}

diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index 613283e63..b4b5f3a43 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -8,11 +8,165 @@
 #![forbid(unused_must_use)]
 #![deny(unsafe_code)]
 
+use anyhow::anyhow;
+use std::convert::{TryFrom, TryInto};
+
+/// The label injected into a container image that contains the ostree commit SHA-256.
+pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit";
+
 /// Our generic catchall fatal error, expected to be converted
 /// to a string to output to a terminal or logs.
 type Result<T> = anyhow::Result<T>;
 
-pub mod buildoci;
-pub mod client;
+/// Information about the image manifest.
+pub struct OstreeContainerManifestInfo {
+    /// The manifest digest (`sha256:<digest>`)
+    pub manifest_digest: String,
+}
+
+/// A backend/transport for OCI/Docker images.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Transport {
+    /// A remote Docker/OCI registry (`registry:` or `docker://`)
+    Registry,
+    /// A local OCI directory (`oci:`)
+    OciDir,
+    /// A local OCI archive tarball (`oci-archive:`)
+    OciArchive,
+}
+
+/// Combination of a remote image reference and transport.
+///
+/// For example,
+#[derive(Debug)]
+pub struct ImageReference {
+    /// The storage and transport for the image
+    pub transport: Transport,
+    /// The image name (e.g. `quay.io/somerepo/someimage:latest`)
+    pub name: String,
+}
+
+impl ImageReference {
+    /// Create a new `ImageReference` that refers to a specific digest.
+ /// + /// ```rust + /// use std::convert::TryInto; + /// let r: ostree_ext::container::ImageReference = "docker://quay.io/exampleos/exampleos:latest".try_into().unwrap(); + /// let n = r.with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + /// assert_eq!(n.name, "quay.io/exampleos/exampleos@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + /// ``` + pub fn with_digest(&self, digest: &str) -> Self { + let name = self.name.as_str(); + let name = if let Some(idx) = name.rfind('@') { + name.split_at(idx).0 + } else if let Some(idx) = name.rfind(':') { + name.split_at(idx).0 + } else { + name + }; + Self { + transport: self.transport, + name: format!("{}@{}", name, digest), + } + } +} + +impl TryFrom<&str> for Transport { + type Error = anyhow::Error; + + fn try_from(value: &str) -> Result { + Ok(match value { + "registry" | "docker" => Self::Registry, + "oci" => Self::OciDir, + "oci-archive" => Self::OciArchive, + o => return Err(anyhow!("Unknown transport '{}'", o)), + }) + } +} + +impl TryFrom<&str> for ImageReference { + type Error = anyhow::Error; + + fn try_from(value: &str) -> Result { + let mut parts = value.splitn(2, ":"); + let transport_name = parts.next().unwrap(); + let transport: Transport = transport_name.try_into()?; + let mut name = parts + .next() + .ok_or_else(|| anyhow!("Missing ':' in {}", value))?; + if name.is_empty() { + return Err(anyhow!("Invalid empty name in {}", value)); + } + if transport_name == "docker" { + name = name + .strip_prefix("//") + .ok_or_else(|| anyhow!("Missing // in docker:// in {}", value))?; + } + Ok(Self { + transport, + name: name.to_string(), + }) + } +} + +impl std::fmt::Display for Transport { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + // TODO once skopeo supports this, canonicalize as registry: + Self::Registry => "docker://", + Self::OciArchive => "oci-archive:", + Self::OciDir => "oci:", + }; + f.write_str(s) + } +} + +impl std::fmt::Display for ImageReference { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}{}", self.transport, self.name) + } +} + +mod export; +pub use export::*; +mod import; +pub use import::*; +mod oci; +mod skopeo; + +#[cfg(test)] +mod tests { + use super::*; + + const INVALID_IRS: &[&str] = &["", "foo://", "docker:blah", "registry:", "foo:bar"]; + + #[test] + fn test_imagereference() { + let ir: ImageReference = "registry:quay.io/exampleos/blah".try_into().unwrap(); + assert_eq!(ir.transport, Transport::Registry); + assert_eq!(ir.name, "quay.io/exampleos/blah"); + assert_eq!(ir.to_string(), "docker://quay.io/exampleos/blah"); + + let digested = ir + .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + assert_eq!(digested.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + assert_eq!(digested.with_digest("sha256:52f562806109f5746be31ccf21f5569fd2ce8c32deb0d14987b440ed39e34e20").name, "quay.io/exampleos/blah@sha256:52f562806109f5746be31ccf21f5569fd2ce8c32deb0d14987b440ed39e34e20"); + + let with_tag: ImageReference = "registry:quay.io/exampleos/blah:sometag" + .try_into() + .unwrap(); + let digested = with_tag + .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + assert_eq!(digested.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); -pub mod oci; + for &v in INVALID_IRS { + match 
ImageReference::try_from(v) { + Ok(_) => panic!("Should fail to parse: {}", v), + Err(_) => {} + } + } + let ir: ImageReference = "oci:somedir".try_into().unwrap(); + assert_eq!(ir.transport, Transport::OciDir); + assert_eq!(ir.name, "somedir"); + } +} diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 045af1ea5..7c091facc 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -1,4 +1,6 @@ -//! Unstable OCI API +//! Internal API to interact with Open Container Images; mostly +//! oriented towards generating images. + use anyhow::{anyhow, Result}; use flate2::write::GzEncoder; use fn_error_context::context; @@ -6,7 +8,10 @@ use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; use phf::phf_map; use serde::{Deserialize, Serialize}; -use std::io::prelude::*; +use std::{ + collections::{BTreeMap, HashMap}, + io::prelude::*, +}; /// Map the value from `uname -m` to the Go architecture. /// TODO find a more canonical home for this. @@ -16,10 +21,11 @@ static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! { }; // OCI types, see https://github.com/opencontainers/image-spec/blob/master/media-types.md -const OCI_TYPE_CONFIG_JSON: &str = "application/vnd.oci.image.config.v1+json"; -const OCI_TYPE_MANIFEST_JSON: &str = "application/vnd.oci.image.manifest.v1+json"; -const OCI_TYPE_LAYER: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; - +pub(crate) const OCI_TYPE_CONFIG_JSON: &str = "application/vnd.oci.image.config.v1+json"; +pub(crate) const OCI_TYPE_MANIFEST_JSON: &str = "application/vnd.oci.image.manifest.v1+json"; +pub(crate) const OCI_TYPE_LAYER: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; +#[allow(dead_code)] +pub(crate) const IMAGE_LAYER_GZIP_MEDIA_TYPE: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; pub(crate) const DOCKER_TYPE_LAYER: &str = "application/vnd.docker.image.rootfs.diff.tar.gzip"; /// Path inside an OCI directory to the blobs @@ -70,6 +76,7 @@ pub(crate) struct Manifest { pub schema_version: u32, pub layers: Vec, + pub annotations: Option>, } /// Completed blob metadata @@ -109,6 +116,9 @@ pub(crate) struct LayerWriter<'a> { pub(crate) struct OciWriter<'a> { pub(crate) dir: &'a openat::Dir, + config_annotations: HashMap, + manifest_annotations: HashMap, + root_layer: Option, } @@ -130,6 +140,8 @@ impl<'a> OciWriter<'a> { Ok(Self { dir, + config_annotations: Default::default(), + manifest_annotations: Default::default(), root_layer: None, }) } @@ -138,6 +150,19 @@ impl<'a> OciWriter<'a> { assert!(self.root_layer.replace(layer).is_none()) } + pub(crate) fn add_manifest_annotation, V: AsRef>(&mut self, k: K, v: V) { + let k = k.as_ref(); + let v = v.as_ref(); + self.manifest_annotations + .insert(k.to_string(), v.to_string()); + } + + pub(crate) fn add_config_annotation, V: AsRef>(&mut self, k: K, v: V) { + let k = k.as_ref(); + let v = v.as_ref(); + self.config_annotations.insert(k.to_string(), v.to_string()); + } + #[context("Writing OCI")] pub(crate) fn complete(&mut self) -> Result<()> { let utsname = nix::sys::utsname::uname(); @@ -149,6 +174,9 @@ impl<'a> OciWriter<'a> { let config = serde_json::json!({ "architecture": arch, "os": "linux", + "config": { + "Labels": self.config_annotations, + }, "rootfs": { "type": "layers", "diff_ids": [ root_layer_id ], @@ -162,7 +190,7 @@ impl<'a> OciWriter<'a> { let config_blob = write_json_blob(self.dir, &config)?; let manifest_data = serde_json::json!({ - "schemaVersion": 2, + "schemaVersion": default_schema_version(), "config": { "mediaType": OCI_TYPE_CONFIG_JSON, 
"size": config_blob.size, @@ -174,11 +202,12 @@ impl<'a> OciWriter<'a> { "digest": rootfs_blob.blob.digest_id(), } ], + "annotations": self.manifest_annotations, }); let manifest_blob = write_json_blob(self.dir, &manifest_data)?; let index_data = serde_json::json!({ - "schemaVersion": 2, + "schemaVersion": default_schema_version(), "manifests": [ { "mediaType": OCI_TYPE_MANIFEST_JSON, diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs new file mode 100644 index 000000000..ee926fab3 --- /dev/null +++ b/lib/src/container/skopeo.rs @@ -0,0 +1,19 @@ +//! Fork skopeo as a subprocess + +use super::Result; +use anyhow::Context; +use std::process::Stdio; +use tokio::process::Command; + +/// Create a Command builder for skopeo. +pub(crate) fn new_cmd() -> tokio::process::Command { + let mut cmd = Command::new("skopeo"); + cmd.kill_on_drop(true); + cmd +} + +/// Spawn the child process +pub(crate) fn spawn(mut cmd: Command) -> Result { + let cmd = cmd.stdin(Stdio::null()).stderr(Stdio::piped()); + Ok(cmd.spawn().context("Failed to exec skopeo")?) +} diff --git a/lib/src/container/tests/it/main.rs b/lib/src/container/tests/it/main.rs deleted file mode 100644 index 4591e5c86..000000000 --- a/lib/src/container/tests/it/main.rs +++ /dev/null @@ -1,117 +0,0 @@ -use anyhow::{anyhow, Context, Result}; -use camino::{Utf8Path, Utf8PathBuf}; -use flate2::read::GzDecoder; -use fn_error_context::context; -use indoc::indoc; -use sh_inline::bash; -use std::fs::File; -use std::io::BufReader; - -use ostree_container::oci as myoci; - -const EXAMPLEOS_TAR: &[u8] = include_bytes!("fixtures/exampleos.tar.zst"); -const TESTREF: &str = "exampleos/x86_64/stable"; -const CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120"; - -#[context("Generating test OCI")] -fn generate_test_oci(dir: &Utf8Path) -> Result { - let cancellable = gio::NONE_CANCELLABLE; - let path = Utf8Path::new(dir); - let tarpath = &path.join("exampleos.tar.zst"); - std::fs::write(tarpath, EXAMPLEOS_TAR)?; - bash!( - indoc! 
{" - cd {path} - ostree --repo=repo-archive init --mode=archive - ostree --repo=repo-archive commit -b {testref} --tree=tar=exampleos.tar.zst - ostree --repo=repo-archive show {testref} - ostree --repo=repo-archive ls -R -X -C {testref} - "}, - testref = TESTREF, - path = path.as_str() - )?; - std::fs::remove_file(tarpath)?; - let repopath = &path.join("repo-archive"); - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?; - let (_, rev) = repo.read_commit(TESTREF, cancellable)?; - let (commitv, _) = repo.load_commit(rev.as_str())?; - assert_eq!( - ostree::commit_get_content_checksum(&commitv) - .unwrap() - .as_str(), - CONTENT_CHECKSUM - ); - let ocipath = path.join("exampleos-oci"); - let ocitarget = ostree_container::buildoci::Target::OciDir(ocipath.as_ref()); - ostree_container::buildoci::build(repo, TESTREF, ocitarget)?; - bash!(r"skopeo inspect oci:{ocipath}", ocipath = ocipath.as_str())?; - Ok(ocipath) -} - -fn read_blob(ocidir: &Utf8Path, digest: &str) -> Result { - let digest = digest - .strip_prefix("sha256:") - .ok_or_else(|| anyhow!("Unknown algorithim in digest {}", digest))?; - let f = File::open(ocidir.join("blobs/sha256").join(digest)) - .with_context(|| format!("Opening blob {}", digest))?; - Ok(f) -} - -#[context("Parsing OCI")] -fn find_layer_in_oci(ocidir: &Utf8Path) -> Result>> { - let f = std::io::BufReader::new( - File::open(ocidir.join("index.json")).context("Opening index.json")?, - ); - let index: myoci::Index = serde_json::from_reader(f)?; - let manifest = index - .manifests - .get(0) - .ok_or_else(|| anyhow!("Missing manifest in index.json"))?; - let f = read_blob(ocidir, &manifest.digest)?; - let manifest: myoci::Manifest = serde_json::from_reader(f)?; - let layer = manifest - .layers - .iter() - .find(|layer| { - matches!( - layer.media_type.as_str(), - myoci::DOCKER_TYPE_LAYER | oci_distribution::manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE - ) - }) - .ok_or_else(|| anyhow!("Failed to find rootfs layer"))?; - let blob = std::io::BufReader::new(read_blob(ocidir, &layer.digest)?); - let gz = flate2::read::GzDecoder::new(blob); - Ok(gz) -} - -#[test] -fn test_tar_e2e() -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; - - let tempdir = tempfile::tempdir()?; - let path = Utf8Path::from_path(tempdir.path()).unwrap(); - let srcdir = &path.join("src"); - std::fs::create_dir(srcdir)?; - let ocidir = &generate_test_oci(srcdir)?; - let destdir = &path.join("dest"); - std::fs::create_dir(destdir)?; - let destrepodir = &destdir.join("repo"); - let destrepo = ostree::Repo::new_for_path(destrepodir); - destrepo.create(ostree::RepoMode::Archive, cancellable)?; - - let tarf = find_layer_in_oci(ocidir)?; - let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, tarf)?; - let (commitdata, _) = destrepo.load_commit(&imported_commit)?; - assert_eq!( - CONTENT_CHECKSUM, - ostree::commit_get_content_checksum(&commitdata) - .unwrap() - .as_str() - ); - bash!( - "ostree --repo={destrepodir} ls -R {imported_commit}", - destrepodir = destrepodir.as_str(), - imported_commit = imported_commit.as_str() - )?; - Ok(()) -} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 0ca962e94..791c38249 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -13,9 +13,9 @@ /// to a string to output to a terminal or logs. 
 type Result<T> = anyhow::Result<T>;
-#[cfg(feature = "container")]
 pub mod container;
 pub mod diff;
 pub mod ostree_ext;
 pub mod tar;
+#[allow(unsafe_code)]
 mod variant_utils;
diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 17aa0a0db..2294b7b56 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -8,17 +8,12 @@
 use fn_error_context::context;
 use gio::prelude::*;
 use gvariant::aligned_bytes::TryAsAligned;
 use gvariant::{gv, Marker, Structure};
-use std::{borrow::Cow, collections::HashSet, path::Path};
+use std::borrow::Cow;
+use std::collections::HashSet;
 // This way the default ostree -> sysroot/ostree symlink works.
 const OSTREEDIR: &str = "sysroot/ostree";
-/// The location to store the generated image
-pub enum Target<'a> {
-    /// Generate an Open Containers image directory layout
-    OciDir(&'a Path),
-}
-
 /// Convert /usr/etc back to /etc
 fn map_path(p: &Utf8Path) -> std::borrow::Cow<Utf8Path> {
 match p.strip_prefix("./usr/etc") {
diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index 631c64477..c6d9118b7 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -38,6 +38,7 @@
 enum ImportState {
 Importing(String),
 }
+/// Statistics from import.
 #[derive(Debug, Default)]
 struct ImportStats {
 dirtree: u32,
diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs
index fcd1cd78a..39be41d2c 100644
--- a/lib/src/variant_utils.rs
+++ b/lib/src/variant_utils.rs
@@ -3,7 +3,6 @@
 use glib::translate::*;
-#[allow(unsafe_code)]
 pub(crate) fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> glib::Variant {
 unsafe {
 let ty = ty.to_glib_none();
@@ -16,7 +15,6 @@
 }
 }
-#[allow(unsafe_code)]
 pub(crate) fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant {
 unsafe { from_glib_full(glib_sys::g_variant_get_normal_form(v.to_glib_none().0)) }
 }
@@ -25,6 +23,16 @@
 pub(crate) fn variant_normal_from_bytes(ty: &str, bytes: glib::Bytes) -> glib::Variant {
 variant_get_normal_form(&variant_new_from_bytes(ty, bytes, false))
 }
+pub(crate) fn variant_tuple_get(v: &glib::Variant, n: usize) -> Option<glib::Variant> {
+    let v = v.to_glib_none();
+    let l = unsafe { glib_sys::g_variant_n_children(v.0) };
+    if n >= l {
+        None
+    } else {
+        unsafe { from_glib_full(glib_sys::g_variant_get_child_value(v.0, n)) }
+    }
+}
+
 #[cfg(test)]
 mod tests {
 use super::*;
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index a976015a0..0e1da6c73 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -1,9 +1,10 @@
-use anyhow::Result;
+use anyhow::{Context, Result};
 use camino::{Utf8Path, Utf8PathBuf};
 use fn_error_context::context;
 use indoc::indoc;
+use ostree_ext::container::{ImageReference, Transport};
 use sh_inline::bash;
-use std::io::Write;
+use std::{io::Write, process::Command};
 const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
 const EXAMPLEOS_V1: &[u8] = include_bytes!("fixtures/exampleos-v1.tar.zst");
@@ -11,6 +12,7 @@
 const TESTREF: &str = "exampleos/x86_64/stable";
 const EXAMPLEOS_CONTENT_CHECKSUM: &str =
 "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
+#[context("Generating test repo")]
 fn generate_test_repo(dir: &Utf8Path) -> Result<Utf8PathBuf> {
 let src_tarpath = &dir.join("exampleos.tar.zst");
 std::fs::write(src_tarpath, EXAMPLEOS_V0)?;
@@ -19,7 +21,7 @@
 indoc!
{" cd {dir} ostree --repo=repo init --mode=archive - ostree --repo=repo commit -b {testref} --tree=tar=exampleos.tar.zst + ostree --repo=repo commit -b {testref} --bootable --add-metadata-string=version=42.0 --tree=tar=exampleos.tar.zst ostree --repo=repo show {testref} "}, testref = TESTREF, @@ -98,6 +100,58 @@ fn test_tar_import_export() -> Result<()> { Ok(()) } +fn skopeo_inspect(imgref: &str) -> Result { + let out = Command::new("skopeo") + .args(&["inspect", imgref]) + .stdout(std::process::Stdio::piped()) + .output()?; + Ok(String::from_utf8(out.stdout)?) +} + +#[tokio::test] +async fn test_container_import_export() -> Result<()> { + let cancellable = gio::NONE_CANCELLABLE; + + let tempdir = tempfile::tempdir_in("/var/tmp")?; + let path = Utf8Path::from_path(tempdir.path()).unwrap(); + let srcdir = &path.join("src"); + std::fs::create_dir(srcdir)?; + let destdir = &path.join("dest"); + std::fs::create_dir(destdir)?; + let srcrepopath = &generate_test_repo(srcdir)?; + let srcrepo = &ostree::Repo::new_for_path(srcrepopath); + srcrepo.open(cancellable)?; + let testrev = srcrepo + .resolve_rev(TESTREF, false) + .context("Failed to resolve ref")? + .unwrap(); + let destrepo = &ostree::Repo::new_for_path(destdir); + destrepo.create(ostree::RepoMode::BareUser, cancellable)?; + + let srcoci_path = &srcdir.join("oci"); + let srcoci = ImageReference { + transport: Transport::OciDir, + name: srcoci_path.as_str().to_string(), + }; + let pushed = ostree_ext::container::export(srcrepo, TESTREF, &srcoci) + .await + .context("exporting")?; + assert!(srcoci_path.exists()); + let digest = pushed.name.rsplitn(2, "@").next().unwrap(); + + let inspect = skopeo_inspect(&srcoci.to_string())?; + assert!(inspect.contains(r#""version": "42.0""#)); + + let inspect = ostree_ext::container::fetch_manifest_info(&srcoci).await?; + assert_eq!(inspect.manifest_digest, digest); + + let import = ostree_ext::container::import(destrepo, &srcoci) + .await + .context("importing")?; + assert_eq!(import.ostree_commit, testrev.as_str()); + Ok(()) +} + #[test] fn test_diff() -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; From 6f444544a79bae7958a7dc4730f7a26d86cf91a0 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 26 Apr 2021 17:01:54 -0400 Subject: [PATCH 029/775] Make variant_utils a public module Getting things into gtk-rs would take a while before we can depend on it, and perhaps we want to move in the direction of the `gvariant` crate anyways. For now let's just make this a public stable API so it can be shared between here and rpm-ostree and e.g. ostree's test suite. --- lib/src/container/export.rs | 2 +- lib/src/lib.rs | 2 +- lib/src/variant_utils.rs | 16 +++++++++++----- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 0f617ba1c..93aa5b652 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -30,7 +30,7 @@ fn build_oci(repo: &ostree::Repo, rev: &str, ocidir_path: &Path) -> Result glib::Variant { +/// Create a new GVariant from data. +pub fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> glib::Variant { unsafe { let ty = ty.to_glib_none(); let ty: *const libc::c_char = ty.0; @@ -15,15 +18,18 @@ pub(crate) fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool } } -pub(crate) fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant { +/// Get the normal form of a GVariant. 
+pub fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant {
 unsafe { from_glib_full(glib_sys::g_variant_get_normal_form(v.to_glib_none().0)) }
 }
-pub(crate) fn variant_normal_from_bytes(ty: &str, bytes: glib::Bytes) -> glib::Variant {
+/// Create a normal-form GVariant from raw bytes.
+pub fn variant_normal_from_bytes(ty: &str, bytes: glib::Bytes) -> glib::Variant {
 variant_get_normal_form(&variant_new_from_bytes(ty, bytes, false))
 }
-pub(crate) fn variant_tuple_get(v: &glib::Variant, n: usize) -> Option<glib::Variant> {
+/// Extract a child from a variant.
+pub fn variant_get_child_value(v: &glib::Variant, n: usize) -> Option<glib::Variant> {
 let v = v.to_glib_none();
 let l = unsafe { glib_sys::g_variant_n_children(v.0) };
 if n >= l {

From cb223a0a6927aba88808c76658fa38848463be46 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 27 Apr 2021 09:27:30 -0400
Subject: [PATCH 030/775] Make tar import API async

We're currently using the synchronous tar library, so we need to
do bridging. This pushes the bridging down a layer, changing our
tar import API to be async.

Then things will be even cleaner if we switch to e.g.
https://crates.io/crates/tokio-tar
But we need https://github.com/vorot93/tokio-tar/pull/3
---
 cli/src/main.rs             | 13 +++----
 lib/Cargo.toml              |  1 +
 lib/src/async_util.rs       | 25 ++++++++++++
 lib/src/container/import.rs | 36 ++---------------
 lib/src/lib.rs              |  1 +
 lib/src/tar/import.rs       | 78 +++++++++++++++++++++----------------
 lib/tests/it/main.rs        |  9 ++---
 7 files changed, 86 insertions(+), 77 deletions(-)
 create mode 100644 lib/src/async_util.rs

diff --git a/cli/src/main.rs b/cli/src/main.rs
index f5088f160..17c2ba762 100644
--- a/cli/src/main.rs
+++ b/cli/src/main.rs
@@ -85,15 +85,14 @@
 enum Opt {
 Container(ContainerOpts),
 }
-fn tar_import(opts: &ImportOpts) -> Result<()> {
+async fn tar_import(opts: &ImportOpts) -> Result<()> {
 let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?;
 let imported = if let Some(path) = opts.path.as_ref() {
- let instream = std::io::BufReader::new(std::fs::File::open(path)?);
- ostree_ext::tar::import_tar(repo, instream)?
+ let instream = tokio::fs::File::open(path).await?;
+ ostree_ext::tar::import_tar(repo, instream).await?
 } else {
- let stdin = std::io::stdin();
- let stdin = stdin.lock();
- ostree_ext::tar::import_tar(repo, stdin)?
+ let stdin = tokio::io::stdin();
+ ostree_ext::tar::import_tar(repo, stdin).await?
 };
 println!("Imported: {}", imported);
 Ok(())
@@ -132,7 +131,7 @@
 async fn run() -> Result<()> {
 env_logger::init();
 let opt = Opt::from_args();
 match opt {
- Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt),
+ Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await,
 Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt),
 Opt::Container(ContainerOpts::Info { imgref }) => container_info(imgref.as_str()).await,
 Opt::Container(ContainerOpts::Import { repo, imgref }) => {
diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index a6fdf9543..a9767b977 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -10,6 +10,7 @@
 version = "0.1.0"
 [dependencies]
 anyhow = "1.0"
+async-compression = { version = "0.3.8", features = ["tokio", "gzip"] }
 bytes = "1.0.1"
 camino = "1.0.4"
 fn-error-context = "0.1.1"
diff --git a/lib/src/async_util.rs b/lib/src/async_util.rs
new file mode 100644
index 000000000..fa676d121
--- /dev/null
+++ b/lib/src/async_util.rs
@@ -0,0 +1,25 @@
+use anyhow::Result;
+use futures::prelude::*;
+use std::io::prelude::*;
+use tokio::io::AsyncRead;
+
+/// Bridge from AsyncRead to Read.
+///
+/// This creates a pipe and a "driver" future (which could be spawned or not).
+pub(crate) fn copy_async_read_to_sync_pipe<S: AsyncRead + Send + Unpin + 'static>(
+    s: S,
+) -> Result<(impl Read, impl Future<Output = Result<()>>)> {
+    let (pipein, mut pipeout) = os_pipe::pipe()?;
+
+    let copier = async move {
+        let mut input = tokio_util::io::ReaderStream::new(s).boxed();
+        while let Some(buf) = input.next().await {
+            let buf = buf?;
+            // TODO blocking executor
+            pipeout.write_all(&buf)?;
+        }
+        Ok::<_, anyhow::Error>(())
+    };
+
+    Ok((pipein, copier))
+}
diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs
index 1bf2ba13d..70cda5bfb 100644
--- a/lib/src/container/import.rs
+++ b/lib/src/container/import.rs
@@ -4,7 +4,6 @@
 use super::*;
 use anyhow::anyhow;
 use fn_error_context::context;
 use futures::prelude::*;
-use std::io::prelude::*;
 use std::process::Stdio;
 use tokio::io::AsyncRead;
@@ -39,27 +38,6 @@
 async fn fetch_manifest(imgref: &ImageReference) -> Result<(oci::Manifest, String)> {
 Ok((serde_json::from_slice(&raw_manifest)?, digest))
 }
-/// Bridge from AsyncRead to Read.
-///
-/// This creates a pipe and a "driver" future (which could be spawned or not).
-fn copy_async_read_to_sync_pipe<S: AsyncRead + Send + Unpin + 'static>(
-    s: S,
-) -> Result<(impl Read, impl Future<Output = Result<()>>)> {
-    let (pipein, mut pipeout) = os_pipe::pipe()?;
-
-    let copier = async move {
-        let mut input = tokio_util::io::ReaderStream::new(s).boxed();
-        while let Some(buf) = input.next().await {
-            let buf = buf?;
-            // TODO blocking executor
-            pipeout.write_all(&buf)?;
-        }
-        Ok::<_, anyhow::Error>(())
-    };
-
-    Ok((pipein, copier))
-}
-
 /// Fetch a remote docker/OCI image into a local tarball, extract a specific blob.
 async fn fetch_oci_archive_blob<'s>(
 imgref: &ImageReference,
@@ -132,17 +110,11 @@
 pub async fn import(repo: &ostree::Repo, imgref: &ImageReference) -> Result<Import> {
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
 type Result<T> = anyhow::Result<T>;
+mod async_util;
 pub mod container;
 pub mod diff;
 pub mod ostree_ext;
diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index c6d9118b7..c478ef947 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -5,6 +5,7 @@
 use crate::Result;
 use anyhow::{anyhow, Context};
 use camino::Utf8Path;
 use fn_error_context::context;
+use futures::prelude::*;
 use gio::prelude::*;
 use glib::Cast;
 use ostree::ContentWriterExt;
@@ -455,42 +456,53 @@
 fn validate_sha256(s: &str) -> Result<()> {
 }
 /// Read the contents of a tarball and import the ostree commit inside. The sha256 of the imported commit will be returned.
-#[context("Importing")]
-pub fn import_tar(repo: &ostree::Repo, src: impl std::io::Read) -> Result<String> {
-    let mut importer = Importer {
-        state: ImportState::Initial,
-        repo,
-        xattrs: Default::default(),
-        next_xattrs: None,
-        stats: Default::default(),
-    };
-    repo.prepare_transaction(gio::NONE_CANCELLABLE)?;
-    let mut archive = tar::Archive::new(src);
-    for entry in archive.entries()? {
-        let entry = entry?;
-        if entry.header().entry_type() == tar::EntryType::Directory {
-            continue;
-        }
-        let path = entry.path()?;
-        let path = &*path;
-        let path =
-            Utf8Path::from_path(path).ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?;
-        let path = if let Ok(p) = path.strip_prefix("sysroot/ostree/repo/") {
-            p
-        } else {
-            continue;
+pub async fn import_tar(
+    repo: &ostree::Repo,
+    src: impl tokio::io::AsyncRead + Send + Unpin + 'static,
+) -> Result<String> {
+    let (pipein, copydriver) = crate::async_util::copy_async_read_to_sync_pipe(src)?;
+    let repo = repo.clone();
+    let import = tokio::task::spawn_blocking(move || {
+        let repo = &repo;
+        let mut importer = Importer {
+            state: ImportState::Initial,
+            repo,
+            xattrs: Default::default(),
+            next_xattrs: None,
+            stats: Default::default(),
         };
-
-        if let Ok(p) = path.strip_prefix("objects/") {
-            // Need to clone here, otherwise we borrow from the moved entry
-            let p = &p.to_owned();
-            importer.import_object(entry, p)?;
-        } else if let Ok(_) = path.strip_prefix("xattrs/") {
-            importer.import_xattrs(entry)?;
+        repo.prepare_transaction(gio::NONE_CANCELLABLE)?;
+        let mut archive = tar::Archive::new(pipein);
+        for entry in archive.entries()? {
+            let entry = entry?;
+            if entry.header().entry_type() == tar::EntryType::Directory {
+                continue;
+            }
+            let path = entry.path()?;
+            let path = &*path;
+            let path = Utf8Path::from_path(path)
+                .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?;
+            let path = if let Ok(p) = path.strip_prefix("sysroot/ostree/repo/") {
+                p
+            } else {
+                continue;
+            };
+
+            if let Ok(p) = path.strip_prefix("objects/") {
+                // Need to clone here, otherwise we borrow from the moved entry
+                let p = &p.to_owned();
+                importer.import_object(entry, p)?;
+            } else if let Ok(_) = path.strip_prefix("xattrs/") {
+                importer.import_xattrs(entry)?;
+            }
         }
-    }
-    importer.commit()
+        importer.commit()
+    })
+    .map_err(anyhow::Error::msg);
+    let (import, _copydriver) = tokio::try_join!(import, copydriver)?;
+    let import = import?;
+    Ok(import)
 }
 #[cfg(test)]
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 0e1da6c73..b77a8939a 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -68,23 +68,22 @@
 fn generate_test_tarball(dir: &Utf8Path) -> Result<Utf8PathBuf> {
 Ok(destpath)
 }
-#[test]
-fn test_tar_import_export() -> Result<()> {
+#[tokio::test]
+async fn test_tar_import_export() -> Result<()> {
 let cancellable = gio::NONE_CANCELLABLE;
 let tempdir = tempfile::tempdir_in("/var/tmp")?;
 let path = Utf8Path::from_path(tempdir.path()).unwrap();
 let srcdir = &path.join("src");
 std::fs::create_dir(srcdir)?;
- let src_tar =
- &mut std::io::BufReader::new(std::fs::File::open(&generate_test_tarball(srcdir)?)?);
+ let src_tar = tokio::fs::File::open(&generate_test_tarball(srcdir)?).await?;
 let destdir = &path.join("dest");
 std::fs::create_dir(destdir)?;
 let destrepodir = &destdir.join("repo");
 let destrepo = ostree::Repo::new_for_path(destrepodir);
 destrepo.create(ostree::RepoMode::BareUser, cancellable)?;
- let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, src_tar)?;
+ let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, src_tar).await?;
 let (commitdata, _) = destrepo.load_commit(&imported_commit)?;
 assert_eq!(
 EXAMPLEOS_CONTENT_CHECKSUM,

From bb53603dc3f9624f8869566d115e30f53cbe179d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 27 Apr 2021 13:51:04 -0400
Subject: [PATCH 031/775] Use tracing, not logging

It's much better for async.
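As a rough standalone sketch of why (not code from this patch; the function
names here are invented for illustration): a span created by `#[instrument]`
stays attached to its future across await points, so events from interleaved
tasks remain attributable, which plain log lines cannot guarantee. The
`tracing_subscriber::fmt::init()` call matches what the CLI switches to below.

```rust
// Illustrative sketch only: spans carry async context.
use tracing::instrument;

#[instrument] // opens a span named `fetch`, recording the `imgref` argument
async fn fetch(imgref: &str) {
    tracing::debug!("starting fetch"); // this event is emitted inside the span
}

#[tokio::main]
async fn main() {
    // Same initialization the CLI adopts in this patch.
    tracing_subscriber::fmt::init();
    // Even though these two fetches interleave, each event stays
    // associated with the right imgref via its span.
    tokio::join!(fetch("quay.io/a"), fetch("quay.io/b"));
}
```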
--- cli/Cargo.toml | 4 +++- cli/src/main.rs | 3 ++- lib/Cargo.toml | 1 - lib/src/container/export.rs | 4 +++- lib/src/container/import.rs | 5 +++-- 5 files changed, 11 insertions(+), 6 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index b89793d18..425714417 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -17,5 +17,7 @@ libc = "0.2.92" tokio = { version = "1", features = ["full"] } gio = "0.9.1" log = "0.4.0" -env_logger = "0.8.3" +tracing = "0.1" +tracing-subscriber = "0.2.17" + diff --git a/cli/src/main.rs b/cli/src/main.rs index 17c2ba762..c0e131fb5 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -128,7 +128,8 @@ async fn container_info(imgref: &str) -> Result<()> { } async fn run() -> Result<()> { - env_logger::init(); + tracing_subscriber::fmt::init(); + tracing::trace!("starting"); let opt = Opt::from_args(); match opt { Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await, diff --git a/lib/Cargo.toml b/lib/Cargo.toml index a9767b977..7496a8c77 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -28,7 +28,6 @@ ostree-sys = "0.7.2" tar = "0.4.33" tempfile = "3.2.0" tracing = "0.1" -log = "0.4" [dependencies.cjson] version = "0.1.1" diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 93aa5b652..7e65cba15 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -5,6 +5,7 @@ use crate::{tar as ostree_tar, variant_utils}; use anyhow::Context; use fn_error_context::context; use std::path::Path; +use tracing::{instrument, Level}; /// Write an ostree commit to an OCI blob #[context("Writing ostree root to blob")] @@ -55,6 +56,7 @@ fn build_oci(repo: &ostree::Repo, rev: &str, ocidir_path: &Path) -> Result Result { /// Fetch a container image and import its embedded OSTree commit. #[context("Importing {}", imgref)] +#[instrument(skip(repo))] pub async fn import(repo: &ostree::Repo, imgref: &ImageReference) -> Result { let (manifest, image_digest) = fetch_manifest(imgref).await?; let manifest = &manifest; let layerid = find_layer_blobid(manifest)?; - tracing::trace!("target blob: {}", layerid); + event!(Level::DEBUG, "target blob: {}", layerid); let blob = fetch_oci_archive_blob(imgref, layerid.as_str()).await?; let blob = tokio::io::BufReader::new(blob); - tracing::trace!("reading blob"); // TODO also detect zstd let blob = async_compression::tokio::bufread::GzipDecoder::new(blob); let ostree_commit = crate::tar::import_tar(&repo, blob).await?; From 3ecb88708c1f277f48b10a68acb5af46cb97a1ad Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 27 Apr 2021 20:36:24 -0400 Subject: [PATCH 032/775] container: Use docker-archive: to stream image It turns out that for whatever reasons (presumably mostly historical) in skopeo, while `oci-archive:` is backed by just writing everything to a tempdir and then tarring it up, the `docker-archive:` format is actually streaming. And it's *way* nicer and more efficient to stream. Then the last piece of this puzzle is that I realized we can use a named pipe i.e. `mkfifo` to avoid having the containers/image stack crash because it wants to invoke `realpath()` on the provided target. By simply giving our pipe a name, it can do so (though it's pointless) but this way we work with shipped versions of skopeo and don't need to wait months for patches to propagate. Also as a bonus, here skopeo ends up decompressing the layer for us, so we don't need to e.g. handle gzip vs zstd in our code. 
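A compressed sketch of that named-pipe dance, independent of the full
implementation in the diff below (the identifiers here are illustrative, and
the consumer just discards the bytes rather than parsing the tar stream):

```rust
// Sketch: hand skopeo a real path it can realpath(), while the bytes
// stream through the kernel pipe buffer instead of hitting a temp file.
use anyhow::Result;
use tokio::process::Command;

async fn stream_via_fifo(imgref: &str) -> Result<()> {
    let tmpdir = tempfile::tempdir()?;
    let fifo = tmpdir.path().join("skopeo.pipe");
    nix::unistd::mkfifo(&fifo, nix::sys::stat::Mode::from_bits(0o600).unwrap())?;
    let mut skopeo = Command::new("skopeo")
        .arg("copy")
        .arg(imgref)
        .arg(format!("docker-archive:{}", fifo.display()))
        .spawn()?;
    // Opening the read side completes once skopeo opens the write side;
    // from here the tarball can be consumed as it arrives.
    let mut reader = tokio::fs::File::open(&fifo).await?;
    tokio::io::copy(&mut reader, &mut tokio::io::sink()).await?;
    let status = skopeo.wait().await?;
    anyhow::ensure!(status.success(), "skopeo failed: {}", status);
    Ok(())
}
```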
--- lib/Cargo.toml | 1 + lib/src/async_util.rs | 6 +- lib/src/container/import.rs | 142 +++++++++++++++++++++++++++++------- lib/src/container/skopeo.rs | 1 + 4 files changed, 124 insertions(+), 26 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 7496a8c77..35ce69923 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -28,6 +28,7 @@ ostree-sys = "0.7.2" tar = "0.4.33" tempfile = "3.2.0" tracing = "0.1" +tokio-stream = "0.1.5" [dependencies.cjson] version = "0.1.1" diff --git a/lib/src/async_util.rs b/lib/src/async_util.rs index fa676d121..a4c9b5f04 100644 --- a/lib/src/async_util.rs +++ b/lib/src/async_util.rs @@ -16,7 +16,11 @@ pub(crate) fn copy_async_read_to_sync_pipe Ok(()), + _ => Err(e), + })?; } Ok::<_, anyhow::Error>(()) }; diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 0a231d32b..bf3bbd7e0 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -1,9 +1,11 @@ //! APIs for extracting OSTree commits from container images use super::*; -use anyhow::anyhow; +use anyhow::{anyhow, Context}; +use camino::Utf8Path; use fn_error_context::context; use futures::prelude::*; +use std::io::prelude::*; use std::process::Stdio; use tokio::io::AsyncRead; use tracing::{event, instrument, Level}; @@ -39,31 +41,120 @@ async fn fetch_manifest(imgref: &ImageReference) -> Result<(oci::Manifest, Strin Ok((serde_json::from_slice(&raw_manifest)?, digest)) } -/// Fetch a remote docker/OCI image into a local tarball, extract a specific blob. -async fn fetch_oci_archive_blob<'s>( +/// Read the contents of the first .tar we find +pub async fn find_layer_tar( + src: impl AsyncRead + Send + Unpin + 'static, + blobid: &str, +) -> Result<(impl AsyncRead, impl Future>)> { + let (pipein, input_copydriver) = crate::async_util::copy_async_read_to_sync_pipe(src)?; + let (tx_buf, rx_buf) = tokio::sync::mpsc::channel(2); + let blob_symlink_target = format!("../{}.tar", blobid); + let import = tokio::task::spawn_blocking(move || { + let mut archive = tar::Archive::new(pipein); + let mut buf = vec![0u8; 8192]; + for entry in archive.entries()? { + let mut entry = entry.context("Reading entry")?; + let path = entry.path()?; + let path = &*path; + let path = Utf8Path::from_path(path) + .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?; + let t = entry.header().entry_type(); + + // We generally expect our layer to be first, but let's just skip anything + // unexpected to be robust against changes in skopeo. + if path.extension() != Some("tar") { + continue; + } + + match t { + tar::EntryType::Symlink => { + if let Some(name) = path.file_name() { + if name == "layer.tar" { + let target = entry + .link_name()? 
+ .ok_or_else(|| anyhow!("Invalid link {}", path))?; + let target = Utf8Path::from_path(&*target) + .ok_or_else(|| anyhow!("Invalid non-UTF8 path {:?}", target))?; + if target != blob_symlink_target { + return Err(anyhow!( + "Found unexpected layer link {} -> {}", + path, + target + )); + } + } + } + } + tar::EntryType::Regular => loop { + let n = entry + .read(&mut buf[..]) + .context("Reading tar file contents")?; + let done = 0 == n; + let r = Ok::<_, std::io::Error>(bytes::Bytes::copy_from_slice(&buf[0..n])); + let receiver_closed = tx_buf.blocking_send(r).is_err(); + if receiver_closed || done { + return Ok::<_, anyhow::Error>(()); + } + }, + _ => continue, + } + } + Err(anyhow!("Failed to find layer {}", blob_symlink_target)) + }) + .map_err(anyhow::Error::msg); + let stream = tokio_stream::wrappers::ReceiverStream::new(rx_buf); + let reader = tokio_util::io::StreamReader::new(stream); + // Is there a better way to do this? + let worker = async move { + let (import, input_copydriver) = tokio::join!(import, input_copydriver); + let _: () = import?.context("Import worker")?; + let _: () = input_copydriver.context("Layer input copy driver failed")?; + Ok::<_, anyhow::Error>(()) + }; + Ok((reader, worker)) +} + +/// Fetch a remote docker/OCI image and extract a specific uncompressed layer. +async fn fetch_layer<'s>( imgref: &ImageReference, blobid: &str, -) -> Result { +) -> Result<( + impl AsyncRead + Unpin + Send, + impl Future>, +)> { let mut proc = skopeo::new_cmd(); proc.stdout(Stdio::null()); - let tempdir = tempfile::tempdir_in("/var/tmp")?; - let target = &tempdir.path().join("d"); - tracing::trace!("skopeo pull starting to {:?}", target); + let tempdir = tempfile::Builder::new() + .prefix("ostree-rs-ext") + .tempdir_in("/var/tmp")?; + let tempdir = Utf8Path::from_path(tempdir.path()).unwrap(); + let fifo = &tempdir.join("skopeo.pipe"); + nix::unistd::mkfifo( + fifo.as_os_str(), + nix::sys::stat::Mode::from_bits(0o600).unwrap(), + )?; + tracing::trace!("skopeo pull starting to {}", fifo); proc.arg("copy") .arg(imgref.to_string()) - .arg(format!("oci://{}", target.to_str().unwrap())); - skopeo::spawn(proc)? - .wait() - .err_into() - .and_then(|e| async move { - if !e.success() { - return Err(anyhow!("skopeo failed: {}", e)); - } - Ok(()) - }) - .await?; - tracing::trace!("skopeo pull done"); - Ok(tokio::fs::File::open(target.join("blobs/sha256/").join(blobid)).await?) + .arg(format!("docker-archive:{}", fifo)); + let mut proc = skopeo::spawn(proc)?; + let fifo_reader = tokio::fs::File::open(fifo).await?; + let waiter = async move { + let res = proc.wait().await?; + if !res.success() { + return Err(anyhow!("skopeo failed: {}", res)); + } + Ok(()) + } + .boxed(); + let (contents, worker) = find_layer_tar(fifo_reader, blobid).await?; + let worker = async move { + let (worker, waiter) = tokio::join!(worker, waiter); + let _: () = worker.context("Layer worker failed")?; + let _: () = waiter?; + Ok::<_, anyhow::Error>(()) + }; + Ok((contents, worker)) } /// The result of an import operation @@ -111,12 +202,13 @@ pub async fn import(repo: &ostree::Repo, imgref: &ImageReference) -> Result tokio::process::Command { let mut cmd = Command::new("skopeo"); + cmd.stdin(Stdio::null()); cmd.kill_on_drop(true); cmd } From 0d52f36969634ac060fdbb16dad1cba08a31744e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 28 Apr 2021 17:19:25 -0400 Subject: [PATCH 033/775] cli+container: Add support for progress This demos a lot better than watching the CLI sit there and do nothing until done! 
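The mechanism underneath is a `tokio::sync::watch` channel. A minimal
standalone sketch (the struct and field mirror the patch; everything else is
illustrative): the fetch side publishes a running byte count, the CLI side
re-renders whenever the value changes, and because `watch` keeps only the
latest value, a slow consumer never applies backpressure to the download.

```rust
use tokio::sync::watch;

#[derive(Copy, Clone, Debug, Default)]
struct ImportProgress {
    processed_bytes: u64,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = watch::channel(ImportProgress::default());
    let worker = tokio::spawn(async move {
        for n in 1..=4u64 {
            // send() only fails once every receiver is gone; that just
            // means nobody is watching, so the error can be ignored.
            let _ = tx.send(ImportProgress { processed_bytes: n * 8192 });
        }
    });
    // changed() resolves on a new value and errors once tx is dropped.
    // Intermediate values may be coalesced; only the latest is observed.
    while rx.changed().await.is_ok() {
        println!("processed: {} bytes", rx.borrow().processed_bytes);
    }
    worker.await.unwrap();
}
```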
--- cli/Cargo.toml | 1 + cli/src/main.rs | 27 +++++++++++++-- lib/src/container/import.rs | 68 ++++++++++++++++++++++++++++++++++--- lib/src/tar/import.rs | 3 ++ lib/tests/it/main.rs | 2 +- 5 files changed, 93 insertions(+), 8 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 425714417..c3eead4e7 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -11,6 +11,7 @@ readme = "README.md" anyhow = "1.0" ostree-ext = { path = "../lib" } clap = "2.33.3" +indicatif = "0.15.0" structopt = "0.3.21" ostree = { version = "0.11.0", features = ["v2021_2"] } libc = "0.2.92" diff --git a/cli/src/main.rs b/cli/src/main.rs index c0e131fb5..a64ad73ef 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -107,9 +107,30 @@ fn tar_export(opts: &ExportOpts) -> Result<()> { async fn container_import(repo: &str, imgref: &str) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; let imgref = imgref.try_into()?; - let res = ostree_ext::container::import(repo, &imgref).await?; - println!("Imported: {}", res.ostree_commit); - Ok(()) + let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default()); + let target = indicatif::ProgressDrawTarget::stdout(); + let style = indicatif::ProgressStyle::default_bar(); + let pb = indicatif::ProgressBar::new_spinner(); + pb.set_draw_target(target); + pb.set_style(style.template("{spinner} {prefix} {msg}")); + pb.enable_steady_tick(200); + pb.set_message("Downloading..."); + let import = ostree_ext::container::import(repo, &imgref, Some(tx_progress)); + tokio::pin!(import); + tokio::pin!(rx_progress); + loop { + tokio::select! { + _ = rx_progress.changed() => { + let n = rx_progress.borrow().processed_bytes; + pb.set_message(&format!("Processed: {}", indicatif::HumanBytes(n))); + } + import = &mut import => { + pb.finish(); + println!("Imported: {}", import?.ostree_commit); + return Ok(()) + } + } + } } async fn container_export(repo: &str, rev: &str, imgref: &str) -> Result<()> { diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index bf3bbd7e0..00ce48f44 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -6,10 +6,60 @@ use camino::Utf8Path; use fn_error_context::context; use futures::prelude::*; use std::io::prelude::*; +use std::pin::Pin; use std::process::Stdio; use tokio::io::AsyncRead; use tracing::{event, instrument, Level}; +/// The result of an import operation +#[derive(Copy, Clone, Debug, Default)] +pub struct ImportProgress { + /// Number of bytes downloaded (approximate) + pub processed_bytes: u64, +} + +type Progress = tokio::sync::watch::Sender; + +/// A read wrapper that updates the download progress. +struct ProgressReader { + reader: Box, + progress: Option, +} + +impl AsyncRead for ProgressReader { + fn poll_read( + mut self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + buf: &mut tokio::io::ReadBuf<'_>, + ) -> std::task::Poll> { + let pinned = Pin::new(&mut self.reader); + let len = buf.filled().len(); + match pinned.poll_read(cx, buf) { + v @ std::task::Poll::Ready(Ok(_)) => { + let success = if let Some(progress) = self.progress.as_ref() { + let state = { + let mut state = *progress.borrow(); + let newlen = buf.filled().len(); + debug_assert!(newlen >= len); + let read = (newlen - len) as u64; + state.processed_bytes += read; + state + }; + // Ignore errors, if the caller disconnected from progress that's OK. 
+                    progress.send(state).is_ok()
+                } else {
+                    true
+                };
+                if !success {
+                    let _ = self.progress.take();
+                }
+                v
+            }
+            o => o,
+        }
+    }
+}
+
 /// Download the manifest for a target image.
 #[context("Fetching manifest")]
 pub async fn fetch_manifest_info(imgref: &ImageReference) -> Result {
@@ -66,6 +116,8 @@
 pub async fn find_layer_tar(
 continue;
 }
+ event!(Level::DEBUG, "Found {}", path);
+
 match t {
 tar::EntryType::Symlink => {
 if let Some(name) = path.file_name() {
@@ -118,6 +170,7 @@
 pub async fn find_layer_tar(
 async fn fetch_layer<'s>(
 imgref: &ImageReference,
 blobid: &str,
+ progress: Option<tokio::sync::watch::Sender<ImportProgress>>,
 ) -> Result<(
 impl AsyncRead + Unpin + Send,
 impl Future<Output = Result<()>>,
 )> {
@@ -138,7 +191,10 @@
 async fn fetch_layer<'s>(
 .arg(imgref.to_string())
 .arg(format!("docker-archive:{}", fifo));
 let mut proc = skopeo::spawn(proc)?;
- let fifo_reader = tokio::fs::File::open(fifo).await?;
+ let fifo_reader = ProgressReader {
+ reader: Box::new(tokio::fs::File::open(fifo).await?),
+ progress: progress,
+ };
 let waiter = async move {
 let res = proc.wait().await?;
 if !res.success() {
@@ -196,13 +252,17 @@
 fn find_layer_blobid(manifest: &oci::Manifest) -> Result<String> {
 /// Fetch a container image and import its embedded OSTree commit.
 #[context("Importing {}", imgref)]
-#[instrument(skip(repo))]
+#[instrument(skip(repo, progress))]
 pub async fn import(
 repo: &ostree::Repo,
 imgref: &ImageReference,
 progress: Option<tokio::sync::watch::Sender<ImportProgress>>,
 ) -> Result<Import> {
 let (manifest, image_digest) = fetch_manifest(imgref).await?;
 let manifest = &manifest;
 let layerid = find_layer_blobid(manifest)?;
 event!(Level::DEBUG, "target blob: {}", layerid);
- let (blob, worker) = fetch_layer(imgref, layerid.as_str()).await?;
+ let (blob, worker) = fetch_layer(imgref, layerid.as_str(), progress).await?;
 let blob = tokio::io::BufReader::new(blob);
 let import = crate::tar::import_tar(&repo, blob);
 let (ostree_commit, worker) = tokio::join!(import, worker);
diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index c478ef947..60f93373e 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -12,6 +12,7 @@
 use ostree::ContentWriterExt;
 use std::collections::HashMap;
 use std::convert::TryInto;
 use std::io::prelude::*;
+use tracing::{event, instrument, Level};
 /// Arbitrary limit on xattrs to avoid RAM exhaustion attacks. The actual filesystem limits are often much smaller.
 /// See https://en.wikipedia.org/wiki/Extended_file_attributes
@@ -135,6 +136,7 @@
 impl<'a> Importer<'a> {
 ) -> Result<()> {
 assert_eq!(self.state, ImportState::Initial);
 self.import_metadata(entry, checksum, ostree::ObjectType::Commit)?;
+ event!(Level::DEBUG, "Imported {}.commit", checksum);
 self.state = ImportState::Importing(checksum.to_string());
 Ok(())
 }
@@ -456,6 +458,7 @@
 fn validate_sha256(s: &str) -> Result<()> {
 }
 /// Read the contents of a tarball and import the ostree commit inside. The sha256 of the imported commit will be returned.
+#[instrument(skip(repo, src))] pub async fn import_tar( repo: &ostree::Repo, src: impl tokio::io::AsyncRead + Send + Unpin + 'static, diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index b77a8939a..b3acc16ff 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -144,7 +144,7 @@ async fn test_container_import_export() -> Result<()> { let inspect = ostree_ext::container::fetch_manifest_info(&srcoci).await?; assert_eq!(inspect.manifest_digest, digest); - let import = ostree_ext::container::import(destrepo, &srcoci) + let import = ostree_ext::container::import(destrepo, &srcoci, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); From 93c24238ec444d51e82ad72f21e411e25e895e88 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 7 May 2021 15:27:18 -0400 Subject: [PATCH 034/775] Add support for IMA signatures This code is adapted from https://github.com/coreos/rpm-ostree/pull/2747 Overall, there's a lot of apparent synergy between ostree and IMA. Both are file based. IMA signatures cover file content and some metadata. The goal of EVM is also to cover security-relevant xattrs (unlike e.g. fs-verity). This initial code is known to generate bootable Fedora CoreOS instances, but has not seen extensive testing beyond that. --- cli/src/main.rs | 36 ++++ lib/src/ima.rs | 347 +++++++++++++++++++++++++++++++++++++++ lib/src/lib.rs | 1 + lib/src/variant_utils.rs | 43 +++++ 4 files changed, 427 insertions(+) create mode 100644 lib/src/ima.rs diff --git a/cli/src/main.rs b/cli/src/main.rs index c0e131fb5..1ce5dc436 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -75,6 +75,22 @@ enum ContainerOpts { }, } +#[derive(Debug, StructOpt)] +struct ImaSignOpts { + /// Path to the repository + #[structopt(long)] + repo: String, + /// The ostree ref or commit to use as a base + src_rev: String, + /// The ostree ref to use for writing the signed commit + target_ref: String, + + /// Digest algorithm + algorithm: String, + /// Path to IMA key + key: String, +} + #[derive(Debug, StructOpt)] #[structopt(name = "ostree-ext")] #[structopt(rename_all = "kebab-case")] @@ -83,6 +99,7 @@ enum Opt { Tar(TarOpts), /// Import and export to a container image Container(ContainerOpts), + ImaSign(ImaSignOpts), } async fn tar_import(opts: &ImportOpts) -> Result<()> { @@ -127,6 +144,24 @@ async fn container_info(imgref: &str) -> Result<()> { Ok(()) } +fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { + let repo = + &ostree::Repo::open_at(libc::AT_FDCWD, cmdopts.repo.as_str(), gio::NONE_CANCELLABLE)?; + let signopts = ostree_ext::ima::ImaOpts { + algorithm: cmdopts.algorithm.clone(), + key: cmdopts.key.clone(), + }; + let signed_commit = ostree_ext::ima::ima_sign(repo, cmdopts.src_rev.as_str(), &signopts)?; + repo.set_ref_immediate( + None, + cmdopts.target_ref.as_str(), + Some(signed_commit.as_str()), + gio::NONE_CANCELLABLE, + )?; + println!("{} => {}", cmdopts.target_ref, signed_commit); + Ok(()) +} + async fn run() -> Result<()> { tracing_subscriber::fmt::init(); tracing::trace!("starting"); @@ -141,6 +176,7 @@ async fn run() -> Result<()> { Opt::Container(ContainerOpts::Export { repo, rev, imgref }) => { container_export(&repo, &rev, &imgref).await } + Opt::ImaSign(ref opts) => ima_sign(opts), } } diff --git a/lib/src/ima.rs b/lib/src/ima.rs new file mode 100644 index 000000000..d2ad3df8e --- /dev/null +++ b/lib/src/ima.rs @@ -0,0 +1,347 @@ +//! 
Write IMA signatures to an ostree commit + +// SPDX-License-Identifier: Apache-2.0 OR MIT + +use crate::variant_utils; +use anyhow::{Context, Result}; +use fn_error_context::context; +use gio::prelude::InputStreamExtManual; +use glib::translate::*; +use glib::Cast; +use gvariant::aligned_bytes::TryAsAligned; +use gvariant::{gv, Marker, Structure}; +use openat_ext::FileExt; +use std::collections::{BTreeMap, HashMap}; +use std::ffi::CString; +use std::fs::File; +use std::os::unix::io::AsRawFd; +use std::os::unix::prelude::{FromRawFd, IntoRawFd}; +use std::process::{Command, Stdio}; +use std::rc::Rc; +use std::{convert::TryInto, io::Seek}; + +/// Extended attribute keys used for IMA. +const IMA_XATTRS: &[&str] = &["security.ima", "security.evm"]; +const SELINUX_XATTR: &[u8] = b"security.selinux\0"; + +/// Attributes to configure IMA signatures. +#[derive(Debug, Clone)] +pub struct ImaOpts { + /// Digest algorithm + pub algorithm: String, + + /// Path to IMA key + pub key: String, +} + +/// Convert a GVariant of type `a(ayay)` to a mutable map +fn xattrs_to_map(v: &glib::Variant) -> BTreeMap, Vec> { + let v = v.get_data_as_bytes(); + let v = v.try_as_aligned().unwrap(); + let v = gv!("a(ayay)").cast(v); + let mut map: BTreeMap, Vec> = BTreeMap::new(); + for e in v.iter() { + let (k, v) = e.to_tuple(); + map.insert(k.into(), v.into()); + } + map +} + +/// Reserialize a map to GVariant of type `a(ayay)` +fn xattrmap_serialize(map: &BTreeMap, Vec>) -> glib::Variant { + let map: Vec<_> = map.into_iter().collect(); + variant_utils::new_variant_a_ayay(&map) +} + +struct CommitRewriter<'a> { + repo: &'a ostree::Repo, + ima: &'a ImaOpts, + tempdir: tempfile::TempDir, + /// Files that we already changed + rewritten_files: HashMap>, +} + +#[allow(unsafe_code)] +#[context("Gathering xattr {}", k)] +fn steal_xattr(f: &File, k: &str) -> Result> { + let k = &CString::new(k)?; + unsafe { + let k = k.as_ptr() as *const _; + let r = libc::fgetxattr(f.as_raw_fd(), k, std::ptr::null_mut(), 0); + if r < 0 { + return Err(nix::Error::last().into()); + } + let sz: usize = r.try_into()?; + let mut buf = vec![0u8; sz]; + let r = libc::fgetxattr(f.as_raw_fd(), k, buf.as_mut_ptr() as *mut _, sz); + if r < 0 { + return Err(nix::Error::last().into()); + } + let r = libc::fremovexattr(f.as_raw_fd(), k); + if r < 0 { + return Err(nix::Error::last().into()); + } + Ok(buf) + } +} + +impl<'a> CommitRewriter<'a> { + fn new(repo: &'a ostree::Repo, ima: &'a ImaOpts) -> Result { + Ok(Self { + repo, + ima, + tempdir: tempfile::tempdir_in(format!("/proc/self/fd/{}/tmp", repo.get_dfd()))?, + rewritten_files: Default::default(), + }) + } + + /// Use `evmctl` to generate an IMA signature on a file, then + /// scrape the xattr value out of it (removing it). + /// + /// evmctl can write a separate file but it picks the name...so + /// we do this hacky dance of `--xattr-user` instead. + #[allow(unsafe_code)] + #[context("Invoking evmctl")] + fn ima_sign( + &self, + instream: &gio::InputStream, + selinux: Option<&Vec>, + ) -> Result, Vec>> { + let mut tempf = tempfile::NamedTempFile::new_in(self.tempdir.path())?; + // If we're operating on a bare repo, we can clone the file (copy_file_range) directly. 
+ if let Some(instream) = instream.clone().downcast::().ok() { + // View the fd as a File + let instream_fd = unsafe { File::from_raw_fd(instream.as_raw_fd()) }; + instream_fd.copy_to(tempf.as_file_mut())?; + // Leak to avoid double close + let _ = instream_fd.into_raw_fd(); + } else { + // If we're operating on an archive repo, then we need to uncompress + // and recompress... + let mut instream = instream.clone().into_read(); + let _n = std::io::copy(&mut instream, tempf.as_file_mut())?; + } + tempf.seek(std::io::SeekFrom::Start(0))?; + + let mut proc = Command::new("evmctl"); + proc.current_dir(self.tempdir.path()) + .args(&[ + "sign", + "--portable", + "--xattr-user", + "--key", + self.ima.key.as_str(), + ]) + .args(&["--hashalgo", self.ima.algorithm.as_str()]); + if let Some(selinux) = selinux { + let selinux = std::str::from_utf8(selinux) + .context("Non-UTF8 selinux value")? + .trim_end_matches('\0'); + proc.args(&["--selinux", selinux]); + } + + let proc = proc + .arg("--imasig") + .arg(tempf.path().file_name().unwrap()) + .stdout(Stdio::null()) + .stderr(Stdio::piped()); + let status = proc.output().context("Spawning evmctl")?; + if !status.status.success() { + return Err(anyhow::anyhow!( + "evmctl failed: {:?}\n{}", + status.status, + String::from_utf8_lossy(&status.stderr), + )); + } + let mut r = HashMap::new(); + for &k in IMA_XATTRS { + let user_k = k.replace("security.", "user."); + let v = steal_xattr(tempf.as_file(), user_k.as_str())?; + // NUL terminate the key + let k = CString::new(k)?.into_bytes_with_nul(); + r.insert(k, v); + } + Ok(r) + } + + #[context("Content object {}", checksum)] + fn map_file(&mut self, checksum: &str) -> Result> { + if let Some(r) = self.rewritten_files.get(checksum) { + return Ok(Rc::clone(r)); + } + let cancellable = gio::NONE_CANCELLABLE; + let (instream, meta, xattrs) = self.repo.load_file(checksum, cancellable)?; + let instream = if let Some(i) = instream { + i + } else { + // If there's no input stream, it must be a symlink. Skip it. + let r: Rc = checksum.into(); + self.rewritten_files + .insert(checksum.to_string(), Rc::clone(&r)); + return Ok(r); + }; + let meta = meta.unwrap(); + let mut xattrs = xattrs_to_map(&xattrs.unwrap()); + + let selinux = xattrs.get(SELINUX_XATTR); + + // Now inject the IMA xattr + let xattrs = { + let signed = self.ima_sign(&instream, selinux)?; + xattrs.extend(signed); + let r = xattrmap_serialize(&xattrs); + r + }; + // Now reload the input stream + let (instream, _, _) = self.repo.load_file(checksum, cancellable)?; + let instream = instream.unwrap(); + let (ostream, size) = + ostree::raw_file_to_content_stream(&instream, &meta, Some(&xattrs), cancellable)?; + let new_checksum = self + .repo + .write_content(None, &ostream, size, cancellable)? + .to_hex(); + + let r: Rc = new_checksum.into(); + self.rewritten_files + .insert(checksum.to_string(), Rc::clone(&r)); + Ok(r) + } + + /// Write a dirtree object. 
+ #[allow(unsafe_code)] + fn map_dirtree(&mut self, checksum: &str) -> Result { + let src = &self + .repo + .load_variant(ostree::ObjectType::DirTree, checksum)?; + let src = src.get_data_as_bytes(); + let src = src.try_as_aligned()?; + let src = gv!("(a(say)a(sayay))").cast(src); + let (files, dirs) = src.to_tuple(); + + // A reusable buffer to avoid heap allocating these + let mut hexbuf = [0u8; 64]; + + let new_files_builder = + unsafe { glib_sys::g_variant_builder_new(b"a(say)\0".as_ptr() as *const _) }; + for file in files { + let (name, csum) = file.to_tuple(); + let name = name.to_str(); + hex::encode_to_slice(csum, &mut hexbuf)?; + let checksum = std::str::from_utf8(&hexbuf)?; + let mapped = self.map_file(checksum)?; + let mapped = hex::decode(&*mapped)?; + unsafe { + // Unwrap safety: The name won't have NULs + let name = CString::new(name).unwrap(); + let mapped_checksum_v = variant_utils::new_variant_bytearray(&mapped); + let name_p = name.as_ptr(); + glib_sys::g_variant_builder_add( + new_files_builder, + b"(s@ay)\0".as_ptr() as *const _, + name_p, + mapped_checksum_v.to_glib_none().0, + ); + } + } + let new_files: glib::Variant = unsafe { + let v = glib_sys::g_variant_builder_end(new_files_builder); + glib_sys::g_variant_ref_sink(v); + from_glib_full(v) + }; + + let new_dirs_builder = + unsafe { glib_sys::g_variant_builder_new(b"a(sayay)\0".as_ptr() as *const _) }; + for item in dirs { + let (name, contents_csum, meta_csum_bytes) = item.to_tuple(); + let name = name.to_str(); + hex::encode_to_slice(contents_csum, &mut hexbuf)?; + let contents_csum = std::str::from_utf8(&hexbuf)?; + let mapped = self.map_dirtree(&contents_csum)?; + let mapped = hex::decode(mapped)?; + unsafe { + // Unwrap safety: The name won't have NULs + let name = CString::new(name).unwrap(); + let mapped_checksum_v = variant_utils::new_variant_bytearray(&mapped); + let meta_checksum_v = variant_utils::new_variant_bytearray(meta_csum_bytes); + glib_sys::g_variant_builder_add( + new_dirs_builder, + b"(s@ay@ay)\0".as_ptr() as *const _, + name.as_ptr(), + mapped_checksum_v.to_glib_none().0, + meta_checksum_v.to_glib_none().0, + ); + } + } + let new_dirs: glib::Variant = unsafe { + let v = glib_sys::g_variant_builder_end(new_dirs_builder); + glib_sys::g_variant_ref_sink(v); + from_glib_full(v) + }; + + let new_dirtree: glib::Variant = unsafe { + let v = glib_sys::g_variant_new( + b"(@a(say)@a(sayay))\0".as_ptr() as *const _, + new_files.to_glib_none().0, + new_dirs.to_glib_none().0, + std::ptr::null_mut::(), + ); + glib_sys::g_variant_ref_sink(v); + from_glib_full(v) + }; + + let mapped = self + .repo + .write_metadata( + ostree::ObjectType::DirTree, + None, + &new_dirtree, + gio::NONE_CANCELLABLE, + )? + .to_hex(); + + Ok(mapped) + } + + /// Write a commit object. 
+ #[context("Mapping {}", rev)] + fn map_commit(&mut self, rev: &str) -> Result { + let checksum = self.repo.resolve_rev(rev, false)?.unwrap(); + let cancellable = gio::NONE_CANCELLABLE; + let (commit_v, _) = self.repo.load_commit(&checksum)?; + let commit_v = &commit_v; + + let commit_bytes = commit_v.get_data_as_bytes(); + let commit_bytes = commit_bytes.try_as_aligned()?; + let commit = gv!("(a{sv}aya(say)sstayay)").cast(commit_bytes); + let commit = commit.to_tuple(); + let contents = &hex::encode(commit.6); + + let new_dt = self.map_dirtree(contents)?; + + let n_parts = 8; + let mut parts = Vec::with_capacity(n_parts); + for i in 0..n_parts { + parts.push(variant_utils::variant_get_child_value(&commit_v, i).unwrap()); + } + let new_dt = hex::decode(new_dt)?; + parts[6] = variant_utils::new_variant_bytearray(&new_dt); + let new_commit = variant_utils::new_variant_tuple(&parts); + + let new_commit_checksum = self + .repo + .write_metadata(ostree::ObjectType::Commit, None, &new_commit, cancellable)? + .to_hex(); + + Ok(new_commit_checksum) + } +} + +/// Given an OSTree commit and an IMA configuration, generate a new commit object with IMA signatures. +/// +/// The generated commit object will inherit all metadata from the existing commit object +/// such as version, etc. +pub fn ima_sign(repo: &ostree::Repo, ostree_ref: &str, opts: &ImaOpts) -> Result { + let writer = &mut CommitRewriter::new(&repo, &opts)?; + Ok(writer.map_commit(ostree_ref)?) +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index a17912f18..c7a284f5b 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -16,6 +16,7 @@ type Result = anyhow::Result; mod async_util; pub mod container; pub mod diff; +pub mod ima; pub mod ostree_ext; pub mod tar; #[allow(unsafe_code)] diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs index f0eea30b8..48b3f1760 100644 --- a/lib/src/variant_utils.rs +++ b/lib/src/variant_utils.rs @@ -3,6 +3,8 @@ //! avoiding another crate for this. In the future, some of these //! may migrate into gtk-rs. +use std::mem::size_of; + use glib::translate::*; /// Create a new GVariant from data. @@ -28,6 +30,30 @@ pub fn variant_normal_from_bytes(ty: &str, bytes: glib::Bytes) -> glib::Variant variant_get_normal_form(&variant_new_from_bytes(ty, bytes, false)) } +/// Create a new `ay` GVariant. +pub fn new_variant_bytearray(buf: &[u8]) -> glib::Variant { + unsafe { + let r = glib_sys::g_variant_new_fixed_array( + b"y\0".as_ptr() as *const _, + buf.as_ptr() as *const _, + buf.len(), + size_of::(), + ); + glib_sys::g_variant_ref_sink(r); + from_glib_full(r) + } +} + +/// Create a new GVariant tuple from the provided variants. +pub fn new_variant_tuple<'a>(items: impl IntoIterator) -> glib::Variant { + let v: Vec<_> = items.into_iter().map(|v| v.to_glib_none().0).collect(); + unsafe { + let r = glib_sys::g_variant_new_tuple(v.as_ptr(), v.len()); + glib_sys::g_variant_ref_sink(r); + from_glib_full(r) + } +} + /// Extract a child from a variant. 
pub fn variant_get_child_value(v: &glib::Variant, n: usize) -> Option { let v = v.to_glib_none(); @@ -39,6 +65,23 @@ pub fn variant_get_child_value(v: &glib::Variant, n: usize) -> Option>(items: &[(T, T)]) -> glib::Variant { + unsafe { + let ty = glib::VariantTy::new("a(ayay)").unwrap(); + let builder = glib_sys::g_variant_builder_new(ty.as_ptr() as *const _); + for (k, v) in items { + let k = new_variant_bytearray(k.as_ref()); + let v = new_variant_bytearray(v.as_ref()); + let val = new_variant_tuple(&[k, v]); + glib_sys::g_variant_builder_add_value(builder, val.to_glib_none().0); + } + let v = glib_sys::g_variant_builder_end(builder); + glib_sys::g_variant_ref_sink(v); + from_glib_full(v) + } +} + #[cfg(test)] mod tests { use super::*; From 148d3d2553a59d420821f0638019c181cf6bf886 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 10 May 2021 10:27:02 -0400 Subject: [PATCH 035/775] Fix async -> sync read bridge Followup to https://users.rust-lang.org/t/best-practices-for-bridging-async-and-sync-particularly-read-write/58994/2 This is *way* simpler and more obviously correct, and faster I'd imagine. Now the caller doesn't need to juggle two futures, which simplifies things there a lot. This unearthed a bug in the container import code where we really need to read to the end, otherwise skopeo gets an `EPIPE`. --- lib/src/async_util.rs | 73 +++++++++++++++++++++++++------------ lib/src/container/import.rs | 34 +++++++++++------ lib/src/tar/import.rs | 5 +-- 3 files changed, 75 insertions(+), 37 deletions(-) diff --git a/lib/src/async_util.rs b/lib/src/async_util.rs index a4c9b5f04..10f9ec60f 100644 --- a/lib/src/async_util.rs +++ b/lib/src/async_util.rs @@ -1,29 +1,56 @@ -use anyhow::Result; -use futures::prelude::*; use std::io::prelude::*; -use tokio::io::AsyncRead; +use std::pin::Pin; +use tokio::io::{AsyncRead, AsyncReadExt}; + +struct ReadBridge { + reader: Pin>, + rt: tokio::runtime::Handle, +} + +impl Read for ReadBridge { + fn read(&mut self, buf: &mut [u8]) -> std::io::Result { + let mut reader = self.reader.as_mut(); + self.rt.block_on(async { reader.read(buf).await }) + } +} /// Bridge from AsyncRead to Read. -/// -/// This creates a pipe and a "driver" future (which could be spawned or not). 
-pub(crate) fn copy_async_read_to_sync_pipe( - s: S, -) -> Result<(impl Read, impl Future>)> { - let (pipein, mut pipeout) = os_pipe::pipe()?; +pub(crate) fn async_read_to_sync( + reader: S, +) -> impl Read + Send + Unpin + 'static { + let rt = tokio::runtime::Handle::current(); + let reader = Box::pin(reader); + ReadBridge { reader, rt } +} + +#[cfg(test)] +mod test { + use std::convert::TryInto; + + use super::*; + use anyhow::Result; - let copier = async move { - let mut input = tokio_util::io::ReaderStream::new(s).boxed(); - while let Some(buf) = input.next().await { - let buf = buf?; - // TODO blocking executor - // Note broken pipe is OK, just means the caller stopped reading - pipeout.write_all(&buf).or_else(|e| match e.kind() { - std::io::ErrorKind::BrokenPipe => Ok(()), - _ => Err(e), - })?; - } - Ok::<_, anyhow::Error>(()) - }; + async fn test_reader_len( + r: impl AsyncRead + Unpin + Send + 'static, + expected_len: usize, + ) -> Result<()> { + let mut r = async_read_to_sync(r); + let res = tokio::task::spawn_blocking(move || { + let mut buf = Vec::new(); + r.read_to_end(&mut buf)?; + Ok::<_, anyhow::Error>(buf) + }) + .await?; + assert_eq!(res?.len(), expected_len); + Ok(()) + } - Ok((pipein, copier)) + #[tokio::test] + async fn test_async_read_to_sync() -> Result<()> { + test_reader_len(tokio::io::empty(), 0).await?; + let bash = tokio::fs::File::open("/usr/bin/sh").await?; + let bash_len = bash.metadata().await?.len(); + test_reader_len(bash, bash_len.try_into().unwrap()).await?; + Ok(()) + } } diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 00ce48f44..95b126d60 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -96,14 +96,19 @@ pub async fn find_layer_tar( src: impl AsyncRead + Send + Unpin + 'static, blobid: &str, ) -> Result<(impl AsyncRead, impl Future>)> { - let (pipein, input_copydriver) = crate::async_util::copy_async_read_to_sync_pipe(src)?; + let pipein = crate::async_util::async_read_to_sync(src); let (tx_buf, rx_buf) = tokio::sync::mpsc::channel(2); let blob_symlink_target = format!("../{}.tar", blobid); let import = tokio::task::spawn_blocking(move || { let mut archive = tar::Archive::new(pipein); let mut buf = vec![0u8; 8192]; + let mut found = false; for entry in archive.entries()? { let mut entry = entry.context("Reading entry")?; + if found { + // Continue to read to the end to avoid broken pipe error from skopeo + continue; + } let path = entry.path()?; let path = &*path; let path = Utf8Path::from_path(path) @@ -145,22 +150,25 @@ pub async fn find_layer_tar( let r = Ok::<_, std::io::Error>(bytes::Bytes::copy_from_slice(&buf[0..n])); let receiver_closed = tx_buf.blocking_send(r).is_err(); if receiver_closed || done { - return Ok::<_, anyhow::Error>(()); + found = true; + break; } }, _ => continue, } } - Err(anyhow!("Failed to find layer {}", blob_symlink_target)) + if found { + Ok(()) + } else { + Err(anyhow!("Failed to find layer {}", blob_symlink_target)) + } }) .map_err(anyhow::Error::msg); let stream = tokio_stream::wrappers::ReceiverStream::new(rx_buf); let reader = tokio_util::io::StreamReader::new(stream); - // Is there a better way to do this? 
let worker = async move { - let (import, input_copydriver) = tokio::join!(import, input_copydriver); - let _: () = import?.context("Import worker")?; - let _: () = input_copydriver.context("Layer input copy driver failed")?; + let import = import.await?; + let _: () = import.context("Import worker")?; Ok::<_, anyhow::Error>(()) }; Ok((reader, worker)) @@ -190,15 +198,19 @@ async fn fetch_layer<'s>( proc.arg("copy") .arg(imgref.to_string()) .arg(format!("docker-archive:{}", fifo)); - let mut proc = skopeo::spawn(proc)?; + let proc = skopeo::spawn(proc)?; let fifo_reader = ProgressReader { reader: Box::new(tokio::fs::File::open(fifo).await?), progress: progress, }; let waiter = async move { - let res = proc.wait().await?; - if !res.success() { - return Err(anyhow!("skopeo failed: {}", res)); + let res = proc.wait_with_output().await?; + if !res.status.success() { + return Err(anyhow!( + "skopeo failed: {}\n{}", + res.status, + String::from_utf8_lossy(&res.stderr) + )); } Ok(()) } diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 60f93373e..55f11accf 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -463,7 +463,7 @@ pub async fn import_tar( repo: &ostree::Repo, src: impl tokio::io::AsyncRead + Send + Unpin + 'static, ) -> Result { - let (pipein, copydriver) = crate::async_util::copy_async_read_to_sync_pipe(src)?; + let pipein = crate::async_util::async_read_to_sync(src); let repo = repo.clone(); let import = tokio::task::spawn_blocking(move || { let repo = &repo; @@ -503,8 +503,7 @@ pub async fn import_tar( importer.commit() }) .map_err(anyhow::Error::msg); - let (import, _copydriver) = tokio::try_join!(import, copydriver)?; - let import = import?; + let import: String = import.await??; Ok(import) } From 30a5e79f41733131ab01918941641adfaec58788 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 11 May 2021 10:12:48 -0400 Subject: [PATCH 036/775] container/import: Extract layer tar discovery into helper function This code was getting too nested and rust-analyzer makes it very easy to hoist things to helper functions. --- lib/src/container/import.rs | 135 +++++++++++++++++++----------------- 1 file changed, 73 insertions(+), 62 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 95b126d60..e8ffa86fb 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -100,68 +100,7 @@ pub async fn find_layer_tar( let (tx_buf, rx_buf) = tokio::sync::mpsc::channel(2); let blob_symlink_target = format!("../{}.tar", blobid); let import = tokio::task::spawn_blocking(move || { - let mut archive = tar::Archive::new(pipein); - let mut buf = vec![0u8; 8192]; - let mut found = false; - for entry in archive.entries()? { - let mut entry = entry.context("Reading entry")?; - if found { - // Continue to read to the end to avoid broken pipe error from skopeo - continue; - } - let path = entry.path()?; - let path = &*path; - let path = Utf8Path::from_path(path) - .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?; - let t = entry.header().entry_type(); - - // We generally expect our layer to be first, but let's just skip anything - // unexpected to be robust against changes in skopeo. - if path.extension() != Some("tar") { - continue; - } - - event!(Level::DEBUG, "Found {}", path); - - match t { - tar::EntryType::Symlink => { - if let Some(name) = path.file_name() { - if name == "layer.tar" { - let target = entry - .link_name()? 
- .ok_or_else(|| anyhow!("Invalid link {}", path))?; - let target = Utf8Path::from_path(&*target) - .ok_or_else(|| anyhow!("Invalid non-UTF8 path {:?}", target))?; - if target != blob_symlink_target { - return Err(anyhow!( - "Found unexpected layer link {} -> {}", - path, - target - )); - } - } - } - } - tar::EntryType::Regular => loop { - let n = entry - .read(&mut buf[..]) - .context("Reading tar file contents")?; - let done = 0 == n; - let r = Ok::<_, std::io::Error>(bytes::Bytes::copy_from_slice(&buf[0..n])); - let receiver_closed = tx_buf.blocking_send(r).is_err(); - if receiver_closed || done { - found = true; - break; - } - }, - _ => continue, - } - } - if found { - Ok(()) - } else { - Err(anyhow!("Failed to find layer {}", blob_symlink_target)) - } + find_layer_tar_sync(pipein, blob_symlink_target, tx_buf) }) .map_err(anyhow::Error::msg); let stream = tokio_stream::wrappers::ReceiverStream::new(rx_buf); @@ -174,6 +113,78 @@ pub async fn find_layer_tar( Ok((reader, worker)) } +// Helper function invoked to synchronously parse a tar stream, finding +// the desired layer tarball and writing its contents via a stream of byte chunks +// to a channel. +fn find_layer_tar_sync( + pipein: impl Read + Send + Unpin, + blob_symlink_target: String, + tx_buf: tokio::sync::mpsc::Sender>, +) -> Result<()> { + let mut archive = tar::Archive::new(pipein); + let mut buf = vec![0u8; 8192]; + let mut found = false; + for entry in archive.entries()? { + let mut entry = entry.context("Reading entry")?; + if found { + // Continue to read to the end to avoid broken pipe error from skopeo + continue; + } + let path = entry.path()?; + let path = &*path; + let path = + Utf8Path::from_path(path).ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?; + let t = entry.header().entry_type(); + + // We generally expect our layer to be first, but let's just skip anything + // unexpected to be robust against changes in skopeo. + if path.extension() != Some("tar") { + continue; + } + + event!(Level::DEBUG, "Found {}", path); + + match t { + tar::EntryType::Symlink => { + if let Some(name) = path.file_name() { + if name == "layer.tar" { + let target = entry + .link_name()? + .ok_or_else(|| anyhow!("Invalid link {}", path))?; + let target = Utf8Path::from_path(&*target) + .ok_or_else(|| anyhow!("Invalid non-UTF8 path {:?}", target))?; + if target != blob_symlink_target { + return Err(anyhow!( + "Found unexpected layer link {} -> {}", + path, + target + )); + } + } + } + } + tar::EntryType::Regular => loop { + let n = entry + .read(&mut buf[..]) + .context("Reading tar file contents")?; + let done = 0 == n; + let r = Ok::<_, std::io::Error>(bytes::Bytes::copy_from_slice(&buf[0..n])); + let receiver_closed = tx_buf.blocking_send(r).is_err(); + if receiver_closed || done { + found = true; + break; + } + }, + _ => continue, + } + } + if found { + Ok(()) + } else { + Err(anyhow!("Failed to find layer {}", blob_symlink_target)) + } +} + /// Fetch a remote docker/OCI image and extract a specific uncompressed layer. async fn fetch_layer<'s>( imgref: &ImageReference, From a9093cd5719f9cb44f661eb0ccd46d6eb2dd4b2a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 11 May 2021 18:19:42 -0400 Subject: [PATCH 037/775] Remove some unused deps These were parts of async skopeo that didn't pan out or aren't necessary anymore. 
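To make the shape of that bridge concrete: the worker-plus-channel pattern that
patches 035 and 036 converge on can be sketched standalone. This is only an
illustrative outline, not code from this repository; the function name and chunk
contents are invented, and it must be called from inside a tokio runtime:

    use tokio_stream::wrappers::ReceiverStream;
    use tokio_util::io::StreamReader;

    // A blocking producer feeding byte chunks to an async consumer; the same
    // shape as find_layer_tar_sync + StreamReader above.
    fn spawn_blocking_producer() -> impl tokio::io::AsyncRead {
        let (tx, rx) = tokio::sync::mpsc::channel::<std::io::Result<bytes::Bytes>>(2);
        tokio::task::spawn_blocking(move || {
            // Real code would parse the tar stream here, and keep reading to
            // EOF even after finding its payload to avoid EPIPE in the writer.
            let _ = tx.blocking_send(Ok(bytes::Bytes::from_static(b"example chunk")));
        });
        StreamReader::new(ReceiverStream::new(rx))
    }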
--- lib/Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 35ce69923..14ae3cfdd 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,7 +10,6 @@ version = "0.1.0" [dependencies] anyhow = "1.0" -async-compression = { version = "0.3.8", features = ["tokio", "gzip"] } bytes = "1.0.1" camino = "1.0.4" fn-error-context = "0.1.1" @@ -23,7 +22,6 @@ libc = "0.2.92" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -os_pipe = "0.9.2" ostree-sys = "0.7.2" tar = "0.4.33" tempfile = "3.2.0" From 4a7150c0bcd05120a7276afbb9e7917bdcd6913a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 11 May 2021 18:26:18 -0400 Subject: [PATCH 038/775] README.md: Minor updates --- README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index d776e20ea..c5fee6d64 100644 --- a/README.md +++ b/README.md @@ -121,11 +121,15 @@ are duplicated on disk. Of course storage systems like containers/image could l ## Being able to remove all container images -In Kubernetes, the kubelet will prune the image storage periodically, removing images not backed by containers. If we store the operating system itself as an image...well, we'd -need to do something like teach the container storage to have the concept of an image that is "pinned" because it's actually the booted filesystem. Or create a "fake" container -representing the running operating system. +In Kubernetes, the kubelet will prune the image storage periodically, removing images not backed by containers. If we store the operating system itself as an image...well, we'd need to do something like teach the container storage to have the concept of an image that is "pinned" because it's actually the booted filesystem. Or create a "fake" container representing the running operating system. -Other projects in this space ended up having an "early docker" distinct from +Other projects in this space ended up having an "early docker" distinct from the "main docker" which brings its own large set of challenges. + +## SELinux + +OSTree has *first class* support for SELinux. It was baked into the design from the very start. Handling SELinux is very tricky because it's a part of the operating system that can influence *everything else*. And specifically file labels. + +In this approach we aren't trying to inject xattrs into the tar stream; they're stored out of band for reliability. 
## Independence of complexity of container storage From 29c0164d1c69e9d67c9dfaab436d5e2fde6aabfc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 12 May 2021 16:45:55 -0400 Subject: [PATCH 039/775] Release 0.1.1 --- cli/Cargo.toml | 2 +- lib/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index c3eead4e7..ba59d0133 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ostree-ext-cli" -version = "0.1.0" +version = "0.1.1" authors = ["Colin Walters "] edition = "2018" license = "MIT OR Apache-2.0" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 14ae3cfdd..dbed98a0c 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-ext" -version = "0.1.0" +version = "0.1.1" [dependencies] anyhow = "1.0" From 60d9e0d5dd6c0cc8522328a8e2d16b8ec7568e1b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 12 May 2021 17:12:30 -0400 Subject: [PATCH 040/775] container: Also bind containers-storage It's extremely useful to be able to push images into the local container storage. --- lib/src/container/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index b4b5f3a43..fbab8dd6d 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -33,6 +33,8 @@ pub enum Transport { OciDir, /// A local OCI archive tarball (`oci-archive:`) OciArchive, + /// Local container storage (`containers-storage:`) + ContainerStorage, } /// Combination of a remote image reference and transport. @@ -79,6 +81,7 @@ impl TryFrom<&str> for Transport { "registry" | "docker" => Self::Registry, "oci" => Self::OciDir, "oci-archive" => Self::OciArchive, + "containers-storage" => Self::ContainerStorage, o => return Err(anyhow!("Unknown transport '{}'", o)), }) } @@ -116,6 +119,7 @@ impl std::fmt::Display for Transport { Self::Registry => "docker://", Self::OciArchive => "oci-archive:", Self::OciDir => "oci:", + Self::ContainerStorage => "containers-storage:", }; f.write_str(s) } @@ -139,6 +143,10 @@ mod tests { use super::*; const INVALID_IRS: &[&str] = &["", "foo://", "docker:blah", "registry:", "foo:bar"]; + const VALID_IRS: &[&str] = &[ + "containers-storage:localhost/someimage", + "docker://quay.io/exampleos/blah:sometag", + ]; #[test] fn test_imagereference() { @@ -159,6 +167,10 @@ mod tests { .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); assert_eq!(digested.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + for &v in VALID_IRS { + ImageReference::try_from(v).unwrap(); + } + for &v in INVALID_IRS { match ImageReference::try_from(v) { Ok(_) => panic!("Should fail to parse: {}", v), From 1ead3c9e4334f35182d7f7b5ca998164e1a22c9c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 12 May 2021 20:19:50 -0400 Subject: [PATCH 041/775] container/export: When writing to `containers-storage:`, don't compress It's really inefficient to do gzip only to have the destination decompress again. 
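Concretely, the choice reduces to something like the following sketch (the
helper name is invented here; note `flate2::Compression::none()` still emits a
valid gzip stream, just with stored rather than compressed blocks):

    // Sketch: pick a gzip level from the destination transport.
    fn compression_for(transport: &Transport) -> Option<flate2::Compression> {
        match transport {
            // containers-storage: decompresses on import anyway; don't burn CPU.
            Transport::ContainerStorage => Some(flate2::Compression::none()),
            // Everything else keeps the writer's default compression.
            _ => None,
        }
    }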
---
 lib/src/container/export.rs | 22 +++++++++++++++++-----
 lib/src/container/oci.rs    |  4 ++--
 2 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs
index 7e65cba15..f687682ee 100644
--- a/lib/src/container/export.rs
+++ b/lib/src/container/export.rs
@@ -13,16 +13,22 @@ fn export_ostree_ref_to_blobdir(
     repo: &ostree::Repo,
     rev: &str,
     ocidir: &openat::Dir,
+    compression: Option<flate2::Compression>,
 ) -> Result<oci::Layer> {
     let commit = repo.resolve_rev(rev, false)?.unwrap();
-    let mut w = oci::LayerWriter::new(ocidir)?;
+    let mut w = oci::LayerWriter::new(ocidir, compression)?;
     ostree_tar::export_commit(repo, commit.as_str(), &mut w)?;
     w.complete()
 }
 
 /// Generate an OCI image from a given ostree root
 #[context("Building oci")]
-fn build_oci(repo: &ostree::Repo, rev: &str, ocidir_path: &Path) -> Result<ImageReference> {
+fn build_oci(
+    repo: &ostree::Repo,
+    rev: &str,
+    ocidir_path: &Path,
+    compression: Option<flate2::Compression>,
+) -> Result<ImageReference> {
     // Explicitly error if the target exists
     std::fs::create_dir(ocidir_path).context("Creating OCI dir")?;
     let ocidir = &openat::Dir::open(ocidir_path)?;
@@ -45,7 +51,7 @@ fn build_oci(repo: &ostree::Repo, rev: &str, ocidir_path: &Path) -> Result<ImageReference>
 ) -> Result<ImageReference> {
+    let compression = if dest.transport == Transport::ContainerStorage {
+        Some(flate2::Compression::none())
+    } else {
+        None
+    };
     if dest.transport == Transport::OciDir {
-        let _copied: ImageReference = build_oci(repo, ostree_ref, Path::new(dest.name.as_str()))?;
+        let _copied: ImageReference =
+            build_oci(repo, ostree_ref, Path::new(dest.name.as_str()), compression)?;
     } else {
         let tempdir = tempfile::tempdir_in("/var/tmp")?;
         let tempdest = tempdir.path().join("d");
         let tempdest = tempdest.to_str().unwrap();
-        let src = build_oci(repo, ostree_ref, Path::new(tempdest))?;
+        let src = build_oci(repo, ostree_ref, Path::new(tempdest), compression)?;
 
         let mut cmd = skopeo::new_cmd();
         tracing::event!(Level::DEBUG, "Copying {} to {}", src, dest);
diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs
index 7c091facc..2e18cf69a 100644
--- a/lib/src/container/oci.rs
+++ b/lib/src/container/oci.rs
@@ -267,12 +267,12 @@ impl<'a> std::io::Write for BlobWriter<'a> {
 }
 
 impl<'a> LayerWriter<'a> {
-    pub(crate) fn new(ocidir: &'a openat::Dir) -> Result<Self> {
+    pub(crate) fn new(ocidir: &'a openat::Dir, c: Option<flate2::Compression>) -> Result<Self> {
         let bw = BlobWriter::new(ocidir)?;
         Ok(Self {
             bw,
             uncompressed_hash: Hasher::new(MessageDigest::sha256())?,
-            compressor: GzEncoder::new(Vec::with_capacity(8192), flate2::Compression::default()),
+            compressor: GzEncoder::new(Vec::with_capacity(8192), c.unwrap_or_default()),
         })
     }

From 0960f7507e914ff2e493f086b929261f244cff41 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 12 May 2021 20:56:22 -0400
Subject: [PATCH 042/775] tar/import: Cache a reusable buffer

Instead of heap allocating and zero initializing one for each object.
---
 lib/src/tar/import.rs | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index 55f11accf..48b603db2 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -57,6 +57,9 @@ struct Importer<'a> {
     xattrs: HashMap<String, glib::Variant>,
     next_xattrs: Option<(String, String)>,
 
+    // Reusable buffer for reads.
+    // See also https://github.com/rust-lang/rust/issues/78485
+    buf: Vec<u8>,
+
     stats: ImportStats,
 }
 
@@ -184,13 +187,14 @@ impl<'a> Importer<'a> {
         )?;
         {
             let w = w.clone().upcast::<gio::OutputStream>();
-            let mut buf = [0; 8192];
             loop {
-                let n = entry.read(&mut buf[..]).context("Reading large regfile")?;
+                let n = entry
+                    .read(&mut self.buf[..])
+                    .context("Reading large regfile")?;
                 if n == 0 {
                     break;
                 }
-                w.write(&buf[0..n], cancellable)
+                w.write(&self.buf[0..n], cancellable)
                     .context("Writing large regfile")?;
             }
         }
@@ -470,6 +474,7 @@ pub async fn import_tar(
     let mut importer = Importer {
         state: ImportState::Initial,
         repo,
+        buf: vec![0u8; 16384],
         xattrs: Default::default(),
         next_xattrs: None,
         stats: Default::default(),

From ae2b69440f7e570b1b8eed0b29e1b01835fa8a89 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 13 May 2021 09:55:26 -0400
Subject: [PATCH 043/775] Release 0.1.2

Mainly adds `container export containers-storage:` which is going to be
very useful for things like CI tests, and also helps complete the
picture of bidirectional mapping between ostree and containers.
---
 cli/Cargo.toml | 2 +-
 lib/Cargo.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index ba59d0133..ec3c22f53 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ostree-ext-cli"
-version = "0.1.1"
+version = "0.1.2"
 authors = ["Colin Walters "]
 edition = "2018"
 license = "MIT OR Apache-2.0"
diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index dbed98a0c..13e7e198c 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0"
 name = "ostree-ext"
 readme = "README.md"
 repository = "https://github.com/ostreedev/ostree-ext"
-version = "0.1.1"
+version = "0.1.2"

From 0262af281c9cbe502c398dbbad95d96a90e149d3 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 21 May 2021 13:42:03 -0400
Subject: [PATCH 044/775] lib: Link to zlib

It's going to be more tuned for e.g. target CPU than the Rust version,
and we already link to it in process.
---
 lib/Cargo.toml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 13e7e198c..91fcce3f7 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -33,6 +33,8 @@ version = "0.1.1"
 
 [dependencies.flate2]
 version = "1.0.20"
+features = ["zlib"]
+default-features = false

From 87dabaaac3e229c0daeaf3ab62b6f6d0f8c3ac19 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 21 May 2021 16:18:50 -0400
Subject: [PATCH 045/775] variant_utils: Add VariantDictExt

A lot of rpm-ostree code parses `glib::VariantDict` with strings and
booleans.
---
 lib/src/variant_utils.rs | 29 +++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs
index 48b3f1760..7c3da647f 100644
--- a/lib/src/variant_utils.rs
+++ b/lib/src/variant_utils.rs
@@ -82,6 +82,28 @@ pub fn new_variant_a_ayay<T: AsRef<[u8]>>(items: &[(T, T)]) -> glib::Variant {
     }
 }
 
+/// Extension trait for `glib::VariantDict`.
+pub trait VariantDictExt {
+    /// Find (and duplicate) a string-valued key in this dictionary.
+    fn lookup_str(&self, k: &str) -> Option<String>;
+    /// Find a `bool`-valued key in this dictionary.
+    fn lookup_bool(&self, k: &str) -> Option<bool>;
+}
+
+impl VariantDictExt for glib::VariantDict {
+    fn lookup_str(&self, k: &str) -> Option<String> {
+        // Unwrap safety: Passing the GVariant type string gives us the right value type
+        self.lookup_value(k, Some(glib::VariantTy::new("s").unwrap()))
+            .map(|v| v.get_str().unwrap().to_string())
+    }
+
+    fn lookup_bool(&self, k: &str) -> Option<bool> {
+        // Unwrap safety: Passing the GVariant type string gives us the right value type
+        self.lookup_value(k, Some(glib::VariantTy::new("b").unwrap()))
+            .map(|v| v.get().unwrap())
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -95,4 +117,11 @@ mod tests {
         let val: u32 = v.get().unwrap();
         assert_eq!(val, 16843009);
     }
+
+    #[test]
+    fn test_variantdict() {
+        let d = glib::VariantDict::new(None);
+        d.insert("foo", &"bar");
+        assert_eq!(d.lookup_str("foo"), Some("bar".to_string()));
+    }
 }

From 963e891eee645d8928c9464e3aa1dbc2955c0f6d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 24 May 2021 10:24:49 -0400
Subject: [PATCH 046/775] lib/Cargo.toml: Clean up declarations

At some point I think rust-analyzer did automatic imports and that made
things an inconsistent mess.  I find the "inline" declarations *much*
more readable.
---
 lib/Cargo.toml | 47 ++++++++++-------------------------------------
 1 file changed, 10 insertions(+), 37 deletions(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 91fcce3f7..3f3b1ea81 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -12,59 +12,32 @@
 anyhow = "1.0"
 bytes = "1.0.1"
 camino = "1.0.4"
+cjson = "0.1.1"
+flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" }
 fn-error-context = "0.1.1"
+futures = "0.3.13"
 gio = "0.9.1"
 glib = "0.10.3"
 glib-sys = "0.10.1"
 gvariant = "0.4.0"
 hex = "0.4.3"
 libc = "0.2.92"
+nix = "0.20.0"
+phf = { features = ["macros"], version = "0.8.0" }
 openat = "0.1.20"
 openat-ext = "0.2.0"
 openssl = "0.10.33"
+ostree = { features = ["v2021_2"], version = "0.11.0" }
 ostree-sys = "0.7.2"
+serde = { features = ["derive"], version = "1.0.125" }
+serde_json = "1.0.64"
 tar = "0.4.33"
 tempfile = "3.2.0"
+tokio = { features = ["full"], version = "1" }
+tokio-util = { features = ["io"], version = "0.6" }
 tracing = "0.1"
 tokio-stream = "0.1.5"
 
-[dependencies.cjson]
-version = "0.1.1"
-
-[dependencies.flate2]
-version = "1.0.20"
-features = ["zlib"]
-default-features = false
-
-[dependencies.futures]
-version = "0.3.13"
-
-[dependencies.nix]
-version = "0.20.0"
-
-[dependencies.ostree]
-features = ["v2021_2"]
-version = "0.11.0"
-
-[dependencies.phf]
-features = ["macros"]
-version = "0.8.0"
-
-[dependencies.serde]
-features = ["derive"]
-version = "1.0.125"
-
-[dependencies.serde_json]
-version = "1.0.64"
-
-[dependencies.tokio]
-features = ["full"]
-version = "1"
-
-[dependencies.tokio-util]
-features = ["io"]
-version = "0.6"
-
 [dev-dependencies]
 clap = "2.33.3"
 indoc = "1.0.3"
 sh-inline = "0.1.0"

From 7a5cac910ca90d31c55bff5aed5c20921d4e1d31 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 24 May 2021 18:24:54 -0400
Subject: [PATCH 047/775] Export full CLI in library

Ultimately I think it will work best if we fully encapsulate
`ostree-ext-cli` in `rpm-ostree` in the short term.

There's a strong tension in having two binaries.  It *logically* makes
sense, but logistically is harder (duplicating CI, documentation, crate
dependency management, etc.)

Keep the `cli` crate for now, but move all of the functionality of it
into a simple `run_from_iter()` function that can be called by projects
like rpm-ostree.
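With that in place, the embedding side reduces to a few lines; a minimal sketch
of a wrapper binary (assuming the `tokio` macros feature is enabled), which is
essentially what the `cli` crate below becomes:

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // All argument parsing and subcommand dispatch lives in the library.
        ostree_ext::cli::run_from_iter(std::env::args_os()).await
    }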
--- cli/Cargo.toml | 1 - cli/src/main.rs | 197 +---------------------------------------- lib/Cargo.toml | 2 + lib/src/cli.rs | 227 ++++++++++++++++++++++++++++++++++++++++++++++++ lib/src/lib.rs | 1 + 5 files changed, 231 insertions(+), 197 deletions(-) create mode 100644 lib/src/cli.rs diff --git a/cli/Cargo.toml b/cli/Cargo.toml index ec3c22f53..23cbed594 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -11,7 +11,6 @@ readme = "README.md" anyhow = "1.0" ostree-ext = { path = "../lib" } clap = "2.33.3" -indicatif = "0.15.0" structopt = "0.3.21" ostree = { version = "0.11.0", features = ["v2021_2"] } libc = "0.2.92" diff --git a/cli/src/main.rs b/cli/src/main.rs index 2387ba03e..64910451a 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,204 +1,9 @@ use anyhow::Result; -use std::convert::TryInto; -use structopt::StructOpt; - -#[derive(Debug, StructOpt)] -struct BuildOpts { - #[structopt(long)] - repo: String, - - #[structopt(long = "ref")] - ostree_ref: String, - - #[structopt(long)] - oci_dir: String, -} - -#[derive(Debug, StructOpt)] -struct ImportOpts { - /// Path to the repository - #[structopt(long)] - repo: String, - - /// Path to a tar archive; if unspecified, will be stdin. Currently the tar archive must not be compressed. - path: Option, -} - -#[derive(Debug, StructOpt)] -struct ExportOpts { - /// Path to the repository - #[structopt(long)] - repo: String, - - /// The ostree ref or commit to export - rev: String, -} - -#[derive(Debug, StructOpt)] -enum TarOpts { - /// Import a tar archive (currently, must not be compressed) - Import(ImportOpts), - - /// Write a tar archive to stdout - Export(ExportOpts), -} - -#[derive(Debug, StructOpt)] -enum ContainerOpts { - /// Import an ostree commit embedded in a remote container image - Import { - /// Path to the repository - #[structopt(long)] - repo: String, - - /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - imgref: String, - }, - - /// Print information about an exported ostree-container image. - Info { - /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - imgref: String, - }, - - /// Export an ostree commit to an OCI layout - Export { - /// Path to the repository - #[structopt(long)] - repo: String, - - /// The ostree ref or commit to export - rev: String, - - /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - imgref: String, - }, -} - -#[derive(Debug, StructOpt)] -struct ImaSignOpts { - /// Path to the repository - #[structopt(long)] - repo: String, - /// The ostree ref or commit to use as a base - src_rev: String, - /// The ostree ref to use for writing the signed commit - target_ref: String, - - /// Digest algorithm - algorithm: String, - /// Path to IMA key - key: String, -} - -#[derive(Debug, StructOpt)] -#[structopt(name = "ostree-ext")] -#[structopt(rename_all = "kebab-case")] -enum Opt { - /// Import and export to tar - Tar(TarOpts), - /// Import and export to a container image - Container(ContainerOpts), - ImaSign(ImaSignOpts), -} - -async fn tar_import(opts: &ImportOpts) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; - let imported = if let Some(path) = opts.path.as_ref() { - let instream = tokio::fs::File::open(path).await?; - ostree_ext::tar::import_tar(repo, instream).await? - } else { - let stdin = tokio::io::stdin(); - ostree_ext::tar::import_tar(repo, stdin).await? 
- }; - println!("Imported: {}", imported); - Ok(()) -} - -fn tar_export(opts: &ExportOpts) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; - ostree_ext::tar::export_commit(repo, opts.rev.as_str(), std::io::stdout())?; - Ok(()) -} - -async fn container_import(repo: &str, imgref: &str) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let imgref = imgref.try_into()?; - let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default()); - let target = indicatif::ProgressDrawTarget::stdout(); - let style = indicatif::ProgressStyle::default_bar(); - let pb = indicatif::ProgressBar::new_spinner(); - pb.set_draw_target(target); - pb.set_style(style.template("{spinner} {prefix} {msg}")); - pb.enable_steady_tick(200); - pb.set_message("Downloading..."); - let import = ostree_ext::container::import(repo, &imgref, Some(tx_progress)); - tokio::pin!(import); - tokio::pin!(rx_progress); - loop { - tokio::select! { - _ = rx_progress.changed() => { - let n = rx_progress.borrow().processed_bytes; - pb.set_message(&format!("Processed: {}", indicatif::HumanBytes(n))); - } - import = &mut import => { - pb.finish(); - println!("Imported: {}", import?.ostree_commit); - return Ok(()) - } - } - } -} - -async fn container_export(repo: &str, rev: &str, imgref: &str) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let imgref = imgref.try_into()?; - let pushed = ostree_ext::container::export(repo, rev, &imgref).await?; - println!("{}", pushed); - Ok(()) -} - -async fn container_info(imgref: &str) -> Result<()> { - let imgref = imgref.try_into()?; - let info = ostree_ext::container::fetch_manifest_info(&imgref).await?; - println!("{} @{}", imgref, info.manifest_digest); - Ok(()) -} - -fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { - let repo = - &ostree::Repo::open_at(libc::AT_FDCWD, cmdopts.repo.as_str(), gio::NONE_CANCELLABLE)?; - let signopts = ostree_ext::ima::ImaOpts { - algorithm: cmdopts.algorithm.clone(), - key: cmdopts.key.clone(), - }; - let signed_commit = ostree_ext::ima::ima_sign(repo, cmdopts.src_rev.as_str(), &signopts)?; - repo.set_ref_immediate( - None, - cmdopts.target_ref.as_str(), - Some(signed_commit.as_str()), - gio::NONE_CANCELLABLE, - )?; - println!("{} => {}", cmdopts.target_ref, signed_commit); - Ok(()) -} async fn run() -> Result<()> { tracing_subscriber::fmt::init(); tracing::trace!("starting"); - let opt = Opt::from_args(); - match opt { - Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await, - Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt), - Opt::Container(ContainerOpts::Info { imgref }) => container_info(imgref.as_str()).await, - Opt::Container(ContainerOpts::Import { repo, imgref }) => { - container_import(&repo, &imgref).await - } - Opt::Container(ContainerOpts::Export { repo, rev, imgref }) => { - container_export(&repo, &rev, &imgref).await - } - Opt::ImaSign(ref opts) => ima_sign(opts), - } + ostree_ext::cli::run_from_iter(std::env::args_os()).await } #[tokio::main] diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 3f3b1ea81..5a7f8d719 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -16,6 +16,7 @@ cjson = "0.1.1" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.1.1" futures = "0.3.13" +indicatif = "0.15.0" gio = "0.9.1" glib = "0.10.3" glib-sys = "0.10.1" @@ -31,6 +32,7 @@ ostree = { features = ["v2021_2"], version = "0.11.0" } 
ostree-sys = "0.7.2" serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" +structopt = "0.3.21" tar = "0.4.33" tempfile = "3.2.0" tokio = { features = ["full"], version = "1" } diff --git a/lib/src/cli.rs b/lib/src/cli.rs new file mode 100644 index 000000000..f38b9f347 --- /dev/null +++ b/lib/src/cli.rs @@ -0,0 +1,227 @@ +//! # Commandline parsing +//! +//! While there is a separate `ostree-ext-cli` crate that +//! can be installed and used directly, the CLI code is +//! also exported as a library too, so that projects +//! such as `rpm-ostree` can directly reuse it. + +use anyhow::Result; +use std::convert::TryInto; +use std::ffi::OsString; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +struct BuildOpts { + #[structopt(long)] + repo: String, + + #[structopt(long = "ref")] + ostree_ref: String, + + #[structopt(long)] + oci_dir: String, +} + +/// Options for importing a tar archive. +#[derive(Debug, StructOpt)] +struct ImportOpts { + /// Path to the repository + #[structopt(long)] + repo: String, + + /// Path to a tar archive; if unspecified, will be stdin. Currently the tar archive must not be compressed. + path: Option, +} + +/// Options for exporting a tar archive. +#[derive(Debug, StructOpt)] +struct ExportOpts { + /// Path to the repository + #[structopt(long)] + repo: String, + + /// The ostree ref or commit to export + rev: String, +} + +/// Options for import/export to tar archives. +#[derive(Debug, StructOpt)] +enum TarOpts { + /// Import a tar archive (currently, must not be compressed) + Import(ImportOpts), + + /// Write a tar archive to stdout + Export(ExportOpts), +} + +/// Options for container import/export. +#[derive(Debug, StructOpt)] +enum ContainerOpts { + /// Import an ostree commit embedded in a remote container image + Import { + /// Path to the repository + #[structopt(long)] + repo: String, + + /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest + imgref: String, + }, + + /// Print information about an exported ostree-container image. + Info { + /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest + imgref: String, + }, + + /// Export an ostree commit to an OCI layout + Export { + /// Path to the repository + #[structopt(long)] + repo: String, + + /// The ostree ref or commit to export + rev: String, + + /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest + imgref: String, + }, +} + +/// Options for the Integrity Measurement Architecture (IMA). +#[derive(Debug, StructOpt)] +struct ImaSignOpts { + /// Path to the repository + #[structopt(long)] + repo: String, + /// The ostree ref or commit to use as a base + src_rev: String, + /// The ostree ref to use for writing the signed commit + target_ref: String, + + /// Digest algorithm + algorithm: String, + /// Path to IMA key + key: String, +} + +/// Toplevel options for extended ostree functionality. +#[derive(Debug, StructOpt)] +#[structopt(name = "ostree-ext")] +#[structopt(rename_all = "kebab-case")] +enum Opt { + /// Import and export to tar + Tar(TarOpts), + /// Import and export to a container image + Container(ContainerOpts), + /// IMA signatures + ImaSign(ImaSignOpts), +} + +/// Import a tar archive containing an ostree commit. 
+async fn tar_import(opts: &ImportOpts) -> Result<()> { + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; + let imported = if let Some(path) = opts.path.as_ref() { + let instream = tokio::fs::File::open(path).await?; + crate::tar::import_tar(repo, instream).await? + } else { + let stdin = tokio::io::stdin(); + crate::tar::import_tar(repo, stdin).await? + }; + println!("Imported: {}", imported); + Ok(()) +} + +/// Export a tar archive containing an ostree commit. +fn tar_export(opts: &ExportOpts) -> Result<()> { + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; + crate::tar::export_commit(repo, opts.rev.as_str(), std::io::stdout())?; + Ok(()) +} + +/// Import a container image with an encapsulated ostree commit. +async fn container_import(repo: &str, imgref: &str) -> Result<()> { + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; + let imgref = imgref.try_into()?; + let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default()); + let target = indicatif::ProgressDrawTarget::stdout(); + let style = indicatif::ProgressStyle::default_bar(); + let pb = indicatif::ProgressBar::new_spinner(); + pb.set_draw_target(target); + pb.set_style(style.template("{spinner} {prefix} {msg}")); + pb.enable_steady_tick(200); + pb.set_message("Downloading..."); + let import = crate::container::import(repo, &imgref, Some(tx_progress)); + tokio::pin!(import); + tokio::pin!(rx_progress); + loop { + tokio::select! { + _ = rx_progress.changed() => { + let n = rx_progress.borrow().processed_bytes; + pb.set_message(&format!("Processed: {}", indicatif::HumanBytes(n))); + } + import = &mut import => { + pb.finish(); + println!("Imported: {}", import?.ostree_commit); + return Ok(()) + } + } + } +} + +/// Export a container image with an encapsulated ostree commit. +async fn container_export(repo: &str, rev: &str, imgref: &str) -> Result<()> { + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; + let imgref = imgref.try_into()?; + let pushed = crate::container::export(repo, rev, &imgref).await?; + println!("{}", pushed); + Ok(()) +} + +/// Load metadata for a container image with an encapsulated ostree commit. +async fn container_info(imgref: &str) -> Result<()> { + let imgref = imgref.try_into()?; + let info = crate::container::fetch_manifest_info(&imgref).await?; + println!("{} @{}", imgref, info.manifest_digest); + Ok(()) +} + +/// Add IMA signatures to an ostree commit, generating a new commit. +fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { + let repo = + &ostree::Repo::open_at(libc::AT_FDCWD, cmdopts.repo.as_str(), gio::NONE_CANCELLABLE)?; + let signopts = crate::ima::ImaOpts { + algorithm: cmdopts.algorithm.clone(), + key: cmdopts.key.clone(), + }; + let signed_commit = crate::ima::ima_sign(repo, cmdopts.src_rev.as_str(), &signopts)?; + repo.set_ref_immediate( + None, + cmdopts.target_ref.as_str(), + Some(signed_commit.as_str()), + gio::NONE_CANCELLABLE, + )?; + println!("{} => {}", cmdopts.target_ref, signed_commit); + Ok(()) +} + +/// Parse the provided arguments and execute. +/// Calls [`clap::Error::exit`] on failure, printing the error message and aborting the program. 
+pub async fn run_from_iter<I>(args: I) -> Result<()>
+where
+    I: IntoIterator,
+    I::Item: Into<OsString> + Clone,
+{
+    let opt = Opt::from_iter(args);
+    match opt {
+        Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await,
+        Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt),
+        Opt::Container(ContainerOpts::Info { imgref }) => container_info(imgref.as_str()).await,
+        Opt::Container(ContainerOpts::Import { repo, imgref }) => {
+            container_import(&repo, &imgref).await
+        }
+        Opt::Container(ContainerOpts::Export { repo, rev, imgref }) => {
+            container_export(&repo, &rev, &imgref).await
+        }
+        Opt::ImaSign(ref opts) => ima_sign(opts),
+    }
+}
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index c7a284f5b..2c097db17 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -14,6 +14,7 @@ type Result<T> = anyhow::Result<T>;
 
 mod async_util;
+pub mod cli;
 pub mod container;
 pub mod diff;
 pub mod ima;

From 64428cebbe269c27747c9cccc90bedf08d9c938a Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 26 May 2021 13:27:52 -0400
Subject: [PATCH 048/775] cli: Add `container import --write-ref` option

This will be needed for
https://github.com/coreos/fedora-coreos-tracker/issues/828
so coreos-assembler can find the commit it imported.
---
 lib/src/cli.rs | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index f38b9f347..07a02d01d 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -65,6 +65,10 @@ enum ContainerOpts {
 
         /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest
         imgref: String,
+
+        /// Create an ostree ref pointing to the imported commit
+        #[structopt(long)]
+        write_ref: Option<String>,
     },
 
     /// Print information about an exported ostree-container image.
@@ -139,7 +143,7 @@ fn tar_export(opts: &ExportOpts) -> Result<()> {
 }
 
 /// Import a container image with an encapsulated ostree commit.
-async fn container_import(repo: &str, imgref: &str) -> Result<()> {
+async fn container_import(repo: &str, imgref: &str, write_ref: Option<&str>) -> Result<()> {
     let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?;
     let imgref = imgref.try_into()?;
     let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default());
@@ -153,7 +157,7 @@ async fn container_import(repo: &str, imgref: &str) -> Result<()> {
     let import = crate::container::import(repo, &imgref, Some(tx_progress));
     tokio::pin!(import);
     tokio::pin!(rx_progress);
-    loop {
+    let import = loop {
         tokio::select! {
             _ = rx_progress.changed() => {
                 let n = rx_progress.borrow().processed_bytes;
@@ -161,11 +165,24 @@ async fn container_import(repo: &str, imgref: &str) -> Result<()> {
             }
             import = &mut import => {
                 pb.finish();
-                println!("Imported: {}", import?.ostree_commit);
-                return Ok(())
+                break import?;
             }
         }
+    };
+
+    if let Some(write_ref) = write_ref {
+        repo.set_ref_immediate(
+            None,
+            write_ref,
+            Some(import.ostree_commit.as_str()),
+            gio::NONE_CANCELLABLE,
+        )?;
+        println!("Imported: {} => {}", write_ref, import.ostree_commit.as_str());
+    } else {
+        println!("Imported: {}", import.ostree_commit);
     }
+
+    Ok(())
 }
 
 /// Export a container image with an encapsulated ostree commit.
@@ -216,8 +233,8 @@ where
         Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await,
         Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt),
         Opt::Container(ContainerOpts::Info { imgref }) => container_info(imgref.as_str()).await,
-        Opt::Container(ContainerOpts::Import { repo, imgref }) => {
-            container_import(&repo, &imgref).await
+        Opt::Container(ContainerOpts::Import { repo, imgref, write_ref }) => {
+            container_import(&repo, &imgref, write_ref.as_deref()).await
         }
         Opt::Container(ContainerOpts::Export { repo, rev, imgref }) => {
             container_export(&repo, &rev, &imgref).await

From e25de032be74c55848814a654c9e8148cf081600 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 27 May 2021 13:20:04 -0400
Subject: [PATCH 049/775] container/export: Add ability to configure exported
 container

Part of replacing the cosa/RHCOS oscontainer bits, which sets labels
today with things like specific RPM versions.
---
 lib/src/cli.rs              | 60 ++++++++++++++++++++++++++++++-----
 lib/src/container/export.rs | 34 ++++++++++++++++++---
 lib/src/container/oci.rs    | 16 ++++++++--
 lib/tests/it/main.rs        | 15 ++++++++--
 4 files changed, 108 insertions(+), 17 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 07a02d01d..57f650f3f 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -6,10 +6,13 @@
 //! such as `rpm-ostree` can directly reuse it.
 
 use anyhow::Result;
+use std::collections::BTreeMap;
 use std::convert::TryInto;
 use std::ffi::OsString;
 use structopt::StructOpt;
 
+use crate::container::Config;
+
 #[derive(Debug, StructOpt)]
 struct BuildOpts {
     #[structopt(long)]
     repo: String,
@@ -88,6 +91,14 @@ enum ContainerOpts {
 
         /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest
         imgref: String,
+
+        /// Additional labels for the container
+        #[structopt(name = "label", long, short)]
+        labels: Vec<String>,
+
+        /// Corresponds to the Dockerfile `CMD` instruction.
+        #[structopt(long)]
+        cmd: Option<Vec<String>>,
     },
 }
 
@@ -177,7 +188,11 @@ async fn container_import(repo: &str, imgref: &str, write_ref: Option<&str>) ->
             Some(import.ostree_commit.as_str()),
             gio::NONE_CANCELLABLE,
         )?;
-        println!("Imported: {} => {}", write_ref, import.ostree_commit.as_str());
+        println!(
+            "Imported: {} => {}",
+            write_ref,
+            import.ostree_commit.as_str()
+        );
     } else {
         println!("Imported: {}", import.ostree_commit);
     }
@@ -186,10 +201,20 @@ async fn container_import(repo: &str, imgref: &str, write_ref: Option<&str>) ->
 }
 
 /// Export a container image with an encapsulated ostree commit.
-async fn container_export(repo: &str, rev: &str, imgref: &str) -> Result<()> {
+async fn container_export(
+    repo: &str,
+    rev: &str,
+    imgref: &str,
+    labels: BTreeMap<String, String>,
+    cmd: Option<Vec<String>>,
+) -> Result<()> {
     let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?;
+    let config = Config {
+        labels: Some(labels),
+        cmd,
+    };
     let imgref = imgref.try_into()?;
-    let pushed = crate::container::export(repo, rev, &imgref).await?;
+    let pushed = crate::container::export(repo, rev, &config, &imgref).await?;
     println!("{}", pushed);
     Ok(())
 }
@@ -233,11 +258,30 @@ where
         Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await,
         Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt),
         Opt::Container(ContainerOpts::Info { imgref }) => container_info(imgref.as_str()).await,
-        Opt::Container(ContainerOpts::Import { repo, imgref, write_ref }) => {
-            container_import(&repo, &imgref, write_ref.as_deref()).await
-        }
-        Opt::Container(ContainerOpts::Export { repo, rev, imgref }) => {
-            container_export(&repo, &rev, &imgref).await
+        Opt::Container(ContainerOpts::Import {
+            repo,
+            imgref,
+            write_ref,
+        }) => container_import(&repo, &imgref, write_ref.as_deref()).await,
+        Opt::Container(ContainerOpts::Export {
+            repo,
+            rev,
+            imgref,
+            labels,
+            cmd,
+        }) => {
+            let labels: Result<BTreeMap<String, String>> = labels
+                .into_iter()
+                .map(|l| {
+                    let mut parts = l.splitn(2, '=');
+                    let k = parts.next().unwrap();
+                    let v = parts
+                        .next()
+                        .ok_or_else(|| anyhow::anyhow!("Missing '=' in label {}", l))?;
+                    Ok((k.to_string(), v.to_string()))
+                })
+                .collect();
+            container_export(&repo, &rev, &imgref, labels?, cmd).await
         }
         Opt::ImaSign(ref opts) => ima_sign(opts),
     }
diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs
index f687682ee..2295c8485 100644
--- a/lib/src/container/export.rs
+++ b/lib/src/container/export.rs
@@ -4,9 +4,19 @@ use super::*;
 use crate::{tar as ostree_tar, variant_utils};
 use anyhow::Context;
 use fn_error_context::context;
+use std::collections::BTreeMap;
 use std::path::Path;
 use tracing::{instrument, Level};
 
+/// Configuration for the generated container.
+#[derive(Debug, Default)]
+pub struct Config {
+    /// Additional labels.
+    pub labels: Option<BTreeMap<String, String>>,
+    /// The equivalent of a `Dockerfile`'s `CMD` instruction.
+    pub cmd: Option<Vec<String>>,
+}
+
 /// Write an ostree commit to an OCI blob
 #[context("Writing ostree root to blob")]
 fn export_ostree_ref_to_blobdir(
@@ -27,6 +37,7 @@ fn build_oci(
     repo: &ostree::Repo,
     rev: &str,
     ocidir_path: &Path,
+    config: &Config,
     compression: Option<flate2::Compression>,
 ) -> Result<ImageReference> {
     // Explicitly error if the target exists
     std::fs::create_dir(ocidir_path).context("Creating OCI dir")?;
     let ocidir = &openat::Dir::open(ocidir_path)?;
@@ -51,6 +62,14 @@ fn build_oci(
     writer.add_config_annotation(OSTREE_COMMIT_LABEL, commit);
     writer.add_manifest_annotation(OSTREE_COMMIT_LABEL, commit);
 
+    for (k, v) in config.labels.iter().map(|k| k.iter()).flatten() {
+        writer.add_config_annotation(k, v);
+    }
+    if let Some(cmd) = config.cmd.as_ref() {
+        let cmd: Vec<_> = cmd.iter().map(|s| s.as_str()).collect();
+        writer.set_cmd(&cmd);
+    }
+
     let rootfs_blob = export_ostree_ref_to_blobdir(repo, commit, ocidir, compression)?;
     writer.set_root_layer(rootfs_blob);
     writer.complete()?;
@@ -66,6 +85,7 @@ fn build_oci(
 async fn build_impl(
     repo: &ostree::Repo,
     ostree_ref: &str,
+    config: &Config,
     dest: &ImageReference,
 ) -> Result<ImageReference> {
     let compression = if dest.transport == Transport::ContainerStorage {
@@ -74,13 +94,18 @@ async fn build_impl(
         None
     };
     if dest.transport == Transport::OciDir {
-        let _copied: ImageReference =
-            build_oci(repo, ostree_ref, Path::new(dest.name.as_str()), compression)?;
+        let _copied: ImageReference = build_oci(
+            repo,
+            ostree_ref,
+            Path::new(dest.name.as_str()),
+            config,
+            compression,
+        )?;
     } else {
         let tempdir = tempfile::tempdir_in("/var/tmp")?;
         let tempdest = tempdir.path().join("d");
         let tempdest = tempdest.to_str().unwrap();
-        let src = build_oci(repo, ostree_ref, Path::new(tempdest), compression)?;
+        let src = build_oci(repo, ostree_ref, Path::new(tempdest), config, compression)?;
 
         let mut cmd = skopeo::new_cmd();
         tracing::event!(Level::DEBUG, "Copying {} to {}", src, dest);
@@ -107,7 +132,8 @@ pub async fn export<S: AsRef<str>>(
     repo: &ostree::Repo,
     ostree_ref: S,
+    config: &Config,
     dest: &ImageReference,
 ) -> Result<ImageReference> {
-    build_impl(repo, ostree_ref.as_ref(), dest).await
+    build_impl(repo, ostree_ref.as_ref(), config, dest).await
 }
diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs
index 2e18cf69a..e59d4fe0b 100644
--- a/lib/src/container/oci.rs
+++ b/lib/src/container/oci.rs
@@ -119,6 +119,8 @@ pub(crate) struct OciWriter<'a> {
     config_annotations: HashMap<String, String>,
     manifest_annotations: HashMap<String, String>,
 
+    cmd: Option<Vec<String>>,
+
     root_layer: Option<Layer>,
 }
 
@@ -143,6 +145,7 @@ impl<'a> OciWriter<'a> {
             config_annotations: Default::default(),
             manifest_annotations: Default::default(),
             root_layer: None,
+            cmd: None,
         })
     }
 
@@ -150,6 +153,10 @@ impl<'a> OciWriter<'a> {
         assert!(self.root_layer.replace(layer).is_none())
     }
 
+    pub(crate) fn set_cmd(&mut self, e: &[&str]) {
+        self.cmd = Some(e.iter().map(|s| s.to_string()).collect());
+    }
+
     pub(crate) fn add_manifest_annotation<K: AsRef<str>, V: AsRef<str>>(&mut self, k: K, v: V) {
         let k = k.as_ref();
         let v = v.as_ref();
@@ -171,12 +178,15 @@ impl<'a> OciWriter<'a> {
         let rootfs_blob = self.root_layer.as_ref().unwrap();
         let root_layer_id = format!("sha256:{}", rootfs_blob.uncompressed_sha256);
 
+        let mut ctrconfig = serde_json::Map::new();
+        ctrconfig.insert("Labels".to_string(), serde_json::to_value(&self.config_annotations)?);
+        if let Some(cmd) = self.cmd.as_deref() {
+            ctrconfig.insert("Cmd".to_string(), serde_json::to_value(cmd)?);
+        }
         let config = serde_json::json!({
             "architecture": arch,
             "os": "linux",
-            "config": {
-                "Labels": self.config_annotations,
-            },
+            "config": ctrconfig,
             "rootfs": {
                 "type": "layers",
                 "diff_ids": [ root_layer_id ],
diff --git a/lib/tests/it/main.rs
b/lib/tests/it/main.rs index b3acc16ff..bc0db0ae3 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -2,7 +2,7 @@ use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use indoc::indoc; -use ostree_ext::container::{ImageReference, Transport}; +use ostree_ext::container::{Config, ImageReference, Transport}; use sh_inline::bash; use std::{io::Write, process::Command}; @@ -132,7 +132,16 @@ async fn test_container_import_export() -> Result<()> { transport: Transport::OciDir, name: srcoci_path.as_str().to_string(), }; - let pushed = ostree_ext::container::export(srcrepo, TESTREF, &srcoci) + let config = Config { + labels: Some( + [("foo", "bar"), ("test", "value")] + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect(), + ), + cmd: Some(vec!["/bin/bash".to_string()]), + }; + let pushed = ostree_ext::container::export(srcrepo, TESTREF, &config, &srcoci) .await .context("exporting")?; assert!(srcoci_path.exists()); @@ -140,6 +149,8 @@ async fn test_container_import_export() -> Result<()> { let inspect = skopeo_inspect(&srcoci.to_string())?; assert!(inspect.contains(r#""version": "42.0""#)); + assert!(inspect.contains(r#""foo": "bar""#)); + assert!(inspect.contains(r#""test": "value""#)); let inspect = ostree_ext::container::fetch_manifest_info(&srcoci).await?; assert_eq!(inspect.manifest_digest, digest); From 345acb4f9e2f0bb1d70aa6b5ea85d0886ca88684 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 27 May 2021 12:44:56 -0400 Subject: [PATCH 050/775] Fix all clippy lints Came up in review of another PR; clippy is good. --- .github/workflows/rust.yml | 1 + lib/src/container/import.rs | 2 +- lib/src/container/mod.rs | 7 +++---- lib/src/container/skopeo.rs | 2 +- lib/src/ima.rs | 9 ++++----- lib/src/tar/import.rs | 6 +++--- lib/tests/it/main.rs | 14 +++++++------- 7 files changed, 20 insertions(+), 21 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index dd965dd6d..cf127e6a9 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -28,3 +28,4 @@ jobs: run: cargo build --verbose - name: Run tests run: cargo test --verbose + diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index e8ffa86fb..75b67512c 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -212,7 +212,7 @@ async fn fetch_layer<'s>( let proc = skopeo::spawn(proc)?; let fifo_reader = ProgressReader { reader: Box::new(tokio::fs::File::open(fifo).await?), - progress: progress, + progress, }; let waiter = async move { let res = proc.wait_with_output().await?; diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index fbab8dd6d..2f9a6e846 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -91,7 +91,7 @@ impl TryFrom<&str> for ImageReference { type Error = anyhow::Error; fn try_from(value: &str) -> Result { - let mut parts = value.splitn(2, ":"); + let mut parts = value.splitn(2, ':'); let transport_name = parts.next().unwrap(); let transport: Transport = transport_name.try_into()?; let mut name = parts @@ -172,9 +172,8 @@ mod tests { } for &v in INVALID_IRS { - match ImageReference::try_from(v) { - Ok(_) => panic!("Should fail to parse: {}", v), - Err(_) => {} + if ImageReference::try_from(v).is_ok() { + panic!("Should fail to parse: {}", v) } } let ir: ImageReference = "oci:somedir".try_into().unwrap(); diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs index a5e8eea00..6f5d91400 100644 --- 
a/lib/src/container/skopeo.rs
+++ b/lib/src/container/skopeo.rs
@@ -16,5 +16,5 @@ pub(crate) fn new_cmd() -> tokio::process::Command {
 /// Spawn the child process
 pub(crate) fn spawn(mut cmd: Command) -> Result<tokio::process::Child> {
     let cmd = cmd.stdin(Stdio::null()).stderr(Stdio::piped());
-    Ok(cmd.spawn().context("Failed to exec skopeo")?)
+    cmd.spawn().context("Failed to exec skopeo")
 }
diff --git a/lib/src/ima.rs b/lib/src/ima.rs
index d2ad3df8e..6978f8b9d 100644
--- a/lib/src/ima.rs
+++ b/lib/src/ima.rs
@@ -49,7 +49,7 @@ fn xattrs_to_map(v: &glib::Variant) -> BTreeMap<Vec<u8>, Vec<u8>> {
 
 /// Reserialize a map to GVariant of type `a(ayay)`
 fn xattrmap_serialize(map: &BTreeMap<Vec<u8>, Vec<u8>>) -> glib::Variant {
-    let map: Vec<_> = map.into_iter().collect();
+    let map: Vec<_> = map.iter().collect();
     variant_utils::new_variant_a_ayay(&map)
 }
 
@@ -109,7 +109,7 @@ impl<'a> CommitRewriter<'a> {
     ) -> Result<BTreeMap<Vec<u8>, Vec<u8>>> {
         let mut tempf = tempfile::NamedTempFile::new_in(self.tempdir.path())?;
         // If we're operating on a bare repo, we can clone the file (copy_file_range) directly.
-        if let Some(instream) = instream.clone().downcast::<gio::UnixInputStream>().ok() {
+        if let Ok(instream) = instream.clone().downcast::<gio::UnixInputStream>() {
             // View the fd as a File
             let instream_fd = unsafe { File::from_raw_fd(instream.as_raw_fd()) };
             instream_fd.copy_to(tempf.as_file_mut())?;
@@ -189,8 +189,7 @@ impl<'a> CommitRewriter<'a> {
         let xattrs = {
             let signed = self.ima_sign(&instream, selinux)?;
             xattrs.extend(signed);
-            let r = xattrmap_serialize(&xattrs);
-            r
+            xattrmap_serialize(&xattrs)
         };
         // Now reload the input stream
         let (instream, _, _) = self.repo.load_file(checksum, cancellable)?;
@@ -343,5 +342,5 @@ impl<'a> CommitRewriter<'a> {
 /// such as version, etc.
 pub fn ima_sign(repo: &ostree::Repo, ostree_ref: &str, opts: &ImaOpts) -> Result<String> {
     let writer = &mut CommitRewriter::new(&repo, &opts)?;
-    Ok(writer.map_commit(ostree_ref)?)
+    writer.map_commit(ostree_ref)
 }
diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index 48b603db2..bf27d1344 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -89,7 +89,7 @@ fn validate_metadata_header(header: &tar::Header, desc: &str) -> Result<usize> {
 fn header_attrs(header: &tar::Header) -> Result<(u32, u32, u32)> {
     let uid: u32 = header.uid()?.try_into()?;
     let gid: u32 = header.gid()?.try_into()?;
-    let mode: u32 = header.mode()?.try_into()?;
+    let mode: u32 = header.mode()?;
     Ok((uid, gid, mode))
 }
 
@@ -404,7 +404,7 @@ impl<'a> Importer<'a> {
     }
 
     /// Process a special /xattrs/ entry (sha256 of xattr values).
- fn import_xattrs<'b, R: std::io::Read>(&mut self, mut entry: tar::Entry<'b, R>) -> Result<()> { + fn import_xattrs(&mut self, mut entry: tar::Entry) -> Result<()> { match &self.state { ImportState::Initial => return Err(anyhow!("Found xattr object {} before commit")), ImportState::Importing(_) => {} @@ -500,7 +500,7 @@ pub async fn import_tar( // Need to clone here, otherwise we borrow from the moved entry let p = &p.to_owned(); importer.import_object(entry, p)?; - } else if let Ok(_) = path.strip_prefix("xattrs/") { + } else if path.strip_prefix("xattrs/").is_ok() { importer.import_xattrs(entry)?; } } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index b3acc16ff..6dde36c2b 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -136,7 +136,7 @@ async fn test_container_import_export() -> Result<()> { .await .context("exporting")?; assert!(srcoci_path.exists()); - let digest = pushed.name.rsplitn(2, "@").next().unwrap(); + let digest = pushed.name.rsplitn(2, '@').next().unwrap(); let inspect = skopeo_inspect(&srcoci.to_string())?; assert!(inspect.contains(r#""version": "42.0""#)); @@ -164,18 +164,18 @@ fn test_diff() -> Result<()> { let diff = ostree_ext::diff::diff(repo, from, TESTREF, subdir)?; assert!(diff.subdir.is_none()); assert_eq!(diff.added_dirs.len(), 1); - assert_eq!(diff.added_dirs.iter().nth(0).unwrap(), "/usr/share"); + assert_eq!(diff.added_dirs.iter().next().unwrap(), "/usr/share"); assert_eq!(diff.added_files.len(), 1); - assert_eq!(diff.added_files.iter().nth(0).unwrap(), "/usr/bin/newbin"); + assert_eq!(diff.added_files.iter().next().unwrap(), "/usr/bin/newbin"); assert_eq!(diff.removed_files.len(), 1); - assert_eq!(diff.removed_files.iter().nth(0).unwrap(), "/usr/bin/foo"); + assert_eq!(diff.removed_files.iter().next().unwrap(), "/usr/bin/foo"); let diff = ostree_ext::diff::diff(repo, from, TESTREF, Some("/usr"))?; assert_eq!(diff.subdir.as_ref().unwrap(), "/usr"); assert_eq!(diff.added_dirs.len(), 1); - assert_eq!(diff.added_dirs.iter().nth(0).unwrap(), "/share"); + assert_eq!(diff.added_dirs.iter().next().unwrap(), "/share"); assert_eq!(diff.added_files.len(), 1); - assert_eq!(diff.added_files.iter().nth(0).unwrap(), "/bin/newbin"); + assert_eq!(diff.added_files.iter().next().unwrap(), "/bin/newbin"); assert_eq!(diff.removed_files.len(), 1); - assert_eq!(diff.removed_files.iter().nth(0).unwrap(), "/bin/foo"); + assert_eq!(diff.removed_files.iter().next().unwrap(), "/bin/foo"); Ok(()) } From 0284da4a5f4d71684f79a8b41009a84dd1d4f101 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 2 Jun 2021 14:21:19 -0400 Subject: [PATCH 051/775] Release 0.1.3 --- cli/Cargo.toml | 2 +- lib/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 23cbed594..484a20c89 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ostree-ext-cli" -version = "0.1.2" +version = "0.1.3" authors = ["Colin Walters "] edition = "2018" license = "MIT OR Apache-2.0" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 5a7f8d719..adb8f04eb 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-ext" -version = "0.1.2" +version = "0.1.3" [dependencies] anyhow = "1.0" From ae7114322b228051c79cfcb07492d366459e3b40 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 3 Jun 2021 16:58:06 -0400 Subject: [PATCH 052/775] oci: Add crate name/version in history For 
debugging purposes. --- lib/src/container/oci.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index e59d4fe0b..cdbf2501c 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -183,6 +183,7 @@ impl<'a> OciWriter<'a> { if let Some(cmd) = self.cmd.as_deref() { ctrconfig.insert("Cmd".to_string(), serde_json::to_value(cmd)?); } + let created_by = concat!("created by ", env!("CARGO_PKG_VERSION")); let config = serde_json::json!({ "architecture": arch, "os": "linux", @@ -193,7 +194,7 @@ }, "history": [ { - "commit": "created by ostree-container", + "commit": created_by, } ] }); From b61d51405694afcad30a118edbc941b3482a1af9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 3 Jun 2021 17:33:57 -0400 Subject: [PATCH 053/775] oci: Generate correct uncompressed hash I broke this in a refactoring; it turns out `docker` checks it but `podman` doesn't (which seems clearly to be a bug). Reported on chat by someone trying it out via `docker`. --- lib/src/container/oci.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index cdbf2501c..babbb55ec 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -305,6 +305,7 @@ impl<'a> std::io::Write for LayerWriter<'a> { fn write(&mut self, srcbuf: &[u8]) -> std::io::Result { self.compressor.get_mut().clear(); self.compressor.write_all(srcbuf).unwrap(); + self.uncompressed_hash.update(srcbuf)?; let compressed_buf = self.compressor.get_mut().as_slice(); self.bw.write_all(&compressed_buf)?; Ok(srcbuf.len()) } From d2668e31b665e03878dcb0d0c811b905e9d7b98c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 4 Jun 2021 08:42:14 -0400 Subject: [PATCH 054/775] oci: Delegate layer flush to blob flush This is actually a no-op right now, but that may change. In any case it's more correct. --- lib/src/container/oci.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index babbb55ec..0995a0553 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -312,6 +312,6 @@ impl<'a> std::io::Write for LayerWriter<'a> { } fn flush(&mut self) -> std::io::Result<()> { - Ok(()) + self.bw.flush() } } From a19e8b6de8498df016bb31404e7c6ea2f0495a80 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 4 Jun 2021 09:05:00 -0400 Subject: [PATCH 055/775] oci: Add a basic test This could obviously cover a lot more, but then we'd need to actually *read* OCI, which would require a lot more code here, and I don't want this to grow into a full-blown Rust OCI library. I plan to invest in end-to-end tests that e.g. use skopeo more here too.
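As context for the test added below: the uncompressed-hash fix above works by hashing the raw tar bytes while only the gzip-compressed bytes are written out. A minimal standalone sketch of that pattern (not the crate's actual `LayerWriter`), reusing the `flate2`, `openssl`, and `hex` crates already in the dependency set:

```rust
// Sketch only: hash the uncompressed bytes, compress them separately,
// so the recorded uncompressed sha256 stays correct.
use flate2::{write::GzEncoder, Compression};
use std::io::Write;

fn main() -> std::io::Result<()> {
    let mut hasher = openssl::sha::Sha256::new();
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    for chunk in &[&b"pretend this is"[..], &b" a tarball"[..]] {
        hasher.update(chunk); // digest of the raw (uncompressed) stream
        encoder.write_all(chunk)?; // compressed bytes for the OCI blob
    }
    let compressed = encoder.finish()?;
    println!("uncompressed sha256: {}", hex::encode(hasher.finish()));
    println!("compressed size: {} bytes", compressed.len());
    Ok(())
}
```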
--- lib/src/container/oci.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 0995a0553..842a1617b 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -315,3 +315,25 @@ impl<'a> std::io::Write for LayerWriter<'a> { self.bw.flush() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_build() -> Result<()> { + let td = tempfile::tempdir()?; + let td = &openat::Dir::open(td.path())?; + let mut w = OciWriter::new(td)?; + let mut layerw = LayerWriter::new(td, None)?; + layerw.write_all(b"pretend this is a tarball")?; + let root_layer = layerw.complete()?; + assert_eq!( + root_layer.uncompressed_sha256, + "349438e5faf763e8875b43de4d7101540ef4d865190336c2cc549a11f33f8d7c" + ); + w.set_root_layer(root_layer); + w.complete()?; + Ok(()) + } +} From 5147edb94305d717055b0da074f6d0e8aae2f456 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 17 Jun 2021 17:36:12 -0400 Subject: [PATCH 056/775] tar/export: Add larger buffer for reads I noticed we were just using an 8k buffer; bumping this up to 128k as used by coreutils cat is slightly faster here (just a few percent), but it also makes `strace` way less noisy. --- lib/src/tar/export.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 2294b7b56..9713fcc3b 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -10,10 +10,15 @@ use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; use std::borrow::Cow; use std::collections::HashSet; +use std::io::BufReader; // This way the default ostree -> sysroot/ostree symlink works. const OSTREEDIR: &str = "sysroot/ostree"; +/// A decently large buffer, as used by e.g. coreutils `cat`. +/// System calls are expensive. +const BUF_CAPACITY: usize = 131072; + /// Convert /usr/etc back to /etc fn map_path(p: &Utf8Path) -> std::borrow::Cow { match p.strip_prefix("./usr/etc") { @@ -143,7 +148,7 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { if let Some(instream) = instream { h.set_entry_type(tar::EntryType::Regular); h.set_size(meta.get_size() as u64); - let mut instream = instream.into_read(); + let mut instream = BufReader::with_capacity(BUF_CAPACITY, instream.into_read()); self.out.append_data(&mut h, &path, &mut instream)?; } else { h.set_size(0); From 2cc1682f62a7f4bd76d3eb8eaa39dff3c5eb16d4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 17 Jun 2021 17:44:23 -0400 Subject: [PATCH 057/775] ci: Use latest buildroot, drop ostree override The latest official buildroot has what we need. 
--- .github/workflows/rust.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index cf127e6a9..92e64d8fe 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -16,14 +16,12 @@ jobs: build: runs-on: ubuntu-latest - container: quay.io/cgwalters/fcos-buildroot + container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - name: Install skopeo run: yum -y install skopeo - uses: actions/checkout@v2 - - name: Hack in updated ostree - run: rpm -Uvh https://kojipkgs.fedoraproject.org//packages/ostree/2021.2/2.fc33/x86_64/ostree-{,devel-,libs-}2021.2-2.fc33.x86_64.rpm - name: Build run: cargo build --verbose - name: Run tests From e83dc1b515cf6b0ad713c9d3d6b61bd3c85246ad Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 17 Jun 2021 17:45:45 -0400 Subject: [PATCH 058/775] Run `cargo fmt` and add ci check --- .github/workflows/rust.yml | 2 ++ lib/src/cli.rs | 2 +- lib/src/container/oci.rs | 5 ++++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 92e64d8fe..aa3e9eab3 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -22,6 +22,8 @@ jobs: - name: Install skopeo run: yum -y install skopeo - uses: actions/checkout@v2 + - name: Format + run: cargo fmt -- --check -l - name: Build run: cargo build --verbose - name: Run tests diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 57f650f3f..19ad390a8 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -93,7 +93,7 @@ enum ContainerOpts { imgref: String, /// Additional labels for the container - #[structopt(name="label", long, short)] + #[structopt(name = "label", long, short)] labels: Vec, /// Corresponds to the Dockerfile `CMD` instruction. diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 842a1617b..a9a09717b 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -179,7 +179,10 @@ impl<'a> OciWriter<'a> { let root_layer_id = format!("sha256:{}", rootfs_blob.uncompressed_sha256); let mut ctrconfig = serde_json::Map::new(); - ctrconfig.insert("Labels".to_string(), serde_json::to_value(&self.config_annotations)?); + ctrconfig.insert( + "Labels".to_string(), + serde_json::to_value(&self.config_annotations)?, + ); if let Some(cmd) = self.cmd.as_deref() { ctrconfig.insert("Cmd".to_string(), serde_json::to_value(cmd)?); } From ba1dbb039d9541543405efe5859cf8ada1426efb Mon Sep 17 00:00:00 2001 From: Kelvin Fan Date: Sat, 19 Jun 2021 10:43:40 -0400 Subject: [PATCH 059/775] Add Questions and Answers section --- docs/questions-and-answers.md | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 docs/questions-and-answers.md diff --git a/docs/questions-and-answers.md b/docs/questions-and-answers.md new file mode 100644 index 000000000..2c7abb32a --- /dev/null +++ b/docs/questions-and-answers.md @@ -0,0 +1,40 @@ +# Questions and answers + +## module "container": Encapsulate OSTree commits in OCI/Docker images + +### How is this different from the "tarball-of-archive-repo" approach currently used in RHEL CoreOS? Aren't both encapsulating an OSTree commit in an OCI image? + +- The "tarball-of-archive-repo" approach is essentially just putting an OSTree repo in archive mode under `/srv` as an additional layer over a regular RHEL base image. In the new data format, users can do e.g. `podman run --rm -ti quay.io/fedora/fedora-coreos:stable bash`. 
This could be quite useful for some tests of OSTree commits (at one point we had a test that literally booted a whole VM to run `rpm -q`; it'd be much cheaper to do those kinds of "OS sanity checks" in a container). + +- The new data format is intentionally designed to be streamed; the files inside the tarball are ordered by (commit, metadata, content ...). With "tarball-of-archive-repo" as it is today that's not true, so we need to pull and extract the whole thing to a temporary location, which is inefficient. See also https://github.com/ostreedev/ostree-rs-ext/issues/1. + +- We have a much clearer story for adding Docker/OCI-style _derivation_ later. + +- The new data format abstracts away OSTree a bit more and avoids needing people to think about OSTree unnecessarily. + +### Why pull from a container image instead of the current (older) method of pulling from OSTree repos? + +A good example is for people who want to do offline/disconnected installations and updates. They will almost certainly have container images they want to pull too; now the OS is just another container image. Users no longer need to mirror OSTree repos. Overall, as mentioned already, we want to abstract away OSTree a bit more. + +### Can users view this as a regular container image? + +Yes, and it also provides some extras. In addition to being runnable as a container, if the host is OSTree-based, the host itself can be deployed/updated into this image, too. There is also GPG signing and per-file integrity validation that comes with OSTree. + +### So then would this OSTree commit in a container image also work as a boot image (bootable from a USB drive)? + +No. Though there could certainly be kernels and initramfses in the (OSTree commit in the) container image, that doesn't make it bootable. OSTree _understands_ bootloaders and can update kernels/initramfs images, but it doesn't update bootloaders; that is [bootupd](https://github.com/coreos/bootupd)'s job. Furthermore, this is still a container image, made of tarballs and manifests; it is not formatted to be a disk image (e.g. it doesn't have a FAT32-formatted ESP). Related to this topic is https://github.com/iximiuz/docker-to-linux, which illustrates the difference between a docker image and a bootable image. +TL;DR: an OSTree commit in a container image is meant only to deliver OS updates (OSTree commits), not bootable disk images. + +### How much deduplication do we still get with this new approach? + +Unfortunately, today we do indeed need to download more than is actually needed, but the files will still be deduplicated on disk, just like before. So we still won't be storing extra files, but we will be downloading extra files. +But for users doing offline mirroring, this shouldn't matter that much. In OpenShift, the entire image is downloaded today, as well. +Nevertheless, see https://github.com/ostreedev/ostree-rs-ext/#integrating-with-future-container-deltas. + +### Will there be support for "layers" in the OSTree commit in container image? + +Not yet, but as mentioned above, this opens up the possibility of doing OCI-style derivation, so it could certainly be added later. It would be useful to make this image as familiar to admins as possible. Right now, the ostree-rs-ext client only parses one layer of the container image. + +### How will mirroring image registries work? + +Since ostree-rs-ext uses skopeo (which uses `containers/image`), mirroring is transparently supported, i.e.
admins can configure their mirroring in `containers-registries.conf` and it'll just work. From c820f940200ed56a7a8408274e9781d860986d14 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 24 Jun 2021 17:15:52 -0400 Subject: [PATCH 060/775] variant_utils: Add API to create an `as` variant Since this comes up a lot in ostree APIs. Closes: https://github.com/ostreedev/ostree-rs/issues/5 --- lib/src/variant_utils.rs | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs index 7c3da647f..3ca2ff4f1 100644 --- a/lib/src/variant_utils.rs +++ b/lib/src/variant_utils.rs @@ -3,9 +3,9 @@ //! avoiding another crate for this. In the future, some of these //! may migrate into gtk-rs. -use std::mem::size_of; - use glib::translate::*; +use glib::ToVariant; +use std::mem::size_of; /// Create a new GVariant from data. pub fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> glib::Variant { @@ -82,6 +82,21 @@ pub fn new_variant_a_ayay>(items: &[(T, T)]) -> glib::Variant { } } +/// Create a new GVariant of type `as`. +pub fn new_variant_as(items: &[&str]) -> glib::Variant { + unsafe { + let ty = glib::VariantTy::new("as").unwrap(); + let builder = glib_sys::g_variant_builder_new(ty.as_ptr() as *const _); + for &k in items { + let k = k.to_variant(); + glib_sys::g_variant_builder_add_value(builder, k.to_glib_none().0); + } + let v = glib_sys::g_variant_builder_end(builder); + glib_sys::g_variant_ref_sink(v); + from_glib_full(v) + } +} + /// Extension trait for `glib::VariantDict`. pub trait VariantDictExt { /// Find (and duplicate) a string-valued key in this dictionary. @@ -124,4 +139,19 @@ mod tests { d.insert("foo", &"bar"); assert_eq!(d.lookup_str("foo"), Some("bar".to_string())); } + + #[test] + fn test_variant_as() { + let _ = new_variant_as(&[]); + let v = new_variant_as(&["foo", "bar"]); + assert_eq!( + variant_get_child_value(&v, 0).unwrap().get_str().unwrap(), + "foo" + ); + assert_eq!( + variant_get_child_value(&v, 1).unwrap().get_str().unwrap(), + "bar" + ); + assert!(variant_get_child_value(&v, 2).is_none()); + } } From 93a7a80e20a19cc30bbc4870f42f39bf08b9232d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 14 Jul 2021 09:09:30 -0400 Subject: [PATCH 061/775] container/import: Only close skopeo fifo once the process has exited We're hitting this race condition when teaching rpm-ostree to consume container images. The current skopeo code creates two async tasks, one to read from the FIFO (pipe), and one to wait on the child process. Currently when we find the layer tarball we're expecting, we close the FIFO. This causes any further messages from skopeo to fail with `EPIPE` which it's not expecting, which will then cause skopeo to exit with a failure. Rework the logic here so that we hold open the FIFO until the process has exited. The "tar parsing" import code takes ownership of the stream, but then passes it back to the caller. --- lib/src/container/import.rs | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 75b67512c..01e05e72b 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -91,24 +91,37 @@ async fn fetch_manifest(imgref: &ImageReference) -> Result<(oci::Manifest, Strin Ok((serde_json::from_slice(&raw_manifest)?, digest)) } -/// Read the contents of the first .tar we find +/// Read the contents of the first .tar we find. 
+/// The first return value is an `AsyncRead` of that tar file. +/// The second return value is a background worker task that will +/// return back to the caller the provided input stream (converted +/// to a synchronous reader). This ensures the caller can take +/// care of closing the input stream. pub async fn find_layer_tar( src: impl AsyncRead + Send + Unpin + 'static, blobid: &str, -) -> Result<(impl AsyncRead, impl Future>)> { +) -> Result<( + impl AsyncRead, + impl Future>, +)> { + // Convert the async input stream to synchronous, becuase we currently use the + // sync tar crate. let pipein = crate::async_util::async_read_to_sync(src); + // An internal channel of Bytes let (tx_buf, rx_buf) = tokio::sync::mpsc::channel(2); let blob_symlink_target = format!("../{}.tar", blobid); let import = tokio::task::spawn_blocking(move || { find_layer_tar_sync(pipein, blob_symlink_target, tx_buf) }) .map_err(anyhow::Error::msg); + // Bridge the channel to an AsyncRead let stream = tokio_stream::wrappers::ReceiverStream::new(rx_buf); let reader = tokio_util::io::StreamReader::new(stream); + // This async task owns the internal worker thread, which also owns the provided + // input stream which we return to the caller. let worker = async move { - let import = import.await?; - let _: () = import.context("Import worker")?; - Ok::<_, anyhow::Error>(()) + let src_as_sync = import.await?.context("Import worker")?; + Ok::<_, anyhow::Error>(src_as_sync) }; Ok((reader, worker)) } @@ -120,7 +133,7 @@ fn find_layer_tar_sync( pipein: impl Read + Send + Unpin, blob_symlink_target: String, tx_buf: tokio::sync::mpsc::Sender>, -) -> Result<()> { +) -> Result { let mut archive = tar::Archive::new(pipein); let mut buf = vec![0u8; 8192]; let mut found = false; @@ -179,7 +192,7 @@ fn find_layer_tar_sync( } } if found { - Ok(()) + Ok(archive.into_inner()) } else { Err(anyhow!("Failed to find layer {}", blob_symlink_target)) } @@ -229,8 +242,8 @@ async fn fetch_layer<'s>( let (contents, worker) = find_layer_tar(fifo_reader, blobid).await?; let worker = async move { let (worker, waiter) = tokio::join!(worker, waiter); - let _: () = worker.context("Layer worker failed")?; let _: () = waiter?; + let _pipein = worker.context("Layer worker failed")?; Ok::<_, anyhow::Error>(()) }; Ok((contents, worker)) From 80b001db42e14bdd82392dffdf1b133284670137 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 14 Jul 2021 10:21:43 -0400 Subject: [PATCH 062/775] Release 0.1.4 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index adb8f04eb..57c8506ff 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-ext" -version = "0.1.3" +version = "0.1.4" [dependencies] anyhow = "1.0" From da3656327df00a50641cb9104676a500bab6dd68 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 25 Jul 2021 16:10:33 -0400 Subject: [PATCH 063/775] Port to glib 0.14 This depends on the WIP port for ostree-rs. 
--- cli/Cargo.toml | 6 ++--- lib/Cargo.toml | 9 ++++---- lib/src/container/export.rs | 2 +- lib/src/diff.rs | 23 +++++++++---------- lib/src/ima.rs | 8 +++---- lib/src/lib.rs | 1 - lib/src/ostree_ext.rs | 45 ------------------------------------- lib/src/tar/export.rs | 21 +++++++++-------- lib/src/tar/import.rs | 1 - lib/src/variant_utils.rs | 8 +++---- 10 files changed, 36 insertions(+), 88 deletions(-) delete mode 100644 lib/src/ostree_ext.rs diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 484a20c89..5c346480c 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -12,12 +12,10 @@ anyhow = "1.0" ostree-ext = { path = "../lib" } clap = "2.33.3" structopt = "0.3.21" -ostree = { version = "0.11.0", features = ["v2021_2"] } +ostree = { version = "0.12.0", features = ["v2021_2"] } libc = "0.2.92" tokio = { version = "1", features = ["full"] } -gio = "0.9.1" +gio = "0.14" log = "0.4.0" tracing = "0.1" tracing-subscriber = "0.2.17" - - diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 57c8506ff..611e5d76b 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -17,9 +17,9 @@ flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.1.1" futures = "0.3.13" indicatif = "0.15.0" -gio = "0.9.1" -glib = "0.10.3" -glib-sys = "0.10.1" +gio = "0.14" +glib = "0.14" +glib-sys = "0.14" gvariant = "0.4.0" hex = "0.4.3" libc = "0.2.92" @@ -28,8 +28,7 @@ phf = { features = ["macros"], version = "0.8.0" } openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { features = ["v2021_2"], version = "0.11.0" } -ostree-sys = "0.7.2" +ostree = { features = ["v2021_2"], version = "0.12.0" } serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" structopt = "0.3.21" diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 2295c8485..ad52c3cfd 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -54,7 +54,7 @@ fn build_oci( if let Some(version) = commit_meta.lookup_value("version", Some(glib::VariantTy::new("s").unwrap())) { - let version = version.get_str().unwrap(); + let version = version.str().unwrap(); writer.add_config_annotation("version", version); writer.add_manifest_annotation("ostree.version", version); } diff --git a/lib/src/diff.rs b/lib/src/diff.rs index d5c3ac627..bd3b7fd5b 100644 --- a/lib/src/diff.rs +++ b/lib/src/diff.rs @@ -9,7 +9,6 @@ use anyhow::{Context, Result}; use fn_error_context::context; use gio::prelude::*; -use ostree::RepoFileExt; use std::collections::BTreeSet; use std::fmt; @@ -86,14 +85,14 @@ fn diff_recurse( // Iterate over the source (from) directory, and compare with the // target (to) directory. This generates removals and changes. while let Some(from_info) = from_iter.next_file(cancellable)? 
{ - let from_child = from_iter.get_child(&from_info).expect("file"); - let name = from_info.get_name().expect("name"); + let from_child = from_iter.child(&from_info); + let name = from_info.name(); let name = name.to_str().expect("UTF-8 ostree name"); let path = format!("{}{}", prefix, name); - let to_child = to.get_child(&name).expect("child"); + let to_child = to.child(&name); let to_info = query_info_optional(&to_child, queryattrs, queryflags) .context("querying optional to")?; - let is_dir = matches!(from_info.get_file_type(), gio::FileType::Directory); + let is_dir = matches!(from_info.file_type(), gio::FileType::Directory); if to_info.is_some() { let to_child = to_child.downcast::().expect("downcast"); to_child.ensure_resolved()?; @@ -114,8 +113,8 @@ fn diff_recurse( diff.changed_dirs.insert(path); } } else { - let from_checksum = from_child.get_checksum().expect("checksum"); - let to_checksum = to_child.get_checksum().expect("checksum"); + let from_checksum = from_child.checksum().expect("checksum"); + let to_checksum = to_child.checksum().expect("checksum"); if from_checksum != to_checksum { diff.changed_files.insert(path); } @@ -130,16 +129,16 @@ fn diff_recurse( // files/directories which were not present in the source. let to_iter = to.enumerate_children(queryattrs, queryflags, cancellable)?; while let Some(to_info) = to_iter.next_file(cancellable)? { - let name = to_info.get_name().expect("name"); + let name = to_info.name(); let name = name.to_str().expect("UTF-8 ostree name"); let path = format!("{}{}", prefix, name); - let from_child = from.get_child(name).expect("child"); + let from_child = from.child(name); let from_info = query_info_optional(&from_child, queryattrs, queryflags) .context("querying optional from")?; if from_info.is_some() { continue; } - let is_dir = matches!(to_info.get_file_type(), gio::FileType::Directory); + let is_dir = matches!(to_info.file_type(), gio::FileType::Directory); if is_dir { diff.added_dirs.insert(path); } else { @@ -163,8 +162,8 @@ pub fn diff>( let (toroot, _) = repo.read_commit(to, gio::NONE_CANCELLABLE)?; let (fromroot, toroot) = if let Some(subdir) = subdir { ( - fromroot.resolve_relative_path(subdir).expect("path"), - toroot.resolve_relative_path(subdir).expect("path"), + fromroot.resolve_relative_path(subdir), + toroot.resolve_relative_path(subdir), ) } else { (fromroot, toroot) diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 6978f8b9d..af33af124 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -36,7 +36,7 @@ pub struct ImaOpts { /// Convert a GVariant of type `a(ayay)` to a mutable map fn xattrs_to_map(v: &glib::Variant) -> BTreeMap, Vec> { - let v = v.get_data_as_bytes(); + let v = v.data_as_bytes(); let v = v.try_as_aligned().unwrap(); let v = gv!("a(ayay)").cast(v); let mut map: BTreeMap, Vec> = BTreeMap::new(); @@ -90,7 +90,7 @@ impl<'a> CommitRewriter<'a> { Ok(Self { repo, ima, - tempdir: tempfile::tempdir_in(format!("/proc/self/fd/{}/tmp", repo.get_dfd()))?, + tempdir: tempfile::tempdir_in(format!("/proc/self/fd/{}/tmp", repo.dfd()))?, rewritten_files: Default::default(), }) } @@ -213,7 +213,7 @@ impl<'a> CommitRewriter<'a> { let src = &self .repo .load_variant(ostree::ObjectType::DirTree, checksum)?; - let src = src.get_data_as_bytes(); + let src = src.data_as_bytes(); let src = src.try_as_aligned()?; let src = gv!("(a(say)a(sayay))").cast(src); let (files, dirs) = src.to_tuple(); @@ -310,7 +310,7 @@ impl<'a> CommitRewriter<'a> { let (commit_v, _) = self.repo.load_commit(&checksum)?; let commit_v = &commit_v; - let 
commit_bytes = commit_v.get_data_as_bytes(); + let commit_bytes = commit_v.data_as_bytes(); let commit_bytes = commit_bytes.try_as_aligned()?; let commit = gv!("(a{sv}aya(say)sstayay)").cast(commit_bytes); let commit = commit.to_tuple(); diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 2c097db17..a778090e8 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -18,7 +18,6 @@ pub mod cli; pub mod container; pub mod diff; pub mod ima; -pub mod ostree_ext; pub mod tar; #[allow(unsafe_code)] pub mod variant_utils; diff --git a/lib/src/ostree_ext.rs b/lib/src/ostree_ext.rs deleted file mode 100644 index 3a12c3025..000000000 --- a/lib/src/ostree_ext.rs +++ /dev/null @@ -1,45 +0,0 @@ -//! Extension traits fixing incorrectly bound things in ostree-rs -//! by defining a new function with an `x_` prefix. - -// SPDX-License-Identifier: Apache-2.0 OR MIT - -use glib::translate::*; -use std::ptr; - -/// Extension functions which fix incorrectly bound APIs. -pub trait RepoExt { - /// Version of [`ostree::Repo::load_variant_if_exists`] that correctly - /// returns an [`Option`]. - fn x_load_variant_if_exists( - &self, - objtype: ostree::ObjectType, - checksum: &str, - ) -> Result, glib::Error>; -} - -impl RepoExt for ostree::Repo { - #[allow(unsafe_code)] - fn x_load_variant_if_exists( - &self, - objtype: ostree::ObjectType, - checksum: &str, - ) -> Result, glib::Error> { - unsafe { - let mut out_v = ptr::null_mut(); - let mut error = ptr::null_mut(); - let checksum = checksum.to_glib_none(); - let _ = ostree_sys::ostree_repo_load_variant_if_exists( - self.to_glib_none().0, - objtype.to_glib(), - checksum.0, - &mut out_v, - &mut error, - ); - if error.is_null() { - Ok(from_glib_full(out_v)) - } else { - Err(from_glib_full(error)) - } - } - } -} diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 9713fcc3b..0999e294c 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -2,7 +2,6 @@ use crate::Result; -use crate::ostree_ext::RepoExt; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use gio::prelude::*; @@ -78,7 +77,7 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { h.set_uid(0); h.set_gid(0); h.set_mode(0o644); - let data = v.get_data_as_bytes(); + let data = v.data_as_bytes(); let data = data.as_ref(); h.set_size(data.len() as u64); self.out @@ -90,7 +89,7 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { &mut self, xattrs: &glib::Variant, ) -> Result> { - let xattrs_data = xattrs.get_data_as_bytes(); + let xattrs_data = xattrs.data_as_bytes(); let xattrs_data = xattrs_data.as_ref(); if xattrs_data.is_empty() { return Ok(None); @@ -126,9 +125,9 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { let xattrs = xattrs.unwrap(); let mut h = tar::Header::new_gnu(); - h.set_uid(meta.get_attribute_uint32("unix::uid") as u64); - h.set_gid(meta.get_attribute_uint32("unix::gid") as u64); - let mode = meta.get_attribute_uint32("unix::mode"); + h.set_uid(meta.attribute_uint32("unix::uid") as u64); + h.set_gid(meta.attribute_uint32("unix::gid") as u64); + let mode = meta.attribute_uint32("unix::mode"); h.set_mode(mode); let mut target_header = h.clone(); target_header.set_size(0); @@ -147,13 +146,13 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { if let Some(instream) = instream { h.set_entry_type(tar::EntryType::Regular); - h.set_size(meta.get_size() as u64); + h.set_size(meta.size() as u64); let mut instream = BufReader::with_capacity(BUF_CAPACITY, instream.into_read()); self.out.append_data(&mut h, &path, &mut instream)?; } 
else { h.set_size(0); h.set_entry_type(tar::EntryType::Symlink); - h.set_link_name(meta.get_symlink_target().unwrap().as_str())?; + h.set_link_name(meta.symlink_target().unwrap().as_str())?; self.out.append_data(&mut h, &path, &mut std::io::empty())?; } } @@ -172,7 +171,7 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { .repo .load_variant(ostree::ObjectType::DirTree, checksum)?; self.append(ostree::ObjectType::DirTree, checksum, v)?; - let v = v.get_data_as_bytes(); + let v = v.data_as_bytes(); let v = v.try_as_aligned()?; let v = gv!("(a(say)a(sayay))").cast(v); let (files, dirs) = v.to_tuple(); @@ -265,12 +264,12 @@ fn impl_export( writer.append(ostree::ObjectType::Commit, commit_checksum, commit_v)?; if let Some(commitmeta) = - repo.x_load_variant_if_exists(ostree::ObjectType::CommitMeta, commit_checksum)? + repo.load_variant_if_exists(ostree::ObjectType::CommitMeta, commit_checksum)? { writer.append(ostree::ObjectType::CommitMeta, commit_checksum, &commitmeta)?; } - let commit_v = commit_v.get_data_as_bytes(); + let commit_v = commit_v.data_as_bytes(); let commit_v = commit_v.try_as_aligned()?; let commit = gv!("(a{sv}aya(say)sstayay)").cast(commit_v); let commit = commit.to_tuple(); diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index bf27d1344..336ac4158 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -8,7 +8,6 @@ use fn_error_context::context; use futures::prelude::*; use gio::prelude::*; use glib::Cast; -use ostree::ContentWriterExt; use std::collections::HashMap; use std::convert::TryInto; use std::io::prelude::*; diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs index 3ca2ff4f1..7ad529e89 100644 --- a/lib/src/variant_utils.rs +++ b/lib/src/variant_utils.rs @@ -14,7 +14,7 @@ pub fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> gl let ty: *const libc::c_char = ty.0; let ty = ty as *const glib_sys::GVariantType; let bytes = bytes.to_glib_full(); - let v = glib_sys::g_variant_new_from_bytes(ty, bytes, trusted.to_glib()); + let v = glib_sys::g_variant_new_from_bytes(ty, bytes, trusted.into_glib()); glib_sys::g_variant_ref_sink(v); from_glib_full(v) } @@ -109,7 +109,7 @@ impl VariantDictExt for glib::VariantDict { fn lookup_str(&self, k: &str) -> Option { // Unwrap safety: Passing the GVariant type string gives us the right value type self.lookup_value(k, Some(glib::VariantTy::new("s").unwrap())) - .map(|v| v.get_str().unwrap().to_string()) + .map(|v| v.str().unwrap().to_string()) } fn lookup_bool(&self, k: &str) -> Option { @@ -145,11 +145,11 @@ mod tests { let _ = new_variant_as(&[]); let v = new_variant_as(&["foo", "bar"]); assert_eq!( - variant_get_child_value(&v, 0).unwrap().get_str().unwrap(), + variant_get_child_value(&v, 0).unwrap().str().unwrap(), "foo" ); assert_eq!( - variant_get_child_value(&v, 1).unwrap().get_str().unwrap(), + variant_get_child_value(&v, 1).unwrap().str().unwrap(), "bar" ); assert!(variant_get_child_value(&v, 2).is_none()); From f921ab2d4a659d269c8f51960d574d0b52064f1b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 25 Jul 2021 17:45:13 -0400 Subject: [PATCH 064/775] Update to use new GLib variant bindings This drops some unsafe code and increases ergonomics. I'd like to kill our `variant_utils` entirely but that will still need more work. 
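To make the ergonomics claim concrete, a hedged sketch (sample values only, not code from this patch) of the strongly typed construction the new bindings provide in place of manual `g_variant_builder` calls:

```rust
// Plain Rust data converted to GVariants in one go; no unsafe builders.
use glib::{ToVariant, Variant};

fn sample() -> Variant {
    // An (ayay) xattr-style pair built from plain byte slices...
    let xattr = (&b"user.example"[..], &b"somevalue"[..]).to_variant();
    // ...and a tuple variant assembled from existing children.
    Variant::from_tuple(&[xattr, "a string".to_variant()])
}
```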
--- lib/src/container/export.rs | 4 +- lib/src/ima.rs | 34 ++++++++----- lib/src/tar/import.rs | 7 ++- lib/src/variant_utils.rs | 98 +++---------------------------------- 4 files changed, 34 insertions(+), 109 deletions(-) diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index ad52c3cfd..90c7fd222 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -1,7 +1,7 @@ //! APIs for creating container images from OSTree commits use super::*; -use crate::{tar as ostree_tar, variant_utils}; +use crate::tar as ostree_tar; use anyhow::Context; use fn_error_context::context; use std::collections::BTreeMap; @@ -48,7 +48,7 @@ fn build_oci( let commit = repo.resolve_rev(rev, false)?.unwrap(); let commit = commit.as_str(); let (commit_v, _) = repo.load_commit(commit)?; - let commit_meta = &variant_utils::variant_get_child_value(&commit_v, 0).unwrap(); + let commit_meta = &commit_v.child_value(0); let commit_meta = glib::VariantDict::new(Some(commit_meta)); if let Some(version) = diff --git a/lib/src/ima.rs b/lib/src/ima.rs index af33af124..5b2eec1f9 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -2,12 +2,13 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT -use crate::variant_utils; use anyhow::{Context, Result}; use fn_error_context::context; use gio::prelude::InputStreamExtManual; +use glib::prelude::*; use glib::translate::*; use glib::Cast; +use glib::Variant; use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; use openat_ext::FileExt; @@ -47,10 +48,19 @@ fn xattrs_to_map(v: &glib::Variant) -> BTreeMap, Vec> { map } -/// Reserialize a map to GVariant of type `a(ayay)` -fn xattrmap_serialize(map: &BTreeMap, Vec>) -> glib::Variant { - let map: Vec<_> = map.iter().collect(); - variant_utils::new_variant_a_ayay(&map) +/// Create a new GVariant of type a(ayay). This is used by OSTree's extended attributes. 
+fn new_variant_a_ayay<'a, T: 'a + AsRef<[u8]>>( + items: impl IntoIterator, +) -> glib::Variant { + let children: Vec<_> = items + .into_iter() + .map(|(a, b)| { + let a = a.as_ref(); + let b = b.as_ref(); + Variant::from_tuple(&[a.to_variant(), b.to_variant()]) + }) + .collect(); + Variant::from_array::<(&[u8], &[u8])>(&children) } struct CommitRewriter<'a> { @@ -189,7 +199,7 @@ impl<'a> CommitRewriter<'a> { let xattrs = { let signed = self.ima_sign(&instream, selinux)?; xattrs.extend(signed); - xattrmap_serialize(&xattrs) + new_variant_a_ayay(&xattrs) }; // Now reload the input stream let (instream, _, _) = self.repo.load_file(checksum, cancellable)?; @@ -233,7 +243,7 @@ impl<'a> CommitRewriter<'a> { unsafe { // Unwrap safety: The name won't have NULs let name = CString::new(name).unwrap(); - let mapped_checksum_v = variant_utils::new_variant_bytearray(&mapped); + let mapped_checksum_v = mapped.to_variant(); let name_p = name.as_ptr(); glib_sys::g_variant_builder_add( new_files_builder, @@ -261,8 +271,8 @@ impl<'a> CommitRewriter<'a> { unsafe { // Unwrap safety: The name won't have NULs let name = CString::new(name).unwrap(); - let mapped_checksum_v = variant_utils::new_variant_bytearray(&mapped); - let meta_checksum_v = variant_utils::new_variant_bytearray(meta_csum_bytes); + let mapped_checksum_v = mapped.to_variant(); + let meta_checksum_v = meta_csum_bytes.to_variant(); glib_sys::g_variant_builder_add( new_dirs_builder, b"(s@ay@ay)\0".as_ptr() as *const _, @@ -321,11 +331,11 @@ impl<'a> CommitRewriter<'a> { let n_parts = 8; let mut parts = Vec::with_capacity(n_parts); for i in 0..n_parts { - parts.push(variant_utils::variant_get_child_value(&commit_v, i).unwrap()); + parts.push(commit_v.child_value(i)); } let new_dt = hex::decode(new_dt)?; - parts[6] = variant_utils::new_variant_bytearray(&new_dt); - let new_commit = variant_utils::new_variant_tuple(&parts); + parts[6] = new_dt.to_variant(); + let new_commit = Variant::from_tuple(&parts); let new_commit_checksum = self .repo diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 336ac4158..f27adb560 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -1,13 +1,12 @@ //! APIs for extracting OSTree commits from container images -use crate::variant_utils::variant_new_from_bytes; use crate::Result; use anyhow::{anyhow, Context}; use camino::Utf8Path; use fn_error_context::context; use futures::prelude::*; use gio::prelude::*; -use glib::Cast; +use glib::{Cast, Variant}; use std::collections::HashMap; use std::convert::TryInto; use std::io::prelude::*; @@ -29,7 +28,7 @@ const SMALL_REGFILE_SIZE: usize = 127 * 1024; const OSTREE_COMMIT_FORMAT: &str = "(a{sv}aya(say)sstayay)"; const OSTREE_DIRTREE_FORMAT: &str = "(a(say)a(sayay))"; const OSTREE_DIRMETA_FORMAT: &str = "(uuua(ayay))"; -const OSTREE_XATTRS_FORMAT: &str = "a(ayay)"; +// const OSTREE_XATTRS_FORMAT: &str = "a(ayay)"; /// State tracker for the importer. The main goal is to reject multiple /// commit objects, as well as finding metadata/content before the commit. 
@@ -434,7 +433,7 @@ impl<'a> Importer<'a> { let mut contents = vec![0u8; n as usize]; entry.read_exact(contents.as_mut_slice())?; let contents: glib::Bytes = contents.as_slice().into(); - let contents = variant_new_from_bytes(OSTREE_XATTRS_FORMAT, contents, false); + let contents = Variant::from_bytes::<&[(&[u8], &[u8])]>(&contents); self.xattrs.insert(checksum, contents); Ok(()) diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs index 7ad529e89..5ef994293 100644 --- a/lib/src/variant_utils.rs +++ b/lib/src/variant_utils.rs @@ -4,11 +4,14 @@ //! may migrate into gtk-rs. use glib::translate::*; -use glib::ToVariant; -use std::mem::size_of; + +/// Get the normal form of a GVariant. +pub fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant { + unsafe { from_glib_full(glib_sys::g_variant_get_normal_form(v.to_glib_none().0)) } +} /// Create a new GVariant from data. -pub fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> glib::Variant { +fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> glib::Variant { unsafe { let ty = ty.to_glib_none(); let ty: *const libc::c_char = ty.0; @@ -20,83 +23,11 @@ pub fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> gl } } -/// Get the normal form of a GVariant. -pub fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant { - unsafe { from_glib_full(glib_sys::g_variant_get_normal_form(v.to_glib_none().0)) } -} - /// Create a normal-form GVariant from raw bytes. -pub fn variant_normal_from_bytes(ty: &str, bytes: glib::Bytes) -> glib::Variant { +pub(crate) fn variant_normal_from_bytes(ty: &str, bytes: glib::Bytes) -> glib::Variant { variant_get_normal_form(&variant_new_from_bytes(ty, bytes, false)) } -/// Create a new `ay` GVariant. -pub fn new_variant_bytearray(buf: &[u8]) -> glib::Variant { - unsafe { - let r = glib_sys::g_variant_new_fixed_array( - b"y\0".as_ptr() as *const _, - buf.as_ptr() as *const _, - buf.len(), - size_of::(), - ); - glib_sys::g_variant_ref_sink(r); - from_glib_full(r) - } -} - -/// Create a new GVariant tuple from the provided variants. -pub fn new_variant_tuple<'a>(items: impl IntoIterator) -> glib::Variant { - let v: Vec<_> = items.into_iter().map(|v| v.to_glib_none().0).collect(); - unsafe { - let r = glib_sys::g_variant_new_tuple(v.as_ptr(), v.len()); - glib_sys::g_variant_ref_sink(r); - from_glib_full(r) - } -} - -/// Extract a child from a variant. -pub fn variant_get_child_value(v: &glib::Variant, n: usize) -> Option { - let v = v.to_glib_none(); - let l = unsafe { glib_sys::g_variant_n_children(v.0) }; - if n >= l { - None - } else { - unsafe { from_glib_full(glib_sys::g_variant_get_child_value(v.0, n)) } - } -} - -/// Create a new GVariant of type a(ayay). This is used by OSTree's extended attributes. -pub fn new_variant_a_ayay>(items: &[(T, T)]) -> glib::Variant { - unsafe { - let ty = glib::VariantTy::new("a(ayay)").unwrap(); - let builder = glib_sys::g_variant_builder_new(ty.as_ptr() as *const _); - for (k, v) in items { - let k = new_variant_bytearray(k.as_ref()); - let v = new_variant_bytearray(v.as_ref()); - let val = new_variant_tuple(&[k, v]); - glib_sys::g_variant_builder_add_value(builder, val.to_glib_none().0); - } - let v = glib_sys::g_variant_builder_end(builder); - glib_sys::g_variant_ref_sink(v); - from_glib_full(v) - } -} - -/// Create a new GVariant of type `as`. 
-pub fn new_variant_as(items: &[&str]) -> glib::Variant { - unsafe { - let ty = glib::VariantTy::new("as").unwrap(); - let builder = glib_sys::g_variant_builder_new(ty.as_ptr() as *const _); - for &k in items { - let k = k.to_variant(); - glib_sys::g_variant_builder_add_value(builder, k.to_glib_none().0); - } - let v = glib_sys::g_variant_builder_end(builder); - glib_sys::g_variant_ref_sink(v); - from_glib_full(v) - } -} - /// Extension trait for `glib::VariantDict`. pub trait VariantDictExt { /// Find (and duplicate) a string-valued key in this dictionary. @@ -139,19 +70,4 @@ mod tests { d.insert("foo", &"bar"); assert_eq!(d.lookup_str("foo"), Some("bar".to_string())); } - - #[test] - fn test_variant_as() { - let _ = new_variant_as(&[]); - let v = new_variant_as(&["foo", "bar"]); - assert_eq!( - variant_get_child_value(&v, 0).unwrap().str().unwrap(), - "foo" - ); - assert_eq!( - variant_get_child_value(&v, 1).unwrap().str().unwrap(), - "bar" - ); - assert!(variant_get_child_value(&v, 2).is_none()); - } } From 7704d55eb6048b9f6f18d0366dc44f796db73dc6 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 27 Jul 2021 12:13:29 -0400 Subject: [PATCH 065/775] Release 0.2.0 Breaking changes: - glib 0.14 - Removed most now-unnecessary bits from `variant_utils` --- cli/Cargo.toml | 2 +- lib/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 5c346480c..363d39601 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ostree-ext-cli" -version = "0.1.3" +version = "0.1.4" authors = ["Colin Walters "] edition = "2018" license = "MIT OR Apache-2.0" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 611e5d76b..66e5d5285 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-ext" -version = "0.1.4" +version = "0.2.0" From 76e8b14395648173501a66d7dcd0baf417bc62d3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 27 Jul 2021 13:05:51 -0400 Subject: [PATCH 066/775] README.md: Note why ostree tar is lossy Came up on IRC. --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c5fee6d64..812a2231d 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,9 @@ Extension APIs for [ostree](https://github.com/ostreedev/ostree/) that are writt ## module "tar": tar export/import -ostree's support for exporting to a tarball is lossy by default. This adds a new export -format that is effectively a new custom repository mode combined with a hardlinked checkout. +ostree's support for exporting to a tarball is lossy because it doesn't have e.g. commit +metadata. This adds a new export format that is effectively a new custom repository mode +combined with a hardlinked checkout. This new export stream can be losslessly imported back into a different repository. From d813a2a14112866997de24379d40b9a2e0a2994a Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Wed, 28 Jul 2021 10:32:20 +0000 Subject: [PATCH 067/775] cargo/lib: update all dependencies This sorts the dependencies in the 'lib' manifest, updates them all to the latest versions, and massages code for all changed APIs.
--- lib/Cargo.toml | 10 +++++----- lib/src/cli.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 66e5d5285..b4eb5b746 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -14,30 +14,30 @@ bytes = "1.0.1" camino = "1.0.4" cjson = "0.1.1" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } -fn-error-context = "0.1.1" +fn-error-context = "0.2.0" futures = "0.3.13" -indicatif = "0.15.0" gio = "0.14" glib = "0.14" glib-sys = "0.14" gvariant = "0.4.0" hex = "0.4.3" +indicatif = "0.16.0" libc = "0.2.92" -nix = "0.20.0" -phf = { features = ["macros"], version = "0.8.0" } +nix = "0.22.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" ostree = { features = ["v2021_2"], version = "0.12.0" } +phf = { features = ["macros"], version = "0.9.0" } serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" structopt = "0.3.21" tar = "0.4.33" tempfile = "3.2.0" tokio = { features = ["full"], version = "1" } +tokio-stream = "0.1.5" tokio-util = { features = ["io"], version = "0.6" } tracing = "0.1" -tokio-stream = "0.1.5" [dev-dependencies] clap = "2.33.3" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 19ad390a8..62d6ca912 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -172,7 +172,7 @@ async fn container_import(repo: &str, imgref: &str, write_ref: Option<&str>) -> tokio::select! { _ = rx_progress.changed() => { let n = rx_progress.borrow().processed_bytes; - pb.set_message(&format!("Processed: {}", indicatif::HumanBytes(n))); + pb.set_message(format!("Processed: {}", indicatif::HumanBytes(n))); } import = &mut import => { pb.finish(); From 5e3bc26eed0ac61c9906359893477015ac7ae5ad Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 28 Jul 2021 13:12:27 -0400 Subject: [PATCH 068/775] Release 0.2.1 To pull in the updated deps, to avoid having lots more to vendor for rpm-ostree. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index b4eb5b746..d6b202e97 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-ext" -version = "0.2.0" +version = "0.2.1" [dependencies] anyhow = "1.0" From 3744a9dc66575695d7f66dded928ee317df2ed74 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 30 Jul 2021 10:22:33 -0400 Subject: [PATCH 069/775] oci: Pass through other architectures I think the OCI arch matches the kernel architecture on other platforms. --- lib/src/container/oci.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index a9a09717b..9ffc52381 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -173,7 +173,8 @@ impl<'a> OciWriter<'a> { #[context("Writing OCI")] pub(crate) fn complete(&mut self) -> Result<()> { let utsname = nix::sys::utsname::uname(); - let arch = MACHINE_TO_OCI[utsname.machine()]; + let machine = utsname.machine(); + let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine); let rootfs_blob = self.root_layer.as_ref().unwrap(); let root_layer_id = format!("sha256:{}", rootfs_blob.uncompressed_sha256); From 9e4a25f34d1c8009bc05f7c020542359107c706f Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Mon, 2 Aug 2021 09:03:59 +0000 Subject: [PATCH 070/775] cargo: fix repository location This updates the repository URLs to the new location. 
--- cli/Cargo.toml | 2 +- lib/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 363d39601..309bedc13 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.4" authors = ["Colin Walters "] edition = "2018" license = "MIT OR Apache-2.0" -repository = "https://github.com/cgwalters/ostree-container" +repository = "https://github.com/ostreedev/ostree-rs-ext" readme = "README.md" [dependencies] diff --git a/lib/Cargo.toml b/lib/Cargo.toml index d6b202e97..da2b25140 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -5,7 +5,7 @@ edition = "2018" license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" -repository = "https://github.com/ostreedev/ostree-ext" +repository = "https://github.com/ostreedev/ostree-rs-ext" version = "0.2.1" [dependencies] From ea7baf23e650c607af2269d126d99c5f32f695ec Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 4 Aug 2021 11:51:48 -0400 Subject: [PATCH 071/775] Use glib-sys via re-exported glib::ffi In general only -sys crates should depend on other -sys crates. By using the re-export, we avoid needing to keep a version lock between glib and glib-sys in our main crate. --- lib/Cargo.toml | 1 - lib/src/ima.rs | 20 ++++++++++---------- lib/src/variant_utils.rs | 8 ++++---- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index da2b25140..fac6a61a6 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -18,7 +18,6 @@ fn-error-context = "0.2.0" futures = "0.3.13" gio = "0.14" glib = "0.14" -glib-sys = "0.14" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 5b2eec1f9..2758c61ee 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -232,7 +232,7 @@ impl<'a> CommitRewriter<'a> { let mut hexbuf = [0u8; 64]; let new_files_builder = - unsafe { glib_sys::g_variant_builder_new(b"a(say)\0".as_ptr() as *const _) }; + unsafe { glib::ffi::g_variant_builder_new(b"a(say)\0".as_ptr() as *const _) }; for file in files { let (name, csum) = file.to_tuple(); let name = name.to_str(); @@ -245,7 +245,7 @@ impl<'a> CommitRewriter<'a> { let name = CString::new(name).unwrap(); let mapped_checksum_v = mapped.to_variant(); let name_p = name.as_ptr(); - glib_sys::g_variant_builder_add( + glib::ffi::g_variant_builder_add( new_files_builder, b"(s@ay)\0".as_ptr() as *const _, name_p, @@ -254,13 +254,13 @@ impl<'a> CommitRewriter<'a> { } } let new_files: glib::Variant = unsafe { - let v = glib_sys::g_variant_builder_end(new_files_builder); - glib_sys::g_variant_ref_sink(v); + let v = glib::ffi::g_variant_builder_end(new_files_builder); + glib::ffi::g_variant_ref_sink(v); from_glib_full(v) }; let new_dirs_builder = - unsafe { glib_sys::g_variant_builder_new(b"a(sayay)\0".as_ptr() as *const _) }; + unsafe { glib::ffi::g_variant_builder_new(b"a(sayay)\0".as_ptr() as *const _) }; for item in dirs { let (name, contents_csum, meta_csum_bytes) = item.to_tuple(); let name = name.to_str(); @@ -273,7 +273,7 @@ impl<'a> CommitRewriter<'a> { let name = CString::new(name).unwrap(); let mapped_checksum_v = mapped.to_variant(); let meta_checksum_v = meta_csum_bytes.to_variant(); - glib_sys::g_variant_builder_add( + glib::ffi::g_variant_builder_add( new_dirs_builder, b"(s@ay@ay)\0".as_ptr() as *const _, name.as_ptr(), @@ -283,19 +283,19 @@ impl<'a> CommitRewriter<'a> { } } let new_dirs: glib::Variant = unsafe { - let v = glib_sys::g_variant_builder_end(new_dirs_builder); - glib_sys::g_variant_ref_sink(v); + 
let v = glib::ffi::g_variant_builder_end(new_dirs_builder); + glib::ffi::g_variant_ref_sink(v); from_glib_full(v) }; let new_dirtree: glib::Variant = unsafe { - let v = glib_sys::g_variant_new( + let v = glib::ffi::g_variant_new( b"(@a(say)@a(sayay))\0".as_ptr() as *const _, new_files.to_glib_none().0, new_dirs.to_glib_none().0, std::ptr::null_mut::(), ); - glib_sys::g_variant_ref_sink(v); + glib::ffi::g_variant_ref_sink(v); from_glib_full(v) }; diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs index 5ef994293..aa703bb7a 100644 --- a/lib/src/variant_utils.rs +++ b/lib/src/variant_utils.rs @@ -7,7 +7,7 @@ use glib::translate::*; /// Get the normal form of a GVariant. pub fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant { - unsafe { from_glib_full(glib_sys::g_variant_get_normal_form(v.to_glib_none().0)) } + unsafe { from_glib_full(glib::ffi::g_variant_get_normal_form(v.to_glib_none().0)) } } /// Create a new GVariant from data. @@ -15,10 +15,10 @@ fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> glib:: unsafe { let ty = ty.to_glib_none(); let ty: *const libc::c_char = ty.0; - let ty = ty as *const glib_sys::GVariantType; + let ty = ty as *const glib::ffi::GVariantType; let bytes = bytes.to_glib_full(); - let v = glib_sys::g_variant_new_from_bytes(ty, bytes, trusted.into_glib()); - glib_sys::g_variant_ref_sink(v); + let v = glib::ffi::g_variant_new_from_bytes(ty, bytes, trusted.into_glib()); + glib::ffi::g_variant_ref_sink(v); from_glib_full(v) } } From 6c26930509edb9fd79c924c3db8cac3e468ceb1a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 5 Aug 2021 13:16:11 -0400 Subject: [PATCH 072/775] Use `glib` via re-export from `gio` This is part of thinning out our dependency chain; things like dependabot don't understand the fixed relationship between `glib` and `gio`. 
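The effect for consumers is sketched below: a crate that depends only on `gio` reaches `glib` through the re-export, so the two can never skew in version (a minimal, hypothetical example):

```rust
// No direct `glib` line in Cargo.toml; the re-export pins a matching version.
use gio::glib;
use glib::ToVariant;

fn main() {
    let v = "hello".to_variant();
    assert_eq!(v.str(), Some("hello"));
}
```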
--- lib/Cargo.toml | 1 - lib/src/container/export.rs | 1 + lib/src/ima.rs | 4 ++-- lib/src/tar/export.rs | 1 + lib/src/tar/import.rs | 3 ++- lib/src/variant_utils.rs | 1 + 6 files changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index fac6a61a6..3a5474ee1 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -17,7 +17,6 @@ flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures = "0.3.13" gio = "0.14" -glib = "0.14" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 90c7fd222..9e37756f0 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -4,6 +4,7 @@ use super::*; use crate::tar as ostree_tar; use anyhow::Context; use fn_error_context::context; +use gio::glib; use std::collections::BTreeMap; use std::path::Path; use tracing::{instrument, Level}; diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 2758c61ee..aba8af046 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -4,8 +4,8 @@ use anyhow::{Context, Result}; use fn_error_context::context; -use gio::prelude::InputStreamExtManual; -use glib::prelude::*; +use gio::glib; +use gio::prelude::*; use glib::translate::*; use glib::Cast; use glib::Variant; diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 0999e294c..2b2670b85 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -4,6 +4,7 @@ use crate::Result; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; +use gio::glib; use gio::prelude::*; use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index f27adb560..2961770fc 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -5,8 +5,9 @@ use anyhow::{anyhow, Context}; use camino::Utf8Path; use fn_error_context::context; use futures::prelude::*; +use gio::glib; use gio::prelude::*; -use glib::{Cast, Variant}; +use glib::Variant; use std::collections::HashMap; use std::convert::TryInto; use std::io::prelude::*; diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs index aa703bb7a..f0d0591bc 100644 --- a/lib/src/variant_utils.rs +++ b/lib/src/variant_utils.rs @@ -3,6 +3,7 @@ //! avoiding another crate for this. In the future, some of these //! may migrate into gtk-rs. +use gio::glib; use glib::translate::*; /// Get the normal form of a GVariant. From e091f625ba050f9d6130960442bff4a58b8e1bce Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 5 Aug 2021 17:27:13 -0400 Subject: [PATCH 073/775] ima: Use new GVariant code, drop unsafe While in theory using the unsafe path could be more efficient, in practice I think the impedance mismatch between e.g. Rust `&str` and C strings requires duplicating anyways. So just build up a Rust data structure and then convert it all in one go. --- lib/src/ima.rs | 56 +++++--------------------------------------------- 1 file changed, 5 insertions(+), 51 deletions(-) diff --git a/lib/src/ima.rs b/lib/src/ima.rs index aba8af046..d9184b835 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -6,7 +6,6 @@ use anyhow::{Context, Result}; use fn_error_context::context; use gio::glib; use gio::prelude::*; -use glib::translate::*; use glib::Cast; use glib::Variant; use gvariant::aligned_bytes::TryAsAligned; @@ -218,7 +217,6 @@ impl<'a> CommitRewriter<'a> { } /// Write a dirtree object. 
- #[allow(unsafe_code)] fn map_dirtree(&mut self, checksum: &str) -> Result { let src = &self .repo @@ -231,8 +229,7 @@ impl<'a> CommitRewriter<'a> { // A reusable buffer to avoid heap allocating these let mut hexbuf = [0u8; 64]; - let new_files_builder = - unsafe { glib::ffi::g_variant_builder_new(b"a(say)\0".as_ptr() as *const _) }; + let mut new_files = Vec::new(); for file in files { let (name, csum) = file.to_tuple(); let name = name.to_str(); @@ -240,27 +237,10 @@ impl<'a> CommitRewriter<'a> { let checksum = std::str::from_utf8(&hexbuf)?; let mapped = self.map_file(checksum)?; let mapped = hex::decode(&*mapped)?; - unsafe { - // Unwrap safety: The name won't have NULs - let name = CString::new(name).unwrap(); - let mapped_checksum_v = mapped.to_variant(); - let name_p = name.as_ptr(); - glib::ffi::g_variant_builder_add( - new_files_builder, - b"(s@ay)\0".as_ptr() as *const _, - name_p, - mapped_checksum_v.to_glib_none().0, - ); - } + new_files.push((name, mapped)); } - let new_files: glib::Variant = unsafe { - let v = glib::ffi::g_variant_builder_end(new_files_builder); - glib::ffi::g_variant_ref_sink(v); - from_glib_full(v) - }; - let new_dirs_builder = - unsafe { glib::ffi::g_variant_builder_new(b"a(sayay)\0".as_ptr() as *const _) }; + let mut new_dirs = Vec::new(); for item in dirs { let (name, contents_csum, meta_csum_bytes) = item.to_tuple(); let name = name.to_str(); @@ -268,36 +248,10 @@ impl<'a> CommitRewriter<'a> { let contents_csum = std::str::from_utf8(&hexbuf)?; let mapped = self.map_dirtree(&contents_csum)?; let mapped = hex::decode(mapped)?; - unsafe { - // Unwrap safety: The name won't have NULs - let name = CString::new(name).unwrap(); - let mapped_checksum_v = mapped.to_variant(); - let meta_checksum_v = meta_csum_bytes.to_variant(); - glib::ffi::g_variant_builder_add( - new_dirs_builder, - b"(s@ay@ay)\0".as_ptr() as *const _, - name.as_ptr(), - mapped_checksum_v.to_glib_none().0, - meta_checksum_v.to_glib_none().0, - ); - } + new_dirs.push((name, mapped, meta_csum_bytes)); } - let new_dirs: glib::Variant = unsafe { - let v = glib::ffi::g_variant_builder_end(new_dirs_builder); - glib::ffi::g_variant_ref_sink(v); - from_glib_full(v) - }; - let new_dirtree: glib::Variant = unsafe { - let v = glib::ffi::g_variant_new( - b"(@a(say)@a(sayay))\0".as_ptr() as *const _, - new_files.to_glib_none().0, - new_dirs.to_glib_none().0, - std::ptr::null_mut::(), - ); - glib::ffi::g_variant_ref_sink(v); - from_glib_full(v) - }; + let new_dirtree = (new_files, new_dirs).to_variant(); let mapped = self .repo From 808d29d7665d353d4a60c33fe00f23b8869e3a9b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 6 Aug 2021 09:18:21 -0400 Subject: [PATCH 074/775] Use gio via `ostree::gio` By using the re-export, we avoid needing to keep a version lock between glib and glib-sys in our main crate. This helps dependabot, etc. 
--- cli/Cargo.toml | 1 - lib/Cargo.toml | 1 - lib/src/cli.rs | 1 + lib/src/container/export.rs | 1 + lib/src/diff.rs | 1 + lib/src/ima.rs | 1 + lib/src/tar/export.rs | 1 + lib/src/tar/import.rs | 1 + lib/src/variant_utils.rs | 1 + lib/tests/it/main.rs | 1 + 10 files changed, 8 insertions(+), 2 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 309bedc13..56583d2db 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -15,7 +15,6 @@ structopt = "0.3.21" ostree = { version = "0.12.0", features = ["v2021_2"] } libc = "0.2.92" tokio = { version = "1", features = ["full"] } -gio = "0.14" log = "0.4.0" tracing = "0.1" tracing-subscriber = "0.2.17" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 3a5474ee1..ccc8fa059 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -16,7 +16,6 @@ cjson = "0.1.1" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures = "0.3.13" -gio = "0.14" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 62d6ca912..1694a3446 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -6,6 +6,7 @@ //! such as `rpm-ostree` can directly reuse it. use anyhow::Result; +use ostree::gio; use std::collections::BTreeMap; use std::convert::TryInto; use std::ffi::OsString; diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 9e37756f0..6fd098eb9 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -5,6 +5,7 @@ use crate::tar as ostree_tar; use anyhow::Context; use fn_error_context::context; use gio::glib; +use ostree::gio; use std::collections::BTreeMap; use std::path::Path; use tracing::{instrument, Level}; diff --git a/lib/src/diff.rs b/lib/src/diff.rs index bd3b7fd5b..f965d25b2 100644 --- a/lib/src/diff.rs +++ b/lib/src/diff.rs @@ -9,6 +9,7 @@ use anyhow::{Context, Result}; use fn_error_context::context; use gio::prelude::*; +use ostree::gio; use std::collections::BTreeSet; use std::fmt; diff --git a/lib/src/ima.rs b/lib/src/ima.rs index aba8af046..f0d84adf9 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -12,6 +12,7 @@ use glib::Variant; use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; use openat_ext::FileExt; +use ostree::gio; use std::collections::{BTreeMap, HashMap}; use std::ffi::CString; use std::fs::File; diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 2b2670b85..032f92967 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -8,6 +8,7 @@ use gio::glib; use gio::prelude::*; use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; +use ostree::gio; use std::borrow::Cow; use std::collections::HashSet; use std::io::BufReader; diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 2961770fc..ea0ece160 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -8,6 +8,7 @@ use futures::prelude::*; use gio::glib; use gio::prelude::*; use glib::Variant; +use ostree::gio; use std::collections::HashMap; use std::convert::TryInto; use std::io::prelude::*; diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs index f0d0591bc..c9ebee475 100644 --- a/lib/src/variant_utils.rs +++ b/lib/src/variant_utils.rs @@ -5,6 +5,7 @@ use gio::glib; use glib::translate::*; +use ostree::gio; /// Get the normal form of a GVariant. 
pub fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 9dc533df8..f2ed761b0 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -2,6 +2,7 @@ use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use indoc::indoc; +use ostree::gio; use ostree_ext::container::{Config, ImageReference, Transport}; use sh_inline::bash; use std::{io::Write, process::Command}; From 2e18af46b0c9a5010999ff11a654771bf4a21845 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 5 Aug 2021 16:42:55 -0400 Subject: [PATCH 075/775] Try to fix the docs.rs build I am totally cargo culting the bits from ostree-rs without understanding them. I guess this somehow avoids having the dependencies installed in the random docs.rs build container? --- lib/Cargo.toml | 6 ++++++ lib/src/lib.rs | 1 + 2 files changed, 7 insertions(+) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index fac6a61a6..bd1e05f7d 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -43,3 +43,9 @@ clap = "2.33.3" indoc = "1.0.3" sh-inline = "0.1.0" structopt = "0.3.21" + +[package.metadata.docs.rs] +features = ["dox"] + +[features] +dox = ["ostree/dox"] diff --git a/lib/src/lib.rs b/lib/src/lib.rs index a778090e8..830dd0464 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -8,6 +8,7 @@ // Good defaults #![forbid(unused_must_use)] #![deny(unsafe_code)] +#![cfg_attr(feature = "dox", feature(doc_cfg))] /// Our generic catchall fatal error, expected to be converted /// to a string to output to a terminal or logs. From 867412aac368be01cde77d08d2e00bd37ce97fa1 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 6 Aug 2021 10:37:52 -0400 Subject: [PATCH 076/775] Re-export our dependencies (`ostree`, `gio`, `glib`) and add a prelude This is the equivalent of https://github.com/ostreedev/ostree-rs/pull/13/commits/a1e5bc3f320a28b3a7fd80b9a7f6b0ff76540a23 This allows our users to avoid adding explicit dependencies on those libraries, because in practice e.g. `ostree` and `gio` must be versioned together. --- cli/Cargo.toml | 1 - lib/src/lib.rs | 13 +++++++++++++ lib/tests/it/main.rs | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 56583d2db..f102346cf 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -12,7 +12,6 @@ anyhow = "1.0" ostree-ext = { path = "../lib" } clap = "2.33.3" structopt = "0.3.21" -ostree = { version = "0.12.0", features = ["v2021_2"] } libc = "0.2.92" tokio = { version = "1", features = ["full"] } log = "0.4.0" diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 830dd0464..e01edcebc 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -10,6 +10,13 @@ #![deny(unsafe_code)] #![cfg_attr(feature = "dox", feature(doc_cfg))] +// Re-export our dependencies. See https://gtk-rs.org/blog/2021/06/22/new-release.html +// "Dependencies are re-exported". Users will need e.g. `gio::File`, so this avoids +// them needing to update matching versions. +pub use ostree; +pub use ostree::gio; +pub use ostree::gio::glib; + /// Our generic catchall fatal error, expected to be converted /// to a string to output to a terminal or logs. type Result = anyhow::Result; @@ -22,3 +29,9 @@ pub mod ima; pub mod tar; #[allow(unsafe_code)] pub mod variant_utils; + +/// Prelude, intended for glob import. 
+pub mod prelude { + #[doc(hidden)] + pub use ostree::prelude::*; +} diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index f2ed761b0..1940fd6b8 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -2,8 +2,8 @@ use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use indoc::indoc; -use ostree::gio; use ostree_ext::container::{Config, ImageReference, Transport}; +use ostree_ext::gio; use sh_inline::bash; use std::{io::Write, process::Command}; From 51d8c0902d538f67243cd47c058bb9b4ff61ec44 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 6 Aug 2021 14:26:32 -0400 Subject: [PATCH 077/775] Release 0.2.2 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index db43a29b4..d7840d1e7 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.2.1" +version = "0.2.2" [dependencies] anyhow = "1.0" From 7823b7a1d397258e584b4a0ec03af6fea172d9a5 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 3 Aug 2021 18:11:44 -0400 Subject: [PATCH 078/775] lib: Drop gvariant_utils module We can now reuse the main GLib 0.14 code since we have the binding for `g_variant_get_normal_form()`. --- lib/src/lib.rs | 3 -- lib/src/tar/import.rs | 37 ++++++++------------ lib/src/variant_utils.rs | 75 ---------------------------------------- 3 files changed, 15 insertions(+), 100 deletions(-) delete mode 100644 lib/src/variant_utils.rs diff --git a/lib/src/lib.rs b/lib/src/lib.rs index e01edcebc..3137cb655 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -27,9 +27,6 @@ pub mod container; pub mod diff; pub mod ima; pub mod tar; -#[allow(unsafe_code)] -pub mod variant_utils; - /// Prelude, intended for glob import. pub mod prelude { #[doc(hidden)] diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index ea0ece160..8d760127d 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -25,13 +25,6 @@ const MAX_METADATA_SIZE: u32 = 10 * 1024 * 1024; /// https://stackoverflow.com/questions/258091/when-should-i-use-mmap-for-file-access const SMALL_REGFILE_SIZE: usize = 127 * 1024; -// Variant formats, see ostree-core.h -// TODO - expose these via introspection -const OSTREE_COMMIT_FORMAT: &str = "(a{sv}aya(say)sstayay)"; -const OSTREE_DIRTREE_FORMAT: &str = "(a(say)a(sayay))"; -const OSTREE_DIRMETA_FORMAT: &str = "(uuua(ayay))"; -// const OSTREE_XATTRS_FORMAT: &str = "a(ayay)"; - /// State tracker for the importer. The main goal is to reject multiple /// commit objects, as well as finding metadata/content before the commit. #[derive(Debug, PartialEq, Eq)] @@ -93,15 +86,6 @@ fn header_attrs(header: &tar::Header) -> Result<(u32, u32, u32)> { Ok((uid, gid, mode)) } -fn format_for_objtype(t: ostree::ObjectType) -> Option<&'static str> { - match t { - ostree::ObjectType::DirTree => Some(OSTREE_DIRTREE_FORMAT), - ostree::ObjectType::DirMeta => Some(OSTREE_DIRMETA_FORMAT), - ostree::ObjectType::Commit => Some(OSTREE_COMMIT_FORMAT), - _ => None, - } -} - /// The C function ostree_object_type_from_string aborts on /// unknown strings, so we have a safe version here. 
fn objtype_from_string(t: &str) -> Option { @@ -115,9 +99,8 @@ fn objtype_from_string(t: &str) -> Option { } /// Given a tar entry, read it all into a GVariant -fn entry_to_variant( +fn entry_to_variant( mut entry: tar::Entry, - vtype: &str, desc: &str, ) -> Result { let header = entry.header(); @@ -127,7 +110,8 @@ fn entry_to_variant( let n = std::io::copy(&mut entry, &mut buf)?; assert_eq!(n as usize, size); let v = glib::Bytes::from_owned(buf); - Ok(crate::variant_utils::variant_normal_from_bytes(vtype, v)) + let v = Variant::from_bytes::(&v); + Ok(v.normal_form()) } impl<'a> Importer<'a> { @@ -151,9 +135,18 @@ impl<'a> Importer<'a> { checksum: &str, objtype: ostree::ObjectType, ) -> Result<()> { - let vtype = - format_for_objtype(objtype).ok_or_else(|| anyhow!("Unhandled objtype {}", objtype))?; - let v = entry_to_variant(entry, vtype, checksum)?; + let v = match objtype { + ostree::ObjectType::DirTree => { + entry_to_variant::<_, ostree::TreeVariantType>(entry, checksum)? + } + ostree::ObjectType::DirMeta => { + entry_to_variant::<_, ostree::DirmetaVariantType>(entry, checksum)? + } + ostree::ObjectType::Commit => { + entry_to_variant::<_, ostree::CommitVariantType>(entry, checksum)? + } + o => return Err(anyhow!("Invalid metadata object type; {:?}", o)), + }; // FIXME insert expected dirtree/dirmeta let _ = self .repo diff --git a/lib/src/variant_utils.rs b/lib/src/variant_utils.rs deleted file mode 100644 index c9ebee475..000000000 --- a/lib/src/variant_utils.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! Extension APIs for working with GVariant. Not strictly -//! related to ostree, but included here in the interest of -//! avoiding another crate for this. In the future, some of these -//! may migrate into gtk-rs. - -use gio::glib; -use glib::translate::*; -use ostree::gio; - -/// Get the normal form of a GVariant. -pub fn variant_get_normal_form(v: &glib::Variant) -> glib::Variant { - unsafe { from_glib_full(glib::ffi::g_variant_get_normal_form(v.to_glib_none().0)) } -} - -/// Create a new GVariant from data. -fn variant_new_from_bytes(ty: &str, bytes: glib::Bytes, trusted: bool) -> glib::Variant { - unsafe { - let ty = ty.to_glib_none(); - let ty: *const libc::c_char = ty.0; - let ty = ty as *const glib::ffi::GVariantType; - let bytes = bytes.to_glib_full(); - let v = glib::ffi::g_variant_new_from_bytes(ty, bytes, trusted.into_glib()); - glib::ffi::g_variant_ref_sink(v); - from_glib_full(v) - } -} - -/// Create a normal-form GVariant from raw bytes. -pub(crate) fn variant_normal_from_bytes(ty: &str, bytes: glib::Bytes) -> glib::Variant { - variant_get_normal_form(&variant_new_from_bytes(ty, bytes, false)) -} - -/// Extension trait for `glib::VariantDict`. -pub trait VariantDictExt { - /// Find (and duplicate) a string-valued key in this dictionary. - fn lookup_str(&self, k: &str) -> Option; - /// Find a `bool`-valued key in this dictionary. 
- fn lookup_bool(&self, k: &str) -> Option; -} - -impl VariantDictExt for glib::VariantDict { - fn lookup_str(&self, k: &str) -> Option { - // Unwrap safety: Passing the GVariant type string gives us the right value type - self.lookup_value(k, Some(glib::VariantTy::new("s").unwrap())) - .map(|v| v.str().unwrap().to_string()) - } - - fn lookup_bool(&self, k: &str) -> Option { - // Unwrap safety: Passing the GVariant type string gives us the right value type - self.lookup_value(k, Some(glib::VariantTy::new("b").unwrap())) - .map(|v| v.get().unwrap()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - const BUF: &[u8] = &[1u8; 4]; - - #[test] - fn test_variant_from_bytes() { - let bytes = glib::Bytes::from_static(BUF); - let v = variant_new_from_bytes("u", bytes, false); - let val: u32 = v.get().unwrap(); - assert_eq!(val, 16843009); - } - - #[test] - fn test_variantdict() { - let d = glib::VariantDict::new(None); - d.insert("foo", &"bar"); - assert_eq!(d.lookup_str("foo"), Some("bar".to_string())); - } -} From a53c1bb73dd375b02b652875e9eeff573a76710a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 25 Aug 2021 16:23:13 -0400 Subject: [PATCH 079/775] tar: Correctly pass import/export commit metadata There were multiple bugs here. Prep for implementing signing: https://github.com/ostreedev/ostree-rs-ext/issues/2 --- lib/src/lib.rs | 33 +++++++++++++++++++++++++++++++++ lib/src/tar/export.rs | 4 ++-- lib/src/tar/import.rs | 24 +++++++++++++++--------- lib/tests/it/main.rs | 8 ++++++-- 4 files changed, 56 insertions(+), 13 deletions(-) diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 3137cb655..5e35c2696 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -32,3 +32,36 @@ pub mod prelude { #[doc(hidden)] pub use ostree::prelude::*; } + +/// Temporary holding place for fixed APIs +#[allow(unsafe_code)] +mod ostree_ffi_fixed { + use super::*; + use ostree::prelude::*; + + /// https://github.com/ostreedev/ostree/pull/2422 + pub(crate) fn read_commit_detached_metadata>( + repo: &ostree::Repo, + checksum: &str, + cancellable: Option<&P>, + ) -> std::result::Result, glib::Error> { + use glib::translate::*; + use std::ptr; + unsafe { + let mut out_metadata = ptr::null_mut(); + let mut error = ptr::null_mut(); + let _ = ostree::ffi::ostree_repo_read_commit_detached_metadata( + repo.to_glib_none().0, + checksum.to_glib_none().0, + &mut out_metadata, + cancellable.map(|p| p.as_ref()).to_glib_none().0, + &mut error, + ); + if error.is_null() { + Ok(from_glib_full(out_metadata)) + } else { + Err(from_glib_full(error)) + } + } + } +} diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 032f92967..804ce1e69 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -62,7 +62,7 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { v: &glib::Variant, ) -> Result<()> { let set = match objtype { - ostree::ObjectType::Commit => None, + ostree::ObjectType::Commit | ostree::ObjectType::CommitMeta => None, ostree::ObjectType::DirTree => Some(&mut self.wrote_dirtree), ostree::ObjectType::DirMeta => Some(&mut self.wrote_dirmeta), o => panic!("Unexpected object type: {:?}", o), @@ -266,7 +266,7 @@ fn impl_export( writer.append(ostree::ObjectType::Commit, commit_checksum, commit_v)?; if let Some(commitmeta) = - repo.load_variant_if_exists(ostree::ObjectType::CommitMeta, commit_checksum)? + crate::ostree_ffi_fixed::read_commit_detached_metadata(repo, commit_checksum, cancellable)? 
{ writer.append(ostree::ObjectType::CommitMeta, commit_checksum, &commitmeta)?; } diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 8d760127d..58d74cdbb 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -91,6 +91,7 @@ fn header_attrs(header: &tar::Header) -> Result<(u32, u32, u32)> { fn objtype_from_string(t: &str) -> Option { Some(match t { "commit" => ostree::ObjectType::Commit, + "commitmeta" => ostree::ObjectType::CommitMeta, "dirtree" => ostree::ObjectType::DirTree, "dirmeta" => ostree::ObjectType::DirMeta, "file" => ostree::ObjectType::File, @@ -137,25 +138,30 @@ impl<'a> Importer<'a> { ) -> Result<()> { let v = match objtype { ostree::ObjectType::DirTree => { + self.stats.dirtree += 1; entry_to_variant::<_, ostree::TreeVariantType>(entry, checksum)? } ostree::ObjectType::DirMeta => { + self.stats.dirmeta += 1; entry_to_variant::<_, ostree::DirmetaVariantType>(entry, checksum)? } ostree::ObjectType::Commit => { entry_to_variant::<_, ostree::CommitVariantType>(entry, checksum)? } + ostree::ObjectType::CommitMeta => entry_to_variant::< + _, + std::collections::HashMap, + >(entry, checksum)?, o => return Err(anyhow!("Invalid metadata object type; {:?}", o)), }; - // FIXME insert expected dirtree/dirmeta - let _ = self - .repo - .write_metadata(objtype, Some(checksum), &v, gio::NONE_CANCELLABLE)?; - match objtype { - ostree::ObjectType::DirMeta => self.stats.dirmeta += 1, - ostree::ObjectType::DirTree => self.stats.dirtree += 1, - ostree::ObjectType::Commit => {} - _ => unreachable!(), + if objtype == ostree::ObjectType::CommitMeta { + self.repo + .write_commit_detached_metadata(checksum, Some(&v), gio::NONE_CANCELLABLE)?; + } else { + // FIXME validate here https://github.com/ostreedev/ostree-rs-ext/issues/1 + let _ = self + .repo + .write_metadata(objtype, Some(checksum), &v, gio::NONE_CANCELLABLE)?; } Ok(()) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 1940fd6b8..1e5c5edd3 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -22,7 +22,7 @@ fn generate_test_repo(dir: &Utf8Path) -> Result { indoc! 
{" cd {dir} ostree --repo=repo init --mode=archive - ostree --repo=repo commit -b {testref} --bootable --add-metadata-string=version=42.0 --tree=tar=exampleos.tar.zst + ostree --repo=repo commit -b {testref} --bootable --add-metadata-string=version=42.0 --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst ostree --repo=repo show {testref} "}, testref = TESTREF, @@ -93,7 +93,11 @@ async fn test_tar_import_export() -> Result<()> { .as_str() ); bash!( - "ostree --repo={destrepodir} ls -R {imported_commit}", + r#" + ostree --repo={destrepodir} ls -R {imported_commit} + val=$(ostree --repo={destrepodir} show --print-detached-metadata-key=my-detached-key {imported_commit}) + test "${{val}}" = "'my-detached-value'" + "#, destrepodir = destrepodir.as_str(), imported_commit = imported_commit.as_str() )?; From 22afeec9cac58d4931d0e95b8996aaf70e4f1cdf Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 30 Aug 2021 13:57:17 -0400 Subject: [PATCH 080/775] Fix various clippy lints --- lib/src/container/import.rs | 2 +- lib/src/container/oci.rs | 2 +- lib/src/ima.rs | 4 ++-- lib/src/tar/import.rs | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 01e05e72b..b166dafbb 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -300,7 +300,7 @@ pub async fn import( event!(Level::DEBUG, "target blob: {}", layerid); let (blob, worker) = fetch_layer(imgref, layerid.as_str(), progress).await?; let blob = tokio::io::BufReader::new(blob); - let import = crate::tar::import_tar(&repo, blob); + let import = crate::tar::import_tar(repo, blob); let (ostree_commit, worker) = tokio::join!(import, worker); let ostree_commit = ostree_commit?; let _: () = worker?; diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 9ffc52381..dbe73751f 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -311,7 +311,7 @@ impl<'a> std::io::Write for LayerWriter<'a> { self.compressor.write_all(srcbuf).unwrap(); self.uncompressed_hash.update(srcbuf)?; let compressed_buf = self.compressor.get_mut().as_slice(); - self.bw.write_all(&compressed_buf)?; + self.bw.write_all(compressed_buf)?; Ok(srcbuf.len()) } diff --git a/lib/src/ima.rs b/lib/src/ima.rs index de4354972..97bc280aa 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -247,7 +247,7 @@ impl<'a> CommitRewriter<'a> { let name = name.to_str(); hex::encode_to_slice(contents_csum, &mut hexbuf)?; let contents_csum = std::str::from_utf8(&hexbuf)?; - let mapped = self.map_dirtree(&contents_csum)?; + let mapped = self.map_dirtree(contents_csum)?; let mapped = hex::decode(mapped)?; new_dirs.push((name, mapped, meta_csum_bytes)); } @@ -306,6 +306,6 @@ impl<'a> CommitRewriter<'a> { /// The generated commit object will inherit all metadata from the existing commit object /// such as version, etc. 
pub fn ima_sign(repo: &ostree::Repo, ostree_ref: &str, opts: &ImaOpts) -> Result { - let writer = &mut CommitRewriter::new(&repo, &opts)?; + let writer = &mut CommitRewriter::new(repo, opts)?; writer.map_commit(ostree_ref) } diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 58d74cdbb..467e9abf0 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -349,7 +349,7 @@ impl<'a> Importer<'a> { } else { None }; - let objtype = objtype_from_string(&objtype) + let objtype = objtype_from_string(objtype) .ok_or_else(|| anyhow!("Invalid object type {}", objtype))?; match (objtype, is_xattrs, &self.state) { (ostree::ObjectType::Commit, _, ImportState::Initial) => { @@ -379,9 +379,9 @@ impl<'a> Importer<'a> { /// Handle .xattr hardlinks that contain extended attributes for /// a content object. #[context("Processing xattr ref")] - fn import_xattr_ref<'b, R: std::io::Read>( + fn import_xattr_ref( &mut self, - entry: tar::Entry<'b, R>, + entry: tar::Entry, target: String, ) -> Result<()> { assert!(self.next_xattrs.is_none()); From 707535482949aff906c3e916b630b02d0f5d40d6 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 9 Sep 2021 10:11:35 -0400 Subject: [PATCH 081/775] tar/import: Minor state machine cleanup Validate upfront that we only get xattrs with content objects; this way the rest of the match doesn't need to ignore the boolean. --- lib/src/tar/import.rs | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 467e9abf0..4a8094e83 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -351,28 +351,27 @@ impl<'a> Importer<'a> { }; let objtype = objtype_from_string(objtype) .ok_or_else(|| anyhow!("Invalid object type {}", objtype))?; - match (objtype, is_xattrs, &self.state) { - (ostree::ObjectType::Commit, _, ImportState::Initial) => { + if is_xattrs && objtype != ostree::ObjectType::File { + return Err(anyhow!("Found xattrs for non-file object type {}", objtype)); + } + match (objtype, &self.state) { + (ostree::ObjectType::Commit, ImportState::Initial) => { self.import_commit(entry, &checksum) } - (ostree::ObjectType::File, true, ImportState::Importing(_)) => { - self.import_xattr_ref(entry, checksum) - } - (ostree::ObjectType::File, false, ImportState::Importing(_)) => { - self.import_content_object(entry, &checksum, xattr_ref) + (ostree::ObjectType::Commit, ImportState::Importing(c)) => { + return Err(anyhow!("Found multiple commit objects; original: {}", c)) } - (objtype, false, ImportState::Importing(_)) => { - self.import_metadata(entry, &checksum, objtype) + (ostree::ObjectType::File, ImportState::Importing(_)) => { + if is_xattrs { + self.import_xattr_ref(entry, checksum) + } else { + self.import_content_object(entry, &checksum, xattr_ref) + } } - (o, _, ImportState::Initial) => { + (objtype, ImportState::Importing(_)) => self.import_metadata(entry, &checksum, objtype), + (o, ImportState::Initial) => { return Err(anyhow!("Found content object {} before commit", o)) } - (ostree::ObjectType::Commit, _, ImportState::Importing(c)) => { - return Err(anyhow!("Found multiple commit objects; original: {}", c)) - } - (objtype, true, _) => { - return Err(anyhow!("Found xattrs for non-file object type {}", objtype)) - } } } From 1be22245d740ee28842e9aa3df5d4820e5c64cd5 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 9 Sep 2021 10:12:11 -0400 Subject: [PATCH 082/775] tar: Validate we get an error with an empty tarball And then we can later extend this to 
more error cases.
---
 lib/tests/it/main.rs | 32 ++++++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 1e5c5edd3..46794406e 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -4,7 +4,9 @@ use fn_error_context::context;
 use indoc::indoc;
 use ostree_ext::container::{Config, ImageReference, Transport};
 use ostree_ext::gio;
+use ostree_ext::prelude::*;
 use sh_inline::bash;
+use std::convert::TryFrom;
 use std::{io::Write, process::Command};
 
 const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
@@ -69,20 +71,32 @@ fn generate_test_tarball(dir: &Utf8Path) -> Result<Utf8PathBuf> {
     Ok(destpath)
 }
 
-#[tokio::test]
-async fn test_tar_import_export() -> Result<()> {
-    let cancellable = gio::NONE_CANCELLABLE;
-
+fn test_tar_import_prep() -> Result<(tempfile::TempDir, ostree::Repo)> {
     let tempdir = tempfile::tempdir_in("/var/tmp")?;
     let path = Utf8Path::from_path(tempdir.path()).unwrap();
-    let srcdir = &path.join("src");
-    std::fs::create_dir(srcdir)?;
-    let src_tar = tokio::fs::File::open(&generate_test_tarball(srcdir)?).await?;
     let destdir = &path.join("dest");
     std::fs::create_dir(destdir)?;
     let destrepodir = &destdir.join("repo");
     let destrepo = ostree::Repo::new_for_path(destrepodir);
-    destrepo.create(ostree::RepoMode::BareUser, cancellable)?;
+    destrepo.create(ostree::RepoMode::BareUser, gio::NONE_CANCELLABLE)?;
+    Ok((tempdir, destrepo))
+}
+
+#[tokio::test]
+async fn test_tar_import_empty() -> Result<()> {
+    let (_tempdir, destrepo) = test_tar_import_prep()?;
+    let r = ostree_ext::tar::import_tar(&destrepo, tokio::io::empty()).await;
+    assert!(r.is_err());
+    Ok(())
+}
+
+#[tokio::test]
+async fn test_tar_import_export() -> Result<()> {
+    let (tempdir, destrepo) = test_tar_import_prep()?;
+    let path = Utf8Path::from_path(tempdir.path()).unwrap();
+    let srcdir = &path.join("src");
+    std::fs::create_dir(srcdir)?;
+    let src_tar = tokio::fs::File::open(&generate_test_tarball(srcdir)?).await?;
     let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, src_tar).await?;
     let (commitdata, _) = destrepo.load_commit(&imported_commit)?;
@@ -92,6 +106,8 @@ async fn test_tar_import_export() -> Result<()> {
         .unwrap()
         .as_str()
     );
+    // So awesome. Look how many ways dealing with filenames can fail!
+    let destrepodir = Utf8PathBuf::try_from(destrepo.path().unwrap().path().unwrap()).unwrap();
     bash!(
         r#"
         ostree --repo={destrepodir} ls -R {imported_commit}

From aba2f8159ec55c49fbb78f96dfe658edd934862e Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 9 Sep 2021 11:10:00 -0400
Subject: [PATCH 083/775] lib: Bump to 0.3

We're going to make some semver-incompatible changes.
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index d7840d1e7..de6e98637 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0"
 name = "ostree-ext"
 readme = "README.md"
 repository = "https://github.com/ostreedev/ostree-rs-ext"
-version = "0.2.2"
+version = "0.3.0"
 
 [dependencies]

From 9365c48b6ef568db8aed00dab05234d77c9bb48e Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 9 Sep 2021 11:10:00 -0400
Subject: [PATCH 084/775] lib: Bump to ostree 0.13

We can drop our FFI workaround.
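With the new bindings the detached-metadata read becomes a one-line safe
call, roughly (a sketch assuming an already-open repo; this helper is
illustrative and not part of the diff below):

    use ostree::gio;
    use ostree::gio::glib;

    fn read_detached(repo: &ostree::Repo, checksum: &str) -> anyhow::Result<Option<glib::Variant>> {
        // Safe binding for ostree_repo_read_commit_detached_metadata();
        // Ok(None) just means the commit has no detached metadata.
        Ok(repo.read_commit_detached_metadata(checksum, gio::NONE_CANCELLABLE)?)
    }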
--- lib/Cargo.toml | 2 +- lib/src/lib.rs | 33 --------------------------------- lib/src/tar/export.rs | 4 +--- 3 files changed, 2 insertions(+), 37 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index de6e98637..2a16b8f11 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -24,7 +24,7 @@ nix = "0.22.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { features = ["v2021_2"], version = "0.12.0" } +ostree = { features = ["v2021_2"], version = "0.13.0" } phf = { features = ["macros"], version = "0.9.0" } serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 5e35c2696..3137cb655 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -32,36 +32,3 @@ pub mod prelude { #[doc(hidden)] pub use ostree::prelude::*; } - -/// Temporary holding place for fixed APIs -#[allow(unsafe_code)] -mod ostree_ffi_fixed { - use super::*; - use ostree::prelude::*; - - /// https://github.com/ostreedev/ostree/pull/2422 - pub(crate) fn read_commit_detached_metadata>( - repo: &ostree::Repo, - checksum: &str, - cancellable: Option<&P>, - ) -> std::result::Result, glib::Error> { - use glib::translate::*; - use std::ptr; - unsafe { - let mut out_metadata = ptr::null_mut(); - let mut error = ptr::null_mut(); - let _ = ostree::ffi::ostree_repo_read_commit_detached_metadata( - repo.to_glib_none().0, - checksum.to_glib_none().0, - &mut out_metadata, - cancellable.map(|p| p.as_ref()).to_glib_none().0, - &mut error, - ); - if error.is_null() { - Ok(from_glib_full(out_metadata)) - } else { - Err(from_glib_full(error)) - } - } - } -} diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 804ce1e69..942b214bf 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -265,9 +265,7 @@ fn impl_export( let commit_v = &commit_v; writer.append(ostree::ObjectType::Commit, commit_checksum, commit_v)?; - if let Some(commitmeta) = - crate::ostree_ffi_fixed::read_commit_detached_metadata(repo, commit_checksum, cancellable)? - { + if let Some(commitmeta) = repo.read_commit_detached_metadata(commit_checksum, cancellable)? { writer.append(ostree::ObjectType::CommitMeta, commit_checksum, &commitmeta)?; } From d991fcb159a970ad06d86975ae4325a3f5834b34 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 9 Sep 2021 11:46:11 -0400 Subject: [PATCH 085/775] lib: Only depend on futures-util I saw this in https://www.reddit.com/r/rust/comments/pkr9aa/is_the_crate_dependency_becoming_a_problem/ That said it turns out `glib` is depending on `futures-executor` right now too. 
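For reference, the one combinator we actually rely on is
`TryFutureExt::map_err`, which lives in `futures-util`; the pattern in our
import paths boils down to this sketch:

    use futures_util::TryFutureExt;

    async fn spawn_import() -> anyhow::Result<String> {
        // spawn_blocking resolves to Result<T, JoinError>; map_err (from
        // TryFutureExt) converts the JoinError into an anyhow::Error.
        let task = tokio::task::spawn_blocking(|| Ok::<_, anyhow::Error>(String::from("checksum")))
            .map_err(anyhow::Error::msg);
        task.await?
    }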
--- lib/Cargo.toml | 2 +- lib/src/container/import.rs | 2 +- lib/src/tar/import.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index d7840d1e7..a4b6fd782 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -15,7 +15,7 @@ camino = "1.0.4" cjson = "0.1.1" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" -futures = "0.3.13" +futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index b166dafbb..e963e7545 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -4,7 +4,7 @@ use super::*; use anyhow::{anyhow, Context}; use camino::Utf8Path; use fn_error_context::context; -use futures::prelude::*; +use futures_util::{Future, FutureExt, TryFutureExt}; use std::io::prelude::*; use std::pin::Pin; use std::process::Stdio; diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 4a8094e83..44e924e61 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -4,7 +4,7 @@ use crate::Result; use anyhow::{anyhow, Context}; use camino::Utf8Path; use fn_error_context::context; -use futures::prelude::*; +use futures_util::TryFutureExt; use gio::glib; use gio::prelude::*; use glib::Variant; From 4545336bd2c602ac364e961e5a0ce464f3371722 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 9 Sep 2021 15:33:37 -0400 Subject: [PATCH 086/775] lib/tar/import: Move more logic into Importer struct Let's keep the code inside an anonymous closure smaller. --- lib/src/tar/import.rs | 75 +++++++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 34 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 44e924e61..ac983d93c 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -116,6 +116,17 @@ fn entry_to_variant( } impl<'a> Importer<'a> { + fn new(repo: &'a ostree::Repo) -> Self { + Self { + state: ImportState::Initial, + repo, + buf: vec![0u8; 16384], + xattrs: Default::default(), + next_xattrs: None, + stats: Default::default(), + } + } + /// Import a commit object. Must be in "initial" state. This transitions into the "importing" state. fn import_commit( &mut self, @@ -439,6 +450,34 @@ impl<'a> Importer<'a> { Ok(()) } + fn import(&mut self, archive: &mut tar::Archive) -> Result<()> { + self.repo.prepare_transaction(gio::NONE_CANCELLABLE)?; + for entry in archive.entries()? { + let entry = entry?; + if entry.header().entry_type() == tar::EntryType::Directory { + continue; + } + let path = entry.path()?; + let path = &*path; + let path = Utf8Path::from_path(path) + .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?; + let path = if let Ok(p) = path.strip_prefix("sysroot/ostree/repo/") { + p + } else { + continue; + }; + + if let Ok(p) = path.strip_prefix("objects/") { + // Need to clone here, otherwise we borrow from the moved entry + let p = &p.to_owned(); + self.import_object(entry, p)?; + } else if path.strip_prefix("xattrs/").is_ok() { + self.import_xattrs(entry)?; + } + } + Ok(()) + } + /// Consume this importer and return the imported OSTree commit checksum. 
fn commit(mut self) -> Result { self.repo.commit_transaction(gio::NONE_CANCELLABLE)?; @@ -468,41 +507,9 @@ pub async fn import_tar( let pipein = crate::async_util::async_read_to_sync(src); let repo = repo.clone(); let import = tokio::task::spawn_blocking(move || { - let repo = &repo; - let mut importer = Importer { - state: ImportState::Initial, - repo, - buf: vec![0u8; 16384], - xattrs: Default::default(), - next_xattrs: None, - stats: Default::default(), - }; - repo.prepare_transaction(gio::NONE_CANCELLABLE)?; let mut archive = tar::Archive::new(pipein); - for entry in archive.entries()? { - let entry = entry?; - if entry.header().entry_type() == tar::EntryType::Directory { - continue; - } - let path = entry.path()?; - let path = &*path; - let path = Utf8Path::from_path(path) - .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?; - let path = if let Ok(p) = path.strip_prefix("sysroot/ostree/repo/") { - p - } else { - continue; - }; - - if let Ok(p) = path.strip_prefix("objects/") { - // Need to clone here, otherwise we borrow from the moved entry - let p = &p.to_owned(); - importer.import_object(entry, p)?; - } else if path.strip_prefix("xattrs/").is_ok() { - importer.import_xattrs(entry)?; - } - } - + let mut importer = Importer::new(&repo); + importer.import(&mut archive)?; importer.commit() }) .map_err(anyhow::Error::msg); From 64e3892d7e09a1a8b53869b35d43c78490226163 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 9 Sep 2021 15:33:37 -0400 Subject: [PATCH 087/775] lib/tar/import: Remove lifetime from Importer It's really easier to just bump the gobject refcount; not worth carrying a lifetime for this. --- lib/src/tar/import.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index ac983d93c..6edebdf10 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -44,9 +44,9 @@ struct ImportStats { } /// Importer machine. -struct Importer<'a> { +struct Importer { state: ImportState, - repo: &'a ostree::Repo, + repo: ostree::Repo, xattrs: HashMap, next_xattrs: Option<(String, String)>, @@ -56,7 +56,7 @@ struct Importer<'a> { stats: ImportStats, } -impl<'a> Drop for Importer<'a> { +impl Drop for Importer { fn drop(&mut self) { let _ = self.repo.abort_transaction(gio::NONE_CANCELLABLE); } @@ -115,11 +115,11 @@ fn entry_to_variant( Ok(v.normal_form()) } -impl<'a> Importer<'a> { - fn new(repo: &'a ostree::Repo) -> Self { +impl Importer { + fn new(repo: &ostree::Repo) -> Self { Self { state: ImportState::Initial, - repo, + repo: repo.clone(), buf: vec![0u8; 16384], xattrs: Default::default(), next_xattrs: None, From 562b5742f9e617c3867e7b227df1bea6a7e45579 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 10 Sep 2021 07:00:26 -0400 Subject: [PATCH 088/775] lib/tar/import: Explicitly parse commit object first Prep for signing. Rather than maintaining a state machine, just explicitly read the first object and validate it as a commit. This way we can later also read the commitmeta in the same way and validate both before writing them to the repo. 
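In miniature, the new flow takes the first entry and refuses anything that
is not a commit; an illustrative sketch (the real code parses full object
paths, this just shows the ordering requirement):

    fn require_commit_first(mut names: impl Iterator<Item = String>) -> anyhow::Result<String> {
        let first = names
            .next()
            .ok_or_else(|| anyhow::anyhow!("Commit object not found"))?;
        // dirtree/dirmeta/content objects must all come after the commit.
        first
            .strip_suffix(".commit")
            .map(|checksum| checksum.to_string())
            .ok_or_else(|| anyhow::anyhow!("Expected commit object, not {}", first))
    }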
--- lib/src/tar/import.rs | 188 ++++++++++++++++++++++-------------------- 1 file changed, 100 insertions(+), 88 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 6edebdf10..a372c8a76 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -3,6 +3,7 @@ use crate::Result; use anyhow::{anyhow, Context}; use camino::Utf8Path; +use camino::Utf8PathBuf; use fn_error_context::context; use futures_util::TryFutureExt; use gio::glib; @@ -25,14 +26,8 @@ const MAX_METADATA_SIZE: u32 = 10 * 1024 * 1024; /// https://stackoverflow.com/questions/258091/when-should-i-use-mmap-for-file-access const SMALL_REGFILE_SIZE: usize = 127 * 1024; -/// State tracker for the importer. The main goal is to reject multiple -/// commit objects, as well as finding metadata/content before the commit. -#[derive(Debug, PartialEq, Eq)] -enum ImportState { - Initial, - Importing(String), -} - +// The prefix for filenames that contain content we actually look at. +const REPO_PREFIX: &str = "sysroot/ostree/repo/"; /// Statistics from import. #[derive(Debug, Default)] struct ImportStats { @@ -45,7 +40,6 @@ struct ImportStats { /// Importer machine. struct Importer { - state: ImportState, repo: ostree::Repo, xattrs: HashMap, next_xattrs: Option<(String, String)>, @@ -115,10 +109,45 @@ fn entry_to_variant( Ok(v.normal_form()) } +/// Parse an object path into (parent, rest, objtype). +/// Normal ostree object paths look like 00/1234.commit. +/// In the tar format, we may also see 00/1234.file.xattrs. +fn parse_object_entry_path(path: &Utf8Path) -> Result<(&str, &Utf8Path, &str)> { + // The "sharded" commit directory. + let parentname = path + .parent() + .map(|p| p.file_name()) + .flatten() + .ok_or_else(|| anyhow!("Invalid path (no parent) {}", path))?; + if parentname.len() != 2 { + return Err(anyhow!("Invalid checksum parent {}", parentname)); + } + let name = path + .file_name() + .map(Utf8Path::new) + .ok_or_else(|| anyhow!("Invalid path (dir) {}", path))?; + let objtype = name + .extension() + .ok_or_else(|| anyhow!("Invalid objpath {}", path))?; + Ok((parentname, name, objtype)) +} + +fn parse_checksum(parent: &str, name: &Utf8Path) -> Result { + let checksum_rest = name + .file_stem() + .ok_or_else(|| anyhow!("Invalid object path part {}", name))?; + + if checksum_rest.len() != 62 { + return Err(anyhow!("Invalid checksum part {}", checksum_rest)); + } + let checksum = format!("{}{}", parent, checksum_rest); + validate_sha256(&checksum)?; + Ok(checksum) +} + impl Importer { fn new(repo: &ostree::Repo) -> Self { Self { - state: ImportState::Initial, repo: repo.clone(), buf: vec![0u8; 16384], xattrs: Default::default(), @@ -127,17 +156,22 @@ impl Importer { } } - /// Import a commit object. Must be in "initial" state. This transitions into the "importing" state. - fn import_commit( - &mut self, - entry: tar::Entry, - checksum: &str, - ) -> Result<()> { - assert_eq!(self.state, ImportState::Initial); - self.import_metadata(entry, checksum, ostree::ObjectType::Commit)?; - event!(Level::DEBUG, "Imported {}.commit", checksum); - self.state = ImportState::Importing(checksum.to_string()); - Ok(()) + // Given a tar entry, filter it out if it doesn't start with the repository prefix. + // It is an error if the filename is invalid UTF-8. If it is valid UTF-8, return + // an owned copy of the path. 
+ fn filter_entry( + e: tar::Entry, + ) -> Result, Utf8PathBuf)>> { + let orig_path = e.path()?; + let path = Utf8Path::from_path(&*orig_path) + .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", orig_path))?; + // Ignore the regular non-object file hardlinks we inject + if let Ok(path) = path.strip_prefix(REPO_PREFIX) { + let path = path.into(); + Ok(Some((e, path))) + } else { + Ok(None) + } } /// Import a metadata object. @@ -306,21 +340,8 @@ impl Importer { entry: tar::Entry<'b, R>, path: &Utf8Path, ) -> Result<()> { - let parentname = path - .parent() - .map(|p| p.file_name()) - .flatten() - .ok_or_else(|| anyhow!("Invalid path (no parent) {}", path))?; - if parentname.len() != 2 { - return Err(anyhow!("Invalid checksum parent {}", parentname)); - } - let mut name = path - .file_name() - .map(Utf8Path::new) - .ok_or_else(|| anyhow!("Invalid path (dir) {}", path))?; - let mut objtype = name - .extension() - .ok_or_else(|| anyhow!("Invalid objpath {}", path))?; + let (parentname, mut name, mut objtype) = parse_object_entry_path(path)?; + let is_xattrs = objtype == "xattrs"; let xattrs = self.next_xattrs.take(); if is_xattrs { @@ -335,15 +356,7 @@ impl Importer { .extension() .ok_or_else(|| anyhow!("Invalid objpath {}", path))?; } - let checksum_rest = name - .file_stem() - .ok_or_else(|| anyhow!("Invalid objpath {}", path))?; - - if checksum_rest.len() != 62 { - return Err(anyhow!("Invalid checksum rest {}", name)); - } - let checksum = format!("{}{}", parentname, checksum_rest); - validate_sha256(&checksum)?; + let checksum = parse_checksum(parentname, name)?; let xattr_ref = if let Some((xattr_target, xattr_objref)) = xattrs { if xattr_target.as_str() != checksum.as_str() { return Err(anyhow!( @@ -365,24 +378,18 @@ impl Importer { if is_xattrs && objtype != ostree::ObjectType::File { return Err(anyhow!("Found xattrs for non-file object type {}", objtype)); } - match (objtype, &self.state) { - (ostree::ObjectType::Commit, ImportState::Initial) => { - self.import_commit(entry, &checksum) - } - (ostree::ObjectType::Commit, ImportState::Importing(c)) => { - return Err(anyhow!("Found multiple commit objects; original: {}", c)) + match objtype { + ostree::ObjectType::Commit => { + return Err(anyhow!("Found multiple commit objects")); } - (ostree::ObjectType::File, ImportState::Importing(_)) => { + ostree::ObjectType::File => { if is_xattrs { self.import_xattr_ref(entry, checksum) } else { self.import_content_object(entry, &checksum, xattr_ref) } } - (objtype, ImportState::Importing(_)) => self.import_metadata(entry, &checksum, objtype), - (o, ImportState::Initial) => { - return Err(anyhow!("Found content object {} before commit", o)) - } + objtype => self.import_metadata(entry, &checksum, objtype), } } @@ -414,10 +421,6 @@ impl Importer { /// Process a special /xattrs/ entry (sha256 of xattr values). fn import_xattrs(&mut self, mut entry: tar::Entry) -> Result<()> { - match &self.state { - ImportState::Initial => return Err(anyhow!("Found xattr object {} before commit")), - ImportState::Importing(_) => {} - } let checksum = { let path = entry.path()?; let name = path @@ -450,41 +453,51 @@ impl Importer { Ok(()) } - fn import(&mut self, archive: &mut tar::Archive) -> Result<()> { + fn import(mut self, archive: &mut tar::Archive) -> Result { self.repo.prepare_transaction(gio::NONE_CANCELLABLE)?; - for entry in archive.entries()? 
{ - let entry = entry?; - if entry.header().entry_type() == tar::EntryType::Directory { - continue; + + // Create an iterator that skips over directories; we just care about the file names. + let mut ents = archive.entries()?.filter_map(|e| match e { + Ok(e) => { + if e.header().entry_type() == tar::EntryType::Directory { + return None; + } + Self::filter_entry(e).transpose() } - let path = entry.path()?; - let path = &*path; - let path = Utf8Path::from_path(path) - .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?; - let path = if let Ok(p) = path.strip_prefix("sysroot/ostree/repo/") { - p - } else { - continue; - }; + Err(e) => Some(Err(anyhow::Error::msg(e))), + }); + + // Read the commit object. + let (commit_ent, commit_path) = ents + .next() + .ok_or_else(|| anyhow!("Commit object not found"))??; + + if commit_ent.header().entry_type() != tar::EntryType::Regular { + return Err(anyhow!( + "Expected regular file for commit object, not {:?}", + commit_ent.header().entry_type() + )); + } + let (parentname, name, objtype) = parse_object_entry_path(&commit_path)?; + let checksum = parse_checksum(parentname, name)?; + if objtype != "commit" { + return Err(anyhow!("Expected commit object, not {:?}", objtype)); + } + self.import_metadata(commit_ent, &checksum, ostree::ObjectType::Commit)?; + event!(Level::DEBUG, "Imported {}.commit", checksum); + + for entry in ents { + let (entry, path) = entry?; if let Ok(p) = path.strip_prefix("objects/") { - // Need to clone here, otherwise we borrow from the moved entry - let p = &p.to_owned(); self.import_object(entry, p)?; } else if path.strip_prefix("xattrs/").is_ok() { self.import_xattrs(entry)?; } } - Ok(()) - } - - /// Consume this importer and return the imported OSTree commit checksum. - fn commit(mut self) -> Result { self.repo.commit_transaction(gio::NONE_CANCELLABLE)?; - match std::mem::replace(&mut self.state, ImportState::Initial) { - ImportState::Importing(c) => Ok(c), - ImportState::Initial => Err(anyhow!("Failed to find a commit object to import")), - } + + Ok(checksum) } } @@ -508,9 +521,8 @@ pub async fn import_tar( let repo = repo.clone(); let import = tokio::task::spawn_blocking(move || { let mut archive = tar::Archive::new(pipein); - let mut importer = Importer::new(&repo); - importer.import(&mut archive)?; - importer.commit() + let importer = Importer::new(&repo); + importer.import(&mut archive) }) .map_err(anyhow::Error::msg); let import: String = import.await??; From 3925d3a68ed4f3f1c02e2a38d6a639183309272d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 10 Sep 2021 11:05:49 -0400 Subject: [PATCH 089/775] lib/tar/import: Split out a helper to parse an object path Prep for further work. --- lib/src/tar/import.rs | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index a372c8a76..30675979e 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -174,6 +174,14 @@ impl Importer { } } + fn parse_metadata_entry(path: &Utf8Path) -> Result<(String, ostree::ObjectType)> { + let (parentname, name, objtype) = parse_object_entry_path(path)?; + let checksum = parse_checksum(parentname, name)?; + let objtype = objtype_from_string(objtype) + .ok_or_else(|| anyhow!("Invalid object type {}", objtype))?; + Ok((checksum, objtype)) + } + /// Import a metadata object. 
fn import_metadata( &mut self, @@ -478,12 +486,11 @@ impl Importer { commit_ent.header().entry_type() )); } - let (parentname, name, objtype) = parse_object_entry_path(&commit_path)?; - let checksum = parse_checksum(parentname, name)?; - if objtype != "commit" { + let (checksum, objtype) = Self::parse_metadata_entry(&commit_path)?; + if objtype != ostree::ObjectType::Commit { return Err(anyhow!("Expected commit object, not {:?}", objtype)); } - self.import_metadata(commit_ent, &checksum, ostree::ObjectType::Commit)?; + self.import_metadata(commit_ent, &checksum, objtype)?; event!(Level::DEBUG, "Imported {}.commit", checksum); for entry in ents { @@ -533,6 +540,19 @@ pub async fn import_tar( mod tests { use super::*; + #[test] + fn test_parse_metadata_entry() { + let c = "a8/6d80a3e9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b964"; + let invalid = format!("{}.blah", c); + for &k in &["", "42", c, &invalid] { + assert!(Importer::parse_metadata_entry(k.into()).is_err()) + } + let valid = format!("{}.commit", c); + let r = Importer::parse_metadata_entry(valid.as_str().into()).unwrap(); + assert_eq!(r.0, c.replace('/', "")); + assert_eq!(r.1, ostree::ObjectType::Commit); + } + #[test] fn test_validate_sha256() -> Result<()> { validate_sha256("a86d80a3e9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b964")?; From 66f62bf8d1019396a08eeab877b180c594f85434 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 10 Sep 2021 17:18:45 -0400 Subject: [PATCH 090/775] ci: Update ostree from updates-testing The karma thing is soooo ridiculous... --- .github/workflows/rust.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index aa3e9eab3..dd013be3a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -21,6 +21,8 @@ jobs: steps: - name: Install skopeo run: yum -y install skopeo + - name: Update ostree + run: yum -y --enablerepo=updates-testing update ostree-devel - uses: actions/checkout@v2 - name: Format run: cargo fmt -- --check -l From 4b1f12857bbae9477e2b4d79091669c103e05ab8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 10 Sep 2021 11:12:58 -0400 Subject: [PATCH 091/775] lib/tar/import: Accept options for a remote to use for signatures This allows doing GPG/signapi verification of an imported commit. To implement this, read both the commit and commitmeta objects first if we have a remote, then call the signature verification API. 
--- lib/Cargo.toml | 3 +- lib/src/cli.rs | 4 +- lib/src/container/import.rs | 2 +- lib/src/tar/import.rs | 117 +++++++++++--- .../it/fixtures/ostree-gpg-test-home.tar.gz | Bin 0 -> 13515 bytes lib/tests/it/main.rs | 147 +++++++++++++++--- 6 files changed, 228 insertions(+), 45 deletions(-) create mode 100644 lib/tests/it/fixtures/ostree-gpg-test-home.tar.gz diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 690519d1f..d55ff0961 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -20,11 +20,12 @@ gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" libc = "0.2.92" +maplit = "1.0.2" nix = "0.22.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { features = ["v2021_2"], version = "0.13.0" } +ostree = { features = ["v2021_4"], version = "0.13.0" } phf = { features = ["macros"], version = "0.9.0" } serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 1694a3446..1a93dbaee 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -138,10 +138,10 @@ async fn tar_import(opts: &ImportOpts) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; let imported = if let Some(path) = opts.path.as_ref() { let instream = tokio::fs::File::open(path).await?; - crate::tar::import_tar(repo, instream).await? + crate::tar::import_tar(repo, instream, None).await? } else { let stdin = tokio::io::stdin(); - crate::tar::import_tar(repo, stdin).await? + crate::tar::import_tar(repo, stdin, None).await? }; println!("Imported: {}", imported); Ok(()) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index e963e7545..c015711a6 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -300,7 +300,7 @@ pub async fn import( event!(Level::DEBUG, "target blob: {}", layerid); let (blob, worker) = fetch_layer(imgref, layerid.as_str(), progress).await?; let blob = tokio::io::BufReader::new(blob); - let import = crate::tar::import_tar(repo, blob); + let import = crate::tar::import_tar(repo, blob, None); let (ostree_commit, worker) = tokio::join!(import, worker); let ostree_commit = ostree_commit?; let _: () = worker?; diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 30675979e..d6d64d0b1 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -41,6 +41,7 @@ struct ImportStats { /// Importer machine. struct Importer { repo: ostree::Repo, + remote: Option, xattrs: HashMap, next_xattrs: Option<(String, String)>, @@ -146,9 +147,10 @@ fn parse_checksum(parent: &str, name: &Utf8Path) -> Result { } impl Importer { - fn new(repo: &ostree::Repo) -> Self { + fn new(repo: &ostree::Repo, remote: Option) -> Self { Self { repo: repo.clone(), + remote, buf: vec![0u8; 16384], xattrs: Default::default(), next_xattrs: None, @@ -198,24 +200,14 @@ impl Importer { self.stats.dirmeta += 1; entry_to_variant::<_, ostree::DirmetaVariantType>(entry, checksum)? } - ostree::ObjectType::Commit => { - entry_to_variant::<_, ostree::CommitVariantType>(entry, checksum)? - } - ostree::ObjectType::CommitMeta => entry_to_variant::< - _, - std::collections::HashMap, - >(entry, checksum)?, o => return Err(anyhow!("Invalid metadata object type; {:?}", o)), }; - if objtype == ostree::ObjectType::CommitMeta { + // FIXME validate here that this checksum was in the set we expected. 
+ // https://github.com/ostreedev/ostree-rs-ext/issues/1 + let actual = self.repo - .write_commit_detached_metadata(checksum, Some(&v), gio::NONE_CANCELLABLE)?; - } else { - // FIXME validate here https://github.com/ostreedev/ostree-rs-ext/issues/1 - let _ = self - .repo .write_metadata(objtype, Some(checksum), &v, gio::NONE_CANCELLABLE)?; - } + assert_eq!(actual.to_hex(), checksum); Ok(()) } @@ -490,8 +482,88 @@ impl Importer { if objtype != ostree::ObjectType::Commit { return Err(anyhow!("Expected commit object, not {:?}", objtype)); } - self.import_metadata(commit_ent, &checksum, objtype)?; - event!(Level::DEBUG, "Imported {}.commit", checksum); + let commit = entry_to_variant::<_, ostree::CommitVariantType>(commit_ent, &checksum)?; + + let (next_ent, nextent_path) = ents + .next() + .ok_or_else(|| anyhow!("End of stream after commit object"))??; + let (next_checksum, next_objtype) = Self::parse_metadata_entry(&nextent_path)?; + + if let Some(remote) = self.remote.as_deref() { + if next_checksum != checksum { + return Err(anyhow!( + "Expected commitmeta checksum {}, found {}", + checksum, + next_checksum + )); + } + if next_objtype != ostree::ObjectType::CommitMeta { + return Err(anyhow!( + "Using remote {} for verification; Expected commitmeta object, not {:?}", + remote, + objtype + )); + } + let commitmeta = entry_to_variant::<_, std::collections::HashMap>( + next_ent, + &next_checksum, + )?; + + // Now that we have both the commit and detached metadata in memory, verify that + // the signatures in the detached metadata correctly sign the commit. + self.repo.signature_verify_commit_data( + remote, + &commit.data_as_bytes(), + &commitmeta.data_as_bytes(), + ostree::RepoVerifyFlags::empty(), + )?; + + // Write the commit object, which also verifies its checksum. + let actual_checksum = self.repo.write_metadata( + objtype, + Some(&checksum), + &commit, + gio::NONE_CANCELLABLE, + )?; + assert_eq!(actual_checksum.to_hex(), checksum); + event!(Level::DEBUG, "Imported {}.commit", checksum); + + // Finally, write the detached metadata. + self.repo.write_commit_detached_metadata( + &checksum, + Some(&commitmeta), + gio::NONE_CANCELLABLE, + )?; + } else { + // We're not doing any validation of the commit, so go ahead and write it. + let actual_checksum = self.repo.write_metadata( + objtype, + Some(&checksum), + &commit, + gio::NONE_CANCELLABLE, + )?; + assert_eq!(actual_checksum.to_hex(), checksum); + event!(Level::DEBUG, "Imported {}.commit", checksum); + + // Write the next object, whether it's commit metadata or not. + let (meta_checksum, meta_objtype) = Self::parse_metadata_entry(&nextent_path)?; + match meta_objtype { + ostree::ObjectType::CommitMeta => { + let commitmeta = entry_to_variant::< + _, + std::collections::HashMap, + >(next_ent, &meta_checksum)?; + self.repo.write_commit_detached_metadata( + &checksum, + Some(&commitmeta), + gio::NONE_CANCELLABLE, + )?; + } + _ => { + self.import_object(next_ent, &nextent_path)?; + } + } + } for entry in ents { let (entry, path) = entry?; @@ -518,17 +590,26 @@ fn validate_sha256(s: &str) -> Result<()> { Ok(()) } +/// Configuration for tar import. +#[derive(Debug, Default)] +pub struct TarImportOptions { + /// Name of the remote to use for signature verification. + pub remote: Option, +} + /// Read the contents of a tarball and import the ostree commit inside. The sha56 of the imported commit will be returned. 
 #[instrument(skip(repo, src))]
 pub async fn import_tar(
     repo: &ostree::Repo,
     src: impl tokio::io::AsyncRead + Send + Unpin + 'static,
+    options: Option<TarImportOptions>,
 ) -> Result<String> {
+    let options = options.unwrap_or_default();
     let pipein = crate::async_util::async_read_to_sync(src);
     let repo = repo.clone();
     let import = tokio::task::spawn_blocking(move || {
         let mut archive = tar::Archive::new(pipein);
-        let importer = Importer::new(&repo);
+        let importer = Importer::new(&repo, options.remote);
         importer.import(&mut archive)
     })
     .map_err(anyhow::Error::msg);

diff --git a/lib/tests/it/fixtures/ostree-gpg-test-home.tar.gz b/lib/tests/it/fixtures/ostree-gpg-test-home.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..1160f474f89d331c55819b1b3f8405c8c2e701e4
GIT binary patch
literal 13515
[13515 bytes of base85-encoded binary patch data omitted; the encoded payload is truncated in this copy and not reproducible as text]
zUuLm9egYA;|3`dV`3t`NYG9qtCw|otB-!qgk%k^sZ$7OyKf$#ty3uiX+Ez*yif}+D)miD36J8GGaQaG~YTQ;0~aDH&s z)I-mfZ|ibC(cBodEx#g}nAq_%y5NWMi_;qRc1@r+`%L}-xSs0vZjZFOGH4-v7=HLM z>V}g&oackH>6eomf4OQJoObl}jHz`R4au5Qmj2Da{-Ag zxIMoOx0<56>fu&X>GUsDI>mi6shUcs-zuGI%1i#_@{*r?s;0c;xAKyjoc_O%)Bn|{ zYI6F&UgNUh0U{R`QvxW7BRHGWzs78OM{tubOMm7L= F000-6ulE1|

literal 0
HcmV?d00001

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 46794406e..fcb5d1ff1 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -3,31 +3,53 @@ use camino::{Utf8Path, Utf8PathBuf};
 use fn_error_context::context;
 use indoc::indoc;
 use ostree_ext::container::{Config, ImageReference, Transport};
-use ostree_ext::gio;
-use ostree_ext::prelude::*;
+use ostree_ext::tar::TarImportOptions;
+use ostree_ext::{gio, glib};
 use sh_inline::bash;
-use std::convert::TryFrom;
+use std::convert::TryInto;
 use std::{io::Write, process::Command};
 
+const OSTREE_GPG_HOME: &[u8] = include_bytes!("fixtures/ostree-gpg-test-home.tar.gz");
+const TEST_GPG_KEYID_1: &str = "7FCA23D8472CDAFA";
+#[allow(dead_code)]
+const TEST_GPG_KEYFPR_1: &str = "5E65DE75AB1C501862D476347FCA23D8472CDAFA";
 const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
 const EXAMPLEOS_V1: &[u8] = include_bytes!("fixtures/exampleos-v1.tar.zst");
 const TESTREF: &str = "exampleos/x86_64/stable";
 const EXAMPLEOS_CONTENT_CHECKSUM: &str =
     "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
 
+fn assert_err_contains<T>(r: Result<T>, s: impl AsRef<str>) {
+    let s = s.as_ref();
+    let msg = r.err().unwrap().to_string();
+    if !msg.contains(s) {
+        panic!(r#"Error message "{}" did not contain "{}""#, msg, s);
+    }
+}
+
 #[context("Generating test repo")]
 fn generate_test_repo(dir: &Utf8Path) -> Result<Utf8PathBuf> {
     let src_tarpath = &dir.join("exampleos.tar.zst");
     std::fs::write(src_tarpath, EXAMPLEOS_V0)?;
 
+    let gpghome = dir.join("gpghome");
+    {
+        let dec = flate2::read::GzDecoder::new(OSTREE_GPG_HOME);
+        let mut a = tar::Archive::new(dec);
+        a.unpack(&gpghome)?;
+    };
+
     bash!(
         indoc! {"
         cd {dir}
         ostree --repo=repo init --mode=archive
-        ostree --repo=repo commit -b {testref} --bootable --add-metadata-string=version=42.0 --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst
+        ostree --repo=repo commit -b {testref} --bootable --add-metadata-string=version=42.0 --gpg-homedir={gpghome} --gpg-sign={keyid} \
+          --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst
         ostree --repo=repo show {testref}
     "},
         testref = TESTREF,
+        gpghome = gpghome.as_str(),
+        keyid = TEST_GPG_KEYID_1,
         dir = dir.as_str()
     )?;
     std::fs::remove_file(src_tarpath)?;
@@ -71,50 +93,129 @@ fn generate_test_tarball(dir: &Utf8Path) -> Result<Utf8PathBuf> {
     Ok(destpath)
 }
 
-fn test_tar_import_prep() -> Result<(tempfile::TempDir, ostree::Repo)> {
-    let tempdir = tempfile::tempdir_in("/var/tmp")?;
-    let path = Utf8Path::from_path(tempdir.path()).unwrap();
-    let destdir = &path.join("dest");
-    std::fs::create_dir(destdir)?;
-    let destrepodir = &destdir.join("repo");
-    let destrepo = ostree::Repo::new_for_path(destrepodir);
-    destrepo.create(ostree::RepoMode::BareUser, gio::NONE_CANCELLABLE)?;
-    Ok((tempdir, destrepo))
+struct Fixture {
+    // Just holds a reference
+    _tempdir: tempfile::TempDir,
+    path: Utf8PathBuf,
+    destrepo: ostree::Repo,
+    destrepo_path: Utf8PathBuf,
+}
+
+impl Fixture {
+    fn new() -> Result<Self> {
+        let _tempdir = tempfile::tempdir_in("/var/tmp")?;
+        let path: &Utf8Path = _tempdir.path().try_into().unwrap();
+        let path = path.to_path_buf();
+        let destdir = &path.join("dest");
+        std::fs::create_dir(destdir)?;
+        let destrepo_path = destdir.join("repo");
+        let destrepo = ostree::Repo::new_for_path(&destrepo_path);
+        destrepo.create(ostree::RepoMode::BareUser, gio::NONE_CANCELLABLE)?;
+        Ok(Self {
+            _tempdir,
+            path,
+            destrepo,
+            destrepo_path,
+        })
+    }
 }
 
 #[tokio::test]
 async fn test_tar_import_empty() -> Result<()> {
-    let (_tempdir, destrepo) = test_tar_import_prep()?;
-    let r = ostree_ext::tar::import_tar(&destrepo, tokio::io::empty()).await;
+    let fixture = Fixture::new()?;
+    let destrepo = ostree::Repo::new_for_path(&fixture.destrepo_path);
+    destrepo.open(gio::NONE_CANCELLABLE)?;
+    let r = ostree_ext::tar::import_tar(&destrepo, tokio::io::empty(), None).await;
     assert!(r.is_err());
     Ok(())
 }
 
+#[tokio::test]
+async fn test_tar_import_signed() -> Result<()> {
+    let fixture = Fixture::new()?;
+    let srcdir = &fixture.path.join("src");
+    std::fs::create_dir(srcdir)?;
+
+    let test_tar = &generate_test_tarball(srcdir)?;
+
+    // Verify we fail with an unknown remote.
+ let src_tar = tokio::fs::File::open(test_tar).await?; + let r = ostree_ext::tar::import_tar( + &fixture.destrepo, + src_tar, + Some(TarImportOptions { + remote: Some("nosuchremote".to_string()), + }), + ) + .await; + assert_err_contains(r, r#"Remote "nosuchremote" not found"#); + + // Test a remote, but without a key + let opts = glib::VariantDict::new(None); + opts.insert("gpg-verify", &true); + opts.insert("custom-backend", &"ostree-rs-ext"); + fixture + .destrepo + .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; + let src_tar = tokio::fs::File::open(test_tar).await?; + let r = ostree_ext::tar::import_tar( + &fixture.destrepo, + src_tar, + Some(TarImportOptions { + remote: Some("myremote".to_string()), + }), + ) + .await; + assert_err_contains(r, r#"Can't check signature: public key not found"#); + + // And signed correctly + bash!( + "ostree --repo={repo} remote gpg-import --stdin myremote < {p}/gpghome/key1.asc", + repo = fixture.destrepo_path.as_str(), + p = srcdir.as_str() + )?; + let src_tar = tokio::fs::File::open(test_tar).await?; + let imported = ostree_ext::tar::import_tar( + &fixture.destrepo, + src_tar, + Some(TarImportOptions { + remote: Some("myremote".to_string()), + }), + ) + .await?; + let (commitdata, _) = fixture.destrepo.load_commit(&imported)?; + assert_eq!( + EXAMPLEOS_CONTENT_CHECKSUM, + ostree::commit_get_content_checksum(&commitdata) + .unwrap() + .as_str() + ); + Ok(()) +} + #[tokio::test] async fn test_tar_import_export() -> Result<()> { - let (tempdir, destrepo) = test_tar_import_prep()?; - let path = Utf8Path::from_path(tempdir.path()).unwrap(); - let srcdir = &path.join("src"); + let fixture = Fixture::new()?; + let srcdir = &fixture.path.join("src"); std::fs::create_dir(srcdir)?; let src_tar = tokio::fs::File::open(&generate_test_tarball(srcdir)?).await?; - let imported_commit: String = ostree_ext::tar::import_tar(&destrepo, src_tar).await?; - let (commitdata, _) = destrepo.load_commit(&imported_commit)?; + let imported_commit: String = + ostree_ext::tar::import_tar(&fixture.destrepo, src_tar, None).await?; + let (commitdata, _) = fixture.destrepo.load_commit(&imported_commit)?; assert_eq!( EXAMPLEOS_CONTENT_CHECKSUM, ostree::commit_get_content_checksum(&commitdata) .unwrap() .as_str() ); - // So awesome. Look how many ways dealing with filenames can fail! - let destrepodir = Utf8PathBuf::try_from(destrepo.path().unwrap().path().unwrap()).unwrap(); bash!( r#" ostree --repo={destrepodir} ls -R {imported_commit} val=$(ostree --repo={destrepodir} show --print-detached-metadata-key=my-detached-key {imported_commit}) test "${{val}}" = "'my-detached-value'" "#, - destrepodir = destrepodir.as_str(), + destrepodir = fixture.destrepo_path.as_str(), imported_commit = imported_commit.as_str() )?; Ok(()) From 751c8961031fd4bff9787a87c4ea6f4601ba0340 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 12 Sep 2021 16:32:03 -0400 Subject: [PATCH 092/775] lib/container/import: Add option to verify signatures This is a thin wrapper for the tar option to verify a signature. 
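To make the new knob concrete, here is a caller-side sketch (not part of the patch itself; the remote name and error handling are illustrative, and the remote must already exist in the destination repo's configuration):

```
use ostree_ext::container::{ImageReference, ImportOptions};

/// Sketch: fetch a container image, requiring ostree-level signature
/// verification against a pre-configured ostree remote.
async fn import_verified(
    repo: &ostree::Repo,
    imgref: &ImageReference,
    remote: &str,
) -> anyhow::Result<String> {
    let opts = ImportOptions {
        remote: Some(remote.to_string()),
        ..Default::default()
    };
    // Internally this is forwarded to tar::TarImportOptions::remote.
    let import = ostree_ext::container::import(repo, imgref, Some(opts)).await?;
    Ok(import.ostree_commit)
}
```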
---
 lib/src/cli.rs              |  8 ++++--
 lib/src/container/import.rs | 21 ++++++++++++---
 lib/tests/it/main.rs        | 53 +++++++++++++++++++++++++++++--------
 3 files changed, 65 insertions(+), 17 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 1a93dbaee..05eaafd7d 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -12,7 +12,7 @@ use std::convert::TryInto;
 use std::ffi::OsString;
 use structopt::StructOpt;
 
-use crate::container::Config;
+use crate::container::{Config, ImportOptions};
 
 #[derive(Debug, StructOpt)]
 struct BuildOpts {
@@ -166,7 +166,11 @@ async fn container_import(repo: &str, imgref: &str, write_ref: Option<&str>) ->
     pb.set_style(style.template("{spinner} {prefix} {msg}"));
     pb.enable_steady_tick(200);
     pb.set_message("Downloading...");
-    let import = crate::container::import(repo, &imgref, Some(tx_progress));
+    let opts = ImportOptions {
+        progress: Some(tx_progress),
+        ..Default::default()
+    };
+    let import = crate::container::import(repo, &imgref, Some(opts));
     tokio::pin!(import);
     tokio::pin!(rx_progress);
     let import = loop {
diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs
index c015711a6..562b81ce7 100644
--- a/lib/src/container/import.rs
+++ b/lib/src/container/import.rs
@@ -286,21 +286,34 @@ fn find_layer_blobid(manifest: &oci::Manifest) -> Result<String> {
     }
 }
 
+/// Configuration for container fetches.
+#[derive(Debug, Default)]
+pub struct ImportOptions {
+    /// Name of the remote to use for signature verification.
+    pub remote: Option<String>,
+    /// Channel which will receive progress updates
+    pub progress: Option<tokio::sync::watch::Sender<ImportProgress>>,
+}
+
 /// Fetch a container image and import its embedded OSTree commit.
 #[context("Importing {}", imgref)]
-#[instrument(skip(repo, progress))]
+#[instrument(skip(repo, options))]
 pub async fn import(
     repo: &ostree::Repo,
     imgref: &ImageReference,
-    progress: Option<tokio::sync::watch::Sender<ImportProgress>>,
+    options: Option<ImportOptions>,
 ) -> Result<Import> {
+    let options = options.unwrap_or_default();
     let (manifest, image_digest) = fetch_manifest(imgref).await?;
     let manifest = &manifest;
     let layerid = find_layer_blobid(manifest)?;
     event!(Level::DEBUG, "target blob: {}", layerid);
-    let (blob, worker) = fetch_layer(imgref, layerid.as_str(), progress).await?;
+    let (blob, worker) = fetch_layer(imgref, layerid.as_str(), options.progress).await?;
     let blob = tokio::io::BufReader::new(blob);
-    let import = crate::tar::import_tar(repo, blob, None);
+    let taropts = crate::tar::TarImportOptions {
+        remote: options.remote,
+    };
+    let import = crate::tar::import_tar(repo, blob, Some(taropts));
     let (ostree_commit, worker) = tokio::join!(import, worker);
     let ostree_commit = ostree_commit?;
     let _: () = worker?;
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index fcb5d1ff1..4a0dfaa27 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -2,7 +2,7 @@ use anyhow::{Context, Result};
 use camino::{Utf8Path, Utf8PathBuf};
 use fn_error_context::context;
 use indoc::indoc;
-use ostree_ext::container::{Config, ImageReference, Transport};
+use ostree_ext::container::{Config, ImageReference, ImportOptions, Transport};
 use ostree_ext::tar::TarImportOptions;
 use ostree_ext::{gio, glib};
 use sh_inline::bash;
@@ -21,7 +21,7 @@ const EXAMPLEOS_CONTENT_CHECKSUM: &str =
 
 fn assert_err_contains<T>(r: Result<T>, s: impl AsRef<str>) {
     let s = s.as_ref();
-    let msg = r.err().unwrap().to_string();
+    let msg = format!("{:#}", r.err().unwrap());
     if !msg.contains(s) {
         panic!(r#"Error message "{}" did not contain "{}""#, msg, s);
     }
@@ -232,13 +232,9 @@ fn skopeo_inspect(imgref: &str) -> Result<String> {
 
 #[tokio::test]
 async fn
test_container_import_export() -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; - - let tempdir = tempfile::tempdir_in("/var/tmp")?; - let path = Utf8Path::from_path(tempdir.path()).unwrap(); - let srcdir = &path.join("src"); + let fixture = Fixture::new()?; + let srcdir = &fixture.path.join("src"); std::fs::create_dir(srcdir)?; - let destdir = &path.join("dest"); - std::fs::create_dir(destdir)?; let srcrepopath = &generate_test_repo(srcdir)?; let srcrepo = &ostree::Repo::new_for_path(srcrepopath); srcrepo.open(cancellable)?; @@ -246,8 +242,6 @@ async fn test_container_import_export() -> Result<()> { .resolve_rev(TESTREF, false) .context("Failed to resolve ref")? .unwrap(); - let destrepo = &ostree::Repo::new_for_path(destdir); - destrepo.create(ostree::RepoMode::BareUser, cancellable)?; let srcoci_path = &srcdir.join("oci"); let srcoci = ImageReference { @@ -277,10 +271,47 @@ async fn test_container_import_export() -> Result<()> { let inspect = ostree_ext::container::fetch_manifest_info(&srcoci).await?; assert_eq!(inspect.manifest_digest, digest); - let import = ostree_ext::container::import(destrepo, &srcoci, None) + // No remote matching + let opts = ImportOptions { + remote: Some("unknownremote".to_string()), + ..Default::default() + }; + let r = ostree_ext::container::import(&fixture.destrepo, &srcoci, Some(opts)) + .await + .context("importing"); + assert_err_contains(r, r#"Remote "unknownremote" not found"#); + + // Test with a signature + let opts = glib::VariantDict::new(None); + opts.insert("gpg-verify", &true); + opts.insert("custom-backend", &"ostree-rs-ext"); + fixture + .destrepo + .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; + bash!( + "ostree --repo={repo} remote gpg-import --stdin myremote < {p}/gpghome/key1.asc", + repo = fixture.destrepo_path.as_str(), + p = srcdir.as_str() + )?; + + let opts = ImportOptions { + remote: Some("myremote".to_string()), + ..Default::default() + }; + + let import = ostree_ext::container::import(&fixture.destrepo, &srcoci, Some(opts)) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); + + // Test without signature verification + // Create a new repo + let fixture = Fixture::new()?; + let import = ostree_ext::container::import(&fixture.destrepo, &srcoci, None) + .await + .context("importing")?; + assert_eq!(import.ostree_commit, testrev.as_str()); + Ok(()) } From a11d17d23c6671e5a2d9d979edc26da2ca874b8b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 13 Sep 2021 11:03:15 -0400 Subject: [PATCH 093/775] lib/container: Introduce OstreeImageReference with SignatureSource I'm trying to put my foot down and say that *by default* booting and updating an operating system should require a cryptographic signature from the vendor as implemented by e.g. ostree (GPG or "signapi" which can do simple ed25519+libsodium signatures). Other signature mechanisms are possible and the containers/image ecosystem supports signatures, but they are not widely deployed. In order to implement ostree-based GPG/signapi verification, we need the configuration for an ostree remote. Hence, we support `ostree-remote-image:$remotename:$containerref`. For example, `ostree-remote-image:fedora:registry:quay.io/coreos/fedora-coreos:stable`. Having a canonical stringified representation of this will allow using it on command lines and in user interfaces. 
For example: ``` $ ostree remote add fedora --set=gpgkeypath=/etc/pki/rpm-gpg --custom-backend=ostree-ext $ ostree-ext-cli rebase ostree-remote-image:fedora:quay.io/coreos/fedora-coreos:stable ``` (`ostree-ext-cli rebase` doesn't exist, but maybe it will later) In addition, a signature policy of `ostree-image-signed` expresses that we won't do signature verification via ostree, but we *will* try to ensure that the containers/image (skopeo) path uses some signature mechanism. Finally we offer `ostree-unverified-image` to just boot a container without caring about signatures. --- lib/src/container/export.rs | 6 +- lib/src/container/import.rs | 31 +++++--- lib/src/container/mod.rs | 143 +++++++++++++++++++++++++++++++++++- lib/src/container/skopeo.rs | 87 ++++++++++++++++++++++ lib/tests/it/main.rs | 37 ++++++---- 5 files changed, 276 insertions(+), 28 deletions(-) diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 6fd098eb9..01ac3b015 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -122,9 +122,13 @@ async fn build_impl( return Err(anyhow::anyhow!("skopeo failed: {}\n", stderr)); } } + let imgref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: dest.to_owned(), + }; // FIXME - it's obviously broken to do this push -> inspect cycle because of the possibility // of a race condition, but we need to patch skopeo to have the equivalent of `podman push --digestfile`. - let info = super::import::fetch_manifest_info(dest).await?; + let info = super::import::fetch_manifest_info(&imgref).await?; Ok(dest.with_digest(info.manifest_digest.as_str())) } diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 562b81ce7..ca2e4d100 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -62,7 +62,9 @@ impl AsyncRead for ProgressReader { /// Download the manifest for a target image. #[context("Fetching manifest")] -pub async fn fetch_manifest_info(imgref: &ImageReference) -> Result { +pub async fn fetch_manifest_info( + imgref: &OstreeImageReference, +) -> Result { let (_, manifest_digest) = fetch_manifest(imgref).await?; // Sadly this seems to be lost when pushing to e.g. quay.io, which means we can't use it. // let commit = manifest @@ -76,9 +78,11 @@ pub async fn fetch_manifest_info(imgref: &ImageReference) -> Result Result<(oci::Manifest, String)> { +async fn fetch_manifest(imgref: &OstreeImageReference) -> Result<(oci::Manifest, String)> { let mut proc = skopeo::new_cmd(); - proc.args(&["inspect", "--raw"]).arg(imgref.to_string()); + let imgref_base = &imgref.imgref; + proc.args(&["inspect", "--raw"]) + .arg(imgref_base.to_string()); proc.stdout(Stdio::piped()); let proc = skopeo::spawn(proc)?.wait_with_output().await?; if !proc.status.success() { @@ -200,7 +204,7 @@ fn find_layer_tar_sync( /// Fetch a remote docker/OCI image and extract a specific uncompressed layer. async fn fetch_layer<'s>( - imgref: &ImageReference, + imgref: &OstreeImageReference, blobid: &str, progress: Option>, ) -> Result<( @@ -220,7 +224,7 @@ async fn fetch_layer<'s>( )?; tracing::trace!("skopeo pull starting to {}", fifo); proc.arg("copy") - .arg(imgref.to_string()) + .arg(imgref.imgref.to_string()) .arg(format!("docker-archive:{}", fifo)); let proc = skopeo::spawn(proc)?; let fifo_reader = ProgressReader { @@ -289,8 +293,6 @@ fn find_layer_blobid(manifest: &oci::Manifest) -> Result { /// Configuration for container fetches. 
#[derive(Debug, Default)] pub struct ImportOptions { - /// Name of the remote to use for signature verification. - pub remote: Option, /// Channel which will receive progress updates pub progress: Option>, } @@ -300,9 +302,14 @@ pub struct ImportOptions { #[instrument(skip(repo, options))] pub async fn import( repo: &ostree::Repo, - imgref: &ImageReference, + imgref: &OstreeImageReference, options: Option, ) -> Result { + if matches!(imgref.sigverify, SignatureSource::ContainerPolicy) + && skopeo::container_policy_is_default_insecure()? + { + return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); + } let options = options.unwrap_or_default(); let (manifest, image_digest) = fetch_manifest(imgref).await?; let manifest = &manifest; @@ -310,9 +317,11 @@ pub async fn import( event!(Level::DEBUG, "target blob: {}", layerid); let (blob, worker) = fetch_layer(imgref, layerid.as_str(), options.progress).await?; let blob = tokio::io::BufReader::new(blob); - let taropts = crate::tar::TarImportOptions { - remote: options.remote, - }; + let mut taropts: crate::tar::TarImportOptions = Default::default(); + match &imgref.sigverify { + SignatureSource::OstreeRemote(remote) => taropts.remote = Some(remote.clone()), + SignatureSource::ContainerPolicy | SignatureSource::ContainerPolicyAllowInsecure => {} + } let import = crate::tar::import_tar(repo, blob, Some(taropts)); let (ostree_commit, worker) = tokio::join!(import, worker); let ostree_commit = ostree_commit?; diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 2f9a6e846..f7dc7d753 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -40,7 +40,7 @@ pub enum Transport { /// Combination of a remote image reference and transport. /// /// For example, -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ImageReference { /// The storage and transport for the image pub transport: Transport, @@ -48,6 +48,29 @@ pub struct ImageReference { pub name: String, } +/// Policy for signature verification. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum SignatureSource { + /// Fetches will use the named ostree remote for signature verification of the ostree commit. + OstreeRemote(String), + /// Fetches will defer to the `containers-policy.json`, but we make a best effort to reject `default: insecureAcceptAnything` policy. + ContainerPolicy, + /// NOT RECOMMENDED. Fetches will defer to the `containers-policy.json` default which is usually `insecureAcceptAnything`. + ContainerPolicyAllowInsecure, +} + +/// Combination of an ostree remote (for signature verification) and an image reference. +/// +/// For example, myremote:docker://quay.io/somerepo/someimage.latest +#[derive(Debug, Clone)] +pub struct OstreeImageReference { + /// The ostree remote name. + /// This will be used for signature verification. + pub sigverify: SignatureSource, + /// The container image reference. + pub imgref: ImageReference, +} + impl ImageReference { /// Create a new `ImageReference` that refers to a specific digest. /// @@ -73,6 +96,23 @@ impl ImageReference { } } +impl OstreeImageReference { + /// Create a new `OstreeImageReference` that refers to a specific digest. 
+ /// + /// ```rust + /// use std::convert::TryInto; + /// let r: ostree_ext::container::OstreeImageReference = "ostree-remote-image:myremote:docker://quay.io/exampleos/exampleos:latest".try_into().unwrap(); + /// let n = r.with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + /// assert_eq!(n.imgref.name, "quay.io/exampleos/exampleos@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + /// ``` + pub fn with_digest(&self, digest: &str) -> Self { + Self { + sigverify: self.sigverify.clone(), + imgref: self.imgref.with_digest(digest), + } + } +} + impl TryFrom<&str> for Transport { type Error = anyhow::Error; @@ -112,6 +152,52 @@ impl TryFrom<&str> for ImageReference { } } +impl TryFrom<&str> for SignatureSource { + type Error = anyhow::Error; + + fn try_from(value: &str) -> Result { + match value { + "ostree-image-signed" => Ok(Self::ContainerPolicy), + "ostree-unverified-image" => Ok(Self::ContainerPolicyAllowInsecure), + o => match o.strip_prefix("ostree-remote-image:") { + Some(rest) => Ok(Self::OstreeRemote(rest.to_string())), + _ => Err(anyhow!("Invalid signature source: {}", o)), + }, + } + } +} + +impl TryFrom<&str> for OstreeImageReference { + type Error = anyhow::Error; + + fn try_from(value: &str) -> Result { + let mut parts = value.splitn(2, ':'); + // Safety: Split always returns at least one value. + let first = parts.next().unwrap(); + let mut second = parts + .next() + .ok_or_else(|| anyhow!("Missing ':' in {}", value))?; + let sigverify = match first { + "ostree-image-signed" => SignatureSource::ContainerPolicy, + "ostree-unverified-image" => SignatureSource::ContainerPolicyAllowInsecure, + "ostree-remote-image" => { + let mut subparts = second.splitn(2, ':'); + // Safety: Split always returns at least one value. 
+ let remote = subparts.next().unwrap(); + second = subparts + .next() + .ok_or_else(|| anyhow!("Missing second ':' in {}", value))?; + SignatureSource::OstreeRemote(remote.to_string()) + } + o => { + return Err(anyhow!("Invalid signature source: {}", o)); + } + }; + let imgref = second.try_into()?; + Ok(Self { sigverify, imgref }) + } +} + impl std::fmt::Display for Transport { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match self { @@ -131,6 +217,20 @@ impl std::fmt::Display for ImageReference { } } +impl std::fmt::Display for OstreeImageReference { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &self.sigverify { + SignatureSource::OstreeRemote(r) => { + write!(f, "ostree-remote-image:{}:{}", r, self.imgref) + } + SignatureSource::ContainerPolicy => write!(f, "ostree-image-signed:{}", self.imgref), + SignatureSource::ContainerPolicyAllowInsecure => { + write!(f, "ostree-unverified-image:{}", self.imgref) + } + } + } +} + mod export; pub use export::*; mod import; @@ -180,4 +280,45 @@ mod tests { assert_eq!(ir.transport, Transport::OciDir); assert_eq!(ir.name, "somedir"); } + + #[test] + fn test_ostreeimagereference() { + let ir_s = "ostree-remote-image:myremote:registry:quay.io/exampleos/blah"; + let ir: OstreeImageReference = ir_s.try_into().unwrap(); + assert_eq!( + ir.sigverify, + SignatureSource::OstreeRemote("myremote".to_string()) + ); + assert_eq!(ir.imgref.transport, Transport::Registry); + assert_eq!(ir.imgref.name, "quay.io/exampleos/blah"); + assert_eq!( + ir.to_string(), + "ostree-remote-image:myremote:docker://quay.io/exampleos/blah" + ); + + let digested = ir + .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + assert_eq!(digested.imgref.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); + assert_eq!(digested.with_digest("sha256:52f562806109f5746be31ccf21f5569fd2ce8c32deb0d14987b440ed39e34e20").imgref.name, "quay.io/exampleos/blah@sha256:52f562806109f5746be31ccf21f5569fd2ce8c32deb0d14987b440ed39e34e20"); + + let ir_s = "ostree-image-signed:docker://quay.io/exampleos/blah"; + let ir: OstreeImageReference = ir_s.try_into().unwrap(); + assert_eq!(ir.sigverify, SignatureSource::ContainerPolicy); + assert_eq!(ir.imgref.transport, Transport::Registry); + assert_eq!(ir.imgref.name, "quay.io/exampleos/blah"); + assert_eq!( + ir.to_string(), + "ostree-image-signed:docker://quay.io/exampleos/blah" + ); + + let ir_s = "ostree-unverified-image:docker://quay.io/exampleos/blah"; + let ir: OstreeImageReference = ir_s.try_into().unwrap(); + assert_eq!(ir.sigverify, SignatureSource::ContainerPolicyAllowInsecure); + assert_eq!(ir.imgref.transport, Transport::Registry); + assert_eq!(ir.imgref.name, "quay.io/exampleos/blah"); + assert_eq!( + ir.to_string(), + "ostree-unverified-image:docker://quay.io/exampleos/blah" + ); + } } diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs index 6f5d91400..50d02ea7e 100644 --- a/lib/src/container/skopeo.rs +++ b/lib/src/container/skopeo.rs @@ -2,9 +2,42 @@ use super::Result; use anyhow::Context; +use serde::Deserialize; use std::process::Stdio; use tokio::process::Command; +const POLICY_PATH: &str = "/etc/containers/policy.json"; +const INSECURE_ACCEPT_ANYTHING: &str = "insecureAcceptAnything"; + +#[derive(Deserialize)] +struct PolicyEntry { + #[serde(rename = "type")] + ty: String, +} +#[derive(Deserialize)] +struct ContainerPolicy { + default: Option>, +} + +impl 
ContainerPolicy { + fn is_default_insecure(&self) -> bool { + if let Some(default) = self.default.as_deref() { + match default.split_first() { + Some((v, &[])) => return v.ty == INSECURE_ACCEPT_ANYTHING, + _ => false, + } + } else { + false + } + } +} + +pub(crate) fn container_policy_is_default_insecure() -> Result { + let r = std::io::BufReader::new(std::fs::File::open(POLICY_PATH)?); + let policy: ContainerPolicy = serde_json::from_reader(r)?; + Ok(policy.is_default_insecure()) +} + /// Create a Command builder for skopeo. pub(crate) fn new_cmd() -> tokio::process::Command { let mut cmd = Command::new("skopeo"); @@ -18,3 +51,57 @@ pub(crate) fn spawn(mut cmd: Command) -> Result { let cmd = cmd.stdin(Stdio::null()).stderr(Stdio::piped()); cmd.spawn().context("Failed to exec skopeo") } + +#[cfg(test)] +mod tests { + use super::*; + + // Default value as of the Fedora 34 containers-common-1-21.fc34.noarch package. + const DEFAULT_POLICY: &str = indoc::indoc! {r#" + { + "default": [ + { + "type": "insecureAcceptAnything" + } + ], + "transports": + { + "docker-daemon": + { + "": [{"type":"insecureAcceptAnything"}] + } + } + } + "#}; + + // Stripped down copy from the manual. + const REASONABLY_LOCKED_DOWN: &str = indoc::indoc! { r#" + { + "default": [{"type": "reject"}], + "transports": { + "dir": { + "": [{"type": "insecureAcceptAnything"}] + }, + "atomic": { + "hostname:5000/myns/official": [ + { + "type": "signedBy", + "keyType": "GPGKeys", + "keyPath": "/path/to/official-pubkey.gpg" + } + ] + } + } + } + "#}; + + #[test] + fn policy_is_insecure() { + let p: ContainerPolicy = serde_json::from_str(DEFAULT_POLICY).unwrap(); + assert!(p.is_default_insecure()); + for &v in &["{}", REASONABLY_LOCKED_DOWN] { + let p: ContainerPolicy = serde_json::from_str(v).unwrap(); + assert!(!p.is_default_insecure()); + } + } +} diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 4a0dfaa27..9c4038ab0 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -2,7 +2,9 @@ use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use indoc::indoc; -use ostree_ext::container::{Config, ImageReference, ImportOptions, Transport}; +use ostree_ext::container::{ + Config, ImageReference, OstreeImageReference, SignatureSource, Transport, +}; use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; use sh_inline::bash; @@ -244,7 +246,7 @@ async fn test_container_import_export() -> Result<()> { .unwrap(); let srcoci_path = &srcdir.join("oci"); - let srcoci = ImageReference { + let srcoci_imgref = ImageReference { transport: Transport::OciDir, name: srcoci_path.as_str().to_string(), }; @@ -257,26 +259,31 @@ async fn test_container_import_export() -> Result<()> { ), cmd: Some(vec!["/bin/bash".to_string()]), }; - let pushed = ostree_ext::container::export(srcrepo, TESTREF, &config, &srcoci) + let pushed = ostree_ext::container::export(srcrepo, TESTREF, &config, &srcoci_imgref) .await .context("exporting")?; assert!(srcoci_path.exists()); let digest = pushed.name.rsplitn(2, '@').next().unwrap(); - let inspect = skopeo_inspect(&srcoci.to_string())?; + let inspect = skopeo_inspect(&srcoci_imgref.to_string())?; assert!(inspect.contains(r#""version": "42.0""#)); assert!(inspect.contains(r#""foo": "bar""#)); assert!(inspect.contains(r#""test": "value""#)); - let inspect = ostree_ext::container::fetch_manifest_info(&srcoci).await?; + let srcoci_unverified = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: 
srcoci_imgref.clone(), + }; + + let inspect = ostree_ext::container::fetch_manifest_info(&srcoci_unverified).await?; assert_eq!(inspect.manifest_digest, digest); // No remote matching - let opts = ImportOptions { - remote: Some("unknownremote".to_string()), - ..Default::default() + let srcoci_unknownremote = OstreeImageReference { + sigverify: SignatureSource::OstreeRemote("unknownremote".to_string()), + imgref: srcoci_imgref.clone(), }; - let r = ostree_ext::container::import(&fixture.destrepo, &srcoci, Some(opts)) + let r = ostree_ext::container::import(&fixture.destrepo, &srcoci_unknownremote, None) .await .context("importing"); assert_err_contains(r, r#"Remote "unknownremote" not found"#); @@ -294,12 +301,12 @@ async fn test_container_import_export() -> Result<()> { p = srcdir.as_str() )?; - let opts = ImportOptions { - remote: Some("myremote".to_string()), - ..Default::default() + // No remote matching + let srcoci_verified = OstreeImageReference { + sigverify: SignatureSource::OstreeRemote("myremote".to_string()), + imgref: srcoci_imgref.clone(), }; - - let import = ostree_ext::container::import(&fixture.destrepo, &srcoci, Some(opts)) + let import = ostree_ext::container::import(&fixture.destrepo, &srcoci_verified, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); @@ -307,7 +314,7 @@ async fn test_container_import_export() -> Result<()> { // Test without signature verification // Create a new repo let fixture = Fixture::new()?; - let import = ostree_ext::container::import(&fixture.destrepo, &srcoci, None) + let import = ostree_ext::container::import(&fixture.destrepo, &srcoci_unverified, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); From d561a2a6d4eac973761a0330d8f3a0ab00f12461 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 14 Sep 2021 19:51:03 -0400 Subject: [PATCH 094/775] container: Add support for `ostree-remote-registry` This is a convenient shorthand for the very common case of fetching from a registry (i.e. `docker://`). --- lib/src/container/mod.rs | 63 ++++++++++++++++++++++++++++------------ 1 file changed, 44 insertions(+), 19 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index f7dc7d753..3efcd3dda 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -9,7 +9,9 @@ #![deny(unsafe_code)] use anyhow::anyhow; +use std::borrow::Cow; use std::convert::{TryFrom, TryInto}; +use std::ops::Deref; /// The label injected into a container image that contains the ostree commit SHA-256. pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit"; @@ -174,26 +176,43 @@ impl TryFrom<&str> for OstreeImageReference { let mut parts = value.splitn(2, ':'); // Safety: Split always returns at least one value. let first = parts.next().unwrap(); - let mut second = parts + let second = parts .next() .ok_or_else(|| anyhow!("Missing ':' in {}", value))?; - let sigverify = match first { - "ostree-image-signed" => SignatureSource::ContainerPolicy, - "ostree-unverified-image" => SignatureSource::ContainerPolicyAllowInsecure, - "ostree-remote-image" => { + let (sigverify, rest) = match first { + "ostree-image-signed" => (SignatureSource::ContainerPolicy, Cow::Borrowed(second)), + "ostree-unverified-image" => ( + SignatureSource::ContainerPolicyAllowInsecure, + Cow::Borrowed(second), + ), + "ostree-remote-registry" => { let mut subparts = second.splitn(2, ':'); // Safety: Split always returns at least one value. 
let remote = subparts.next().unwrap(); - second = subparts + let rest = subparts .next() .ok_or_else(|| anyhow!("Missing second ':' in {}", value))?; - SignatureSource::OstreeRemote(remote.to_string()) + ( + SignatureSource::OstreeRemote(remote.to_string()), + Cow::Owned(format!("registry:{}", rest)), + ) + } + "ostree-remote-image" => { + let mut subparts = second.splitn(2, ':'); + // Safety: Split always returns at least one value. + let remote = subparts.next().unwrap(); + let second = Cow::Borrowed( + subparts + .next() + .ok_or_else(|| anyhow!("Missing second ':' in {}", value))?, + ); + (SignatureSource::OstreeRemote(remote.to_string()), second) } o => { return Err(anyhow!("Invalid signature source: {}", o)); } }; - let imgref = second.try_into()?; + let imgref = rest.deref().try_into()?; Ok(Self { sigverify, imgref }) } } @@ -283,19 +302,25 @@ mod tests { #[test] fn test_ostreeimagereference() { + // Test both long form `ostree-remote-image:$myremote:registry` and the + // shorthand `ostree-remote-registry:$myremote`. let ir_s = "ostree-remote-image:myremote:registry:quay.io/exampleos/blah"; - let ir: OstreeImageReference = ir_s.try_into().unwrap(); - assert_eq!( - ir.sigverify, - SignatureSource::OstreeRemote("myremote".to_string()) - ); - assert_eq!(ir.imgref.transport, Transport::Registry); - assert_eq!(ir.imgref.name, "quay.io/exampleos/blah"); - assert_eq!( - ir.to_string(), - "ostree-remote-image:myremote:docker://quay.io/exampleos/blah" - ); + let ir_registry = "ostree-remote-registry:myremote:quay.io/exampleos/blah"; + for &ir_s in &[ir_s, ir_registry] { + let ir: OstreeImageReference = ir_s.try_into().unwrap(); + assert_eq!( + ir.sigverify, + SignatureSource::OstreeRemote("myremote".to_string()) + ); + assert_eq!(ir.imgref.transport, Transport::Registry); + assert_eq!(ir.imgref.name, "quay.io/exampleos/blah"); + assert_eq!( + ir.to_string(), + "ostree-remote-image:myremote:docker://quay.io/exampleos/blah" + ); + } + let ir: OstreeImageReference = ir_s.try_into().unwrap(); let digested = ir .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); assert_eq!(digested.imgref.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); From 177b0cc5527ffe33a3eff6ff47a2e5fed08bad5d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 15 Sep 2021 12:49:32 -0400 Subject: [PATCH 095/775] lib/src/container: Add some more comments From code review. --- lib/src/container/mod.rs | 1 + lib/src/container/skopeo.rs | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 3efcd3dda..371caa783 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -185,6 +185,7 @@ impl TryFrom<&str> for OstreeImageReference { SignatureSource::ContainerPolicyAllowInsecure, Cow::Borrowed(second), ), + // This is a shorthand for ostree-remote-image with registry: "ostree-remote-registry" => { let mut subparts = second.splitn(2, ':'); // Safety: Split always returns at least one value. 
diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs index 50d02ea7e..65744cf06 100644 --- a/lib/src/container/skopeo.rs +++ b/lib/src/container/skopeo.rs @@ -6,6 +6,10 @@ use serde::Deserialize; use std::process::Stdio; use tokio::process::Command; +// See `man containers-policy.json` and +// https://github.com/containers/image/blob/main/signature/policy_types.go +// Ideally we add something like `skopeo pull --disallow-insecure-accept-anything` +// but for now we parse the policy. const POLICY_PATH: &str = "/etc/containers/policy.json"; const INSECURE_ACCEPT_ANYTHING: &str = "insecureAcceptAnything"; From 7eaf014e206db3a4b657b3ce9523f869fccfaeb5 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 15 Sep 2021 12:54:09 -0400 Subject: [PATCH 096/775] lib/src/container: Add PartialEq+Eq for image references Since we now have aliases, it will be very useful for code to be able to compare them for equality so we can properly detect changes. --- lib/src/container/mod.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 371caa783..7338aca68 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -42,7 +42,7 @@ pub enum Transport { /// Combination of a remote image reference and transport. /// /// For example, -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct ImageReference { /// The storage and transport for the image pub transport: Transport, @@ -64,7 +64,7 @@ pub enum SignatureSource { /// Combination of an ostree remote (for signature verification) and an image reference. /// /// For example, myremote:docker://quay.io/somerepo/someimage.latest -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct OstreeImageReference { /// The ostree remote name. /// This will be used for signature verification. @@ -322,6 +322,9 @@ mod tests { } let ir: OstreeImageReference = ir_s.try_into().unwrap(); + // test our Eq implementation + assert_eq!(&ir, &OstreeImageReference::try_from(ir_registry).unwrap()); + let digested = ir .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); assert_eq!(digested.imgref.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); From 8ec8200b160fe58d90e49acc509e08342427e130 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 15 Sep 2021 14:34:55 -0400 Subject: [PATCH 097/775] (cargo-release) start next development iteration 0.3.1-alpha.0 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index d55ff0961..fb1056091 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.3.0" +version = "0.3.1-alpha.0" [dependencies] anyhow = "1.0" From 1c6e9a6c834630a1153b674ec7f6d2f6821b8bb4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 15 Sep 2021 17:02:42 -0400 Subject: [PATCH 098/775] lib: Fix misc clippy issues Nothing major. 
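The main pattern in the diffs that follow: when a trailing `match` already evaluates to the function's result, clippy prefers the arm expression over an explicit `return`. A minimal standalone illustration (constructed for this note, not lifted from the diff):

```
use anyhow::{anyhow, Result};

fn check_single_commit(already_seen_commit: bool) -> Result<()> {
    match already_seen_commit {
        // Before: `true => { return Err(anyhow!(...)); }`
        true => Err(anyhow!("Found multiple commit objects")),
        false => Ok(()),
    }
}
```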
---
 lib/src/cli.rs              | 1 -
 lib/src/container/skopeo.rs | 2 +-
 lib/src/tar/import.rs       | 4 +---
 3 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 05eaafd7d..1ad0201b3 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -168,7 +168,6 @@ async fn container_import(repo: &str, imgref: &str, write_ref: Option<&str>) ->
     pb.set_message("Downloading...");
     let opts = ImportOptions {
         progress: Some(tx_progress),
-        ..Default::default()
     };
     let import = crate::container::import(repo, &imgref, Some(opts));
diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs
index 65744cf06..ace8bb645 100644
--- a/lib/src/container/skopeo.rs
+++ b/lib/src/container/skopeo.rs
@@ -27,7 +27,7 @@ impl ContainerPolicy {
     fn is_default_insecure(&self) -> bool {
         if let Some(default) = self.default.as_deref() {
             match default.split_first() {
-                Some((v, &[])) => return v.ty == INSECURE_ACCEPT_ANYTHING,
+                Some((v, &[])) => v.ty == INSECURE_ACCEPT_ANYTHING,
                 _ => false,
             }
         } else {
diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index d6d64d0b1..f898b5ce0 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -379,9 +379,7 @@ impl Importer {
             return Err(anyhow!("Found xattrs for non-file object type {}", objtype));
         }
         match objtype {
-            ostree::ObjectType::Commit => {
-                return Err(anyhow!("Found multiple commit objects"));
-            }
+            ostree::ObjectType::Commit => Err(anyhow!("Found multiple commit objects")),
             ostree::ObjectType::File => {
                 if is_xattrs {
                     self.import_xattr_ref(entry, checksum)

From b9c90343478bcb99ca0b55fb1840c66e09e808e5 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 15 Sep 2021 18:04:54 -0400
Subject: [PATCH 099/775] lib/{tar,container}: Add a lot more module level docs

This overlaps with the `README.md` but I plan to tweak things
in a bit to have that link to `docs.rs`.
---
 lib/src/container/mod.rs | 32 ++++++++++++++++++++++++++------
 lib/src/tar/mod.rs       | 33 +++++++++++++++++++++++++++++++--
 2 files changed, 57 insertions(+), 8 deletions(-)

diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index 7338aca68..4cb3bff57 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -1,7 +1,29 @@
 //! # APIs bridging OSTree and container images
 //!
-//! This crate contains APIs to bidirectionally map
-//! between OSTree repositories and container images.
+//! This module contains APIs to bidirectionally map between a single OSTree commit and a container image wrapping it.
+//! Because container images are just layers of tarballs, this builds on the [`crate::tar`] module.
+//!
+//! To emphasize this, the current high level model is that this is a one-to-one mapping - an ostree commit
+//! can be exported (wrapped) into a container image, which will have exactly one layer.  Upon import
+//! back into an ostree repository, all container metadata except for its digested checksum will be discarded.
+//!
+//! ## Signatures
+//!
+//! OSTree supports GPG and ed25519 signatures natively, and it's expected by default that
+//! when booting from a fetched container image, one verifies ostree-level signatures.
+//! For ostree, a signing configuration is specified via an ostree remote.  In order to
+//! pair this configuration together, this library defines a "URL-like" string schema:
+//!
+//! `ostree-remote-registry:<remotename>:<containerref>`
+//!
+//! A concrete instantiation might be e.g.: `ostree-remote-registry:fedora:quay.io/coreos/fedora-coreos:stable`
+//!
+//!
To parse and generate these strings, see [`OstreeImageReference`]. +//! +//! ## Layering +//! +//! A key feature of container images is support for layering. At the moment, support +//! for this is [planned but not implemented](https://github.com/ostreedev/ostree-rs-ext/issues/12). //#![deny(missing_docs)] // Good defaults @@ -61,13 +83,11 @@ pub enum SignatureSource { ContainerPolicyAllowInsecure, } -/// Combination of an ostree remote (for signature verification) and an image reference. +/// Combination of a signature verification mechanism, and a standard container image reference. /// -/// For example, myremote:docker://quay.io/somerepo/someimage.latest #[derive(Debug, Clone, PartialEq, Eq)] pub struct OstreeImageReference { - /// The ostree remote name. - /// This will be used for signature verification. + /// The signature verification mechanism. pub sigverify: SignatureSource, /// The container image reference. pub imgref: ImageReference, diff --git a/lib/src/tar/mod.rs b/lib/src/tar/mod.rs index 241c6a920..ee3e41cf9 100644 --- a/lib/src/tar/mod.rs +++ b/lib/src/tar/mod.rs @@ -1,7 +1,36 @@ //! # Losslessly export and import ostree commits as tar archives //! -//! Convert an ostree commit into a tarball stream, and import -//! it again. +//! Convert an ostree commit into a tarball stream, and import it again, including +//! support for OSTree signature verification. +//! +//! In the current libostree C library, while it supports export to tar, this +//! process is lossy - commit metadata is discarded. Further, re-importing +//! requires recalculating all of the object checksums, and tying these +//! together, it does not support verifying ostree level cryptographic signatures +//! such as GPG/ed25519. +//! +//! # Tar stream layout +//! +//! In order to solve these problems, this new tar serialization format effectively +//! combines *both* a `/ostree/repo/objects` directory and a checkout in `/usr`, where +//! the latter are hardlinks to the former. +//! +//! The exported stream will have the ostree metadata first; in particular the commit object. +//! Following the commit object is the `.commitmeta` object, which contains any cryptographic +//! signatures. +//! +//! This library then supports verifying the pair of (commit, commitmeta) using an ostree +//! remote, in the same way that `ostree pull` will do. +//! +//! The remainder of the stream is a breadth-first traversal of dirtree/dirmeta objects and the +//! content objects they reference. +//! +//! # Extended attributes +//! +//! Extended attributes are a complex subject for tar, which has many variants. Further, +//! when exporting bootable ostree commits to container images, it is not actually desired +//! to have the container runtime try to unpack and apply those. For this reason, this module +//! serializes extended attributes into separate `.xattr` files associated with each ostree object. //#![deny(missing_docs)] // Good defaults From 5442771dd7e75d3128c13736c6688f4e431a7608 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 15 Sep 2021 18:06:16 -0400 Subject: [PATCH 100/775] lib/cli: Fix link to clap Because we're not importing `clap` directly but only indirectly via `structopt`, we got a warning from `cargo doc`. 
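As a sanity check on the reference schema in the module docs above, here is a sketch of round-tripping one of these strings (expected values inferred from the parser and tests added in the earlier patches):

```
use std::convert::TryFrom;
use ostree_ext::container::{OstreeImageReference, SignatureSource, Transport};

fn parse_docs_example() -> anyhow::Result<()> {
    let s = "ostree-remote-registry:fedora:quay.io/coreos/fedora-coreos:stable";
    let r = OstreeImageReference::try_from(s)?;
    assert_eq!(r.sigverify, SignatureSource::OstreeRemote("fedora".to_string()));
    assert_eq!(r.imgref.transport, Transport::Registry);
    assert_eq!(r.imgref.name, "quay.io/coreos/fedora-coreos:stable");
    // Display canonicalizes the registry shorthand back to the long form.
    assert_eq!(
        r.to_string(),
        "ostree-remote-image:fedora:docker://quay.io/coreos/fedora-coreos:stable"
    );
    Ok(())
}
```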
--- lib/src/cli.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 05eaafd7d..c696a290e 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -252,7 +252,7 @@ fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { } /// Parse the provided arguments and execute. -/// Calls [`clap::Error::exit`] on failure, printing the error message and aborting the program. +/// Calls [`structopt::clap::Error::exit`] on failure, printing the error message and aborting the program. pub async fn run_from_iter(args: I) -> Result<()> where I: IntoIterator, From c9675ed2bdcc07ec9ee937fb1fe8f48809a805e3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Sep 2021 10:08:11 -0400 Subject: [PATCH 101/775] lib/container/import: Drop commented code Since it won't work in the near future and we don't need it, just drop it. --- lib/src/container/import.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index ca2e4d100..67e3f46de 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -66,13 +66,6 @@ pub async fn fetch_manifest_info( imgref: &OstreeImageReference, ) -> Result { let (_, manifest_digest) = fetch_manifest(imgref).await?; - // Sadly this seems to be lost when pushing to e.g. quay.io, which means we can't use it. - // let commit = manifest - // .annotations - // .as_ref() - // .map(|a| a.get(OSTREE_COMMIT_LABEL)) - // .flatten() - // .ok_or_else(|| anyhow!("Missing annotation {}", OSTREE_COMMIT_LABEL))?; Ok(OstreeContainerManifestInfo { manifest_digest }) } From 9d3db6035cfb84f00bc2ceec44713a69f5b99b10 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Sep 2021 10:31:09 -0400 Subject: [PATCH 102/775] lib/container/import: Minor code cleanup Prep for future work. --- lib/src/container/import.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 67e3f46de..10f5f4e46 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -141,20 +141,15 @@ fn find_layer_tar_sync( continue; } let path = entry.path()?; - let path = &*path; - let path = - Utf8Path::from_path(path).ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", path))?; - let t = entry.header().entry_type(); - + let path: &Utf8Path = path.deref().try_into()?; // We generally expect our layer to be first, but let's just skip anything // unexpected to be robust against changes in skopeo. 
if path.extension() != Some("tar") { continue; } - event!(Level::DEBUG, "Found {}", path); - match t { + match entry.header().entry_type() { tar::EntryType::Symlink => { if let Some(name) = path.file_name() { if name == "layer.tar" { From fe48934288b6753ec6f189e1efa90c0a004e010e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Sep 2021 10:34:17 -0400 Subject: [PATCH 103/775] lib/container/import: Add a clarifying comment --- lib/src/container/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 10f5f4e46..aaf5827f1 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -123,7 +123,7 @@ pub async fn find_layer_tar( Ok((reader, worker)) } -// Helper function invoked to synchronously parse a tar stream, finding +// Helper function invoked to synchronously parse a `docker-archive:` formatted tar stream, finding // the desired layer tarball and writing its contents via a stream of byte chunks // to a channel. fn find_layer_tar_sync( From e2a1095497867ad230fcf468d5b70123cd3a3bbc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Sep 2021 11:42:09 -0400 Subject: [PATCH 104/775] lib/container/import: Add some implementation docs Was debating making this public module doc, but I think documenting the implementation right now is most useful. --- lib/src/container/import.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index aaf5827f1..8ddc0b79a 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -1,5 +1,33 @@ //! APIs for extracting OSTree commits from container images +// # Implementation +// +// This code currently forks off `/usr/bin/skopeo` as a subprocess, and uses +// it to fetch the container content and convert it into a `docker-archive:` +// formatted tarball stream, which is written to a FIFO and parsed by +// this code. +// +// The rationale for this is that `/usr/bin/skopeo` is a frontend for +// the Go library https://github.com/containers/image/ which supports +// key things we want for production use like: +// +// - Image mirroring and remapping; effectively `man containers-registries.conf` +// For example, we need to support an administrator mirroring an ostree-container +// into a disconnected registry, without changing all the pull specs. +// - Signing +// +// # Import phases +// +// First, we support explicitly fetching just the manifest: https://github.com/opencontainers/image-spec/blob/main/manifest.md +// This will give us information about the layers it contains, and crucially the digest (sha256) of +// the manifest is how higher level software can detect changes. +// +// Once we have the manifest, we expect it to point to a single `application/vnd.oci.image.layer.v1.tar+gzip` layer, +// which is exactly what is exported by the [`crate::tar::export`] process. +// +// What we get from skopeo is a `docker-archive:` tarball, which then will contain this *inner* tarball +// layer that we extract and pass to the [`crate::tar::import`] code. + use super::*; use anyhow::{anyhow, Context}; use camino::Utf8Path; From 53aa5e40d0b87c61cfef10accf3bcfc89f18104f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Sep 2021 16:05:01 -0400 Subject: [PATCH 105/775] lib: Bump to 0.4.0-alpha.0 I plan to make some API changes in the `container` module at least. 
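A usage note on the current `container` fetch API before those changes land: the import phases documented above mean the manifest digest alone suffices for cheap change detection. A hedged sketch (the caller-tracked `last_digest` is an assumption, not part of the crate API):

```
use ostree_ext::container::OstreeImageReference;

/// Sketch: fetch just the manifest first; only pull the layer and import
/// the commit when the manifest digest has changed.
async fn import_if_changed(
    repo: &ostree::Repo,
    imgref: &OstreeImageReference,
    last_digest: Option<&str>,
) -> anyhow::Result<Option<String>> {
    let info = ostree_ext::container::fetch_manifest_info(imgref).await?;
    if Some(info.manifest_digest.as_str()) == last_digest {
        return Ok(None); // Manifest unchanged; nothing to import.
    }
    let import = ostree_ext::container::import(repo, imgref, None).await?;
    Ok(Some(import.ostree_commit))
}
```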
--- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index fb1056091..584661609 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.3.1-alpha.0" +version = "0.4.0-alpha.0" [dependencies] anyhow = "1.0" From 243b43d08521a28777354e7b225aa84b171c0526 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Sep 2021 10:33:16 -0400 Subject: [PATCH 106/775] container/import: Check skopeo errors first Right now a failure during skopeo pull when ostree-remote verification is enabled actually manifests as "no commit found". Prioritize emitting a skopeo error, because a failure there probably has the real error. --- lib/src/container/import.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 8ddc0b79a..5e4ac4545 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -340,8 +340,10 @@ pub async fn import( } let import = crate::tar::import_tar(repo, blob, Some(taropts)); let (ostree_commit, worker) = tokio::join!(import, worker); - let ostree_commit = ostree_commit?; + // Let any errors from skopeo take precedence, because a failure to parse/find the layer tarball + // is likely due to an underlying error from that. let _: () = worker?; + let ostree_commit = ostree_commit?; event!(Level::DEBUG, "created commit {}", ostree_commit); Ok(Import { ostree_commit, From ed246cb48d7139f62b40d1a5ffa377c47205c39d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Sep 2021 11:30:38 -0400 Subject: [PATCH 107/775] lib/container: Remove with_digest() method Per `man containers-transports`, transports like `oci-archive:` don't support digests, and even major ones like `containers-storage:` and `docker://` don't quite handle them in the same way. For now, avoid trying to mutate transport references. It's just the push path that was doing this; instead just return the digest as a string. We will likely need to special case `docker://` but that can come later. --- lib/src/container/export.rs | 6 ++-- lib/src/container/mod.rs | 59 ------------------------------------- lib/tests/it/main.rs | 3 +- 3 files changed, 4 insertions(+), 64 deletions(-) diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 01ac3b015..c66180dd1 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -89,7 +89,7 @@ async fn build_impl( ostree_ref: &str, config: &Config, dest: &ImageReference, -) -> Result { +) -> Result { let compression = if dest.transport == Transport::ContainerStorage { Some(flate2::Compression::none()) } else { @@ -129,7 +129,7 @@ async fn build_impl( // FIXME - it's obviously broken to do this push -> inspect cycle because of the possibility // of a race condition, but we need to patch skopeo to have the equivalent of `podman push --digestfile`. let info = super::import::fetch_manifest_info(&imgref).await?; - Ok(dest.with_digest(info.manifest_digest.as_str())) + Ok(info.manifest_digest) } /// Given an OSTree repository and ref, generate a container image. 
@@ -140,6 +140,6 @@ pub async fn export>( ostree_ref: S, config: &Config, dest: &ImageReference, -) -> Result { +) -> Result { build_impl(repo, ostree_ref.as_ref(), config, dest).await } diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 4cb3bff57..f0791d20d 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -93,48 +93,6 @@ pub struct OstreeImageReference { pub imgref: ImageReference, } -impl ImageReference { - /// Create a new `ImageReference` that refers to a specific digest. - /// - /// ```rust - /// use std::convert::TryInto; - /// let r: ostree_ext::container::ImageReference = "docker://quay.io/exampleos/exampleos:latest".try_into().unwrap(); - /// let n = r.with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - /// assert_eq!(n.name, "quay.io/exampleos/exampleos@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - /// ``` - pub fn with_digest(&self, digest: &str) -> Self { - let name = self.name.as_str(); - let name = if let Some(idx) = name.rfind('@') { - name.split_at(idx).0 - } else if let Some(idx) = name.rfind(':') { - name.split_at(idx).0 - } else { - name - }; - Self { - transport: self.transport, - name: format!("{}@{}", name, digest), - } - } -} - -impl OstreeImageReference { - /// Create a new `OstreeImageReference` that refers to a specific digest. - /// - /// ```rust - /// use std::convert::TryInto; - /// let r: ostree_ext::container::OstreeImageReference = "ostree-remote-image:myremote:docker://quay.io/exampleos/exampleos:latest".try_into().unwrap(); - /// let n = r.with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - /// assert_eq!(n.imgref.name, "quay.io/exampleos/exampleos@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - /// ``` - pub fn with_digest(&self, digest: &str) -> Self { - Self { - sigverify: self.sigverify.clone(), - imgref: self.imgref.with_digest(digest), - } - } -} - impl TryFrom<&str> for Transport { type Error = anyhow::Error; @@ -295,18 +253,6 @@ mod tests { assert_eq!(ir.name, "quay.io/exampleos/blah"); assert_eq!(ir.to_string(), "docker://quay.io/exampleos/blah"); - let digested = ir - .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - assert_eq!(digested.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - assert_eq!(digested.with_digest("sha256:52f562806109f5746be31ccf21f5569fd2ce8c32deb0d14987b440ed39e34e20").name, "quay.io/exampleos/blah@sha256:52f562806109f5746be31ccf21f5569fd2ce8c32deb0d14987b440ed39e34e20"); - - let with_tag: ImageReference = "registry:quay.io/exampleos/blah:sometag" - .try_into() - .unwrap(); - let digested = with_tag - .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - assert_eq!(digested.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - for &v in VALID_IRS { ImageReference::try_from(v).unwrap(); } @@ -345,11 +291,6 @@ mod tests { // test our Eq implementation assert_eq!(&ir, &OstreeImageReference::try_from(ir_registry).unwrap()); - let digested = ir - .with_digest("sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - assert_eq!(digested.imgref.name, "quay.io/exampleos/blah@sha256:41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3"); - 
assert_eq!(digested.with_digest("sha256:52f562806109f5746be31ccf21f5569fd2ce8c32deb0d14987b440ed39e34e20").imgref.name, "quay.io/exampleos/blah@sha256:52f562806109f5746be31ccf21f5569fd2ce8c32deb0d14987b440ed39e34e20"); - let ir_s = "ostree-image-signed:docker://quay.io/exampleos/blah"; let ir: OstreeImageReference = ir_s.try_into().unwrap(); assert_eq!(ir.sigverify, SignatureSource::ContainerPolicy); diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 9c4038ab0..c5d91bf08 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -259,11 +259,10 @@ async fn test_container_import_export() -> Result<()> { ), cmd: Some(vec!["/bin/bash".to_string()]), }; - let pushed = ostree_ext::container::export(srcrepo, TESTREF, &config, &srcoci_imgref) + let digest = ostree_ext::container::export(srcrepo, TESTREF, &config, &srcoci_imgref) .await .context("exporting")?; assert!(srcoci_path.exists()); - let digest = pushed.name.rsplitn(2, '@').next().unwrap(); let inspect = skopeo_inspect(&srcoci_imgref.to_string())?; assert!(inspect.contains(r#""version": "42.0""#)); From 55921d6561a8407be7080368f47a06179ce49fb7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Sep 2021 14:49:11 -0400 Subject: [PATCH 108/775] tests: Verify we currently reject a derived container Prep for supporting this. --- .../it/fixtures/exampleos-derive.ociarchive | Bin 0 -> 14336 bytes lib/tests/it/main.rs | 21 ++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 lib/tests/it/fixtures/exampleos-derive.ociarchive diff --git a/lib/tests/it/fixtures/exampleos-derive.ociarchive b/lib/tests/it/fixtures/exampleos-derive.ociarchive new file mode 100644 index 0000000000000000000000000000000000000000..5b4162f9a161aed6963d9c006bc20ddc5454c64a GIT binary patch literal 14336 zcmeHN30RD4`yYi$IoV~Mnj(pseeuOuDvl6YqSdT5RFkG8$$PS#HY5k-XtQKbI>=I) zEU7~^B(j$F6jIT&%)HP4dB+ULIfw6j*Kz*Wf6;Z(v)=dp{_fxXd){Uqu8){2vBv$Q zKSUyx$e_`1#P>hH|Ba2}NK_J;M4}VP6b6n6n>0GknD&oGr8XWU3FHL8kOIWwz%Tjx zck{o0xG&lIA2i2RA=Bvp6b=5P{8NcA{}jlCZ~6c4pvfosr%_o928T;yF}M^e zjYlR^m|PB-DHM>H91fpBBME3c8jH_^!z>DmBBTo`R2G>=6B79x4nxTMOu$4Ekw~Xf ze_!{SK8Zpne_H=!Dv?aZ852Ka@2d{~Kd=8#0xv+o2^8=NQwU@tnKYTmnoMFWAu*>= zm{W)hD>8*dW)SHlk_%xHffK;<76l4;fk6QRxSr$7r&HmuSOPmfBEO(@u)`Dk2}NFn zDWL>AzqN!Zu7rg%mdu%I9V7{`_7U?qKGqVh$ZyJT`X9OnjR`6BHhoPGS(bbQYCHq(iDx88j}BMCUWO0xploqzYMdE|o`TFqk9~hmKL7%3yJs zEFqIeVK9i$X zk;&)9T= zpV;5Rc!sU5u~6hAm_p-FX;eCeE?`p0TppJ}6)?$Mh%t>vC&A~WvbYomhbH6^X%reu z$Rm@OJibuCqcQ{>s7rjlvB=L@m4Eou0Rn4|pCA~sYm9{e(<<=8@@ntoINw;sGjFjZ z&|0vL3HlZo z{HnvD1PRYu;LBO2(MaL#R%}x=0`+&_D=?`Ot%rijpi!YzsZ|v!72&T1sM-MIvx2YsmR{>c z{(lmqKtLoj=u82fCZG$sP?;D+8k5WW_wN#qNlZu03oK8s7@(^yQJ zfJuR)A7X?eN5&NN>xITA6Dd4_kRgEHgd7r`!Q-+7Oc)>(av_;0WI$PEF$6qVpj;Xh zQWBp-qA}sw%%U^7R5I&BXk-#K3?7;sKR!yivnz|2)QoL*z^s5#w#EK-VQpf8f)FZO?iS(>;Zd4==xVZNP~6?%Eae znsph^X?xev68j6!hvg(Af^)-? zK~vY0L;n&7R|tw3GiH_*$&c<^VtaiBYA~nBezQwm>AV31{$C0s22~?34QcyJz5M zu=;ZSlgP9`)<2a-`Bwj*f%B^lf2{wP&$NGV^BU6UdAe`t>fmHg-zN`VuWefUc;9i~ zCt1FuGU5L;>ji!u0Sf}!bOtM`=C74atvu+z*d*olP|Bs5q?BS|+oRP1``qg49-ZKt z)I4ou3M@t!tVv`Vm`|OugHls7>B`IQO#Gg$Q7OCwNzMK8qc?zQrZh z7tQQcSk$-d@p~LALc>j`A<@W^0{s_xF}za>`+PxYwm9_Xg3_~ya65Eo-q&3+*3?# zMI#P@B2(0da{C2h-8oYQ!xCyosb zL(pa|<@+etp!nz(FuI3b0`isd*Ae$r0KJS>8bTXbNIH$6T4^{NXun0)fVwxHs%aQR zMEm=`A)N0@A%IX#5ZdY>#;SvmJBU2vl0;kRl9n4nR8&AUqP>tTMl446S zlIkFHfn^@5f$FmOr;(1Au|1B|5siB_~|D?M$%;#vgTkHylXi3K`T0kUzT z>6tFrY$yb93d7;u8VrZgXfxca$`-`s3Xr_S=2O@O3fQdx{%_f^+k!B=@qdbn9T*H_ z!CV8J+O>|_H5;dP-AfIF;YSz>x*!YzonqX37%H*7J?sM15av~x!UHhsP6ewT>tY1? 
z3oa!2F!M0d963(>}X|%RviehiE2TH^my7$G$nTaW)0FR2e;c$ z#Bw2OjdrB0s6ZusZh?GH5MTFBHTYUO33ct&j+75n0g(W*cc}CtPf;0kLltQ~IBlvb zYa$39qY5^>OWGXS!`=#vl&^Og9eX- zXn3OgtLvneYJgm^_5VRVs}eY&J7uEApC(@?HAOWP`oMHOLzBRz$G7FxW143QAnBhX z7K#h~)cjZq&3%cTrOanqlDJ68n|y@Jy1$0rj(oVuFAb z7Ph?;m1Sb_X>@iPvZD)jKhIux`jh~7n4g2-S4|cssh?f;*#RHO7qzkugE>SUs-!C* zlvr5{VNq$+T?2|Ng@>vj{}Xk`Fm$MvYBCa|Dd~L86Kk{vA*sWX8mVy~ezeW=&Zjw% zW2><+JPdxr2n{IkeE>VAJ{)XN;r-vAO`(|a9{frm;n`&-Sesc*e!lN9C|-ADe$HDl z_m)ya$NR*Y*r zamHnGr-$I$rN$ywz~E8V=R8Oz+#91@h!5vF+;~WzlSa4S=5&71y~j5;?JaXCTQc(Q zvO~o>4G$zM>;2o3UTkY=Nnvg9s^4E_({3ZJY{|6|*L6GktYQu;^tT&PU8FU%Xn9c* zv0}NUXyMa^x3}&zT5V<2v1Eej(w%#C!~13Bo>x0 zM^8$eGI^W>FXfHlZPx-K#7Xy9A+pH4#M0WA+2o_VfDz+n4_WUqf;DnvTsu;$@Z1Nq z4<}k2u^=rVh;Ul(v@Y`JS=8me^H^lD)ogC~`6XKZcg#+{d%P=e`7&_!Nk`cA!bRd^ zc8Zft$WvGS%St2*%@`kP1O9b8?S`jBmNk1LrF9S}rHb}GD}Q1CQ`}(T_FUfrOGf%T z&tKvU&-D0JUgkdConD~yDaZS{lWVGjg-MFdL5ow?)l_+1@O}91>C^8`Wv?X=%9W9c zz_bC}Qe^ndO(-p0e$wd3jTJFTap9qaJ^p2s>hLvpTtp6;S9gf`nfiUTF6_Qts6-C+ zfShX2XW%*x9nOqTJ9>X$2-{1TDK^i5~;Oasu^L}1y@_Bx&yot>IF7`H7z z*gwrSV5f)ZD${Q70NY-q9Z_Y;kBy^-CH8SJIvCdj?ByUNUpf+K4^t}Hb!Kg?VB~hN zpX(Zse|fQoP4yY~{kLA;XVcv)0&mBp4VEV4Dyxsu6-~Rl9Mc?S$>*AG^U%wRyB3G& zNO$VR_TUJmsJP3uB*(k%v3X0FpRW)VtS+OZ)UH3;KvRSTU^O`-SRpl}9lUKCIw}SH_w;oLX zbqhrC%ApqXn9;PEwF*27IM*Yo$NOF7jyb1Q969Upmb~4jGp8I~q&sKvs#B?X(=8?& zEGl)-8l-K6zsFdvsRwEX|y$O z`_L`K2%FivMwq3YbHTk?nIMlGc->&Rj=k=6W25UEG}bb1g5L6%{hPX{60Z_cK*n8# z^lsgyU+ZEPR9s5G^rF(UqV3*5zbzH5jhoLlRNb$*()PHI41f7el(S*KSFe6OmUc3B zRM7f0<^9EuF2TUd!B7!c)eIc0TtU+IQTT-Jn`466Qv*(0UD^waO_(%kq`cFY-R;*i z+p;|O(S|znQEYJv=+N75?$%z|;yiy}>-E6wX@?sw9qE?bmi#pM3P5+&ROSRcJ2;{# zvoy2dH1N6p@D)!e`+2U_#W&Y1V-;=91I?4!$h*DiVW?*u@6JP4wB-@lGturB-{rQ8 znojB6BI}p%pM)CNCp*oXw8);c<6PDPGTK5u4MA zS_2mhta`Y(csDP3)FxWhw!x!DAGjJ&yM_Glz|09*oii(*M48UMf>mf6BA(xPLVt+l z#vnKSHG?jJj7A`dLUtLurGx9!LBypws}AB!W087b{|==D+=wU%u%P8{J?TB0LOL%Cebu9zebfWKUS8FWQpkLDDVJHAukK>{?ZFNxz@v>VZb5bkELZ z&f4u{N3N-jc`#Eu&jeDQibuqQ|$P>^M1y$^ni`l*j~!v|g6u#$V-+!5`zyoU&l^ z?3=w?dYRms!Pl1gc7;AIW zvWs^!!g_M+4aZ(?KD!SO!UHN^{p${x3kC4GHv|IUYZXZBW+DbrZsce zNZSd+tUbxD9WNtvrky2(p`qX`W$N)4In#o-kM6$(5((~`j)_)WWT0|;`k{WWNhMwX(L4nW7Azy?RB$-s+U z`jX3idv_T0DZFwaYV`IW3%l3>Xhso;JZ#H}K4n@IqF1uIPN2Jq)DmU5e6UaWgqrm8 zTVDC;yiAXu>tsgMj(iFjmx0U@UD}0TU;ktFM#F7Ug)(sbZWsI14~u%;7EV8QW_lYd zaeRT>+P6QaH7IH?-qpG2;I~2XMj}kva7X0nI2pO3I2gg$z7b?Kob2CcXC|d5XGZO@ zVA~P^k9GlCFh$VMVg9&7*Zg1Zv`biQKhAsez@bH(OGbi->6ykeCaz44%Pd?!?tEQb zEMRnZLc!0t1-2MG2R`$s&mRzi4u29oX56!px3+Sdm;Rc&f3LO2#5mHY4%T*f`HjrHLhO1*!n?QLPfF_;?!kPw z`|atZlNHat_e1lGEgz-qFLjXK?-?I=D0vYl>ijO;4bkfJc5_D954{$?rr`L@)Tx=< z`lgn8XQb)f5;f`GTO!#yKz=y*zH=}tpPhyxHt;XKpfX_HMv+QGyf&66p|Lpt$?X8n zWZJTDV!t!HiaG5iN3i*EGI%@7durc~bBo11L);@5+(7q2Pp~ z(sl6VCarw8mpmY@s5V=R;yV0g&~t zetzhvQ=)SiVcQaWFDVAS-lZ(eTiVU8oweX@v-GkNh;;+yC(-!vG><{HfjGx=!wW+T zeBPAmJ-<5F^6I8=<5{?&Il9v$4-fnP#1Kxn?P)N|9W=lSZ$t*1iA6J@y0|of4b^O2 zWo+NmAT<%52dfW(hAyZ5;wU#o!9-N&D9CP5{J72J1z6R9)GbXUWPmu+0NWjfaaCgY znJo7GgIc!2}Et@c;%F=+OD;TFi>y(r: Result, s: impl AsRef) { let s = s.as_ref(); let msg = format!("{:#}", r.err().unwrap()); @@ -322,6 +325,24 @@ async fn test_container_import_export() -> Result<()> { Ok(()) } +/// We should currently reject an image with multiple layers. 
+#[tokio::test] +async fn test_container_import_derive() -> Result<()> { + let fixture = Fixture::new()?; + let exampleos_path = &fixture.path.join("exampleos.ociarchive"); + std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_OCI)?; + let exampleos_ref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: ImageReference { + transport: Transport::OciArchive, + name: exampleos_path.to_string(), + }, + }; + let r = ostree_ext::container::import(&fixture.destrepo, &exampleos_ref, None).await; + assert_err_contains(r, "Expected 1 layer, found 2"); + Ok(()) +} + #[test] fn test_diff() -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; From 3cee33b00002ffa42819725883bdca4bd2252184 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Sep 2021 12:21:06 -0400 Subject: [PATCH 109/775] lib/container: Change manifest fetch API to return raw bytes and digest This allows us to use it more consistently. We also drop the unnecessary wrapper structure. --- lib/src/cli.rs | 4 ++-- lib/src/container/export.rs | 4 ++-- lib/src/container/import.rs | 19 +++++-------------- lib/src/container/mod.rs | 6 ------ lib/tests/it/main.rs | 4 ++-- 5 files changed, 11 insertions(+), 26 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 82d7e7b7b..6a595ab76 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -226,8 +226,8 @@ async fn container_export( /// Load metadata for a container image with an encapsulated ostree commit. async fn container_info(imgref: &str) -> Result<()> { let imgref = imgref.try_into()?; - let info = crate::container::fetch_manifest_info(&imgref).await?; - println!("{} @{}", imgref, info.manifest_digest); + let (_, digest) = crate::container::fetch_manifest(&imgref).await?; + println!("{} digest: {}", imgref, digest); Ok(()) } diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index c66180dd1..39b160402 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -128,8 +128,8 @@ async fn build_impl( }; // FIXME - it's obviously broken to do this push -> inspect cycle because of the possibility // of a race condition, but we need to patch skopeo to have the equivalent of `podman push --digestfile`. - let info = super::import::fetch_manifest_info(&imgref).await?; - Ok(info.manifest_digest) + let (_, digest) = super::import::fetch_manifest(&imgref).await?; + Ok(digest) } /// Given an OSTree repository and ref, generate a container image. diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 5e4ac4545..b74b2b828 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -88,18 +88,9 @@ impl AsyncRead for ProgressReader { } } -/// Download the manifest for a target image. +/// Download the manifest for a target image and its sha256 digest. #[context("Fetching manifest")] -pub async fn fetch_manifest_info( - imgref: &OstreeImageReference, -) -> Result { - let (_, manifest_digest) = fetch_manifest(imgref).await?; - Ok(OstreeContainerManifestInfo { manifest_digest }) -} - -/// Download the manifest for a target image. 
-#[context("Fetching manifest")] -async fn fetch_manifest(imgref: &OstreeImageReference) -> Result<(oci::Manifest, String)> { +pub async fn fetch_manifest(imgref: &OstreeImageReference) -> Result<(Vec, String)> { let mut proc = skopeo::new_cmd(); let imgref_base = &imgref.imgref; proc.args(&["inspect", "--raw"]) @@ -113,7 +104,7 @@ async fn fetch_manifest(imgref: &OstreeImageReference) -> Result<(oci::Manifest, let raw_manifest = proc.stdout; let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), &raw_manifest)?; let digest = format!("sha256:{}", hex::encode(digest.as_ref())); - Ok((serde_json::from_slice(&raw_manifest)?, digest)) + Ok((raw_manifest, digest)) } /// Read the contents of the first .tar we find. @@ -328,8 +319,8 @@ pub async fn import( } let options = options.unwrap_or_default(); let (manifest, image_digest) = fetch_manifest(imgref).await?; - let manifest = &manifest; - let layerid = find_layer_blobid(manifest)?; + let manifest: oci::Manifest = serde_json::from_slice(&manifest)?; + let layerid = find_layer_blobid(&manifest)?; event!(Level::DEBUG, "target blob: {}", layerid); let (blob, worker) = fetch_layer(imgref, layerid.as_str(), options.progress).await?; let blob = tokio::io::BufReader::new(blob); diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index f0791d20d..aba26a9df 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -42,12 +42,6 @@ pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit"; /// to a string to output to a terminal or logs. type Result = anyhow::Result; -/// Information about the image manifest. -pub struct OstreeContainerManifestInfo { - /// The manifest digest (`sha256:`) - pub manifest_digest: String, -} - /// A backend/transport for OCI/Docker images. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Transport { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 9f927c9a1..f047b2e0b 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -277,8 +277,8 @@ async fn test_container_import_export() -> Result<()> { imgref: srcoci_imgref.clone(), }; - let inspect = ostree_ext::container::fetch_manifest_info(&srcoci_unverified).await?; - assert_eq!(inspect.manifest_digest, digest); + let (_, pushed_digest) = ostree_ext::container::fetch_manifest(&srcoci_unverified).await?; + assert_eq!(pushed_digest, digest); // No remote matching let srcoci_unknownremote = OstreeImageReference { From ede98900d38cf9c434495c32a9e5d4da22cbb857 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Sep 2021 12:31:57 -0400 Subject: [PATCH 110/775] lib/container: Add API to fetch from already downloaded manifest The flow here is then nicer, because at a high level apps like rpm-ostree will want to do: - Fetch manifest, digest pair - Do we already have this? If so, we're done - Otherwise, perform the fetch using that already downloaded manifest Previously we were fetching the manifest twice. 
--- lib/src/container/import.rs | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index b74b2b828..09f5de41c 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -312,14 +312,30 @@ pub async fn import( imgref: &OstreeImageReference, options: Option, ) -> Result { + let (manifest, image_digest) = fetch_manifest(imgref).await?; + let ostree_commit = import_from_manifest(repo, imgref, &manifest, options).await?; + Ok(Import { + ostree_commit, + image_digest, + }) +} + +/// Fetch a container image using an in-memory manifest and import its embedded OSTree commit. +#[context("Importing {}", imgref)] +#[instrument(skip(repo, options, manifest_bytes))] +pub async fn import_from_manifest( + repo: &ostree::Repo, + imgref: &OstreeImageReference, + manifest_bytes: &[u8], + options: Option, +) -> Result { if matches!(imgref.sigverify, SignatureSource::ContainerPolicy) && skopeo::container_policy_is_default_insecure()? { return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); } let options = options.unwrap_or_default(); - let (manifest, image_digest) = fetch_manifest(imgref).await?; - let manifest: oci::Manifest = serde_json::from_slice(&manifest)?; + let manifest: oci::Manifest = serde_json::from_slice(manifest_bytes)?; let layerid = find_layer_blobid(&manifest)?; event!(Level::DEBUG, "target blob: {}", layerid); let (blob, worker) = fetch_layer(imgref, layerid.as_str(), options.progress).await?; @@ -336,8 +352,5 @@ pub async fn import( let _: () = worker?; let ostree_commit = ostree_commit?; event!(Level::DEBUG, "created commit {}", ostree_commit); - Ok(Import { - ostree_commit, - image_digest, - }) + Ok(ostree_commit) } From 83973f196e0b9153e256fe11dc5231c40813f7fb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Sep 2021 13:23:50 -0400 Subject: [PATCH 111/775] lib/container: Make use of `skopeo copy --digestfile` if present This came from https://github.com/containers/skopeo/pull/1266 and has landed in Fedora 34 at least. --- lib/Cargo.toml | 2 ++ lib/src/container/export.rs | 42 +++++++++++++++++++++++++------------ lib/src/container/skopeo.rs | 28 +++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 13 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 584661609..0c2b111ea 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -11,6 +11,7 @@ version = "0.4.0-alpha.0" [dependencies] anyhow = "1.0" bytes = "1.0.1" +bitflags = "1.3.2" camino = "1.0.4" cjson = "0.1.1" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } @@ -19,6 +20,7 @@ futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" +lazy_static = "1.4.0" libc = "0.2.92" maplit = "1.0.2" nix = "0.22.0" diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 39b160402..9a1317a32 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -95,7 +95,7 @@ async fn build_impl( } else { None }; - if dest.transport == Transport::OciDir { + let digest = if dest.transport == Transport::OciDir { let _copied: ImageReference = build_oci( repo, ostree_ref, @@ -103,33 +103,49 @@ async fn build_impl( config, compression, )?; + None } else { let tempdir = tempfile::tempdir_in("/var/tmp")?; let tempdest = tempdir.path().join("d"); let tempdest = tempdest.to_str().unwrap(); + let digestfile = if skopeo::skopeo_has_features(skopeo::SkopeoFeatures::COPY_DIGESTFILE)? 
{ + Some(tempdir.path().join("digestfile")) + } else { + None + }; + let src = build_oci(repo, ostree_ref, Path::new(tempdest), config, compression)?; let mut cmd = skopeo::new_cmd(); tracing::event!(Level::DEBUG, "Copying {} to {}", src, dest); - cmd.stdout(std::process::Stdio::null()) - .arg("copy") - .arg(src.to_string()) - .arg(dest.to_string()); + cmd.stdout(std::process::Stdio::null()).arg("copy"); + if let Some(ref digestfile) = digestfile { + cmd.arg("--digestfile"); + cmd.arg(digestfile); + } + cmd.args(&[src.to_string(), dest.to_string()]); let proc = super::skopeo::spawn(cmd)?; let output = proc.wait_with_output().await?; if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr); return Err(anyhow::anyhow!("skopeo failed: {}\n", stderr)); } - } - let imgref = OstreeImageReference { - sigverify: SignatureSource::ContainerPolicyAllowInsecure, - imgref: dest.to_owned(), + digestfile + .map(|p| -> Result { Ok(std::fs::read_to_string(p)?.trim().to_string()) }) + .transpose()? }; - // FIXME - it's obviously broken to do this push -> inspect cycle because of the possibility - // of a race condition, but we need to patch skopeo to have the equivalent of `podman push --digestfile`. - let (_, digest) = super::import::fetch_manifest(&imgref).await?; - Ok(digest) + if let Some(digest) = digest { + Ok(digest) + } else { + // If `skopeo copy` doesn't have `--digestfile` yet, then fall back + // to running an inspect cycle. + let imgref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: dest.to_owned(), + }; + let (_, digest) = super::import::fetch_manifest(&imgref).await?; + Ok(digest) + } } /// Given an OSTree repository and ref, generate a container image. diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs index ace8bb645..476ee9b7a 100644 --- a/lib/src/container/skopeo.rs +++ b/lib/src/container/skopeo.rs @@ -13,6 +13,34 @@ use tokio::process::Command; const POLICY_PATH: &str = "/etc/containers/policy.json"; const INSECURE_ACCEPT_ANYTHING: &str = "insecureAcceptAnything"; +bitflags::bitflags! { + pub(crate) struct SkopeoFeatures: u32 { + const COPY_DIGESTFILE = 0b00000001; + } +} + +lazy_static::lazy_static! { + static ref SKOPEO_FEATURES: Result = { + let mut features = SkopeoFeatures::empty(); + let c = std::process::Command::new("skopeo") + .args(&["copy", "--help"]) + .stderr(std::process::Stdio::piped()) + .output()?; + let stdout = String::from_utf8_lossy(&c.stderr); + if stdout.contains("--digestfile") { + features.insert(SkopeoFeatures::COPY_DIGESTFILE); + } + Ok(features) + }; +} + +pub(crate) fn skopeo_has_features(wanted: SkopeoFeatures) -> Result { + match &*SKOPEO_FEATURES { + Ok(found) => Ok(found.intersects(wanted)), + Err(e) => Err(anyhow::Error::msg(e)), + } +} + #[derive(Deserialize)] struct PolicyEntry { #[serde(rename = "type")] From f132e494124af01cf0b4cb78bf381667e7e705ca Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 20 Sep 2021 09:22:01 -0400 Subject: [PATCH 112/775] tests: Strengthen error check for empty archive Just reading the test code and I want to do this in more tests. 
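
For reference, the `assert_err_contains` helper being leaned on here (introduced
with the derived-container test earlier in the series) is roughly:

    fn assert_err_contains<T>(r: anyhow::Result<T>, s: impl AsRef<str>) {
        let s = s.as_ref();
        let msg = format!("{:#}", r.err().unwrap());
        if !msg.contains(s) {
            panic!(r#"Error message "{}" did not contain "{}""#, msg, s);
        }
    }

The `{:#}` (alternate) formatting of an `anyhow::Error` renders the whole context
chain, which is what makes substring assertions against nested errors reliable.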
---
 lib/tests/it/main.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 9f927c9a1..6bc792715 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -131,7 +131,7 @@ async fn test_tar_import_empty() -> Result<()> {
     let destrepo = ostree::Repo::new_for_path(&fixture.destrepo_path);
     destrepo.open(gio::NONE_CANCELLABLE)?;
     let r = ostree_ext::tar::import_tar(&destrepo, tokio::io::empty(), None).await;
-    assert!(r.is_err());
+    assert_err_contains(r, "Commit object not found");
     Ok(())
 }

From 1bba1e8ddeb75823a2ff76ddcf8ea715accf469f Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 20 Sep 2021 09:07:15 -0400
Subject: [PATCH 113/775] lib/container: Explicitly drain stream

Rather than passing the FIFO stream ownership into and back out of the
parser, just pass an `&mut` reference, and then explicitly drain it in
the worker thread.

This then motivates cleaning up our "worker" futures. Add some comments
too.

Prep for future work.
---
 lib/src/container/import.rs | 34 +++++++++++++++-------------------
 1 file changed, 15 insertions(+), 19 deletions(-)

diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs
index 5e4ac4545..19c9faaa0 100644
--- a/lib/src/container/import.rs
+++ b/lib/src/container/import.rs
@@ -118,36 +118,30 @@ async fn fetch_manifest(imgref: &OstreeImageReference) -> Result<(oci::Manifest,
 /// Read the contents of the first .tar we find.
 /// The first return value is an `AsyncRead` of that tar file.
-/// The second return value is a background worker task that will
-/// return back to the caller the provided input stream (converted
-/// to a synchronous reader). This ensures the caller can take
-/// care of closing the input stream.
+/// The second return value is a background worker task that
+/// owns stream processing.
 pub async fn find_layer_tar(
     src: impl AsyncRead + Send + Unpin + 'static,
     blobid: &str,
-) -> Result<(
-    impl AsyncRead,
-    impl Future<Output = Result<impl Read + Send + Unpin>>,
-)> {
+) -> Result<(impl AsyncRead, impl Future<Output = Result<Result<()>>>)> {
     // Convert the async input stream to synchronous, because we currently use the
     // sync tar crate.
     let pipein = crate::async_util::async_read_to_sync(src);
     // An internal channel of Bytes
     let (tx_buf, rx_buf) = tokio::sync::mpsc::channel(2);
     let blob_symlink_target = format!("../{}.tar", blobid);
-    let import = tokio::task::spawn_blocking(move || {
-        find_layer_tar_sync(pipein, blob_symlink_target, tx_buf)
+    let worker = tokio::task::spawn_blocking(move || {
+        let mut pipein = pipein;
+        let r =
+            find_layer_tar_sync(&mut pipein, blob_symlink_target, tx_buf).context("Import worker");
+        // Ensure we read the entirety of the stream, otherwise skopeo will get an EPIPE.
+        let _ = std::io::copy(&mut pipein, &mut std::io::sink());
+        r
     })
     .map_err(anyhow::Error::msg);
     // Bridge the channel to an AsyncRead
     let stream = tokio_stream::wrappers::ReceiverStream::new(rx_buf);
     let reader = tokio_util::io::StreamReader::new(stream);
-    // This async task owns the internal worker thread, which also owns the provided
-    // input stream which we return to the caller.
- let worker = async move { - let src_as_sync = import.await?.context("Import worker")?; - Ok::<_, anyhow::Error>(src_as_sync) - }; Ok((reader, worker)) } @@ -158,7 +152,7 @@ fn find_layer_tar_sync( pipein: impl Read + Send + Unpin, blob_symlink_target: String, tx_buf: tokio::sync::mpsc::Sender>, -) -> Result { +) -> Result<()> { let mut archive = tar::Archive::new(pipein); let mut buf = vec![0u8; 8192]; let mut found = false; @@ -212,7 +206,7 @@ fn find_layer_tar_sync( } } if found { - Ok(archive.into_inner()) + Ok(()) } else { Err(anyhow!("Failed to find layer {}", blob_symlink_target)) } @@ -260,10 +254,12 @@ async fn fetch_layer<'s>( } .boxed(); let (contents, worker) = find_layer_tar(fifo_reader, blobid).await?; + // This worker task joins the result of the stream processing thread with monitoring the skopeo process. let worker = async move { let (worker, waiter) = tokio::join!(worker, waiter); + // Explicitly declare as `()` to verify we have the right number of `?`. let _: () = waiter?; - let _pipein = worker.context("Layer worker failed")?; + let _: () = worker??; Ok::<_, anyhow::Error>(()) }; Ok((contents, worker)) From 54a18d888aedfab053db6040808270792dd718f3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 20 Sep 2021 10:00:35 -0400 Subject: [PATCH 114/775] tests: Add srcrepo to Fixture Since most tests want to operate on a src and dest repo, move the creation of the srcrepo into `Fixture`. General cleanup and prep for future work. --- lib/tests/it/main.rs | 49 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 6bc792715..e025314cb 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -79,21 +79,19 @@ fn update_repo(repopath: &Utf8Path) -> Result<()> { } #[context("Generating test tarball")] -fn generate_test_tarball(dir: &Utf8Path) -> Result { +fn initial_export(fixture: &Fixture) -> Result { let cancellable = gio::NONE_CANCELLABLE; - let repopath = generate_test_repo(dir)?; - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?; - let (_, rev) = repo.read_commit(TESTREF, cancellable)?; - let (commitv, _) = repo.load_commit(rev.as_str())?; + let (_, rev) = fixture.srcrepo.read_commit(TESTREF, cancellable)?; + let (commitv, _) = fixture.srcrepo.load_commit(rev.as_str())?; assert_eq!( ostree::commit_get_content_checksum(&commitv) .unwrap() .as_str(), EXAMPLEOS_CONTENT_CHECKSUM ); - let destpath = dir.join("exampleos-export.tar"); + let destpath = fixture.path.join("exampleos-export.tar"); let mut outf = std::io::BufWriter::new(std::fs::File::create(&destpath)?); - ostree_ext::tar::export_commit(repo, rev.as_str(), &mut outf)?; + ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut outf)?; outf.flush()?; Ok(destpath) } @@ -102,6 +100,8 @@ struct Fixture { // Just holds a reference _tempdir: tempfile::TempDir, path: Utf8PathBuf, + srcdir: Utf8PathBuf, + srcrepo: ostree::Repo, destrepo: ostree::Repo, destrepo_path: Utf8PathBuf, } @@ -111,6 +111,13 @@ impl Fixture { let _tempdir = tempfile::tempdir_in("/var/tmp")?; let path: &Utf8Path = _tempdir.path().try_into().unwrap(); let path = path.to_path_buf(); + + let srcdir = path.join("src"); + std::fs::create_dir(&srcdir)?; + let srcrepo_path = generate_test_repo(&srcdir)?; + let srcrepo = + ostree::Repo::open_at(libc::AT_FDCWD, srcrepo_path.as_str(), gio::NONE_CANCELLABLE)?; + let destdir = &path.join("dest"); std::fs::create_dir(destdir)?; let destrepo_path = 
destdir.join("repo"); @@ -119,6 +126,8 @@ impl Fixture { Ok(Self { _tempdir, path, + srcdir, + srcrepo, destrepo, destrepo_path, }) @@ -138,10 +147,7 @@ async fn test_tar_import_empty() -> Result<()> { #[tokio::test] async fn test_tar_import_signed() -> Result<()> { let fixture = Fixture::new()?; - let srcdir = &fixture.path.join("src"); - std::fs::create_dir(srcdir)?; - - let test_tar = &generate_test_tarball(srcdir)?; + let test_tar = &initial_export(&fixture)?; // Verify we fail with an unknown remote. let src_tar = tokio::fs::File::open(test_tar).await?; @@ -177,7 +183,7 @@ async fn test_tar_import_signed() -> Result<()> { bash!( "ostree --repo={repo} remote gpg-import --stdin myremote < {p}/gpghome/key1.asc", repo = fixture.destrepo_path.as_str(), - p = srcdir.as_str() + p = fixture.srcdir.as_str() )?; let src_tar = tokio::fs::File::open(test_tar).await?; let imported = ostree_ext::tar::import_tar( @@ -201,9 +207,7 @@ async fn test_tar_import_signed() -> Result<()> { #[tokio::test] async fn test_tar_import_export() -> Result<()> { let fixture = Fixture::new()?; - let srcdir = &fixture.path.join("src"); - std::fs::create_dir(srcdir)?; - let src_tar = tokio::fs::File::open(&generate_test_tarball(srcdir)?).await?; + let src_tar = tokio::fs::File::open(&initial_export(&fixture)?).await?; let imported_commit: String = ostree_ext::tar::import_tar(&fixture.destrepo, src_tar, None).await?; @@ -236,19 +240,14 @@ fn skopeo_inspect(imgref: &str) -> Result { #[tokio::test] async fn test_container_import_export() -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; let fixture = Fixture::new()?; - let srcdir = &fixture.path.join("src"); - std::fs::create_dir(srcdir)?; - let srcrepopath = &generate_test_repo(srcdir)?; - let srcrepo = &ostree::Repo::new_for_path(srcrepopath); - srcrepo.open(cancellable)?; - let testrev = srcrepo + let testrev = fixture + .srcrepo .resolve_rev(TESTREF, false) .context("Failed to resolve ref")? .unwrap(); - let srcoci_path = &srcdir.join("oci"); + let srcoci_path = &fixture.path.join("oci"); let srcoci_imgref = ImageReference { transport: Transport::OciDir, name: srcoci_path.as_str().to_string(), @@ -262,7 +261,7 @@ async fn test_container_import_export() -> Result<()> { ), cmd: Some(vec!["/bin/bash".to_string()]), }; - let digest = ostree_ext::container::export(srcrepo, TESTREF, &config, &srcoci_imgref) + let digest = ostree_ext::container::export(&fixture.srcrepo, TESTREF, &config, &srcoci_imgref) .await .context("exporting")?; assert!(srcoci_path.exists()); @@ -300,7 +299,7 @@ async fn test_container_import_export() -> Result<()> { bash!( "ostree --repo={repo} remote gpg-import --stdin myremote < {p}/gpghome/key1.asc", repo = fixture.destrepo_path.as_str(), - p = srcdir.as_str() + p = fixture.srcdir.as_str() )?; // No remote matching From 7d291dbe1df4b1dfc4e0edd27a2bd08cfdf94063 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 20 Sep 2021 10:51:27 -0400 Subject: [PATCH 115/775] tests: Drop redundant destrepo creation It's been in `Fixture` for a while. 
--- lib/tests/it/main.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index e025314cb..118c514b6 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -137,9 +137,7 @@ impl Fixture { #[tokio::test] async fn test_tar_import_empty() -> Result<()> { let fixture = Fixture::new()?; - let destrepo = ostree::Repo::new_for_path(&fixture.destrepo_path); - destrepo.open(gio::NONE_CANCELLABLE)?; - let r = ostree_ext::tar::import_tar(&destrepo, tokio::io::empty(), None).await; + let r = ostree_ext::tar::import_tar(&fixture.destrepo, tokio::io::empty(), None).await; assert_err_contains(r, "Commit object not found"); Ok(()) } From 788700f9c532ff455e2b808ac5d6c3291caaf5e8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 20 Sep 2021 09:38:46 -0400 Subject: [PATCH 116/775] tests: Verify that exported tar is bit-for-bit identical It's handy to have some level of predictability, so verify that the tar stream we output for a given commit is bit-for-bit identical when invoked twice - with a one second delay to flush out at least things like modification times. We will never be able to assert on a particular checksum, because e.g. things like the gzip algorithm may compress differently when the code changes. Related discussion in https://github.com/cgwalters/git-evtag#tarball-reproducibility --- lib/tests/it/main.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 4c8dcd5a0..9c3089008 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -142,6 +142,28 @@ async fn test_tar_import_empty() -> Result<()> { Ok(()) } +#[tokio::test] +async fn test_tar_export_reproducible() -> Result<()> { + let fixture = Fixture::new()?; + let (_, rev) = fixture + .srcrepo + .read_commit(TESTREF, gio::NONE_CANCELLABLE)?; + let export1 = { + let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; + ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h)?; + h.finish()? + }; + // Artificial delay to flush out mtimes (one second granularity baseline, plus another 100ms for good measure). + std::thread::sleep(std::time::Duration::from_millis(1100)); + let export2 = { + let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; + ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h)?; + h.finish()? + }; + assert_eq!(*export1, *export2); + Ok(()) +} + #[tokio::test] async fn test_tar_import_signed() -> Result<()> { let fixture = Fixture::new()?; From 7e5bf6a51e1486e2cc543fcccc0d6a1fcc4453ac Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 21 Sep 2021 11:07:58 -0400 Subject: [PATCH 117/775] lib/container: Split off helper to find all layers into oci Prep for derivation, where we want to find all layers. Move this to a method on `Manifest`, and add a unit test for it. Also while we're here, change the function to borrow and not clone on general principle. 
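
The consuming side then reads as follows; a condensed sketch of the logic in
`require_one_layer_blob` from the diff below:

    fn single_layer_digest(manifest_bytes: &[u8]) -> anyhow::Result<String> {
        let manifest: oci::Manifest = serde_json::from_slice(manifest_bytes)?;
        // Borrowed digests; find_layer_blobids() errors instead of returning an empty Vec.
        let layers = manifest.find_layer_blobids()?;
        anyhow::ensure!(layers.len() == 1, "Expected 1 layer, found {}", layers.len());
        Ok(layers[0].to_string()) // e.g. "sha256:ee02768e..."
    }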
--- lib/src/container/import.rs | 32 ++++++------------ lib/src/container/oci.rs | 67 +++++++++++++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 21 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index c030199de..56efb9590 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -120,7 +120,10 @@ pub async fn find_layer_tar( let pipein = crate::async_util::async_read_to_sync(src); // An internal channel of Bytes let (tx_buf, rx_buf) = tokio::sync::mpsc::channel(2); - let blob_symlink_target = format!("../{}.tar", blobid); + let blob_sha256 = blobid + .strip_prefix("sha256:") + .ok_or_else(|| anyhow!("Expected sha256: in digest: {}", blobid))?; + let blob_symlink_target = format!("../{}.tar", blob_sha256); let worker = tokio::task::spawn_blocking(move || { let mut pipein = pipein; let r = @@ -265,31 +268,18 @@ pub struct Import { pub image_digest: String, } -fn find_layer_blobid(manifest: &oci::Manifest) -> Result { - let layers: Vec<_> = manifest - .layers - .iter() - .filter(|&layer| { - matches!( - layer.media_type.as_str(), - super::oci::DOCKER_TYPE_LAYER | oci::OCI_TYPE_LAYER - ) - }) - .collect(); - +fn require_one_layer_blob(manifest: &oci::Manifest) -> Result<&str> { + let layers = manifest.find_layer_blobids()?; let n = layers.len(); if let Some(layer) = layers.into_iter().next() { if n > 1 { Err(anyhow!("Expected 1 layer, found {}", n)) } else { - let digest = layer.digest.as_str(); - let hash = digest - .strip_prefix("sha256:") - .ok_or_else(|| anyhow!("Expected sha256: in digest: {}", digest))?; - Ok(hash.into()) + Ok(layer) } } else { - Err(anyhow!("No layers found (orig: {})", manifest.layers.len())) + // Validated by find_layer_blobids() + unreachable!() } } @@ -332,9 +322,9 @@ pub async fn import_from_manifest( } let options = options.unwrap_or_default(); let manifest: oci::Manifest = serde_json::from_slice(manifest_bytes)?; - let layerid = find_layer_blobid(&manifest)?; + let layerid = require_one_layer_blob(&manifest)?; event!(Level::DEBUG, "target blob: {}", layerid); - let (blob, worker) = fetch_layer(imgref, layerid.as_str(), options.progress).await?; + let (blob, worker) = fetch_layer(imgref, layerid, options.progress).await?; let blob = tokio::io::BufReader::new(blob); let mut taropts: crate::tar::TarImportOptions = Default::default(); match &imgref.sigverify { diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index dbe73751f..2779f3c9e 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -79,6 +79,31 @@ pub(crate) struct Manifest { pub annotations: Option>, } +impl Manifest { + /// Return all layer (non-metadata) blobs. + /// It is an error if there are no layers present. 
+ pub(crate) fn find_layer_blobids(&self) -> Result> { + let layers: Vec<_> = self + .layers + .iter() + .filter_map(|layer| { + if matches!( + layer.media_type.as_str(), + DOCKER_TYPE_LAYER | OCI_TYPE_LAYER + ) { + Some(layer.digest.as_str()) + } else { + None + } + }) + .collect(); + if layers.is_empty() { + return Err(anyhow!("No layers found")); + } + Ok(layers) + } +} + /// Completed blob metadata #[derive(Debug)] pub(crate) struct Blob { @@ -324,6 +349,48 @@ impl<'a> std::io::Write for LayerWriter<'a> { mod tests { use super::*; + const MANIFEST_DERIVE: &str = r#"{ + "schemaVersion": 2, + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:54977ab597b345c2238ba28fe18aad751e5c59dc38b9393f6f349255f0daa7fc", + "size": 754 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:ee02768e65e6fb2bb7058282338896282910f3560de3e0d6cd9b1d5985e8360d", + "size": 5462 + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:d203cef7e598fa167cb9e8b703f9f20f746397eca49b51491da158d64968b429", + "size": 214 + } + ], + "annotations": { + "ostree.commit": "3cb6170b6945065c2475bc16d7bebcc84f96b4c677811a6751e479b89f8c3770", + "ostree.version": "42.0" + } + } + "#; + + #[test] + fn manifest() -> Result<()> { + let m: Manifest = serde_json::from_str(MANIFEST_DERIVE)?; + let mut blobids = m.find_layer_blobids()?.into_iter(); + assert_eq!( + blobids.next().unwrap(), + "sha256:ee02768e65e6fb2bb7058282338896282910f3560de3e0d6cd9b1d5985e8360d" + ); + assert_eq!( + blobids.next().unwrap(), + "sha256:d203cef7e598fa167cb9e8b703f9f20f746397eca49b51491da158d64968b429" + ); + assert!(blobids.next().is_none()); + Ok(()) + } + #[test] fn test_build() -> Result<()> { let td = tempfile::tempdir()?; From b85ae8dc40631d5bc42f220a5568ba86ce113b20 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 21 Sep 2021 16:48:45 -0400 Subject: [PATCH 118/775] test: Add an `#[ignore]`d test to verify registry integration Right now we're only testing `oci-archive:` really, let's actually test pushing/pulling via Docker registry API. Since this requires external setup, mark it as `#[ignore]`. --- lib/tests/it/main.rs | 51 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 9c3089008..7de5b8055 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -20,6 +20,7 @@ const EXAMPLEOS_V1: &[u8] = include_bytes!("fixtures/exampleos-v1.tar.zst"); const TESTREF: &str = "exampleos/x86_64/stable"; const EXAMPLEOS_CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120"; +const TEST_REGISTRY_DEFAULT: &str = "localhost:5000"; /// Image that contains a base exported layer, then a `podman build` of an added file on top. const EXAMPLEOS_DERIVED_OCI: &[u8] = include_bytes!("fixtures/exampleos-derive.ociarchive"); @@ -32,6 +33,15 @@ fn assert_err_contains(r: Result, s: impl AsRef) { } } +lazy_static::lazy_static! 
{ + static ref TEST_REGISTRY: String = { + match std::env::var_os("TEST_REGISTRY") { + Some(t) => t.to_str().unwrap().to_owned(), + None => TEST_REGISTRY_DEFAULT.to_string() + } + }; +} + #[context("Generating test repo")] fn generate_test_repo(dir: &Utf8Path) -> Result { let src_tarpath = &dir.join("exampleos.tar.zst"); @@ -361,6 +371,47 @@ async fn test_container_import_derive() -> Result<()> { Ok(()) } +#[ignore] +#[tokio::test] +// Verify that we can push and pull to a registry, not just oci-archive:. +// This requires a registry set up externally right now. One can run a HTTP registry via e.g. +// `podman run --rm -ti -p 5000:5000 --name registry docker.io/library/registry:2` +// but that doesn't speak HTTPS and adding that is complex. +// A simple option is setting up e.g. quay.io/$myuser/exampleos and then do: +// Then you can run this test via `env TEST_REGISTRY=quay.io/$myuser cargo test -- --ignored`. +async fn test_container_import_export_registry() -> Result<()> { + let tr = &*TEST_REGISTRY; + let fixture = Fixture::new()?; + let testrev = fixture + .srcrepo + .resolve_rev(TESTREF, false) + .context("Failed to resolve ref")? + .unwrap(); + let src_imgref = ImageReference { + transport: Transport::Registry, + name: format!("{}/exampleos", tr), + }; + let config = Config { + cmd: Some(vec!["/bin/bash".to_string()]), + ..Default::default() + }; + let digest = ostree_ext::container::export(&fixture.srcrepo, TESTREF, &config, &src_imgref) + .await + .context("exporting to registry")?; + let mut digested_imgref = src_imgref.clone(); + digested_imgref.name = format!("{}@{}", src_imgref.name, digest); + + let import_ref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: digested_imgref, + }; + let import = ostree_ext::container::import(&fixture.destrepo, &import_ref, None) + .await + .context("importing")?; + assert_eq!(import.ostree_commit, testrev.as_str()); + Ok(()) +} + #[test] fn test_diff() -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; From 1633ea744e1fc70b7753a04ac3e594e7b815982e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 20 Sep 2021 18:00:53 -0400 Subject: [PATCH 119/775] lib/tar: Add new `write` module APIs to write a tarball stream into an OSTree commit. This functionality already exists in libostree mostly, this API adds a higher level, more ergonomic Rust frontend to it. In the future, this may also evolve into parsing the tar stream in Rust, not in C. Prep for container derivation. --- lib/src/cmdext.rs | 21 ++++++++++ lib/src/ima.rs | 2 +- lib/src/lib.rs | 3 ++ lib/src/tar/import.rs | 2 +- lib/src/tar/mod.rs | 2 + lib/src/tar/write.rs | 98 +++++++++++++++++++++++++++++++++++++++++++ lib/tests/it/main.rs | 14 +++++++ 7 files changed, 140 insertions(+), 2 deletions(-) create mode 100644 lib/src/cmdext.rs create mode 100644 lib/src/tar/write.rs diff --git a/lib/src/cmdext.rs b/lib/src/cmdext.rs new file mode 100644 index 000000000..bd1da4ea1 --- /dev/null +++ b/lib/src/cmdext.rs @@ -0,0 +1,21 @@ +use std::os::unix::prelude::{CommandExt, RawFd}; + +pub(crate) trait CommandRedirectionExt { + /// Pass a file descriptor into the target process. + /// IMPORTANT: `fd` must be valid (i.e. cannot be closed) until after [`std::Process::Command::spawn`] or equivalent is invoked. 
+ fn take_fd_n(&mut self, fd: i32, target: i32) -> &mut Self; +} + +#[allow(unsafe_code)] +impl CommandRedirectionExt for std::process::Command { + fn take_fd_n(&mut self, fd: i32, target: i32) -> &mut Self { + unsafe { + self.pre_exec(move || { + nix::unistd::dup2(fd, target as RawFd) + .map(|_r| ()) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{}", e))) + }); + } + self + } +} diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 97bc280aa..bfece36b3 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -49,7 +49,7 @@ fn xattrs_to_map(v: &glib::Variant) -> BTreeMap, Vec> { } /// Create a new GVariant of type a(ayay). This is used by OSTree's extended attributes. -fn new_variant_a_ayay<'a, T: 'a + AsRef<[u8]>>( +pub(crate) fn new_variant_a_ayay<'a, T: 'a + AsRef<[u8]>>( items: impl IntoIterator, ) -> glib::Variant { let children: Vec<_> = items diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 3137cb655..9f71e9bf8 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -27,6 +27,9 @@ pub mod container; pub mod diff; pub mod ima; pub mod tar; + +mod cmdext; + /// Prelude, intended for glob import. pub mod prelude { #[doc(hidden)] diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index f898b5ce0..5aa53dca3 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -24,7 +24,7 @@ const MAX_XATTR_SIZE: u32 = 1024 * 1024; const MAX_METADATA_SIZE: u32 = 10 * 1024 * 1024; /// https://stackoverflow.com/questions/258091/when-should-i-use-mmap-for-file-access -const SMALL_REGFILE_SIZE: usize = 127 * 1024; +pub(crate) const SMALL_REGFILE_SIZE: usize = 127 * 1024; // The prefix for filenames that contain content we actually look at. const REPO_PREFIX: &str = "sysroot/ostree/repo/"; diff --git a/lib/src/tar/mod.rs b/lib/src/tar/mod.rs index ee3e41cf9..4eb9d57bd 100644 --- a/lib/src/tar/mod.rs +++ b/lib/src/tar/mod.rs @@ -41,3 +41,5 @@ mod import; pub use import::*; mod export; pub use export::*; +mod write; +pub use write::*; diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs new file mode 100644 index 000000000..8872bd086 --- /dev/null +++ b/lib/src/tar/write.rs @@ -0,0 +1,98 @@ +//! APIs to write a tarball stream into an OSTree commit. +//! +//! This functionality already exists in libostree mostly, +//! this API adds a higher level, more ergonomic Rust frontend +//! to it. +//! +//! In the future, this may also evolve into parsing the tar +//! stream in Rust, not in C. + +use crate::cmdext::CommandRedirectionExt; +use crate::Result; +use anyhow::anyhow; +use std::os::unix::prelude::AsRawFd; +use tokio::io::AsyncReadExt; +use tracing::instrument; + +/// Configuration for tar layer commits. +#[derive(Debug, Default)] +pub struct WriteTarOptions<'a> { + /// Base ostree commit hash + pub base: Option<&'a str>, + /// Enable SELinux labeling from the base commit + /// Requires the `base` option. + pub selinux: bool, +} + +/// Write the contents of a tarball as an ostree commit. 
+#[allow(unsafe_code)] // For raw fd bits +#[instrument(skip(repo, src))] +pub async fn write_tar( + repo: &ostree::Repo, + mut src: impl tokio::io::AsyncRead + Send + Unpin + 'static, + refname: &str, + options: Option>, +) -> Result { + use std::process::Stdio; + let options = options.unwrap_or_default(); + let mut c = std::process::Command::new("ostree"); + let repofd = repo.dfd_as_file()?; + { + let c = c + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .args(&["commit"]); + c.take_fd_n(repofd.as_raw_fd(), 3); + c.arg("--repo=/proc/self/fd/3"); + if let Some(base) = options.base { + if options.selinux { + c.arg("--selinux-policy-from-base"); + } + c.arg(&format!("--tree=ref={}", base)); + } + c.args(&[ + "--no-bindings", + "--tar-autocreate-parents", + "--tree=tar=/proc/self/fd/0", + "--branch", + refname, + ]); + } + let mut c = tokio::process::Command::from(c); + c.kill_on_drop(true); + let mut r = c.spawn()?; + // Safety: We passed piped() for all of these + let mut child_stdin = r.stdin.take().unwrap(); + let mut child_stdout = r.stdout.take().unwrap(); + let mut child_stderr = r.stderr.take().unwrap(); + // Copy our input to child stdout + let input_copier = async move { + let _n = tokio::io::copy(&mut src, &mut child_stdin).await?; + drop(child_stdin); + Ok::<_, anyhow::Error>(()) + }; + // Gather stdout/stderr to buffers + let output_copier = async move { + let mut child_stdout_buf = String::new(); + let mut child_stderr_buf = String::new(); + let (_a, _b) = tokio::try_join!( + child_stdout.read_to_string(&mut child_stdout_buf), + child_stderr.read_to_string(&mut child_stderr_buf) + )?; + Ok::<_, anyhow::Error>((child_stdout_buf, child_stderr_buf)) + }; + + let (_, (child_stdout, child_stderr)) = tokio::try_join!(input_copier, output_copier)?; + let status = r.wait().await?; + if !status.success() { + return Err(anyhow!( + "Failed to commit tar: {:?}: {}", + status, + child_stderr + )); + } + // TODO: trim string in place + let s = child_stdout.trim(); + Ok(s.to_string()) +} diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 9c3089008..71b62108e 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -250,6 +250,20 @@ async fn test_tar_import_export() -> Result<()> { Ok(()) } +#[tokio::test] +async fn test_tar_write() -> Result<()> { + let fixture = Fixture::new()?; + let r = ostree_ext::tar::write_tar(&fixture.destrepo, EXAMPLEOS_V0, "exampleos", None).await?; + let (commitdata, _) = fixture.destrepo.load_commit(&r)?; + assert_eq!( + EXAMPLEOS_CONTENT_CHECKSUM, + ostree::commit_get_content_checksum(&commitdata) + .unwrap() + .as_str() + ); + Ok(()) +} + fn skopeo_inspect(imgref: &str) -> Result { let out = Command::new("skopeo") .args(&["inspect", imgref]) From 678a4428a320f83e4ebba1585666193f16865d70 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 21 Sep 2021 14:18:53 -0400 Subject: [PATCH 120/775] Fetch via container-image-proxy https://github.com/cgwalters/container-image-proxy is prototype code to expose containers/image via a HTTP API suitable for use in non-Go programs. This has many advantages over us forking skopeo; the key one being on-demand layer fetching. 
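
At a high level, the lifecycle of the new (crate-internal) `ImageProxy` added
below looks like this; the digest argument is an illustrative placeholder:

    async fn fetch_one_layer(imgref: &ImageReference) -> Result<()> {
        let mut proxy = ImageProxy::new(imgref).await?;
        // GET /manifest from the child process, over an anonymous socketpair.
        let (digest, manifest_bytes) = proxy.fetch_manifest().await?;
        // GET /blobs/<digest>: an AsyncBufRead that streams the layer on demand.
        let blob = proxy.fetch_blob("sha256:...").await?;
        // ... parse the tar stream ...
        // Reap the child and surface anything it printed to stderr on failure.
        proxy.finalize().await?;
        Ok(())
    }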
--- .github/workflows/rust.yml | 6 +- ci/installdeps.sh | 10 ++ lib/Cargo.toml | 2 + lib/src/cli.rs | 37 ++++-- lib/src/container/imageproxy.rs | 139 +++++++++++++++++++ lib/src/container/import.rs | 229 +++++--------------------------- lib/src/container/mod.rs | 1 + lib/src/container/skopeo.rs | 3 +- 8 files changed, 216 insertions(+), 211 deletions(-) create mode 100755 ci/installdeps.sh create mode 100644 lib/src/container/imageproxy.rs diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index dd013be3a..05e8ac884 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -19,11 +19,9 @@ jobs: container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - - name: Install skopeo - run: yum -y install skopeo - - name: Update ostree - run: yum -y --enablerepo=updates-testing update ostree-devel - uses: actions/checkout@v2 + - name: Install deps + run: ./ci/installdeps.sh - name: Format run: cargo fmt -- --check -l - name: Build diff --git a/ci/installdeps.sh b/ci/installdeps.sh new file mode 100755 index 000000000..606032edb --- /dev/null +++ b/ci/installdeps.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -xeuo pipefail + +yum -y install skopeo +yum -y --enablerepo=updates-testing update ostree-devel + +git clone --depth=1 https://github.com/cgwalters/container-image-proxy +cd container-image-proxy +make +install -m 0755 bin/container-image-proxy /usr/bin/ diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 0c2b111ea..96028300b 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,6 +10,7 @@ version = "0.4.0-alpha.0" [dependencies] anyhow = "1.0" +async-compression = { version = "0.3", features = ["gzip", "tokio"] } bytes = "1.0.1" bitflags = "1.3.2" camino = "1.0.4" @@ -19,6 +20,7 @@ fn-error-context = "0.2.0" futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" +hyper = { version = "0.14", features = ["full"] } indicatif = "0.16.0" lazy_static = "1.4.0" libc = "0.2.92" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 6a595ab76..2b15ea4ec 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -73,6 +73,10 @@ enum ContainerOpts { /// Create an ostree ref pointing to the imported commit #[structopt(long)] write_ref: Option, + + /// Don't display progress + #[structopt(long)] + quiet: bool, }, /// Print information about an exported ostree-container image. @@ -155,17 +159,27 @@ fn tar_export(opts: &ExportOpts) -> Result<()> { } /// Import a container image with an encapsulated ostree commit. 
-async fn container_import(repo: &str, imgref: &str, write_ref: Option<&str>) -> Result<()> {
+async fn container_import(
+    repo: &str,
+    imgref: &str,
+    write_ref: Option<&str>,
+    quiet: bool,
+) -> Result<()> {
     let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?;
     let imgref = imgref.try_into()?;
     let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default());
     let target = indicatif::ProgressDrawTarget::stdout();
     let style = indicatif::ProgressStyle::default_bar();
-    let pb = indicatif::ProgressBar::new_spinner();
-    pb.set_draw_target(target);
-    pb.set_style(style.template("{spinner} {prefix} {msg}"));
-    pb.enable_steady_tick(200);
-    pb.set_message("Downloading...");
+    let pb = if !quiet {
+        let pb = indicatif::ProgressBar::new_spinner();
+        pb.set_draw_target(target);
+        pb.set_style(style.template("{spinner} {prefix} {msg}"));
+        pb.enable_steady_tick(200);
+        pb.set_message("Downloading...");
+        Some(pb)
+    } else {
+        None
+    };
     let opts = ImportOptions {
         progress: Some(tx_progress),
     };
@@ -176,10 +190,14 @@ async fn container_import(repo: &str, imgref: &str, write_ref: Option<&str>) ->
         tokio::select! {
             _ = rx_progress.changed() => {
                 let n = rx_progress.borrow().processed_bytes;
-                pb.set_message(format!("Processed: {}", indicatif::HumanBytes(n)));
+                if let Some(pb) = pb.as_ref() {
+                    pb.set_message(format!("Processed: {}", indicatif::HumanBytes(n)));
+                }
             }
             import = &mut import => {
-                pb.finish();
+                if let Some(pb) = pb.as_ref() {
+                    pb.finish();
+                }
                 break import?;
             }
         }
@@ -266,7 +284,8 @@ where
             repo,
             imgref,
             write_ref,
-        }) => container_import(&repo, &imgref, write_ref.as_deref()).await,
+            quiet,
+        }) => container_import(&repo, &imgref, write_ref.as_deref(), quiet).await,
         Opt::Container(ContainerOpts::Export {
             repo,
             rev,
diff --git a/lib/src/container/imageproxy.rs b/lib/src/container/imageproxy.rs
new file mode 100644
index 000000000..75c94dd93
--- /dev/null
+++ b/lib/src/container/imageproxy.rs
@@ -0,0 +1,139 @@
+//! Run container-image-proxy as a subprocess.
+//! This allows fetching a container image manifest and layers in a streaming fashion.
+
+use super::{ImageReference, Result};
+use crate::cmdext::CommandRedirectionExt;
+use anyhow::Context;
+use futures_util::{Future, FutureExt, TryFutureExt, TryStreamExt};
+use hyper::body::HttpBody;
+use hyper::client::conn::{Builder, SendRequest};
+use hyper::{Body, Request, StatusCode};
+use std::os::unix::prelude::AsRawFd;
+use std::pin::Pin;
+use std::process::Stdio;
+use tokio::io::{AsyncBufRead, AsyncReadExt};
+
+// What we get from boxing a fallible tokio::spawn() closure. Note the nested Result.
+type JoinFuture<T> = Pin<Box<dyn Future<Output = Result<Result<T>>>>>;
+
+/// Manage a child process proxy to fetch container images.
+pub(crate) struct ImageProxy {
+    proc: tokio::process::Child,
+    request_sender: SendRequest<Body>,
+    stderr: JoinFuture<String>,
+    driver: JoinFuture<()>,
+}
+
+impl ImageProxy {
+    pub(crate) async fn new(imgref: &ImageReference) -> Result<Self> {
+        // Communicate over an anonymous socketpair(2)
+        let (mysock, childsock) = tokio::net::UnixStream::pair()?;
+        let childsock = childsock.into_std()?;
+        let mut c = std::process::Command::new("container-image-proxy");
+        c.arg(&imgref.to_string());
+        c.stdout(Stdio::null()).stderr(Stdio::piped());
+        if let Some(port) = std::env::var_os("OSTREE_IMAGE_PROXY_PORT") {
+            c.arg("--port");
+            c.arg(port);
+        } else {
+            // Pass one half of the pair as fd 3 to the child
+            let target_fd = 3;
+            c.arg("--sockfd");
+            c.arg(&format!("{}", target_fd));
+            c.take_fd_n(childsock.as_raw_fd(), target_fd);
+        }
+        let mut c = tokio::process::Command::from(c);
+        c.kill_on_drop(true);
+        let mut proc = c.spawn()?;
+        // We've passed over the fd, close it.
+        drop(childsock);
+
+        // Safety: We passed `Stdio::piped()` above
+        let mut child_stderr = proc.stderr.take().unwrap();
+
+        // Connect via HTTP to the child
+        let (request_sender, connection) = Builder::new().handshake::<_, Body>(mysock).await?;
+        // Background driver that manages things like timeouts.
+        let driver = tokio::spawn(connection.map_err(anyhow::Error::msg))
+            .map_err(anyhow::Error::msg)
+            .boxed();
+        let stderr = tokio::spawn(async move {
+            let mut buf = String::new();
+            child_stderr.read_to_string(&mut buf).await?;
+            Ok(buf)
+        })
+        .map_err(anyhow::Error::msg)
+        .boxed();
+        Ok(Self {
+            proc,
+            stderr,
+            request_sender,
+            driver,
+        })
+    }
+
+    pub(crate) async fn fetch_manifest(&mut self) -> Result<(String, Vec<u8>)> {
+        let req = Request::builder()
+            .header("Host", "localhost")
+            .method("GET")
+            .uri("/manifest")
+            .body(Body::from(""))?;
+        let mut resp = self.request_sender.send_request(req).await?;
+        if resp.status() != StatusCode::OK {
+            return Err(anyhow::anyhow!("error from proxy: {}", resp.status()));
+        }
+        let hname = "Manifest-Digest";
+        let digest = resp
+            .headers()
+            .get(hname)
+            .ok_or_else(|| anyhow::anyhow!("Missing {} header", hname))?
+            .to_str()
+            .with_context(|| format!("Invalid {} header", hname))?
+ .to_string(); + let mut ret = Vec::new(); + while let Some(chunk) = resp.body_mut().data().await { + let chunk = chunk?; + ret.extend_from_slice(&chunk); + } + Ok((digest, ret)) + } + + pub(crate) async fn fetch_blob( + &mut self, + digest: &str, + ) -> Result { + let uri = format!("/blobs/{}", digest); + let req = Request::builder() + .header("Host", "localhost") + .method("GET") + .uri(&uri) + .body(Body::from(""))?; + let resp = self.request_sender.send_request(req).await?; + let status = resp.status(); + let body = TryStreamExt::map_err(resp.into_body(), |e| { + std::io::Error::new(std::io::ErrorKind::Other, e) + }); + let mut body = tokio_util::io::StreamReader::new(body); + if status != StatusCode::OK { + let mut s = String::new(); + let _: usize = body.read_to_string(&mut s).await?; + return Err(anyhow::anyhow!("error from proxy: {}: {}", status, s)); + } + Ok(body) + } + + pub(crate) async fn finalize(mut self) -> Result<()> { + // For now discard any errors from the connection + drop(self.request_sender); + let _r = self.driver.await??; + let status = self.proc.wait().await?; + if !status.success() { + if let Some(stderr) = self.stderr.await.map(|v| v.ok()).ok().flatten() { + anyhow::bail!("proxy failed: {}\n{}", status, stderr) + } else { + anyhow::bail!("proxy failed: {} (failed to fetch stderr)", status) + } + } + Ok(()) + } +} diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 56efb9590..f11a9eb7b 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -1,41 +1,37 @@ //! APIs for extracting OSTree commits from container images +//! +//! # External depenendency on container-image-proxy +//! +//! This code requires https://github.com/cgwalters/container-image-proxy +//! installed as a binary in $PATH. +//! +//! The rationale for this is that while there exist Rust crates to speak +//! the Docker distribution API, the Go library https://github.com/containers/image/ +//! supports key things we want for production use like: +//! +//! - Image mirroring and remapping; effectively `man containers-registries.conf` +//! For example, we need to support an administrator mirroring an ostree-container +//! into a disconnected registry, without changing all the pull specs. +//! - Signing +//! +//! Additionally, the proxy "upconverts" manifests into OCI, so we don't need to care +//! about parsing the Docker manifest format (as used by most registries still). +//! +//! // # Implementation // -// This code currently forks off `/usr/bin/skopeo` as a subprocess, and uses -// it to fetch the container content and convert it into a `docker-archive:` -// formatted tarball stream, which is written to a FIFO and parsed by -// this code. -// -// The rationale for this is that `/usr/bin/skopeo` is a frontend for -// the Go library https://github.com/containers/image/ which supports -// key things we want for production use like: -// -// - Image mirroring and remapping; effectively `man containers-registries.conf` -// For example, we need to support an administrator mirroring an ostree-container -// into a disconnected registry, without changing all the pull specs. -// - Signing -// -// # Import phases -// // First, we support explicitly fetching just the manifest: https://github.com/opencontainers/image-spec/blob/main/manifest.md // This will give us information about the layers it contains, and crucially the digest (sha256) of // the manifest is how higher level software can detect changes. 
// // Once we have the manifest, we expect it to point to a single `application/vnd.oci.image.layer.v1.tar+gzip` layer, // which is exactly what is exported by the [`crate::tar::export`] process. -// -// What we get from skopeo is a `docker-archive:` tarball, which then will contain this *inner* tarball -// layer that we extract and pass to the [`crate::tar::import`] code. use super::*; use anyhow::{anyhow, Context}; -use camino::Utf8Path; use fn_error_context::context; -use futures_util::{Future, FutureExt, TryFutureExt}; -use std::io::prelude::*; use std::pin::Pin; -use std::process::Stdio; use tokio::io::AsyncRead; use tracing::{event, instrument, Level}; @@ -91,174 +87,11 @@ impl AsyncRead for ProgressReader { /// Download the manifest for a target image and its sha256 digest. #[context("Fetching manifest")] pub async fn fetch_manifest(imgref: &OstreeImageReference) -> Result<(Vec, String)> { - let mut proc = skopeo::new_cmd(); - let imgref_base = &imgref.imgref; - proc.args(&["inspect", "--raw"]) - .arg(imgref_base.to_string()); - proc.stdout(Stdio::piped()); - let proc = skopeo::spawn(proc)?.wait_with_output().await?; - if !proc.status.success() { - let errbuf = String::from_utf8_lossy(&proc.stderr); - return Err(anyhow!("skopeo inspect failed\n{}", errbuf)); - } - let raw_manifest = proc.stdout; - let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), &raw_manifest)?; - let digest = format!("sha256:{}", hex::encode(digest.as_ref())); + let mut proxy = imageproxy::ImageProxy::new(&imgref.imgref).await?; + let (digest, raw_manifest) = proxy.fetch_manifest().await?; Ok((raw_manifest, digest)) } -/// Read the contents of the first .tar we find. -/// The first return value is an `AsyncRead` of that tar file. -/// The second return value is a background worker task that -/// owns stream processing. -pub async fn find_layer_tar( - src: impl AsyncRead + Send + Unpin + 'static, - blobid: &str, -) -> Result<(impl AsyncRead, impl Future>>)> { - // Convert the async input stream to synchronous, becuase we currently use the - // sync tar crate. - let pipein = crate::async_util::async_read_to_sync(src); - // An internal channel of Bytes - let (tx_buf, rx_buf) = tokio::sync::mpsc::channel(2); - let blob_sha256 = blobid - .strip_prefix("sha256:") - .ok_or_else(|| anyhow!("Expected sha256: in digest: {}", blobid))?; - let blob_symlink_target = format!("../{}.tar", blob_sha256); - let worker = tokio::task::spawn_blocking(move || { - let mut pipein = pipein; - let r = - find_layer_tar_sync(&mut pipein, blob_symlink_target, tx_buf).context("Import worker"); - // Ensure we read the entirety of the stream, otherwise skopeo will get an EPIPE. - let _ = std::io::copy(&mut pipein, &mut std::io::sink()); - r - }) - .map_err(anyhow::Error::msg); - // Bridge the channel to an AsyncRead - let stream = tokio_stream::wrappers::ReceiverStream::new(rx_buf); - let reader = tokio_util::io::StreamReader::new(stream); - Ok((reader, worker)) -} - -// Helper function invoked to synchronously parse a `docker-archive:` formatted tar stream, finding -// the desired layer tarball and writing its contents via a stream of byte chunks -// to a channel. -fn find_layer_tar_sync( - pipein: impl Read + Send + Unpin, - blob_symlink_target: String, - tx_buf: tokio::sync::mpsc::Sender>, -) -> Result<()> { - let mut archive = tar::Archive::new(pipein); - let mut buf = vec![0u8; 8192]; - let mut found = false; - for entry in archive.entries()? 
{ - let mut entry = entry.context("Reading entry")?; - if found { - // Continue to read to the end to avoid broken pipe error from skopeo - continue; - } - let path = entry.path()?; - let path: &Utf8Path = path.deref().try_into()?; - // We generally expect our layer to be first, but let's just skip anything - // unexpected to be robust against changes in skopeo. - if path.extension() != Some("tar") { - continue; - } - event!(Level::DEBUG, "Found {}", path); - - match entry.header().entry_type() { - tar::EntryType::Symlink => { - if let Some(name) = path.file_name() { - if name == "layer.tar" { - let target = entry - .link_name()? - .ok_or_else(|| anyhow!("Invalid link {}", path))?; - let target = Utf8Path::from_path(&*target) - .ok_or_else(|| anyhow!("Invalid non-UTF8 path {:?}", target))?; - if target != blob_symlink_target { - return Err(anyhow!( - "Found unexpected layer link {} -> {}", - path, - target - )); - } - } - } - } - tar::EntryType::Regular => loop { - let n = entry - .read(&mut buf[..]) - .context("Reading tar file contents")?; - let done = 0 == n; - let r = Ok::<_, std::io::Error>(bytes::Bytes::copy_from_slice(&buf[0..n])); - let receiver_closed = tx_buf.blocking_send(r).is_err(); - if receiver_closed || done { - found = true; - break; - } - }, - _ => continue, - } - } - if found { - Ok(()) - } else { - Err(anyhow!("Failed to find layer {}", blob_symlink_target)) - } -} - -/// Fetch a remote docker/OCI image and extract a specific uncompressed layer. -async fn fetch_layer<'s>( - imgref: &OstreeImageReference, - blobid: &str, - progress: Option>, -) -> Result<( - impl AsyncRead + Unpin + Send, - impl Future>, -)> { - let mut proc = skopeo::new_cmd(); - proc.stdout(Stdio::null()); - let tempdir = tempfile::Builder::new() - .prefix("ostree-rs-ext") - .tempdir_in("/var/tmp")?; - let tempdir = Utf8Path::from_path(tempdir.path()).unwrap(); - let fifo = &tempdir.join("skopeo.pipe"); - nix::unistd::mkfifo( - fifo.as_os_str(), - nix::sys::stat::Mode::from_bits(0o600).unwrap(), - )?; - tracing::trace!("skopeo pull starting to {}", fifo); - proc.arg("copy") - .arg(imgref.imgref.to_string()) - .arg(format!("docker-archive:{}", fifo)); - let proc = skopeo::spawn(proc)?; - let fifo_reader = ProgressReader { - reader: Box::new(tokio::fs::File::open(fifo).await?), - progress, - }; - let waiter = async move { - let res = proc.wait_with_output().await?; - if !res.status.success() { - return Err(anyhow!( - "skopeo failed: {}\n{}", - res.status, - String::from_utf8_lossy(&res.stderr) - )); - } - Ok(()) - } - .boxed(); - let (contents, worker) = find_layer_tar(fifo_reader, blobid).await?; - // This worker task joins the result of the stream processing thread with monitoring the skopeo process. - let worker = async move { - let (worker, waiter) = tokio::join!(worker, waiter); - // Explicitly declare as `()` to verify we have the right number of `?`. 
- let _: () = waiter?; - let _: () = worker??; - Ok::<_, anyhow::Error>(()) - }; - Ok((contents, worker)) -} - /// The result of an import operation #[derive(Debug)] pub struct Import { @@ -324,19 +157,23 @@ pub async fn import_from_manifest( let manifest: oci::Manifest = serde_json::from_slice(manifest_bytes)?; let layerid = require_one_layer_blob(&manifest)?; event!(Level::DEBUG, "target blob: {}", layerid); - let (blob, worker) = fetch_layer(imgref, layerid, options.progress).await?; - let blob = tokio::io::BufReader::new(blob); + let mut proxy = imageproxy::ImageProxy::new(&imgref.imgref).await?; + let blob = proxy.fetch_blob(layerid).await?; + let blob = async_compression::tokio::bufread::GzipDecoder::new(blob); + let blob = ProgressReader { + reader: Box::new(blob), + progress: options.progress, + }; let mut taropts: crate::tar::TarImportOptions = Default::default(); match &imgref.sigverify { SignatureSource::OstreeRemote(remote) => taropts.remote = Some(remote.clone()), SignatureSource::ContainerPolicy | SignatureSource::ContainerPolicyAllowInsecure => {} } - let import = crate::tar::import_tar(repo, blob, Some(taropts)); - let (ostree_commit, worker) = tokio::join!(import, worker); - // Let any errors from skopeo take precedence, because a failure to parse/find the layer tarball - // is likely due to an underlying error from that. - let _: () = worker?; - let ostree_commit = ostree_commit?; + let ostree_commit = crate::tar::import_tar(repo, blob, Some(taropts)) + .await + .with_context(|| format!("Parsing blob {}", layerid))?; + // FIXME write ostree commit after proxy finalization + proxy.finalize().await?; event!(Level::DEBUG, "created commit {}", ostree_commit); Ok(ostree_commit) } diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index aba26a9df..1628f4056 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -227,6 +227,7 @@ mod export; pub use export::*; mod import; pub use import::*; +mod imageproxy; mod oci; mod skopeo; diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs index 476ee9b7a..ccc957521 100644 --- a/lib/src/container/skopeo.rs +++ b/lib/src/container/skopeo.rs @@ -1,7 +1,6 @@ //! Fork skopeo as a subprocess -use super::Result; -use anyhow::Context; +use anyhow::{Context, Result}; use serde::Deserialize; use std::process::Stdio; use tokio::process::Command; From 432fa5938586974eaf8ba1840b120af47c5c841a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Sep 2021 10:50:55 -0400 Subject: [PATCH 121/775] imageproxy: Add context to failure to spawn Since missing the binary is probably going to be common. --- lib/src/container/imageproxy.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/imageproxy.rs b/lib/src/container/imageproxy.rs index 75c94dd93..f9824d30b 100644 --- a/lib/src/container/imageproxy.rs +++ b/lib/src/container/imageproxy.rs @@ -44,7 +44,7 @@ impl ImageProxy { } let mut c = tokio::process::Command::from(c); c.kill_on_drop(true); - let mut proc = c.spawn()?; + let mut proc = c.spawn().context("Failed to spawn container-image-proxy")?; // We've passed over the fd, close it. drop(childsock); From 55ea4a839ced0ba60093303d125725ebec92db79 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Sep 2021 11:50:02 -0400 Subject: [PATCH 122/775] imageproxy: Add some docstrings On general principle. 
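The `.context()` call added in the spawn patch above is the standard `anyhow` idiom for turning a bare `io::Error` ("No such file or directory") into an actionable message. In isolation, a sketch (only the error string comes from the patch itself):

```rust
use anyhow::Context;

fn spawn_proxy() -> anyhow::Result<std::process::Child> {
    std::process::Command::new("container-image-proxy")
        .spawn()
        .context("Failed to spawn container-image-proxy")
}
```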
---
 lib/src/container/imageproxy.rs | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/lib/src/container/imageproxy.rs b/lib/src/container/imageproxy.rs
index f9824d30b..70e6ec265 100644
--- a/lib/src/container/imageproxy.rs
+++ b/lib/src/container/imageproxy.rs
@@ -1,5 +1,6 @@
 //! Run container-image-proxy as a subprocess.
-//! This allows fetching a container image manifest and layers in a streaming fashioni.
+//! This allows fetching a container image manifest and layers in a streaming fashion.
+//! More information: https://github.com/cgwalters/container-image-proxy
 
 use super::{ImageReference, Result};
 use crate::cmdext::CommandRedirectionExt;
@@ -25,6 +26,7 @@ pub(crate) struct ImageProxy {
 }
 
 impl ImageProxy {
+    /// Create an image proxy that fetches the target image.
     pub(crate) async fn new(imgref: &ImageReference) -> Result<Self> {
         // Communicate over an anonymous socketpair(2)
         let (mysock, childsock) = tokio::net::UnixStream::pair()?;
@@ -72,6 +74,8 @@ impl ImageProxy {
         })
     }
 
+    /// Fetch the manifest.
+    /// https://github.com/opencontainers/image-spec/blob/main/manifest.md
     pub(crate) async fn fetch_manifest(&mut self) -> Result<(String, Vec<u8>)> {
         let req = Request::builder()
             .header("Host", "localhost")
@@ -98,6 +102,10 @@ impl ImageProxy {
         Ok((digest, ret))
     }
 
+    /// Fetch a blob identified by e.g. `sha256:<digest>`.
+    /// https://github.com/opencontainers/image-spec/blob/main/descriptor.md
+    /// Note that right now the proxy does verification of the digest:
+    /// https://github.com/cgwalters/container-image-proxy/issues/1#issuecomment-926712009
     pub(crate) async fn fetch_blob(
         &mut self,
         digest: &str,
@@ -122,6 +130,7 @@ impl ImageProxy {
         Ok(body)
     }
 
+    /// Close the HTTP connection and wait for the child process to exit successfully.
     pub(crate) async fn finalize(mut self) -> Result<()> {
         // For now discard any errors from the connection

From 25bd7f9ede4a9d7b4d0121361ae1365fa0727359 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Fri, 24 Sep 2021 14:46:42 -0400
Subject: [PATCH 123/775] lib/container: Use pin-project for ProgressReader

I recently read through https://fasterthanli.me/articles/pin-and-suffering
and realized the use of pin-project is what we need here.  This avoids
our needing an `Unpin` bound, which is nicer.

I also switched to avoid mutating our own structure (if the progress
receiver disconnects, we just keep trying to send) because I was
fighting `Pin` + mutability.
---
 lib/Cargo.toml              |  1 +
 lib/src/container/import.rs | 25 +++++++++++--------------
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 96028300b..40edef47b 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -31,6 +31,7 @@ openat-ext = "0.2.0"
 openssl = "0.10.33"
 ostree = { features = ["v2021_4"], version = "0.13.0" }
 phf = { features = ["macros"], version = "0.9.0" }
+pin-project = "1.0"
 serde = { features = ["derive"], version = "1.0.125" }
 serde_json = "1.0.64"
 structopt = "0.3.21"
diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs
index f11a9eb7b..598dc0c97 100644
--- a/lib/src/container/import.rs
+++ b/lib/src/container/import.rs
@@ -31,7 +31,6 @@
 use super::*;
 use anyhow::{anyhow, Context};
 use fn_error_context::context;
-use std::pin::Pin;
 use tokio::io::AsyncRead;
 use tracing::{event, instrument, Level};
@@ -45,22 +44,25 @@ pub struct ImportProgress {
 
 type Progress = tokio::sync::watch::Sender<ImportProgress>;
 
 /// A read wrapper that updates the download progress.
-struct ProgressReader { - reader: Box, +#[pin_project::pin_project] +struct ProgressReader { + #[pin] + reader: T, + #[pin] progress: Option, } -impl AsyncRead for ProgressReader { +impl AsyncRead for ProgressReader { fn poll_read( - mut self: std::pin::Pin<&mut Self>, + self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>, buf: &mut tokio::io::ReadBuf<'_>, ) -> std::task::Poll> { - let pinned = Pin::new(&mut self.reader); + let this = self.project(); let len = buf.filled().len(); - match pinned.poll_read(cx, buf) { + match this.reader.poll_read(cx, buf) { v @ std::task::Poll::Ready(Ok(_)) => { - let success = if let Some(progress) = self.progress.as_ref() { + if let Some(progress) = this.progress.as_ref().get_ref() { let state = { let mut state = *progress.borrow(); let newlen = buf.filled().len(); @@ -70,12 +72,7 @@ impl AsyncRead for ProgressReader { state }; // Ignore errors, if the caller disconnected from progress that's OK. - progress.send(state).is_ok() - } else { - true - }; - if !success { - let _ = self.progress.take(); + let _ = progress.send(state); } v } From 9ddc4849e92505d56f5fb217712509832e43b52e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Sep 2021 15:09:08 -0400 Subject: [PATCH 124/775] =?UTF-8?q?lib:=20Clean=20up=20AsyncRead=20?= =?UTF-8?q?=E2=86=92=20Read=20bridge?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I have now realized why so many APIs in std and tokio etc. return named but generic structures. It's because it helps ensure that things like `Send` bounds are automatically propagated as needed. If instead one wraps it behind a generic function, then all bounds need to be specified on the return value (so you'd need one for `_send` types and one for non-send etc). Prep for some other container cleanups. --- lib/src/async_util.rs | 27 +++++++++++++++------------ lib/src/tar/import.rs | 5 +++-- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/lib/src/async_util.rs b/lib/src/async_util.rs index 10f9ec60f..f5d5b7d4f 100644 --- a/lib/src/async_util.rs +++ b/lib/src/async_util.rs @@ -2,25 +2,28 @@ use std::io::prelude::*; use std::pin::Pin; use tokio::io::{AsyncRead, AsyncReadExt}; -struct ReadBridge { - reader: Pin>, +/// A [`std::io::Read`] implementation backed by an asynchronous source. +pub(crate) struct ReadBridge { + reader: Pin>, rt: tokio::runtime::Handle, } -impl Read for ReadBridge { +impl Read for ReadBridge { fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - let mut reader = self.reader.as_mut(); + let reader = &mut self.reader; self.rt.block_on(async { reader.read(buf).await }) } } -/// Bridge from AsyncRead to Read. -pub(crate) fn async_read_to_sync( - reader: S, -) -> impl Read + Send + Unpin + 'static { - let rt = tokio::runtime::Handle::current(); - let reader = Box::pin(reader); - ReadBridge { reader, rt } +impl ReadBridge { + /// Create a [`std::io::Read`] implementation backed by an asynchronous source. + /// + /// This is useful with e.g. [`tokio::task::spawn_blocking`]. 
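+    ///
+    /// A usage sketch (ours, not part of this patch; assumes a tokio runtime
+    /// is running):
+    ///
+    /// ```ignore
+    /// let bridge = ReadBridge::new(tokio::io::empty());
+    /// let copied = tokio::task::spawn_blocking(move || {
+    ///     let mut bridge = bridge;
+    ///     std::io::copy(&mut bridge, &mut std::io::sink())
+    /// })
+    /// .await??;
+    /// ```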
+ pub(crate) fn new(reader: T) -> Self { + let reader = Box::pin(reader); + let rt = tokio::runtime::Handle::current(); + ReadBridge { reader, rt } + } } #[cfg(test)] @@ -34,7 +37,7 @@ mod test { r: impl AsyncRead + Unpin + Send + 'static, expected_len: usize, ) -> Result<()> { - let mut r = async_read_to_sync(r); + let mut r = ReadBridge::new(r); let res = tokio::task::spawn_blocking(move || { let mut buf = Vec::new(); r.read_to_end(&mut buf)?; diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 5aa53dca3..056439f64 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -1,5 +1,6 @@ //! APIs for extracting OSTree commits from container images +use crate::async_util::ReadBridge; use crate::Result; use anyhow::{anyhow, Context}; use camino::Utf8Path; @@ -603,10 +604,10 @@ pub async fn import_tar( options: Option, ) -> Result { let options = options.unwrap_or_default(); - let pipein = crate::async_util::async_read_to_sync(src); + let src = ReadBridge::new(src); let repo = repo.clone(); let import = tokio::task::spawn_blocking(move || { - let mut archive = tar::Archive::new(pipein); + let mut archive = tar::Archive::new(src); let importer = Importer::new(&repo, options.remote); importer.import(&mut archive) }) From 984cc4884d38dce973139a00d8f5e50e1f7208a6 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 25 Sep 2021 08:50:42 -0400 Subject: [PATCH 125/775] =?UTF-8?q?lib/tar/write:=20Translate=20/etc=20?= =?UTF-8?q?=E2=86=92=20/usr/etc=20by=20default?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need this on general principle, because while ostree tries to support both `/etc` and `/usr/etc`, in practice rpm-ostree effectively requires ostree commits to have `/usr/etc`. --- lib/src/tar/write.rs | 1 + lib/tests/it/main.rs | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index 8872bd086..ac8e655f5 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -54,6 +54,7 @@ pub async fn write_tar( c.args(&[ "--no-bindings", "--tar-autocreate-parents", + r#"--tar-pathname-filter=^etc(.*),usr/etc\1"#, "--tree=tar=/proc/self/fd/0", "--branch", refname, diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 0b28580f4..ec52837bd 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -264,6 +264,9 @@ async fn test_tar_import_export() -> Result<()> { async fn test_tar_write() -> Result<()> { let fixture = Fixture::new()?; let r = ostree_ext::tar::write_tar(&fixture.destrepo, EXAMPLEOS_V0, "exampleos", None).await?; + // Here, we're importing a raw tarball into an ostree commit; this is a subtly different + // path than what we do above for the flow of "unpack tarball + ostree commit + export tar". + // But, they should be content-identical. 
let (commitdata, _) = fixture.destrepo.load_commit(&r)?; assert_eq!( EXAMPLEOS_CONTENT_CHECKSUM, @@ -271,6 +274,26 @@ async fn test_tar_write() -> Result<()> { .unwrap() .as_str() ); + + // Test translating /etc to /usr/etc + let tmpetc = fixture.path.join("tmproot/etc"); + let tmproot = tmpetc.parent().unwrap(); + let tmptar = fixture.path.join("testlayer.tar"); + std::fs::create_dir_all(&tmpetc)?; + std::fs::write(tmpetc.join("someconfig.conf"), b"")?; + bash!( + "tar cf {tmptar} -C {tmproot} .", + tmptar = tmptar.as_str(), + tmproot = tmproot.as_str() + )?; + let src = tokio::fs::File::open(&tmptar).await?; + let layer_commit = ostree_ext::tar::write_tar(&fixture.destrepo, src, "layer", None).await?; + bash!( + "ostree --repo={repo} ls {layer_commit} /usr/etc/someconfig.conf >/dev/null", + repo = fixture.destrepo_path.as_str(), + layer_commit = layer_commit.as_str() + )?; + Ok(()) } From 16a5338eafd44848f7c28199476a9e0e87828852 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 25 Sep 2021 13:33:34 -0400 Subject: [PATCH 126/775] lib: Lower bitflags requirement to just "1" This is dealt with in a newer nix but we need to match what is locked in rpm-ostree right now. See https://github.com/nix-rust/nix/commit/5495bbce52d3541f90d13e692a1cef34e186e100 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 40edef47b..5ec67dd97 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -12,7 +12,7 @@ version = "0.4.0-alpha.0" anyhow = "1.0" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bytes = "1.0.1" -bitflags = "1.3.2" +bitflags = "1" camino = "1.0.4" cjson = "0.1.1" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } From 5132e283c2b9717c72f662ef3ec1e88724d23b26 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Sep 2021 14:39:00 -0400 Subject: [PATCH 127/775] lib/container: Correctly handle uncompressed layers Trying to pull from `containers-storage:` it actually exports uncompressed tarballs, which makes sense. Clean up our layer handling to correctly match on the media type and return the correct decompressor. This is also prep for handling zstd. --- lib/src/container/imageproxy.rs | 11 +++++- lib/src/container/import.rs | 20 +++++------ lib/src/container/oci.rs | 60 +++++++++++++-------------------- 3 files changed, 42 insertions(+), 49 deletions(-) diff --git a/lib/src/container/imageproxy.rs b/lib/src/container/imageproxy.rs index 70e6ec265..d834357eb 100644 --- a/lib/src/container/imageproxy.rs +++ b/lib/src/container/imageproxy.rs @@ -2,7 +2,7 @@ //! This allows fetching a container image manifest and layers in a streaming fashion. //! More information: https://github.com/cgwalters/container-image-proxy -use super::{ImageReference, Result}; +use super::{oci, ImageReference, Result}; use crate::cmdext::CommandRedirectionExt; use anyhow::Context; use futures_util::{Future, FutureExt, TryFutureExt, TryStreamExt}; @@ -130,6 +130,15 @@ impl ImageProxy { Ok(body) } + /// A wrapper for [`fetch_blob`] which fetches a layer and decompresses it. + pub(crate) async fn fetch_layer_decompress( + &mut self, + layer: &oci::ManifestLayer, + ) -> Result> { + let blob = self.fetch_blob(layer.digest.as_str()).await?; + Ok(layer.new_async_decompressor(blob)?) + } + /// Close the HTTP connection and wait for the child process to exit successfully. 
pub(crate) async fn finalize(mut self) -> Result<()> { // For now discard any errors from the connection diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 598dc0c97..df5c811c7 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -98,14 +98,13 @@ pub struct Import { pub image_digest: String, } -fn require_one_layer_blob(manifest: &oci::Manifest) -> Result<&str> { - let layers = manifest.find_layer_blobids()?; - let n = layers.len(); - if let Some(layer) = layers.into_iter().next() { +fn require_one_layer_blob(manifest: &oci::Manifest) -> Result<&oci::ManifestLayer> { + let n = manifest.layers.len(); + if let Some(layer) = manifest.layers.iter().next() { if n > 1 { Err(anyhow!("Expected 1 layer, found {}", n)) } else { - Ok(layer) + Ok(&layer) } } else { // Validated by find_layer_blobids() @@ -152,13 +151,12 @@ pub async fn import_from_manifest( } let options = options.unwrap_or_default(); let manifest: oci::Manifest = serde_json::from_slice(manifest_bytes)?; - let layerid = require_one_layer_blob(&manifest)?; - event!(Level::DEBUG, "target blob: {}", layerid); + let layer = require_one_layer_blob(&manifest)?; + event!(Level::DEBUG, "target blob: {}", layer.digest.as_str()); let mut proxy = imageproxy::ImageProxy::new(&imgref.imgref).await?; - let blob = proxy.fetch_blob(layerid).await?; - let blob = async_compression::tokio::bufread::GzipDecoder::new(blob); + let blob = proxy.fetch_layer_decompress(layer).await?; let blob = ProgressReader { - reader: Box::new(blob), + reader: blob, progress: options.progress, }; let mut taropts: crate::tar::TarImportOptions = Default::default(); @@ -168,7 +166,7 @@ pub async fn import_from_manifest( } let ostree_commit = crate::tar::import_tar(repo, blob, Some(taropts)) .await - .with_context(|| format!("Parsing blob {}", layerid))?; + .with_context(|| format!("Parsing blob {}", layer.digest))?; // FIXME write ostree commit after proxy finalization proxy.finalize().await?; event!(Level::DEBUG, "created commit {}", ostree_commit); diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 2779f3c9e..c257e93e7 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -12,6 +12,7 @@ use std::{ collections::{BTreeMap, HashMap}, io::prelude::*, }; +use tokio::io::AsyncBufRead; /// Map the value from `uname -m` to the Go architecture. /// TODO find a more canonical home for this. @@ -23,10 +24,10 @@ static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! 
{ // OCI types, see https://github.com/opencontainers/image-spec/blob/master/media-types.md pub(crate) const OCI_TYPE_CONFIG_JSON: &str = "application/vnd.oci.image.config.v1+json"; pub(crate) const OCI_TYPE_MANIFEST_JSON: &str = "application/vnd.oci.image.manifest.v1+json"; -pub(crate) const OCI_TYPE_LAYER: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; -#[allow(dead_code)] -pub(crate) const IMAGE_LAYER_GZIP_MEDIA_TYPE: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; -pub(crate) const DOCKER_TYPE_LAYER: &str = "application/vnd.docker.image.rootfs.diff.tar.gzip"; +pub(crate) const OCI_TYPE_LAYER_GZIP: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; +pub(crate) const OCI_TYPE_LAYER_TAR: &str = "application/vnd.oci.image.layer.v1.tar"; +// FIXME - use containers/image to fully convert the manifest to OCI +const DOCKER_TYPE_LAYER_TARGZ: &str = "application/vnd.docker.image.rootfs.diff.tar.gzip"; /// Path inside an OCI directory to the blobs const BLOBDIR: &str = "blobs/sha256"; @@ -69,6 +70,22 @@ pub(crate) struct ManifestLayer { pub size: u64, } +impl ManifestLayer { + /// Create a decompressor for this layer, given a stream of input. + pub fn new_async_decompressor( + &self, + src: impl AsyncBufRead + Send + Unpin + 'static, + ) -> Result> { + match self.media_type.as_str() { + OCI_TYPE_LAYER_GZIP | DOCKER_TYPE_LAYER_TARGZ => Ok(Box::new( + tokio::io::BufReader::new(async_compression::tokio::bufread::GzipDecoder::new(src)), + )), + OCI_TYPE_LAYER_TAR => Ok(Box::new(src)), + o => Err(anyhow::anyhow!("Unhandled layer type: {}", o)), + } + } +} + #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct Manifest { @@ -79,31 +96,6 @@ pub(crate) struct Manifest { pub annotations: Option>, } -impl Manifest { - /// Return all layer (non-metadata) blobs. - /// It is an error if there are no layers present. - pub(crate) fn find_layer_blobids(&self) -> Result> { - let layers: Vec<_> = self - .layers - .iter() - .filter_map(|layer| { - if matches!( - layer.media_type.as_str(), - DOCKER_TYPE_LAYER | OCI_TYPE_LAYER - ) { - Some(layer.digest.as_str()) - } else { - None - } - }) - .collect(); - if layers.is_empty() { - return Err(anyhow!("No layers found")); - } - Ok(layers) - } -} - /// Completed blob metadata #[derive(Debug)] pub(crate) struct Blob { @@ -237,7 +229,7 @@ impl<'a> OciWriter<'a> { "digest": config_blob.digest_id(), }, "layers": [ - { "mediaType": OCI_TYPE_LAYER, + { "mediaType": OCI_TYPE_LAYER_GZIP, "size": rootfs_blob.blob.size, "digest": rootfs_blob.blob.digest_id(), } @@ -378,16 +370,10 @@ mod tests { #[test] fn manifest() -> Result<()> { let m: Manifest = serde_json::from_str(MANIFEST_DERIVE)?; - let mut blobids = m.find_layer_blobids()?.into_iter(); assert_eq!( - blobids.next().unwrap(), + m.layers[0].digest.as_str(), "sha256:ee02768e65e6fb2bb7058282338896282910f3560de3e0d6cd9b1d5985e8360d" ); - assert_eq!( - blobids.next().unwrap(), - "sha256:d203cef7e598fa167cb9e8b703f9f20f746397eca49b51491da158d64968b429" - ); - assert!(blobids.next().is_none()); Ok(()) } From 939822bdffff12d82b0f53c9ac6cf96a0caf2ff0 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Sep 2021 10:04:53 -0400 Subject: [PATCH 128/775] oci: Add `#[derive(Clone, PartialEq, Eq)]` It's generally useful. Prep for later patches. 
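One concrete thing these derives enable (our illustration; the commit itself only calls this preparatory) is structural comparison of manifests, e.g. computing which layers changed between two fetched versions of an image:

```rust
// Hypothetical helper, not part of this commit: relies on the new
// `PartialEq` derive on `Manifest` and `ManifestLayer`.
fn changed_layers<'a>(old: &Manifest, new: &'a Manifest) -> Vec<&'a ManifestLayer> {
    new.layers
        .iter()
        .filter(|l| !old.layers.iter().any(|o| o == *l))
        .collect()
}
```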
--- lib/src/container/oci.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index c257e93e7..86a1c8151 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -36,14 +36,14 @@ fn default_schema_version() -> u32 { 2 } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct IndexPlatform { pub architecture: String, pub os: String, } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct IndexManifest { pub media_type: String, @@ -53,7 +53,7 @@ pub(crate) struct IndexManifest { pub platform: Option, } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct Index { #[serde(default = "default_schema_version")] @@ -62,7 +62,7 @@ pub(crate) struct Index { pub manifests: Vec, } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct ManifestLayer { pub media_type: String, @@ -86,7 +86,7 @@ impl ManifestLayer { } } -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct Manifest { #[serde(default = "default_schema_version")] From 7163262f02bfb5292665bc0888a34a14528eaffb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Sep 2021 16:40:16 -0400 Subject: [PATCH 129/775] Add tokio_util module This takes the code from https://github.com/coreos/rpm-ostree/blob/d6ed262f83b33e7fd454699b96661ba323d04128/rust/src/utils.rs#L163 which makes more sense here because: - We depend on both glib and tokio and the code isn't rpm-ostree specific - We will want to use this internally to ensure our worker threads get cancelled. --- lib/src/lib.rs | 1 + lib/src/tokio_util.rs | 46 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 lib/src/tokio_util.rs diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 9f71e9bf8..9a25b1df8 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -27,6 +27,7 @@ pub mod container; pub mod diff; pub mod ima; pub mod tar; +pub mod tokio_util; mod cmdext; diff --git a/lib/src/tokio_util.rs b/lib/src/tokio_util.rs new file mode 100644 index 000000000..20a7aefb4 --- /dev/null +++ b/lib/src/tokio_util.rs @@ -0,0 +1,46 @@ +//! Helpers for bridging GLib async/mainloop with Tokio. + +use anyhow::Result; +use futures_util::Future; +use ostree::prelude::CancellableExt; + +/// Call a faillible future, while monitoring `cancellable` and return an error if cancelled. +pub async fn run_with_cancellable(f: F, cancellable: &ostree::gio::Cancellable) -> Result +where + F: Future>, +{ + // Bridge GCancellable to a tokio notification + let notify = std::sync::Arc::new(tokio::sync::Notify::new()); + let notify2 = notify.clone(); + cancellable.connect_cancelled(move |_| notify2.notify_one()); + cancellable.set_error_if_cancelled()?; + tokio::select! 
{
+        r = f => r,
+        _ = notify.notified() => {
+            Err(anyhow::anyhow!("Operation was cancelled"))
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[tokio::test]
+    async fn test_cancellable() {
+        let cancellable = ostree::gio::Cancellable::new();
+
+        let cancellable_copy = cancellable.clone();
+        let s = async move {
+            tokio::time::sleep(std::time::Duration::from_millis(200)).await;
+            cancellable_copy.cancel();
+        };
+        let r = async move {
+            tokio::time::sleep(std::time::Duration::from_secs(200)).await;
+            Ok(())
+        };
+        let r = run_with_cancellable(r, &cancellable);
+        let (_, r) = tokio::join!(s, r);
+        assert!(r.is_err());
+    }
+}

From e3dde5db1f6b20eaa4919c6a615a6c71c2019f7e Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Tue, 28 Sep 2021 13:03:33 -0400
Subject: [PATCH 130/775] lib: Fix some rustdoc warnings

URLs need `<>` wrapping.
---
 lib/src/container/imageproxy.rs | 2 +-
 lib/src/container/import.rs     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/src/container/imageproxy.rs b/lib/src/container/imageproxy.rs
index d834357eb..005a43f66 100644
--- a/lib/src/container/imageproxy.rs
+++ b/lib/src/container/imageproxy.rs
@@ -1,6 +1,6 @@
 //! Run container-image-proxy as a subprocess.
 //! This allows fetching a container image manifest and layers in a streaming fashion.
-//! More information: https://github.com/cgwalters/container-image-proxy
+//! More information: <https://github.com/cgwalters/container-image-proxy>
 
 use super::{oci, ImageReference, Result};
 use crate::cmdext::CommandRedirectionExt;
diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs
index df5c811c7..59211b266 100644
--- a/lib/src/container/import.rs
+++ b/lib/src/container/import.rs
@@ -2,11 +2,11 @@
 //!
 //! # External depenendency on container-image-proxy
 //!
-//! This code requires https://github.com/cgwalters/container-image-proxy
+//! This code requires <https://github.com/cgwalters/container-image-proxy>
 //! installed as a binary in $PATH.
 //!
 //! The rationale for this is that while there exist Rust crates to speak
-//! the Docker distribution API, the Go library https://github.com/containers/image/
+//! the Docker distribution API, the Go library <https://github.com/containers/image/>
 //! supports key things we want for production use like:
 //!

From 934000208edf2729b0c7d44beb4dfac62293ba11 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Tue, 28 Sep 2021 13:57:03 -0400
Subject: [PATCH 131/775] tokio_util: Add API to do GLib+`GCancellable` from
 `async fn`

This is the inverse of the previous API; we have cases today
where we're spawning a helper blocking thread that calls functions
internally that use `GCancellable`, but we weren't passed one.
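The intended call shape, sketched (our code, not from this patch; the closure runs on a worker thread and receives a cancellable it can pass down to GLib-style APIs, assuming the usual prelude traits are in scope):

```rust
let digest = spawn_blocking_cancellable(|cancellable| -> anyhow::Result<String> {
    // Blocking, GCancellable-aware work goes here.
    cancellable.set_error_if_cancelled()?;
    Ok(String::from("sha256:..."))
})
.await??;
```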
See https://github.com/gtk-rs/gtk-rs-core/issues/240 --- lib/src/tar/import.rs | 57 ++++++++++++++++++++----------------------- lib/src/tokio_util.rs | 28 ++++++++++++++++++++- 2 files changed, 54 insertions(+), 31 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 056439f64..b6c38021b 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -219,8 +219,8 @@ impl Importer { size: usize, checksum: &str, xattrs: Option, + cancellable: Option<&gio::Cancellable>, ) -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; let (uid, gid, mode) = header_attrs(entry.header())?; let w = self.repo.write_regfile( Some(checksum), @@ -256,6 +256,7 @@ impl Importer { size: usize, checksum: &str, xattrs: Option, + cancellable: Option<&gio::Cancellable>, ) -> Result<()> { let (uid, gid, mode) = header_attrs(entry.header())?; assert!(size <= SMALL_REGFILE_SIZE); @@ -268,7 +269,7 @@ impl Importer { mode, xattrs.as_ref(), &buf, - gio::NONE_CANCELLABLE, + cancellable, )?; debug_assert_eq!(c.as_str(), checksum); self.stats.regfile_small += 1; @@ -311,8 +312,8 @@ impl Importer { entry: tar::Entry, checksum: &str, xattrs: Option, + cancellable: Option<&gio::Cancellable>, ) -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; if self .repo .has_object(ostree::ObjectType::File, checksum, cancellable)? @@ -323,9 +324,9 @@ impl Importer { match entry.header().entry_type() { tar::EntryType::Regular => { if size > SMALL_REGFILE_SIZE { - self.import_large_regfile_object(entry, size, checksum, xattrs) + self.import_large_regfile_object(entry, size, checksum, xattrs, cancellable) } else { - self.import_small_regfile_object(entry, size, checksum, xattrs) + self.import_small_regfile_object(entry, size, checksum, xattrs, cancellable) } } tar::EntryType::Symlink => self.import_symlink_object(entry, checksum, xattrs), @@ -340,6 +341,7 @@ impl Importer { &mut self, entry: tar::Entry<'b, R>, path: &Utf8Path, + cancellable: Option<&gio::Cancellable>, ) -> Result<()> { let (parentname, mut name, mut objtype) = parse_object_entry_path(path)?; @@ -385,7 +387,7 @@ impl Importer { if is_xattrs { self.import_xattr_ref(entry, checksum) } else { - self.import_content_object(entry, &checksum, xattr_ref) + self.import_content_object(entry, &checksum, xattr_ref, cancellable) } } objtype => self.import_metadata(entry, &checksum, objtype), @@ -452,8 +454,12 @@ impl Importer { Ok(()) } - fn import(mut self, archive: &mut tar::Archive) -> Result { - self.repo.prepare_transaction(gio::NONE_CANCELLABLE)?; + fn import( + mut self, + archive: &mut tar::Archive, + cancellable: Option<&gio::Cancellable>, + ) -> Result { + self.repo.prepare_transaction(cancellable)?; // Create an iterator that skips over directories; we just care about the file names. let mut ents = archive.entries()?.filter_map(|e| match e { @@ -518,29 +524,20 @@ impl Importer { )?; // Write the commit object, which also verifies its checksum. - let actual_checksum = self.repo.write_metadata( - objtype, - Some(&checksum), - &commit, - gio::NONE_CANCELLABLE, - )?; + let actual_checksum = + self.repo + .write_metadata(objtype, Some(&checksum), &commit, cancellable)?; assert_eq!(actual_checksum.to_hex(), checksum); event!(Level::DEBUG, "Imported {}.commit", checksum); // Finally, write the detached metadata. 
- self.repo.write_commit_detached_metadata( - &checksum, - Some(&commitmeta), - gio::NONE_CANCELLABLE, - )?; + self.repo + .write_commit_detached_metadata(&checksum, Some(&commitmeta), cancellable)?; } else { // We're not doing any validation of the commit, so go ahead and write it. - let actual_checksum = self.repo.write_metadata( - objtype, - Some(&checksum), - &commit, - gio::NONE_CANCELLABLE, - )?; + let actual_checksum = + self.repo + .write_metadata(objtype, Some(&checksum), &commit, cancellable)?; assert_eq!(actual_checksum.to_hex(), checksum); event!(Level::DEBUG, "Imported {}.commit", checksum); @@ -559,7 +556,7 @@ impl Importer { )?; } _ => { - self.import_object(next_ent, &nextent_path)?; + self.import_object(next_ent, &nextent_path, cancellable)?; } } } @@ -568,12 +565,12 @@ impl Importer { let (entry, path) = entry?; if let Ok(p) = path.strip_prefix("objects/") { - self.import_object(entry, p)?; + self.import_object(entry, p, cancellable)?; } else if path.strip_prefix("xattrs/").is_ok() { self.import_xattrs(entry)?; } } - self.repo.commit_transaction(gio::NONE_CANCELLABLE)?; + self.repo.commit_transaction(cancellable)?; Ok(checksum) } @@ -606,10 +603,10 @@ pub async fn import_tar( let options = options.unwrap_or_default(); let src = ReadBridge::new(src); let repo = repo.clone(); - let import = tokio::task::spawn_blocking(move || { + let import = crate::tokio_util::spawn_blocking_cancellable(move |cancellable| { let mut archive = tar::Archive::new(src); let importer = Importer::new(&repo, options.remote); - importer.import(&mut archive) + importer.import(&mut archive, Some(cancellable)) }) .map_err(anyhow::Error::msg); let import: String = import.await??; diff --git a/lib/src/tokio_util.rs b/lib/src/tokio_util.rs index 20a7aefb4..e842b8f2e 100644 --- a/lib/src/tokio_util.rs +++ b/lib/src/tokio_util.rs @@ -2,10 +2,11 @@ use anyhow::Result; use futures_util::Future; +use ostree::gio; use ostree::prelude::CancellableExt; /// Call a faillible future, while monitoring `cancellable` and return an error if cancelled. -pub async fn run_with_cancellable(f: F, cancellable: &ostree::gio::Cancellable) -> Result +pub async fn run_with_cancellable(f: F, cancellable: &gio::Cancellable) -> Result where F: Future>, { @@ -22,6 +23,31 @@ where } } +struct CancelOnDrop(gio::Cancellable); + +impl Drop for CancelOnDrop { + fn drop(&mut self) { + self.0.cancel(); + } +} + +/// Wrapper for [`tokio::task::spawn_blocking`] which provides a [`gio::Cancellable`] that will be triggered on drop. +/// +/// This function should be used in a Rust/tokio native `async fn`, but that want to invoke +/// GLib style blocking APIs that use `GCancellable`. The cancellable will be triggered when this +/// future is dropped, which helps bound thread usage. +/// +/// This is in a sense the inverse of [`run_with_cancellable`]. +pub fn spawn_blocking_cancellable(f: F) -> tokio::task::JoinHandle +where + F: FnOnce(&gio::Cancellable) -> R + Send + 'static, + R: Send + 'static, +{ + tokio::task::spawn_blocking(move || { + let dropper = CancelOnDrop(gio::Cancellable::new()); + f(&dropper.0) + }) +} #[cfg(test)] mod tests { use super::*; From 3a190577cf142fb6609fbbec8bf0a3e68bb82067 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 28 Sep 2021 12:44:59 -0400 Subject: [PATCH 132/775] Add new refescape module Prep for work on the new container module, where we want to store container image references (e.g. `docker://quay.io/coreos/fedora`) as ostree refs. 
Several bits of that are not valid in ostree refs, such as the `:` or the double
`//` (which would be an empty filesystem path).

This escaping scheme uses `_` in a similar way as a `\` character is
used in other syntax.  For example, `:` is `_3A_` (hexadecimal).
`//` is escaped as `/_2F_` (i.e. the second `/` is escaped).
---
 lib/Cargo.toml       |   1 +
 lib/src/lib.rs       |   1 +
 lib/src/refescape.rs | 198 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 200 insertions(+)
 create mode 100644 lib/src/refescape.rs

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 5ec67dd97..bdefb2341 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -45,6 +45,7 @@ tracing = "0.1"
 [dev-dependencies]
 clap = "2.33.3"
 indoc = "1.0.3"
+quickcheck = "1"
 sh-inline = "0.1.0"
 structopt = "0.3.21"
 
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index 9a25b1df8..3f7ce54c5 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -26,6 +26,7 @@ pub mod cli;
 pub mod container;
 pub mod diff;
 pub mod ima;
+pub mod refescape;
 pub mod tar;
 pub mod tokio_util;
 
diff --git a/lib/src/refescape.rs b/lib/src/refescape.rs
new file mode 100644
index 000000000..7c9f2b0e1
--- /dev/null
+++ b/lib/src/refescape.rs
@@ -0,0 +1,198 @@
+//! Escape strings for use in ostree refs.
+//!
+//! It can be desirable to map arbitrary identifiers, such as RPM/dpkg
+//! package names or container image references (e.g. `docker://quay.io/examplecorp/os:latest`)
+//! into ostree refs (branch names) which have a quite restricted set
+//! of valid characters; basically alphanumeric, plus `/`, `-`, `_`.
+//!
+//! This escaping scheme uses `_` in a similar way as a `\` character is
+//! used in Rust unicode escaped values.  For example, `:` is `_3A_` (hexadecimal).
+//! Because the empty path is not valid, `//` is escaped as `/_2F_` (i.e. the second `/` is escaped).
+
+use anyhow::Result;
+use std::convert::TryInto;
+use std::fmt::Write;
+
+/// Escape a single string; this is a backend of [`prefix_escape_for_ref`].
+fn escape_for_ref(s: &str) -> Result<String> {
+    if s.is_empty() {
+        return Err(anyhow::anyhow!("Invalid empty string for ref"));
+    }
+    fn escape_c(r: &mut String, c: char) {
+        write!(r, "_{:02X}_", c as u32).unwrap()
+    }
+    let mut r = String::new();
+    let mut it = s
+        .chars()
+        .map(|c| {
+            if c == '\0' {
+                Err(anyhow::anyhow!(
+                    "Invalid embedded NUL in string for ostree ref"
+                ))
+            } else {
+                Ok(c)
+            }
+        })
+        .peekable();
+
+    let mut previous_alphanumeric = false;
+    while let Some(c) = it.next() {
+        let has_next = it.peek().is_some();
+        let c = c?;
+        let current_alphanumeric = c.is_ascii_alphanumeric();
+        match c {
+            c if current_alphanumeric => r.push(c),
+            '/' if previous_alphanumeric && has_next => r.push(c),
+            // Pass through `-` unconditionally
+            '-' => r.push(c),
+            // The underscore `_` quotes itself `__`.
+            '_' => r.push_str("__"),
+            o => escape_c(&mut r, o),
+        }
+        previous_alphanumeric = current_alphanumeric;
+    }
+    Ok(r)
+}
+
+/// Compute a string suitable for use as an OSTree ref, where `s` can be a (nearly)
+/// arbitrary UTF-8 string.  This requires a non-empty prefix.
+///
+/// The restrictions on `s` are:
+/// - The empty string is not supported
+/// - There may not be embedded `NUL` (`\0`) characters.
+///
+/// The intention behind requiring a prefix is that a common need is to use e.g.
+/// [`ostree::Repo::list_refs`] to find refs of a certain "type".
+///
+/// # Examples:
+///
+/// ```rust
+/// # fn test() -> anyhow::Result<()> {
+/// use ostree_ext::refescape;
+/// let s = "registry:quay.io/coreos/fedora:latest";
+/// assert_eq!(refescape::prefix_escape_for_ref("container", s)?,
+///            "container/registry_3A_quay_2E_io/coreos/fedora_3A_latest");
+/// # Ok(())
+/// # }
+/// ```
+pub fn prefix_escape_for_ref(prefix: &str, s: &str) -> Result<String> {
+    Ok(format!("{}/{}", prefix, escape_for_ref(s)?))
+}
+
+/// Reverse the effect of [`escape_for_ref()`].
+fn unescape_for_ref(s: &str) -> Result<String> {
+    let mut r = String::new();
+    let mut it = s.chars();
+    let mut buf = String::new();
+    while let Some(c) = it.next() {
+        match c {
+            c if c.is_ascii_alphanumeric() => {
+                r.push(c);
+            }
+            '-' | '/' => r.push(c),
+            '_' => {
+                let next = it.next();
+                if let Some('_') = next {
+                    r.push('_')
+                } else if let Some(c) = next {
+                    buf.clear();
+                    buf.push(c);
+                    while let Some(c) = it.next() {
+                        if c == '_' {
+                            break;
+                        }
+                        buf.push(c);
+                    }
+                    let v = u32::from_str_radix(&buf, 16)?;
+                    let c: char = v.try_into()?;
+                    r.push(c);
+                }
+            }
+            o => anyhow::bail!("Invalid character {}", o),
+        }
+    }
+    Ok(r)
+}
+
+/// Remove a prefix from an ostree ref, and return the unescaped remainder.
+///
+/// # Examples:
+///
+/// ```rust
+/// # fn test() -> anyhow::Result<()> {
+/// use ostree_ext::refescape;
+/// let s = "registry:quay.io/coreos/fedora:latest";
+/// assert_eq!(refescape::unprefix_unescape_ref("container", "container/registry_3A_quay_2E_io/coreos/fedora_3A_latest")?, s);
+/// # Ok(())
+/// # }
+/// ```
+pub fn unprefix_unescape_ref(prefix: &str, ostree_ref: &str) -> Result<String> {
+    let rest = ostree_ref
+        .strip_prefix(prefix)
+        .map(|s| s.strip_prefix('/'))
+        .flatten()
+        .ok_or_else(|| {
+            anyhow::anyhow!(
+                "ref does not match expected prefix {}/: {}",
+                ostree_ref,
+                prefix
+            )
+        })?;
+    Ok(unescape_for_ref(rest)?)
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use quickcheck::{quickcheck, TestResult};
+
+    const TESTPREFIX: &str = "testprefix/blah";
+
+    const UNCHANGED: &[&str] = &["foo", "foo/bar/baz-blah/foo"];
+    const ROUNDTRIP: &[&str] = &[
+        "localhost:5000/foo:latest",
+        "fedora/x86_64/coreos",
+        "/foo/bar/foo.oci-archive",
+        "docker://quay.io/exampleos/blah:latest",
+        "oci-archive:/path/to/foo.ociarchive",
+    ];
+    const CORNERCASES: &[&str] = &["/", "blah/", "/foo/"];
+
+    #[test]
+    fn escape() {
+        // These strings shouldn't change
+        for &v in UNCHANGED {
+            let escaped = &escape_for_ref(v).unwrap();
+            ostree::validate_rev(escaped).unwrap();
+            assert_eq!(escaped.as_str(), v);
+        }
+        // Roundtrip cases, plus unchanged cases
+        for &v in UNCHANGED.iter().chain(ROUNDTRIP).chain(CORNERCASES) {
+            let escaped = &prefix_escape_for_ref(TESTPREFIX, v).unwrap();
+            ostree::validate_rev(escaped).unwrap();
+            let unescaped = unprefix_unescape_ref(TESTPREFIX, &escaped).unwrap();
+            assert_eq!(v, unescaped);
+        }
+        // Explicit test
+        assert_eq!(
+            escape_for_ref(ROUNDTRIP[0]).unwrap(),
+            "localhost_3A_5000/foo_3A_latest"
+        );
+    }
+
+    fn roundtrip(s: String) -> TestResult {
+        // Ensure we only try strings which match the predicates.
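+        // A concrete sample of the scheme (our annotation, not part of the
+        // patch): "quay.io/foo:latest" escapes to "quay_2E_io/foo_3A_latest",
+        // since '.' is 0x2E and ':' is 0x3A.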
+ let r = prefix_escape_for_ref(TESTPREFIX, &s); + let escaped = match r { + Ok(v) => v, + Err(_) => return TestResult::discard(), + }; + let unescaped = unprefix_unescape_ref(TESTPREFIX, &escaped).unwrap(); + TestResult::from_bool(unescaped == s) + } + + #[test] + fn qcheck() { + quickcheck(roundtrip as fn(String) -> TestResult); + } +} From 66b2809b47e1bc39b47f969f09e400b5f47330fc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 28 Sep 2021 18:03:34 -0400 Subject: [PATCH 133/775] tar/write: Don't actually derive from base when using selinux Using `--selinux-policy-from-base` is very convenient but wrong here because it means that each derived commit also has the whole base filesystem tree, which greatly obscures its logical content. Instead, we get the base when we dynamically union things at the end. As noted the API we really want here is https://github.com/ostreedev/ostree/pull/2447 but it will take a bit for ostree to release with it. And even once it does, we need to do some other changes to switch over to parsing the tarball directly too. --- lib/src/tar/write.rs | 49 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index ac8e655f5..31cd0bdb2 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -9,8 +9,11 @@ use crate::cmdext::CommandRedirectionExt; use crate::Result; -use anyhow::anyhow; +use anyhow::{anyhow, Context}; +use ostree::gio; +use ostree::prelude::FileExt; use std::os::unix::prelude::AsRawFd; +use std::path::Path; use tokio::io::AsyncReadExt; use tracing::instrument; @@ -24,6 +27,31 @@ pub struct WriteTarOptions<'a> { pub selinux: bool, } +struct TempSePolicy { + tempdir: tempfile::TempDir, +} + +// Copy of logic from https://github.com/ostreedev/ostree/pull/2447 +// to avoid waiting for backport + releases +fn sepolicy_from_base(repo: &ostree::Repo, base: &str) -> Result { + let cancellable = gio::NONE_CANCELLABLE; + let policypath = "usr/etc/selinux"; + let tempdir = tempfile::tempdir()?; + let (root, _) = repo.read_commit(base, cancellable)?; + let policyroot = root.resolve_relative_path(policypath); + if policyroot.query_exists(cancellable) { + let policydest = tempdir.path().join(policypath); + std::fs::create_dir_all(policydest.parent().unwrap())?; + let opts = ostree::RepoCheckoutAtOptions { + mode: ostree::RepoCheckoutMode::User, + subpath: Some(Path::new(policypath).to_owned()), + ..Default::default() + }; + repo.checkout_at(Some(&opts), ostree::AT_FDCWD, policydest, base, cancellable)?; + } + Ok(TempSePolicy { tempdir: tempdir }) +} + /// Write the contents of a tarball as an ostree commit. #[allow(unsafe_code)] // For raw fd bits #[instrument(skip(repo, src))] @@ -35,6 +63,15 @@ pub async fn write_tar( ) -> Result { use std::process::Stdio; let options = options.unwrap_or_default(); + let sepolicy = if options.selinux { + if let Some(base) = options.base { + Some(sepolicy_from_base(repo, base).context("tar: Preparing sepolicy")?) 
+ } else { + None + } + } else { + None + }; let mut c = std::process::Command::new("ostree"); let repofd = repo.dfd_as_file()?; { @@ -45,11 +82,9 @@ pub async fn write_tar( .args(&["commit"]); c.take_fd_n(repofd.as_raw_fd(), 3); c.arg("--repo=/proc/self/fd/3"); - if let Some(base) = options.base { - if options.selinux { - c.arg("--selinux-policy-from-base"); - } - c.arg(&format!("--tree=ref={}", base)); + if let Some(sepolicy) = sepolicy.as_ref() { + c.arg("--selinux-policy"); + c.arg(sepolicy.tempdir.path()); } c.args(&[ "--no-bindings", @@ -86,6 +121,8 @@ pub async fn write_tar( let (_, (child_stdout, child_stderr)) = tokio::try_join!(input_copier, output_copier)?; let status = r.wait().await?; + // Ensure this lasted until the process exited + drop(sepolicy); if !status.success() { return Err(anyhow!( "Failed to commit tar: {:?}: {}", From b7ba07556c8c54a719f47d9a8f1ef47b5b7a0e4b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 30 Sep 2021 17:34:35 -0400 Subject: [PATCH 134/775] Bump to ostree 0.13.2 This lets us use the auto transaction bits. --- lib/Cargo.toml | 2 +- lib/src/tar/import.rs | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index bdefb2341..6f58f8f9d 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -29,7 +29,7 @@ nix = "0.22.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { features = ["v2021_4"], version = "0.13.0" } +ostree = { features = ["v2021_4"], version = "0.13.2" } phf = { features = ["macros"], version = "0.9.0" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index b6c38021b..2d2eb8ae9 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -459,7 +459,9 @@ impl Importer { archive: &mut tar::Archive, cancellable: Option<&gio::Cancellable>, ) -> Result { - self.repo.prepare_transaction(cancellable)?; + // Unfortunately our use of `&mut self` here clashes with borrowing the repo + let txn_repo = self.repo.clone(); + let txn = txn_repo.auto_transaction(cancellable)?; // Create an iterator that skips over directories; we just care about the file names. let mut ents = archive.entries()?.filter_map(|e| match e { @@ -570,7 +572,7 @@ impl Importer { self.import_xattrs(entry)?; } } - self.repo.commit_transaction(cancellable)?; + txn.commit(cancellable)?; Ok(checksum) } From eacd71291eee03b9c68e17c584ff4d9f0d4f3d36 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 1 Oct 2021 16:30:49 -0400 Subject: [PATCH 135/775] cli: Use the terms "encapsulate" and "unencapsulate" Since we're moving towards more "native" support for container images, we need to *very clearly* differentiate between the code that currently uses the terms "import" and "export" which are somewhat ambiguous. Starting with the CLI, replace "export" with "encapsulate" and "import" with "unencapsulate". This makes things much clearer I think - for example, that when unencapsulating, we discard the container image wrapper. When dealing with container images natively, we cannot fully discard the wrapper (manifest, etc.) because we need to understand the layers. While we're here, copy paste updated bits from the Rust module doc into the `README.md`. I hope in the future to deduplicate this. 
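On the API side, these prefixed strings parse into an `OstreeImageReference` via its `TryFrom<&str>` implementation, which is what the CLI does internally; a quick sketch (assuming, as elsewhere in this crate, that the conversion's error type is `anyhow::Error`):

```rust
use std::convert::TryInto;
use ostree_ext::container::OstreeImageReference;

fn parse() -> anyhow::Result<OstreeImageReference> {
    "ostree-remote-image:myremote:docker://quay.io/exampleos/exampleos:stable".try_into()
}
```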
--- README.md | 35 +++++++++++++++++++++++++++-------- lib/src/cli.rs | 12 +++++++----- 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 812a2231d..4829df396 100644 --- a/README.md +++ b/README.md @@ -49,11 +49,27 @@ This is used by `rpm-ostree ex apply-live`. ## module "container": Encapsulate ostree commits in OCI/Docker images - -### Export an OSTree commit into a container image +This module contains APIs to bidirectionally map between a single OSTree commit and a container image wrapping it. +Because container images are just layers of tarballs, this builds on the [`crate::tar`] module. +To emphasize this, the current high level model is that this is a one-to-one mapping - an ostree commit +can be exported (wrapped) into a container image, which will have exactly one layer. Upon import +back into an ostree repository, all container metadata except for its digested checksum will be discarded. +#### Signatures +OSTree supports GPG and ed25519 signatures natively, and it's expected by default that +when booting from a fetched container image, one verifies ostree-level signatures. +For ostree, a signing configuration is specified via an ostree remote. In order to +pair this configuration together, this library defines a "URL-like" string schema: +`ostree-remote-registry::` +A concrete instantiation might be e.g.: `ostree-remote-registry:fedora:quay.io/coreos/fedora-coreos:stable` +To parse and generate these strings, see [`OstreeImageReference`]. +#### Layering + +A key feature of container images is support for layering. At the moment, support +for this is [planned but not implemented](https://github.com/ostreedev/ostree-rs-ext/issues/12). +### Encapsulate an OSTree commit inside a container image ``` -$ ostree-ext-cli container export --repo=/path/to/repo exampleos/x86_64/stable docker://quay.io/exampleos/exampleos:stable +$ ostree-ext-cli container encapsulate --repo=/path/to/repo exampleos/x86_64/stable docker://quay.io/exampleos/exampleos:stable ``` You can then e.g. @@ -64,22 +80,25 @@ $ podman run --rm -ti --entrypoint bash quay.io/exampleos/exampleos:stable Running the container directly for e.g. CI testing is one use case. But more importantly, this container image can be pushed to any registry, and used as part of ostree-based operating system release engineering. -### Importing an ostree-container directly +### Unencapsulate an ostree-container directly A primary goal of this effort is to make it fully native to an ostree-based operating system to pull a container image directly too. -FUTURE: An important aspect of this is that the system will validate the GPG signature of the target OSTree commit, as well as validating the sha256 of the contained objects. +The CLI offers a method to "unencapsulate" - fetch a container image in a streaming fashion and +import the embedded OSTree commit. Here, you must use a prefix scheme which defines signature verification. -The CLI offers a method to import the exported commit: +- `ostree-remote-image:$remote:$imagereference`: This declares that the OSTree commit embedded in the image reference should be verified using the ostree remote config `$remote`. +- `ostree-image-signed:$imagereference`: Fetch via the containers/image stack, but require *some* signature verification (not via ostree). 
+- `ostree-unverified-image:$imagereference`: Don't do any signature verification ``` -$ ostree-ext-cli container import --repo=/ostree/repo docker://quay.io/exampleos/exampleos:stable +$ ostree-ext-cli container unencapsulate --repo=/ostree/repo ostree-remote-image:someremote:docker://quay.io/exampleos/exampleos:stable ``` But a project like rpm-ostree could hence support: ``` -$ rpm-ostree rebase quay.io/exampleos/exampleos:stable +$ rpm-ostree rebase ostree-remote-image:someremote:quay.io/exampleos/exampleos:stable ``` (Along with the usual `rpm-ostree upgrade` knowing to pull that container image) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 2b15ea4ec..a76d1f7ff 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -61,8 +61,9 @@ enum TarOpts { /// Options for container import/export. #[derive(Debug, StructOpt)] enum ContainerOpts { + #[structopt(alias = "import")] /// Import an ostree commit embedded in a remote container image - Import { + Unencapsulate { /// Path to the repository #[structopt(long)] repo: String, @@ -85,8 +86,9 @@ enum ContainerOpts { imgref: String, }, - /// Export an ostree commit to an OCI layout - Export { + /// Wrap an ostree commit into a container + #[structopt(alias = "export")] + Encapsulate { /// Path to the repository #[structopt(long)] repo: String, @@ -280,13 +282,13 @@ where Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await, Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt), Opt::Container(ContainerOpts::Info { imgref }) => container_info(imgref.as_str()).await, - Opt::Container(ContainerOpts::Import { + Opt::Container(ContainerOpts::Unencapsulate { repo, imgref, write_ref, quiet, }) => container_import(&repo, &imgref, write_ref.as_deref(), quiet).await, - Opt::Container(ContainerOpts::Export { + Opt::Container(ContainerOpts::Encapsulate { repo, rev, imgref, From 520fc40b4bb48847f0261e585d64685d2a42d37e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 29 Sep 2021 16:33:45 -0400 Subject: [PATCH 136/775] lib/tar: Pre-filter tar archive in Rust MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than trying to extend the ostree C code to support arbitrary transformations, let's do pre-parsing here in safe Rust. We keep the `/etc` → `/usr/etc` bits, but we also just completely drop everything not in `/usr` now. As noted in the comment, this pre-validation will hopefully also catch any corrupt tarballs that might be exploitable in the C libarchive codebase. --- lib/src/async_util.rs | 31 +++++- lib/src/tar/write.rs | 237 ++++++++++++++++++++++++++++++++++++++---- lib/tests/it/main.rs | 28 +++-- 3 files changed, 256 insertions(+), 40 deletions(-) diff --git a/lib/src/async_util.rs b/lib/src/async_util.rs index f5d5b7d4f..8aed32c3f 100644 --- a/lib/src/async_util.rs +++ b/lib/src/async_util.rs @@ -1,6 +1,6 @@ use std::io::prelude::*; use std::pin::Pin; -use tokio::io::{AsyncRead, AsyncReadExt}; +use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; /// A [`std::io::Read`] implementation backed by an asynchronous source. pub(crate) struct ReadBridge { @@ -26,6 +26,35 @@ impl ReadBridge { } } +/// A [`std::io::Write`] implementation backed by an asynchronous source. 
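+/// (Writes issued through the synchronous trait are forwarded to an underlying
+/// [`tokio::io::AsyncWrite`] by blocking on the captured runtime handle; this is
+/// the mirror image of `ReadBridge` above.)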
+pub(crate) struct WriteBridge<T> {
+    w: Pin<Box<T>>,
+    rt: tokio::runtime::Handle,
+}
+
+impl<T: AsyncWrite> Write for WriteBridge<T> {
+    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+        let w = &mut self.w;
+        self.rt.block_on(async { w.write(buf).await })
+    }
+
+    fn flush(&mut self) -> std::io::Result<()> {
+        let w = &mut self.w;
+        self.rt.block_on(async { w.flush().await })
+    }
+}
+
+impl<T: AsyncWrite> WriteBridge<T> {
+    /// Create a [`std::io::Write`] implementation backed by an asynchronous writer.
+    ///
+    /// This is useful with e.g. [`tokio::task::spawn_blocking`].
+    pub(crate) fn new(writer: T) -> Self {
+        let w = Box::pin(writer);
+        let rt = tokio::runtime::Handle::current();
+        WriteBridge { w, rt }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use std::convert::TryInto;
diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs
index 31cd0bdb2..b4ae95bd5 100644
--- a/lib/src/tar/write.rs
+++ b/lib/src/tar/write.rs
@@ -7,33 +7,46 @@
 //! In the future, this may also evolve into parsing the tar
 //! stream in Rust, not in C.
 
+use crate::async_util::{ReadBridge, WriteBridge};
 use crate::cmdext::CommandRedirectionExt;
 use crate::Result;
 use anyhow::{anyhow, Context};
+use camino::{Utf8Component, Utf8Path, Utf8PathBuf};
 use ostree::gio;
 use ostree::prelude::FileExt;
+use std::collections::BTreeMap;
+use std::convert::TryInto;
+use std::io::{BufWriter, Write};
 use std::os::unix::prelude::AsRawFd;
 use std::path::Path;
-use tokio::io::AsyncReadExt;
+use std::process::Stdio;
+use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
 use tracing::instrument;
 
 /// Configuration for tar layer commits.
 #[derive(Debug, Default)]
-pub struct WriteTarOptions<'a> {
+pub struct WriteTarOptions {
     /// Base ostree commit hash
-    pub base: Option<&'a str>,
+    pub base: Option<String>,
     /// Enable SELinux labeling from the base commit
     /// Requires the `base` option.
     pub selinux: bool,
 }
 
-struct TempSePolicy {
-    tempdir: tempfile::TempDir,
+/// The result of writing a tar stream.
+///
+/// This includes some basic data on the number of files that were filtered
+/// out because they were not in `/usr`.
+pub struct WriteTarResult {
+    /// The resulting OSTree commit SHA-256.
+    pub commit: String,
+    /// Number of paths in a prefix (e.g. `/var` or `/boot`) which were discarded.
+    pub filtered: BTreeMap<String, u32>,
 }
 
 // Copy of logic from https://github.com/ostreedev/ostree/pull/2447
 // to avoid waiting for backport + releases
-fn sepolicy_from_base(repo: &ostree::Repo, base: &str) -> Result<TempSePolicy> {
+fn sepolicy_from_base(repo: &ostree::Repo, base: &str) -> Result<tempfile::TempDir> {
     let cancellable = gio::NONE_CANCELLABLE;
     let policypath = "usr/etc/selinux";
     let tempdir = tempfile::tempdir()?;
@@ -49,7 +62,116 @@ fn sepolicy_from_base(repo: &ostree::Repo, base: &str) -> Result<tempfile::TempDir> {
         };
         repo.checkout_at(Some(&opts), ostree::AT_FDCWD, policydest, base, cancellable)?;
     }
-    Ok(TempSePolicy { tempdir: tempdir })
+    Ok(tempdir)
+}
+
+#[derive(Debug)]
+enum NormalizedPathResult<'a> {
+    Filtered(&'a str),
+    Normal(Utf8PathBuf),
+}
+
+fn normalize_validate_path<'a>(path: &'a Utf8Path) -> Result<NormalizedPathResult<'a>> {
+    // This converts e.g. `foo//bar/./baz` into `foo/bar/baz`.
+    let mut components = path
+        .components()
+        .map(|part| {
+            match part {
+                // Convert absolute paths to relative
+                camino::Utf8Component::RootDir => Ok(camino::Utf8Component::CurDir),
+                // Allow ./ and regular parts
+                camino::Utf8Component::Normal(_) | camino::Utf8Component::CurDir => Ok(part),
+                // Barf on Windows paths as well as Unix path uplinks `..`
+                _ => Err(anyhow!("Invalid path: {}", path)),
+            }
+        })
+        .peekable();
+    let mut ret = Utf8PathBuf::new();
+    // Insert a leading `./` if not present
+    if let Some(Ok(camino::Utf8Component::Normal(_))) = components.peek() {
+        ret.push(camino::Utf8Component::CurDir);
+    }
+    let mut found_first = false;
+    for part in components {
+        let part = part?;
+        if !found_first {
+            if let Utf8Component::Normal(part) = part {
+                found_first = true;
+                // Now, rewrite /etc -> /usr/etc, and discard everything not in /usr.
+                match part {
+                    "usr" => ret.push(part),
+                    "etc" => {
+                        ret.push("usr/etc");
+                    }
+                    o => return Ok(NormalizedPathResult::Filtered(o)),
+                }
+            } else {
+                ret.push(part);
+            }
+        } else {
+            ret.push(part);
+        }
+    }
+
+    Ok(NormalizedPathResult::Normal(ret))
+}
+
+/// Perform various filtering on imported tar archives.
+/// - Move /etc to /usr/etc
+/// - Entirely drop files not in /usr
+///
+/// This also acts as a Rust "pre-parser" of the tar archive, hopefully
+/// catching anything corrupt that might be exploitable from the C libarchive side.
+/// Remember that we're parsing this while we're downloading it, and in order
+/// to verify integrity we rely on the total sha256 of the blob, so all content
+/// written before then must be considered untrusted.
+fn filter_tar(src: impl std::io::Read, dest: impl std::io::Write) -> Result<BTreeMap<String, u32>> {
+    let src = std::io::BufReader::new(src);
+    let mut src = tar::Archive::new(src);
+    let dest = BufWriter::new(dest);
+    let mut dest = tar::Builder::new(dest);
+    let mut filtered = BTreeMap::new();
+
+    let ents = src.entries()?;
+    for entry in ents {
+        let entry = entry?;
+        let path = entry.path()?;
+        let path: &Utf8Path = (&*path).try_into()?;
+
+        let normalized = match normalize_validate_path(path)? {
+            NormalizedPathResult::Filtered(path) => {
+                if let Some(v) = filtered.get_mut(path) {
+                    *v += 1;
+                } else {
+                    filtered.insert(path.to_string(), 1);
+                }
+                continue;
+            }
+            NormalizedPathResult::Normal(path) => path,
+        };
+
+        let mut header = entry.header().clone();
+        dest.append_data(&mut header, normalized, entry)?;
+    }
+    dest.into_inner()?.flush()?;
+    Ok(filtered)
+}
+
+/// Asynchronous wrapper for filter_tar()
+async fn filter_tar_async(
+    src: impl AsyncRead + Send + 'static,
+    mut dest: impl AsyncWrite + Send + Unpin,
+) -> Result<BTreeMap<String, u32>> {
+    let (tx_buf, mut rx_buf) = tokio::io::duplex(8192);
+    let tar_transformer = tokio::task::spawn_blocking(move || -> Result<_> {
+        let src = ReadBridge::new(src);
+        let dest = WriteBridge::new(tx_buf);
+        filter_tar(src, dest)
+    });
+    let copier = tokio::io::copy(&mut rx_buf, &mut dest);
+    let (r, v) = tokio::join!(tar_transformer, copier);
+    let _v: u64 = v?;
+    Ok(r??)
+}
 
 /// Write the contents of a tarball as an ostree commit.
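Editorial aside: the wrapper above is a small instance of a general pattern, running a synchronous transform (`filter_tar`) on a `spawn_blocking` thread bridged to async I/O through `tokio::io::duplex`. A minimal usage sketch, assuming only the `filter_tar_async` signature above (the file name is made up; the integration test later in this patch does essentially the same thing):

```rust
// Feed a tar stream from disk through the filter and collect the
// per-prefix counts of dropped entries (e.g. "var", "boot").
async fn filter_example() -> anyhow::Result<()> {
    let src = tokio::fs::File::open("layer.tar").await?; // hypothetical input
    let mut dest = Vec::new(); // Vec<u8> is a usable tokio AsyncWrite sink
    let filtered = filter_tar_async(src, &mut dest).await?;
    for (prefix, count) in filtered {
        eprintln!("filtered {} entries under /{}", count, prefix);
    }
    Ok(())
}
```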
@@ -57,15 +179,15 @@ fn sepolicy_from_base(repo: &ostree::Repo, base: &str) -> Result<tempfile::TempDir> {
 #[instrument(skip(repo, src))]
 pub async fn write_tar(
     repo: &ostree::Repo,
-    mut src: impl tokio::io::AsyncRead + Send + Unpin + 'static,
+    src: impl tokio::io::AsyncRead + Send + Unpin + 'static,
     refname: &str,
-    options: Option<WriteTarOptions<'_>>,
-) -> Result<String> {
-    use std::process::Stdio;
+    options: Option<WriteTarOptions>,
+) -> Result<WriteTarResult> {
+    let repo = repo.clone();
     let options = options.unwrap_or_default();
     let sepolicy = if options.selinux {
         if let Some(base) = options.base {
-            Some(sepolicy_from_base(repo, base).context("tar: Preparing sepolicy")?)
+            Some(sepolicy_from_base(&repo, &base).context("tar: Preparing sepolicy")?)
         } else {
             None
         }
@@ -84,12 +206,11 @@ pub async fn write_tar(
     c.arg("--repo=/proc/self/fd/3");
     if let Some(sepolicy) = sepolicy.as_ref() {
         c.arg("--selinux-policy");
-        c.arg(sepolicy.tempdir.path());
+        c.arg(sepolicy.path());
     }
     c.args(&[
         "--no-bindings",
         "--tar-autocreate-parents",
-        r#"--tar-pathname-filter=^etc(.*),usr/etc\1"#,
         "--tree=tar=/proc/self/fd/0",
         "--branch",
         refname,
@@ -99,15 +220,11 @@ pub async fn write_tar(
     c.kill_on_drop(true);
     let mut r = c.spawn()?;
     // Safety: We passed piped() for all of these
-    let mut child_stdin = r.stdin.take().unwrap();
+    let child_stdin = r.stdin.take().unwrap();
     let mut child_stdout = r.stdout.take().unwrap();
     let mut child_stderr = r.stderr.take().unwrap();
-    // Copy our input to child stdout
-    let input_copier = async move {
-        let _n = tokio::io::copy(&mut src, &mut child_stdin).await?;
-        drop(child_stdin);
-        Ok::<_, anyhow::Error>(())
-    };
+    // Copy the filtered tar stream to child stdin
+    let filtered_result = filter_tar_async(src, child_stdin);
     // Gather stdout/stderr to buffers
     let output_copier = async move {
         let mut child_stdout_buf = String::new();
@@ -119,7 +236,8 @@ pub async fn write_tar(
         Ok::<_, anyhow::Error>((child_stdout_buf, child_stderr_buf))
     };
 
-    let (_, (child_stdout, child_stderr)) = tokio::try_join!(input_copier, output_copier)?;
+    let (filtered_result, (child_stdout, child_stderr)) =
+        tokio::try_join!(filtered_result, output_copier)?;
     let status = r.wait().await?;
     // Ensure this lasted until the process exited
     drop(sepolicy);
@@ -132,5 +250,78 @@ pub async fn write_tar(
     }
     // TODO: trim string in place
     let s = child_stdout.trim();
-    Ok(s.to_string())
+    Ok(WriteTarResult {
+        commit: s.to_string(),
+        filtered: filtered_result,
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use std::io::Cursor;
+
+    #[test]
+    fn test_normalize_path() {
+        let valid = &[
+            ("/usr/bin/blah", "./usr/bin/blah"),
+            ("usr/bin/blah", "./usr/bin/blah"),
+            ("usr///share/.//blah", "./usr/share/blah"),
+            ("./", "."),
+        ];
+        for &(k, v) in valid {
+            let r = normalize_validate_path(k.into()).unwrap();
+            match r {
+                NormalizedPathResult::Filtered(o) => {
+                    panic!("Case {} should not be filtered as {}", k, o)
+                }
+                NormalizedPathResult::Normal(p) => {
+                    assert_eq!(v, p.as_str());
+                }
+            }
+        }
+        let filtered = &[
+            ("/boot/vmlinuz", "boot"),
+            ("var/lib/blah", "var"),
+            ("./var/lib/blah", "var"),
+        ];
+        for &(k, v) in filtered {
+            match normalize_validate_path(k.into()).unwrap() {
+                NormalizedPathResult::Filtered(f) => {
+                    assert_eq!(v, f);
+                }
+                NormalizedPathResult::Normal(_) => {
+                    panic!("{} should be filtered", k)
+                }
+            }
+        }
+        let errs = &["usr/foo/../../bar"];
+        for &k in errs {
+            assert!(normalize_validate_path(k.into()).is_err());
+        }
+    }
+
+    #[tokio::test]
+    async fn tar_filter() -> Result<()> {
+        let tempd = tempfile::tempdir()?;
+        let rootfs = 
&tempd.path().join("rootfs"); + std::fs::create_dir_all(rootfs.join("etc/systemd/system"))?; + std::fs::write(rootfs.join("etc/systemd/system/foo.service"), "fooservice")?; + std::fs::write(rootfs.join("blah"), "blah")?; + let rootfs_tar_path = &tempd.path().join("rootfs.tar"); + let rootfs_tar = std::fs::File::create(rootfs_tar_path)?; + let mut rootfs_tar = tar::Builder::new(rootfs_tar); + rootfs_tar.append_dir_all(".", rootfs)?; + let _ = rootfs_tar.into_inner()?; + let mut dest = Vec::new(); + let src = tokio::io::BufReader::new(tokio::fs::File::open(rootfs_tar_path).await?); + filter_tar_async(src, &mut dest).await?; + let dest = dest.as_slice(); + let mut final_tar = tar::Archive::new(Cursor::new(dest)); + let destdir = &tempd.path().join("destdir"); + final_tar.unpack(destdir)?; + assert!(destdir.join("usr/etc/systemd/system/foo.service").exists()); + assert!(!destdir.join("blah").exists()); + Ok(()) + } } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index ec52837bd..4f9c8b9d8 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -263,36 +263,32 @@ async fn test_tar_import_export() -> Result<()> { #[tokio::test] async fn test_tar_write() -> Result<()> { let fixture = Fixture::new()?; - let r = ostree_ext::tar::write_tar(&fixture.destrepo, EXAMPLEOS_V0, "exampleos", None).await?; - // Here, we're importing a raw tarball into an ostree commit; this is a subtly different - // path than what we do above for the flow of "unpack tarball + ostree commit + export tar". - // But, they should be content-identical. - let (commitdata, _) = fixture.destrepo.load_commit(&r)?; - assert_eq!( - EXAMPLEOS_CONTENT_CHECKSUM, - ostree::commit_get_content_checksum(&commitdata) - .unwrap() - .as_str() - ); - // Test translating /etc to /usr/etc let tmpetc = fixture.path.join("tmproot/etc"); - let tmproot = tmpetc.parent().unwrap(); - let tmptar = fixture.path.join("testlayer.tar"); std::fs::create_dir_all(&tmpetc)?; std::fs::write(tmpetc.join("someconfig.conf"), b"")?; + let tmproot = tmpetc.parent().unwrap(); + let tmpvarlib = &tmproot.join("var/lib"); + std::fs::create_dir_all(tmpvarlib)?; + std::fs::write(tmpvarlib.join("foo.log"), "foolog")?; + std::fs::write(tmpvarlib.join("bar.log"), "barlog")?; + std::fs::create_dir_all(tmproot.join("boot"))?; + let tmptar = fixture.path.join("testlayer.tar"); bash!( "tar cf {tmptar} -C {tmproot} .", tmptar = tmptar.as_str(), tmproot = tmproot.as_str() )?; let src = tokio::fs::File::open(&tmptar).await?; - let layer_commit = ostree_ext::tar::write_tar(&fixture.destrepo, src, "layer", None).await?; + let r = ostree_ext::tar::write_tar(&fixture.destrepo, src, "layer", None).await?; bash!( "ostree --repo={repo} ls {layer_commit} /usr/etc/someconfig.conf >/dev/null", repo = fixture.destrepo_path.as_str(), - layer_commit = layer_commit.as_str() + layer_commit = r.commit.as_str() )?; + assert_eq!(r.filtered.len(), 2); + assert_eq!(*r.filtered.get("var").unwrap(), 4); + assert_eq!(*r.filtered.get("boot").unwrap(), 1); Ok(()) } From 3a28e2d511ac36b48d2b9468f75d2d1532846c67 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 4 Oct 2021 15:08:28 -0400 Subject: [PATCH 137/775] cli: Switch to nested match for container opts A bit more rightward drift, but less duplication. Prep for further commands. 
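For illustration, a minimal sketch of the resulting dispatch shape (hypothetical variants, not the real option types):

```rust
// One outer match on the top-level enum, one nested match on the
// container subcommands; new subcommands slot into the inner match.
enum ContainerOpts {
    Info { imgref: String },
}
enum Opt {
    Container(ContainerOpts),
}

fn dispatch(opt: Opt) {
    match opt {
        Opt::Container(o) => match o {
            ContainerOpts::Info { imgref } => println!("info: {}", imgref),
        },
    }
}
```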
--- lib/src/cli.rs | 56 ++++++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 27 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index a76d1f7ff..c834967b5 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -281,33 +281,35 @@ where match opt { Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await, Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt), - Opt::Container(ContainerOpts::Info { imgref }) => container_info(imgref.as_str()).await, - Opt::Container(ContainerOpts::Unencapsulate { - repo, - imgref, - write_ref, - quiet, - }) => container_import(&repo, &imgref, write_ref.as_deref(), quiet).await, - Opt::Container(ContainerOpts::Encapsulate { - repo, - rev, - imgref, - labels, - cmd, - }) => { - let labels: Result> = labels - .into_iter() - .map(|l| { - let mut parts = l.splitn(2, '='); - let k = parts.next().unwrap(); - let v = parts - .next() - .ok_or_else(|| anyhow::anyhow!("Missing '=' in label {}", l))?; - Ok((k.to_string(), v.to_string())) - }) - .collect(); - container_export(&repo, &rev, &imgref, labels?, cmd).await - } + Opt::Container(o) => match o { + ContainerOpts::Info { imgref } => container_info(imgref.as_str()).await, + ContainerOpts::Unencapsulate { + repo, + imgref, + write_ref, + quiet, + } => container_import(&repo, &imgref, write_ref.as_deref(), quiet).await, + ContainerOpts::Encapsulate { + repo, + rev, + imgref, + labels, + cmd, + } => { + let labels: Result> = labels + .into_iter() + .map(|l| { + let mut parts = l.splitn(2, '='); + let k = parts.next().unwrap(); + let v = parts + .next() + .ok_or_else(|| anyhow::anyhow!("Missing '=' in label {}", l))?; + Ok((k.to_string(), v.to_string())) + }) + .collect(); + container_export(&repo, &rev, &imgref, labels?, cmd).await + } + }, Opt::ImaSign(ref opts) => ima_sign(opts), } } From 4500941b1067faf3c75b83dbb5df6df05f44fb01 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 5 Oct 2021 18:20:02 -0400 Subject: [PATCH 138/775] container/oci: Add config to Manifest It's required, and we need it to compute the "ImageID". xref https://github.com/ostreedev/ostree-rs-ext/issues/115 --- lib/src/container/oci.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 86a1c8151..c8e1f8a7e 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -92,10 +92,19 @@ pub(crate) struct Manifest { #[serde(default = "default_schema_version")] pub schema_version: u32, + pub config: ManifestLayer, pub layers: Vec, pub annotations: Option>, } +impl Manifest { + /// Return the digest of the configuration layer. + /// https://github.com/opencontainers/image-spec/blob/main/config.md + pub(crate) fn imageid(&self) -> &str { + self.config.digest.as_str() + } +} + /// Completed blob metadata #[derive(Debug)] pub(crate) struct Blob { From 5dd5fa18bc5cdac23656a003acdeb5939018e927 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 5 Oct 2021 18:24:59 -0400 Subject: [PATCH 139/775] container/oci: Use our Manifest struct when writing This way we get type safety, we're testing the API etc. 
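As an editorial sketch of what the typed path buys (the digest is a fake placeholder; the field shapes follow the `Manifest`/`ManifestLayer` definitions from the previous patch):

```rust
// Construction is compiler-checked, unlike the serde_json::json! literal
// this patch removes; imageid() is just the config blob digest.
let config = ManifestLayer {
    media_type: OCI_TYPE_CONFIG_JSON.to_string(),
    size: 1024,
    digest: "sha256:deadbeef".to_string(), // placeholder digest
};
let manifest = Manifest {
    schema_version: 2,
    config,
    layers: Vec::new(),
    annotations: None,
};
assert_eq!(manifest.imageid(), "sha256:deadbeef");
```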
--- lib/src/container/oci.rs | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index c8e1f8a7e..4c9724c84 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -8,10 +8,8 @@ use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; use phf::phf_map; use serde::{Deserialize, Serialize}; -use std::{ - collections::{BTreeMap, HashMap}, - io::prelude::*, -}; +use std::collections::HashMap; +use std::io::prelude::*; use tokio::io::AsyncBufRead; /// Map the value from `uname -m` to the Go architecture. @@ -94,7 +92,7 @@ pub(crate) struct Manifest { pub config: ManifestLayer, pub layers: Vec, - pub annotations: Option>, + pub annotations: Option>, } impl Manifest { @@ -230,22 +228,21 @@ impl<'a> OciWriter<'a> { }); let config_blob = write_json_blob(self.dir, &config)?; - let manifest_data = serde_json::json!({ - "schemaVersion": default_schema_version(), - "config": { - "mediaType": OCI_TYPE_CONFIG_JSON, - "size": config_blob.size, - "digest": config_blob.digest_id(), + let manifest = Manifest { + schema_version: default_schema_version(), + config: ManifestLayer { + media_type: OCI_TYPE_CONFIG_JSON.to_string(), + size: config_blob.size, + digest: config_blob.digest_id(), }, - "layers": [ - { "mediaType": OCI_TYPE_LAYER_GZIP, - "size": rootfs_blob.blob.size, - "digest": rootfs_blob.blob.digest_id(), - } - ], - "annotations": self.manifest_annotations, - }); - let manifest_blob = write_json_blob(self.dir, &manifest_data)?; + layers: vec![ManifestLayer { + media_type: OCI_TYPE_LAYER_GZIP.to_string(), + size: rootfs_blob.blob.size, + digest: rootfs_blob.blob.digest_id(), + }], + annotations: Some(self.manifest_annotations.drain().collect()), + }; + let manifest_blob = write_json_blob(self.dir, &manifest)?; let index_data = serde_json::json!({ "schemaVersion": default_schema_version(), From 7660711a3ed16f34dd9bb4136fcb7a53cbc20f5d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 5 Oct 2021 10:54:56 -0400 Subject: [PATCH 140/775] tests: Use --no-bindings for base commit Since we're not meaning to fetch this via libostree, using bindings inhibits native pulls for forthcoming `container copy` work. --- lib/tests/it/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 4f9c8b9d8..e19c2ceef 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -58,7 +58,7 @@ fn generate_test_repo(dir: &Utf8Path) -> Result { indoc! 
{" cd {dir} ostree --repo=repo init --mode=archive - ostree --repo=repo commit -b {testref} --bootable --add-metadata-string=version=42.0 --gpg-homedir={gpghome} --gpg-sign={keyid} \ + ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata-string=version=42.0 --gpg-homedir={gpghome} --gpg-sign={keyid} \ --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst ostree --repo=repo show {testref} "}, @@ -79,7 +79,7 @@ fn update_repo(repopath: &Utf8Path) -> Result<()> { let repopath = repopath.as_str(); let testref = TESTREF; bash!( - "ostree --repo={repopath} commit -b {testref} --tree=tar={srcpath}", + "ostree --repo={repopath} commit -b {testref} --no-bindings --tree=tar={srcpath}", testref, repopath, srcpath From 944cf763a7d8d9102a6fd052c66c58121ac5dccc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Sep 2021 20:34:47 -0400 Subject: [PATCH 141/775] Add a new container/store module The initial scope of this project was just "encapsulating" ostree commits in containers. However, when doing that a very, very natural question arises: Why not support *deriving* from that base image container, and have the tooling natively support importing it? This initial prototype code implements that. Here, we still use the `tar::import` path for the base image - we expect it to have a pre-generated ostree commit. This new `container::store` module processes layered images and generates (client side) ostree commits from the tar layers. There's a whole lot of new infrastructure we need around mapping ostree refs to blobs and images, etc. --- lib/src/cli.rs | 150 ++++++- lib/src/container/deploy.rs | 53 +++ lib/src/container/mod.rs | 2 + lib/src/container/store.rs | 383 ++++++++++++++++++ lib/src/tar/write.rs | 4 + .../fixtures/exampleos-derive-v2.ociarchive | Bin 0 -> 14336 bytes lib/tests/it/main.rs | 123 +++++- 7 files changed, 712 insertions(+), 3 deletions(-) create mode 100644 lib/src/container/deploy.rs create mode 100644 lib/src/container/store.rs create mode 100644 lib/tests/it/fixtures/exampleos-derive-v2.ociarchive diff --git a/lib/src/cli.rs b/lib/src/cli.rs index c834967b5..a12098ad1 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -8,11 +8,12 @@ use anyhow::Result; use ostree::gio; use std::collections::BTreeMap; -use std::convert::TryInto; +use std::convert::{TryFrom, TryInto}; use std::ffi::OsString; use structopt::StructOpt; -use crate::container::{Config, ImportOptions}; +use crate::container::store::{LayeredImageImporter, PrepareResult}; +use crate::container::{Config, ImportOptions, OstreeImageReference}; #[derive(Debug, StructOpt)] struct BuildOpts { @@ -107,6 +108,63 @@ enum ContainerOpts { #[structopt(long)] cmd: Option>, }, + + /// Commands for working with (possibly layered, non-encapsulated) container images. + Image(ContainerImageOpts), +} + +/// Options for import/export to tar archives. +#[derive(Debug, StructOpt)] +enum ContainerImageOpts { + /// List container images + List { + /// Path to the repository + #[structopt(long)] + repo: String, + }, + + /// Pull (or update) a container image. + Pull { + /// Path to the repository + #[structopt(long)] + repo: String, + + /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest + imgref: String, + }, + + /// Copy a pulled container image from one repo to another. 
+ Copy { + /// Path to the source repository + #[structopt(long)] + src_repo: String, + + /// Path to the destination repository + #[structopt(long)] + dest_repo: String, + + /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest + imgref: String, + }, + + /// Perform initial deployment for a container image + Deploy { + /// Path to the system root + #[structopt(long)] + sysroot: String, + + /// Name for the state directory, also known as "osname". + #[structopt(long)] + stateroot: String, + + /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest + #[structopt(long)] + imgref: String, + + #[structopt(long)] + /// Add a kernel argument + karg: Option>, + }, } /// Options for the Integrity Measurement Architecture (IMA). @@ -251,6 +309,52 @@ async fn container_info(imgref: &str) -> Result<()> { Ok(()) } +/// Write a layered container image into an OSTree commit. +async fn container_store(repo: &str, imgref: &str) -> Result<()> { + let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; + let imgref = imgref.try_into()?; + let mut imp = LayeredImageImporter::new(&repo, &imgref).await?; + let prep = match imp.prepare().await? { + PrepareResult::AlreadyPresent(c) => { + println!("No changes in {} => {}", imgref, c); + return Ok(()); + } + PrepareResult::Ready(r) => r, + }; + if prep.base_layer.commit.is_none() { + let size = crate::glib::format_size(prep.base_layer.size()); + println!( + "Downloading base layer: {} ({})", + prep.base_layer.digest(), + size + ); + } else { + println!("Using base: {}", prep.base_layer.digest()); + } + for layer in prep.layers.iter() { + if layer.commit.is_some() { + println!("Using layer: {}", layer.digest()); + } else { + let size = crate::glib::format_size(layer.size()); + println!("Downloading layer: {} ({})", layer.digest(), size); + } + } + let import = imp.import(prep).await?; + if !import.layer_filtered_content.is_empty() { + for (layerid, filtered) in import.layer_filtered_content { + eprintln!("Unsupported paths filtered from {}:", layerid); + for (prefix, count) in filtered { + eprintln!(" {}: {}", prefix, count); + } + } + } + println!( + "Wrote: {} => {} => {}", + imgref, import.ostree_ref, import.commit + ); + Ok(()) +} + /// Add IMA signatures to an ostree commit, generating a new commit. fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { let repo = @@ -309,6 +413,48 @@ where .collect(); container_export(&repo, &rev, &imgref, labels?, cmd).await } + ContainerOpts::Image(opts) => match opts { + ContainerImageOpts::List { repo } => { + let repo = + &ostree::Repo::open_at(libc::AT_FDCWD, &repo, gio::NONE_CANCELLABLE)?; + for image in crate::container::store::list_images(&repo)? 
{ + println!("{}", image); + } + Ok(()) + } + ContainerImageOpts::Pull { repo, imgref } => container_store(&repo, &imgref).await, + ContainerImageOpts::Copy { + src_repo, + dest_repo, + imgref, + } => { + let src_repo = + &ostree::Repo::open_at(libc::AT_FDCWD, &src_repo, gio::NONE_CANCELLABLE)?; + let dest_repo = + &ostree::Repo::open_at(libc::AT_FDCWD, &dest_repo, gio::NONE_CANCELLABLE)?; + let imgref = OstreeImageReference::try_from(imgref.as_str())?; + crate::container::store::copy(src_repo, dest_repo, &imgref).await + } + ContainerImageOpts::Deploy { + sysroot, + stateroot, + imgref, + karg, + } => { + let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); + let imgref = OstreeImageReference::try_from(imgref.as_str())?; + let kargs = karg.as_deref(); + let kargs = kargs.map(|v| { + let r: Vec<_> = v.iter().map(|s| s.as_str()).collect(); + r + }); + let options = crate::container::deploy::DeployOpts { + kargs: kargs.as_deref(), + }; + crate::container::deploy::deploy(sysroot, &stateroot, &imgref, Some(options)) + .await + } + }, }, Opt::ImaSign(ref opts) => ima_sign(opts), } diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs new file mode 100644 index 000000000..9d638d06f --- /dev/null +++ b/lib/src/container/deploy.rs @@ -0,0 +1,53 @@ +//! Perform initial setup for a container image based system root + +use super::OstreeImageReference; +use crate::container::store::PrepareResult; +use anyhow::Result; +use ostree::glib; + +/// The key in the OSTree origin which holds a serialized [`super::OstreeImageReference`]. +pub const ORIGIN_CONTAINER: &str = "container"; + +async fn pull_idempotent(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result { + let mut imp = super::store::LayeredImageImporter::new(repo, imgref).await?; + match imp.prepare().await? { + PrepareResult::AlreadyPresent(r) => Ok(r), + PrepareResult::Ready(prep) => Ok(imp.import(prep).await?.commit), + } +} + +/// Options configuring deployment. +#[derive(Debug, Default)] +pub struct DeployOpts<'a> { + /// Kernel arguments to use. + pub kargs: Option<&'a [&'a str]>, +} + +/// Write a container image to an OSTree deployment. +/// +/// This API is currently intended for only an initial deployment. 
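+///
+/// A sketch of intended usage (editorial; the remote, stateroot, and image
+/// names are hypothetical):
+///
+/// ```no_run
+/// # async fn run(sysroot: &ostree::Sysroot) -> anyhow::Result<()> {
+/// # use std::convert::TryFrom;
+/// let imgref = ostree_ext::container::OstreeImageReference::try_from(
+///     "ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:stable",
+/// )?;
+/// ostree_ext::container::deploy::deploy(sysroot, "exampleos", &imgref, None).await?;
+/// # Ok(()) }
+/// ```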
+pub async fn deploy<'opts>(
+    sysroot: &ostree::Sysroot,
+    stateroot: &str,
+    imgref: &OstreeImageReference,
+    options: Option<DeployOpts<'opts>>,
+) -> Result<()> {
+    let cancellable = ostree::gio::NONE_CANCELLABLE;
+    let options = options.unwrap_or_default();
+    let repo = &sysroot.repo().unwrap();
+    let commit = &pull_idempotent(repo, imgref).await?;
+    let origin = glib::KeyFile::new();
+    origin.set_string("ostree", ORIGIN_CONTAINER, &imgref.to_string());
+    let deployment = &sysroot.deploy_tree(
+        Some(stateroot),
+        commit,
+        Some(&origin),
+        None,
+        options.kargs.unwrap_or_default(),
+        cancellable,
+    )?;
+    let flags = ostree::SysrootSimpleWriteDeploymentFlags::NONE;
+    sysroot.simple_write_deployment(Some(stateroot), deployment, None, flags, cancellable)?;
+    sysroot.cleanup(cancellable)?;
+    Ok(())
+}
diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index 1628f4056..2612a47f3 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -223,6 +223,7 @@ impl std::fmt::Display for OstreeImageReference {
     }
 }
 
+pub mod deploy;
 mod export;
 pub use export::*;
 mod import;
@@ -230,6 +231,7 @@ pub use import::*;
 mod imageproxy;
 mod oci;
 mod skopeo;
+pub mod store;
 
 #[cfg(test)]
 mod tests {
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
new file mode 100644
index 000000000..1c9b282f0
--- /dev/null
+++ b/lib/src/container/store.rs
@@ -0,0 +1,383 @@
+//! APIs for storing (layered) container images as OSTree commits
+//!
+//! # Extension of import support
+//!
+//! This code supports ingesting arbitrary layered container images from an ostree-exported
+//! base. See [`super::import`] for more information on encapsulation of images.
+
+use super::imageproxy::ImageProxy;
+use super::oci::ManifestLayer;
+use super::*;
+use crate::refescape;
+use anyhow::{anyhow, Context};
+use fn_error_context::context;
+use ostree::prelude::{Cast, ToVariant};
+use ostree::{gio, glib};
+use std::collections::{BTreeMap, HashMap};
+
+/// The ostree ref prefix for blobs.
+const LAYER_PREFIX: &str = "ostree/container/blob";
+/// The ostree ref prefix for image references.
+const IMAGE_PREFIX: &str = "ostree/container/image";
+
+/// The key injected into the merge commit for the manifest digest.
+const META_MANIFEST_DIGEST: &str = "ostree.manifest-digest";
+/// The key injected into the merge commit with the manifest serialized as JSON.
+const META_MANIFEST: &str = "ostree.manifest";
+
+/// Convert e.g. sha256:12345... into `/ostree/container/blob/sha256_2B12345...`.
+fn ref_for_blob_digest(d: &str) -> Result<String> {
+    refescape::prefix_escape_for_ref(LAYER_PREFIX, d)
+}
+
+/// Convert e.g. sha256:12345... into `/ostree/container/blob/sha256_2B12345...`.
+fn ref_for_layer(l: &oci::ManifestLayer) -> Result<String> {
+    ref_for_blob_digest(l.digest.as_str())
+}
+
+/// Compute the ostree ref for a stored image, under `ostree/container/image/`.
+fn ref_for_image(l: &ImageReference) -> Result<String> {
+    refescape::prefix_escape_for_ref(IMAGE_PREFIX, &l.to_string())
+}
+
+/// Context for importing a container image.
+pub struct LayeredImageImporter {
+    repo: ostree::Repo,
+    proxy: ImageProxy,
+    imgref: OstreeImageReference,
+    ostree_ref: String,
+}
+
+/// Result of invoking [`LayeredImageImporter::prepare`].
+pub enum PrepareResult {
+    /// The image reference is already present; the contained string is the OSTree commit.
+    AlreadyPresent(String),
+    /// The image needs to be downloaded
+    Ready(PreparedImport),
+}
+
+/// A container image layer with associated downloaded-or-not state.
+#[derive(Debug)]
+pub struct ManifestLayerState {
+    layer: oci::ManifestLayer,
+    /// The ostree ref name for this layer.
+    pub ostree_ref: String,
+    /// The ostree commit that caches this layer, if present.
+    pub commit: Option<String>,
+}
+
+impl ManifestLayerState {
+    /// The cryptographic checksum.
+    pub fn digest(&self) -> &str {
+        self.layer.digest.as_str()
+    }
+
+    /// The (possibly compressed) size.
+    pub fn size(&self) -> u64 {
+        self.layer.size
+    }
+}
+
+/// Information about which layers need to be downloaded.
+#[derive(Debug)]
+pub struct PreparedImport {
+    /// The manifest digest that was found
+    pub manifest_digest: String,
+    /// The previously stored manifest digest.
+    pub previous_manifest_digest: Option<String>,
+    /// The previously stored image ID.
+    pub previous_imageid: Option<String>,
+    /// The required base layer.
+    pub base_layer: ManifestLayerState,
+    /// Any further layers.
+    pub layers: Vec<ManifestLayerState>,
+    /// TODO: serialize this into the commit object
+    manifest: oci::Manifest,
+}
+
+/// A successful import of a container image.
+#[derive(Debug, PartialEq, Eq)]
+pub struct CompletedImport {
+    /// The ostree ref used for the container image.
+    pub ostree_ref: String,
+    /// The current commit.
+    pub commit: String,
+    /// A mapping from layer blob IDs to a count of content filtered out
+    /// by toplevel path.
+    pub layer_filtered_content: BTreeMap<String, BTreeMap<String, u32>>,
+}
+
+// Given a layer, compute its ostree ref name and cached ostree commit
+fn query_layer(repo: &ostree::Repo, layer: ManifestLayer) -> Result<ManifestLayerState> {
+    let ostree_ref = ref_for_layer(&layer)?;
+    let commit = repo.resolve_rev(&ostree_ref, true)?.map(|s| s.to_string());
+    Ok(ManifestLayerState {
+        layer,
+        ostree_ref,
+        commit,
+    })
+}
+
+fn manifest_from_commitmeta(commit_meta: &glib::VariantDict) -> Result<oci::Manifest> {
+    let manifest_bytes: String = commit_meta
+        .lookup::<String>(META_MANIFEST)?
+        .ok_or_else(|| anyhow!("Failed to find {} metadata key", META_MANIFEST))?;
+    let manifest: oci::Manifest = serde_json::from_str(&manifest_bytes)?;
+    Ok(manifest)
+}
+
+fn manifest_from_commit(commit: &glib::Variant) -> Result<oci::Manifest> {
+    let commit_meta = &commit.child_value(0);
+    let commit_meta = &ostree::glib::VariantDict::new(Some(commit_meta));
+    manifest_from_commitmeta(commit_meta)
+}
+
+impl LayeredImageImporter {
+    /// Create a new importer.
+    pub async fn new(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<Self> {
+        let proxy = ImageProxy::new(&imgref.imgref).await?;
+        let repo = repo.clone();
+        let ostree_ref = ref_for_image(&imgref.imgref)?;
+        Ok(LayeredImageImporter {
+            repo,
+            proxy,
+            ostree_ref,
+            imgref: imgref.clone(),
+        })
+    }
+
+    /// Determine if there is a new manifest, and if so return its digest.
+    #[context("Fetching manifest")]
+    pub async fn prepare(&mut self) -> Result<PrepareResult> {
+        match &self.imgref.sigverify {
+            SignatureSource::ContainerPolicy if skopeo::container_policy_is_default_insecure()? => {
+                return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage"));
+            }
+            SignatureSource::OstreeRemote(_) => {
+                return Err(anyhow!(
+                    "Cannot currently verify layered containers via ostree remote"
+                ));
+            }
+            _ => {}
+        }
+
+        let (manifest_digest, manifest_bytes) = self.proxy.fetch_manifest().await?;
+        let manifest: oci::Manifest = serde_json::from_slice(&manifest_bytes)?;
+        let new_imageid = manifest.imageid();
+
+        // Query for previous stored state
+        let (previous_manifest_digest, previous_imageid) =
+            if let Some(merge_commit) = self.repo.resolve_rev(&self.ostree_ref, true)? {
+                let (merge_commit_obj, _) = self.repo.load_commit(merge_commit.as_str())?;
+                let commit_meta = &merge_commit_obj.child_value(0);
+                let commit_meta = ostree::glib::VariantDict::new(Some(commit_meta));
+                let previous_digest: String =
+                    commit_meta.lookup(META_MANIFEST_DIGEST)?.ok_or_else(|| {
+                        anyhow!("Missing {} metadata on merge commit", META_MANIFEST_DIGEST)
+                    })?;
+                // If the manifest digests match, we're done.
+                if previous_digest == manifest_digest {
+                    return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string()));
+                }
+                // Failing that, if they have the same imageID, we're also done.
+                let previous_manifest = manifest_from_commitmeta(&commit_meta)?;
+                if previous_manifest.imageid() == new_imageid {
+                    return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string()));
+                }
+                (
+                    Some(previous_digest),
+                    Some(previous_manifest.imageid().to_string()),
+                )
+            } else {
+                (None, None)
+            };
+
+        let mut layers = manifest.layers.iter().cloned();
+        // We require a base layer.
+        let base_layer = layers.next().ok_or_else(|| anyhow!("No layers found"))?;
+        let base_layer = query_layer(&self.repo, base_layer)?;
+
+        let layers: Result<Vec<_>> = layers
+            .map(|layer| -> Result<_> { query_layer(&self.repo, layer) })
+            .collect();
+        let layers = layers?;
+
+        let imp = PreparedImport {
+            manifest,
+            manifest_digest,
+            previous_manifest_digest,
+            previous_imageid,
+            base_layer,
+            layers,
+        };
+        Ok(PrepareResult::Ready(imp))
+    }
+
+    /// Import a layered container image
+    pub async fn import(mut self, import: PreparedImport) -> Result<CompletedImport> {
+        // First download the base image (if necessary) - we need the SELinux policy
+        // there to label all following layers.
+        let base_layer = import.base_layer;
+        let base_commit = if let Some(c) = base_layer.commit {
+            c
+        } else {
+            let blob = self.proxy.fetch_layer_decompress(&base_layer.layer).await?;
+            let commit = crate::tar::import_tar(&self.repo, blob, None)
+                .await
+                .with_context(|| format!("Parsing blob {}", &base_layer.digest()))?;
+            // TODO support ref writing in tar import
+            self.repo.set_ref_immediate(
+                None,
+                base_layer.ostree_ref.as_str(),
+                Some(commit.as_str()),
+                gio::NONE_CANCELLABLE,
+            )?;
+            commit
+        };
+
+        let mut layer_commits = Vec::new();
+        let mut layer_filtered_content = BTreeMap::new();
+        for layer in import.layers {
+            if let Some(c) = layer.commit {
+                layer_commits.push(c.to_string());
+            } else {
+                let blob = self.proxy.fetch_layer_decompress(&layer.layer).await?;
+                // An important aspect of this is that we SELinux label the derived layers using
+                // the base policy.
+                let opts = crate::tar::WriteTarOptions {
+                    base: Some(base_commit.clone()),
+                    selinux: true,
+                };
+                let r =
+                    crate::tar::write_tar(&self.repo, blob, layer.ostree_ref.as_str(), Some(opts))
+                        .await
+                        .with_context(|| format!("Parsing layer blob {}", layer.digest()))?;
+                layer_commits.push(r.commit);
+                if !r.filtered.is_empty() {
+                    layer_filtered_content.insert(layer.digest().to_string(), r.filtered);
+                }
+            }
+        }
+
+        // We're done with the proxy, make sure it didn't have any errors.
+        self.proxy.finalize().await?;
+
+        let serialized_manifest = serde_json::to_string(&import.manifest)?;
+        let mut metadata = HashMap::new();
+        metadata.insert(META_MANIFEST_DIGEST, import.manifest_digest.to_variant());
+        metadata.insert(META_MANIFEST, serialized_manifest.to_variant());
+        metadata.insert(
+            "ostree.importer.version",
+            env!("CARGO_PKG_VERSION").to_variant(),
+        );
+        let metadata = metadata.to_variant();
+
+        // Destructure to transfer ownership to thread
+        let repo = self.repo;
+        let target_ref = self.ostree_ref;
+        let (ostree_ref, commit) = crate::tokio_util::spawn_blocking_cancellable(
+            move |cancellable| -> Result<(String, String)> {
+                let cancellable = Some(cancellable);
+                let repo = &repo;
+                let txn = repo.auto_transaction(cancellable)?;
+                let (base_commit_tree, _) = repo.read_commit(&base_commit, cancellable)?;
+                let base_commit_tree = base_commit_tree.downcast::<ostree::RepoFile>().unwrap();
+                let base_contents_obj = base_commit_tree.tree_get_contents_checksum().unwrap();
+                let base_metadata_obj = base_commit_tree.tree_get_metadata_checksum().unwrap();
+                let mt = ostree::MutableTree::from_checksum(
+                    &repo,
+                    &base_contents_obj,
+                    &base_metadata_obj,
+                );
+                // Layer all subsequent commits
+                for commit in layer_commits {
+                    let (layer_tree, _) = repo.read_commit(&commit, cancellable)?;
+                    repo.write_directory_to_mtree(&layer_tree, &mt, None, cancellable)?;
+                }
+
+                let merged_root = repo.write_mtree(&mt, cancellable)?;
+                let merged_root = merged_root.downcast::<ostree::RepoFile>().unwrap();
+                let merged_commit = repo.write_commit(
+                    None,
+                    None,
+                    None,
+                    Some(&metadata),
+                    &merged_root,
+                    cancellable,
+                )?;
+                repo.transaction_set_ref(None, &target_ref, Some(merged_commit.as_str()));
+                txn.commit(cancellable)?;
+                Ok((target_ref, merged_commit.to_string()))
+            },
+        )
+        .await??;
+        Ok(CompletedImport {
+            ostree_ref,
+            commit,
+            layer_filtered_content,
+        })
+    }
+}
+
+/// List all images stored
+pub fn list_images(repo: &ostree::Repo) -> Result<Vec<String>> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    let refs = repo.list_refs_ext(
+        Some(IMAGE_PREFIX),
+        ostree::RepoListRefsExtFlags::empty(),
+        cancellable,
+    )?;
+    let r: Result<Vec<String>> = refs
+        .keys()
+        .map(|imgname| refescape::unprefix_unescape_ref(IMAGE_PREFIX, imgname))
+        .collect();
+    Ok(r?)
+}
+
+/// Copy a downloaded image from one repository to another.
+pub async fn copy(
+    src_repo: &ostree::Repo,
+    dest_repo: &ostree::Repo,
+    imgref: &OstreeImageReference,
+) -> Result<()> {
+    let ostree_ref = ref_for_image(&imgref.imgref)?;
+    let rev = src_repo.resolve_rev(&ostree_ref, false)?.unwrap();
+    let (commit_obj, _) = src_repo.load_commit(rev.as_str())?;
+    let manifest: oci::Manifest = manifest_from_commit(&commit_obj)?;
+    // Create a task to copy each layer, plus the final ref
+    let layer_refs = manifest
+        .layers
+        .iter()
+        .map(|layer| ref_for_layer(&layer))
+        .chain(std::iter::once(Ok(ostree_ref)));
+    for ostree_ref in layer_refs {
+        let ostree_ref = ostree_ref?;
+        let src_repo = src_repo.clone();
+        let dest_repo = dest_repo.clone();
+        crate::tokio_util::spawn_blocking_cancellable(move |cancellable| -> Result<_> {
+            let cancellable = Some(cancellable);
+            let srcfd = &format!("file:///proc/self/fd/{}", src_repo.dfd());
+            let flags = ostree::RepoPullFlags::MIRROR;
+            let opts = glib::VariantDict::new(None);
+            let refs = [ostree_ref.as_str()];
+            // Some older archives may have bindings, we don't need to verify them.
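+            // (Ref bindings pin a commit to the ref it was originally committed
+            // under; we mirror everything under our escaped blob/image refs, so
+            // strict binding verification would spuriously fail here.)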
+ opts.insert("disable-verify-bindings", &true); + opts.insert("refs", &&refs[..]); + opts.insert("flags", &(flags.bits() as i32)); + let options = opts.to_variant(); + dest_repo.pull_with_options(&srcfd, &options, None, cancellable)?; + Ok(()) + }) + .await??; + } + Ok(()) +} + +/// Remove the specified images and their corresponding blobs. +pub fn prune_images(_repo: &ostree::Repo, _imgs: &[&str]) -> Result<()> { + // Most robust approach is to iterate over all known images, load the + // manifest and build the set of reachable blobs, then compute the set + // Set(unreachable) = Set(all) - Set(reachable) + // And remove the unreachable ones. + unimplemented!() +} diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index b4ae95bd5..f156e06ce 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -208,6 +208,10 @@ pub async fn write_tar( c.arg("--selinux-policy"); c.arg(sepolicy.path()); } + c.arg(&format!( + "--add-metadata-string=ostree.importer.version={}", + env!("CARGO_PKG_VERSION") + )); c.args(&[ "--no-bindings", "--tar-autocreate-parents", diff --git a/lib/tests/it/fixtures/exampleos-derive-v2.ociarchive b/lib/tests/it/fixtures/exampleos-derive-v2.ociarchive new file mode 100644 index 0000000000000000000000000000000000000000..42b91b1187a133f27dac1a11a4c29d757ee0ab20 GIT binary patch literal 14336 zcmeHNcU%)$*N&h9mc@o*4F(hugh`+HsHBxr|9Ep>lJ!668eijonKzHdWPK|d{73$0AkhDGDvS2r|Nk8{`JMmiA{w71;)uj- zI*){O-y475s{G35HZDU0Ry44sX`8w$rK9_l*XcqIZU2V#G;EBH1=l8iDcv#MFN#~otLgddUSps+T15I z0xP!|*YAKd<<`KC)6yBf)AwZ**qu7rIfS=xfhc~Jo&FEq5-{=f^n;r^Y@EOB@};bz zsi)@N*s^JazgmHfFttuBRRo}YSrB(`=9uCJ)!VCGr3<9 zc+>Vo+M>kE60c>Q>tJMBU2*sL$XhRmVWw+@z6p7@Q|7j+x$@i#hUArXkz1}lRzFH3 z*H1i}lr<}8F9-_o=;EBbaNE-UbIxBjIpM!P%m+YP0H9CW%N2DXb^Av-@_$&^AL5@c zWHVU|geK&(1Z*A^Md@@gDyE5rECHRx=JDtZ8i&E6iTMJU75FSFhbI)UiGO@5Di(YO zr+*XwR4SA8G5(4CPh(K&-{b!?X#TUqVHBCbTja}|FA9`Nq<)mqG#iRQ>L-?XQAUSR zd__VDZ*Hi+h%%bO^Y{0W2zWtomfdncp{-OPv6cApyhOGi=G!i(S}&DB5jGT|#7iU# zf@?nPD@HTai=O+!)I1+*wMv=;b0!4%ueSIZ_)93;i*ldK);xL%74hx`v zn0x`1C1mqOe1QOEh&e1iL%?FQQ7VggvpAeZFsylj0&hu>NDvepsHq6PLKZ_4?QpS= z#4mUS?0hU94t~pFj;73>Fn8)$yI@(MosU$&^RbihC4Qqn>HpUKctJDtp$nT7n6f7M zLId3>nq_dvj?a^M6XP>@e36ePi%F!m{9uWXkmqgtG3`)oX*65<$At7x=D3eqw8_ne zLcAjoYpe3q{#XRLJ5`g$NB5(U}Y} z6{R9fo`A+vzQhXD z7NJuaOa_y#VTE?8Cm$}R*b&~5d7F(An3<38llohkO|Z8&6H9zVqfsHkpu;-Cp@~q0 zPerJFzEC6(u=pG*3XKu+MI1yx6$t4<_?!v12q?TQjsSt0pv~}wkeS5KOhW*8+kqlG zo}Va$kV`Yuu+G<4Q;w1VgB?<11RM*BdP!%a!};_p@HlMM2^ z{QtFkpiks}+Wd0s|D(HCpH9zHeZ!W8 zBzgKidGKcW%jJ)E9`Sv0)^}h!{L8eP<>wJNE3na^x2kOBa>dxveg1QXCSNz9XHKLh z-xN1KS{As|y{hWbQU1`1r?sfaYEVJIZnT@_*wLHm6%{rYUbUx_x37;*7VJ%|>yjU{ zDm1il*R~?}>iXgJi(-B8^j4Ksb^Ug~$FUMT!h9SS)22+>)hyU0wU|pk^8|B?KCBMGhVYVZ(b8zp99Kju~X5(HK?58z~bFtgVHV%WQ_;?6rfLAX{UDi3j9zZu)PKHo45{m$ejKW zLy0*XG;>-!yMW*b-0qFM4_>FI{$OPQPL8PugOuDNkgtxvf-T+ypq4>uV<>|#$)FaL ztBt1s{dZUZsCw(ExeSenZ2HhQhS%GYDIiQ|gud2@AzC9^T>3}cBx?OlM_=3l*mw4Q%wT*&#E;@b%2ryFi2_$(B4{X6||)Wi&53e$<+Vf)?nZ7~Ha>R@G3QDrkHXuxi8%wM3bu1&Vx- zruXv)&itgsp}Uq319g1(n>c9Ll80$vn*EO3Q>!`_8mCd+QLEZqr@Bn#`4r=**wd@z zdwx#`DV$5tq15p2;qY85N$(GI;OkUr=>SZ%J}E#6pZ@?h>2TS7&CrLilha60O&xz3 z6Tr1fyY=8IgryX4nlaedLa-z zMp{r>bRG+X4%tPJc(V$6TLS{IoCwftOQ>2G6gqaa$!?IfF%t$Irb!bzIzVk7nc7FK z@X)EZ)2Z*IQ%@jAxtz=pgilU^-63Ln5u~sZRBM4g9iQ~L;RVq zFic0z#>wc3R@^Baxb9JL{l_$@WBK6N_%`hMigG16A9syt1Q#3fTDURlG`%ViQ4w8_ z^-+2{j5J4DKbeCyw1Df4IA%Q?x5Jy07nb0%PS-%bCy1|luNizJx53>yG-1U(H9#bQ z?9G~daZyzooTst08k{iKgf#*}25XFsXp`4PDY@%`YxxISo$IA#jVlCL6WY_Evxzk0 zsdJw-OsgL$+**gt$-zU0K{Pz^UFB7BYb`)-#Qgsto;3~}(Vjd~r%&gvW4fR+fjWr1 
zo~Co)ydxVgRuehT96-`P#jI54x@h?^4~qK|lfUG=NwDDP0@zNctDlZ68?@#C2Q9UK*yME7 z@w%g47h1wg{$+dY{ECIcYLA{=G^*7@bUCy31}CuhK)VbN>QH{(z(vTz>5h31SyNM3 zlQ+7YalZRFZ_SQ-j`!vcxIKUWO@o>TvL)62jfpQd*4HO{`HxiMO1=f<;PiJ3Wzr}Q!t!eYz5$5x@>@bYzayIwOnTK(m zw$FJyxB`!jT2j)~l+>z!#U_4l!ON%|y#80^M91`0*@WfmO^}gG9(6M`={t2?4yT90Y=jl#sa{JqgZ0W*1JEUVn_ALy5 zn-P3Sk>h_s6+ak+D6rK5Z(A)rA2mFPos#Oq*_y#x9A2FXnr~DT6yA;oUQjZxZb| zdU#2Xf(JbYjtQO>c3^vXU;Uv^hg$XMG%^737OT{@cWHq zqdRsWqQgzmi-p1VNgy|&-TBdzBA0XlZ||#?+K9%>fsxY0yPQx()WzL*Dqm&O4haJL z51Z0wrAL3xfB|t$Sf$EyC(u8z+v=bdbrwZJ(tEFWUO2<5D)+sIv(iOTY zIQIVW){6_~gVRr%!><%NOAkA!j=jX5x^>M`V`uTS;ZbA2zpkg`dCC-Lr>v$o^a6Jn zk{!>AUrhcvuD5tou5W=gJN3QisW{`4O25)9{?l!#1!|vSvfpA_MR|xgQMERBPV$P1 zGOu&K58pq1I?Y`1Mh2l=615wc*MMuPG@t1SckV1WW_mDh;fBPxh%m}_|9kZEh=5y* zB#!BqHcN!*T|4WY+jhNBjqO*0oN~`+;0g)vhsLKIy5BPtuRjFpZQ`?DVKPNm^C<%J zKsheP@ z)yr*dPAN``q>Pu>1$dU~w$*+X*1qR_Q%Iy*a8&oomTE^2*op3tlChr4{$$&&DR-Yr8)HQ%rIj zaLI_y%w$^xA__zpjHQUf5r6xnU4gIgMQLmT2;fZOLxcs&n^JpN)0Iw1856m?f)~MuG zXC2-E^A-T{IIQBHK5}-+@vElmFK#kfheVEa-{XcsUxYg39WEqpcfA+%%A!(0+Ar z2zPAY3ERvaFxbS2HUnB(eYx#^$|=^xxsO&=Sq|h%lR>l5E=%{O!g|-4I~%S9Wsf^h zlXC%t$vQsb>FKAZ) zuXgg36QFK9*frPtRtI-dqy&Mr?;6l66QtDwSv0oQ%smxc84n^er!L(`GLOZo z!Q}TiJuok_DA0Y@@NfKx@e2K+ZxKlFts8Zb?^W!^Fj8A`CTRJQaq@J zMTXrHNEKU`7G-wrV!f=V>2br;)6q%WTnkADXSj5J{Jy#V@HKE8v~o%I7r>$UO007k zFi&n$R^M#Q0QUQz-`A0Y1dpxaZZOC*vD%W+jbu>ql~WxYY6+OrZ* zI|cN&E`2*4y*Ml2K<&Oe!!8%{bo)cyi~`gSo^ zZ32e2a&IiGY~ltZ;AbZl9gjS|C#2v!s3^IL_4l%gD0AP|H&k7J0z7M%qTtq~C~iL? zxh=URp5EA6ydK}6QYEh(3kqtt@t2Lcob9x_tH+$8KCUHQL`DxTIY$rfJ9%Xpqk9)O z{>;MmyhRZ`_ME`&;*!DLlQp2y1o)|ox0k8OEhQknSon_Jt}0bDVT)phH{#s577(2S zoaU;r5soPxtcMN1BpggW%$qoR*4imoJJxp?dTjz{%Vq0fMuwZuT)gJ^!w~<<8o*Gs zaW#omFvef)^`Q3wqL#QlS?smJ&eHte`P*sX%G_$>Az5{&calMi<*|Zfe-P>RXz;SG zcBLzuxPzJLWBl&Dvg|up-jIrotK)I8#-qk(w=T~D`(U$d3e zgVF*vy#Y-z*gU+{5g1ISaKWK*3h-iUS6No)j?LXV6<#BqGJnaUEI$+O> zIc|O<)Tn4#mB?@nwLaQ-L2sXk5f!Ot*1h&Kc$FGI-NgdYk9rE&SwLZp&ub&Eto|u` zweiO2LIpT-yN!GNN9PXL#p4g39N);+ z8iid@?Tch@S`E(D9P8d`OFCVdGokWuh`YZ$EMT{{!i1l84Xo?-9Qe!}KeI> zqR(t4l~a^E!sZ<&&U zV`JcxUhq9&*GQMkL%miPC*rX=0LyI#t~BQS2&Bu&Z8sK7-(G1N@ixY&kN=V%Z4j&6 zjuo|U7ArTqjIs&o)eWYlW4BwdDid&2RqEC|>!n`4`zryEmXup8$H~nqMKhS&j2X71 zf$>(CAh{3E1hGRvzfk=x3kLouR07EQ*S|b;GAJ@QfN|}Sj&pB<4)2p^U!2#@t(-LL zcAY%S6vVoN;$wLHaHdBu`yi52M!&+a0-v{cjGkYbZhdJ@gxMsLNsi(8r~`ec9qq%5 zus;C?E(SF)!fUY}Cu8w6m|YgV1gpxqhU(bPC%~TFupTVi4{F+6c1fe%RRtq)gF_&@ zM)lLip)bJF8mwyGZb}-6GY_=iTo_j-g>Pqxvqwh3M(Lysy>Ilc_FIUR_z6WTY>9Il z-$MEanO}bYkHB*mpPc{uaQ+ivQoq0d|8^evDEY6>|M>F!Bx3ljjQCplch9lt)_ma< z5MA_jr^|>Bo|;oYEE?_mSGF-(j=*;Wz9aCz4uSsyDUIH* literal 0 HcmV?d00001 diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index e19c2ceef..da7738923 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -2,6 +2,7 @@ use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use indoc::indoc; +use ostree_ext::container::store::PrepareResult; use ostree_ext::container::{ Config, ImageReference, OstreeImageReference, SignatureSource, Transport, }; @@ -24,6 +25,7 @@ const TEST_REGISTRY_DEFAULT: &str = "localhost:5000"; /// Image that contains a base exported layer, then a `podman build` of an added file on top. 
const EXAMPLEOS_DERIVED_OCI: &[u8] = include_bytes!("fixtures/exampleos-derive.ociarchive"); +const EXAMPLEOS_DERIVED_V2_OCI: &[u8] = include_bytes!("fixtures/exampleos-derive-v2.ociarchive"); fn assert_err_contains(r: Result, s: impl AsRef) { let s = s.as_ref(); @@ -386,7 +388,7 @@ async fn test_container_import_export() -> Result<()> { Ok(()) } -/// We should currently reject an image with multiple layers. +/// We should reject an image with multiple layers when doing an "import" - i.e. a direct un-encapsulation. #[tokio::test] async fn test_container_import_derive() -> Result<()> { let fixture = Fixture::new()?; @@ -404,6 +406,125 @@ async fn test_container_import_derive() -> Result<()> { Ok(()) } +/// But layers work via the container::write module. +#[tokio::test] +async fn test_container_write_derive() -> Result<()> { + let fixture = Fixture::new()?; + let exampleos_path = &fixture.path.join("exampleos-derive.ociarchive"); + std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_OCI)?; + let exampleos_ref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: ImageReference { + transport: Transport::OciArchive, + name: exampleos_path.to_string(), + }, + }; + + // There shouldn't be any container images stored yet. + let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; + assert!(images.is_empty()); + + // Pull a derived image - two layers, new base plus one layer. + let mut imp = + ostree_ext::container::store::LayeredImageImporter::new(&fixture.destrepo, &exampleos_ref) + .await?; + let prep = match imp.prepare().await? { + PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + PrepareResult::Ready(r) => r, + }; + assert!(prep.base_layer.commit.is_none()); + for layer in prep.layers.iter() { + assert!(layer.commit.is_none()); + } + let import = imp.import(prep).await?; + // We should have exactly one image stored. + let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; + assert_eq!(images.len(), 1); + assert_eq!(images[0], exampleos_ref.imgref.to_string()); + + // Parse the commit and verify we pulled the derived content. + bash!( + "ostree --repo={repo} ls {r} /usr/share/anewfile", + repo = fixture.destrepo_path.as_str(), + r = import.ostree_ref.as_str() + )?; + + // Import again, but there should be no changes. + let mut imp = + ostree_ext::container::store::LayeredImageImporter::new(&fixture.destrepo, &exampleos_ref) + .await?; + let already_present = match imp.prepare().await? { + PrepareResult::AlreadyPresent(c) => c, + PrepareResult::Ready(_) => { + panic!("Should have already imported {}", import.ostree_ref) + } + }; + assert_eq!(import.commit, already_present); + + // Test upgrades; replace the oci-archive with new content. + std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_V2_OCI)?; + let mut imp = + ostree_ext::container::store::LayeredImageImporter::new(&fixture.destrepo, &exampleos_ref) + .await?; + let prep = match imp.prepare().await? { + PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + PrepareResult::Ready(r) => r, + }; + // We *should* already have the base layer. + assert!(prep.base_layer.commit.is_some()); + // One new layer + assert_eq!(prep.layers.len(), 1); + for layer in prep.layers.iter() { + assert!(layer.commit.is_none()); + } + let import = imp.import(prep).await?; + // New commit. + assert_ne!(import.commit, already_present); + // We should still have exactly one image stored. 
+ let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; + assert_eq!(images.len(), 1); + assert_eq!(images[0], exampleos_ref.imgref.to_string()); + + // Verify we have the new file and *not* the old one + bash!( + "ostree --repo={repo} ls {r} /usr/share/anewfile2 >/dev/null + if ostree --repo={repo} ls {r} /usr/share/anewfile 2>/dev/null; then + echo oops; exit 1 + fi + ", + repo = fixture.destrepo_path.as_str(), + r = import.ostree_ref.as_str() + )?; + + // And there should be no changes on upgrade again. + let mut imp = + ostree_ext::container::store::LayeredImageImporter::new(&fixture.destrepo, &exampleos_ref) + .await?; + let already_present = match imp.prepare().await? { + PrepareResult::AlreadyPresent(c) => c, + PrepareResult::Ready(_) => { + panic!("Should have already imported {}", import.ostree_ref) + } + }; + assert_eq!(import.commit, already_present); + + // Create a new repo, and copy to it + let destrepo2 = ostree::Repo::create_at( + ostree::AT_FDCWD, + fixture.path.join("destrepo2").as_str(), + ostree::RepoMode::Archive, + None, + gio::NONE_CANCELLABLE, + )?; + ostree_ext::container::store::copy(&fixture.destrepo, &destrepo2, &exampleos_ref).await?; + + let images = ostree_ext::container::store::list_images(&destrepo2)?; + assert_eq!(images.len(), 1); + assert_eq!(images[0], exampleos_ref.imgref.to_string()); + + Ok(()) +} + #[ignore] #[tokio::test] // Verify that we can push and pull to a registry, not just oci-archive:. From 38edc828516acc832fd45e9494282b13f4006262 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 6 Oct 2021 12:56:22 -0400 Subject: [PATCH 142/775] lib: Bump to ostree 0.13.3 And require v2021.5 because we do in practice for the fixed tar+selinux bits. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 6f58f8f9d..ba294fc35 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -29,7 +29,7 @@ nix = "0.22.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { features = ["v2021_4"], version = "0.13.2" } +ostree = { features = ["v2021_5"], version = "0.13.3" } phf = { features = ["macros"], version = "0.9.0" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } From cf0cad42b8f49ae14f83676c6200060c6712b08b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 11 Oct 2021 17:02:28 -0400 Subject: [PATCH 143/775] Switch to using new skopeo proxy code This uses a custom JSON-over-SOCK_SEQPACKET with fd passing for data. For more information, see https://github.com/containers/skopeo/pull/1476 --- ci/installdeps.sh | 19 ++-- lib/Cargo.toml | 2 +- lib/src/container/imageproxy.rs | 157 -------------------------------- lib/src/container/import.rs | 60 ++++++++++-- lib/src/container/mod.rs | 1 - lib/src/container/oci.rs | 22 +---- lib/src/container/store.rs | 40 +++++--- 7 files changed, 92 insertions(+), 209 deletions(-) delete mode 100644 lib/src/container/imageproxy.rs diff --git a/ci/installdeps.sh b/ci/installdeps.sh index 606032edb..15e668dfe 100755 --- a/ci/installdeps.sh +++ b/ci/installdeps.sh @@ -1,10 +1,17 @@ #!/bin/bash set -xeuo pipefail -yum -y install skopeo -yum -y --enablerepo=updates-testing update ostree-devel +# Always pull ostree from updates-testing to avoid the bodhi wait +dnf -y --enablerepo=updates-testing update ostree-devel + +# Pull the code from https://github.com/containers/skopeo/pull/1476 +# if necessary. +if ! 
skopeo experimental-image-proxy --help &>/dev/null; then + dnf -y install dnf-utils + dnf builddep -y skopeo + git clone --depth=1 https://github.com/containers/skopeo + cd skopeo + make + install -m 0755 bin/skopeo /usr/bin/ +fi -git clone --depth=1 https://github.com/cgwalters/container-image-proxy -cd container-image-proxy -make -install -m 0755 bin/container-image-proxy /usr/bin/ diff --git a/lib/Cargo.toml b/lib/Cargo.toml index ba294fc35..275781845 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,6 +10,7 @@ version = "0.4.0-alpha.0" [dependencies] anyhow = "1.0" +containers-image-proxy = { version = "0.1", git = "https://github.com/cgwalters/containers-image-proxy-rs" } async-compression = { version = "0.3", features = ["gzip", "tokio"] } bytes = "1.0.1" bitflags = "1" @@ -20,7 +21,6 @@ fn-error-context = "0.2.0" futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" -hyper = { version = "0.14", features = ["full"] } indicatif = "0.16.0" lazy_static = "1.4.0" libc = "0.2.92" diff --git a/lib/src/container/imageproxy.rs b/lib/src/container/imageproxy.rs deleted file mode 100644 index 005a43f66..000000000 --- a/lib/src/container/imageproxy.rs +++ /dev/null @@ -1,157 +0,0 @@ -//! Run container-image-proxy as a subprocess. -//! This allows fetching a container image manifest and layers in a streaming fashion. -//! More information: - -use super::{oci, ImageReference, Result}; -use crate::cmdext::CommandRedirectionExt; -use anyhow::Context; -use futures_util::{Future, FutureExt, TryFutureExt, TryStreamExt}; -use hyper::body::HttpBody; -use hyper::client::conn::{Builder, SendRequest}; -use hyper::{Body, Request, StatusCode}; -use std::os::unix::prelude::AsRawFd; -use std::pin::Pin; -use std::process::Stdio; -use tokio::io::{AsyncBufRead, AsyncReadExt}; - -// What we get from boxing a fallible tokio::spawn() closure. Note the nested Result. -type JoinFuture = Pin>>>>; - -/// Manage a child process proxy to fetch container images. -pub(crate) struct ImageProxy { - proc: tokio::process::Child, - request_sender: SendRequest, - stderr: JoinFuture, - driver: JoinFuture<()>, -} - -impl ImageProxy { - /// Create an image proxy that fetches the target image. - pub(crate) async fn new(imgref: &ImageReference) -> Result { - // Communicate over an anonymous socketpair(2) - let (mysock, childsock) = tokio::net::UnixStream::pair()?; - let childsock = childsock.into_std()?; - let mut c = std::process::Command::new("container-image-proxy"); - c.arg(&imgref.to_string()); - c.stdout(Stdio::null()).stderr(Stdio::piped()); - if let Some(port) = std::env::var_os("OSTREE_IMAGE_PROXY_PORT") { - c.arg("--port"); - c.arg(port); - } else { - // Pass one half of the pair as fd 3 to the child - let target_fd = 3; - c.arg("--sockfd"); - c.arg(&format!("{}", target_fd)); - c.take_fd_n(childsock.as_raw_fd(), target_fd); - } - let mut c = tokio::process::Command::from(c); - c.kill_on_drop(true); - let mut proc = c.spawn().context("Failed to spawn container-image-proxy")?; - // We've passed over the fd, close it. - drop(childsock); - - // Safety: We passed `Stdio::piped()` above - let mut child_stderr = proc.stderr.take().unwrap(); - - // Connect via HTTP to the child - let (request_sender, connection) = Builder::new().handshake::<_, Body>(mysock).await?; - // Background driver that manages things like timeouts. 
- let driver = tokio::spawn(connection.map_err(anyhow::Error::msg)) - .map_err(anyhow::Error::msg) - .boxed(); - let stderr = tokio::spawn(async move { - let mut buf = String::new(); - child_stderr.read_to_string(&mut buf).await?; - Ok(buf) - }) - .map_err(anyhow::Error::msg) - .boxed(); - Ok(Self { - proc, - stderr, - request_sender, - driver, - }) - } - - /// Fetch the manifest. - /// https://github.com/opencontainers/image-spec/blob/main/manifest.md - pub(crate) async fn fetch_manifest(&mut self) -> Result<(String, Vec)> { - let req = Request::builder() - .header("Host", "localhost") - .method("GET") - .uri("/manifest") - .body(Body::from(""))?; - let mut resp = self.request_sender.send_request(req).await?; - if resp.status() != StatusCode::OK { - return Err(anyhow::anyhow!("error from proxy: {}", resp.status())); - } - let hname = "Manifest-Digest"; - let digest = resp - .headers() - .get(hname) - .ok_or_else(|| anyhow::anyhow!("Missing {} header", hname))? - .to_str() - .with_context(|| format!("Invalid {} header", hname))? - .to_string(); - let mut ret = Vec::new(); - while let Some(chunk) = resp.body_mut().data().await { - let chunk = chunk?; - ret.extend_from_slice(&chunk); - } - Ok((digest, ret)) - } - - /// Fetch a blob identified by e.g. `sha256:`. - /// https://github.com/opencontainers/image-spec/blob/main/descriptor.md - /// Note that right now the proxy does verification of the digest: - /// https://github.com/cgwalters/container-image-proxy/issues/1#issuecomment-926712009 - pub(crate) async fn fetch_blob( - &mut self, - digest: &str, - ) -> Result { - let uri = format!("/blobs/{}", digest); - let req = Request::builder() - .header("Host", "localhost") - .method("GET") - .uri(&uri) - .body(Body::from(""))?; - let resp = self.request_sender.send_request(req).await?; - let status = resp.status(); - let body = TryStreamExt::map_err(resp.into_body(), |e| { - std::io::Error::new(std::io::ErrorKind::Other, e) - }); - let mut body = tokio_util::io::StreamReader::new(body); - if status != StatusCode::OK { - let mut s = String::new(); - let _: usize = body.read_to_string(&mut s).await?; - return Err(anyhow::anyhow!("error from proxy: {}: {}", status, s)); - } - Ok(body) - } - - /// A wrapper for [`fetch_blob`] which fetches a layer and decompresses it. - pub(crate) async fn fetch_layer_decompress( - &mut self, - layer: &oci::ManifestLayer, - ) -> Result> { - let blob = self.fetch_blob(layer.digest.as_str()).await?; - Ok(layer.new_async_decompressor(blob)?) - } - - /// Close the HTTP connection and wait for the child process to exit successfully. 
- pub(crate) async fn finalize(mut self) -> Result<()> { - // For now discard any errors from the connection - drop(self.request_sender); - let _r = self.driver.await??; - let status = self.proc.wait().await?; - if !status.success() { - if let Some(stderr) = self.stderr.await.map(|v| v.ok()).ok().flatten() { - anyhow::bail!("proxy failed: {}\n{}", status, stderr) - } else { - anyhow::bail!("proxy failed: {} (failed to fetch stderr)", status) - } - } - Ok(()) - } -} diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 59211b266..2d16b6e1c 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -30,8 +30,11 @@ use super::*; use anyhow::{anyhow, Context}; +use containers_image_proxy::{ImageProxy, OpenedImage}; +use containers_image_proxy::{OCI_TYPE_LAYER_GZIP, OCI_TYPE_LAYER_TAR}; use fn_error_context::context; -use tokio::io::AsyncRead; +use futures_util::Future; +use tokio::io::{AsyncBufRead, AsyncRead}; use tracing::{event, instrument, Level}; /// The result of an import operation @@ -84,8 +87,10 @@ impl AsyncRead for ProgressReader { /// Download the manifest for a target image and its sha256 digest. #[context("Fetching manifest")] pub async fn fetch_manifest(imgref: &OstreeImageReference) -> Result<(Vec, String)> { - let mut proxy = imageproxy::ImageProxy::new(&imgref.imgref).await?; - let (digest, raw_manifest) = proxy.fetch_manifest().await?; + let proxy = ImageProxy::new().await?; + let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; + let (digest, raw_manifest) = proxy.fetch_manifest(oi).await?; + proxy.close_image(oi).await?; Ok((raw_manifest, digest)) } @@ -135,6 +140,36 @@ pub async fn import( }) } +/// Create a decompressor for this MIME type, given a stream of input. +fn new_async_decompressor<'a>( + media_type: &str, + src: impl AsyncBufRead + Send + Unpin + 'a, +) -> Result> { + match media_type { + OCI_TYPE_LAYER_GZIP => Ok(Box::new(tokio::io::BufReader::new( + async_compression::tokio::bufread::GzipDecoder::new(src), + ))), + OCI_TYPE_LAYER_TAR => Ok(Box::new(src)), + o => Err(anyhow::anyhow!("Unhandled layer type: {}", o)), + } +} + +/// A wrapper for [`get_blob`] which fetches a layer and decompresses it. +pub(crate) async fn fetch_layer_decompress<'a>( + proxy: &'a ImageProxy, + img: &OpenedImage, + layer: &oci::ManifestLayer, +) -> Result<( + Box, + impl Future> + 'a, +)> { + let (blob, driver) = proxy + .get_blob(img, layer.digest.as_str(), layer.size) + .await?; + let blob = new_async_decompressor(&layer.media_type, blob)?; + Ok((blob, driver)) +} + /// Fetch a container image using an in-memory manifest and import its embedded OSTree commit. 
#[context("Importing {}", imgref)] #[instrument(skip(repo, options, manifest_bytes))] @@ -152,9 +187,15 @@ pub async fn import_from_manifest( let options = options.unwrap_or_default(); let manifest: oci::Manifest = serde_json::from_slice(manifest_bytes)?; let layer = require_one_layer_blob(&manifest)?; - event!(Level::DEBUG, "target blob: {}", layer.digest.as_str()); - let mut proxy = imageproxy::ImageProxy::new(&imgref.imgref).await?; - let blob = proxy.fetch_layer_decompress(layer).await?; + event!( + Level::DEBUG, + "target blob digest:{} size: {}", + layer.digest.as_str(), + layer.size + ); + let proxy = ImageProxy::new().await?; + let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; + let (blob, driver) = fetch_layer_decompress(&proxy, oi, layer).await?; let blob = ProgressReader { reader: blob, progress: options.progress, @@ -164,9 +205,10 @@ pub async fn import_from_manifest( SignatureSource::OstreeRemote(remote) => taropts.remote = Some(remote.clone()), SignatureSource::ContainerPolicy | SignatureSource::ContainerPolicyAllowInsecure => {} } - let ostree_commit = crate::tar::import_tar(repo, blob, Some(taropts)) - .await - .with_context(|| format!("Parsing blob {}", layer.digest))?; + let import = crate::tar::import_tar(repo, blob, Some(taropts)); + let (import, driver) = tokio::join!(import, driver); + driver?; + let ostree_commit = import.with_context(|| format!("Parsing blob {}", layer.digest))?; // FIXME write ostree commit after proxy finalization proxy.finalize().await?; event!(Level::DEBUG, "created commit {}", ostree_commit); diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 2612a47f3..7d0405cea 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -228,7 +228,6 @@ mod export; pub use export::*; mod import; pub use import::*; -mod imageproxy; mod oci; mod skopeo; pub mod store; diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 4c9724c84..d66f2db48 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -2,6 +2,7 @@ //! oriented towards generating images. use anyhow::{anyhow, Result}; +use containers_image_proxy::OCI_TYPE_LAYER_GZIP; use flate2::write::GzEncoder; use fn_error_context::context; use openat_ext::*; @@ -10,7 +11,6 @@ use phf::phf_map; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::io::prelude::*; -use tokio::io::AsyncBufRead; /// Map the value from `uname -m` to the Go architecture. /// TODO find a more canonical home for this. @@ -22,10 +22,6 @@ static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! { // OCI types, see https://github.com/opencontainers/image-spec/blob/master/media-types.md pub(crate) const OCI_TYPE_CONFIG_JSON: &str = "application/vnd.oci.image.config.v1+json"; pub(crate) const OCI_TYPE_MANIFEST_JSON: &str = "application/vnd.oci.image.manifest.v1+json"; -pub(crate) const OCI_TYPE_LAYER_GZIP: &str = "application/vnd.oci.image.layer.v1.tar+gzip"; -pub(crate) const OCI_TYPE_LAYER_TAR: &str = "application/vnd.oci.image.layer.v1.tar"; -// FIXME - use containers/image to fully convert the manifest to OCI -const DOCKER_TYPE_LAYER_TARGZ: &str = "application/vnd.docker.image.rootfs.diff.tar.gzip"; /// Path inside an OCI directory to the blobs const BLOBDIR: &str = "blobs/sha256"; @@ -68,22 +64,6 @@ pub(crate) struct ManifestLayer { pub size: u64, } -impl ManifestLayer { - /// Create a decompressor for this layer, given a stream of input. 
- pub fn new_async_decompressor( - &self, - src: impl AsyncBufRead + Send + Unpin + 'static, - ) -> Result> { - match self.media_type.as_str() { - OCI_TYPE_LAYER_GZIP | DOCKER_TYPE_LAYER_TARGZ => Ok(Box::new( - tokio::io::BufReader::new(async_compression::tokio::bufread::GzipDecoder::new(src)), - )), - OCI_TYPE_LAYER_TAR => Ok(Box::new(src)), - o => Err(anyhow::anyhow!("Unhandled layer type: {}", o)), - } - } -} - #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct Manifest { diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 1c9b282f0..b93e08288 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -5,11 +5,11 @@ //! This code supports ingesting arbitrary layered container images from an ostree-exported //! base. See [`super::import`] for more information on encaspulation of images. -use super::imageproxy::ImageProxy; use super::oci::ManifestLayer; use super::*; use crate::refescape; use anyhow::{anyhow, Context}; +use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; use ostree::prelude::{Cast, ToVariant}; use ostree::{gio, glib}; @@ -45,6 +45,7 @@ pub struct LayeredImageImporter { repo: ostree::Repo, proxy: ImageProxy, imgref: OstreeImageReference, + proxy_img: OpenedImage, ostree_ref: String, } @@ -135,12 +136,14 @@ fn manifest_from_commit(commit: &glib::Variant) -> Result { impl LayeredImageImporter { /// Create a new importer. pub async fn new(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result { - let proxy = ImageProxy::new(&imgref.imgref).await?; + let proxy = ImageProxy::new().await?; + let proxy_img = proxy.open_image(&imgref.imgref.to_string()).await?; let repo = repo.clone(); let ostree_ref = ref_for_image(&imgref.imgref)?; Ok(LayeredImageImporter { repo, proxy, + proxy_img, ostree_ref, imgref: imgref.clone(), }) @@ -161,7 +164,7 @@ impl LayeredImageImporter { _ => {} } - let (manifest_digest, manifest_bytes) = self.proxy.fetch_manifest().await?; + let (manifest_digest, manifest_bytes) = self.proxy.fetch_manifest(&self.proxy_img).await?; let manifest: oci::Manifest = serde_json::from_slice(&manifest_bytes)?; let new_imageid = manifest.imageid(); @@ -214,17 +217,23 @@ impl LayeredImageImporter { } /// Import a layered container image - pub async fn import(mut self, import: PreparedImport) -> Result { + pub async fn import(self, import: PreparedImport) -> Result { + let proxy = self.proxy; // First download the base image (if necessary) - we need the SELinux policy // there to label all following layers. 
let base_layer = import.base_layer; let base_commit = if let Some(c) = base_layer.commit { c } else { - let blob = self.proxy.fetch_layer_decompress(&base_layer.layer).await?; - let commit = crate::tar::import_tar(&self.repo, blob, None) - .await - .with_context(|| format!("Parsing blob {}", &base_layer.digest()))?; + let base_layer_ref = &base_layer.layer; + let (blob, driver) = + super::import::fetch_layer_decompress(&proxy, &self.proxy_img, &base_layer.layer) + .await?; + let importer = crate::tar::import_tar(&self.repo, blob, None); + let (commit, driver) = tokio::join!(importer, driver); + driver?; + let commit = + commit.with_context(|| format!("Parsing blob {}", &base_layer_ref.digest))?; // TODO support ref writing in tar import self.repo.set_ref_immediate( None, @@ -241,17 +250,20 @@ impl LayeredImageImporter { if let Some(c) = layer.commit { layer_commits.push(c.to_string()); } else { - let blob = self.proxy.fetch_layer_decompress(&layer.layer).await?; + let (blob, driver) = + super::import::fetch_layer_decompress(&proxy, &self.proxy_img, &layer.layer) + .await?; // An important aspect of this is that we SELinux label the derived layers using // the base policy. let opts = crate::tar::WriteTarOptions { base: Some(base_commit.clone()), selinux: true, }; - let r = - crate::tar::write_tar(&self.repo, blob, layer.ostree_ref.as_str(), Some(opts)) - .await - .with_context(|| format!("Parsing layer blob {}", layer.digest()))?; + let w = + crate::tar::write_tar(&self.repo, blob, layer.ostree_ref.as_str(), Some(opts)); + let (r, driver) = tokio::join!(w, driver); + let r = r.with_context(|| format!("Parsing layer blob {}", layer.digest()))?; + driver?; layer_commits.push(r.commit); if !r.filtered.is_empty() { layer_filtered_content.insert(layer.digest().to_string(), r.filtered); @@ -260,7 +272,7 @@ impl LayeredImageImporter { } // We're done with the proxy, make sure it didn't have any errors. - self.proxy.finalize().await?; + proxy.finalize().await?; let serialized_manifest = serde_json::to_string(&import.manifest)?; let mut metadata = HashMap::new(); From c1ca5c057a85523bce689af54cef615bc15a2229 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 15 Oct 2021 14:44:46 -0400 Subject: [PATCH 144/775] Switch to published containers-image-proxy crate https://github.com/containers/containers-image-proxy-rs and https://crates.io/crates/containers-image-proxy exist now. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 275781845..44d92480c 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,7 +10,7 @@ version = "0.4.0-alpha.0" [dependencies] anyhow = "1.0" -containers-image-proxy = { version = "0.1", git = "https://github.com/cgwalters/containers-image-proxy-rs" } +containers-image-proxy = "0.1" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bytes = "1.0.1" bitflags = "1" From 6334eff4210f6a64d10c6652b759207de578ed54 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 19 Oct 2021 15:59:33 -0400 Subject: [PATCH 145/775] Add `ostree-unverified-registry:` shorthand In testing right now, I find myself using `ostree-unverified-image:registry:...` a lot. It will really be the common case for experimentation, so let's add the shorthand. 
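
Functionally, the new shorthand is just a prefix rewrite: `ostree-unverified-registry:X`
parses the same as `ostree-unverified-image:registry:X`. A standalone sketch of that
mapping (illustrative only; the real parser handles it directly in the `TryFrom`
implementation, as the diff below shows):

    fn expand_shorthand(spec: &str) -> String {
        let mut parts = spec.splitn(2, ':');
        match (parts.next(), parts.next()) {
            (Some("ostree-unverified-registry"), Some(rest)) => {
                format!("ostree-unverified-image:registry:{}", rest)
            }
            _ => spec.to_string(),
        }
    }

    fn main() {
        assert_eq!(
            expand_shorthand("ostree-unverified-registry:quay.io/exampleos/blah"),
            "ostree-unverified-image:registry:quay.io/exampleos/blah"
        );
    }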
--- lib/src/container/mod.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 7d0405cea..517b80568 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -157,6 +157,11 @@ impl TryFrom<&str> for OstreeImageReference { SignatureSource::ContainerPolicyAllowInsecure, Cow::Borrowed(second), ), + // Shorthand for ostree-unverified-image:registry: + "ostree-unverified-registry" => ( + SignatureSource::ContainerPolicyAllowInsecure, + Cow::Owned(format!("registry:{}", second)), + ), // This is a shorthand for ostree-remote-image with registry: "ostree-remote-registry" => { let mut subparts = second.splitn(2, ':'); @@ -306,5 +311,9 @@ mod tests { ir.to_string(), "ostree-unverified-image:docker://quay.io/exampleos/blah" ); + let ir_shorthand = + OstreeImageReference::try_from("ostree-unverified-registry:quay.io/exampleos/blah") + .unwrap(); + assert_eq!(&ir_shorthand, &ir); } } From 4ef4433682ffd0898acba8d1b7d05779e536d9fc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 22 Oct 2021 11:31:22 -0400 Subject: [PATCH 146/775] Factor out internal `objgv` module with `gv!` incantations I plan to add some more code which uses this, so let's deduplicate the hairy GVariant type strings. I also tried to deduplicate this more by having a function or macro that abstracts the `data_as_bytes()` and `try_as_aligned` stuff, but couldn't figure out how to do it. --- lib/src/ima.rs | 5 +++-- lib/src/lib.rs | 2 +- lib/src/objgv.rs | 31 +++++++++++++++++++++++++++++++ lib/src/tar/export.rs | 10 +++++----- 4 files changed, 40 insertions(+), 8 deletions(-) create mode 100644 lib/src/objgv.rs diff --git a/lib/src/ima.rs b/lib/src/ima.rs index bfece36b3..8adf2aa08 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT +use crate::objgv::*; use anyhow::{Context, Result}; use fn_error_context::context; use gio::glib; @@ -224,7 +225,7 @@ impl<'a> CommitRewriter<'a> { .load_variant(ostree::ObjectType::DirTree, checksum)?; let src = src.data_as_bytes(); let src = src.try_as_aligned()?; - let src = gv!("(a(say)a(sayay))").cast(src); + let src = gv_dirtree!().cast(src); let (files, dirs) = src.to_tuple(); // A reusable buffer to avoid heap allocating these @@ -277,7 +278,7 @@ impl<'a> CommitRewriter<'a> { let commit_bytes = commit_v.data_as_bytes(); let commit_bytes = commit_bytes.try_as_aligned()?; - let commit = gv!("(a{sv}aya(say)sstayay)").cast(commit_bytes); + let commit = gv_commit!().cast(commit_bytes); let commit = commit.to_tuple(); let contents = &hex::encode(commit.6); diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 3f7ce54c5..5787a8575 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -31,7 +31,7 @@ pub mod tar; pub mod tokio_util; mod cmdext; - +pub(crate) mod objgv; /// Prelude, intended for glob import. pub mod prelude { #[doc(hidden)] diff --git a/lib/src/objgv.rs b/lib/src/objgv.rs new file mode 100644 index 000000000..3be5c94cd --- /dev/null +++ b/lib/src/objgv.rs @@ -0,0 +1,31 @@ +/// Type representing an ostree commit object. +macro_rules! gv_commit { + () => { + gvariant::gv!("(a{sv}aya(say)sstayay)") + }; +} +pub(crate) use gv_commit; + +/// Type representing an ostree DIRTREE object. +macro_rules! 
gv_dirtree { + () => { + gvariant::gv!("(a(say)a(sayay))") + }; +} +pub(crate) use gv_dirtree; + +#[cfg(test)] +mod tests { + use gvariant::aligned_bytes::TryAsAligned; + use gvariant::Marker; + + use super::*; + #[test] + fn test_dirtree() { + // Just a compilation test + let data = b"".try_as_aligned().ok(); + if let Some(data) = data { + let _t = gv_dirtree!().cast(data); + } + } +} diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 942b214bf..3427d0324 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -1,13 +1,13 @@ //! APIs for creating container images from OSTree commits -use crate::Result; - +use crate::objgv::*; +use anyhow::Result; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use gio::glib; use gio::prelude::*; use gvariant::aligned_bytes::TryAsAligned; -use gvariant::{gv, Marker, Structure}; +use gvariant::{Marker, Structure}; use ostree::gio; use std::borrow::Cow; use std::collections::HashSet; @@ -175,7 +175,7 @@ impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { self.append(ostree::ObjectType::DirTree, checksum, v)?; let v = v.data_as_bytes(); let v = v.try_as_aligned()?; - let v = gv!("(a(say)a(sayay))").cast(v); + let v = gv_dirtree!().cast(v); let (files, dirs) = v.to_tuple(); if let Some(c) = cancellable { @@ -271,7 +271,7 @@ fn impl_export( let commit_v = commit_v.data_as_bytes(); let commit_v = commit_v.try_as_aligned()?; - let commit = gv!("(a{sv}aya(say)sstayay)").cast(commit_v); + let commit = gv_commit!().cast(commit_v); let commit = commit.to_tuple(); let contents = &hex::encode(commit.6); let metadata_checksum = &hex::encode(commit.7); From 271c266140fbb5ed3885944202a06e91df472753 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 25 Oct 2021 11:06:25 -0400 Subject: [PATCH 147/775] Fix misc clippy lints Nothing important here, just trying to keep clippy happy so the real problems don't get lost in noise. --- lib/src/cli.rs | 4 ++-- lib/src/container/import.rs | 4 ++-- lib/src/container/store.rs | 12 +++++------- lib/src/refescape.rs | 2 +- lib/src/tar/write.rs | 2 +- 5 files changed, 11 insertions(+), 13 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index a12098ad1..785b6dfec 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -313,7 +313,7 @@ async fn container_info(imgref: &str) -> Result<()> { async fn container_store(repo: &str, imgref: &str) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; let imgref = imgref.try_into()?; - let mut imp = LayeredImageImporter::new(&repo, &imgref).await?; + let mut imp = LayeredImageImporter::new(repo, &imgref).await?; let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { println!("No changes in {} => {}", imgref, c); @@ -417,7 +417,7 @@ where ContainerImageOpts::List { repo } => { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, &repo, gio::NONE_CANCELLABLE)?; - for image in crate::container::store::list_images(&repo)? { + for image in crate::container::store::list_images(repo)? 
{ println!("{}", image); } Ok(()) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index 2d16b6e1c..eb010b5d7 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -105,11 +105,11 @@ pub struct Import { fn require_one_layer_blob(manifest: &oci::Manifest) -> Result<&oci::ManifestLayer> { let n = manifest.layers.len(); - if let Some(layer) = manifest.layers.iter().next() { + if let Some(layer) = manifest.layers.get(0) { if n > 1 { Err(anyhow!("Expected 1 layer, found {}", n)) } else { - Ok(&layer) + Ok(layer) } } else { // Validated by find_layer_blobids() diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index b93e08288..ff30d3169 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -297,7 +297,7 @@ impl LayeredImageImporter { let base_contents_obj = base_commit_tree.tree_get_contents_checksum().unwrap(); let base_metadata_obj = base_commit_tree.tree_get_metadata_checksum().unwrap(); let mt = ostree::MutableTree::from_checksum( - &repo, + repo, &base_contents_obj, &base_metadata_obj, ); @@ -339,11 +339,9 @@ pub fn list_images(repo: &ostree::Repo) -> Result> { ostree::RepoListRefsExtFlags::empty(), cancellable, )?; - let r: Result> = refs - .keys() + refs.keys() .map(|imgname| refescape::unprefix_unescape_ref(IMAGE_PREFIX, imgname)) - .collect(); - Ok(r?) + .collect() } /// Copy a downloaded image from one repository to another. @@ -360,7 +358,7 @@ pub async fn copy( let layer_refs = manifest .layers .iter() - .map(|layer| ref_for_layer(&layer)) + .map(|layer| ref_for_layer(layer)) .chain(std::iter::once(Ok(ostree_ref))); for ostree_ref in layer_refs { let ostree_ref = ostree_ref?; @@ -377,7 +375,7 @@ pub async fn copy( opts.insert("refs", &&refs[..]); opts.insert("flags", &(flags.bits() as i32)); let options = opts.to_variant(); - dest_repo.pull_with_options(&srcfd, &options, None, cancellable)?; + dest_repo.pull_with_options(srcfd, &options, None, cancellable)?; Ok(()) }) .await??; diff --git a/lib/src/refescape.rs b/lib/src/refescape.rs index 7c9f2b0e1..649882064 100644 --- a/lib/src/refescape.rs +++ b/lib/src/refescape.rs @@ -138,7 +138,7 @@ pub fn unprefix_unescape_ref(prefix: &str, ostree_ref: &str) -> Result { prefix ) })?; - Ok(unescape_for_ref(rest)?) + unescape_for_ref(rest) } #[cfg(test)] diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index f156e06ce..f4bb97d2d 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -71,7 +71,7 @@ enum NormalizedPathResult<'a> { Normal(Utf8PathBuf), } -fn normalize_validate_path<'a>(path: &'a Utf8Path) -> Result> { +fn normalize_validate_path(path: &Utf8Path) -> Result> { // This converts e.g. `foo//bar/./baz` into `foo/bar/baz`. let mut components = path .components() From 47c4148bf7694bfb89387e30a3e1aa3b24997f84 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 25 Oct 2021 11:08:43 -0400 Subject: [PATCH 148/775] container/store: Box PreparedImport case clippy correctly notes there's a large size difference between the two variants. --- lib/src/container/store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index b93e08288..1e03af404 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -54,7 +54,7 @@ pub enum PrepareResult { /// The image reference is already present; the contained string is the OSTree commit. 
AlreadyPresent(String), /// The image needs to be downloaded - Ready(PreparedImport), + Ready(Box), } /// A container image layer with associated downloaded-or-not state. @@ -213,11 +213,11 @@ impl LayeredImageImporter { base_layer, layers, }; - Ok(PrepareResult::Ready(imp)) + Ok(PrepareResult::Ready(Box::new(imp))) } /// Import a layered container image - pub async fn import(self, import: PreparedImport) -> Result { + pub async fn import(self, import: Box) -> Result { let proxy = self.proxy; // First download the base image (if necessary) - we need the SELinux policy // there to label all following layers. From 0b1e4427d9affdee2cb226407113c3dd57aabd96 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 25 Oct 2021 12:01:41 -0400 Subject: [PATCH 149/775] tar/export: Add a `new()` method General cleanup, and prep for further work around tar-split. --- lib/src/tar/export.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 3427d0324..aeeab1b59 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -55,6 +55,17 @@ fn xattrs_path(checksum: &str) -> Utf8PathBuf { } impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { + fn new(repo: &'a ostree::Repo, out: &'a mut tar::Builder) -> Self { + Self { + repo, + out, + wrote_dirmeta: HashSet::new(), + wrote_dirtree: HashSet::new(), + wrote_content: HashSet::new(), + wrote_xattrs: HashSet::new(), + } + } + fn append( &mut self, objtype: ostree::ObjectType, @@ -253,14 +264,7 @@ fn impl_export( out.append_data(&mut h, &path, &mut std::io::empty())?; } - let writer = &mut OstreeMetadataWriter { - repo, - out, - wrote_dirmeta: HashSet::new(), - wrote_dirtree: HashSet::new(), - wrote_content: HashSet::new(), - wrote_xattrs: HashSet::new(), - }; + let writer = &mut OstreeMetadataWriter::new(repo, out); let (commit_v, _) = repo.load_commit(commit_checksum)?; let commit_v = &commit_v; writer.append(ostree::ObjectType::Commit, commit_checksum, commit_v)?; From 91ef1f6335f502c804007e6a080cdc48f135d0ea Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 25 Oct 2021 12:02:27 -0400 Subject: [PATCH 150/775] tar/export: Rename internal struct It's not just a metadata writer. 
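
Taken together, this and the neighboring tar/export patches converge on one shape:
a single writer struct that owns the output stream plus the written-object dedup
state, constructed via `new()`. A toy sketch of that shape (the names here are
hypothetical, not the real API):

    use std::collections::HashSet;
    use std::io::Write;

    struct ToyTarWriter<'a, W: Write> {
        out: &'a mut W,
        wrote: HashSet<String>,
    }

    impl<'a, W: Write> ToyTarWriter<'a, W> {
        fn new(out: &'a mut W) -> Self {
            Self { out, wrote: HashSet::new() }
        }

        // Emit each named object at most once, mirroring how the real writer
        // dedups dirtree/dirmeta/content objects by checksum.
        fn write_object(&mut self, name: &str, data: &[u8]) -> std::io::Result<()> {
            if self.wrote.insert(name.to_string()) {
                self.out.write_all(data)?;
            }
            Ok(())
        }
    }

    fn main() -> std::io::Result<()> {
        let mut buf = Vec::new();
        let mut w = ToyTarWriter::new(&mut buf);
        w.write_object("sha256:abc", b"data")?;
        w.write_object("sha256:abc", b"data")?; // deduplicated, not written twice
        assert_eq!(buf.len(), 4);
        Ok(())
    }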
--- lib/src/tar/export.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index aeeab1b59..7e22bf735 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -28,7 +28,7 @@ fn map_path(p: &Utf8Path) -> std::borrow::Cow { } } -struct OstreeMetadataWriter<'a, W: std::io::Write> { +struct OstreeTarWriter<'a, W: std::io::Write> { repo: &'a ostree::Repo, out: &'a mut tar::Builder, wrote_dirtree: HashSet, @@ -54,7 +54,7 @@ fn xattrs_path(checksum: &str) -> Utf8PathBuf { format!("{}/repo/xattrs/{}", OSTREEDIR, checksum).into() } -impl<'a, W: std::io::Write> OstreeMetadataWriter<'a, W> { +impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn new(repo: &'a ostree::Repo, out: &'a mut tar::Builder) -> Self { Self { repo, @@ -264,7 +264,7 @@ fn impl_export( out.append_data(&mut h, &path, &mut std::io::empty())?; } - let writer = &mut OstreeMetadataWriter::new(repo, out); + let writer = &mut OstreeTarWriter::new(repo, out); let (commit_v, _) = repo.load_commit(commit_checksum)?; let commit_v = &commit_v; writer.append(ostree::ObjectType::Commit, commit_checksum, commit_v)?; From 042623160b051bf1fd1cc0ad4ed7ed7146065819 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 25 Oct 2021 12:31:54 -0400 Subject: [PATCH 151/775] tar/export: Move initial directory writes to helper function This way the writer owns the whole stream, which I think is cleaner. --- lib/src/tar/export.rs | 47 ++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 21 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 7e22bf735..523d05992 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -66,6 +66,30 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { } } + /// Write the initial directory structure. + fn prelude(&mut self) -> Result<()> { + // Object subdirectories + for d in 0..0xFF { + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Directory); + h.set_uid(0); + h.set_gid(0); + h.set_mode(0o755); + h.set_size(0); + let path = format!("{}/repo/objects/{:#04x}", OSTREEDIR, d); + self.out.append_data(&mut h, &path, &mut std::io::empty())?; + } + + // The special `repo/xattrs` directory used only in our tar serialization. 
+ let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Directory); + h.set_mode(0o755); + h.set_size(0); + let path = format!("{}/repo/xattrs", OSTREEDIR); + self.out.append_data(&mut h, &path, &mut std::io::empty())?; + Ok(()) + } + fn append( &mut self, objtype: ostree::ObjectType, @@ -242,29 +266,10 @@ fn impl_export( out: &mut tar::Builder, ) -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; - // Pre create the object directories - for d in 0..0xFF { - let mut h = tar::Header::new_gnu(); - h.set_entry_type(tar::EntryType::Directory); - h.set_uid(0); - h.set_gid(0); - h.set_mode(0o755); - h.set_size(0); - let path = format!("{}/repo/objects/{:#04x}", OSTREEDIR, d); - out.append_data(&mut h, &path, &mut std::io::empty())?; - } - - // Write out the xattrs directory - { - let mut h = tar::Header::new_gnu(); - h.set_entry_type(tar::EntryType::Directory); - h.set_mode(0o755); - h.set_size(0); - let path = format!("{}/repo/xattrs", OSTREEDIR); - out.append_data(&mut h, &path, &mut std::io::empty())?; - } let writer = &mut OstreeTarWriter::new(repo, out); + writer.prelude()?; + let (commit_v, _) = repo.load_commit(commit_checksum)?; let commit_v = &commit_v; writer.append(ostree::ObjectType::Commit, commit_checksum, commit_v)?; From db7ce7f5d8b30beb4c11531524cdbb2611ada851 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 25 Oct 2021 12:55:23 -0400 Subject: [PATCH 152/775] tar/export: Finish hoisting all write code into writer struct This helps encapsulate all state in that struct cleanly instead of having it spread out. Prep for tar-split work. --- lib/src/tar/export.rs | 60 +++++++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 22 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 523d05992..5959a14e8 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -31,6 +31,7 @@ fn map_path(p: &Utf8Path) -> std::borrow::Cow { struct OstreeTarWriter<'a, W: std::io::Write> { repo: &'a ostree::Repo, out: &'a mut tar::Builder, + wrote_prelude: bool, wrote_dirtree: HashSet, wrote_dirmeta: HashSet, wrote_content: HashSet, @@ -59,6 +60,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { Self { repo, out, + wrote_prelude: false, wrote_dirmeta: HashSet::new(), wrote_dirtree: HashSet::new(), wrote_content: HashSet::new(), @@ -68,6 +70,10 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { /// Write the initial directory structure. fn prelude(&mut self) -> Result<()> { + if self.wrote_prelude { + return Ok(()); + } + self.wrote_prelude = true; // Object subdirectories for d in 0..0xFF { let mut h = tar::Header::new_gnu(); @@ -90,6 +96,37 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { Ok(()) } + /// Recursively serialize a commit object to the target tar stream. + fn write_commit(&mut self, checksum: &str) -> Result<()> { + let cancellable = gio::NONE_CANCELLABLE; + + self.prelude()?; + + let (commit_v, _) = self.repo.load_commit(checksum)?; + let commit_v = &commit_v; + self.append(ostree::ObjectType::Commit, checksum, commit_v)?; + + if let Some(commitmeta) = self + .repo + .read_commit_detached_metadata(checksum, cancellable)? 
+ { + self.append(ostree::ObjectType::CommitMeta, checksum, &commitmeta)?; + } + + let commit_v = commit_v.data_as_bytes(); + let commit_v = commit_v.try_as_aligned()?; + let commit = gv_commit!().cast(commit_v); + let commit = commit.to_tuple(); + let contents = &hex::encode(commit.6); + let metadata_checksum = &hex::encode(commit.7); + let metadata_v = self + .repo + .load_variant(ostree::ObjectType::DirMeta, metadata_checksum)?; + self.append(ostree::ObjectType::DirMeta, metadata_checksum, &metadata_v)?; + self.append_dirtree(Utf8Path::new("./"), contents, cancellable)?; + Ok(()) + } + fn append( &mut self, objtype: ostree::ObjectType, @@ -265,29 +302,8 @@ fn impl_export( commit_checksum: &str, out: &mut tar::Builder, ) -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; - let writer = &mut OstreeTarWriter::new(repo, out); - writer.prelude()?; - - let (commit_v, _) = repo.load_commit(commit_checksum)?; - let commit_v = &commit_v; - writer.append(ostree::ObjectType::Commit, commit_checksum, commit_v)?; - - if let Some(commitmeta) = repo.read_commit_detached_metadata(commit_checksum, cancellable)? { - writer.append(ostree::ObjectType::CommitMeta, commit_checksum, &commitmeta)?; - } - - let commit_v = commit_v.data_as_bytes(); - let commit_v = commit_v.try_as_aligned()?; - let commit = gv_commit!().cast(commit_v); - let commit = commit.to_tuple(); - let contents = &hex::encode(commit.6); - let metadata_checksum = &hex::encode(commit.7); - let metadata_v = &repo.load_variant(ostree::ObjectType::DirMeta, metadata_checksum)?; - writer.append(ostree::ObjectType::DirMeta, metadata_checksum, metadata_v)?; - - writer.append_dirtree(Utf8Path::new("./"), contents, cancellable)?; + writer.write_commit(commit_checksum)?; Ok(()) } From f6400238475401894fa4d93dce355fed9168ed60 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 27 Aug 2021 13:18:19 -0400 Subject: [PATCH 153/775] lib/oci: Make writer completion consume self This will allow us to pass ownership of e.g. annotations to OCI data in the future. 
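
A minimal illustration of the ownership win (hypothetical types, not this crate's
API): when completion consumes `self`, owned fields such as the annotations map can
be moved into the final artifact instead of being cloned or drained:

    use std::collections::HashMap;

    struct ToyWriter {
        manifest_annotations: HashMap<String, String>,
    }

    impl ToyWriter {
        // `self` by value: the annotations move out, so no
        // `.drain().collect()` (and no clone) is needed.
        fn complete(self) -> HashMap<String, String> {
            self.manifest_annotations
        }
    }

    fn main() {
        let mut w = ToyWriter { manifest_annotations: HashMap::new() };
        w.manifest_annotations.insert("org.example.key".into(), "value".into());
        let annotations = w.complete();
        assert_eq!(annotations.len(), 1);
    }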
--- lib/src/container/export.rs | 2 +- lib/src/container/oci.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index 9a1317a32..af5cd9f1a 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -45,7 +45,7 @@ fn build_oci( // Explicitly error if the target exists std::fs::create_dir(ocidir_path).context("Creating OCI dir")?; let ocidir = &openat::Dir::open(ocidir_path)?; - let writer = &mut oci::OciWriter::new(ocidir)?; + let mut writer = oci::OciWriter::new(ocidir)?; let commit = repo.resolve_rev(rev, false)?.unwrap(); let commit = commit.as_str(); diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index d66f2db48..e4a351c67 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -175,7 +175,7 @@ impl<'a> OciWriter<'a> { } #[context("Writing OCI")] - pub(crate) fn complete(&mut self) -> Result<()> { + pub(crate) fn complete(self) -> Result<()> { let utsname = nix::sys::utsname::uname(); let machine = utsname.machine(); let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine); @@ -220,7 +220,7 @@ impl<'a> OciWriter<'a> { size: rootfs_blob.blob.size, digest: rootfs_blob.blob.digest_id(), }], - annotations: Some(self.manifest_annotations.drain().collect()), + annotations: Some(self.manifest_annotations), }; let manifest_blob = write_json_blob(self.dir, &manifest)?; From 0eeee834f33a42ea1373e0310b534db146bf8340 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 25 Aug 2021 16:51:40 -0400 Subject: [PATCH 154/775] Depend on oci-spec Since https://github.com/containers/oci-spec-rs now exists, let's use it! --- lib/Cargo.toml | 2 + lib/src/container/oci.rs | 175 +++++++++++++++++---------------------- 2 files changed, 78 insertions(+), 99 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 44d92480c..e3b023dcd 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -26,6 +26,7 @@ lazy_static = "1.4.0" libc = "0.2.92" maplit = "1.0.2" nix = "0.22.0" +oci-spec = "0.5.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" @@ -34,6 +35,7 @@ phf = { features = ["macros"], version = "0.9.0" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" +serde_plain = "0.3.0" structopt = "0.3.21" tar = "0.4.33" tempfile = "3.2.0" diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index e4a351c67..987848226 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -2,9 +2,10 @@ //! oriented towards generating images. use anyhow::{anyhow, Result}; -use containers_image_proxy::OCI_TYPE_LAYER_GZIP; use flate2::write::GzEncoder; use fn_error_context::context; +use oci_image::MediaType; +use oci_spec::image as oci_image; use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; use phf::phf_map; @@ -19,43 +20,9 @@ static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! 
{ "aarch64" => "arm64", }; -// OCI types, see https://github.com/opencontainers/image-spec/blob/master/media-types.md -pub(crate) const OCI_TYPE_CONFIG_JSON: &str = "application/vnd.oci.image.config.v1+json"; -pub(crate) const OCI_TYPE_MANIFEST_JSON: &str = "application/vnd.oci.image.manifest.v1+json"; - /// Path inside an OCI directory to the blobs const BLOBDIR: &str = "blobs/sha256"; -fn default_schema_version() -> u32 { - 2 -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct IndexPlatform { - pub architecture: String, - pub os: String, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct IndexManifest { - pub media_type: String, - pub digest: String, - pub size: u64, - - pub platform: Option, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct Index { - #[serde(default = "default_schema_version")] - pub schema_version: u32, - - pub manifests: Vec, -} - #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct ManifestLayer { @@ -67,9 +34,6 @@ pub(crate) struct ManifestLayer { #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub(crate) struct Manifest { - #[serde(default = "default_schema_version")] - pub schema_version: u32, - pub config: ManifestLayer, pub layers: Vec, pub annotations: Option>, @@ -94,6 +58,12 @@ impl Blob { pub(crate) fn digest_id(&self) -> String { format!("sha256:{}", self.sha256) } + + pub(crate) fn descriptor(&self) -> oci_image::DescriptorBuilder { + oci_image::DescriptorBuilder::default() + .digest(self.digest_id()) + .size(self.size as i64) + } } /// Completed layer metadata @@ -103,6 +73,12 @@ pub(crate) struct Layer { pub(crate) uncompressed_sha256: String, } +impl Layer { + pub(crate) fn descriptor(&self) -> oci_image::DescriptorBuilder { + self.blob.descriptor() + } +} + /// Create an OCI blob. 
pub(crate) struct BlobWriter<'a> { pub(crate) hash: Hasher, @@ -130,13 +106,15 @@ pub(crate) struct OciWriter<'a> { /// Write a serializable data (JSON) as an OCI blob #[context("Writing json blob")] -fn write_json_blob(ocidir: &openat::Dir, v: &S) -> Result { +fn write_json_blob( + ocidir: &openat::Dir, + v: &S, + media_type: oci_image::MediaType, +) -> Result { let mut w = BlobWriter::new(ocidir)?; - { - cjson::to_writer(&mut w, v).map_err(|e| anyhow!("{:?}", e))?; - } - - w.complete() + cjson::to_writer(&mut w, v).map_err(|e| anyhow!("{:?}", e))?; + let blob = w.complete()?; + Ok(blob.descriptor().media_type(media_type)) } impl<'a> OciWriter<'a> { @@ -179,65 +157,64 @@ impl<'a> OciWriter<'a> { let utsname = nix::sys::utsname::uname(); let machine = utsname.machine(); let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine); + let arch = oci_image::Arch::from(*arch); let rootfs_blob = self.root_layer.as_ref().unwrap(); let root_layer_id = format!("sha256:{}", rootfs_blob.uncompressed_sha256); - - let mut ctrconfig = serde_json::Map::new(); - ctrconfig.insert( - "Labels".to_string(), - serde_json::to_value(&self.config_annotations)?, - ); - if let Some(cmd) = self.cmd.as_deref() { - ctrconfig.insert("Cmd".to_string(), serde_json::to_value(cmd)?); + let rootfs = oci_image::RootFsBuilder::default() + .diff_ids(vec![root_layer_id]) + .build() + .unwrap(); + + let ctrconfig_builder = oci_image::ConfigBuilder::default().labels(self.config_annotations); + let ctrconfig = if let Some(cmd) = self.cmd { + ctrconfig_builder.cmd(cmd) + } else { + ctrconfig_builder } - let created_by = concat!("created by ", env!("CARGO_PKG_VERSION")); - let config = serde_json::json!({ - "architecture": arch, - "os": "linux", - "config": ctrconfig, - "rootfs": { - "type": "layers", - "diff_ids": [ root_layer_id ], - }, - "history": [ - { - "commit": created_by, - } - ] - }); - let config_blob = write_json_blob(self.dir, &config)?; - - let manifest = Manifest { - schema_version: default_schema_version(), - config: ManifestLayer { - media_type: OCI_TYPE_CONFIG_JSON.to_string(), - size: config_blob.size, - digest: config_blob.digest_id(), - }, - layers: vec![ManifestLayer { - media_type: OCI_TYPE_LAYER_GZIP.to_string(), - size: rootfs_blob.blob.size, - digest: rootfs_blob.blob.digest_id(), - }], - annotations: Some(self.manifest_annotations), - }; - let manifest_blob = write_json_blob(self.dir, &manifest)?; - - let index_data = serde_json::json!({ - "schemaVersion": default_schema_version(), - "manifests": [ - { - "mediaType": OCI_TYPE_MANIFEST_JSON, - "digest": manifest_blob.digest_id(), - "size": manifest_blob.size, - "platform": { - "architecture": arch, - "os": "linux" - } - } - ] - }); + .build() + .unwrap(); + let history = oci_image::HistoryBuilder::default() + .created_by(concat!("created by ", env!("CARGO_PKG_VERSION"))) + .build() + .unwrap(); + let config = oci_image::ImageConfigurationBuilder::default() + .architecture(arch.clone()) + .os(oci_image::Os::Linux) + .config(ctrconfig) + .rootfs(rootfs) + .history(vec![history]) + .build() + .unwrap(); + let config_blob = write_json_blob(self.dir, &config, MediaType::ImageConfig)?; + + let manifest_data = oci_image::ImageManifestBuilder::default() + .schema_version(oci_image::SCHEMA_VERSION) + .config(config_blob.build().unwrap()) + .layers(vec![rootfs_blob + .descriptor() + .media_type(MediaType::ImageLayerGzip) + .build() + .unwrap()]) + .annotations(self.manifest_annotations) + .build() + .unwrap(); + let manifest = write_json_blob(self.dir, &manifest_data, 
MediaType::ImageManifest)? + .platform( + oci_image::PlatformBuilder::default() + .architecture(arch) + .os(oci_spec::image::Os::Linux) + .build() + .unwrap(), + ) + .build() + .unwrap(); + + let index_data = oci_image::ImageIndexBuilder::default() + .schema_version(oci_image::SCHEMA_VERSION) + .manifests(vec![manifest]) + .build() + .unwrap(); self.dir .write_file_with("index.json", 0o644, |w| -> Result<()> { cjson::to_writer(w, &index_data).map_err(|e| anyhow::anyhow!("{:?}", e))?; From dcb5598248716f53ff36249543110138353f1374 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 26 Oct 2021 09:56:53 -0400 Subject: [PATCH 155/775] oci: Also add `CARGO_PKG_NAME` Spotted in review. --- lib/src/container/oci.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 987848226..6184d8071 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -175,7 +175,11 @@ impl<'a> OciWriter<'a> { .build() .unwrap(); let history = oci_image::HistoryBuilder::default() - .created_by(concat!("created by ", env!("CARGO_PKG_VERSION"))) + .created_by(format!( + "created by {} {}", + env!("CARGO_PKG_NAME"), + env!("CARGO_PKG_VERSION") + )) .build() .unwrap(); let config = oci_image::ImageConfigurationBuilder::default() From e52b00fae7178ecc2f42ea7d363c4b32d61638f4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 26 Oct 2021 11:01:09 -0400 Subject: [PATCH 156/775] tar/export: Minor tweaks from PR review --- lib/src/tar/export.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 5959a14e8..06b33e931 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -31,7 +31,7 @@ fn map_path(p: &Utf8Path) -> std::borrow::Cow { struct OstreeTarWriter<'a, W: std::io::Write> { repo: &'a ostree::Repo, out: &'a mut tar::Builder, - wrote_prelude: bool, + wrote_initdirs: bool, wrote_dirtree: HashSet, wrote_dirmeta: HashSet, wrote_content: HashSet, @@ -60,7 +60,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { Self { repo, out, - wrote_prelude: false, + wrote_initdirs: false, wrote_dirmeta: HashSet::new(), wrote_dirtree: HashSet::new(), wrote_content: HashSet::new(), @@ -69,11 +69,11 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { } /// Write the initial directory structure. - fn prelude(&mut self) -> Result<()> { - if self.wrote_prelude { + fn write_initial_directories(&mut self) -> Result<()> { + if self.wrote_initdirs { return Ok(()); } - self.wrote_prelude = true; + self.wrote_initdirs = true; // Object subdirectories for d in 0..0xFF { let mut h = tar::Header::new_gnu(); @@ -89,6 +89,8 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { // The special `repo/xattrs` directory used only in our tar serialization. 
let mut h = tar::Header::new_gnu(); h.set_entry_type(tar::EntryType::Directory); + h.set_uid(0); + h.set_gid(0); h.set_mode(0o755); h.set_size(0); let path = format!("{}/repo/xattrs", OSTREEDIR); @@ -100,7 +102,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn write_commit(&mut self, checksum: &str) -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; - self.prelude()?; + self.write_initial_directories()?; let (commit_v, _) = self.repo.load_commit(checksum)?; let commit_v = &commit_v; From 4e96bc2bac0141ac43b4d8389b1bd129b26137ca Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 26 Oct 2021 13:57:15 -0400 Subject: [PATCH 157/775] oci: Finish porting to main oci_spec crate This removes our internal structs for `Manifest`, which is prep for exposing the `oci_spec` crate types as public API. --- lib/src/container/import.rs | 28 +++++++++++------------ lib/src/container/oci.rs | 29 ++---------------------- lib/src/container/store.rs | 45 +++++++++++++++++-------------------- 3 files changed, 37 insertions(+), 65 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index eb010b5d7..a7af1fefa 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -31,9 +31,9 @@ use super::*; use anyhow::{anyhow, Context}; use containers_image_proxy::{ImageProxy, OpenedImage}; -use containers_image_proxy::{OCI_TYPE_LAYER_GZIP, OCI_TYPE_LAYER_TAR}; use fn_error_context::context; use futures_util::Future; +use oci_spec::image as oci_image; use tokio::io::{AsyncBufRead, AsyncRead}; use tracing::{event, instrument, Level}; @@ -103,9 +103,9 @@ pub struct Import { pub image_digest: String, } -fn require_one_layer_blob(manifest: &oci::Manifest) -> Result<&oci::ManifestLayer> { - let n = manifest.layers.len(); - if let Some(layer) = manifest.layers.get(0) { +fn require_one_layer_blob(manifest: &oci_image::ImageManifest) -> Result<&oci_image::Descriptor> { + let n = manifest.layers().len(); + if let Some(layer) = manifest.layers().get(0) { if n > 1 { Err(anyhow!("Expected 1 layer, found {}", n)) } else { @@ -142,14 +142,14 @@ pub async fn import( /// Create a decompressor for this MIME type, given a stream of input. 
fn new_async_decompressor<'a>( - media_type: &str, + media_type: &oci_image::MediaType, src: impl AsyncBufRead + Send + Unpin + 'a, ) -> Result> { match media_type { - OCI_TYPE_LAYER_GZIP => Ok(Box::new(tokio::io::BufReader::new( + oci_image::MediaType::ImageLayerGzip => Ok(Box::new(tokio::io::BufReader::new( async_compression::tokio::bufread::GzipDecoder::new(src), ))), - OCI_TYPE_LAYER_TAR => Ok(Box::new(src)), + oci_image::MediaType::ImageLayer => Ok(Box::new(src)), o => Err(anyhow::anyhow!("Unhandled layer type: {}", o)), } } @@ -158,15 +158,15 @@ fn new_async_decompressor<'a>( pub(crate) async fn fetch_layer_decompress<'a>( proxy: &'a ImageProxy, img: &OpenedImage, - layer: &oci::ManifestLayer, + layer: &oci_image::Descriptor, ) -> Result<( Box, impl Future> + 'a, )> { let (blob, driver) = proxy - .get_blob(img, layer.digest.as_str(), layer.size) + .get_blob(img, layer.digest().as_str(), layer.size() as u64) .await?; - let blob = new_async_decompressor(&layer.media_type, blob)?; + let blob = new_async_decompressor(layer.media_type(), blob)?; Ok((blob, driver)) } @@ -185,13 +185,13 @@ pub async fn import_from_manifest( return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); } let options = options.unwrap_or_default(); - let manifest: oci::Manifest = serde_json::from_slice(manifest_bytes)?; + let manifest: oci_image::ImageManifest = serde_json::from_slice(manifest_bytes)?; let layer = require_one_layer_blob(&manifest)?; event!( Level::DEBUG, "target blob digest:{} size: {}", - layer.digest.as_str(), - layer.size + layer.digest().as_str(), + layer.size() ); let proxy = ImageProxy::new().await?; let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; @@ -208,7 +208,7 @@ pub async fn import_from_manifest( let import = crate::tar::import_tar(repo, blob, Some(taropts)); let (import, driver) = tokio::join!(import, driver); driver?; - let ostree_commit = import.with_context(|| format!("Parsing blob {}", layer.digest))?; + let ostree_commit = import.with_context(|| format!("Parsing blob {}", layer.digest()))?; // FIXME write ostree commit after proxy finalization proxy.finalize().await?; event!(Level::DEBUG, "created commit {}", ostree_commit); diff --git a/lib/src/container/oci.rs b/lib/src/container/oci.rs index 6184d8071..9db1cc248 100644 --- a/lib/src/container/oci.rs +++ b/lib/src/container/oci.rs @@ -9,7 +9,6 @@ use oci_spec::image as oci_image; use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; use phf::phf_map; -use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::io::prelude::*; @@ -23,30 +22,6 @@ static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! { /// Path inside an OCI directory to the blobs const BLOBDIR: &str = "blobs/sha256"; -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct ManifestLayer { - pub media_type: String, - pub digest: String, - pub size: u64, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct Manifest { - pub config: ManifestLayer, - pub layers: Vec, - pub annotations: Option>, -} - -impl Manifest { - /// Return the digest of the configuration layer. 
- /// https://github.com/opencontainers/image-spec/blob/main/config.md - pub(crate) fn imageid(&self) -> &str { - self.config.digest.as_str() - } -} - /// Completed blob metadata #[derive(Debug)] pub(crate) struct Blob { @@ -336,9 +311,9 @@ mod tests { #[test] fn manifest() -> Result<()> { - let m: Manifest = serde_json::from_str(MANIFEST_DERIVE)?; + let m: oci_image::ImageManifest = serde_json::from_str(MANIFEST_DERIVE)?; assert_eq!( - m.layers[0].digest.as_str(), + m.layers()[0].digest().as_str(), "sha256:ee02768e65e6fb2bb7058282338896282910f3560de3e0d6cd9b1d5985e8360d" ); Ok(()) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 9b6f21800..a74e29d27 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -5,12 +5,12 @@ //! This code supports ingesting arbitrary layered container images from an ostree-exported //! base. See [`super::import`] for more information on encaspulation of images. -use super::oci::ManifestLayer; use super::*; use crate::refescape; use anyhow::{anyhow, Context}; use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; +use oci_spec::image as oci_image; use ostree::prelude::{Cast, ToVariant}; use ostree::{gio, glib}; use std::collections::{BTreeMap, HashMap}; @@ -31,8 +31,8 @@ fn ref_for_blob_digest(d: &str) -> Result { } /// Convert e.g. sha256:12345... into `/ostree/container/blob/sha256_2B12345...`. -fn ref_for_layer(l: &oci::ManifestLayer) -> Result { - ref_for_blob_digest(l.digest.as_str()) +fn ref_for_layer(l: &oci_image::Descriptor) -> Result { + ref_for_blob_digest(l.digest().as_str()) } /// Convert e.g. sha256:12345... into `/ostree/container/blob/sha256_2B12345...`. @@ -60,7 +60,7 @@ pub enum PrepareResult { /// A container image layer with associated downloaded-or-not state. #[derive(Debug)] pub struct ManifestLayerState { - layer: oci::ManifestLayer, + layer: oci_image::Descriptor, /// The ostree ref name for this layer. pub ostree_ref: String, /// The ostree commit that caches this layer, if present. @@ -70,12 +70,12 @@ pub struct ManifestLayerState { impl ManifestLayerState { /// The cryptographic checksum. pub fn digest(&self) -> &str { - self.layer.digest.as_str() + self.layer.digest().as_str() } /// The (possibly compressed) size. pub fn size(&self) -> u64 { - self.layer.size + self.layer.size() as u64 } } @@ -92,8 +92,8 @@ pub struct PreparedImport { pub base_layer: ManifestLayerState, /// Any further layers. pub layers: Vec, - /// TODO: serialize this into the commit object - manifest: oci::Manifest, + /// The deserialized manifest. + manifest: oci_image::ImageManifest, } /// A successful import of a container image. @@ -109,7 +109,7 @@ pub struct CompletedImport { } // Given a manifest, compute its ostree ref name and cached ostree commit -fn query_layer(repo: &ostree::Repo, layer: ManifestLayer) -> Result { +fn query_layer(repo: &ostree::Repo, layer: oci_image::Descriptor) -> Result { let ostree_ref = ref_for_layer(&layer)?; let commit = repo.resolve_rev(&ostree_ref, true)?.map(|s| s.to_string()); Ok(ManifestLayerState { @@ -119,15 +119,14 @@ fn query_layer(repo: &ostree::Repo, layer: ManifestLayer) -> Result Result { +fn manifest_from_commitmeta(commit_meta: &glib::VariantDict) -> Result { let manifest_bytes: String = commit_meta .lookup::(META_MANIFEST)? .ok_or_else(|| anyhow!("Failed to find {} metadata key", META_MANIFEST))?; - let manifest: oci::Manifest = serde_json::from_str(&manifest_bytes)?; - Ok(manifest) + Ok(serde_json::from_str(&manifest_bytes)?) 
} -fn manifest_from_commit(commit: &glib::Variant) -> Result { +fn manifest_from_commit(commit: &glib::Variant) -> Result { let commit_meta = &commit.child_value(0); let commit_meta = &ostree::glib::VariantDict::new(Some(commit_meta)); manifest_from_commitmeta(commit_meta) @@ -165,8 +164,8 @@ impl LayeredImageImporter { } let (manifest_digest, manifest_bytes) = self.proxy.fetch_manifest(&self.proxy_img).await?; - let manifest: oci::Manifest = serde_json::from_slice(&manifest_bytes)?; - let new_imageid = manifest.imageid(); + let manifest: oci_image::ImageManifest = serde_json::from_slice(&manifest_bytes)?; + let new_imageid = manifest.config().digest().as_str(); // Query for previous stored state let (previous_manifest_digest, previous_imageid) = @@ -184,18 +183,16 @@ impl LayeredImageImporter { } // Failing that, if they have the same imageID, we're also done. let previous_manifest = manifest_from_commitmeta(&commit_meta)?; - if previous_manifest.imageid() == new_imageid { + let previous_imageid = previous_manifest.config().digest().as_str(); + if previous_imageid == new_imageid { return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string())); } - ( - Some(previous_digest), - Some(previous_manifest.imageid().to_string()), - ) + (Some(previous_digest), Some(previous_imageid.to_string())) } else { (None, None) }; - let mut layers = manifest.layers.iter().cloned(); + let mut layers = manifest.layers().iter().cloned(); // We require a base layer. let base_layer = layers.next().ok_or_else(|| anyhow!("No layers found"))?; let base_layer = query_layer(&self.repo, base_layer)?; @@ -233,7 +230,7 @@ impl LayeredImageImporter { let (commit, driver) = tokio::join!(importer, driver); driver?; let commit = - commit.with_context(|| format!("Parsing blob {}", &base_layer_ref.digest))?; + commit.with_context(|| format!("Parsing blob {}", base_layer_ref.digest()))?; // TODO support ref writing in tar import self.repo.set_ref_immediate( None, @@ -353,10 +350,10 @@ pub async fn copy( let ostree_ref = ref_for_image(&imgref.imgref)?; let rev = src_repo.resolve_rev(&ostree_ref, false)?.unwrap(); let (commit_obj, _) = src_repo.load_commit(rev.as_str())?; - let manifest: oci::Manifest = manifest_from_commit(&commit_obj)?; + let manifest = manifest_from_commit(&commit_obj)?; // Create a task to copy each layer, plus the final ref let layer_refs = manifest - .layers + .layers() .iter() .map(|layer| ref_for_layer(layer)) .chain(std::iter::once(Ok(ostree_ref))); From 063e47ca41bc1df5d78f104b6b325b253b51eec3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 26 Oct 2021 17:25:08 -0400 Subject: [PATCH 158/775] cli: Add missing sysroot load "Shouldn't we have CI tests that cover this?" you ask - yes, yes we should. Need to deduplicate infrastructure for this between this and the main ostree repo - or between this and rpm-ostree. 
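The fix itself is one line: an `ostree::Sysroot` starts out unloaded, and its repo and deployments are only available after `load()`. A minimal sketch of the corrected pattern (the path and helper name are illustrative, not part of the patch):

```rust
use ostree_ext::ostree::{self, gio};

// Open and load a sysroot; querying deployments or deploying to an
// unloaded Sysroot fails, which is the bug this patch fixes in the CLI.
fn open_sysroot(path: &str) -> anyhow::Result<ostree::Sysroot> {
    let sysroot = ostree::Sysroot::new(Some(&gio::File::for_path(path)));
    sysroot.load(gio::NONE_CANCELLABLE)?;
    Ok(sysroot)
}
```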
--- lib/src/cli.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 785b6dfec..a7fbcb2a9 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -442,6 +442,7 @@ where karg, } => { let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); + sysroot.load(gio::NONE_CANCELLABLE)?; let imgref = OstreeImageReference::try_from(imgref.as_str())?; let kargs = karg.as_deref(); let kargs = kargs.map(|v| { From 95ba978d658d482d2222b7af936a3453448acdd6 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 27 Oct 2021 10:00:43 -0400 Subject: [PATCH 159/775] container/deploy: Change origin key to match what rpm-ostree is using The original higher level (sysroot oriented) code for container bits was prototyped more in rpm-ostree, where we picked the key `container-image-reference`. When I went to write the deploy code here, I picked the key `container` instead. But, I think `container-image-reference` is more self-explanatory. No one is going to be typing this stuff by hand, so verbosity is OK. Now further, let's make the group `origin`, matching the group for the base `refspec` key. (Again matching rpm-ostree too) This makes `container-image-reference` feel as native as `refspec` to ostree. --- lib/src/container/deploy.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 9d638d06f..4a8944023 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -6,7 +6,7 @@ use anyhow::Result; use ostree::glib; /// The key in the OSTree origin which holds a serialized [`super::OstreeImageReference`]. -pub const ORIGIN_CONTAINER: &str = "container"; +pub const ORIGIN_CONTAINER: &str = "container-image-reference"; async fn pull_idempotent(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<String> { let mut imp = super::store::LayeredImageImporter::new(repo, imgref).await?; @@ -37,7 +37,7 @@ pub async fn deploy<'opts>( let repo = &sysroot.repo().unwrap(); let commit = &pull_idempotent(repo, imgref).await?; let origin = glib::KeyFile::new(); - origin.set_string("ostree", ORIGIN_CONTAINER, &imgref.to_string()); + origin.set_string("origin", ORIGIN_CONTAINER, &imgref.to_string()); let deployment = &sysroot.deploy_tree( Some(stateroot), commit, From 13f57863d40cea9d8bcca680ee3f17b69baccb2e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 27 Oct 2021 13:44:19 -0400 Subject: [PATCH 160/775] Add a new `keyfileext` module with `KeyFileExt` trait Draining more code from rpm-ostree around handling `glib::KeyFile`. I will try to do a PR to add this stuff to the main glib crate, but one step at a time. --- lib/src/keyfileext.rs | 87 +++++++++++++++++++++++++++++++++++++++++++ lib/src/lib.rs | 1 + 2 files changed, 88 insertions(+) create mode 100644 lib/src/keyfileext.rs diff --git a/lib/src/keyfileext.rs b/lib/src/keyfileext.rs new file mode 100644 index 000000000..767cf7b6e --- /dev/null +++ b/lib/src/keyfileext.rs @@ -0,0 +1,87 @@ +//! Helper methods for [`glib::KeyFile`]. + +use glib::GString; +use ostree::glib; + +/// Helper methods for [`glib::KeyFile`]. +pub trait KeyFileExt { + /// Get a string value, but return `None` if the key does not exist. + fn optional_string(&self, group: &str, key: &str) -> Result<Option<GString>, glib::Error>; + /// Get a boolean value, but return `None` if the key does not exist. + fn optional_bool(&self, group: &str, key: &str) -> Result<Option<bool>, glib::Error>; + /// Get a string list value, but return `None` if the key does not exist.
+ fn optional_string_list( + &self, + group: &str, + key: &str, + ) -> Result<Option<Vec<GString>>, glib::Error>; +} + +/// Consume a keyfile error, mapping the case where group or key is not found to `Ok(None)`. +pub fn map_keyfile_optional<T>(res: Result<T, glib::Error>) -> Result<Option<T>, glib::Error> { + match res { + Ok(v) => Ok(Some(v)), + Err(e) => { + if let Some(t) = e.kind::<glib::KeyFileError>() { + match t { + glib::KeyFileError::GroupNotFound | glib::KeyFileError::KeyNotFound => Ok(None), + _ => Err(e), + } + } else { + Err(e) + } + } + } +} + +impl KeyFileExt for glib::KeyFile { + fn optional_string(&self, group: &str, key: &str) -> Result<Option<GString>, glib::Error> { + map_keyfile_optional(self.string(group, key)) + } + + fn optional_bool(&self, group: &str, key: &str) -> Result<Option<bool>, glib::Error> { + map_keyfile_optional(self.boolean(group, key)) + } + + fn optional_string_list( + &self, + group: &str, + key: &str, + ) -> Result<Option<Vec<GString>>, glib::Error> { + map_keyfile_optional(self.string_list(group, key)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_optional() { + let kf = glib::KeyFile::new(); + assert_eq!(kf.optional_string("foo", "bar").unwrap(), None); + kf.set_string("foo", "baz", "someval"); + assert_eq!(kf.optional_string("foo", "bar").unwrap(), None); + assert_eq!( + kf.optional_string("foo", "baz").unwrap().unwrap(), + "someval" + ); + + assert!(kf.optional_bool("foo", "baz").is_err()); + assert_eq!(kf.optional_bool("foo", "bar").unwrap(), None); + kf.set_boolean("foo", "somebool", false); + assert_eq!(kf.optional_bool("foo", "somebool").unwrap(), Some(false)); + + assert_eq!(kf.optional_string_list("foo", "bar").unwrap(), None); + kf.set_string("foo", "somelist", "one;two;three"); + assert_eq!( + kf.optional_string_list("foo", "somelist").unwrap(), + Some( + vec!["one", "two", "three"] + .iter() + .map(|&v| GString::from(v)) + .collect() + ) + ); + } +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 5787a8575..66dc387a1 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -26,6 +26,7 @@ pub mod cli; pub mod container; pub mod diff; pub mod ima; +pub mod keyfileext; pub mod refescape; pub mod tar; pub mod tokio_util; From a773546ee5ee3a18e756648fdce6c6fcfee6e1bd Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 27 Oct 2021 13:33:52 -0400 Subject: [PATCH 161/775] Add a public API to get manifest digest rpm-ostree wants to show this. --- lib/src/container/store.rs | 67 +++++++++++++++++++++----------------- lib/tests/it/main.rs | 6 ++++ 2 files changed, 44 insertions(+), 29 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index a74e29d27..53816ac2b 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -119,17 +119,28 @@ fn query_layer(repo: &ostree::Repo, layer: oci_image::Descriptor) -> Result<ManifestLayerState> -fn manifest_from_commitmeta(commit_meta: &glib::VariantDict) -> Result<oci_image::ImageManifest> { +fn manifest_data_from_commitmeta( + commit_meta: &glib::VariantDict, +) -> Result<(oci_image::ImageManifest, String)> { + let digest = commit_meta + .lookup(META_MANIFEST_DIGEST)? + .ok_or_else(|| anyhow!("Missing {} metadata on merge commit", META_MANIFEST_DIGEST))?; let manifest_bytes: String = commit_meta .lookup::<String>(META_MANIFEST)? .ok_or_else(|| anyhow!("Failed to find {} metadata key", META_MANIFEST))?; - Ok(serde_json::from_str(&manifest_bytes)?) + let r = serde_json::from_str(&manifest_bytes)?; + Ok((r, digest)) } -fn manifest_from_commit(commit: &glib::Variant) -> Result<oci_image::ImageManifest> { +/// Return the original digest of the manifest stored in the commit metadata. +/// This will be a string of the form e.g. `sha256:<digest>`.
+/// +/// This can be used to uniquely identify the image. For example, it can be used +/// in a "digested pull spec" like `quay.io/someuser/exampleos@sha256:...`. +pub fn manifest_digest_from_commit(commit: &glib::Variant) -> Result { let commit_meta = &commit.child_value(0); - let commit_meta = &ostree::glib::VariantDict::new(Some(commit_meta)); - manifest_from_commitmeta(commit_meta) + let commit_meta = &glib::VariantDict::new(Some(commit_meta)); + Ok(manifest_data_from_commitmeta(commit_meta)?.1) } impl LayeredImageImporter { @@ -168,29 +179,26 @@ impl LayeredImageImporter { let new_imageid = manifest.config().digest().as_str(); // Query for previous stored state - let (previous_manifest_digest, previous_imageid) = - if let Some(merge_commit) = self.repo.resolve_rev(&self.ostree_ref, true)? { - let (merge_commit_obj, _) = self.repo.load_commit(merge_commit.as_str())?; - let commit_meta = &merge_commit_obj.child_value(0); - let commit_meta = ostree::glib::VariantDict::new(Some(commit_meta)); - let previous_digest: String = - commit_meta.lookup(META_MANIFEST_DIGEST)?.ok_or_else(|| { - anyhow!("Missing {} metadata on merge commit", META_MANIFEST_DIGEST) - })?; - // If the manifest digests match, we're done. - if previous_digest == manifest_digest { - return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string())); - } - // Failing that, if they have the same imageID, we're also done. - let previous_manifest = manifest_from_commitmeta(&commit_meta)?; - let previous_imageid = previous_manifest.config().digest().as_str(); - if previous_imageid == new_imageid { - return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string())); - } - (Some(previous_digest), Some(previous_imageid.to_string())) - } else { - (None, None) - }; + let (previous_manifest_digest, previous_imageid) = if let Some(merge_commit) = + self.repo.resolve_rev(&self.ostree_ref, true)? + { + let merge_commit_obj = &self.repo.load_commit(merge_commit.as_str())?.0; + let commit_meta = &merge_commit_obj.child_value(0); + let commit_meta = &ostree::glib::VariantDict::new(Some(commit_meta)); + let (previous_manifest, previous_digest) = manifest_data_from_commitmeta(commit_meta)?; + // If the manifest digests match, we're done. + if previous_digest == manifest_digest { + return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string())); + } + // Failing that, if they have the same imageID, we're also done. + let previous_imageid = previous_manifest.config().digest().as_str(); + if previous_imageid == new_imageid { + return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string())); + } + (Some(previous_digest), Some(previous_imageid.to_string())) + } else { + (None, None) + }; let mut layers = manifest.layers().iter().cloned(); // We require a base layer. 
@@ -350,7 +358,8 @@ pub async fn copy( let ostree_ref = ref_for_image(&imgref.imgref)?; let rev = src_repo.resolve_rev(&ostree_ref, false)?.unwrap(); let (commit_obj, _) = src_repo.load_commit(rev.as_str())?; - let manifest = manifest_from_commit(&commit_obj)?; + let commit_meta = &glib::VariantDict::new(Some(&commit_obj.child_value(0))); + let (manifest, _) = manifest_data_from_commitmeta(commit_meta)?; // Create a task to copy each layer, plus the final ref let layer_refs = manifest .layers() diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index da7738923..aab230301 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -432,6 +432,7 @@ async fn test_container_write_derive() -> Result<()> { PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), PrepareResult::Ready(r) => r, }; + let expected_digest = prep.manifest_digest.clone(); assert!(prep.base_layer.commit.is_none()); for layer in prep.layers.iter() { assert!(layer.commit.is_none()); @@ -442,6 +443,11 @@ async fn test_container_write_derive() -> Result<()> { assert_eq!(images.len(), 1); assert_eq!(images[0], exampleos_ref.imgref.to_string()); + let imported_commit = &fixture.destrepo.load_commit(import.commit.as_str())?.0; + let digest = ostree_ext::container::store::manifest_digest_from_commit(imported_commit)?; + assert!(digest.starts_with("sha256:")); + assert_eq!(digest, expected_digest); + // Parse the commit and verify we pulled the derived content. bash!( "ostree --repo={repo} ls {r} /usr/share/anewfile", From 175d1ec0ef51d170fc681572d38cb154e8222a6c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 28 Oct 2021 16:41:05 -0400 Subject: [PATCH 162/775] lib: Drop unused `serde-plain` I planned to use this I guess, but didn't. --- lib/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index e3b023dcd..b52d9db06 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -35,7 +35,6 @@ phf = { features = ["macros"], version = "0.9.0" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" -serde_plain = "0.3.0" structopt = "0.3.21" tar = "0.4.33" tempfile = "3.2.0" From 345cc6976703b7366b898b02a17a8cc4a1aff3d3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 28 Oct 2021 16:42:58 -0400 Subject: [PATCH 163/775] lib: Remove clap/structopt from dev-dependencies We expose the CLI as part of the library now. --- lib/Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index b52d9db06..a47c646b0 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -44,11 +44,9 @@ tokio-util = { features = ["io"], version = "0.6" } tracing = "0.1" [dev-dependencies] -clap = "2.33.3" indoc = "1.0.3" quickcheck = "1" sh-inline = "0.1.0" -structopt = "0.3.21" [package.metadata.docs.rs] features = ["dox"] From 16391d8d1e2ea19a591dfd5e419cc6e2c561c06d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 28 Oct 2021 16:46:41 -0400 Subject: [PATCH 164/775] lib: Remove `maplit` It's unused. 
--- lib/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index a47c646b0..1f404a384 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -24,7 +24,6 @@ hex = "0.4.3" indicatif = "0.16.0" lazy_static = "1.4.0" libc = "0.2.92" -maplit = "1.0.2" nix = "0.22.0" oci-spec = "0.5.0" openat = "0.1.20" From 686c838a8346d2b3f98d2e5a92fa66bb3526a77c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 28 Oct 2021 16:46:57 -0400 Subject: [PATCH 165/775] lib: Remove `tokio-stream` Not used since we switched to forking `skopeo`. --- lib/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 1f404a384..5546955b0 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -38,7 +38,6 @@ structopt = "0.3.21" tar = "0.4.33" tempfile = "3.2.0" tokio = { features = ["full"], version = "1" } -tokio-stream = "0.1.5" tokio-util = { features = ["io"], version = "0.6" } tracing = "0.1" From e04ed454a7ab20f33c0c21b1a18c7477ce63b5f7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 28 Oct 2021 16:56:08 -0400 Subject: [PATCH 166/775] container/deploy: Add a `--target-imgref` option We want the ability to separate the image that's pulled from its target reference. This is what we do today in coreos-assembler for pure ostree refs - e.g. we deploy via a commit hash, but still have `ostree admin upgrade` pull from a ref. This is equivalent functionality for containers. --- lib/src/cli.rs | 14 +++++++++++++- lib/src/container/deploy.rs | 12 +++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index a7fbcb2a9..9670b5c9a 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -157,10 +157,17 @@ enum ContainerImageOpts { #[structopt(long)] stateroot: String, - /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest + /// Source image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos@sha256:abcd... #[structopt(long)] imgref: String, + /// Target image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest + /// + /// If specified, `--imgref` will be used as a source, but this reference will be emitted into the origin + /// so that later OS updates pull from it. + #[structopt(long)] + target_imgref: Option, + #[structopt(long)] /// Add a kernel argument karg: Option>, @@ -439,11 +446,15 @@ where sysroot, stateroot, imgref, + target_imgref, karg, } => { let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); sysroot.load(gio::NONE_CANCELLABLE)?; let imgref = OstreeImageReference::try_from(imgref.as_str())?; + let target_imgref = target_imgref + .map(|s| OstreeImageReference::try_from(s.as_str())) + .transpose()?; let kargs = karg.as_deref(); let kargs = kargs.map(|v| { let r: Vec<_> = v.iter().map(|s| s.as_str()).collect(); @@ -451,6 +462,7 @@ where }); let options = crate::container::deploy::DeployOpts { kargs: kargs.as_deref(), + target_imgref: target_imgref.as_ref(), }; crate::container::deploy::deploy(sysroot, &stateroot, &imgref, Some(options)) .await diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 4a8944023..d6de68cc0 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -21,6 +21,15 @@ async fn pull_idempotent(repo: &ostree::Repo, imgref: &OstreeImageReference) -> pub struct DeployOpts<'a> { /// Kernel arguments to use. 
pub kargs: Option<&'a [&'a str]>, + /// Target image reference, as distinct from the source. + /// + /// In many cases, one may want a workflow where a system is provisioned from + /// an image with a specific digest (e.g. `quay.io/example/os@sha256:...`) for + /// reproducibility. However, one would want `ostree admin upgrade` to fetch + /// `quay.io/example/os:latest`. + /// + /// To implement this, use this option for the latter `:latest` tag. + pub target_imgref: Option<&'a OstreeImageReference>, } /// Write a container image to an OSTree deployment. @@ -37,7 +46,8 @@ pub async fn deploy<'opts>( let repo = &sysroot.repo().unwrap(); let commit = &pull_idempotent(repo, imgref).await?; let origin = glib::KeyFile::new(); - origin.set_string("origin", ORIGIN_CONTAINER, &imgref.to_string()); + let target_imgref = options.target_imgref.unwrap_or(imgref); + origin.set_string("origin", ORIGIN_CONTAINER, &target_imgref.to_string()); let deployment = &sysroot.deploy_tree( Some(stateroot), commit, From 9da9223355c7264638ae747f6d0677bdf07cb7de Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 28 Oct 2021 18:45:48 -0400 Subject: [PATCH 167/775] lib/container: Expose `oci_spec::image::ImageManifest` publicly Previously we had our own internal definitions for these things, which I didn't want to make public API. Now that a crate exists for this and we're using it internally, let's take the next step and expose it as part of our API. This will allow clients (e.g. rpm-ostree) to render things more nicely. --- lib/src/container/import.rs | 11 ++++++----- lib/src/container/store.rs | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/src/container/import.rs b/lib/src/container/import.rs index a7af1fefa..3e798f3cb 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/import.rs @@ -86,12 +86,14 @@ impl AsyncRead for ProgressReader { /// Download the manifest for a target image and its sha256 digest. #[context("Fetching manifest")] -pub async fn fetch_manifest(imgref: &OstreeImageReference) -> Result<(Vec<u8>, String)> { +pub async fn fetch_manifest( + imgref: &OstreeImageReference, +) -> Result<(oci_spec::image::ImageManifest, String)> { let proxy = ImageProxy::new().await?; let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; let (digest, raw_manifest) = proxy.fetch_manifest(oi).await?; proxy.close_image(oi).await?; - Ok((raw_manifest, digest)) + Ok((serde_json::from_slice(&raw_manifest)?, digest)) } /// The result of an import operation @@ -172,11 +174,11 @@ pub(crate) async fn fetch_layer_decompress<'a>( /// Fetch a container image using an in-memory manifest and import its embedded OSTree commit.
#[context("Importing {}", imgref)] -#[instrument(skip(repo, options, manifest_bytes))] +#[instrument(skip(repo, options, manifest))] pub async fn import_from_manifest( repo: &ostree::Repo, imgref: &OstreeImageReference, - manifest_bytes: &[u8], + manifest: &oci_spec::image::ImageManifest, options: Option, ) -> Result { if matches!(imgref.sigverify, SignatureSource::ContainerPolicy) @@ -185,7 +187,6 @@ pub async fn import_from_manifest( return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); } let options = options.unwrap_or_default(); - let manifest: oci_image::ImageManifest = serde_json::from_slice(manifest_bytes)?; let layer = require_one_layer_blob(&manifest)?; event!( Level::DEBUG, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 53816ac2b..55e44ef2c 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -84,6 +84,8 @@ impl ManifestLayerState { pub struct PreparedImport { /// The manifest digest that was found pub manifest_digest: String, + /// The deserialized manifest. + pub manifest: oci_image::ImageManifest, /// The previously stored manifest digest. pub previous_manifest_digest: Option, /// The previously stored image ID. @@ -92,8 +94,6 @@ pub struct PreparedImport { pub base_layer: ManifestLayerState, /// Any further layers. pub layers: Vec, - /// The deserialized manifest. - manifest: oci_image::ImageManifest, } /// A successful import of a container image. From 49df552f09f42693f34e7fd19416e535064fc43e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 28 Oct 2021 17:43:05 -0400 Subject: [PATCH 168/775] lib/cli: Use `#[structopt(parse)]` This is both less code *and* results in better error messages for invalid values. --- lib/src/cli.rs | 52 ++++++++++++++++++++++++++++---------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 9670b5c9a..fc49e0d8b 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -8,12 +8,20 @@ use anyhow::Result; use ostree::gio; use std::collections::BTreeMap; -use std::convert::{TryFrom, TryInto}; +use std::convert::TryFrom; use std::ffi::OsString; use structopt::StructOpt; use crate::container::store::{LayeredImageImporter, PrepareResult}; -use crate::container::{Config, ImportOptions, OstreeImageReference}; +use crate::container::{Config, ImageReference, ImportOptions, OstreeImageReference}; + +fn parse_imgref(s: &str) -> Result { + OstreeImageReference::try_from(s) +} + +fn parse_base_imgref(s: &str) -> Result { + ImageReference::try_from(s) +} #[derive(Debug, StructOpt)] struct BuildOpts { @@ -70,7 +78,8 @@ enum ContainerOpts { repo: String, /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - imgref: String, + #[structopt(parse(try_from_str = parse_imgref))] + imgref: OstreeImageReference, /// Create an ostree ref pointing to the imported commit #[structopt(long)] @@ -84,7 +93,8 @@ enum ContainerOpts { /// Print information about an exported ostree-container image. Info { /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - imgref: String, + #[structopt(parse(try_from_str = parse_imgref))] + imgref: OstreeImageReference, }, /// Wrap an ostree commit into a container @@ -98,7 +108,8 @@ enum ContainerOpts { rev: String, /// Image reference, e.g. 
registry:quay.io/exampleos/exampleos:latest - imgref: String, + #[structopt(parse(try_from_str = parse_base_imgref))] + imgref: ImageReference, /// Additional labels for the container #[structopt(name = "label", long, short)] @@ -130,7 +141,8 @@ enum ContainerImageOpts { repo: String, /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest - imgref: String, + #[structopt(parse(try_from_str = parse_imgref))] + imgref: OstreeImageReference, }, /// Copy a pulled container image from one repo to another. @@ -144,7 +156,8 @@ enum ContainerImageOpts { dest_repo: String, /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest - imgref: String, + #[structopt(parse(try_from_str = parse_imgref))] + imgref: OstreeImageReference, }, /// Perform initial deployment for a container image @@ -159,14 +172,16 @@ enum ContainerImageOpts { /// Source image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos@sha256:abcd... #[structopt(long)] - imgref: String, + #[structopt(parse(try_from_str = parse_imgref))] + imgref: OstreeImageReference, /// Target image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest /// /// If specified, `--imgref` will be used as a source, but this reference will be emitted into the origin /// so that later OS updates pull from it. #[structopt(long)] - target_imgref: Option, + #[structopt(parse(try_from_str = parse_imgref))] + target_imgref: Option, #[structopt(long)] /// Add a kernel argument @@ -228,12 +243,11 @@ fn tar_export(opts: &ExportOpts) -> Result<()> { /// Import a container image with an encapsulated ostree commit. async fn container_import( repo: &str, - imgref: &str, + imgref: &OstreeImageReference, write_ref: Option<&str>, quiet: bool, ) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let imgref = imgref.try_into()?; let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default()); let target = indicatif::ProgressDrawTarget::stdout(); let style = indicatif::ProgressStyle::default_bar(); @@ -293,7 +307,7 @@ async fn container_import( async fn container_export( repo: &str, rev: &str, - imgref: &str, + imgref: &ImageReference, labels: BTreeMap, cmd: Option>, ) -> Result<()> { @@ -302,24 +316,21 @@ async fn container_export( labels: Some(labels), cmd, }; - let imgref = imgref.try_into()?; let pushed = crate::container::export(repo, rev, &config, &imgref).await?; println!("{}", pushed); Ok(()) } /// Load metadata for a container image with an encapsulated ostree commit. -async fn container_info(imgref: &str) -> Result<()> { - let imgref = imgref.try_into()?; +async fn container_info(imgref: &OstreeImageReference) -> Result<()> { let (_, digest) = crate::container::fetch_manifest(&imgref).await?; println!("{} digest: {}", imgref, digest); Ok(()) } /// Write a layered container image into an OSTree commit. -async fn container_store(repo: &str, imgref: &str) -> Result<()> { +async fn container_store(repo: &str, imgref: &OstreeImageReference) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let imgref = imgref.try_into()?; let mut imp = LayeredImageImporter::new(repo, &imgref).await?; let prep = match imp.prepare().await? 
{ PrepareResult::AlreadyPresent(c) => { @@ -393,7 +404,7 @@ where Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await, Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt), Opt::Container(o) => match o { - ContainerOpts::Info { imgref } => container_info(imgref.as_str()).await, + ContainerOpts::Info { imgref } => container_info(&imgref).await, ContainerOpts::Unencapsulate { repo, imgref, @@ -439,7 +450,6 @@ where &ostree::Repo::open_at(libc::AT_FDCWD, &src_repo, gio::NONE_CANCELLABLE)?; let dest_repo = &ostree::Repo::open_at(libc::AT_FDCWD, &dest_repo, gio::NONE_CANCELLABLE)?; - let imgref = OstreeImageReference::try_from(imgref.as_str())?; crate::container::store::copy(src_repo, dest_repo, &imgref).await } ContainerImageOpts::Deploy { @@ -451,10 +461,6 @@ where } => { let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); sysroot.load(gio::NONE_CANCELLABLE)?; - let imgref = OstreeImageReference::try_from(imgref.as_str())?; - let target_imgref = target_imgref - .map(|s| OstreeImageReference::try_from(s.as_str())) - .transpose()?; let kargs = karg.as_deref(); let kargs = kargs.map(|v| { let r: Vec<_> = v.iter().map(|s| s.as_str()).collect(); From c85ce036208c4de992e8a6b8abfd02c6848505c2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 29 Oct 2021 09:12:30 -0400 Subject: [PATCH 169/775] lib: Re-export `oci_spec` Since it's part of our public API. --- lib/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 66dc387a1..21b818b45 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -13,6 +13,7 @@ // Re-export our dependencies. See https://gtk-rs.org/blog/2021/06/22/new-release.html // "Dependencies are re-exported". Users will need e.g. `gio::File`, so this avoids // them needing to update matching versions. +pub use oci_spec; pub use ostree; pub use ostree::gio; pub use ostree::gio::glib; From bc4c49b536e415340337eef310037fda327c7f3e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 29 Oct 2021 15:16:23 -0400 Subject: [PATCH 170/775] lib: Rename internal `oci` module to `ociwriter` This makes fully clear this is just for writing; reading is handled via the `containers-image-proxy` and `oci_spec` crates. 
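The practical effect of the split: reads deserialize straight into the `oci_spec` types, while the internal `ociwriter` module is only an output backend. A minimal sketch of the read side (the helper name is hypothetical; the deserialization call is the one used throughout the diffs above):

```rust
use ostree_ext::oci_spec::image::ImageManifest;

// Manifest bytes fetched via the image proxy parse directly into the
// oci_spec crate's types; no internal Manifest struct is involved anymore.
fn parse_manifest(raw_manifest: &[u8]) -> anyhow::Result<ImageManifest> {
    Ok(serde_json::from_slice(raw_manifest)?)
}
```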
--- lib/src/container/export.rs | 6 +++--- lib/src/container/mod.rs | 2 +- lib/src/container/{oci.rs => ociwriter.rs} | 0 3 files changed, 4 insertions(+), 4 deletions(-) rename lib/src/container/{oci.rs => ociwriter.rs} (100%) diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index af5cd9f1a..cbf5c7ac6 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -26,9 +26,9 @@ fn export_ostree_ref_to_blobdir( rev: &str, ocidir: &openat::Dir, compression: Option, -) -> Result { +) -> Result { let commit = repo.resolve_rev(rev, false)?.unwrap(); - let mut w = oci::LayerWriter::new(ocidir, compression)?; + let mut w = ociwriter::LayerWriter::new(ocidir, compression)?; ostree_tar::export_commit(repo, commit.as_str(), &mut w)?; w.complete() } @@ -45,7 +45,7 @@ fn build_oci( // Explicitly error if the target exists std::fs::create_dir(ocidir_path).context("Creating OCI dir")?; let ocidir = &openat::Dir::open(ocidir_path)?; - let mut writer = oci::OciWriter::new(ocidir)?; + let mut writer = ociwriter::OciWriter::new(ocidir)?; let commit = repo.resolve_rev(rev, false)?.unwrap(); let commit = commit.as_str(); diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 517b80568..df2cf2ee7 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -233,7 +233,7 @@ mod export; pub use export::*; mod import; pub use import::*; -mod oci; +mod ociwriter; mod skopeo; pub mod store; diff --git a/lib/src/container/oci.rs b/lib/src/container/ociwriter.rs similarity index 100% rename from lib/src/container/oci.rs rename to lib/src/container/ociwriter.rs From 87495953c5374001ea1335467ac25e8a8ab100c1 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 29 Oct 2021 15:32:07 -0400 Subject: [PATCH 171/775] ociwriter: Expose API to push multiple layers Prep for "blob splitting" the base ostree. 
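A hypothetical sketch of the resulting shape (`OciWriter` is crate-internal; `ocidir` and the blob values are placeholders):

```rust
// Each push_layer() appends one layer to the image stack; the first pushed
// layer becomes the base. complete() then emits the config and manifest.
let mut writer = ociwriter::OciWriter::new(ocidir)?;
writer.push_layer(base_blob);
writer.push_layer(split_blob);
writer.complete()?;
```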
--- lib/src/container/export.rs | 2 +- lib/src/container/ociwriter.rs | 42 ++++++++++++++++++++++------------ 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index cbf5c7ac6..b7bb41855 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -73,7 +73,7 @@ fn build_oci( } let rootfs_blob = export_ostree_ref_to_blobdir(repo, commit, ocidir, compression)?; - writer.set_root_layer(rootfs_blob); + writer.push_layer(rootfs_blob); writer.complete()?; Ok(ImageReference { diff --git a/lib/src/container/ociwriter.rs b/lib/src/container/ociwriter.rs index 9db1cc248..6674df187 100644 --- a/lib/src/container/ociwriter.rs +++ b/lib/src/container/ociwriter.rs @@ -4,7 +4,7 @@ use anyhow::{anyhow, Result}; use flate2::write::GzEncoder; use fn_error_context::context; -use oci_image::MediaType; +use oci_image::{Descriptor, MediaType}; use oci_spec::image as oci_image; use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; @@ -76,7 +76,7 @@ pub(crate) struct OciWriter<'a> { cmd: Option>, - root_layer: Option, + layers: Vec, } /// Write a serializable data (JSON) as an OCI blob @@ -101,13 +101,13 @@ impl<'a> OciWriter<'a> { dir, config_annotations: Default::default(), manifest_annotations: Default::default(), - root_layer: None, + layers: Vec::new(), cmd: None, }) } - pub(crate) fn set_root_layer(&mut self, layer: Layer) { - assert!(self.root_layer.replace(layer).is_none()) + pub(crate) fn push_layer(&mut self, layer: Layer) { + self.layers.push(layer) } pub(crate) fn set_cmd(&mut self, e: &[&str]) { @@ -134,10 +134,17 @@ impl<'a> OciWriter<'a> { let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine); let arch = oci_image::Arch::from(*arch); - let rootfs_blob = self.root_layer.as_ref().unwrap(); - let root_layer_id = format!("sha256:{}", rootfs_blob.uncompressed_sha256); + if self.layers.is_empty() { + return Err(anyhow!("No layers specified")); + } + + let diffids: Vec = self + .layers + .iter() + .map(|l| format!("sha256:{}", l.uncompressed_sha256)) + .collect(); let rootfs = oci_image::RootFsBuilder::default() - .diff_ids(vec![root_layer_id]) + .diff_ids(diffids) .build() .unwrap(); @@ -167,14 +174,21 @@ impl<'a> OciWriter<'a> { .unwrap(); let config_blob = write_json_blob(self.dir, &config, MediaType::ImageConfig)?; + let layers: Vec = self + .layers + .iter() + .map(|layer| { + layer + .descriptor() + .media_type(MediaType::ImageLayerGzip) + .build() + .unwrap() + }) + .collect(); let manifest_data = oci_image::ImageManifestBuilder::default() .schema_version(oci_image::SCHEMA_VERSION) .config(config_blob.build().unwrap()) - .layers(vec![rootfs_blob - .descriptor() - .media_type(MediaType::ImageLayerGzip) - .build() - .unwrap()]) + .layers(layers) .annotations(self.manifest_annotations) .build() .unwrap(); @@ -331,7 +345,7 @@ mod tests { root_layer.uncompressed_sha256, "349438e5faf763e8875b43de4d7101540ef4d865190336c2cc549a11f33f8d7c" ); - w.set_root_layer(root_layer); + w.push_layer(root_layer); w.complete()?; Ok(()) } From 1acfe410f5a5aac6803d2804a28a4e785a3e1d3b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 29 Oct 2021 16:04:36 -0400 Subject: [PATCH 172/775] ociwriter: Add methods to create layers from writer This is cleaner than calling `LayerWriter::new` with the same directory. Also rename it to `RawLayerWriter`, and add a helper method that wraps it in a `tar::Builder` that will be used in future code. 
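Roughly, the intended usage of the new helpers (a hypothetical snippet; `writer` is an `OciWriter`, and the paths are placeholders):

```rust
// create_layer() wraps a RawLayerWriter in a tar::Builder so entries can
// be appended directly; into_inner() finishes the tar stream, and
// complete() seals the blob, which can then be pushed as a layer.
let mut layer = writer.create_layer(None)?;
layer.append_path_with_name("./example", "usr/share/example")?;
let blob = layer.into_inner()?.complete()?;
writer.push_layer(blob);
```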
--- lib/src/container/export.rs | 9 +++++---- lib/src/container/ociwriter.rs | 34 ++++++++++++++++++++++++++++++---- 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/lib/src/container/export.rs b/lib/src/container/export.rs index b7bb41855..4b95c7154 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/export.rs @@ -1,5 +1,6 @@ //! APIs for creating container images from OSTree commits +use super::ociwriter::OciWriter; use super::*; use crate::tar as ostree_tar; use anyhow::Context; @@ -21,14 +22,14 @@ pub struct Config { /// Write an ostree commit to an OCI blob #[context("Writing ostree root to blob")] -fn export_ostree_ref_to_blobdir( +fn export_ostree_ref( repo: &ostree::Repo, rev: &str, - ocidir: &openat::Dir, + writer: &mut OciWriter, compression: Option<flate2::Compression>, ) -> Result<ociwriter::Layer> { let commit = repo.resolve_rev(rev, false)?.unwrap(); - let mut w = ociwriter::LayerWriter::new(ocidir, compression)?; + let mut w = writer.create_raw_layer(compression)?; ostree_tar::export_commit(repo, commit.as_str(), &mut w)?; w.complete() } @@ -72,7 +73,7 @@ fn build_oci( writer.set_cmd(&cmd); } - let rootfs_blob = export_ostree_ref_to_blobdir(repo, commit, ocidir, compression)?; + let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, compression)?; writer.push_layer(rootfs_blob); writer.complete()?; diff --git a/lib/src/container/ociwriter.rs b/lib/src/container/ociwriter.rs index 6674df187..702d1620f 100644 --- a/lib/src/container/ociwriter.rs +++ b/lib/src/container/ociwriter.rs @@ -62,7 +62,7 @@ pub(crate) struct BlobWriter<'a> { } /// Create an OCI layer (also a blob). -pub(crate) struct LayerWriter<'a> { +pub(crate) struct RawLayerWriter<'a> { bw: BlobWriter<'a>, uncompressed_hash: Hasher, compressor: GzEncoder<Vec<u8>>, @@ -106,6 +106,32 @@ impl<'a> OciWriter<'a> { }) } + /// Create a writer for a new blob (expected to be a tar stream) + pub(crate) fn create_raw_layer( + &self, + c: Option<flate2::Compression>, + ) -> Result<RawLayerWriter> { + RawLayerWriter::new(&self.dir, c) + } + + #[allow(dead_code)] + /// Create a tar output stream, backed by a blob + pub(crate) fn create_layer( + &self, + c: Option<flate2::Compression>, + ) -> Result<tar::Builder<RawLayerWriter>> { + Ok(tar::Builder::new(self.create_raw_layer(c)?)) + } + + #[allow(dead_code)] + /// Finish all I/O for a layer writer, and add it to the layers in the image. + pub(crate) fn finish_and_push_layer(&mut self, w: RawLayerWriter) -> Result<()> { + let w = w.complete()?; + self.push_layer(w); + Ok(()) + } + + /// Add a layer to the top of the image stack. The first pushed layer becomes the root.
pub(crate) fn push_layer(&mut self, layer: Layer) { self.layers.push(layer) } @@ -254,7 +280,7 @@ impl<'a> std::io::Write for BlobWriter<'a> { } } -impl<'a> LayerWriter<'a> { +impl<'a> RawLayerWriter<'a> { pub(crate) fn new(ocidir: &'a openat::Dir, c: Option) -> Result { let bw = BlobWriter::new(ocidir)?; Ok(Self { @@ -278,7 +304,7 @@ impl<'a> LayerWriter<'a> { } } -impl<'a> std::io::Write for LayerWriter<'a> { +impl<'a> std::io::Write for RawLayerWriter<'a> { fn write(&mut self, srcbuf: &[u8]) -> std::io::Result { self.compressor.get_mut().clear(); self.compressor.write_all(srcbuf).unwrap(); @@ -338,7 +364,7 @@ mod tests { let td = tempfile::tempdir()?; let td = &openat::Dir::open(td.path())?; let mut w = OciWriter::new(td)?; - let mut layerw = LayerWriter::new(td, None)?; + let mut layerw = w.create_raw_layer(None)?; layerw.write_all(b"pretend this is a tarball")?; let root_layer = layerw.complete()?; assert_eq!( From 1ed8e29869065919179ca3ffd73f249f4fe23b55 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 26 Oct 2021 09:30:18 -0400 Subject: [PATCH 173/775] Use tokio SyncIoBridge We can only land this after a new tokio is released with https://github.com/tokio-rs/tokio/pull/4146 --- lib/Cargo.toml | 2 +- lib/src/async_util.rs | 88 ------------------------------------------- lib/src/lib.rs | 1 - lib/src/tar/import.rs | 3 +- lib/src/tar/write.rs | 6 +-- 5 files changed, 5 insertions(+), 95 deletions(-) delete mode 100644 lib/src/async_util.rs diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 5546955b0..a6d289d43 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -38,7 +38,7 @@ structopt = "0.3.21" tar = "0.4.33" tempfile = "3.2.0" tokio = { features = ["full"], version = "1" } -tokio-util = { features = ["io"], version = "0.6" } +tokio-util = { features = ["io-util"], version = "0.6.9" } tracing = "0.1" [dev-dependencies] diff --git a/lib/src/async_util.rs b/lib/src/async_util.rs deleted file mode 100644 index 8aed32c3f..000000000 --- a/lib/src/async_util.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::io::prelude::*; -use std::pin::Pin; -use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; - -/// A [`std::io::Read`] implementation backed by an asynchronous source. -pub(crate) struct ReadBridge { - reader: Pin>, - rt: tokio::runtime::Handle, -} - -impl Read for ReadBridge { - fn read(&mut self, buf: &mut [u8]) -> std::io::Result { - let reader = &mut self.reader; - self.rt.block_on(async { reader.read(buf).await }) - } -} - -impl ReadBridge { - /// Create a [`std::io::Read`] implementation backed by an asynchronous source. - /// - /// This is useful with e.g. [`tokio::task::spawn_blocking`]. - pub(crate) fn new(reader: T) -> Self { - let reader = Box::pin(reader); - let rt = tokio::runtime::Handle::current(); - ReadBridge { reader, rt } - } -} - -/// A [`std::io::Write`] implementation backed by an asynchronous source. -pub(crate) struct WriteBridge { - w: Pin>, - rt: tokio::runtime::Handle, -} - -impl Write for WriteBridge { - fn write(&mut self, buf: &[u8]) -> std::io::Result { - let w = &mut self.w; - self.rt.block_on(async { w.write(buf).await }) - } - - fn flush(&mut self) -> std::io::Result<()> { - let w = &mut self.w; - self.rt.block_on(async { w.flush().await }) - } -} - -impl WriteBridge { - /// Create a [`std::io::Write`] implementation backed by an asynchronous source. - /// - /// This is useful with e.g. [`tokio::task::spawn_blocking`]. 
- pub(crate) fn new(reader: T) -> Self { - let w = Box::pin(reader); - let rt = tokio::runtime::Handle::current(); - WriteBridge { w, rt } - } -} - -#[cfg(test)] -mod test { - use std::convert::TryInto; - - use super::*; - use anyhow::Result; - - async fn test_reader_len( - r: impl AsyncRead + Unpin + Send + 'static, - expected_len: usize, - ) -> Result<()> { - let mut r = ReadBridge::new(r); - let res = tokio::task::spawn_blocking(move || { - let mut buf = Vec::new(); - r.read_to_end(&mut buf)?; - Ok::<_, anyhow::Error>(buf) - }) - .await?; - assert_eq!(res?.len(), expected_len); - Ok(()) - } - - #[tokio::test] - async fn test_async_read_to_sync() -> Result<()> { - test_reader_len(tokio::io::empty(), 0).await?; - let bash = tokio::fs::File::open("/usr/bin/sh").await?; - let bash_len = bash.metadata().await?.len(); - test_reader_len(bash, bash_len.try_into().unwrap()).await?; - Ok(()) - } -} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 21b818b45..88793b1b4 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -22,7 +22,6 @@ pub use ostree::gio::glib; /// to a string to output to a terminal or logs. type Result = anyhow::Result; -mod async_util; pub mod cli; pub mod container; pub mod diff; diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 2d2eb8ae9..32b022168 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -1,6 +1,5 @@ //! APIs for extracting OSTree commits from container images -use crate::async_util::ReadBridge; use crate::Result; use anyhow::{anyhow, Context}; use camino::Utf8Path; @@ -603,7 +602,7 @@ pub async fn import_tar( options: Option, ) -> Result { let options = options.unwrap_or_default(); - let src = ReadBridge::new(src); + let src = tokio_util::io::SyncIoBridge::new(src); let repo = repo.clone(); let import = crate::tokio_util::spawn_blocking_cancellable(move |cancellable| { let mut archive = tar::Archive::new(src); diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index f4bb97d2d..578dc710f 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -7,7 +7,6 @@ //! In the future, this may also evolve into parsing the tar //! stream in Rust, not in C. -use crate::async_util::{ReadBridge, WriteBridge}; use crate::cmdext::CommandRedirectionExt; use crate::Result; use anyhow::{anyhow, Context}; @@ -163,9 +162,10 @@ async fn filter_tar_async( mut dest: impl AsyncWrite + Send + Unpin, ) -> Result> { let (tx_buf, mut rx_buf) = tokio::io::duplex(8192); + let src = Box::pin(src); let tar_transformer = tokio::task::spawn_blocking(move || -> Result<_> { - let src = ReadBridge::new(src); - let dest = WriteBridge::new(tx_buf); + let src = tokio_util::io::SyncIoBridge::new(src); + let dest = tokio_util::io::SyncIoBridge::new(tx_buf); filter_tar(src, dest) }); let copier = tokio::io::copy(&mut rx_buf, &mut dest); From f10a82b10f3f200f031fc1843d17305a1dd80140 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 29 Oct 2021 09:27:40 -0400 Subject: [PATCH 174/775] lib/container: Use "encapsulate" terminology for module names too Previously in https://github.com/ostreedev/ostree-rs-ext/pull/113 we changed the CLI entrypoints with this rationale: > Since we're moving towards more "native" support for container > images, we need to very clearly differentiate between the code > that currently uses the terms "import" and "export" which are > somewhat ambiguous. To elaborate, the term "import" is ambiguous with respect to our container "store" path, which generates a new ostree commit and operates on arbitrary container images. 
Let's take the next step now and rename the modules too. --- lib/src/cli.rs | 8 +++--- .../container/{export.rs => encapsulate.rs} | 4 +-- lib/src/container/mod.rs | 8 +++--- lib/src/container/store.rs | 22 ++++++++++------ .../container/{import.rs => unencapsulate.rs} | 25 +++++++++++-------- lib/tests/it/main.rs | 24 ++++++++++-------- 6 files changed, 51 insertions(+), 40 deletions(-) rename lib/src/container/{export.rs => encapsulate.rs} (97%) rename lib/src/container/{import.rs => unencapsulate.rs} (91%) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index fc49e0d8b..57e7e084b 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -13,7 +13,7 @@ use std::ffi::OsString; use structopt::StructOpt; use crate::container::store::{LayeredImageImporter, PrepareResult}; -use crate::container::{Config, ImageReference, ImportOptions, OstreeImageReference}; +use crate::container::{Config, ImageReference, OstreeImageReference, UnencapsulateOptions}; fn parse_imgref(s: &str) -> Result { OstreeImageReference::try_from(s) @@ -261,10 +261,10 @@ async fn container_import( } else { None }; - let opts = ImportOptions { + let opts = UnencapsulateOptions { progress: Some(tx_progress), }; - let import = crate::container::import(repo, &imgref, Some(opts)); + let import = crate::container::unencapsulate(repo, &imgref, Some(opts)); tokio::pin!(import); tokio::pin!(rx_progress); let import = loop { @@ -316,7 +316,7 @@ async fn container_export( labels: Some(labels), cmd, }; - let pushed = crate::container::export(repo, rev, &config, &imgref).await?; + let pushed = crate::container::encapsulate(repo, rev, &config, &imgref).await?; println!("{}", pushed); Ok(()) } diff --git a/lib/src/container/export.rs b/lib/src/container/encapsulate.rs similarity index 97% rename from lib/src/container/export.rs rename to lib/src/container/encapsulate.rs index 4b95c7154..6c20fba54 100644 --- a/lib/src/container/export.rs +++ b/lib/src/container/encapsulate.rs @@ -144,7 +144,7 @@ async fn build_impl( sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: dest.to_owned(), }; - let (_, digest) = super::import::fetch_manifest(&imgref).await?; + let (_, digest) = super::unencapsulate::fetch_manifest(&imgref).await?; Ok(digest) } } @@ -152,7 +152,7 @@ async fn build_impl( /// Given an OSTree repository and ref, generate a container image. /// /// The returned `ImageReference` will contain a digested (e.g. `@sha256:`) version of the destination. -pub async fn export>( +pub async fn encapsulate>( repo: &ostree::Repo, ostree_ref: S, config: &Config, diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index df2cf2ee7..e2c9d36c4 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -229,10 +229,10 @@ impl std::fmt::Display for OstreeImageReference { } pub mod deploy; -mod export; -pub use export::*; -mod import; -pub use import::*; +mod encapsulate; +pub use encapsulate::*; +mod unencapsulate; +pub use unencapsulate::*; mod ociwriter; mod skopeo; pub mod store; diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 55e44ef2c..72a70740e 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -1,9 +1,9 @@ //! APIs for storing (layered) container images as OSTree commits //! -//! # Extension of import support +//! # Extension of encapsulation support //! //! This code supports ingesting arbitrary layered container images from an ostree-exported -//! base. See [`super::import`] for more information on encaspulation of images. +//! base. 
See [`encapsulate`][`super::encapsulate()`] for more information on encapsulation of images. use super::*; use crate::refescape; use anyhow::{anyhow, Context}; use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; @@ -231,9 +231,12 @@ impl LayeredImageImporter { c } else { let base_layer_ref = &base_layer.layer; - let (blob, driver) = - super::import::fetch_layer_decompress(&proxy, &self.proxy_img, &base_layer.layer) - .await?; + let (blob, driver) = super::unencapsulate::fetch_layer_decompress( + &proxy, + &self.proxy_img, + &base_layer.layer, + ) + .await?; let importer = crate::tar::import_tar(&self.repo, blob, None); let (commit, driver) = tokio::join!(importer, driver); driver?; @@ -255,9 +258,12 @@ impl LayeredImageImporter { if let Some(c) = layer.commit { layer_commits.push(c.to_string()); } else { - let (blob, driver) = - super::import::fetch_layer_decompress(&proxy, &self.proxy_img, &layer.layer) - .await?; + let (blob, driver) = super::unencapsulate::fetch_layer_decompress( + &proxy, + &self.proxy_img, + &layer.layer, + ) + .await?; // An important aspect of this is that we SELinux label the derived layers using // the base policy. let opts = crate::tar::WriteTarOptions { diff --git a/lib/src/container/import.rs b/lib/src/container/unencapsulate.rs similarity index 91% rename from lib/src/container/import.rs rename to lib/src/container/unencapsulate.rs index 3e798f3cb..50ce979dc 100644 --- a/lib/src/container/import.rs +++ b/lib/src/container/unencapsulate.rs @@ -1,4 +1,7 @@ -//! APIs for extracting OSTree commits from container images +//! APIs for "unencapsulating" OSTree commits from container images +//! +//! This code only operates on container images that were created via +//! [`encapsulate`]. //! //! # External dependency on container-image-proxy //! @@ -17,7 +20,7 @@ //! Additionally, the proxy "upconverts" manifests into OCI, so we don't need to care //! about parsing the Docker manifest format (as used by most registries still). //! -//! +//! [`encapsulate`]: [`super::encapsulate()`] // # Implementation // @@ -39,12 +42,12 @@ use tracing::{event, instrument, Level}; /// The result of an import operation #[derive(Copy, Clone, Debug, Default)] -pub struct ImportProgress { +pub struct UnencapsulationProgress { /// Number of bytes downloaded (approximate) pub processed_bytes: u64, } -type Progress = tokio::sync::watch::Sender<ImportProgress>; +type Progress = tokio::sync::watch::Sender<UnencapsulationProgress>; /// A read wrapper that updates the download progress. #[pin_project::pin_project] @@ -121,21 +124,21 @@ fn require_one_layer_blob(manifest: &oci_image::ImageManifest) -> Result<&oci_im /// Configuration for container fetches. #[derive(Debug, Default)] -pub struct ImportOptions { +pub struct UnencapsulateOptions { /// Channel which will receive progress updates - pub progress: Option<tokio::sync::watch::Sender<ImportProgress>>, + pub progress: Option<tokio::sync::watch::Sender<UnencapsulationProgress>>, } /// Fetch a container image and import its embedded OSTree commit. #[context("Importing {}", imgref)] #[instrument(skip(repo, options))] -pub async fn import( +pub async fn unencapsulate( repo: &ostree::Repo, imgref: &OstreeImageReference, - options: Option<ImportOptions>, + options: Option<UnencapsulateOptions>, ) -> Result<Import> { let (manifest, image_digest) = fetch_manifest(imgref).await?; - let ostree_commit = import_from_manifest(repo, imgref, &manifest, options).await?; + let ostree_commit = unencapsulate_from_manifest(repo, imgref, &manifest, options).await?; Ok(Import { ostree_commit, image_digest, @@ -172,11 +174,11 @@ pub(crate) async fn fetch_layer_decompress<'a>( /// Fetch a container image using an in-memory manifest and import its embedded OSTree commit.
#[context("Importing {}", imgref)] #[instrument(skip(repo, options, manifest))] -pub async fn import_from_manifest( +pub async fn unencapsulate_from_manifest( repo: &ostree::Repo, imgref: &OstreeImageReference, manifest: &oci_spec::image::ImageManifest, - options: Option, + options: Option, ) -> Result { if matches!(imgref.sigverify, SignatureSource::ContainerPolicy) && skopeo::container_policy_is_default_insecure()? diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index aab230301..ad82e9615 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -326,9 +326,10 @@ async fn test_container_import_export() -> Result<()> { ), cmd: Some(vec!["/bin/bash".to_string()]), }; - let digest = ostree_ext::container::export(&fixture.srcrepo, TESTREF, &config, &srcoci_imgref) - .await - .context("exporting")?; + let digest = + ostree_ext::container::encapsulate(&fixture.srcrepo, TESTREF, &config, &srcoci_imgref) + .await + .context("exporting")?; assert!(srcoci_path.exists()); let inspect = skopeo_inspect(&srcoci_imgref.to_string())?; @@ -349,7 +350,7 @@ async fn test_container_import_export() -> Result<()> { sigverify: SignatureSource::OstreeRemote("unknownremote".to_string()), imgref: srcoci_imgref.clone(), }; - let r = ostree_ext::container::import(&fixture.destrepo, &srcoci_unknownremote, None) + let r = ostree_ext::container::unencapsulate(&fixture.destrepo, &srcoci_unknownremote, None) .await .context("importing"); assert_err_contains(r, r#"Remote "unknownremote" not found"#); @@ -372,7 +373,7 @@ async fn test_container_import_export() -> Result<()> { sigverify: SignatureSource::OstreeRemote("myremote".to_string()), imgref: srcoci_imgref.clone(), }; - let import = ostree_ext::container::import(&fixture.destrepo, &srcoci_verified, None) + let import = ostree_ext::container::unencapsulate(&fixture.destrepo, &srcoci_verified, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); @@ -380,7 +381,7 @@ async fn test_container_import_export() -> Result<()> { // Test without signature verification // Create a new repo let fixture = Fixture::new()?; - let import = ostree_ext::container::import(&fixture.destrepo, &srcoci_unverified, None) + let import = ostree_ext::container::unencapsulate(&fixture.destrepo, &srcoci_unverified, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); @@ -401,7 +402,7 @@ async fn test_container_import_derive() -> Result<()> { name: exampleos_path.to_string(), }, }; - let r = ostree_ext::container::import(&fixture.destrepo, &exampleos_ref, None).await; + let r = ostree_ext::container::unencapsulate(&fixture.destrepo, &exampleos_ref, None).await; assert_err_contains(r, "Expected 1 layer, found 2"); Ok(()) } @@ -555,9 +556,10 @@ async fn test_container_import_export_registry() -> Result<()> { cmd: Some(vec!["/bin/bash".to_string()]), ..Default::default() }; - let digest = ostree_ext::container::export(&fixture.srcrepo, TESTREF, &config, &src_imgref) - .await - .context("exporting to registry")?; + let digest = + ostree_ext::container::encapsulate(&fixture.srcrepo, TESTREF, &config, &src_imgref) + .await + .context("exporting to registry")?; let mut digested_imgref = src_imgref.clone(); digested_imgref.name = format!("{}@{}", src_imgref.name, digest); @@ -565,7 +567,7 @@ async fn test_container_import_export_registry() -> Result<()> { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: digested_imgref, }; - let import = ostree_ext::container::import(&fixture.destrepo, &import_ref, 
None) + let import = ostree_ext::container::unencapsulate(&fixture.destrepo, &import_ref, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); From 5841c80ff08a1f429fe431f11e7d9234255510a2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 1 Nov 2021 11:47:21 -0400 Subject: [PATCH 175/775] Clean up `deny/forbid` bits We had duplicate versions of these inside the `mod.rs`; the versions in the top level `lib.rs` are all that are needed. However, because the cli is a separate unit, copy them there too. Not that there's actually anything actually *in* the cli `main.rs`, but it's a good best practice in case the code does grow for some reason, or it gets cargo culted elsewhere. --- cli/src/main.rs | 4 ++++ lib/src/container/mod.rs | 5 ----- lib/src/tar/mod.rs | 5 ----- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/cli/src/main.rs b/cli/src/main.rs index 64910451a..f80554811 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,3 +1,7 @@ +// Good defaults +#![forbid(unused_must_use)] +#![deny(unsafe_code)] + use anyhow::Result; async fn run() -> Result<()> { diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index e2c9d36c4..39f21d344 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -25,11 +25,6 @@ //! A key feature of container images is support for layering. At the moment, support //! for this is [planned but not implemented](https://github.com/ostreedev/ostree-rs-ext/issues/12). -//#![deny(missing_docs)] -// Good defaults -#![forbid(unused_must_use)] -#![deny(unsafe_code)] - use anyhow::anyhow; use std::borrow::Cow; use std::convert::{TryFrom, TryInto}; diff --git a/lib/src/tar/mod.rs b/lib/src/tar/mod.rs index 4eb9d57bd..bd393fbd1 100644 --- a/lib/src/tar/mod.rs +++ b/lib/src/tar/mod.rs @@ -32,11 +32,6 @@ //! to have the container runtime try to unpack and apply those. For this reason, this module //! serializes extended attributes into separate `.xattr` files associated with each ostree object. -//#![deny(missing_docs)] -// Good defaults -#![forbid(unused_must_use)] -#![deny(unsafe_code)] - mod import; pub use import::*; mod export; From 2dab4c3501315f74055511753f19dd7370e5e12b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 1 Nov 2021 15:13:24 -0400 Subject: [PATCH 176/775] README.md: Various updates/reworking - Include an architecture diagram - Split off the "why ostree with containers" detail bits to separate file - Describe a little bit more about layering now that it's supported --- README.md | 176 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 102 insertions(+), 74 deletions(-) diff --git a/README.md b/README.md index 4829df396..6b493bd96 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,55 @@ Extension APIs for [ostree](https://github.com/ostreedev/ostree/) that are written in Rust, using the [Rust ostree bindings](https://crates.io/crates/ostree). +If you are writing tooling that uses ostree and Rust, this crate is intended for you. +However, while the ostree core is very stable, the APIs and data models and this crate +should be considered "slushy". An effort will be made to preserve backwards compatibility +for data written by prior versions (e.g. of tar and container serialization), but +if you choose to use this crate, please [file an issue](https://github.com/ostreedev/ostree-rs-ext/issues) +to let us know. 
+ +At the moment, the following projects are known to use this crate: + +- https://github.com/coreos/rpm-ostree/ + +The intention of this crate is to be where new high level ostree-related features +land. However, at this time it is kept separate from the core C library, which +is in turn separate from the [ostree-rs bindings](https://github.com/ostreedev/ostree-rs). + +High level features (more on this below): + +- ostree and [opencontainers/image](https://github.com/opencontainers/image-spec) bridging/integration +- Generalized tar import/export +- APIs to diff ostree commits + +``` +┌─────────────────┐ +│ │ +│ ostree-rs-ext ├────────────┐ +│ │ │ +└────────┬────────┘ │ + │ │ +┌────────▼────────┐ ┌────────▼─────────┐ +│ │ │ │ +│ ostree-rs │ │ imageproxy-rs │ +│ │ │ │ +└────────┬────────┘ └────────┬─────────┘ + │ │ +┌────────▼────────┐ ┌────────▼─────────┐ +│ │ │ │ +│ ostree │ │ skopeo │ +│ │ │ │ +└─────────────────┘ └────────┬─────────┘ + │ + ┌────────▼─────────┐ + │ │ + │ containers/image │ + │ │ + └──────────────────┘ +``` + +For more information on the container stack, see below. + ## module "tar": tar export/import ostree's support for exporting to a tarball is lossy because it doesn't have e.g. commit @@ -47,26 +96,26 @@ A major distinction is the addition of special `.xattr` files; tar variants and This is used by `rpm-ostree ex apply-live`. -## module "container": Encapsulate ostree commits in OCI/Docker images +## module "container": Bridging between ostree and OCI/Docker images + + +This module contains APIs to bidirectionally map between OSTree commits and the [opencontainers](https://github.com/opencontainers) +ecosystem. -This module contains APIs to bidirectionally map between a single OSTree commit and a container image wrapping it. Because container images are just layers of tarballs, this builds on the [`crate::tar`] module. -To emphasize this, the current high level model is that this is a one-to-one mapping - an ostree commit -can be exported (wrapped) into a container image, which will have exactly one layer. Upon import -back into an ostree repository, all container metadata except for its digested checksum will be discarded. -#### Signatures -OSTree supports GPG and ed25519 signatures natively, and it's expected by default that -when booting from a fetched container image, one verifies ostree-level signatures. -For ostree, a signing configuration is specified via an ostree remote. In order to -pair this configuration together, this library defines a "URL-like" string schema: -`ostree-remote-registry::` -A concrete instantiation might be e.g.: `ostree-remote-registry:fedora:quay.io/coreos/fedora-coreos:stable` -To parse and generate these strings, see [`OstreeImageReference`]. -#### Layering -A key feature of container images is support for layering. At the moment, support -for this is [planned but not implemented](https://github.com/ostreedev/ostree-rs-ext/issues/12). -### Encapsulate an OSTree commit inside a container image +This module builds on [containers-image-proxy-rs](https://github.com/containers/containers-image-proxy-rs) +and [skopeo](https://github.com/containers/skopeo), which in turn is ultimately a frontend +around the [containers/image](https://github.com/containers/image) ecosystem. 
+
+In particular, the `containers/image` library is used to fetch content from remote registries,
+which allows building on top of functionality in that library, including signatures, mirroring
+and in general a battle-tested codebase for interacting with both OCI and Docker registries.
+
+### Encapsulation
+
+For existing organizations which use ostree, APIs (and a CLI) are provided to "encapsulate"
+and "unencapsulate" an OSTree commit as an OCI image.
 
 ```
 $ ostree-ext-cli container encapsulate --repo=/path/to/repo exampleos/x86_64/stable docker://quay.io/exampleos/exampleos:stable
@@ -103,69 +152,48 @@ $ rpm-ostree rebase ostree-remote-image:someremote:quay.io/exampleos/exampleos:s
 (Along with the usual `rpm-ostree upgrade` knowing to pull that container image)
 
-### Future: Running an ostree-container as a webserver
-
-It also should work to run the ostree-container as a webserver, which will expose a webserver that responds to `GET /repo`.
-
-The effect will be as if it was built from a `Dockerfile` that contains `EXPOSE 8080`; it will work to e.g.
-`kubectl run nginx --image=quay.io/exampleos/exampleos:latest --replicas=1`
-and then also create a service for it.
-
-### Integrating with future container deltas
-
-See https://blogs.gnome.org/alexl/2020/05/13/putting-container-updates-on-a-diet/
-
-# ostree vs OCI/Docker
-
-Looking at this, one might ask: why even have ostree? Why not just have the operating system directly use something like the [containers/image](https://github.com/containers/image/) storage?
-
-The first answer to this is that it's a goal of this project to "hide" ostree usage; it should feel "native" to ship and manage the operating system "as if" it was just running a container.
-
-But, ostree has a *lot* of stuff built up around it and we can't just throw that away.
-
-## Understanding kernels
-
-ostree was designed from the start to manage bootable operating system trees - hence the name of the project. For example, ostree understands bootloaders and kernels/initramfs images. Container tools don't.
-
-## Signing
-
-ostree also quite early on gained an opinionated mechanism to sign images (commits) via GPG. As of this time there are multiple competing mechanisms for container signing, and it is not widely deployed.
-For running random containers from `docker.io`, it can be OK to just trust TLS or pin via `@sha256` - a whole idea of Docker is that containers are isolated and it should be reasonably safe to
-at least try out random containers. But for the *operating system* its integrity is paramount because it's ultimately trusted.
-
-## Deduplication
-
-ostree's hardlink store is designed around de-duplication. Operating systems can get large and they are most natural as "base images" - which in the Docker container model
-are duplicated on disk. Of course storage systems like containers/image could learn to de-duplicate; but it would be a use case that *mostly* applied to just the operating system.
-
-## Being able to remove all container images
-
-In Kubernetes, the kubelet will prune the image storage periodically, removing images not backed by containers. If we store the operating system itself as an image...well, we'd need to do something like teach the container storage to have the concept of an image that is "pinned" because it's actually the booted filesystem. Or create a "fake" container representing the running operating system.
- -Other projects in this space ended up having an "early docker" distinct from the "main docker" which brings its own large set of challenges. - -## SELinux - -OSTree has *first class* support for SELinux. It was baked into the design from the very start. Handling SELinux is very tricky because it's a part of the operating system that can influence *everything else*. And specifically file labels. - -In this approach we aren't trying to inject xattrs into the tar stream; they're stored out of band for reliability. - -## Independence of complexity of container storage - -This stuff could be done - but the container storage and tooling is already quite complex, and introducing a special case like this would be treading into new ground. - -Today for example, cri-o ships a `crio-wipe.service` which removes all container storage across major version upgrades. +To emphasize this, the current high level model is that this is a one-to-one mapping - an ostree commit +can be exported (wrapped) into a container image, which will have exactly one layer. Upon import +back into an ostree repository, all container metadata except for its digested checksum will be discarded. -ostree is a fairly simple format and has been 100% stable throughout its life so far. +#### Signatures -## ostree format has per-file integrity +OSTree supports GPG and ed25519 signatures natively, and it's expected by default that +when booting from a fetched container image, one verifies ostree-level signatures. +For ostree, a signing configuration is specified via an ostree remote. In order to +pair this configuration together, this library defines a "URL-like" string schema: +`ostree-remote-registry::` +A concrete instantiation might be e.g.: `ostree-remote-registry:fedora:quay.io/coreos/fedora-coreos:stable` +To parse and generate these strings, see [`OstreeImageReference`]. -More on this here: https://ostreedev.github.io/ostree/related-projects/#docker +### Layering -## Allow hiding ostree while not reinventing everything +A key feature of container images is support for layering. This functionality is handled +via a separate [container/store](https://docs.rs/ostree_ext/latest/container/store/) module. -So, again the goal here is: make it feel "native" to ship and manage the operating system "as if" it was just running a container without throwing away everything in ostree today. +These APIs are also exposed via the CLI: +``` +$ ostree-ext-cli container image --help +ostree-ext-cli-container-image 0.4.0-alpha.0 +Commands for working with (possibly layered, non-encapsulated) container images + +USAGE: + ostree-ext-cli container image + +FLAGS: + -h, --help Prints help information + -V, --version Prints version information + +SUBCOMMANDS: + copy Copy a pulled container image from one repo to another + deploy Perform initial deployment for a container image + help Prints this message or the help of the given subcommand(s) + list List container images + pull Pull (or update) a container image +``` +## More details about ostree and containers +See [ostree-and-containers.md](ostree-and-containers.md). 
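As a supplement to the README patch above, here is a minimal sketch of driving the same flow from Rust rather than the CLI. It is not part of the patch series; it matches the signatures exercised in the test diffs at this point in the series (a later patch below adds an `ExportOpts` parameter to `encapsulate`), and the ostree ref, image name, and `myremote` remote are placeholders.

```rust
use anyhow::Result;
use ostree_ext::container::{
    Config, ImageReference, OstreeImageReference, SignatureSource, Transport,
};

async fn roundtrip(srcrepo: &ostree::Repo, destrepo: &ostree::Repo) -> Result<()> {
    // Encapsulate: wrap an ostree commit as a single-layer container image.
    let config = Config {
        cmd: Some(vec!["/bin/bash".to_string()]),
        ..Default::default()
    };
    let dest = ImageReference {
        transport: Transport::Registry,
        name: "quay.io/exampleos/exampleos:stable".to_string(),
    };
    let digest =
        ostree_ext::container::encapsulate(srcrepo, "exampleos/x86_64/stable", &config, &dest)
            .await?;
    println!("pushed: {}", digest);

    // Unencapsulate: fetch the commit back, with ostree-level signature
    // verification configured via the `myremote` ostree remote.
    let imgref = OstreeImageReference {
        sigverify: SignatureSource::OstreeRemote("myremote".to_string()),
        imgref: dest,
    };
    let import = ostree_ext::container::unencapsulate(destrepo, &imgref, None).await?;
    println!("imported: {}", import.ostree_commit);
    Ok(())
}
```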
From 407e65094cdb213f84d0b055d035333ad90baf0e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 2 Nov 2021 09:31:04 -0400 Subject: [PATCH 177/775] (cargo-release) version 0.4.0 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 5546955b0..6cd5bcebc 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.4.0-alpha.0" +version = "0.4.0" [dependencies] anyhow = "1.0" From 0f8ce4c363d227912a772ae0f9c11bcd58d92258 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 3 Nov 2021 17:23:05 -0400 Subject: [PATCH 178/775] containers: Expose a state struct with base commit and layering state This is actually quite analogous to what rpm-ostree does internally; a lot of the internals there distinguish "base" versus "layered" commits, and this is very similar. Here, when we import a non-layered image, we can mostly ignore the merge commit since all it has is the image manifest. The base image layer ref *is* the encapsulated ostree commit with all the metadata injected by (rpm-)ostree in the non-layered case. Add an API which exposes this as a struct, and also return it from the importer's `AlreadyPresent` case. --- lib/src/cli.rs | 2 +- lib/src/container/deploy.rs | 2 +- lib/src/container/store.rs | 98 ++++++++++++++++++++++++++++--------- lib/tests/it/main.rs | 6 +-- 4 files changed, 81 insertions(+), 27 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 57e7e084b..6dd02432c 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -334,7 +334,7 @@ async fn container_store(repo: &str, imgref: &OstreeImageReference) -> Result<() let mut imp = LayeredImageImporter::new(repo, &imgref).await?; let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { - println!("No changes in {} => {}", imgref, c); + println!("No changes in {} => {}", imgref, c.merge_commit); return Ok(()); } PrepareResult::Ready(r) => r, diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index d6de68cc0..76b8b0189 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -11,7 +11,7 @@ pub const ORIGIN_CONTAINER: &str = "container-image-reference"; async fn pull_idempotent(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result { let mut imp = super::store::LayeredImageImporter::new(repo, imgref).await?; match imp.prepare().await? { - PrepareResult::AlreadyPresent(r) => Ok(r), + PrepareResult::AlreadyPresent(r) => Ok(r.merge_commit), PrepareResult::Ready(prep) => Ok(imp.import(prep).await?.commit), } } diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 72a70740e..a648e076a 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -10,7 +10,7 @@ use crate::refescape; use anyhow::{anyhow, Context}; use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; -use oci_spec::image as oci_image; +use oci_spec::image::{self as oci_image, ImageManifest}; use ostree::prelude::{Cast, ToVariant}; use ostree::{gio, glib}; use std::collections::{BTreeMap, HashMap}; @@ -40,6 +40,19 @@ fn ref_for_image(l: &ImageReference) -> Result { refescape::prefix_escape_for_ref(IMAGE_PREFIX, &l.to_string()) } +/// State of an already pulled layered image. 
+#[derive(Debug, PartialEq, Eq)] +pub struct LayeredImageState { + /// The base ostree commit + pub base_commit: String, + /// The merge commit unions all layers + pub merge_commit: String, + /// Whether or not the image has multiple layers. + pub is_layered: bool, + /// The digest of the original manifest + pub manifest_digest: String, +} + /// Context for importing a container image. pub struct LayeredImageImporter { repo: ostree::Repo, @@ -52,7 +65,7 @@ pub struct LayeredImageImporter { /// Result of invoking [`LayeredImageImporter::prepare`]. pub enum PrepareResult { /// The image reference is already present; the contained string is the OSTree commit. - AlreadyPresent(String), + AlreadyPresent(LayeredImageState), /// The image needs to be downloaded Ready(Box), } @@ -179,26 +192,27 @@ impl LayeredImageImporter { let new_imageid = manifest.config().digest().as_str(); // Query for previous stored state - let (previous_manifest_digest, previous_imageid) = if let Some(merge_commit) = - self.repo.resolve_rev(&self.ostree_ref, true)? - { - let merge_commit_obj = &self.repo.load_commit(merge_commit.as_str())?.0; - let commit_meta = &merge_commit_obj.child_value(0); - let commit_meta = &ostree::glib::VariantDict::new(Some(commit_meta)); - let (previous_manifest, previous_digest) = manifest_data_from_commitmeta(commit_meta)?; - // If the manifest digests match, we're done. - if previous_digest == manifest_digest { - return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string())); - } - // Failing that, if they have the same imageID, we're also done. - let previous_imageid = previous_manifest.config().digest().as_str(); - if previous_imageid == new_imageid { - return Ok(PrepareResult::AlreadyPresent(merge_commit.to_string())); - } - (Some(previous_digest), Some(previous_imageid.to_string())) - } else { - (None, None) - }; + + let (previous_manifest_digest, previous_imageid) = + if let Some((previous_manifest, previous_state)) = + query_image_impl(&self.repo, &self.imgref)? + { + // If the manifest digests match, we're done. + if previous_state.manifest_digest == manifest_digest { + return Ok(PrepareResult::AlreadyPresent(previous_state)); + } + // Failing that, if they have the same imageID, we're also done. + let previous_imageid = previous_manifest.config().digest().as_str(); + if previous_imageid == new_imageid { + return Ok(PrepareResult::AlreadyPresent(previous_state)); + } + ( + Some(previous_state.manifest_digest), + Some(previous_imageid.to_string()), + ) + } else { + (None, None) + }; let mut layers = manifest.layers().iter().cloned(); // We require a base layer. @@ -355,6 +369,46 @@ pub fn list_images(repo: &ostree::Repo) -> Result> { .collect() } +fn query_image_impl( + repo: &ostree::Repo, + imgref: &OstreeImageReference, +) -> Result> { + let ostree_ref = &ref_for_image(&imgref.imgref)?; + let merge_rev = repo.resolve_rev(&ostree_ref, true)?; + let (merge_commit, merge_commit_obj) = if let Some(r) = merge_rev { + (r.to_string(), repo.load_commit(r.as_str())?.0) + } else { + return Ok(None); + }; + let commit_meta = &merge_commit_obj.child_value(0); + let commit_meta = &ostree::glib::VariantDict::new(Some(commit_meta)); + let (manifest, manifest_digest) = manifest_data_from_commitmeta(commit_meta)?; + let mut layers = manifest.layers().iter().cloned(); + // We require a base layer. 
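+    // The layer list in an OCI manifest is ordered base-first, so the first
+    // layer is the encapsulated ostree commit; any layers after it are
+    // derived layers (hence the `layers.count() > 0` check below).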
+ let base_layer = layers.next().ok_or_else(|| anyhow!("No layers found"))?; + let base_layer = query_layer(repo, base_layer)?; + let base_commit = base_layer + .commit + .ok_or_else(|| anyhow!("Missing base image ref"))?; + // If there are more layers after the base, then we're layered. + let is_layered = layers.count() > 0; + let state = LayeredImageState { + base_commit, + merge_commit, + is_layered, + manifest_digest, + }; + Ok(Some((manifest, state))) +} + +/// Query metadata for a pulled image. +pub fn query_image( + repo: &ostree::Repo, + imgref: &OstreeImageReference, +) -> Result> { + Ok(query_image_impl(repo, imgref)?.map(|v| v.1)) +} + /// Copy a downloaded image from one repository to another. pub async fn copy( src_repo: &ostree::Repo, diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index ad82e9615..72368c837 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -466,7 +466,7 @@ async fn test_container_write_derive() -> Result<()> { panic!("Should have already imported {}", import.ostree_ref) } }; - assert_eq!(import.commit, already_present); + assert_eq!(import.commit, already_present.merge_commit); // Test upgrades; replace the oci-archive with new content. std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_V2_OCI)?; @@ -486,7 +486,7 @@ async fn test_container_write_derive() -> Result<()> { } let import = imp.import(prep).await?; // New commit. - assert_ne!(import.commit, already_present); + assert_ne!(import.commit, already_present.merge_commit); // We should still have exactly one image stored. let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; assert_eq!(images.len(), 1); @@ -513,7 +513,7 @@ async fn test_container_write_derive() -> Result<()> { panic!("Should have already imported {}", import.ostree_ref) } }; - assert_eq!(import.commit, already_present); + assert_eq!(import.commit, already_present.merge_commit); // Create a new repo, and copy to it let destrepo2 = ostree::Repo::create_at( From bfaab1d6721ecd9ab7425947279cc78758924df3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 3 Nov 2021 17:53:23 -0400 Subject: [PATCH 179/775] container: Change import result case to contain state struct Notably, this also stops exposing the ostree ref for the merge commit, which I think is a good idea in general since it should be thought of more as an implementation detail. In other words, this module speaks container image references and ostree commits. --- lib/src/cli.rs | 5 +---- lib/src/container/deploy.rs | 9 +++++---- lib/src/container/store.rs | 19 ++++++++++--------- lib/tests/it/main.rs | 19 +++++++++++-------- 4 files changed, 27 insertions(+), 25 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 6dd02432c..31b84d5d1 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -366,10 +366,7 @@ async fn container_store(repo: &str, imgref: &OstreeImageReference) -> Result<() } } } - println!( - "Wrote: {} => {} => {}", - imgref, import.ostree_ref, import.commit - ); + println!("Wrote: {} => {}", imgref, import.state.merge_commit); Ok(()) } diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 76b8b0189..684cfb0b7 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -10,10 +10,11 @@ pub const ORIGIN_CONTAINER: &str = "container-image-reference"; async fn pull_idempotent(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result { let mut imp = super::store::LayeredImageImporter::new(repo, imgref).await?; - match imp.prepare().await? 
{ - PrepareResult::AlreadyPresent(r) => Ok(r.merge_commit), - PrepareResult::Ready(prep) => Ok(imp.import(prep).await?.commit), - } + let state = match imp.prepare().await? { + PrepareResult::AlreadyPresent(r) => r, + PrepareResult::Ready(prep) => imp.import(prep).await?.state, + }; + Ok(state.merge_commit) } /// Options configuring deployment. diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index a648e076a..f36e8e1fc 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -112,10 +112,8 @@ pub struct PreparedImport { /// A successful import of a container image. #[derive(Debug, PartialEq, Eq)] pub struct CompletedImport { - /// The ostree ref used for the container image. - pub ostree_ref: String, - /// The current commit. - pub commit: String, + /// The completed layered image state + pub state: LayeredImageState, /// A mapping from layer blob IDs to a count of content filtered out /// by toplevel path. pub layer_filtered_content: BTreeMap>, @@ -312,8 +310,9 @@ impl LayeredImageImporter { // Destructure to transfer ownership to thread let repo = self.repo; let target_ref = self.ostree_ref; - let (ostree_ref, commit) = crate::tokio_util::spawn_blocking_cancellable( - move |cancellable| -> Result<(String, String)> { + let imgref = self.imgref; + let state = crate::tokio_util::spawn_blocking_cancellable( + move |cancellable| -> Result { let cancellable = Some(cancellable); let repo = &repo; let txn = repo.auto_transaction(cancellable)?; @@ -344,13 +343,15 @@ impl LayeredImageImporter { )?; repo.transaction_set_ref(None, &target_ref, Some(merged_commit.as_str())); txn.commit(cancellable)?; - Ok((target_ref, merged_commit.to_string())) + // Here we re-query state just to run through the same code path, + // though it'd be cheaper to synthesize it from the data we already have. + let state = query_image(&repo, &imgref)?.unwrap(); + Ok(state) }, ) .await??; Ok(CompletedImport { - ostree_ref, - commit, + state, layer_filtered_content, }) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 72368c837..fb20b4818 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -444,7 +444,10 @@ async fn test_container_write_derive() -> Result<()> { assert_eq!(images.len(), 1); assert_eq!(images[0], exampleos_ref.imgref.to_string()); - let imported_commit = &fixture.destrepo.load_commit(import.commit.as_str())?.0; + let imported_commit = &fixture + .destrepo + .load_commit(import.state.merge_commit.as_str())? + .0; let digest = ostree_ext::container::store::manifest_digest_from_commit(imported_commit)?; assert!(digest.starts_with("sha256:")); assert_eq!(digest, expected_digest); @@ -453,7 +456,7 @@ async fn test_container_write_derive() -> Result<()> { bash!( "ostree --repo={repo} ls {r} /usr/share/anewfile", repo = fixture.destrepo_path.as_str(), - r = import.ostree_ref.as_str() + r = import.state.merge_commit.as_str() )?; // Import again, but there should be no changes. @@ -463,10 +466,10 @@ async fn test_container_write_derive() -> Result<()> { let already_present = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => c, PrepareResult::Ready(_) => { - panic!("Should have already imported {}", import.ostree_ref) + panic!("Should have already imported {}", &exampleos_ref) } }; - assert_eq!(import.commit, already_present.merge_commit); + assert_eq!(import.state.merge_commit, already_present.merge_commit); // Test upgrades; replace the oci-archive with new content. 
std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_V2_OCI)?; @@ -486,7 +489,7 @@ async fn test_container_write_derive() -> Result<()> { } let import = imp.import(prep).await?; // New commit. - assert_ne!(import.commit, already_present.merge_commit); + assert_ne!(import.state.merge_commit, already_present.merge_commit); // We should still have exactly one image stored. let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; assert_eq!(images.len(), 1); @@ -500,7 +503,7 @@ async fn test_container_write_derive() -> Result<()> { fi ", repo = fixture.destrepo_path.as_str(), - r = import.ostree_ref.as_str() + r = import.state.merge_commit.as_str() )?; // And there should be no changes on upgrade again. @@ -510,10 +513,10 @@ async fn test_container_write_derive() -> Result<()> { let already_present = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => c, PrepareResult::Ready(_) => { - panic!("Should have already imported {}", import.ostree_ref) + panic!("Should have already imported {}", &exampleos_ref) } }; - assert_eq!(import.commit, already_present.merge_commit); + assert_eq!(import.state.merge_commit, already_present.merge_commit); // Create a new repo, and copy to it let destrepo2 = ostree::Repo::create_at( From c9bdb94d19235eda1161801acf05590a66823dbb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 3 Nov 2021 20:43:06 -0400 Subject: [PATCH 180/775] container/deploy: Use base commit if we're not layered If we're not doing a layered image, then use the base commit for the deployment. Closes: https://github.com/ostreedev/ostree-rs-ext/issues/143 --- lib/src/container/deploy.rs | 16 ++++++---------- lib/src/container/store.rs | 16 ++++++++++++++++ 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 684cfb0b7..3b7e058d2 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -8,15 +8,6 @@ use ostree::glib; /// The key in the OSTree origin which holds a serialized [`super::OstreeImageReference`]. pub const ORIGIN_CONTAINER: &str = "container-image-reference"; -async fn pull_idempotent(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result { - let mut imp = super::store::LayeredImageImporter::new(repo, imgref).await?; - let state = match imp.prepare().await? { - PrepareResult::AlreadyPresent(r) => r, - PrepareResult::Ready(prep) => imp.import(prep).await?.state, - }; - Ok(state.merge_commit) -} - /// Options configuring deployment. #[derive(Debug, Default)] pub struct DeployOpts<'a> { @@ -45,7 +36,12 @@ pub async fn deploy<'opts>( let cancellable = ostree::gio::NONE_CANCELLABLE; let options = options.unwrap_or_default(); let repo = &sysroot.repo().unwrap(); - let commit = &pull_idempotent(repo, imgref).await?; + let mut imp = super::store::LayeredImageImporter::new(repo, imgref).await?; + let state = match imp.prepare().await? 
{ + PrepareResult::AlreadyPresent(r) => r, + PrepareResult::Ready(prep) => imp.import(prep).await?.state, + }; + let commit = state.get_commit(); let origin = glib::KeyFile::new(); let target_imgref = options.target_imgref.unwrap_or(imgref); origin.set_string("origin", ORIGIN_CONTAINER, &target_imgref.to_string()); diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index f36e8e1fc..ba8631a97 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -53,6 +53,22 @@ pub struct LayeredImageState { pub manifest_digest: String, } +impl LayeredImageState { + /// Return the default ostree commit digest for this image. + /// + /// If this is a non-layered image, the merge commit will be + /// ignored, and the base commit returned. + /// + /// Otherwise, this returns the merge commit. + pub fn get_commit(&self) -> &str { + if self.is_layered { + self.merge_commit.as_str() + } else { + self.base_commit.as_str() + } + } +} + /// Context for importing a container image. pub struct LayeredImageImporter { repo: ostree::Repo, From 35d3528c5f4c65082c4398b0744e764904690b21 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 3 Nov 2021 20:59:03 -0400 Subject: [PATCH 181/775] container/deploy: Also write ref with target if provided With this new emphasis on "dual commit objects" for the container deployment, the higher level code queries via container image references and we don't expose the ostree ref (since there is no longer a single one). This makes it critical to write the internal ref matching the target container image, because it now needs to match the origin. --- lib/src/container/deploy.rs | 3 +++ lib/src/container/store.rs | 17 +++++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 3b7e058d2..8d5d17e95 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -37,6 +37,9 @@ pub async fn deploy<'opts>( let options = options.unwrap_or_default(); let repo = &sysroot.repo().unwrap(); let mut imp = super::store::LayeredImageImporter::new(repo, imgref).await?; + if let Some(target) = options.target_imgref { + imp.set_target(target); + } let state = match imp.prepare().await? { PrepareResult::AlreadyPresent(r) => r, PrepareResult::Ready(prep) => imp.import(prep).await?.state, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index ba8631a97..fea17384d 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -74,8 +74,8 @@ pub struct LayeredImageImporter { repo: ostree::Repo, proxy: ImageProxy, imgref: OstreeImageReference, + target_imgref: Option, proxy_img: OpenedImage, - ostree_ref: String, } /// Result of invoking [`LayeredImageImporter::prepare`]. @@ -176,16 +176,20 @@ impl LayeredImageImporter { let proxy = ImageProxy::new().await?; let proxy_img = proxy.open_image(&imgref.imgref.to_string()).await?; let repo = repo.clone(); - let ostree_ref = ref_for_image(&imgref.imgref)?; Ok(LayeredImageImporter { repo, proxy, proxy_img, - ostree_ref, + target_imgref: None, imgref: imgref.clone(), }) } + /// Write cached data as if the image came from this source. + pub fn set_target(&mut self, target: &OstreeImageReference) { + self.target_imgref = Some(target.clone()) + } + /// Determine if there is a new manifest, and if so return its digest. 
#[context("Fetching manifest")] pub async fn prepare(&mut self) -> Result { @@ -252,6 +256,8 @@ impl LayeredImageImporter { /// Import a layered container image pub async fn import(self, import: Box) -> Result { let proxy = self.proxy; + let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref); + let ostree_ref = ref_for_image(&target_imgref.imgref)?; // First download the base image (if necessary) - we need the SELinux policy // there to label all following layers. let base_layer = import.base_layer; @@ -325,8 +331,7 @@ impl LayeredImageImporter { // Destructure to transfer ownership to thread let repo = self.repo; - let target_ref = self.ostree_ref; - let imgref = self.imgref; + let imgref = self.target_imgref.unwrap_or(self.imgref); let state = crate::tokio_util::spawn_blocking_cancellable( move |cancellable| -> Result { let cancellable = Some(cancellable); @@ -357,7 +362,7 @@ impl LayeredImageImporter { &merged_root, cancellable, )?; - repo.transaction_set_ref(None, &target_ref, Some(merged_commit.as_str())); + repo.transaction_set_ref(None, &ostree_ref, Some(merged_commit.as_str())); txn.commit(cancellable)?; // Here we re-query state just to run through the same code path, // though it'd be cheaper to synthesize it from the data we already have. From 5753e86883b9e91e28f87c272a655bc2c1397b3b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 4 Nov 2021 17:43:33 -0400 Subject: [PATCH 182/775] Add some more tracing in container path Seeing a hang in upgrades in rpm-ostree, this may help debug. --- lib/src/container/store.rs | 3 +++ lib/src/container/unencapsulate.rs | 1 + 2 files changed, 4 insertions(+) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index fea17384d..dc28d08a7 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -290,6 +290,7 @@ impl LayeredImageImporter { let mut layer_filtered_content = BTreeMap::new(); for layer in import.layers { if let Some(c) = layer.commit { + tracing::debug!("Reusing fetched commit {}", c); layer_commits.push(c.to_string()); } else { let (blob, driver) = super::unencapsulate::fetch_layer_decompress( @@ -318,6 +319,7 @@ impl LayeredImageImporter { // We're done with the proxy, make sure it didn't have any errors. proxy.finalize().await?; + tracing::debug!("finalized proxy"); let serialized_manifest = serde_json::to_string(&import.manifest)?; let mut metadata = HashMap::new(); @@ -420,6 +422,7 @@ fn query_image_impl( is_layered, manifest_digest, }; + tracing::debug!(state = ?state); Ok(Some((manifest, state))) } diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 50ce979dc..e72d296fd 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -160,6 +160,7 @@ fn new_async_decompressor<'a>( } /// A wrapper for [`get_blob`] which fetches a layer and decompresses it. +#[instrument(skip(proxy, img))] pub(crate) async fn fetch_layer_decompress<'a>( proxy: &'a ImageProxy, img: &OpenedImage, From 99757591408c99693d01418f5b04e6141bfb4828 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 5 Nov 2021 11:43:45 -0400 Subject: [PATCH 183/775] container: Bump to containers-image-proxy 0.2 This requires threading `mut` around more. Motivated by improved logging/debugging. 
--- lib/Cargo.toml | 2 +- lib/src/container/store.rs | 8 ++++---- lib/src/container/unencapsulate.rs | 13 +++++++------ 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 72f8d235b..a059910d1 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,7 +10,7 @@ version = "0.4.0" [dependencies] anyhow = "1.0" -containers-image-proxy = "0.1" +containers-image-proxy = "0.2" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bytes = "1.0.1" bitflags = "1" diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index dc28d08a7..f39cdda2c 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -173,7 +173,7 @@ pub fn manifest_digest_from_commit(commit: &glib::Variant) -> Result { impl LayeredImageImporter { /// Create a new importer. pub async fn new(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result { - let proxy = ImageProxy::new().await?; + let mut proxy = ImageProxy::new().await?; let proxy_img = proxy.open_image(&imgref.imgref.to_string()).await?; let repo = repo.clone(); Ok(LayeredImageImporter { @@ -255,7 +255,7 @@ impl LayeredImageImporter { /// Import a layered container image pub async fn import(self, import: Box) -> Result { - let proxy = self.proxy; + let mut proxy = self.proxy; let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref); let ostree_ref = ref_for_image(&target_imgref.imgref)?; // First download the base image (if necessary) - we need the SELinux policy @@ -266,7 +266,7 @@ impl LayeredImageImporter { } else { let base_layer_ref = &base_layer.layer; let (blob, driver) = super::unencapsulate::fetch_layer_decompress( - &proxy, + &mut proxy, &self.proxy_img, &base_layer.layer, ) @@ -294,7 +294,7 @@ impl LayeredImageImporter { layer_commits.push(c.to_string()); } else { let (blob, driver) = super::unencapsulate::fetch_layer_decompress( - &proxy, + &mut proxy, &self.proxy_img, &layer.layer, ) diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index e72d296fd..2094efbc5 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -92,7 +92,7 @@ impl AsyncRead for ProgressReader { pub async fn fetch_manifest( imgref: &OstreeImageReference, ) -> Result<(oci_spec::image::ImageManifest, String)> { - let proxy = ImageProxy::new().await?; + let mut proxy = ImageProxy::new().await?; let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; let (digest, raw_manifest) = proxy.fetch_manifest(oi).await?; proxy.close_image(oi).await?; @@ -160,15 +160,16 @@ fn new_async_decompressor<'a>( } /// A wrapper for [`get_blob`] which fetches a layer and decompresses it. 
-#[instrument(skip(proxy, img))] +#[instrument(skip(proxy, img, layer))] pub(crate) async fn fetch_layer_decompress<'a>( - proxy: &'a ImageProxy, + proxy: &'a mut ImageProxy, img: &OpenedImage, layer: &oci_image::Descriptor, ) -> Result<( Box, impl Future> + 'a, )> { + tracing::debug!("fetching {}", layer.digest()); let (blob, driver) = proxy .get_blob(img, layer.digest().as_str(), layer.size() as u64) .await?; @@ -198,9 +199,9 @@ pub async fn unencapsulate_from_manifest( layer.digest().as_str(), layer.size() ); - let proxy = ImageProxy::new().await?; - let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; - let (blob, driver) = fetch_layer_decompress(&proxy, oi, layer).await?; + let mut proxy = ImageProxy::new().await?; + let oi = proxy.open_image(&imgref.imgref.to_string()).await?; + let (blob, driver) = fetch_layer_decompress(&mut proxy, &oi, layer).await?; let blob = ProgressReader { reader: blob, progress: options.progress, From 016978e75a0ecc380c68f419963f544113b4e061 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 5 Nov 2021 14:01:05 -0400 Subject: [PATCH 184/775] container/store: Write filter data to commitmeta, clean up API The import path was previously providing the data about filtered-out content (e.g. files in `/var`) "out of band" as part of the import return value. But this means that unless that data is e.g. logged it ends up being lost. Since the amount of data is bounded, let's instead add it as metadata to the merge commit. Then this lets us make a nice cleanup of entirely dropping the `CompletedImport` struct in favor of the `LayeredImageState` that we use in various places. Now it's possible for something like `rpm-ostree status` to also reliably show how many files were filtered out of each layer. --- lib/src/cli.rs | 14 ++++++++++---- lib/src/container/deploy.rs | 2 +- lib/src/container/store.rs | 31 +++++++++++++------------------ lib/tests/it/main.rs | 12 ++++++------ 4 files changed, 30 insertions(+), 29 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 31b84d5d1..e41dba2a3 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -6,12 +6,13 @@ //! such as `rpm-ostree` can directly reuse it. 
use anyhow::Result; -use ostree::gio; +use ostree::{gio, glib}; use std::collections::BTreeMap; use std::convert::TryFrom; use std::ffi::OsString; use structopt::StructOpt; +use crate::container as ostree_container; use crate::container::store::{LayeredImageImporter, PrepareResult}; use crate::container::{Config, ImageReference, OstreeImageReference, UnencapsulateOptions}; @@ -358,15 +359,20 @@ async fn container_store(repo: &str, imgref: &OstreeImageReference) -> Result<() } } let import = imp.import(prep).await?; - if !import.layer_filtered_content.is_empty() { - for (layerid, filtered) in import.layer_filtered_content { + let commit = &repo.load_commit(&import.merge_commit)?.0; + let commit_meta = &glib::VariantDict::new(Some(&commit.child_value(0))); + let filtered = commit_meta.lookup::( + ostree_container::store::META_FILTERED, + )?; + if let Some(filtered) = filtered { + for (layerid, filtered) in filtered { eprintln!("Unsupported paths filtered from {}:", layerid); for (prefix, count) in filtered { eprintln!(" {}: {}", prefix, count); } } } - println!("Wrote: {} => {}", imgref, import.state.merge_commit); + println!("Wrote: {} => {}", imgref, import.merge_commit); Ok(()) } diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 8d5d17e95..d240e6129 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -42,7 +42,7 @@ pub async fn deploy<'opts>( } let state = match imp.prepare().await? { PrepareResult::AlreadyPresent(r) => r, - PrepareResult::Ready(prep) => imp.import(prep).await?.state, + PrepareResult::Ready(prep) => imp.import(prep).await?, }; let commit = state.get_commit(); let origin = glib::KeyFile::new(); diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index f39cdda2c..41d946c5c 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -13,7 +13,8 @@ use fn_error_context::context; use oci_spec::image::{self as oci_image, ImageManifest}; use ostree::prelude::{Cast, ToVariant}; use ostree::{gio, glib}; -use std::collections::{BTreeMap, HashMap}; +use std::collections::HashMap; +use std::iter::FromIterator; /// The ostree ref prefix for blobs. const LAYER_PREFIX: &str = "ostree/container/blob"; @@ -24,6 +25,10 @@ const IMAGE_PREFIX: &str = "ostree/container/image"; const META_MANIFEST_DIGEST: &str = "ostree.manifest-digest"; /// The key injected into the merge commit with the manifest serialized as JSON. const META_MANIFEST: &str = "ostree.manifest"; +/// Value of type `a{sa{su}}` containing number of filtered out files +pub const META_FILTERED: &str = "ostree.tar-filtered"; +/// The type used to store content filtering information with `META_FILTERED`. +pub type MetaFilteredData = HashMap>; /// Convert e.g. sha256:12345... into `/ostree/container/blob/sha256_2B12345...`. fn ref_for_blob_digest(d: &str) -> Result { @@ -125,16 +130,6 @@ pub struct PreparedImport { pub layers: Vec, } -/// A successful import of a container image. -#[derive(Debug, PartialEq, Eq)] -pub struct CompletedImport { - /// The completed layered image state - pub state: LayeredImageState, - /// A mapping from layer blob IDs to a count of content filtered out - /// by toplevel path. 
- pub layer_filtered_content: BTreeMap>, -} - // Given a manifest, compute its ostree ref name and cached ostree commit fn query_layer(repo: &ostree::Repo, layer: oci_image::Descriptor) -> Result { let ostree_ref = ref_for_layer(&layer)?; @@ -254,7 +249,7 @@ impl LayeredImageImporter { } /// Import a layered container image - pub async fn import(self, import: Box) -> Result { + pub async fn import(self, import: Box) -> Result { let mut proxy = self.proxy; let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref); let ostree_ref = ref_for_image(&target_imgref.imgref)?; @@ -287,7 +282,7 @@ impl LayeredImageImporter { }; let mut layer_commits = Vec::new(); - let mut layer_filtered_content = BTreeMap::new(); + let mut layer_filtered_content: MetaFilteredData = HashMap::new(); for layer in import.layers { if let Some(c) = layer.commit { tracing::debug!("Reusing fetched commit {}", c); @@ -312,7 +307,8 @@ impl LayeredImageImporter { driver?; layer_commits.push(r.commit); if !r.filtered.is_empty() { - layer_filtered_content.insert(layer.digest().to_string(), r.filtered); + let filtered = HashMap::from_iter(r.filtered.into_iter()); + layer_filtered_content.insert(layer.digest().to_string(), filtered); } } } @@ -329,6 +325,8 @@ impl LayeredImageImporter { "ostree.importer.version", env!("CARGO_PKG_VERSION").to_variant(), ); + let filtered = layer_filtered_content.to_variant(); + metadata.insert(META_FILTERED, filtered); let metadata = metadata.to_variant(); // Destructure to transfer ownership to thread @@ -373,10 +371,7 @@ impl LayeredImageImporter { }, ) .await??; - Ok(CompletedImport { - state, - layer_filtered_content, - }) + Ok(state) } } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index fb20b4818..3dba6d8d6 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -446,7 +446,7 @@ async fn test_container_write_derive() -> Result<()> { let imported_commit = &fixture .destrepo - .load_commit(import.state.merge_commit.as_str())? + .load_commit(import.merge_commit.as_str())? .0; let digest = ostree_ext::container::store::manifest_digest_from_commit(imported_commit)?; assert!(digest.starts_with("sha256:")); @@ -456,7 +456,7 @@ async fn test_container_write_derive() -> Result<()> { bash!( "ostree --repo={repo} ls {r} /usr/share/anewfile", repo = fixture.destrepo_path.as_str(), - r = import.state.merge_commit.as_str() + r = import.merge_commit.as_str() )?; // Import again, but there should be no changes. @@ -469,7 +469,7 @@ async fn test_container_write_derive() -> Result<()> { panic!("Should have already imported {}", &exampleos_ref) } }; - assert_eq!(import.state.merge_commit, already_present.merge_commit); + assert_eq!(import.merge_commit, already_present.merge_commit); // Test upgrades; replace the oci-archive with new content. std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_V2_OCI)?; @@ -489,7 +489,7 @@ async fn test_container_write_derive() -> Result<()> { } let import = imp.import(prep).await?; // New commit. - assert_ne!(import.state.merge_commit, already_present.merge_commit); + assert_ne!(import.merge_commit, already_present.merge_commit); // We should still have exactly one image stored. let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; assert_eq!(images.len(), 1); @@ -503,7 +503,7 @@ async fn test_container_write_derive() -> Result<()> { fi ", repo = fixture.destrepo_path.as_str(), - r = import.state.merge_commit.as_str() + r = import.merge_commit.as_str() )?; // And there should be no changes on upgrade again. 
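The prepare/import pattern the tests above keep repeating can be condensed into a small helper. A minimal sketch against the API as of this patch, where `import` now returns the `LayeredImageState` directly:

```rust
use anyhow::Result;
use ostree_ext::container::store::{LayeredImageImporter, PrepareResult};
use ostree_ext::container::OstreeImageReference;

/// Pull an image only if something changed, returning the merge commit.
async fn pull_if_changed(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<String> {
    let mut imp = LayeredImageImporter::new(repo, imgref).await?;
    let state = match imp.prepare().await? {
        PrepareResult::AlreadyPresent(state) => state,
        PrepareResult::Ready(prep) => imp.import(prep).await?,
    };
    // For deployments, `state.get_commit()` (added earlier in this series)
    // instead selects the base commit when the image is not layered.
    Ok(state.merge_commit)
}
```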
@@ -516,7 +516,7 @@ async fn test_container_write_derive() -> Result<()> { panic!("Should have already imported {}", &exampleos_ref) } }; - assert_eq!(import.state.merge_commit, already_present.merge_commit); + assert_eq!(import.merge_commit, already_present.merge_commit); // Create a new repo, and copy to it let destrepo2 = ostree::Repo::create_at( From 312d6ee3e006f95193c933ece9c2776a2d80a476 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 5 Nov 2021 17:05:37 -0400 Subject: [PATCH 185/775] Fix `clippy::needless_borrow` --- lib/src/cli.rs | 8 ++++---- lib/src/container/ociwriter.rs | 2 +- lib/src/container/store.rs | 4 ++-- lib/src/container/unencapsulate.rs | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 31b84d5d1..545e87fca 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -264,7 +264,7 @@ async fn container_import( let opts = UnencapsulateOptions { progress: Some(tx_progress), }; - let import = crate::container::unencapsulate(repo, &imgref, Some(opts)); + let import = crate::container::unencapsulate(repo, imgref, Some(opts)); tokio::pin!(import); tokio::pin!(rx_progress); let import = loop { @@ -316,14 +316,14 @@ async fn container_export( labels: Some(labels), cmd, }; - let pushed = crate::container::encapsulate(repo, rev, &config, &imgref).await?; + let pushed = crate::container::encapsulate(repo, rev, &config, imgref).await?; println!("{}", pushed); Ok(()) } /// Load metadata for a container image with an encapsulated ostree commit. async fn container_info(imgref: &OstreeImageReference) -> Result<()> { - let (_, digest) = crate::container::fetch_manifest(&imgref).await?; + let (_, digest) = crate::container::fetch_manifest(imgref).await?; println!("{} digest: {}", imgref, digest); Ok(()) } @@ -331,7 +331,7 @@ async fn container_info(imgref: &OstreeImageReference) -> Result<()> { /// Write a layered container image into an OSTree commit. async fn container_store(repo: &str, imgref: &OstreeImageReference) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let mut imp = LayeredImageImporter::new(repo, &imgref).await?; + let mut imp = LayeredImageImporter::new(repo, imgref).await?; let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { println!("No changes in {} => {}", imgref, c.merge_commit); diff --git a/lib/src/container/ociwriter.rs b/lib/src/container/ociwriter.rs index 702d1620f..38e56761d 100644 --- a/lib/src/container/ociwriter.rs +++ b/lib/src/container/ociwriter.rs @@ -111,7 +111,7 @@ impl<'a> OciWriter<'a> { &self, c: Option, ) -> Result { - RawLayerWriter::new(&self.dir, c) + RawLayerWriter::new(self.dir, c) } #[allow(dead_code)] diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index f39cdda2c..82ed3aa88 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -368,7 +368,7 @@ impl LayeredImageImporter { txn.commit(cancellable)?; // Here we re-query state just to run through the same code path, // though it'd be cheaper to synthesize it from the data we already have. 
- let state = query_image(&repo, &imgref)?.unwrap(); + let state = query_image(repo, &imgref)?.unwrap(); Ok(state) }, ) @@ -398,7 +398,7 @@ fn query_image_impl( imgref: &OstreeImageReference, ) -> Result> { let ostree_ref = &ref_for_image(&imgref.imgref)?; - let merge_rev = repo.resolve_rev(&ostree_ref, true)?; + let merge_rev = repo.resolve_rev(ostree_ref, true)?; let (merge_commit, merge_commit_obj) = if let Some(r) = merge_rev { (r.to_string(), repo.load_commit(r.as_str())?.0) } else { diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 2094efbc5..07f4c5b95 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -192,7 +192,7 @@ pub async fn unencapsulate_from_manifest( return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); } let options = options.unwrap_or_default(); - let layer = require_one_layer_blob(&manifest)?; + let layer = require_one_layer_blob(manifest)?; event!( Level::DEBUG, "target blob digest:{} size: {}", From 89eb4031c723da81131f1feda9fc14f61c0b6cfc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 5 Nov 2021 17:07:48 -0400 Subject: [PATCH 186/775] Fix two minor clippy lints --- lib/src/container/deploy.rs | 4 ++-- lib/src/refescape.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 8d5d17e95..a14a014c8 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -27,11 +27,11 @@ pub struct DeployOpts<'a> { /// Write a container image to an OSTree deployment. /// /// This API is currently intended for only an initial deployment. -pub async fn deploy<'opts>( +pub async fn deploy( sysroot: &ostree::Sysroot, stateroot: &str, imgref: &OstreeImageReference, - options: Option>, + options: Option>, ) -> Result<()> { let cancellable = ostree::gio::NONE_CANCELLABLE; let options = options.unwrap_or_default(); diff --git a/lib/src/refescape.rs b/lib/src/refescape.rs index 649882064..a472a98ac 100644 --- a/lib/src/refescape.rs +++ b/lib/src/refescape.rs @@ -97,7 +97,7 @@ fn unescape_for_ref(s: &str) -> Result { } else if let Some(c) = next { buf.clear(); buf.push(c); - while let Some(c) = it.next() { + for c in &mut it { if c == '_' { break; } From 698827ce347b4ab7ce7184b5b17c6f6cc013d728 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 8 Nov 2021 18:28:43 -0500 Subject: [PATCH 187/775] ostree-and-containers.md: Forgot to `git add` in a previous PR I split this off in https://github.com/ostreedev/ostree-rs-ext/pull/141 But forgot to `git add` apparently. --- ostree-and-containers.md | 65 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 ostree-and-containers.md diff --git a/ostree-and-containers.md b/ostree-and-containers.md new file mode 100644 index 000000000..8e4b9c6e4 --- /dev/null +++ b/ostree-and-containers.md @@ -0,0 +1,65 @@ +# ostree vs OCI/Docker + +Be sure to see the main [README.md](README.md) which describes the current architecture intersecting ostree and OCI. + +Looking at this project, one might ask: why even have ostree? Why not just have the operating system directly use something like the [containers/image](https://github.com/containers/image/) storage? + +The first answer to this is that it's a goal of this project to "hide" ostree usage; it should feel "native" to ship and manage the operating system "as if" it was just running a container. 
+ +But, ostree has a *lot* of stuff built up around it and we can't just throw that away. + +## Understanding kernels + +ostree was designed from the start to manage bootable operating system trees - hence the name of the project. For example, ostree understands bootloaders and kernels/initramfs images. Container tools don't. + +## Signing + +ostree also quite early on gained an opinionated mechanism to sign images (commits) via GPG. As of this time there are multiple competing mechanisms for container signing, and it is not widely deployed. +For running random containers from `docker.io`, it can be OK to just trust TLS or pin via `@sha256` - a whole idea of Docker is that containers are isolated and it should be reasonably safe to +at least try out random containers. But for the *operating system* its integrity is paramount because it's ultimately trusted. + +## Deduplication + +ostree's hardlink store is designed around de-duplication. Operating systems can get large and they are most natural as "base images" - which in the Docker container model +are duplicated on disk. Of course storage systems like containers/image could learn to de-duplicate; but it would be a use case that *mostly* applied to just the operating system. + +## Being able to remove all container images + +In Kubernetes, the kubelet will prune the image storage periodically, removing images not backed by containers. If we store the operating system itself as an image...well, we'd need to do something like teach the container storage to have the concept of an image that is "pinned" because it's actually the booted filesystem. Or create a "fake" container representing the running operating system. + +Other projects in this space ended up having an "early docker" distinct from the "main docker" which brings its own large set of challenges. + +## SELinux + +OSTree has *first class* support for SELinux. It was baked into the design from the very start. Handling SELinux is very tricky because it's a part of the operating system that can influence *everything else*. And specifically file labels. + +In this approach we aren't trying to inject xattrs into the tar stream; they're stored out of band for reliability. + +## Independence of complexity of container storage + +This stuff could be done - but the container storage and tooling is already quite complex, and introducing a special case like this would be treading into new ground. + +Today for example, cri-o ships a `crio-wipe.service` which removes all container storage across major version upgrades. + +ostree is a fairly simple format and has been 100% stable throughout its life so far. + +## ostree format has per-file integrity + +More on this here: https://ostreedev.github.io/ostree/related-projects/#docker + +## Allow hiding ostree while not reinventing everything + +So, again the goal here is: make it feel "native" to ship and manage the operating system "as if" it was just running a container without throwing away everything in ostree today. + + +### Future: Running an ostree-container as a webserver + +It also should work to run the ostree-container as a webserver, which will expose a webserver that responds to `GET /repo`. + +The effect will be as if it was built from a `Dockerfile` that contains `EXPOSE 8080`; it will work to e.g. +`kubectl run nginx --image=quay.io/exampleos/exampleos:latest --replicas=1` +and then also create a service for it. 
+ +### Integrating with future container deltas + +See https://blogs.gnome.org/alexl/2020/05/13/putting-container-updates-on-a-diet/ From a21bbcd6f3a818dca361217e70d8c4910c434535 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 8 Nov 2021 20:47:36 -0500 Subject: [PATCH 188/775] container: Add options struct for encapsulation This exposes the ability to disable compression, which is mostly only useful for the internal flow that pushes to containers/storage where we don't want compression. But the main goal here is to pave the way for adding a `bool chunking`. --- lib/src/cli.rs | 3 ++- lib/src/container/encapsulate.rs | 33 ++++++++++++++++++++++---------- lib/tests/it/main.rs | 15 ++++++++++----- 3 files changed, 35 insertions(+), 16 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index f9879b092..644c6ae17 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -317,7 +317,8 @@ async fn container_export( labels: Some(labels), cmd, }; - let pushed = crate::container::encapsulate(repo, rev, &config, imgref).await?; + let opts = Some(Default::default()); + let pushed = crate::container::encapsulate(repo, rev, &config, opts, &imgref).await?; println!("{}", pushed); Ok(()) } diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 6c20fba54..f9fa719e4 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -41,7 +41,7 @@ fn build_oci( rev: &str, ocidir_path: &Path, config: &Config, - compression: Option<flate2::Compression>, + opts: ExportOpts, ) -> Result<ImageReference> { // Explicitly error if the target exists std::fs::create_dir(ocidir_path).context("Creating OCI dir")?; @@ -72,8 +72,13 @@ fn build_oci( let cmd: Vec<_> = cmd.iter().map(|s| s.as_str()).collect(); writer.set_cmd(&cmd); } + let compression = if opts.compress { + flate2::Compression::default() + } else { + flate2::Compression::none() + }; - let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, compression)?; + let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, Some(compression))?; writer.push_layer(rootfs_blob); writer.complete()?; @@ -89,20 +94,20 @@ async fn build_impl( repo: &ostree::Repo, ostree_ref: &str, config: &Config, + opts: Option<ExportOpts>, dest: &ImageReference, ) -> Result<String> { - let compression = if dest.transport == Transport::ContainerStorage { - Some(flate2::Compression::none()) - } else { - None - }; + let mut opts = opts.unwrap_or_default(); + if dest.transport == Transport::ContainerStorage { + opts.compress = false; + } let digest = if dest.transport == Transport::OciDir { let _copied: ImageReference = build_oci( repo, ostree_ref, Path::new(dest.name.as_str()), config, - compression, + opts, )?; None } else { @@ -115,7 +120,7 @@ async fn build_impl( None }; - let src = build_oci(repo, ostree_ref, Path::new(tempdest), config, compression)?; + let src = build_oci(repo, ostree_ref, Path::new(tempdest), config, opts)?; let mut cmd = skopeo::new_cmd(); tracing::event!(Level::DEBUG, "Copying {} to {}", src, dest); @@ -149,6 +154,13 @@ async fn build_impl( } } +/// Options controlling commit export into OCI +#[derive(Debug, Default)] +pub struct ExportOpts { + /// If true, perform gzip compression of the tar layers. + pub compress: bool, +} + /// Given an OSTree repository and ref, generate a container image. /// /// The returned `ImageReference` will contain a digested (e.g. `@sha256:`) version of the destination.
@@ -156,7 +168,8 @@ pub async fn encapsulate<S: AsRef<str>>( repo: &ostree::Repo, ostree_ref: S, config: &Config, + opts: Option<ExportOpts>, dest: &ImageReference, ) -> Result<String> { - build_impl(repo, ostree_ref.as_ref(), config, dest).await + build_impl(repo, ostree_ref.as_ref(), config, opts, dest).await } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 3dba6d8d6..fac3d623b 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -326,10 +326,15 @@ async fn test_container_import_export() -> Result<()> { ), cmd: Some(vec!["/bin/bash".to_string()]), }; - let digest = - ostree_ext::container::encapsulate(&fixture.srcrepo, TESTREF, &config, &srcoci_imgref) - .await - .context("exporting")?; + let digest = ostree_ext::container::encapsulate( + &fixture.srcrepo, + TESTREF, + &config, + None, + &srcoci_imgref, + ) + .await + .context("exporting")?; assert!(srcoci_path.exists()); let inspect = skopeo_inspect(&srcoci_imgref.to_string())?; @@ -560,7 +565,7 @@ async fn test_container_import_export_registry() -> Result<()> { ..Default::default() }; let digest = - ostree_ext::container::encapsulate(&fixture.srcrepo, TESTREF, &config, &src_imgref) + ostree_ext::container::encapsulate(&fixture.srcrepo, TESTREF, &config, None, &src_imgref) .await .context("exporting to registry")?; let mut digested_imgref = src_imgref.clone(); From 49b51fc6e391cfbdc6c20769db3229285f845188 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 9 Nov 2021 14:45:58 -0500 Subject: [PATCH 189/775] `Cargo.toml`: Add build tweaks from rpm-ostree For the same reasons. --- Cargo.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index d3940e2cb..d49afd92f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,9 @@ [workspace] members = ["cli", "lib"] +# These bits are copied from rpm-ostree. +[profile.dev] +opt-level = 1 # No optimizations are too slow for us. + [profile.release] -codegen-units = 1 lto = "thin" From b924e1eb10e97f82aaf610b6d9be61c373dd96fb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 Nov 2021 15:31:39 -0500 Subject: [PATCH 190/775] Bump to containers-image-proxy 0.3 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index a059910d1..1de876712 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,7 +10,7 @@ version = "0.4.0" [dependencies] anyhow = "1.0" -containers-image-proxy = "0.2" +containers-image-proxy = "0.3" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bytes = "1.0.1" bitflags = "1" From 52ac3549c5cd343e24a6e083d1cfc2b7a7bf4702 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 Nov 2021 13:47:11 -0500 Subject: [PATCH 191/775] lib/container: Drop unnecessary `mut` for proxy The proxy has gone back to a shared borrow with internal locking. --- lib/src/container/store.rs | 2 +- lib/src/container/unencapsulate.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 7bcfb237b..247b8b4e1 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -168,7 +168,7 @@ pub fn manifest_digest_from_commit(commit: &glib::Variant) -> Result<String> { impl LayeredImageImporter { /// Create a new importer.
pub async fn new(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<Self> { - let mut proxy = ImageProxy::new().await?; + let proxy = ImageProxy::new().await?; let proxy_img = proxy.open_image(&imgref.imgref.to_string()).await?; let repo = repo.clone(); Ok(LayeredImageImporter { diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 07f4c5b95..979ab1899 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -92,7 +92,7 @@ impl<T: AsyncRead> AsyncRead for ProgressReader<T> { pub async fn fetch_manifest( imgref: &OstreeImageReference, ) -> Result<(oci_spec::image::ImageManifest, String)> { - let mut proxy = ImageProxy::new().await?; + let proxy = ImageProxy::new().await?; let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; let (digest, raw_manifest) = proxy.fetch_manifest(oi).await?; proxy.close_image(oi).await?; From 54f2d3c333406990e220c2e00de4ae14d191929f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 Nov 2021 14:06:38 -0500 Subject: [PATCH 192/775] lib: Expose proxy configuration as public API We will need to support basically all options that the proxy does, such as disabling TLS, separate authfile etc. Rather than write a new type, just re-export it. Thread the new options struct through the store and deploy APIs. --- lib/src/cli.rs | 3 ++- lib/src/container/deploy.rs | 10 +++++++++- lib/src/container/store.rs | 14 ++++++++++++-- lib/tests/it/main.rs | 36 ++++++++++++++++++++++++------------ 4 files changed, 47 insertions(+), 16 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 644c6ae17..29a9963ae 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -333,7 +333,7 @@ async fn container_info(imgref: &OstreeImageReference) -> Result<()> { /// Write a layered container image into an OSTree commit. async fn container_store(repo: &str, imgref: &OstreeImageReference) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let mut imp = LayeredImageImporter::new(repo, imgref).await?; + let mut imp = LayeredImageImporter::new(repo, imgref, Default::default()).await?; let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { println!("No changes in {} => {}", imgref, c.merge_commit); @@ -473,6 +473,7 @@ where let options = crate::container::deploy::DeployOpts { kargs: kargs.as_deref(), target_imgref: target_imgref.as_ref(), + ..Default::default() }; crate::container::deploy::deploy(sysroot, &stateroot, &imgref, Some(options)) .await diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index d7f695272..020657e52 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -22,6 +22,9 @@ pub struct DeployOpts<'a> { /// /// To implement this, use this option for the latter `:latest` tag. pub target_imgref: Option<&'a OstreeImageReference>, + + /// Configuration for fetching containers. + pub proxy_cfg: Option<ImageProxyConfig>, } /// Write a container image to an OSTree deployment.
@@ -36,7 +39,12 @@ pub async fn deploy( let cancellable = ostree::gio::NONE_CANCELLABLE; let options = options.unwrap_or_default(); let repo = &sysroot.repo().unwrap(); - let mut imp = super::store::LayeredImageImporter::new(repo, imgref).await?; + let mut imp = super::store::LayeredImageImporter::new( + repo, + imgref, + options.proxy_cfg.unwrap_or_default(), + ) + .await?; if let Some(target) = options.target_imgref { imp.set_target(target); } diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 247b8b4e1..c973f270c 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -16,6 +16,12 @@ use ostree::{gio, glib}; use std::collections::HashMap; use std::iter::FromIterator; +/// Configuration for the proxy. +/// +/// We re-export this rather than inventing our own wrapper +/// in the interest of avoiding duplication. +pub use containers_image_proxy::ImageProxyConfig; + /// The ostree ref prefix for blobs. const LAYER_PREFIX: &str = "ostree/container/blob"; /// The ostree ref prefix for image references. @@ -167,8 +173,12 @@ pub fn manifest_digest_from_commit(commit: &glib::Variant) -> Result<String> { impl LayeredImageImporter { /// Create a new importer. - pub async fn new(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<Self> { - let proxy = ImageProxy::new().await?; + pub async fn new( + repo: &ostree::Repo, + imgref: &OstreeImageReference, + config: ImageProxyConfig, + ) -> Result<Self> { + let proxy = ImageProxy::new_with_config(config).await?; let proxy_img = proxy.open_image(&imgref.imgref.to_string()).await?; let repo = repo.clone(); Ok(LayeredImageImporter { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index fac3d623b..bc6015a09 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -431,9 +431,12 @@ async fn test_container_write_derive() -> Result<()> { assert!(images.is_empty()); // Pull a derived image - two layers, new base plus one layer. - let mut imp = - ostree_ext::container::store::LayeredImageImporter::new(&fixture.destrepo, &exampleos_ref) - .await?; + let mut imp = ostree_ext::container::store::LayeredImageImporter::new( + &fixture.destrepo, + &exampleos_ref, + Default::default(), + ) + .await?; let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), PrepareResult::Ready(r) => r, @@ -465,9 +468,12 @@ async fn test_container_write_derive() -> Result<()> { )?; // Import again, but there should be no changes. - let mut imp = - ostree_ext::container::store::LayeredImageImporter::new(&fixture.destrepo, &exampleos_ref) - .await?; + let mut imp = ostree_ext::container::store::LayeredImageImporter::new( + &fixture.destrepo, + &exampleos_ref, + Default::default(), + ) + .await?; let already_present = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => c, PrepareResult::Ready(_) => { @@ -478,9 +484,12 @@ async fn test_container_write_derive() -> Result<()> { // Test upgrades; replace the oci-archive with new content. std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_V2_OCI)?; - let mut imp = - ostree_ext::container::store::LayeredImageImporter::new(&fixture.destrepo, &exampleos_ref) - .await?; + let mut imp = ostree_ext::container::store::LayeredImageImporter::new( + &fixture.destrepo, + &exampleos_ref, + Default::default(), + ) + .await?; let prep = match imp.prepare().await?
{ PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), PrepareResult::Ready(r) => r, @@ -512,9 +521,12 @@ async fn test_container_write_derive() -> Result<()> { )?; // And there should be no changes on upgrade again. - let mut imp = - ostree_ext::container::store::LayeredImageImporter::new(&fixture.destrepo, &exampleos_ref) - .await?; + let mut imp = ostree_ext::container::store::LayeredImageImporter::new( + &fixture.destrepo, + &exampleos_ref, + Default::default(), + ) + .await?; let already_present = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => c, PrepareResult::Ready(_) => { From 3873ef5aab1c9bbf6265eecd781b97e3e13ead80 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 Nov 2021 15:06:09 -0500 Subject: [PATCH 193/775] cli: Expose current container proxy opts as CLI flags We want this for the same reason they exist in skopeo around non-default authfiles, etc. The chain here is pretty amazing: - containers/image internal Go API - skopeo CLI flags - containers/containers-image-proxy Rust API - ostreedev/ostree-rs-ext Rust API - ostreedev/ostree-rs-ext CLI code - rpm-ostree CLI --- lib/src/cli.rs | 44 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 40 insertions(+), 4 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 29a9963ae..b53fed12b 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -125,6 +125,18 @@ enum ContainerOpts { Image(ContainerImageOpts), } +/// Options for container image fetching. +#[derive(Debug, StructOpt)] +struct ContainerProxyOpts { + #[structopt(long)] + /// Path to Docker-formatted authentication file. + authfile: Option<String>, + + #[structopt(long)] + /// Skip TLS verification. + insecure_skip_tls_verification: bool, +} + /// Options for import/export to tar archives. #[derive(Debug, StructOpt)] enum ContainerImageOpts { @@ -144,6 +156,9 @@ enum ContainerImageOpts { /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest #[structopt(parse(try_from_str = parse_imgref))] imgref: OstreeImageReference, + + #[structopt(flatten)] + proxyopts: ContainerProxyOpts, }, /// Copy a pulled container image from one repo to another. @@ -176,6 +191,9 @@ enum ContainerImageOpts { #[structopt(parse(try_from_str = parse_imgref))] imgref: OstreeImageReference, + #[structopt(flatten)] + proxyopts: ContainerProxyOpts, + /// Target image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest /// /// If specified, `--imgref` will be used as a source, but this reference will be emitted into the origin @@ -220,6 +238,15 @@ enum Opt { ImaSign(ImaSignOpts), } +impl Into<ostree_container::store::ImageProxyConfig> for ContainerProxyOpts { + fn into(self) -> ostree_container::store::ImageProxyConfig { + ostree_container::store::ImageProxyConfig { + authfile: self.authfile, + insecure_skip_tls_verification: Some(self.insecure_skip_tls_verification), + } + } +} + /// Import a tar archive containing an ostree commit. async fn tar_import(opts: &ImportOpts) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; @@ -331,9 +358,13 @@ async fn container_info(imgref: &OstreeImageReference) -> Result<()> { } /// Write a layered container image into an OSTree commit.
-async fn container_store(repo: &str, imgref: &OstreeImageReference) -> Result<()> { +async fn container_store( + repo: &str, + imgref: &OstreeImageReference, + proxyopts: ContainerProxyOpts, +) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; - let mut imp = LayeredImageImporter::new(repo, imgref, Default::default()).await?; + let mut imp = LayeredImageImporter::new(repo, imgref, proxyopts.into()).await?; let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { println!("No changes in {} => {}", imgref, c.merge_commit); @@ -444,7 +475,11 @@ where } Ok(()) } - ContainerImageOpts::Pull { repo, imgref } => container_store(&repo, &imgref).await, + ContainerImageOpts::Pull { + repo, + imgref, + proxyopts, + } => container_store(&repo, &imgref, proxyopts).await, ContainerImageOpts::Copy { src_repo, dest_repo, @@ -462,6 +497,7 @@ where imgref, target_imgref, karg, + proxyopts, } => { let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); sysroot.load(gio::NONE_CANCELLABLE)?; @@ -473,7 +509,7 @@ where let options = crate::container::deploy::DeployOpts { kargs: kargs.as_deref(), target_imgref: target_imgref.as_ref(), - ..Default::default() + proxy_cfg: Some(proxyopts.into()), }; crate::container::deploy::deploy(sysroot, &stateroot, &imgref, Some(options)) .await From 63328a2423b66f0262497e62eeaada378059e8d7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 Nov 2021 16:06:38 -0500 Subject: [PATCH 194/775] Bump to 0.5.0 For various API changes. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 1de876712..bc928136a 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.4.0" +version = "0.5.0" [dependencies] anyhow = "1.0" From 9679fdcb51adc44b4d24bad98081511b6fb40262 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 Nov 2021 16:16:19 -0500 Subject: [PATCH 195/775] ci: Sync with containers/containers-image-proxy-rs Specifically this picks up the rust-cache bits which dramatically speed up CI builds. --- .github/workflows/rust.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 05e8ac884..6dff6d1e2 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -1,3 +1,6 @@ +# Inspired by https://github.com/rust-analyzer/rust-analyzer/blob/master/.github/workflows/ci.yaml +# but tweaked in several ways. If you make changes here, consider doing so across other +# repositories in e.g. ostreedev etc. name: Rust permissions: @@ -24,8 +27,11 @@ jobs: run: ./ci/installdeps.sh - name: Format run: cargo fmt -- --check -l + # xref containers/containers-image-proxy-rs + - name: Cache Dependencies + uses: Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 - name: Build - run: cargo build --verbose + run: cargo test --no-run - name: Run tests - run: cargo test --verbose + run: cargo test -- --nocapture --quiet From d737cf3e35a629bee3ca7ddf6fc9683cd905ddaa Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 Nov 2021 16:16:46 -0500 Subject: [PATCH 196/775] ci: Re-indent yaml For cleanliness. 
--- .github/workflows/rust.yml | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 6dff6d1e2..5ad39eac0 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -8,30 +8,28 @@ permissions: on: push: - branches: [ main ] + branches: [main] pull_request: - branches: [ main ] + branches: [main] env: CARGO_TERM_COLOR: always jobs: build: - runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - - uses: actions/checkout@v2 - - name: Install deps - run: ./ci/installdeps.sh - - name: Format - run: cargo fmt -- --check -l - # xref containers/containers-image-proxy-rs - - name: Cache Dependencies - uses: Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 - - name: Build - run: cargo test --no-run - - name: Run tests - run: cargo test -- --nocapture --quiet - + - uses: actions/checkout@v2 + - name: Install deps + run: ./ci/installdeps.sh + - name: Format + run: cargo fmt -- --check -l + # xref containers/containers-image-proxy-rs + - name: Cache Dependencies + uses: Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 + - name: Build + run: cargo test --no-run + - name: Run tests + run: cargo test -- --nocapture --quiet From af1c000868c9a59f44d9dac600cd9684adfe2c92 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 Nov 2021 09:06:17 -0500 Subject: [PATCH 197/775] cli: Use `..Default::default()` for proxy options This way it's not a breaking change to add a new member to the struct. --- lib/src/cli.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index b53fed12b..84d359b2f 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -243,6 +243,7 @@ impl Into for ContainerProxyOpts { ostree_container::store::ImageProxyConfig { authfile: self.authfile, insecure_skip_tls_verification: Some(self.insecure_skip_tls_verification), + ..Default::default() } } } From 423dcaff4b950686850675a1a1caa70cdfad5740 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 Nov 2021 14:11:28 -0500 Subject: [PATCH 198/775] tar/export: Add more error prefixing I'm hitting an error on symlink length exporting Fedora Silverblue, this helped me debug via: ``` $ /var/srv/walters/src/github/ostreedev/ostree-rs-ext/target/debug/ostree-ext-cli container encapsulate --repo=/ostree/repo f8e7ed0ee87a8ae48eb779c085f4fffddb824c5830e43715e6c92799a678d806 oci:/var/tmp/fedora-silverblue error: Building oci: Writing ostree root to blob: Exporting commit: Writing content symlink: 87c8834da3bad596352375ff413bca064584f184bdd6ba1764293d137249e168: provided value is too long when setting link name for $ ostree ls -C -R f8e7ed0ee87a8ae48eb779c085f4fffddb824c5830e43715e6c92799a678d806 | grep 87c8834da3bad596352375ff413bca064584f184bdd6ba1764293d137249e168 l00777 0 0 0 87c8834da3bad596352375ff413bca064584f184bdd6ba1764293d137249e168 /usr/lib/.build-id/05/159ed904e45ff5100f7acd3d3b99fa7e27e34f -> ../../../../usr/lib64/qt5/plugins/wayland-graphics-integration-server/libqt-wayland-compositor-xcomposite-egl.so $ ``` Previously we just said "Error exporting, provided value is too long". --- lib/src/tar/export.rs | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 06b33e931..2cfadda93 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -1,6 +1,7 @@ //! 
APIs for creating container images from OSTree commits use crate::objgv::*; +use anyhow::Context; use anyhow::Result; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; @@ -157,10 +158,12 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let data = data.as_ref(); h.set_size(data.len() as u64); self.out - .append_data(&mut h, &object_path(objtype, checksum), data)?; + .append_data(&mut h, &object_path(objtype, checksum), data) + .with_context(|| format!("Writing object {}", checksum))?; Ok(()) } + #[context("Writing xattrs")] fn append_xattrs( &mut self, xattrs: &glib::Variant, @@ -224,12 +227,18 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { h.set_entry_type(tar::EntryType::Regular); h.set_size(meta.size() as u64); let mut instream = BufReader::with_capacity(BUF_CAPACITY, instream.into_read()); - self.out.append_data(&mut h, &path, &mut instream)?; + self.out + .append_data(&mut h, &path, &mut instream) + .with_context(|| format!("Writing regfile {}", checksum))?; } else { h.set_size(0); h.set_entry_type(tar::EntryType::Symlink); - h.set_link_name(meta.symlink_target().unwrap().as_str())?; - self.out.append_data(&mut h, &path, &mut std::io::empty())?; + let context = || format!("Writing content symlink: {}", checksum); + h.set_link_name(meta.symlink_target().unwrap().as_str()) + .with_context(context)?; + self.out + .append_data(&mut h, &path, &mut std::io::empty()) + .with_context(context)?; } } From 6c5e53eb5cd1a3b350ffa015479015bef363f813 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 Nov 2021 16:53:01 -0500 Subject: [PATCH 199/775] tar/import: Use the correct API to read link name The `entry::header().link_name()` path doesn't support long links, and this is a documented foot-gun. Use `entry::link_name()` instead. Needed for e.g. `/ostree/repo/objects/87/c8834da3bad596352375ff413bca064584f184bdd6ba1764293d137249e168.file -> ../../../../usr/lib64/qt5/plugins/wayland-graphics-integration-server/libqt-wayland-compositor-xcomposite-egl.so`. --- lib/src/tar/import.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 32b022168..4612cb8cd 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -284,7 +284,6 @@ impl Importer { ) -> Result<()> { let (uid, gid, _) = header_attrs(entry.header())?; let target = entry - .header() .link_name()? .ok_or_else(|| anyhow!("Invalid symlink"))?; let target = target From 24b68d3ab71a216a297089dee3927c9ef63d1dab Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 19 Nov 2021 10:36:56 -0500 Subject: [PATCH 200/775] tar/import: Write commitpartial state This mirrors the logic in `ostree_repo_pull()` - before we write the commit object, we want to mark it as partial so the core knows objects are still being written. And after the transaction, undo that marker. Motivated by supporting "chunked" containers (split tar) where this partial state will be more obvious. --- lib/src/tar/import.rs | 6 ++++++ lib/tests/it/main.rs | 3 ++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 4612cb8cd..8fd2d418b 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -523,6 +523,8 @@ impl Importer { ostree::RepoVerifyFlags::empty(), )?; + self.repo.mark_commit_partial(&checksum, true)?; + // Write the commit object, which also verifies its checksum. 
let actual_checksum = self.repo @@ -534,6 +536,8 @@ impl Importer { self.repo .write_commit_detached_metadata(&checksum, Some(&commitmeta), cancellable)?; } else { + self.repo.mark_commit_partial(&checksum, true)?; + // We're not doing any validation of the commit, so go ahead and write it. let actual_checksum = self.repo @@ -572,6 +576,8 @@ impl Importer { } txn.commit(cancellable)?; + self.repo.mark_commit_partial(&checksum, false)?; + Ok(checksum) } } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index bc6015a09..bcd59b181 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -226,13 +226,14 @@ async fn test_tar_import_signed() -> Result<()> { }), ) .await?; - let (commitdata, _) = fixture.destrepo.load_commit(&imported)?; + let (commitdata, state) = fixture.destrepo.load_commit(&imported)?; assert_eq!( EXAMPLEOS_CONTENT_CHECKSUM, ostree::commit_get_content_checksum(&commitdata) .unwrap() .as_str() ); + assert_eq!(state, ostree::RepoCommitState::NORMAL); Ok(()) } From 491be7483c84c9a597facb54f1cbe81f64c9f426 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 21 Nov 2021 14:25:04 -0500 Subject: [PATCH 201/775] tar/import: Remove unnecessary `abort_transaction()` This is dead code since b7ba07556c8c54a719f47d9a8f1ef47b5b7a0e4b when we switched to `auto_transaction()`. --- lib/src/tar/import.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 8fd2d418b..6958fc92c 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -51,12 +51,6 @@ struct Importer { stats: ImportStats, } -impl Drop for Importer { - fn drop(&mut self) { - let _ = self.repo.abort_transaction(gio::NONE_CANCELLABLE); - } -} - /// Validate size/type of a tar header for OSTree metadata object. fn validate_metadata_header(header: &tar::Header, desc: &str) -> Result<usize> { if header.entry_type() != tar::EntryType::Regular { From 5a0d8e24dfe632b07a47543aea782646f861d9d3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 21 Nov 2021 14:27:44 -0500 Subject: [PATCH 202/775] tar/import: Move directory filtering into filter function It's cleaner if our `filter_entry` function does most of the work here instead of having half of the filtering in a closure. --- lib/src/tar/import.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 6958fc92c..ffae41bb0 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -152,12 +152,16 @@ impl Importer { } } - // Given a tar entry, filter it out if it doesn't start with the repository prefix. + // Given a tar entry, filter it out if it doesn't look like an object file in + // `/sysroot/ostree`. // It is an error if the filename is invalid UTF-8. If it is valid UTF-8, return // an owned copy of the path. fn filter_entry<R: std::io::Read>( e: tar::Entry<R>, ) -> Result<Option<(tar::Entry<R>, Utf8PathBuf)>> { + if e.header().entry_type() == tar::EntryType::Directory { + return Ok(None); + } let orig_path = e.path()?; let path = Utf8Path::from_path(&*orig_path) .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", orig_path))?; @@ -457,12 +461,7 @@ impl Importer { // Create an iterator that skips over directories; we just care about the file names.
let mut ents = archive.entries()?.filter_map(|e| match e { - Ok(e) => { - if e.header().entry_type() == tar::EntryType::Directory { - return None; - } - Self::filter_entry(e).transpose() - } + Ok(e) => Self::filter_entry(e).transpose(), Err(e) => Some(Err(anyhow::Error::msg(e))), }); From 4cbefd5f19e1e2cc69b3bb14fee183136c6bedf1 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 21 Nov 2021 14:33:33 -0500 Subject: [PATCH 203/775] tar/import: Move txn outside of `Importer` This is prep for split tar imports, where we'll reuse distinct calls to the importer across a single transaction. --- lib/src/tar/import.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index ffae41bb0..2ae150094 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -455,10 +455,6 @@ impl Importer { archive: &mut tar::Archive<impl Read>, cancellable: Option<&gio::Cancellable>, ) -> Result<String> { - // Unfortunately our use of `&mut self` here clashes with borrowing the repo - let txn_repo = self.repo.clone(); - let txn = txn_repo.auto_transaction(cancellable)?; - // Create an iterator that skips over directories; we just care about the file names. let mut ents = archive.entries()?.filter_map(|e| match e { Ok(e) => Self::filter_entry(e).transpose(), Err(e) => Some(Err(anyhow::Error::msg(e))), }); @@ -567,9 +563,6 @@ impl Importer { self.import_xattrs(entry)?; } } - txn.commit(cancellable)?; - - self.repo.mark_commit_partial(&checksum, false)?; Ok(checksum) } } @@ -604,8 +597,12 @@ pub async fn import_tar( let repo = repo.clone(); let import = crate::tokio_util::spawn_blocking_cancellable(move |cancellable| { let mut archive = tar::Archive::new(src); + let txn = repo.auto_transaction(Some(cancellable))?; let importer = Importer::new(&repo, options.remote); - importer.import(&mut archive, Some(cancellable)) + let checksum = importer.import(&mut archive, Some(cancellable))?; + txn.commit(Some(cancellable))?; + repo.mark_commit_partial(&checksum, false)?; + Ok::<_, anyhow::Error>(checksum) }) .map_err(anyhow::Error::msg); let import: String = import.await??; From 24b68d3ab71a216a297089dee3927c9ef63d1dab Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 23 Nov 2021 11:52:09 -0500 Subject: [PATCH 204/775] container/encapsulate: Write `ostree.encapsulated` annotation on layer This is prep for commit splitting into separate blobs, to help identify layers which are "ostree native" and those which are not. It turns out since the Docker schema: https://docs.docker.com/registry/spec/manifest-v2-2/ does not have annotations, they simply get discarded, so we can't rely on this. But...I wrote this code and I think it makes sense, so let's ship it anyways. --- lib/src/container/encapsulate.rs | 9 +++++++- lib/src/container/ociwriter.rs | 39 ++++++++++++++++++++------------ 2 files changed, 32 insertions(+), 16 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index f9fa719e4..2f73bc916 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -11,6 +11,12 @@ use std::collections::BTreeMap; use std::path::Path; use tracing::{instrument, Level}; +/// Annotation injected into the layer to say that this is an ostree commit. +/// However, because this gets lost when converted to D2S2 https://docs.docker.com/registry/spec/manifest-v2-2/ +/// schema, it's not actually useful today. But, we keep it +/// out of principle. +const BLOB_OSTREE_ANNOTATION: &str = "ostree.encapsulated"; /// Configuration for the generated container.
#[derive(Debug, Default)] pub struct Config { @@ -79,7 +85,8 @@ fn build_oci( }; let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, Some(compression))?; - writer.push_layer(rootfs_blob); + let annos = [(BLOB_OSTREE_ANNOTATION.to_string(), "true".to_string())]; + writer.push_layer_annotated(rootfs_blob, Some(annos)); writer.complete()?; Ok(ImageReference { diff --git a/lib/src/container/ociwriter.rs b/lib/src/container/ociwriter.rs index 38e56761d..d50cd6f2f 100644 --- a/lib/src/container/ociwriter.rs +++ b/lib/src/container/ociwriter.rs @@ -76,7 +76,7 @@ pub(crate) struct OciWriter<'a> { cmd: Option<Vec<String>>, - layers: Vec<Layer>, + layers: Vec<(oci_image::Descriptor, String)>, } /// Write a serializable data (JSON) as an OCI blob @@ -131,9 +131,28 @@ impl<'a> OciWriter<'a> { Ok(()) } - /// Add a layer to the top of the image stack. The firsh pushed layer becomes the root. + /// Add a layer to the top of the image stack. + /// + /// The first pushed layer becomes the root. pub(crate) fn push_layer(&mut self, layer: Layer) { - self.layers.push(layer) + let v: Option<Vec<(String, String)>> = None; + self.push_layer_annotated(layer, v); + } + + /// Add a layer to the top of the image stack with optional annotations. + /// + /// This is otherwise equivalent to [`Self::push_layer`]. + pub(crate) fn push_layer_annotated( + &mut self, + layer: Layer, + annotations: Option<impl IntoIterator<Item = (String, String)>>, + ) { + let mut builder = layer.descriptor().media_type(MediaType::ImageLayerGzip); + if let Some(annotations) = annotations { + builder = builder.annotations(annotations); + } + self.layers + .push((builder.build().unwrap(), layer.uncompressed_sha256)); } pub(crate) fn set_cmd(&mut self, e: &[&str]) { @@ -167,7 +186,7 @@ impl<'a> OciWriter<'a> { let diffids: Vec<String> = self .layers .iter() - .map(|l| format!("sha256:{}", l.uncompressed_sha256)) + .map(|(_, diffid)| format!("sha256:{}", diffid)) .collect(); let rootfs = oci_image::RootFsBuilder::default() .diff_ids(diffids) .build() .unwrap(); @@ -200,17 +219,7 @@ impl<'a> OciWriter<'a> { .unwrap(); let config_blob = write_json_blob(self.dir, &config, MediaType::ImageConfig)?; - let layers: Vec<oci_image::Descriptor> = self - .layers - .iter() - .map(|layer| { - layer - .descriptor() - .media_type(MediaType::ImageLayerGzip) - .build() - .unwrap() - }) - .collect(); + let layers: Vec<oci_image::Descriptor> = self.layers.into_iter().map(|v| v.0).collect(); let manifest_data = oci_image::ImageManifestBuilder::default() .schema_version(oci_image::SCHEMA_VERSION) .config(config_blob.build().unwrap()) From 593d1931dc6f226476bf62777837416ee311c145 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 24 Nov 2021 12:12:13 -0500 Subject: [PATCH 205/775] tar/export: Fix numeric export for objects directories Currently it looks like `sysroot/ostree/repo/objects/0xf5` when it should obviously be `sysroot/ostree/repo/objects/f5`.
Reported by @giuseppe --- lib/src/tar/export.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 2cfadda93..f7ee3b384 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -83,7 +83,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { h.set_gid(0); h.set_mode(0o755); h.set_size(0); - let path = format!("{}/repo/objects/{:#04x}", OSTREEDIR, d); + let path = format!("{}/repo/objects/{:02x}", OSTREEDIR, d); self.out.append_data(&mut h, &path, &mut std::io::empty())?; } From 2bdff79c1707c62b2622e67df5c389c2d093da47 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 29 Nov 2021 09:42:19 -0500 Subject: [PATCH 206/775] tar: Factor out helper to write default directory Prep for ensuring we create all parent dirs to aid containers/storage. --- lib/src/tar/export.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index f7ee3b384..5e8c0624a 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -69,6 +69,18 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { } } + /// Add a directory entry with default permissions (root/root 0755) + fn append_default_dir(&mut self, path: &Utf8Path) -> Result<()> { + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Directory); + h.set_uid(0); + h.set_gid(0); + h.set_mode(0o755); + h.set_size(0); + self.out.append_data(&mut h, &path, &mut std::io::empty())?; + Ok(()) + } + /// Write the initial directory structure. fn write_initial_directories(&mut self) -> Result<()> { if self.wrote_initdirs { @@ -77,25 +89,13 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.wrote_initdirs = true; // Object subdirectories for d in 0..0xFF { - let mut h = tar::Header::new_gnu(); - h.set_entry_type(tar::EntryType::Directory); - h.set_uid(0); - h.set_gid(0); - h.set_mode(0o755); - h.set_size(0); - let path = format!("{}/repo/objects/{:02x}", OSTREEDIR, d); - self.out.append_data(&mut h, &path, &mut std::io::empty())?; + let path: Utf8PathBuf = format!("{}/repo/objects/{:02x}", OSTREEDIR, d).into(); + self.append_default_dir(&path)?; } // The special `repo/xattrs` directory used only in our tar serialization. - let mut h = tar::Header::new_gnu(); - h.set_entry_type(tar::EntryType::Directory); - h.set_uid(0); - h.set_gid(0); - h.set_mode(0o755); - h.set_size(0); - let path = format!("{}/repo/xattrs", OSTREEDIR); - self.out.append_data(&mut h, &path, &mut std::io::empty())?; + let path: Utf8PathBuf = format!("{}/repo/xattrs", OSTREEDIR).into(); + self.append_default_dir(&path)?; Ok(()) } From 61165be31388166ef6ed432b5186abe540cd6418 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 29 Nov 2021 10:12:17 -0500 Subject: [PATCH 207/775] tar/export: Create all parent directories too This is really the standard/expected thing to do. 
The current containers/storage stack handles this, but the experimental chunked back end did not until https://github.com/containers/storage/pull/1072/commits/501611fd510f3406aab3acf25b18545d321ebb79 --- lib/src/tar/export.rs | 17 ++++++++++++++++- lib/tests/it/main.rs | 15 +++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 5e8c0624a..bcc21cd33 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -87,9 +87,24 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { return Ok(()); } self.wrote_initdirs = true; + + let objdir: Utf8PathBuf = format!("{}/repo/objects", OSTREEDIR).into(); + // Add all parent directories + let parent_dirs = { + let mut parts: Vec<_> = objdir.ancestors().collect(); + parts.reverse(); + parts + }; + for path in parent_dirs { + match path.as_str() { + "/" | "" => continue, + _ => {} + } + self.append_default_dir(&path)?; + } // Object subdirectories for d in 0..0xFF { - let path: Utf8PathBuf = format!("{}/repo/objects/{:02x}", OSTREEDIR, d).into(); + let path: Utf8PathBuf = format!("{}/{:02x}", objdir, d).into(); self.append_default_dir(&path)?; } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index bcd59b181..db66b46b0 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -237,6 +237,21 @@ async fn test_tar_import_signed() -> Result<()> { Ok(()) } +/// Validate basic structure of the tar export. +/// Right now just checks the first entry is `sysroot` with mode 0755. +#[test] +fn test_tar_export_structure() -> Result<()> { + let fixture = Fixture::new()?; + let src_tar = initial_export(&fixture)?; + let src_tar = std::io::BufReader::new(std::fs::File::open(&src_tar)?); + let mut src_tar = tar::Archive::new(src_tar); + let first = src_tar.entries()?.next().unwrap()?; + let firstpath = first.path()?; + assert_eq!(firstpath.to_str().unwrap(), "sysroot"); + assert_eq!(first.header().mode()?, 0o755); + Ok(()) +} + #[tokio::test] async fn test_tar_import_export() -> Result<()> { let fixture = Fixture::new()?; From 017b27de3775b8be94660a0ee847dce44b93e43c Mon Sep 17 00:00:00 2001 From: Jens Petersen Date: Wed, 24 Nov 2021 14:53:14 +0800 Subject: [PATCH 208/775] readme: fix the broken container/store docs.rs url --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6b493bd96..d0ade6b12 100644 --- a/README.md +++ b/README.md @@ -170,7 +170,7 @@ To parse and generate these strings, see [`OstreeImageReference`]. ### Layering A key feature of container images is support for layering. This functionality is handled -via a separate [container/store](https://docs.rs/ostree_ext/latest/container/store/) module. +via a separate [container/store](https://docs.rs/ostree_ext/latest/ostree_ext/container/store/) module. These APIs are also exposed via the CLI: From 3f70bc9e139ce135678e458b886fdbac7725fc31 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 30 Nov 2021 09:24:28 -0500 Subject: [PATCH 209/775] container/encapsulate: Be compatible with Rust 1.54 Since that's what's in CentOS8 at the moment. The `From` impl on `HashMap` for tuples is too new. 
--- lib/src/container/encapsulate.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 2f73bc916..882af7af2 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -7,7 +7,7 @@ use anyhow::Context; use fn_error_context::context; use gio::glib; use ostree::gio; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::path::Path; use tracing::{instrument, Level}; @@ -85,7 +85,8 @@ fn build_oci( }; let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, Some(compression))?; - let annos = [(BLOB_OSTREE_ANNOTATION.to_string(), "true".to_string())]; + let mut annos = HashMap::new(); + annos.insert(BLOB_OSTREE_ANNOTATION.to_string(), "true".to_string()); writer.push_layer_annotated(rootfs_blob, Some(annos)); writer.complete()?; From 6240a19394e36d7d247695fb30c11dce410206b1 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 30 Nov 2021 10:21:00 -0500 Subject: [PATCH 210/775] Release 0.5.1 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index bc928136a..302b735cf 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.5.0" +version = "0.5.1" [dependencies] anyhow = "1.0" From a49b2e70b06a56b1eb957f34e1fab297920d1b8b Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Tue, 30 Nov 2021 15:14:19 +0000 Subject: [PATCH 211/775] ci: add GH workflow for Minimum Supported Rust Version (MSRV) This adds a new CI job to make sure the code is building fine under a given MSRV. It mostly helps make sure we stay compatible with system toolchains shipped by distros (e.g. RHEL8). This keeps using system dependencies from FCOS buildroot (notably, a fresh ostree), but it drops the installed toolchain in favor of a CI-pinned one.
--- .github/workflows/rust.yml | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 5ad39eac0..d09a4e6e6 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -14,12 +14,13 @@ on: env: CARGO_TERM_COLOR: always + # Minimum supported Rust version (MSRV) + ACTION_MSRV_TOOLCHAIN: 1.54.0 jobs: build: runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel - steps: - uses: actions/checkout@v2 - name: Install deps run: ./ci/installdeps.sh @@ -33,3 +34,23 @@ jobs: run: cargo test --no-run - name: Run tests run: cargo test -- --nocapture --quiet + build-minimum-toolchain: + name: "Build, minimum supported toolchain (MSRV)" + runs-on: ubuntu-latest + container: quay.io/coreos-assembler/fcos-buildroot:testing-devel + steps: + - name: Checkout repository + uses: actions/checkout@v2 + - name: Install deps + run: ./ci/installdeps.sh + - name: Remove system Rust toolchain + run: dnf remove -y rust cargo + - name: Install toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ env['ACTION_MSRV_TOOLCHAIN'] }} + default: true + - name: Cache Dependencies + uses: Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 + - name: cargo build (release) + run: cargo build --release From c9e23674665b7ce1825def13f46138f0dccc0bd2 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Wed, 1 Dec 2021 10:43:39 +0000 Subject: [PATCH 212/775] lib: fix new clippy warnings This fixes the following warnings highlighted by clippy: * https://rust-lang.github.io/rust-clippy/master/index.html#from_over_into * https://rust-lang.github.io/rust-clippy/master/index.html#needless_borrow --- lib/src/cli.rs | 3 ++- lib/src/tar/export.rs | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 84d359b2f..e6cbaee95 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -238,6 +238,7 @@ enum Opt { ImaSign(ImaSignOpts), } +#[allow(clippy::from_over_into)] impl Into<ostree_container::store::ImageProxyConfig> for ContainerProxyOpts { fn into(self) -> ostree_container::store::ImageProxyConfig { ostree_container::store::ImageProxyConfig { @@ -346,7 +347,7 @@ async fn container_export( cmd, }; let opts = Some(Default::default()); - let pushed = crate::container::encapsulate(repo, rev, &config, opts, &imgref).await?; + let pushed = crate::container::encapsulate(repo, rev, &config, opts, imgref).await?; println!("{}", pushed); Ok(()) } diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index bcc21cd33..4db0522cb 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -100,7 +100,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { "/" | "" => continue, _ => {} } - self.append_default_dir(&path)?; + self.append_default_dir(path)?; } // Object subdirectories for d in 0..0xFF { From 739e0d3f0d54ba660c8eb14a87424b90c7505847 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Wed, 1 Dec 2021 10:25:41 +0000 Subject: [PATCH 213/775] ci: add GH workflow for linting This adds a new CI job dedicated to linting, which includes steps for rustfmt and clippy.
--- .github/workflows/rust.yml | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index d09a4e6e6..8cc6345a8 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -16,6 +16,8 @@ env: CARGO_TERM_COLOR: always # Minimum supported Rust version (MSRV) ACTION_MSRV_TOOLCHAIN: 1.54.0 + # Pinned toolchain for linting + ACTION_LINTS_TOOLCHAIN: 1.56.0 jobs: build: @@ -25,8 +27,6 @@ jobs: - uses: actions/checkout@v2 - name: Install deps run: ./ci/installdeps.sh - - name: Format - run: cargo fmt -- --check -l # xref containers/containers-image-proxy-rs - name: Cache Dependencies uses: Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 @@ -54,3 +54,24 @@ jobs: uses: Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 - name: cargo build (release) run: cargo build --release + linting: + name: "Lints, pinned toolchain" + runs-on: ubuntu-latest + container: quay.io/coreos-assembler/fcos-buildroot:testing-devel + steps: + - name: Checkout repository + uses: actions/checkout@v2 + - name: Install deps + run: ./ci/installdeps.sh + - name: Remove system Rust toolchain + run: dnf remove -y rust cargo + - name: Install toolchain + uses: actions-rs/toolchain@v1 + with: + toolchain: ${{ env['ACTION_LINTS_TOOLCHAIN'] }} + default: true + components: rustfmt, clippy + - name: cargo fmt (check) + run: cargo fmt -- --check -l + - name: cargo clippy (warnings) + run: cargo clippy -- -D warnings From 9363bf80114bd9f2ff059a868c03146eb2db8703 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 7 Dec 2021 09:09:32 -0500 Subject: [PATCH 214/775] container/unencapsulate: Only create one proxy in default pull path A while ago I split up the APIs so we support - fetching just the manifest - fetching an image from an already fetched manifest - combining the two However, the latter API ended up creating two proxies, one to fetch the manifest and one for the rest. That's inefficent, so rework things so we have internal APIs that take a proxy and only create it once. Just noticed when reading the code for unrelated reasons. --- lib/src/container/unencapsulate.rs | 49 +++++++++++++++++++++--------- 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 979ab1899..6274fd624 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -87,18 +87,25 @@ impl AsyncRead for ProgressReader { } } -/// Download the manifest for a target image and its sha256 digest. -#[context("Fetching manifest")] -pub async fn fetch_manifest( +async fn fetch_manifest_impl( + proxy: &mut ImageProxy, imgref: &OstreeImageReference, ) -> Result<(oci_spec::image::ImageManifest, String)> { - let proxy = ImageProxy::new().await?; let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; let (digest, raw_manifest) = proxy.fetch_manifest(oi).await?; proxy.close_image(oi).await?; Ok((serde_json::from_slice(&raw_manifest)?, digest)) } +/// Download the manifest for a target image and its sha256 digest. 
+#[context("Fetching manifest")] +pub async fn fetch_manifest( + imgref: &OstreeImageReference, +) -> Result<(oci_spec::image::ImageManifest, String)> { + let mut proxy = ImageProxy::new().await?; + fetch_manifest_impl(&mut proxy, imgref).await +} + /// The result of an import operation #[derive(Debug)] pub struct Import { @@ -137,8 +144,10 @@ pub async fn unencapsulate( imgref: &OstreeImageReference, options: Option<UnencapsulateOptions>, ) -> Result<Import> { - let (manifest, image_digest) = fetch_manifest(imgref).await?; - let ostree_commit = unencapsulate_from_manifest(repo, imgref, &manifest, options).await?; + let mut proxy = ImageProxy::new().await?; + let (manifest, image_digest) = fetch_manifest_impl(&mut proxy, imgref).await?; + let ostree_commit = + unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, &manifest, options).await?; Ok(Import { ostree_commit, image_digest, @@ -177,11 +186,9 @@ pub(crate) async fn fetch_layer_decompress<'a>( Ok((blob, driver)) } -/// Fetch a container image using an in-memory manifest and import its embedded OSTree commit. -#[context("Importing {}", imgref)] -#[instrument(skip(repo, options, manifest))] -pub async fn unencapsulate_from_manifest( +async fn unencapsulate_from_manifest_impl( repo: &ostree::Repo, + proxy: &mut ImageProxy, imgref: &OstreeImageReference, manifest: &oci_spec::image::ImageManifest, options: Option<UnencapsulateOptions>, ) -> Result<String> { @@ -199,9 +206,8 @@ layer.digest().as_str(), layer.size() ); - let mut proxy = ImageProxy::new().await?; let oi = proxy.open_image(&imgref.imgref.to_string()).await?; - let (blob, driver) = fetch_layer_decompress(&mut proxy, &oi, layer).await?; + let (blob, driver) = fetch_layer_decompress(proxy, &oi, layer).await?; let blob = ProgressReader { reader: blob, progress: options.progress, @@ -215,8 +221,23 @@ let (import, driver) = tokio::join!(import, driver); driver?; let ostree_commit = import.with_context(|| format!("Parsing blob {}", layer.digest()))?; - // FIXME write ostree commit after proxy finalization - proxy.finalize().await?; + event!(Level::DEBUG, "created commit {}", ostree_commit); Ok(ostree_commit) } + +/// Fetch a container image using an in-memory manifest and import its embedded OSTree commit. +#[context("Importing {}", imgref)] +#[instrument(skip(repo, options, manifest))] +pub async fn unencapsulate_from_manifest( + repo: &ostree::Repo, + imgref: &OstreeImageReference, + manifest: &oci_spec::image::ImageManifest, + options: Option<UnencapsulateOptions>, +) -> Result<String> { + let mut proxy = ImageProxy::new().await?; + let r = unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, manifest, options).await?; + // FIXME write ostree commit after proxy finalization + proxy.finalize().await?; + Ok(r) +} From 495d202e84d76fd15413cda0b3734d35367fdaca Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 7 Dec 2021 09:50:34 -0500 Subject: [PATCH 215/775] Add some missing `#[derive(Debug)]` Hit this when I wanted to add `dbg!`. --- lib/src/container/store.rs | 2 ++ lib/src/container/unencapsulate.rs | 1 + lib/src/lib.rs | 1 + lib/src/tar/write.rs | 1 + 4 files changed, 5 insertions(+) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index c973f270c..e852b0b38 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -81,6 +81,7 @@ impl LayeredImageState { } /// Context for importing a container image.
+#[derive(Debug)] pub struct LayeredImageImporter { repo: ostree::Repo, proxy: ImageProxy, @@ -90,6 +91,7 @@ pub struct LayeredImageImporter { } /// Result of invoking [`LayeredImageImporter::prepare`]. +#[derive(Debug)] pub enum PrepareResult { /// The image reference is already present; the contained string is the OSTree commit. AlreadyPresent(LayeredImageState), diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 6274fd624..4ccf5e57b 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -51,6 +51,7 @@ type Progress = tokio::sync::watch::Sender<u64>; /// A read wrapper that updates the download progress. #[pin_project::pin_project] +#[derive(Debug)] struct ProgressReader<T> { #[pin] reader: T, diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 88793b1b4..496be3166 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -5,6 +5,7 @@ //! written in Rust. #![deny(missing_docs)] +#![deny(missing_debug_implementations)] // Good defaults #![forbid(unused_must_use)] #![deny(unsafe_code)] diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index 578dc710f..8af16cb80 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -36,6 +36,7 @@ pub struct WriteTarOptions { /// /// This includes some basic data on the number of files that were filtered /// out because they were not in `/usr`. +#[derive(Debug, Default)] pub struct WriteTarResult { /// The resulting OSTree commit SHA-256. pub commit: String, From 685ac081b09496922bfdf085d5520ef4d9c08c1a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 7 Dec 2021 11:20:47 -0500 Subject: [PATCH 216/775] Drop unused `bytes` dependency We aren't doing HTTP directly anymore. --- lib/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 302b735cf..314706894 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -12,7 +12,6 @@ version = "0.5.1" anyhow = "1.0" containers-image-proxy = "0.3" async-compression = { version = "0.3", features = ["gzip", "tokio"] } -bytes = "1.0.1" bitflags = "1" camino = "1.0.4" cjson = "0.1.1" From 21da3ca59ca9eaf6f354f2b09f700e36f7bbcd0a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 7 Dec 2021 11:30:41 -0500 Subject: [PATCH 217/775] lib: Also add `unreachable_pub` to opted-in lint And link to the list. I plan to cargo cult this elsewhere. --- lib/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 496be3166..0831d57b4 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -4,9 +4,10 @@ //! and the Rust bindings to it, adding new functionality //! written in Rust. +// See https://doc.rust-lang.org/rustc/lints/listing/allowed-by-default.html #![deny(missing_docs)] #![deny(missing_debug_implementations)] -// Good defaults +#![deny(unreachable_pub)] #![forbid(unused_must_use)] #![deny(unsafe_code)] #![cfg_attr(feature = "dox", feature(doc_cfg))] From 9fa2046b9240dc7b0b6d3507f93fc5cedc85b32f Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Wed, 8 Dec 2021 09:29:30 +0000 Subject: [PATCH 218/775] lib/cargo: update stale dependencies This refreshes `nix` and `phf` dependencies, making sure everything is up-to-date.
--- lib/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 314706894..1d1a980d6 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -23,13 +23,13 @@ hex = "0.4.3" indicatif = "0.16.0" lazy_static = "1.4.0" libc = "0.2.92" -nix = "0.22.0" +nix = "0.23" oci-spec = "0.5.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" ostree = { features = ["v2021_5"], version = "0.13.3" } -phf = { features = ["macros"], version = "0.9.0" } +phf = { features = ["macros"], version = "0.10" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" From bc596aefb1c048c58f5add825ae220a12371e984 Mon Sep 17 00:00:00 2001 From: Matthew Kenigsberg Date: Wed, 8 Dec 2021 14:51:48 -0600 Subject: [PATCH 219/775] cli: Expose certificate_directory Exposes the corresponding options from containers-image-proxy and skopeo Also changes authfile type from String to PathBuf for consistency Helps https://github.com/ostreedev/ostree-rs-ext/issues/121 Depends https://github.com/containers/containers-image-proxy-rs/pull/22 --- lib/src/cli.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index e6cbaee95..b5b0f187d 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -10,6 +10,7 @@ use ostree::{gio, glib}; use std::collections::BTreeMap; use std::convert::TryFrom; use std::ffi::OsString; +use std::path::PathBuf; use structopt::StructOpt; use crate::container as ostree_container; @@ -130,7 +131,12 @@ enum ContainerOpts { struct ContainerProxyOpts { #[structopt(long)] /// Path to Docker-formatted authentication file. - authfile: Option, + authfile: Option, + + #[structopt(long)] + /// Directory with certificates (*.crt, *.cert, *.key) used to connect to registry + /// Equivalent to `skopeo --cert-dir` + cert_dir: Option, #[structopt(long)] /// Skip TLS verification. @@ -243,6 +249,7 @@ impl Into for ContainerProxyOpts { fn into(self) -> ostree_container::store::ImageProxyConfig { ostree_container::store::ImageProxyConfig { authfile: self.authfile, + certificate_directory: self.cert_dir, insecure_skip_tls_verification: Some(self.insecure_skip_tls_verification), ..Default::default() } From 204566b92f6cf162b3aabd63c1f8542d602881df Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 9 Dec 2021 16:27:35 -0500 Subject: [PATCH 220/775] Use containers-image-proxy from git --- lib/Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 1d1a980d6..23b05a4f1 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,7 +10,9 @@ version = "0.5.1" [dependencies] anyhow = "1.0" -containers-image-proxy = "0.3" +# containers-image-proxy = "0.3" +containers-image-proxy = { git = "https://github.com/containers/containers-image-proxy-rs" } + async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" From 63f342cb0229f28b1ea446e446707c8e2359cf2e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 9 Dec 2021 16:35:55 -0500 Subject: [PATCH 221/775] cli: Suppress clippy enum variant warning It's not important here. 
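For readers unfamiliar with the lint: `clippy::large_enum_variant` fires when a single variant dominates the size of the whole enum, because an enum value always occupies the size of its largest variant. A minimal illustration (hypothetical types, not from this crate; clippy's usual suggestion is to box the large variant, which isn't worth it for a CLI options enum constructed exactly once):

```
#[allow(dead_code)]
#[allow(clippy::large_enum_variant)] // without this, clippy suggests Large(Box<[u8; 1024]>)
enum Example {
    Small(u8),
    Large([u8; 1024]), // every Example value now occupies ~1KiB
}

fn main() {
    // The enum is as big as its largest variant (plus discriminant/padding).
    println!("{}", std::mem::size_of::<Example>());
}
```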
---
 lib/src/cli.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index b5b0f187d..9f5a9317a 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -235,6 +235,7 @@ struct ImaSignOpts {
 #[derive(Debug, StructOpt)]
 #[structopt(name = "ostree-ext")]
 #[structopt(rename_all = "kebab-case")]
+#[allow(clippy::large_enum_variant)]
 enum Opt {
     /// Import and export to tar
     Tar(TarOpts),

From ccac015db516dfe0971c3691cf462d9f4791b2f7 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 9 Dec 2021 18:02:54 -0500
Subject: [PATCH 222/775] container/deploy: Add error context

On general principle to help pin down errors.
---
 lib/src/container/deploy.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs
index 020657e52..39b2b688a 100644
--- a/lib/src/container/deploy.rs
+++ b/lib/src/container/deploy.rs
@@ -3,6 +3,7 @@
 use super::OstreeImageReference;
 use crate::container::store::PrepareResult;
 use anyhow::Result;
+use fn_error_context::context;
 use ostree::glib;
 
 /// The key in the OSTree origin which holds a serialized [`super::OstreeImageReference`].
@@ -30,6 +31,7 @@ pub struct DeployOpts<'a> {
 /// Write a container image to an OSTree deployment.
 ///
 /// This API is currently intended for only an initial deployment.
+#[context("Performing deployment")]
 pub async fn deploy(
     sysroot: &ostree::Sysroot,
     stateroot: &str,

From a27dac83831297a6e83bd25c5b6b1b842249ad4d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 9 Dec 2021 18:11:48 -0500
Subject: [PATCH 223/775] containers: Better handle errors from worker and/or driver

I was seeing this in a `cosa build`:

```
+ rpm-ostree ex-container image deploy --imgref ostree-unverified-image:oci-archive:/var/srv/walters/builds/rhcos-master/builds/410.84.202112092014-0-1/x86_64/rhcos-410.84.202112092014-0-ostree.x86_64.ociarchive --stateroot rhcos --sysroot /tmp/rootfs --karg=random.trust_cpu=on --karg=console=tty0 --karg=console=ttyS0,115200n8 --karg=ignition.platform.id=qemu '--karg=$ignition_firstboot'
error: Performing deployment: remote error: write |1: broken pipe
```

which is not useful.

This is really a brutal hack around the fact that an error can occur
on either our side or in the proxy. But if an error occurs on our
side, then we will close the pipe, which will *also* cause the proxy
to error out.

What we really want is for the proxy to tell us when it got an error
from us closing the pipe. Or, we could store that state on our side.
Both are slightly tricky, so we have this (again) hacky thing where we
just search for `broken pipe` in the error text.

Or to restate all of the above - what this function does is check to
see if the worker function had an error *and* if the proxy had an
error, but if the proxy's error ends in `broken pipe` then it means
the only real error is from the worker.
Now:

```
+ rpm-ostree ex-container image deploy --imgref ostree-unverified-image:oci-archive:/var/srv/walters/builds/rhcos-master/builds/410.84.202112092014-0-1/x86_64/rhcos-410.84.202112092014-0-ostree.x86_64.ociarchive --stateroot rhcos --sysroot /tmp/rootfs --karg=random.trust_cpu=on --karg=console=tty0 --karg=console=ttyS0,115200n8 --karg=ignition.platform.id=qemu '--karg=$ignition_firstboot'
error: Performing deployment: Parsing blob sha256:9448e2c9ad473c7d63d7d7789eadd28e5ae72f37eb8a1c4901b7bd76764e9bd0: object 4f/5cc466c863110ecd782153cc3a126ba8593d531dde621539a2d1a290b6482b.file: Processing content object 4f5cc466c863110ecd782153cc3a126ba8593d531dde621539a2d1a290b6482b: Writing content object: Corrupted file object; checksum expected='4f5cc466c863110ecd782153cc3a126ba8593d531dde621539a2d1a290b6482b' actual='63e8321f6ed6f189d37d98d61e782e6e8c9031103c97c983c696de6ca42702f4'
```

(But why is that object corrupted? Don't know yet, that's an exciting
new problem!)
---
 lib/src/container/store.rs         | 13 +++++-----
 lib/src/container/unencapsulate.rs | 41 +++++++++++++++++++++++++++---
 2 files changed, 44 insertions(+), 10 deletions(-)

diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index e852b0b38..28525f9ff 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -279,10 +279,9 @@ impl LayeredImageImporter {
             )
             .await?;
             let importer = crate::tar::import_tar(&self.repo, blob, None);
-            let (commit, driver) = tokio::join!(importer, driver);
-            driver?;
-            let commit =
-                commit.with_context(|| format!("Parsing blob {}", base_layer_ref.digest()))?;
+            let commit = super::unencapsulate::join_fetch(importer, driver)
+                .await
+                .with_context(|| format!("Parsing blob {}", base_layer_ref.digest()))?;
             // TODO support ref writing in tar import
             self.repo.set_ref_immediate(
                 None,
@@ -314,9 +313,9 @@ impl LayeredImageImporter {
             };
             let w = crate::tar::write_tar(&self.repo, blob, layer.ostree_ref.as_str(), Some(opts));
-            let (r, driver) = tokio::join!(w, driver);
-            let r = r.with_context(|| format!("Parsing layer blob {}", layer.digest()))?;
-            driver?;
+            let r = super::unencapsulate::join_fetch(w, driver)
+                .await
+                .with_context(|| format!("Parsing layer blob {}", layer.digest()))?;
             layer_commits.push(r.commit);
             if !r.filtered.is_empty() {
                 let filtered = HashMap::from_iter(r.filtered.into_iter());
diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs
index 4ccf5e57b..880ee5220 100644
--- a/lib/src/container/unencapsulate.rs
+++ b/lib/src/container/unencapsulate.rs
@@ -130,6 +130,41 @@ fn require_one_layer_blob(manifest: &oci_image::ImageManifest) -> Result<&oci_image::Descriptor> {
     }
 }
 
+/// Use this to process potential errors from a worker and a driver.
+/// This is really a brutal hack around the fact that an error can occur
+/// on either our side or in the proxy. But if an error occurs on our
+/// side, then we will close the pipe, which will *also* cause the proxy
+/// to error out.
+///
+/// What we really want is for the proxy to tell us when it got an
+/// error from us closing the pipe. Or, we could store that state
+/// on our side. Both are slightly tricky, so we have this (again)
+/// hacky thing where we just search for `broken pipe` in the error text.
+///
+/// Or to restate all of the above - what this function does is check
+/// to see if the worker function had an error *and* if the proxy
+/// had an error, but if the proxy's error ends in `broken pipe`
+/// then it means the only real error is from the worker.
+pub(crate) async fn join_fetch( + worker: impl Future>, + driver: impl Future>, +) -> Result { + let (worker, driver) = tokio::join!(worker, driver); + match (worker, driver) { + (Ok(t), Ok(())) => Ok(t), + (Err(worker), Err(driver)) => { + let text = driver.root_cause().to_string(); + if text.ends_with("broken pipe") { + Err(worker) + } else { + Err(worker.context(format!("proxy failure: {} and client error", text))) + } + } + (Ok(_), Err(driver)) => Err(driver), + (Err(worker), Ok(())) => Err(worker), + } +} + /// Configuration for container fetches. #[derive(Debug, Default)] pub struct UnencapsulateOptions { @@ -219,9 +254,9 @@ async fn unencapsulate_from_manifest_impl( SignatureSource::ContainerPolicy | SignatureSource::ContainerPolicyAllowInsecure => {} } let import = crate::tar::import_tar(repo, blob, Some(taropts)); - let (import, driver) = tokio::join!(import, driver); - driver?; - let ostree_commit = import.with_context(|| format!("Parsing blob {}", layer.digest()))?; + let ostree_commit = join_fetch(import, driver) + .await + .with_context(|| format!("Parsing blob {}", layer.digest()))?; event!(Level::DEBUG, "created commit {}", ostree_commit); Ok(ostree_commit) From 727fd25856cdde028c0913288b1565470920fb63 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 6 Dec 2021 16:19:49 -0500 Subject: [PATCH 224/775] container/store: Also parse and save image configuration Prep for using this for copies. --- lib/src/container/store.rs | 20 +++++++++++++++++++- lib/tests/it/main.rs | 15 +++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 28525f9ff..c97586a3c 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -31,6 +31,8 @@ const IMAGE_PREFIX: &str = "ostree/container/image"; const META_MANIFEST_DIGEST: &str = "ostree.manifest-digest"; /// The key injected into the merge commit with the manifest serialized as JSON. const META_MANIFEST: &str = "ostree.manifest"; +/// The key injected into the merge commit with the image configuration serialized as JSON. +const META_CONFIG: &str = "ostree.container.image-config"; /// Value of type `a{sa{su}}` containing number of filtered out files pub const META_FILTERED: &str = "ostree.tar-filtered"; /// The type used to store content filtering information with `META_FILTERED`. @@ -128,6 +130,8 @@ pub struct PreparedImport { pub manifest_digest: String, /// The deserialized manifest. pub manifest: oci_image::ImageManifest, + /// The deserialized configuration. + pub config: Option, /// The previously stored manifest digest. pub previous_manifest_digest: Option, /// The previously stored image ID. @@ -200,6 +204,7 @@ impl LayeredImageImporter { /// Determine if there is a new manifest, and if so return its digest. #[context("Fetching manifest")] pub async fn prepare(&mut self) -> Result { + let proxy_023 = self.proxy.get_0_2_3(); match &self.imgref.sigverify { SignatureSource::ContainerPolicy if skopeo::container_policy_is_default_insecure()? 
=> {
                 return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage"));
@@ -213,7 +218,8 @@ impl LayeredImageImporter {
         }
         let (manifest_digest, manifest_bytes) = self.proxy.fetch_manifest(&self.proxy_img).await?;
-        let manifest: oci_image::ImageManifest = serde_json::from_slice(&manifest_bytes)?;
+        let manifest: oci_image::ImageManifest =
+            serde_json::from_slice(&manifest_bytes).context("Parsing image manifest")?;
         let new_imageid = manifest.config().digest().as_str();
 
         // Query for previous stored state
@@ -239,6 +245,15 @@ impl LayeredImageImporter {
             (None, None)
         };
 
+        let config = if let Some(proxy) = proxy_023 {
+            let config_bytes = proxy.fetch_config(&self.proxy_img).await?;
+            let config: oci_image::ImageConfiguration =
+                serde_json::from_slice(&config_bytes).context("Parsing image configuration")?;
+            Some(config)
+        } else {
+            None
+        };
+
         let mut layers = manifest.layers().iter().cloned();
         // We require a base layer.
         let base_layer = layers.next().ok_or_else(|| anyhow!("No layers found"))?;
@@ -252,6 +267,7 @@ impl LayeredImageImporter {
         let imp = PreparedImport {
             manifest,
             manifest_digest,
+            config,
             previous_manifest_digest,
             previous_imageid,
             base_layer,
@@ -329,9 +345,11 @@ impl LayeredImageImporter {
         tracing::debug!("finalized proxy");
 
         let serialized_manifest = serde_json::to_string(&import.manifest)?;
+        let serialized_config = serde_json::to_string(&import.config)?;
         let mut metadata = HashMap::new();
         metadata.insert(META_MANIFEST_DIGEST, import.manifest_digest.to_variant());
         metadata.insert(META_MANIFEST, serialized_manifest.to_variant());
+        metadata.insert(META_CONFIG, serialized_config.to_variant());
         metadata.insert(
             "ostree.importer.version",
             env!("CARGO_PKG_VERSION").to_variant(),
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index db66b46b0..d65673021 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -476,6 +476,21 @@ async fn test_container_write_derive() -> Result<()> {
     assert!(digest.starts_with("sha256:"));
     assert_eq!(digest, expected_digest);
 
+    // For now we need to make this test dynamic
+    {
+        let proxy = containers_image_proxy::ImageProxy::new().await?;
+        let proxy = proxy.get_0_2_3();
+        if proxy.is_some() {
+            let commit_meta = &imported_commit.child_value(0);
+            let commit_meta = glib::VariantDict::new(Some(commit_meta));
+            let config = commit_meta
+                .lookup::<String>("ostree.container.image-config")?
+                .unwrap();
+            let config: oci_spec::image::ImageConfiguration = serde_json::from_str(&config)?;
+            assert_eq!(config.os(), &oci_spec::image::Os::Linux);
+        }
+    }
+
     // Parse the commit and verify we pulled the derived content.
     bash!(
         "ostree --repo={repo} ls {r} /usr/share/anewfile",

From 4ea684618ac7d1827b77dcbfdb6a4b7ac1b1064d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 10 Dec 2021 15:36:38 -0500
Subject: [PATCH 225/775] container: Add history struct to oci writing, tweak history description

Prep for https://github.com/ostreedev/ostree-rs-ext/issues/69 where
we'll split up the input ostree commit into content-addressed blobs.

We want to inject something useful into the `history` in the image
config that describes each chunk, so add support for that into our
OCI writer.

Change the default description for the (currently single) layer to
include the commit subject, if present; otherwise the commit hash.
The description of the layer shouldn't change as this tool changes,
just as the input changes.
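As a sketch of the result (field name per the OCI image spec; the digest here is illustrative, not real), the exported image configuration gains one `history` entry per layer, e.g.:

```
"history": [
  {
    "created_by": "ostree export of commit 4a5c8b..."
  }
]
```

with the commit subject used as the description instead when one is present.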
(Side note; today rpm-ostree isn't adding a subject description, but hey, maybe someone else is) --- lib/src/container/encapsulate.rs | 14 ++++++++++- lib/src/container/ociwriter.rs | 43 +++++++++++++++----------------- 2 files changed, 33 insertions(+), 24 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 882af7af2..35c6a46eb 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -57,6 +57,13 @@ fn build_oci( let commit = repo.resolve_rev(rev, false)?.unwrap(); let commit = commit.as_str(); let (commit_v, _) = repo.load_commit(commit)?; + let commit_subject = commit_v.child_value(3); + let commit_subject = commit_subject.str().ok_or_else(|| { + anyhow::anyhow!( + "Corrupted commit {}; expecting string value for subject", + commit + ) + })?; let commit_meta = &commit_v.child_value(0); let commit_meta = glib::VariantDict::new(Some(commit_meta)); @@ -85,9 +92,14 @@ fn build_oci( }; let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, Some(compression))?; + let description = if commit_subject.is_empty() { + Cow::Owned(format!("ostree export of commit {}", commit)) + } else { + Cow::Borrowed(commit_subject) + }; let mut annos = HashMap::new(); annos.insert(BLOB_OSTREE_ANNOTATION.to_string(), "true".to_string()); - writer.push_layer_annotated(rootfs_blob, Some(annos)); + writer.push_layer_annotated(rootfs_blob, Some(annos), &description); writer.complete()?; Ok(ImageReference { diff --git a/lib/src/container/ociwriter.rs b/lib/src/container/ociwriter.rs index d50cd6f2f..b2429af1b 100644 --- a/lib/src/container/ociwriter.rs +++ b/lib/src/container/ociwriter.rs @@ -77,6 +77,7 @@ pub(crate) struct OciWriter<'a> { cmd: Option>, layers: Vec<(oci_image::Descriptor, String)>, + history: Vec, } /// Write a serializable data (JSON) as an OCI blob @@ -102,6 +103,7 @@ impl<'a> OciWriter<'a> { config_annotations: Default::default(), manifest_annotations: Default::default(), layers: Vec::new(), + history: Vec::new(), cmd: None, }) } @@ -123,20 +125,11 @@ impl<'a> OciWriter<'a> { Ok(tar::Builder::new(self.create_raw_layer(c)?)) } + /// Add a layer to the top of the image stack. The firsh pushed layer becomes the root. #[allow(dead_code)] - /// Finish all I/O for a layer writer, and add it to the layers in the image. - pub(crate) fn finish_and_push_layer(&mut self, w: RawLayerWriter) -> Result<()> { - let w = w.complete()?; - self.push_layer(w); - Ok(()) - } - - /// Add a layer to the top of the image stack. - /// - /// The first pushed layer becomes the root. - pub(crate) fn push_layer(&mut self, layer: Layer) { - let v: Option> = None; - self.push_layer_annotated(layer, v); + pub(crate) fn push_layer(&mut self, layer: Layer, description: &str) { + let annotations: Option> = None; + self.push_layer_annotated(layer, annotations, description); } /// Add a layer to the top of the image stack with optional annotations. 
@@ -146,6 +139,7 @@ impl<'a> OciWriter<'a> { &mut self, layer: Layer, annotations: Option>>, + description: &str, ) { let mut builder = layer.descriptor().media_type(MediaType::ImageLayerGzip); if let Some(annotations) = annotations { @@ -153,6 +147,7 @@ impl<'a> OciWriter<'a> { } self.layers .push((builder.build().unwrap(), layer.uncompressed_sha256)); + self.history.push(description.to_string()); } pub(crate) fn set_cmd(&mut self, e: &[&str]) { @@ -201,20 +196,22 @@ impl<'a> OciWriter<'a> { } .build() .unwrap(); - let history = oci_image::HistoryBuilder::default() - .created_by(format!( - "created by {} {}", - env!("CARGO_PKG_NAME"), - env!("CARGO_PKG_VERSION") - )) - .build() - .unwrap(); + let history: Vec<_> = self + .history + .into_iter() + .map(|h| { + oci_image::HistoryBuilder::default() + .created_by(h) + .build() + .unwrap() + }) + .collect(); let config = oci_image::ImageConfigurationBuilder::default() .architecture(arch.clone()) .os(oci_image::Os::Linux) .config(ctrconfig) .rootfs(rootfs) - .history(vec![history]) + .history(history) .build() .unwrap(); let config_blob = write_json_blob(self.dir, &config, MediaType::ImageConfig)?; @@ -380,7 +377,7 @@ mod tests { root_layer.uncompressed_sha256, "349438e5faf763e8875b43de4d7101540ef4d865190336c2cc549a11f33f8d7c" ); - w.push_layer(root_layer); + w.push_layer(root_layer, "root"); w.complete()?; Ok(()) } From 27887c7e940e8ab652351de5f5cbfc607e994823 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 13 Dec 2021 18:24:44 -0500 Subject: [PATCH 226/775] container: Make layering more directly re-use unencapsulation This came out of some prep work on https://github.com/ostreedev/ostree-rs-ext/issues/69 Right now it's confusing, the layering code ended up re-implementing the "fetch and unpack tarball" logic from the unencapsulation path unnecessarily. I think it's much clearer if the layering path just calls down into the unencapsulation path first. Among other things this will also ensure we're honoring the image verification string. --- lib/src/container/store.rs | 25 +++++++++++++------------ lib/src/container/unencapsulate.rs | 18 ++++++++++++++---- 2 files changed, 27 insertions(+), 16 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 28525f9ff..2f159ba89 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -264,34 +264,35 @@ impl LayeredImageImporter { pub async fn import(self, import: Box) -> Result { let mut proxy = self.proxy; let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref); - let ostree_ref = ref_for_image(&target_imgref.imgref)?; + // First download the base image (if necessary) - we need the SELinux policy // there to label all following layers. let base_layer = import.base_layer; let base_commit = if let Some(c) = base_layer.commit { c } else { - let base_layer_ref = &base_layer.layer; - let (blob, driver) = super::unencapsulate::fetch_layer_decompress( + let base_commit = super::unencapsulate_from_manifest_impl( + &self.repo, &mut proxy, - &self.proxy_img, - &base_layer.layer, + target_imgref, + &import.manifest, + None, + true, ) .await?; - let importer = crate::tar::import_tar(&self.repo, blob, None); - let commit = super::unencapsulate::join_fetch(importer, driver) - .await - .with_context(|| format!("Parsing blob {}", base_layer_ref.digest()))?; - // TODO support ref writing in tar import + // Write the ostree ref for that single layer; TODO + // handle this as part of the overall transaction. 
self.repo.set_ref_immediate( None, base_layer.ostree_ref.as_str(), - Some(commit.as_str()), + Some(base_commit.as_str()), gio::NONE_CANCELLABLE, )?; - commit + base_commit }; + let ostree_ref = ref_for_image(&target_imgref.imgref)?; + let mut layer_commits = Vec::new(); let mut layer_filtered_content: MetaFilteredData = HashMap::new(); for layer in import.layers { diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 880ee5220..ebf8426ad 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -183,7 +183,8 @@ pub async fn unencapsulate( let mut proxy = ImageProxy::new().await?; let (manifest, image_digest) = fetch_manifest_impl(&mut proxy, imgref).await?; let ostree_commit = - unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, &manifest, options).await?; + unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, &manifest, options, false) + .await?; Ok(Import { ostree_commit, image_digest, @@ -222,12 +223,13 @@ pub(crate) async fn fetch_layer_decompress<'a>( Ok((blob, driver)) } -async fn unencapsulate_from_manifest_impl( +pub(crate) async fn unencapsulate_from_manifest_impl( repo: &ostree::Repo, proxy: &mut ImageProxy, imgref: &OstreeImageReference, manifest: &oci_spec::image::ImageManifest, options: Option, + ignore_layered: bool, ) -> Result { if matches!(imgref.sigverify, SignatureSource::ContainerPolicy) && skopeo::container_policy_is_default_insecure()? @@ -235,7 +237,14 @@ async fn unencapsulate_from_manifest_impl( return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); } let options = options.unwrap_or_default(); - let layer = require_one_layer_blob(manifest)?; + let layer = if ignore_layered { + manifest + .layers() + .get(0) + .ok_or_else(|| anyhow!("No layers in image"))? + } else { + require_one_layer_blob(manifest)? + }; event!( Level::DEBUG, "target blob digest:{} size: {}", @@ -272,7 +281,8 @@ pub async fn unencapsulate_from_manifest( options: Option, ) -> Result { let mut proxy = ImageProxy::new().await?; - let r = unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, manifest, options).await?; + let r = unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, manifest, options, false) + .await?; // FIXME write ostree commit after proxy finalization proxy.finalize().await?; Ok(r) From 835cd4eba36a08629f76b2d331a811e9f878f996 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Tue, 14 Dec 2021 11:08:20 +0000 Subject: [PATCH 227/775] lib/container: fix error prefix for invalid ostree imgref scheme This fixes a wrong and colliding error prefix, possibly coming from a copy-paste mistake. --- lib/src/container/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 39f21d344..066765462 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -182,7 +182,7 @@ impl TryFrom<&str> for OstreeImageReference { (SignatureSource::OstreeRemote(remote.to_string()), second) } o => { - return Err(anyhow!("Invalid signature source: {}", o)); + return Err(anyhow!("Invalid ostree image reference scheme: {}", o)); } }; let imgref = rest.deref().try_into()?; From 0570c50400dd1da07564b287dc69224c934d792a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 13 Dec 2021 20:34:51 -0500 Subject: [PATCH 228/775] containers: Only open image once I was doing some further reading of code and noticed we opened the image multiple times. 
I think that's not a big deal, we probably will reuse connections to the registry etc. internally. But fix it anyways. --- lib/src/container/store.rs | 1 + lib/src/container/unencapsulate.rs | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 2f159ba89..10caa9809 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -275,6 +275,7 @@ impl LayeredImageImporter { &self.repo, &mut proxy, target_imgref, + &self.proxy_img, &import.manifest, None, true, diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index ebf8426ad..0f728b7ad 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -181,10 +181,13 @@ pub async fn unencapsulate( options: Option, ) -> Result { let mut proxy = ImageProxy::new().await?; - let (manifest, image_digest) = fetch_manifest_impl(&mut proxy, imgref).await?; + let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; + let (image_digest, raw_manifest) = proxy.fetch_manifest(oi).await?; + let manifest = serde_json::from_slice(&raw_manifest)?; let ostree_commit = - unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, &manifest, options, false) + unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, oi, &manifest, options, false) .await?; + proxy.close_image(oi).await?; Ok(Import { ostree_commit, image_digest, @@ -227,6 +230,7 @@ pub(crate) async fn unencapsulate_from_manifest_impl( repo: &ostree::Repo, proxy: &mut ImageProxy, imgref: &OstreeImageReference, + oi: &containers_image_proxy::OpenedImage, manifest: &oci_spec::image::ImageManifest, options: Option, ignore_layered: bool, @@ -251,8 +255,7 @@ pub(crate) async fn unencapsulate_from_manifest_impl( layer.digest().as_str(), layer.size() ); - let oi = proxy.open_image(&imgref.imgref.to_string()).await?; - let (blob, driver) = fetch_layer_decompress(proxy, &oi, layer).await?; + let (blob, driver) = fetch_layer_decompress(proxy, oi, layer).await?; let blob = ProgressReader { reader: blob, progress: options.progress, @@ -281,8 +284,11 @@ pub async fn unencapsulate_from_manifest( options: Option, ) -> Result { let mut proxy = ImageProxy::new().await?; - let r = unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, manifest, options, false) - .await?; + let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; + let r = + unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, oi, manifest, options, false) + .await?; + proxy.close_image(oi).await?; // FIXME write ostree commit after proxy finalization proxy.finalize().await?; Ok(r) From 39d4b938cbf703ec3ad7074b72b374ca1cba272a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 Nov 2021 16:54:39 -0500 Subject: [PATCH 229/775] Use new `append_link()` API to handle long symlinks I hit this when exporting Fedora Silverblue, there are some long symlinks in there. 
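A minimal sketch of the new API (assumes tar 0.4.38 from the PR below; the path and target are made up). With `append_link`, a target longer than the classic 100-byte `linkname` header field is written via a GNU long-link extension instead of erroring:

```
fn main() -> std::io::Result<()> {
    let mut builder = tar::Builder::new(Vec::new());
    let mut h = tar::Header::new_gnu();
    h.set_entry_type(tar::EntryType::Symlink);
    h.set_size(0);
    // Longer than the 100 bytes available in the classic header field.
    let target = "a/".repeat(80) + "target";
    builder.append_link(&mut h, "usr/lib/example-symlink", target.as_str())?;
    builder.finish()
}
```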
Depends: https://github.com/alexcrichton/tar-rs/pull/273 Closes: https://github.com/ostreedev/ostree-rs-ext/issues/162 --- lib/Cargo.toml | 2 +- lib/src/tar/export.rs | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index bc928136a..ba4d88dd7 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -35,7 +35,7 @@ pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" structopt = "0.3.21" -tar = "0.4.33" +tar = "0.4.38" tempfile = "3.2.0" tokio = { features = ["full"], version = "1" } tokio-util = { features = ["io-util"], version = "0.6.9" } diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 2cfadda93..ff9721a6d 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -231,13 +231,11 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { .append_data(&mut h, &path, &mut instream) .with_context(|| format!("Writing regfile {}", checksum))?; } else { - h.set_size(0); - h.set_entry_type(tar::EntryType::Symlink); let context = || format!("Writing content symlink: {}", checksum); - h.set_link_name(meta.symlink_target().unwrap().as_str()) - .with_context(context)?; + h.set_entry_type(tar::EntryType::Symlink); + h.set_size(0); self.out - .append_data(&mut h, &path, &mut std::io::empty()) + .append_link(&mut h, &path, meta.symlink_target().unwrap().as_str()) .with_context(context)?; } } From 05f11ec7e2c6172d3ae8f8794753ce9a2c14db39 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 10 Dec 2021 09:08:20 -0500 Subject: [PATCH 230/775] tar/export: Write symlink targets literally Requires: https://github.com/alexcrichton/tar-rs/pull/274 And I'll just copy/paste the commit message from there, lightly edited: In https://github.com/ostreedev/ostree we generate a cryptographic checksum over files and symlinks, and directories. ostree does not currently perform any canonicalization on symlinks; we'll respect and honor whatever bytes we're provided as input, and replicate that on the target. We're using the Rust tar crate to do tar serialization, which has so far worked fine...except, I hit this corner case: ``` [root@cosa-devsh ~]# rpm -qf /usr/lib/systemd/systemd-sysv-install chkconfig-1.13-2.el8.x86_64 [root@cosa-devsh ~]# ll /usr/lib/systemd/systemd-sysv-install lrwxrwxrwx. 2 root root 24 Nov 29 18:08 /usr/lib/systemd/systemd-sysv-install -> ../../..//sbin/chkconfig [root@cosa-devsh ~]# ``` But, using `set_link_name` to write the tarball, we end up with the canonicalized path `../../../sbin/chkconfig` - i.e. without the double `//`. This breaks the checksum. Now, I am a bit tempted to change ostree to do canonicalization. But even if we did, I'd need to *exactly* match what tar-rs is doing. 
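Concretely, a small sketch (assumes tar 0.4.38 with `set_link_name_literal` from the PR referenced above; the target is the real one from the report):

```
fn main() -> std::io::Result<()> {
    let denormal = "../../..//sbin/chkconfig";
    let mut h = tar::Header::new_gnu();
    // set_link_name() runs the target through path handling, which can
    // collapse the doubled slash - changing the bytes ostree checksummed.
    // set_link_name_literal() stores the target byte-for-byte.
    h.set_link_name_literal(denormal)?;
    assert_eq!(h.link_name()?.unwrap().to_str(), Some(denormal));
    Ok(())
}
```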
(I may of course also try to change the rhel8 systemd package, but that's going to take a while to propagate and this corner case isn't the only one I'm sure) --- lib/src/tar/export.rs | 44 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index ff9721a6d..e7a15bd2e 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -56,6 +56,17 @@ fn xattrs_path(checksum: &str) -> Utf8PathBuf { format!("{}/repo/xattrs/{}", OSTREEDIR, checksum).into() } +/// Check for "denormal" symlinks which contain "//" +/// See https://github.com/fedora-sysv/chkconfig/pull/67 +/// [root@cosa-devsh ~]# rpm -qf /usr/lib/systemd/systemd-sysv-install +/// chkconfig-1.13-2.el8.x86_64 +/// [root@cosa-devsh ~]# ll /usr/lib/systemd/systemd-sysv-install +/// lrwxrwxrwx. 2 root root 24 Nov 29 18:08 /usr/lib/systemd/systemd-sysv-install -> ../../..//sbin/chkconfig +/// [root@cosa-devsh ~]# +fn symlink_is_denormal(target: &str) -> bool { + target.contains("//") +} + impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn new(repo: &'a ostree::Repo, out: &'a mut tar::Builder) -> Self { Self { @@ -231,12 +242,23 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { .append_data(&mut h, &path, &mut instream) .with_context(|| format!("Writing regfile {}", checksum))?; } else { + let target = meta.symlink_target().unwrap(); + let target = target.as_str(); let context = || format!("Writing content symlink: {}", checksum); - h.set_entry_type(tar::EntryType::Symlink); - h.set_size(0); - self.out - .append_link(&mut h, &path, meta.symlink_target().unwrap().as_str()) - .with_context(context)?; + // Handle //chkconfig, see above + if symlink_is_denormal(target) { + h.set_link_name_literal(meta.symlink_target().unwrap().as_str()) + .with_context(context)?; + self.out + .append_data(&mut h, &path, &mut std::io::empty()) + .with_context(context)?; + } else { + h.set_entry_type(tar::EntryType::Symlink); + h.set_size(0); + self.out + .append_link(&mut h, &path, target) + .with_context(context)?; + } } } @@ -338,4 +360,16 @@ mod tests { Utf8Path::new("./etc/blah") ); } + + #[test] + fn test_denormal_symlink() { + let normal = ["/", "/usr", "../usr/bin/blah"]; + let denormal = ["../../usr/sbin//chkconfig", "foo//bar/baz"]; + for path in normal { + assert!(!symlink_is_denormal(path)); + } + for path in denormal { + assert!(symlink_is_denormal(path)); + } + } } From 4c55c833e5011a58dc85ba6e32cab5026c32a069 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 15 Dec 2021 09:00:52 -0500 Subject: [PATCH 231/775] tar: Write directory hierarchy correctly I hit on the fact that we were missing `/tmp` in the exported container, and this is because we were actually missing writing directory entries. It only works because container runtimes will auto-create parent directories. But we clearly want to reflect the intended uid/gid/mode in the tar stream too, and we definitely want empty toplevel dirs like `/tmp`. The simple fix of emitting them in our current flow actually fails when trying to import into `containers/storage`, complaining about duplicate entries. And this is because the simple fix ends up writing a `sysroot` entry with two different modes (0755 and 0700). One or the other needs to win, let's just have it be 0755 for now, though it doesn't really matter for this. 
So to really make this work, rework the flow so that the tar stream looks like: - root directory - sysroot/ base structure - commit object - commit metadata - contents of root recursively *except* sysroot --- lib/Cargo.toml | 3 +- lib/src/tar/export.rs | 67 ++++++++++++++++++++++++++++++++----------- lib/tests/it/main.rs | 26 +++++++++++++++-- 3 files changed, 76 insertions(+), 20 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 83cb10efa..b9a3a6368 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -30,7 +30,8 @@ oci-spec = "0.5.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { features = ["v2021_5"], version = "0.13.3" } +# ostree = { features = ["v2021_5"], version = "0.13.3" } +ostree = { git = "https://github.com/ostreedev/ostree-rs", features = ["v2021_5"] } phf = { features = ["macros"], version = "0.10" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 85ed4194d..313de1f36 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -14,6 +14,8 @@ use std::borrow::Cow; use std::collections::HashSet; use std::io::BufReader; +// This is both special in the tar stream *and* it's in the ostree commit. +const SYSROOT: &str = "sysroot"; // This way the default ostree -> sysroot/ostree symlink works. const OSTREEDIR: &str = "sysroot/ostree"; @@ -129,12 +131,30 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn write_commit(&mut self, checksum: &str) -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; - self.write_initial_directories()?; - let (commit_v, _) = self.repo.load_commit(checksum)?; let commit_v = &commit_v; - self.append(ostree::ObjectType::Commit, checksum, commit_v)?; + let commit_bytes = commit_v.data_as_bytes(); + let commit_bytes = commit_bytes.try_as_aligned()?; + let commit = gv_commit!().cast(commit_bytes); + let commit = commit.to_tuple(); + let contents = &hex::encode(commit.6); + let metadata_checksum = &hex::encode(commit.7); + let metadata_v = self + .repo + .load_variant(ostree::ObjectType::DirMeta, metadata_checksum)?; + // Safety: We passed the correct variant type just above + let metadata = &ostree::DirMetaParsed::from_variant(&metadata_v).unwrap(); + let rootpath = Utf8Path::new("./"); + + // We need to write the root directory, before we write any objects. This should be the very + // first thing. + self.append_dir(rootpath, metadata)?; + + // Now, we create sysroot/ and everything under it + self.write_initial_directories()?; + + self.append(ostree::ObjectType::Commit, checksum, commit_v)?; if let Some(commitmeta) = self .repo .read_commit_detached_metadata(checksum, cancellable)? @@ -142,17 +162,11 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.append(ostree::ObjectType::CommitMeta, checksum, &commitmeta)?; } - let commit_v = commit_v.data_as_bytes(); - let commit_v = commit_v.try_as_aligned()?; - let commit = gv_commit!().cast(commit_v); - let commit = commit.to_tuple(); - let contents = &hex::encode(commit.6); - let metadata_checksum = &hex::encode(commit.7); - let metadata_v = self - .repo - .load_variant(ostree::ObjectType::DirMeta, metadata_checksum)?; + // The ostree dirmeta object for the root. self.append(ostree::ObjectType::DirMeta, metadata_checksum, &metadata_v)?; - self.append_dirtree(Utf8Path::new("./"), contents, cancellable)?; + + // Recurse and write everything else. 
+ self.append_dirtree(Utf8Path::new("./"), contents, true, cancellable)?; Ok(()) } @@ -280,11 +294,25 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { Ok((path, target_header)) } + /// Write a directory using the provided metadata. + fn append_dir(&mut self, dirpath: &Utf8Path, meta: &ostree::DirMetaParsed) -> Result<()> { + let mut header = tar::Header::new_gnu(); + header.set_entry_type(tar::EntryType::Directory); + header.set_size(0); + header.set_uid(meta.uid as u64); + header.set_gid(meta.gid as u64); + header.set_mode(meta.mode); + self.out + .append_data(&mut header, dirpath, std::io::empty())?; + Ok(()) + } + /// Write a dirtree object. fn append_dirtree>( &mut self, dirpath: &Utf8Path, checksum: &str, + is_root: bool, cancellable: Option<&C>, ) -> Result<()> { let v = &self @@ -320,19 +348,26 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { for item in dirs { let (name, contents_csum, meta_csum) = item.to_tuple(); let name = name.to_str(); - { + // Special hack because tar stream for containers can't have duplicates. + if is_root && name == SYSROOT { + continue; + } + let metadata = { hex::encode_to_slice(meta_csum, &mut hexbuf)?; let meta_csum = std::str::from_utf8(&hexbuf)?; let meta_v = &self .repo .load_variant(ostree::ObjectType::DirMeta, meta_csum)?; self.append(ostree::ObjectType::DirMeta, meta_csum, meta_v)?; - } + // Safety: We passed the correct variant type just above + ostree::DirMetaParsed::from_variant(meta_v).unwrap() + }; hex::encode_to_slice(contents_csum, &mut hexbuf)?; let dirtree_csum = std::str::from_utf8(&hexbuf)?; let subpath = &dirpath.join(name); let subpath = map_path(subpath); - self.append_dirtree(&*subpath, dirtree_csum, cancellable)?; + self.append_dir(&*subpath, &metadata)?; + self.append_dirtree(&*subpath, dirtree_csum, false, cancellable)?; } Ok(()) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index d65673021..2e8eeee8f 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -245,10 +245,30 @@ fn test_tar_export_structure() -> Result<()> { let src_tar = initial_export(&fixture)?; let src_tar = std::io::BufReader::new(std::fs::File::open(&src_tar)?); let mut src_tar = tar::Archive::new(src_tar); - let first = src_tar.entries()?.next().unwrap()?; + let mut entries = src_tar.entries()?; + // The first entry should be the root directory. + let first = entries.next().unwrap()?; let firstpath = first.path()?; - assert_eq!(firstpath.to_str().unwrap(), "sysroot"); - assert_eq!(first.header().mode()?, 0o755); + assert_eq!(firstpath.to_str().unwrap(), "./"); + assert_eq!(first.header().mode()?, libc::S_IFDIR | 0o755); + let next = entries.next().unwrap().unwrap(); + assert_eq!(next.path().unwrap().as_os_str(), "sysroot"); + // Verify we're injecting directories, fixes the absence of `/tmp` in our + // images for example. + entries + .map(|e| e.unwrap()) + .find(|entry| { + let header = entry.header(); + let path = entry.path().unwrap(); + if path.as_os_str() == "usr" { + assert_eq!(header.entry_type(), tar::EntryType::Directory); + assert_eq!(header.mode().unwrap(), libc::S_IFDIR | 0o755); + true + } else { + false + } + }) + .unwrap(); Ok(()) } From 0ec9e980d3274a3a8fe2f99d3fd253c8d03fb160 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Dec 2021 09:09:45 -0500 Subject: [PATCH 232/775] cli: Use `split_once` to parse `key=value` Motivated by dropping an `unwrap()` call. This is the example in the doc even! 
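For reference, the behavior being relied on (assertions in the style of the standard library's `str::split_once` docs):

```
fn main() {
    assert_eq!("cfg".split_once('='), None);
    assert_eq!("cfg=foo".split_once('='), Some(("cfg", "foo")));
    assert_eq!("cfg=foo=bar".split_once('='), Some(("cfg", "foo=bar")));
}
```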
I remembered this after seeing https://github.com/coreos/rpm-ostree/pull/3281 --- lib/src/cli.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 9f5a9317a..f6c4fcfdf 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -466,10 +466,8 @@ where let labels: Result> = labels .into_iter() .map(|l| { - let mut parts = l.splitn(2, '='); - let k = parts.next().unwrap(); - let v = parts - .next() + let (k, v) = l + .split_once('=') .ok_or_else(|| anyhow::anyhow!("Missing '=' in label {}", l))?; Ok((k.to_string(), v.to_string())) }) From 88e14fee7b4fe940ce7dbe9e3daa0213192706b5 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Dec 2021 10:59:12 -0500 Subject: [PATCH 233/775] container: Use more `split_once` Followup to https://github.com/ostreedev/ostree-rs-ext/pull/187/commits/0ec9e980d3274a3a8fe2f99d3fd253c8d03fb160 Motivated by dropping some `.unwrap()` calls. --- lib/src/container/mod.rs | 38 ++++++++++++++------------------------ 1 file changed, 14 insertions(+), 24 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 066765462..39445ba3e 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -100,12 +100,10 @@ impl TryFrom<&str> for ImageReference { type Error = anyhow::Error; fn try_from(value: &str) -> Result { - let mut parts = value.splitn(2, ':'); - let transport_name = parts.next().unwrap(); - let transport: Transport = transport_name.try_into()?; - let mut name = parts - .next() + let (transport_name, mut name) = value + .split_once(':') .ok_or_else(|| anyhow!("Missing ':' in {}", value))?; + let transport: Transport = transport_name.try_into()?; if name.is_empty() { return Err(anyhow!("Invalid empty name in {}", value)); } @@ -140,11 +138,8 @@ impl TryFrom<&str> for OstreeImageReference { type Error = anyhow::Error; fn try_from(value: &str) -> Result { - let mut parts = value.splitn(2, ':'); - // Safety: Split always returns at least one value. - let first = parts.next().unwrap(); - let second = parts - .next() + let (first, second) = value + .split_once(':') .ok_or_else(|| anyhow!("Missing ':' in {}", value))?; let (sigverify, rest) = match first { "ostree-image-signed" => (SignatureSource::ContainerPolicy, Cow::Borrowed(second)), @@ -159,11 +154,8 @@ impl TryFrom<&str> for OstreeImageReference { ), // This is a shorthand for ostree-remote-image with registry: "ostree-remote-registry" => { - let mut subparts = second.splitn(2, ':'); - // Safety: Split always returns at least one value. - let remote = subparts.next().unwrap(); - let rest = subparts - .next() + let (remote, rest) = second + .split_once(':') .ok_or_else(|| anyhow!("Missing second ':' in {}", value))?; ( SignatureSource::OstreeRemote(remote.to_string()), @@ -171,15 +163,13 @@ impl TryFrom<&str> for OstreeImageReference { ) } "ostree-remote-image" => { - let mut subparts = second.splitn(2, ':'); - // Safety: Split always returns at least one value. 
- let remote = subparts.next().unwrap(); - let second = Cow::Borrowed( - subparts - .next() - .ok_or_else(|| anyhow!("Missing second ':' in {}", value))?, - ); - (SignatureSource::OstreeRemote(remote.to_string()), second) + let (remote, rest) = second + .split_once(':') + .ok_or_else(|| anyhow!("Missing second ':' in {}", value))?; + ( + SignatureSource::OstreeRemote(remote.to_string()), + Cow::Borrowed(rest), + ) } o => { return Err(anyhow!("Invalid ostree image reference scheme: {}", o)); From 623d7649a96ad165930b93d78c7b39f5b2a62d5f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Dec 2021 15:32:46 -0500 Subject: [PATCH 234/775] Add a container_utils module with API to detect containerization This isn't going to be perfect, but it will serve for now. We will need it in both this codebase as well as rpm-ostree, so let's make it a public API. --- lib/src/container_utils.rs | 17 +++++++++++++++++ lib/src/lib.rs | 1 + 2 files changed, 18 insertions(+) create mode 100644 lib/src/container_utils.rs diff --git a/lib/src/container_utils.rs b/lib/src/container_utils.rs new file mode 100644 index 000000000..87e8766f1 --- /dev/null +++ b/lib/src/container_utils.rs @@ -0,0 +1,17 @@ +//! Helpers for interacting with containers at runtime. + +/// Attempts to detect if the current process is running inside a container. +/// This looks for the `container` environment variable or the presence +/// of Docker or podman's more generic `/run/.containerenv`. +pub fn running_in_container() -> bool { + if std::env::var_os("container").is_some() { + return true; + } + // https://stackoverflow.com/questions/20010199/how-to-determine-if-a-process-runs-inside-lxc-docker + for p in ["/run/.containerenv", "/.dockerenv"] { + if std::path::Path::new(p).exists() { + return true; + } + } + false +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 0831d57b4..74fe52cd9 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -26,6 +26,7 @@ type Result = anyhow::Result; pub mod cli; pub mod container; +pub mod container_utils; pub mod diff; pub mod ima; pub mod keyfileext; From 0c297fc7559ae3f888cb1b8ea201d832e496e735 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 16 Dec 2021 12:53:16 -0500 Subject: [PATCH 235/775] Add new integration testing flow The core idea here is that we take our built binary and inject it into a fcos container image, where we can do further testing. For now just to prove this out I've added an internal testutils command. 
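For example (output shown is the ostree-based container case; the integration script below asserts exactly this):

```
$ ostree-ext-cli internal-only-for-testing detect-env
ostree-container
```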
(I think I'd like to write more tests in Rust, but to do that correctly we also want a `bin-unit-tests` feature like rpm-ostree has, which can come later) --- .github/workflows/rust.yml | 23 ++++++++++++++++++++++- ci/integration.sh | 22 ++++++++++++++++++++++ lib/src/cli.rs | 20 ++++++++++++++++++++ lib/src/integrationtest.rs | 14 ++++++++++++++ lib/src/lib.rs | 2 ++ 5 files changed, 80 insertions(+), 1 deletion(-) create mode 100755 ci/integration.sh create mode 100644 lib/src/integrationtest.rs diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 8cc6345a8..dcb0e0b0f 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -31,9 +31,14 @@ jobs: - name: Cache Dependencies uses: Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 - name: Build - run: cargo test --no-run + run: cargo test --no-run && cargo build - name: Run tests run: cargo test -- --nocapture --quiet + - name: Upload binary + uses: actions/upload-artifact@v2 + with: + name: ostree-ext-cli + path: target/debug/ostree-ext-cli build-minimum-toolchain: name: "Build, minimum supported toolchain (MSRV)" runs-on: ubuntu-latest @@ -75,3 +80,19 @@ jobs: run: cargo fmt -- --check -l - name: cargo clippy (warnings) run: cargo clippy -- -D warnings + integration: + name: "Integration" + needs: build + runs-on: ubuntu-latest + container: quay.io/cgwalters/fcos + steps: + - name: Checkout repository + uses: actions/checkout@v2 + - name: Download ostree-ext-cli + uses: actions/download-artifact@v2 + with: + name: ostree-ext-cli + - name: Install + run: install ostree-ext-cli /usr/bin && rm -v ostree-ext-cli + - name: Integration tests + run: ./ci/integration.sh diff --git a/ci/integration.sh b/ci/integration.sh new file mode 100755 index 000000000..8d0104e13 --- /dev/null +++ b/ci/integration.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Assumes that the current environment is a mutable ostree-container +# with ostree-ext-cli installed in /usr/bin. +# Runs integration tests. +set -xeuo pipefail + +# Output an ok message for TAP +n_tap_tests=0 +tap_ok() { + echo "ok" "$@" + n_tap_tests=$(($n_tap_tests+1)) +} + +tap_end() { + echo "1..${n_tap_tests}" +} + +env=$(ostree-ext-cli internal-only-for-testing detect-env) +test "${env}" = ostree-container +tap_ok environment + +tap_end \ No newline at end of file diff --git a/lib/src/cli.rs b/lib/src/cli.rs index f6c4fcfdf..72a489a28 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -231,6 +231,13 @@ struct ImaSignOpts { key: String, } +/// Options for internal testing +#[derive(Debug, StructOpt)] +enum TestingOpts { + // Detect the current environment + DetectEnv, +} + /// Toplevel options for extended ostree functionality. #[derive(Debug, StructOpt)] #[structopt(name = "ostree-ext")] @@ -243,6 +250,8 @@ enum Opt { Container(ContainerOpts), /// IMA signatures ImaSign(ImaSignOpts), + #[structopt(setting(structopt::clap::AppSettings::Hidden))] + InternalOnlyForTesting(TestingOpts), } #[allow(clippy::from_over_into)] @@ -437,6 +446,16 @@ fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { Ok(()) } +fn testing(opts: &TestingOpts) -> Result<()> { + match opts { + TestingOpts::DetectEnv => { + let s = crate::integrationtest::detectenv(); + println!("{}", s); + Ok(()) + } + } +} + /// Parse the provided arguments and execute. /// Calls [`structopt::clap::Error::exit`] on failure, printing the error message and aborting the program. 
pub async fn run_from_iter(args: I) -> Result<()> @@ -525,5 +544,6 @@ where }, }, Opt::ImaSign(ref opts) => ima_sign(opts), + Opt::InternalOnlyForTesting(ref opts) => testing(opts), } } diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs new file mode 100644 index 000000000..e5a8f0b2d --- /dev/null +++ b/lib/src/integrationtest.rs @@ -0,0 +1,14 @@ +//! Module used for integration tests; should not be public. + +fn has_ostree() -> bool { + std::path::Path::new("/sysroot/ostree/repo").exists() +} + +pub(crate) fn detectenv() -> &'static str { + match (crate::container_utils::running_in_container(), has_ostree()) { + (true, true) => "ostree-container", + (true, false) => "container", + (false, true) => "ostree", + (false, false) => "none", + } +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 74fe52cd9..46c122732 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -41,3 +41,5 @@ pub mod prelude { #[doc(hidden)] pub use ostree::prelude::*; } + +mod integrationtest; From 59e7a12d34f2e99380cb81da00c61136f16f5ac3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Dec 2021 10:32:29 -0500 Subject: [PATCH 236/775] tar/export: Do emit dirmeta for `/sysroot` This fixes an important regression from https://github.com/ostreedev/ostree-rs-ext/pull/186 Basically we have now a really ugly hack around `/sysroot` that wasn't caught by the unit tests here because the OS content doesn't have it. I'm going to do a followup where we extend our integration testing more here. But anyways, we still need to emit the dirmeta object for `/sysroot`, even if not its contents. (Actually, it *should* be empty in an ostree commit, hmm probably ostree should enforce that) --- lib/src/tar/export.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 313de1f36..e8ea73f2f 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -348,10 +348,6 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { for item in dirs { let (name, contents_csum, meta_csum) = item.to_tuple(); let name = name.to_str(); - // Special hack because tar stream for containers can't have duplicates. - if is_root && name == SYSROOT { - continue; - } let metadata = { hex::encode_to_slice(meta_csum, &mut hexbuf)?; let meta_csum = std::str::from_utf8(&hexbuf)?; @@ -362,6 +358,10 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { // Safety: We passed the correct variant type just above ostree::DirMetaParsed::from_variant(meta_v).unwrap() }; + // Special hack because tar stream for containers can't have duplicates. + if is_root && name == SYSROOT { + continue; + } hex::encode_to_slice(contents_csum, &mut hexbuf)?; let dirtree_csum = std::str::from_utf8(&hexbuf)?; let subpath = &dirpath.join(name); From 76c9038d80ff369bca57115e40dff7e69f7e65ab Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Dec 2021 13:03:51 -0500 Subject: [PATCH 237/775] =?UTF-8?q?tar/export:=20Do=20checksum=20=E2=86=92?= =?UTF-8?q?=20string=20a=20bit=20smarter?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I was working on this area of code for a previous PR, and I almost made a refactoring that shared the `hexbuf`. I think the stack allocation here was a premature optimization. Let's just heap allocate - modern allocators are fast with thread-local storage and caches etc. The C side allocates a ton anyways. 
However there is an optimization we can make here - by passing ownership of the checksum string into the recursive dirtree walk, we can drop it before recursing. Now there's only one heap allocation there instead of it being O(depth). --- lib/src/tar/export.rs | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index e8ea73f2f..0f201d6e9 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -138,7 +138,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let commit_bytes = commit_bytes.try_as_aligned()?; let commit = gv_commit!().cast(commit_bytes); let commit = commit.to_tuple(); - let contents = &hex::encode(commit.6); + let contents = hex::encode(commit.6); let metadata_checksum = &hex::encode(commit.7); let metadata_v = self .repo @@ -218,9 +218,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { h.set_mode(0o644); h.set_size(0); let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), xattrs_data)?; - let mut hexbuf = [0u8; 64]; - hex::encode_to_slice(digest, &mut hexbuf)?; - let checksum = std::str::from_utf8(&hexbuf)?; + let checksum = &hex::encode(digest); let path = xattrs_path(checksum); if !self.wrote_xattrs.contains(checksum) { @@ -311,14 +309,15 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn append_dirtree>( &mut self, dirpath: &Utf8Path, - checksum: &str, + checksum: String, is_root: bool, cancellable: Option<&C>, ) -> Result<()> { let v = &self .repo - .load_variant(ostree::ObjectType::DirTree, checksum)?; - self.append(ostree::ObjectType::DirTree, checksum, v)?; + .load_variant(ostree::ObjectType::DirTree, &checksum)?; + self.append(ostree::ObjectType::DirTree, &checksum, v)?; + drop(checksum); let v = v.data_as_bytes(); let v = v.try_as_aligned()?; let v = gv_dirtree!().cast(v); @@ -328,14 +327,10 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { c.set_error_if_cancelled()?; } - // A reusable buffer to avoid heap allocating these - let mut hexbuf = [0u8; 64]; - for file in files { let (name, csum) = file.to_tuple(); let name = name.to_str(); - hex::encode_to_slice(csum, &mut hexbuf)?; - let checksum = std::str::from_utf8(&hexbuf)?; + let checksum = &hex::encode(csum); let (objpath, mut h) = self.append_content(checksum)?; h.set_entry_type(tar::EntryType::Link); h.set_link_name(&objpath)?; @@ -349,8 +344,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let (name, contents_csum, meta_csum) = item.to_tuple(); let name = name.to_str(); let metadata = { - hex::encode_to_slice(meta_csum, &mut hexbuf)?; - let meta_csum = std::str::from_utf8(&hexbuf)?; + let meta_csum = &hex::encode(meta_csum); let meta_v = &self .repo .load_variant(ostree::ObjectType::DirMeta, meta_csum)?; @@ -362,8 +356,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { if is_root && name == SYSROOT { continue; } - hex::encode_to_slice(contents_csum, &mut hexbuf)?; - let dirtree_csum = std::str::from_utf8(&hexbuf)?; + let dirtree_csum = hex::encode(contents_csum); let subpath = &dirpath.join(name); let subpath = map_path(subpath); self.append_dir(&*subpath, &metadata)?; From 5b9a141c203c963b95e23a59d421b10eada5a158 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Dec 2021 16:19:32 -0500 Subject: [PATCH 238/775] tar/export: Add a repo/config with mode `bare-split-xattrs` Part of https://github.com/ostreedev/ostree/issues/2499 Basically, ostree core should have first-class support for reading and parsing this. 
The primary motivation is to support ostree-in-container better.

The main wrinkle here, as usual, is xattrs. We can't rely on being
able to read/write xattrs inside a container.

For now, this will be sufficient to *identify* this format, but it
will pave the way for ostree core to support read/write.

(Note that if we go to implement writes, a notable wrinkle is that
 garbage collection of the `xattrs` subdirectory will require another
 special handling pass.)
---
 lib/src/tar/export.rs | 28 +++++++++++++++++++++++++---
 lib/src/tar/import.rs |  4 ++++
 lib/tests/it/main.rs  | 33 +++++++++++++++++++------------
 3 files changed, 50 insertions(+), 15 deletions(-)

diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 0f201d6e9..f713fd13b 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -14,11 +14,21 @@ use std::borrow::Cow;
 use std::collections::HashSet;
 use std::io::BufReader;
 
+/// The repository mode generated by a tar export stream.
+pub const BARE_SPLIT_XATTRS_MODE: &str = "bare-split-xattrs";
+
 // This is both special in the tar stream *and* it's in the ostree commit.
 const SYSROOT: &str = "sysroot";
 // This way the default ostree -> sysroot/ostree symlink works.
 const OSTREEDIR: &str = "sysroot/ostree";
 
+/// The base repository configuration that identifies this is a tar export.
+// See https://github.com/ostreedev/ostree/issues/2499
+const REPO_CONFIG: &str = r#"[core]
+repo_version=1
+mode=bare-split-xattrs
+"#;
+
 /// A decently large buffer, as used by e.g. coreutils `cat`.
 /// System calls are expensive.
 const BUF_CAPACITY: usize = 131072;
@@ -94,8 +104,8 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
         Ok(())
     }
 
-    /// Write the initial directory structure.
-    fn write_initial_directories(&mut self) -> Result<()> {
+    /// Write the initial /sysroot/ostree/repo structure.
+    fn write_repo_structure(&mut self) -> Result<()> {
         if self.wrote_initdirs {
             return Ok(());
         }
@@ -124,6 +134,18 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
         // The special `repo/xattrs` directory used only in our tar serialization.
let path: Utf8PathBuf = format!("{}/repo/xattrs", OSTREEDIR).into(); self.append_default_dir(&path)?; + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Regular); + h.set_uid(0); + h.set_gid(0); + h.set_mode(0o644); + h.set_size(REPO_CONFIG.as_bytes().len() as u64); + self.out.append_data( + &mut h, + &format!("{}/repo/config", OSTREEDIR), + std::io::Cursor::new(REPO_CONFIG), + )?; + Ok(()) } @@ -152,7 +174,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.append_dir(rootpath, metadata)?; // Now, we create sysroot/ and everything under it - self.write_initial_directories()?; + self.write_repo_structure()?; self.append(ostree::ObjectType::Commit, checksum, commit_v)?; if let Some(commitmeta) = self diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 2ae150094..aa768d079 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -167,6 +167,10 @@ impl Importer { .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", orig_path))?; // Ignore the regular non-object file hardlinks we inject if let Ok(path) = path.strip_prefix(REPO_PREFIX) { + // Filter out the repo config file + if path.file_name() == Some("config") { + return Ok(None); + } let path = path.into(); Ok(Some((e, path))) } else { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 2e8eeee8f..5a68cc1d9 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -253,22 +253,31 @@ fn test_tar_export_structure() -> Result<()> { assert_eq!(first.header().mode()?, libc::S_IFDIR | 0o755); let next = entries.next().unwrap().unwrap(); assert_eq!(next.path().unwrap().as_os_str(), "sysroot"); + + let expected = vec![ + ("sysroot/ostree/repo/config", tar::EntryType::Regular, 0o644), + ("usr", tar::EntryType::Directory, libc::S_IFDIR | 0o755), + ]; + let mut entries = entries.map(|e| e.unwrap()); + // Verify we're injecting directories, fixes the absence of `/tmp` in our // images for example. - entries - .map(|e| e.unwrap()) - .find(|entry| { + for (path, expected_type, expected_mode) in expected { + let mut found = false; + while let Some(entry) = entries.next() { let header = entry.header(); - let path = entry.path().unwrap(); - if path.as_os_str() == "usr" { - assert_eq!(header.entry_type(), tar::EntryType::Directory); - assert_eq!(header.mode().unwrap(), libc::S_IFDIR | 0o755); - true - } else { - false + let entry_path = entry.path().unwrap(); + if path == entry_path.as_os_str() { + assert_eq!(header.entry_type(), expected_type); + assert_eq!(header.mode().unwrap(), expected_mode); + found = true; + break; } - }) - .unwrap(); + } + if !found { + panic!("Failed to find entry: {}", path); + } + } Ok(()) } From 8d9884d1ee8119388493148c35e4c752da2d6362 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Dec 2021 16:58:38 -0500 Subject: [PATCH 239/775] container_utils: Add a method to detect `bare-split-xattrs` This will be used for an integration test once we rebuild our current container image. But also, this will help out cases like https://github.com/coreos/rpm-ostree/pull/3280 As well as supporting finalization inside a container. --- lib/src/container_utils.rs | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/lib/src/container_utils.rs b/lib/src/container_utils.rs index 87e8766f1..1cad5b101 100644 --- a/lib/src/container_utils.rs +++ b/lib/src/container_utils.rs @@ -1,5 +1,13 @@ //! Helpers for interacting with containers at runtime. 
+use crate::keyfileext::KeyFileExt;
+use anyhow::Result;
+use ostree::glib;
+use std::io::Read;
+use std::path::Path;
+
+const REPO_CONFIG: &str = "/sysroot/ostree/repo/config";
+
 /// Attempts to detect if the current process is running inside a container.
 /// This looks for the `container` environment variable or the presence
 /// of Docker or podman's more generic `/run/.containerenv`.
@@ -15,3 +23,33 @@ pub fn running_in_container() -> bool {
     }
     false
 }
+
+// https://docs.rs/openat-ext/0.1.10/openat_ext/trait.OpenatDirExt.html#tymethod.open_file_optional
+// https://users.rust-lang.org/t/why-i-use-anyhow-error-even-in-libraries/68592
+fn open_optional(path: impl AsRef<Path>) -> std::io::Result<Option<std::fs::File>> {
+    match std::fs::File::open(path.as_ref()) {
+        Ok(r) => Ok(Some(r)),
+        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None),
+        Err(e) => Err(e),
+    }
+}
+
+/// Returns `true` if the current root filesystem has an ostree repository in `bare-split-xattrs` mode.
+/// This will be the case in a running ostree-native container.
+pub fn is_bare_split_xattrs() -> Result<bool> {
+    if let Some(configf) = open_optional(REPO_CONFIG)? {
+        let mut bufr = std::io::BufReader::new(configf);
+        let mut s = String::new();
+        bufr.read_to_string(&mut s)?;
+        let kf = glib::KeyFile::new();
+        kf.load_from_data(&s, glib::KeyFileFlags::NONE)?;
+        let r = if let Some(mode) = kf.optional_string("core", "mode")? {
+            mode == crate::tar::BARE_SPLIT_XATTRS_MODE
+        } else {
+            false
+        };
+        Ok(r)
+    } else {
+        Ok(false)
+    }
+}
From 6543c855580da3a3f2a85bc8b4a294b4c92c2829 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 20 Dec 2021 14:49:20 -0500
Subject: [PATCH 240/775] Use a self-enabled `internal-testing-api` feature

Most of our current testing is via an integration test, which I
designed after reading
https://matklad.github.io/2021/02/27/delete-cargo-integration-tests.html

However, I then wanted to use our internal OCI build tooling from the
integration test, which is blocked (by design) from seeing non-`pub`
things.

I initially tried doing:

```
#[cfg(test)]
pub fn do_oci_stuff(...)
```

but that didn't work. (Perhaps it should?)

After some searching I came across this gem:
https://github.com/rust-lang/cargo/issues/2911#issuecomment-749580481

Which looks like an amazing hack. But it works.

The *big* downside of this is that now in order to properly test
building without the integration test feature, one must do so via a
fully distinct crate.
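Distilled, the trick from the linked issue looks like this in `Cargo.toml`
(a sketch of the pattern, mirroring the diff below):

```toml
[dev-dependencies]
# The crate depends on *itself* as a dev-dependency with the testing
# feature enabled; integration tests then see the feature-gated API,
# while normal library builds do not.
ostree-ext = { path = ".", features = ["internal-testing-api"] }

[features]
internal-testing-api = []
```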
--- lib/Cargo.toml | 5 +++++ lib/src/cli.rs | 3 +++ lib/src/lib.rs | 1 + 3 files changed, 9 insertions(+) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index b9a3a6368..3c7f12fe8 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -47,9 +47,14 @@ tracing = "0.1" indoc = "1.0.3" quickcheck = "1" sh-inline = "0.1.0" +# https://github.com/rust-lang/cargo/issues/2911 +# https://github.com/rust-lang/rfcs/pull/1956 +ostree-ext = { path = ".", features = ["internal-testing-api"] } [package.metadata.docs.rs] features = ["dox"] [features] dox = ["ostree/dox"] +internal-testing-api = [] + diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 72a489a28..2a73bcebd 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -251,6 +251,7 @@ enum Opt { /// IMA signatures ImaSign(ImaSignOpts), #[structopt(setting(structopt::clap::AppSettings::Hidden))] + #[cfg(feature = "internal-testing-api")] InternalOnlyForTesting(TestingOpts), } @@ -446,6 +447,7 @@ fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { Ok(()) } +#[cfg(feature = "internal-testing-api")] fn testing(opts: &TestingOpts) -> Result<()> { match opts { TestingOpts::DetectEnv => { @@ -544,6 +546,7 @@ where }, }, Opt::ImaSign(ref opts) => ima_sign(opts), + #[cfg(feature = "internal-testing-api")] Opt::InternalOnlyForTesting(ref opts) => testing(opts), } } diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 46c122732..254394d17 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -42,4 +42,5 @@ pub mod prelude { pub use ostree::prelude::*; } +#[cfg(feature = "internal-testing-api")] mod integrationtest; From c61ad846f56b3616345efba2ed2eaeec42e00170 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 21 Dec 2021 10:16:50 -0500 Subject: [PATCH 241/775] tar/export: Correctly set size and entry type for denormal links Fixes https://discussion.fedoraproject.org/t/rebasing-from-container-registry/35356 (This needs unit tests I know, but right now our content set is a manually-generated fixed tarball, and I have some patches in the work to improve the test suite) --- lib/src/tar/export.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index f713fd13b..64bdfd53e 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -294,6 +294,8 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let target = meta.symlink_target().unwrap(); let target = target.as_str(); let context = || format!("Writing content symlink: {}", checksum); + h.set_entry_type(tar::EntryType::Symlink); + h.set_size(0); // Handle //chkconfig, see above if symlink_is_denormal(target) { h.set_link_name_literal(meta.symlink_target().unwrap().as_str()) @@ -302,8 +304,6 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { .append_data(&mut h, &path, &mut std::io::empty()) .with_context(context)?; } else { - h.set_entry_type(tar::EntryType::Symlink); - h.set_size(0); self.out .append_link(&mut h, &path, target) .with_context(context)?; From 22ae2eaaac30791185340414ab798d438299e805 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 21 Dec 2021 18:30:54 -0500 Subject: [PATCH 242/775] tar: Move repo config into `/sysroot/config` by default for backcompat It turns out adding `/sysroot/ostree/repo/config` into the export stream broke compat with older code. Now in theory this is all experimental and changeable, but it breaks the FCOS upgrade testing because the export format there is "production" for some values of production. And for our own sanity, it's helpful to be compatible. 
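As a usage sketch of the options API added below (a hypothetical caller;
only `ExportOptions` and `export_commit` from this patch are assumed):

```rust
use anyhow::Result;

// Export a commit using the compatibility (v0) format, which writes the
// repo config to /sysroot/config rather than /sysroot/ostree/repo/config.
fn export_compat(repo: &ostree::Repo, rev: &str, out: impl std::io::Write) -> Result<()> {
    let opts = ostree_ext::tar::ExportOptions { format_version: 0 };
    ostree_ext::tar::export_commit(repo, rev, out, Some(opts))?;
    Ok(())
}
```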
And add format versioning into the tar export code, and default to v0 where we put the config into `/sysroot/config` where it will be ignored by older ostree. This in particular should allow us to update rpm-ostree to the latest ostree-ext in https://github.com/coreos/rpm-ostree/pull/3285 --- lib/src/cli.rs | 11 +++- lib/src/container/encapsulate.rs | 2 +- lib/src/container_utils.rs | 12 ++++- lib/src/tar/export.rs | 36 +++++++++---- lib/tests/it/main.rs | 92 +++++++++++++++++++++++--------- 5 files changed, 116 insertions(+), 37 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 2a73bcebd..c96061d44 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -55,6 +55,10 @@ struct ExportOpts { #[structopt(long)] repo: String, + /// The format version. Must be 0 or 1. + #[structopt(long)] + format_version: u32, + /// The ostree ref or commit to export rev: String, } @@ -284,7 +288,12 @@ async fn tar_import(opts: &ImportOpts) -> Result<()> { /// Export a tar archive containing an ostree commit. fn tar_export(opts: &ExportOpts) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; - crate::tar::export_commit(repo, opts.rev.as_str(), std::io::stdout())?; + #[allow(clippy::needless_update)] + let subopts = crate::tar::ExportOptions { + format_version: opts.format_version, + ..Default::default() + }; + crate::tar::export_commit(repo, opts.rev.as_str(), std::io::stdout(), Some(subopts))?; Ok(()) } diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 35c6a46eb..a28325ecc 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -36,7 +36,7 @@ fn export_ostree_ref( ) -> Result { let commit = repo.resolve_rev(rev, false)?.unwrap(); let mut w = writer.create_raw_layer(compression)?; - ostree_tar::export_commit(repo, commit.as_str(), &mut w)?; + ostree_tar::export_commit(repo, commit.as_str(), &mut w, None)?; w.complete() } diff --git a/lib/src/container_utils.rs b/lib/src/container_utils.rs index 1cad5b101..b80f3c124 100644 --- a/lib/src/container_utils.rs +++ b/lib/src/container_utils.rs @@ -6,7 +6,11 @@ use ostree::glib; use std::io::Read; use std::path::Path; -const REPO_CONFIG: &str = "/sysroot/ostree/repo/config"; +// See https://github.com/coreos/rpm-ostree/pull/3285#issuecomment-999101477 +// For compatibility with older ostree, we stick this in /sysroot where +// it will be ignored. +const V0_REPO_CONFIG: &str = "/sysroot/config"; +const V1_REPO_CONFIG: &str = "/sysroot/ostree/repo/config"; /// Attempts to detect if the current process is running inside a container. /// This looks for the `container` environment variable or the presence @@ -37,7 +41,11 @@ fn open_optional(path: impl AsRef) -> std::io::Result Result { - if let Some(configf) = open_optional(REPO_CONFIG)? 
{ + if let Some(configf) = open_optional(V1_REPO_CONFIG) + .transpose() + .or_else(|| open_optional(V0_REPO_CONFIG).transpose()) + { + let configf = configf?; let mut bufr = std::io::BufReader::new(configf); let mut s = String::new(); bufr.read_to_string(&mut s)?; diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 64bdfd53e..0bf37c5b3 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -44,6 +44,7 @@ fn map_path(p: &Utf8Path) -> std::borrow::Cow { struct OstreeTarWriter<'a, W: std::io::Write> { repo: &'a ostree::Repo, out: &'a mut tar::Builder, + options: ExportOptions, wrote_initdirs: bool, wrote_dirtree: HashSet, wrote_dirmeta: HashSet, @@ -80,10 +81,11 @@ fn symlink_is_denormal(target: &str) -> bool { } impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { - fn new(repo: &'a ostree::Repo, out: &'a mut tar::Builder) -> Self { + fn new(repo: &'a ostree::Repo, out: &'a mut tar::Builder, options: ExportOptions) -> Self { Self { repo, out, + options, wrote_initdirs: false, wrote_dirmeta: HashSet::new(), wrote_dirtree: HashSet::new(), @@ -140,11 +142,13 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { h.set_gid(0); h.set_mode(0o644); h.set_size(REPO_CONFIG.as_bytes().len() as u64); - self.out.append_data( - &mut h, - &format!("{}/repo/config", OSTREEDIR), - std::io::Cursor::new(REPO_CONFIG), - )?; + let path = match self.options.format_version { + 0 => format!("{}/config", SYSROOT), + 1 => format!("{}/repo/config", OSTREEDIR), + n => anyhow::bail!("Unsupported ostree tar format version {}", n), + }; + self.out + .append_data(&mut h, path, std::io::Cursor::new(REPO_CONFIG))?; Ok(()) } @@ -397,18 +401,32 @@ fn impl_export( repo: &ostree::Repo, commit_checksum: &str, out: &mut tar::Builder, + options: ExportOptions, ) -> Result<()> { - let writer = &mut OstreeTarWriter::new(repo, out); + let writer = &mut OstreeTarWriter::new(repo, out, options); writer.write_commit(commit_checksum)?; Ok(()) } +/// Configuration for tar export. +#[derive(Debug, Default, PartialEq, Eq)] +pub struct ExportOptions { + /// Format version; must be 0 or 1. + pub format_version: u32, +} + /// Export an ostree commit to an (uncompressed) tar archive stream. 
#[context("Exporting commit")] -pub fn export_commit(repo: &ostree::Repo, rev: &str, out: impl std::io::Write) -> Result<()> { +pub fn export_commit( + repo: &ostree::Repo, + rev: &str, + out: impl std::io::Write, + options: Option, +) -> Result<()> { let commit = repo.resolve_rev(rev, false)?; let mut tar = tar::Builder::new(out); - impl_export(repo, commit.unwrap().as_str(), &mut tar)?; + let options = options.unwrap_or_default(); + impl_export(repo, commit.unwrap().as_str(), &mut tar, options)?; tar.finish()?; Ok(()) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 5a68cc1d9..bebf0e060 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -103,7 +103,11 @@ fn initial_export(fixture: &Fixture) -> Result { ); let destpath = fixture.path.join("exampleos-export.tar"); let mut outf = std::io::BufWriter::new(std::fs::File::create(&destpath)?); - ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut outf)?; + let options = ostree_ext::tar::ExportOptions { + format_version: fixture.format_version, + ..Default::default() + }; + ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut outf, Some(options))?; outf.flush()?; Ok(destpath) } @@ -116,6 +120,8 @@ struct Fixture { srcrepo: ostree::Repo, destrepo: ostree::Repo, destrepo_path: Utf8PathBuf, + + format_version: u32, } impl Fixture { @@ -142,6 +148,7 @@ impl Fixture { srcrepo, destrepo, destrepo_path, + format_version: 0, }) } } @@ -162,14 +169,14 @@ async fn test_tar_export_reproducible() -> Result<()> { .read_commit(TESTREF, gio::NONE_CANCELLABLE)?; let export1 = { let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; - ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h)?; + ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; h.finish()? }; // Artificial delay to flush out mtimes (one second granularity baseline, plus another 100ms for good measure). std::thread::sleep(std::time::Duration::from_millis(1100)); let export2 = { let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; - ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h)?; + ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; h.finish()? }; assert_eq!(*export1, *export2); @@ -237,11 +244,54 @@ async fn test_tar_import_signed() -> Result<()> { Ok(()) } +struct TarExpected { + path: &'static str, + etype: tar::EntryType, + mode: u32, +} + +impl Into for &(&'static str, tar::EntryType, u32) { + fn into(self) -> TarExpected { + TarExpected { + path: self.0, + etype: self.1, + mode: self.2, + } + } +} + +fn validate_tar_expected( + t: tar::Entries, + expected: impl IntoIterator, +) -> Result<()> { + let expected = expected.into_iter(); + let mut entries = t.map(|e| e.unwrap()); + // Verify we're injecting directories, fixes the absence of `/tmp` in our + // images for example. + for exp in expected { + let mut found = false; + while let Some(entry) = entries.next() { + let header = entry.header(); + let entry_path = entry.path().unwrap(); + if exp.path == entry_path.as_os_str() { + assert_eq!(header.entry_type(), exp.etype); + assert_eq!(header.mode().unwrap(), exp.mode); + found = true; + break; + } + } + if !found { + anyhow::bail!("Failed to find entry: {}", exp.path); + } + } + Ok(()) +} + /// Validate basic structure of the tar export. /// Right now just checks the first entry is `sysroot` with mode 0755. 
 #[test]
 fn test_tar_export_structure() -> Result<()> {
-    let fixture = Fixture::new()?;
+    let mut fixture = Fixture::new()?;
     let src_tar = initial_export(&fixture)?;
     let src_tar = std::io::BufReader::new(std::fs::File::open(&src_tar)?);
     let mut src_tar = tar::Archive::new(src_tar);
@@ -254,30 +304,24 @@ fn test_tar_export_structure() -> Result<()> {
     let next = entries.next().unwrap().unwrap();
     assert_eq!(next.path().unwrap().as_os_str(), "sysroot");
 
-    let expected = vec![
+    // Validate format version 0
+    let expected = [
+        ("sysroot/config", tar::EntryType::Regular, 0o644),
+        ("usr", tar::EntryType::Directory, libc::S_IFDIR | 0o755),
+    ];
+    validate_tar_expected(entries, expected.iter().map(Into::into))?;
+
+    // Validate format version 1
+    fixture.format_version = 1;
+    let src_tar = initial_export(&fixture)?;
+    let src_tar = std::io::BufReader::new(std::fs::File::open(&src_tar)?);
+    let mut src_tar = tar::Archive::new(src_tar);
+    let expected = [
         ("sysroot/ostree/repo/config", tar::EntryType::Regular, 0o644),
         ("usr", tar::EntryType::Directory, libc::S_IFDIR | 0o755),
     ];
-    let mut entries = entries.map(|e| e.unwrap());
+    validate_tar_expected(src_tar.entries()?, expected.iter().map(Into::into))?;
 
-    // Verify we're injecting directories, fixes the absence of `/tmp` in our
-    // images for example.
-    for (path, expected_type, expected_mode) in expected {
-        let mut found = false;
-        while let Some(entry) = entries.next() {
-            let header = entry.header();
-            let entry_path = entry.path().unwrap();
-            if path == entry_path.as_os_str() {
-                assert_eq!(header.entry_type(), expected_type);
-                assert_eq!(header.mode().unwrap(), expected_mode);
-                found = true;
-                break;
-            }
-        }
-        if !found {
-            panic!("Failed to find entry: {}", path);
-        }
-    }
     Ok(())
 }
From 04c2b34732e02a27bab4080840fafccf0865bd55 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 22 Dec 2021 11:35:01 -0500
Subject: [PATCH 243/775] lib/container_utils: Add helper `is_ostree_container()`

This came up in https://github.com/coreos/rpm-ostree/pull/3297

Most of our checks are going to be for this.
---
 lib/src/container_utils.rs | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/lib/src/container_utils.rs b/lib/src/container_utils.rs
index b80f3c124..b42c1e977 100644
--- a/lib/src/container_utils.rs
+++ b/lib/src/container_utils.rs
@@ -61,3 +61,10 @@ pub fn is_bare_split_xattrs() -> Result<bool> {
         Ok(false)
     }
 }
+
+/// Returns `true` if the current booted filesystem appears to be an ostree-native container.
+///
+/// This just invokes [`is_bare_split_xattrs`] and [`running_in_container`].
+pub fn is_ostree_container() -> Result<bool> {
+    Ok(running_in_container() && is_bare_split_xattrs()?)
+}
From c3fe35a750bb1e1f050888be0c0213de03bce3e8 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 4 Jan 2022 10:50:09 -0500
Subject: [PATCH 244/775] Replace lazy_static with once_cell

I saw https://github.com/dtolnay/cxx/pull/995 go by and it looks better.
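For illustration, the conversion pattern in isolation (a sketch; the
fallback value here is a stand-in, not the real `TEST_REGISTRY_DEFAULT`):

```rust
use once_cell::sync::Lazy;

// lazy_static! { static ref TEST_REGISTRY: String = ...; } becomes a
// plain static with a lazily-run initializer closure; no macro needed.
static TEST_REGISTRY: Lazy<String> = Lazy::new(|| {
    std::env::var("TEST_REGISTRY").unwrap_or_else(|_| "localhost:5000".to_string())
});
```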
--- lib/Cargo.toml | 2 +- lib/src/container/skopeo.rs | 27 +++++++++++++-------------- lib/tests/it/main.rs | 13 +++++-------- 3 files changed, 19 insertions(+), 23 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 3c7f12fe8..9b5913736 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -23,7 +23,7 @@ futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" -lazy_static = "1.4.0" +once_cell = "1.9" libc = "0.2.92" nix = "0.23" oci-spec = "0.5.0" diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs index ccc957521..9f6718878 100644 --- a/lib/src/container/skopeo.rs +++ b/lib/src/container/skopeo.rs @@ -1,6 +1,7 @@ //! Fork skopeo as a subprocess use anyhow::{Context, Result}; +use once_cell::sync::Lazy; use serde::Deserialize; use std::process::Stdio; use tokio::process::Command; @@ -18,20 +19,18 @@ bitflags::bitflags! { } } -lazy_static::lazy_static! { - static ref SKOPEO_FEATURES: Result = { - let mut features = SkopeoFeatures::empty(); - let c = std::process::Command::new("skopeo") - .args(&["copy", "--help"]) - .stderr(std::process::Stdio::piped()) - .output()?; - let stdout = String::from_utf8_lossy(&c.stderr); - if stdout.contains("--digestfile") { - features.insert(SkopeoFeatures::COPY_DIGESTFILE); - } - Ok(features) - }; -} +static SKOPEO_FEATURES: Lazy> = Lazy::new(|| { + let mut features = SkopeoFeatures::empty(); + let c = std::process::Command::new("skopeo") + .args(&["copy", "--help"]) + .stderr(std::process::Stdio::piped()) + .output()?; + let stdout = String::from_utf8_lossy(&c.stderr); + if stdout.contains("--digestfile") { + features.insert(SkopeoFeatures::COPY_DIGESTFILE); + } + Ok(features) +}); pub(crate) fn skopeo_has_features(wanted: SkopeoFeatures) -> Result { match &*SKOPEO_FEATURES { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index bebf0e060..33a2c4441 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -2,6 +2,7 @@ use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use indoc::indoc; +use once_cell::sync::Lazy; use ostree_ext::container::store::PrepareResult; use ostree_ext::container::{ Config, ImageReference, OstreeImageReference, SignatureSource, Transport, @@ -35,14 +36,10 @@ fn assert_err_contains(r: Result, s: impl AsRef) { } } -lazy_static::lazy_static! { - static ref TEST_REGISTRY: String = { - match std::env::var_os("TEST_REGISTRY") { - Some(t) => t.to_str().unwrap().to_owned(), - None => TEST_REGISTRY_DEFAULT.to_string() - } - }; -} +static TEST_REGISTRY: Lazy = Lazy::new(|| match std::env::var_os("TEST_REGISTRY") { + Some(t) => t.to_str().unwrap().to_owned(), + None => TEST_REGISTRY_DEFAULT.to_string(), +}); #[context("Generating test repo")] fn generate_test_repo(dir: &Utf8Path) -> Result { From 2280d1841464e8458c89a2ccaec66270fd18a4da Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 4 Jan 2022 12:23:39 -0500 Subject: [PATCH 245/775] ima: Use std's `last_os_error()` instead of nix Part of investigating using rustix instead of nix. 
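The pattern in isolation (a standalone sketch using `fsync` rather than
the xattr calls from the diff below):

```rust
use std::os::unix::io::AsRawFd;

// After a raw libc call fails, capture errno via std instead of
// nix::Error::last(); std::io::Error then converts cleanly into anyhow.
fn fsync_checked(f: &std::fs::File) -> std::io::Result<()> {
    let r = unsafe { libc::fsync(f.as_raw_fd()) };
    if r < 0 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(())
}
```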
--- lib/src/ima.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 8adf2aa08..121a861bb 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -80,17 +80,17 @@ fn steal_xattr(f: &File, k: &str) -> Result> { let k = k.as_ptr() as *const _; let r = libc::fgetxattr(f.as_raw_fd(), k, std::ptr::null_mut(), 0); if r < 0 { - return Err(nix::Error::last().into()); + return Err(std::io::Error::last_os_error().into()); } let sz: usize = r.try_into()?; let mut buf = vec![0u8; sz]; let r = libc::fgetxattr(f.as_raw_fd(), k, buf.as_mut_ptr() as *mut _, sz); if r < 0 { - return Err(nix::Error::last().into()); + return Err(std::io::Error::last_os_error().into()); } let r = libc::fremovexattr(f.as_raw_fd(), k); if r < 0 { - return Err(nix::Error::last().into()); + return Err(std::io::Error::last_os_error().into()); } Ok(buf) } From ffb046d5bb4f45d8813ad88fbd5d30205d87b4b0 Mon Sep 17 00:00:00 2001 From: Joseph Marrero Date: Tue, 4 Jan 2022 16:59:17 -0500 Subject: [PATCH 246/775] lib/src/cli: Remove unused BuildOpts struct. --- lib/src/cli.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index c96061d44..6466779f2 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -25,18 +25,6 @@ fn parse_base_imgref(s: &str) -> Result { ImageReference::try_from(s) } -#[derive(Debug, StructOpt)] -struct BuildOpts { - #[structopt(long)] - repo: String, - - #[structopt(long = "ref")] - ostree_ref: String, - - #[structopt(long)] - oci_dir: String, -} - /// Options for importing a tar archive. #[derive(Debug, StructOpt)] struct ImportOpts { From cdd20ecf1f0097c485c318d6b757b6ea074db845 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 5 Jan 2022 17:36:45 -0500 Subject: [PATCH 247/775] lib: Switch to released containers-image-proxy version We cut a new version upstream, let's switch back to crate dependencies. Add a build-time conditional mirroring the underlying one. --- lib/Cargo.toml | 5 ++--- lib/src/container/store.rs | 10 +++++----- lib/tests/it/main.rs | 19 ++++++++----------- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 9b5913736..c89eb468c 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,8 +10,7 @@ version = "0.5.1" [dependencies] anyhow = "1.0" -# containers-image-proxy = "0.3" -containers-image-proxy = { git = "https://github.com/containers/containers-image-proxy-rs" } +containers-image-proxy = "0.4.0" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" @@ -57,4 +56,4 @@ features = ["dox"] [features] dox = ["ostree/dox"] internal-testing-api = [] - +proxy_v0_2_3 = ["containers-image-proxy/proxy_v0_2_3"] diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 0cccee71a..7b7d6193c 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -204,7 +204,6 @@ impl LayeredImageImporter { /// Determine if there is a new manifest, and if so return its digest. #[context("Fetching manifest")] pub async fn prepare(&mut self) -> Result { - let proxy_023 = self.proxy.get_0_2_3(); match &self.imgref.sigverify { SignatureSource::ContainerPolicy if skopeo::container_policy_is_default_insecure()? 
=> { return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); @@ -245,14 +244,15 @@ impl LayeredImageImporter { (None, None) }; - let config = if let Some(proxy) = proxy_023 { - let config_bytes = proxy.fetch_config(&self.proxy_img).await?; + #[cfg(feature = "proxy_v0_2_3")] + let config = { + let config_bytes = self.proxy.fetch_config(&self.proxy_img).await?; let config: oci_image::ImageConfiguration = serde_json::from_slice(&config_bytes).context("Parsing image configuration")?; Some(config) - } else { - None }; + #[cfg(not(feature = "proxy_v0_2_3"))] + let config = None; let mut layers = manifest.layers().iter().cloned(); // We require a base layer. diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 33a2c4441..6cc24f184 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -546,19 +546,16 @@ async fn test_container_write_derive() -> Result<()> { assert!(digest.starts_with("sha256:")); assert_eq!(digest, expected_digest); - // For now we need to make this test dynamic + #[cfg(feature = "proxy_v0_2_3")] { + let commit_meta = &imported_commit.child_value(0); let proxy = containers_image_proxy::ImageProxy::new().await?; - let proxy = proxy.get_0_2_3(); - if proxy.is_some() { - let commit_meta = &imported_commit.child_value(0); - let commit_meta = glib::VariantDict::new(Some(commit_meta)); - let config = commit_meta - .lookup::("ostree.container.image-config")? - .unwrap(); - let config: oci_spec::image::ImageConfiguration = serde_json::from_str(&config)?; - assert_eq!(config.os(), &oci_spec::image::Os::Linux); - } + let commit_meta = glib::VariantDict::new(Some(commit_meta)); + let config = commit_meta + .lookup::("ostree.container.image-config")? + .unwrap(); + let config: oci_spec::image::ImageConfiguration = serde_json::from_str(&config)?; + assert_eq!(config.os(), &oci_spec::image::Os::Linux); } // Parse the commit and verify we pulled the derived content. From 0e86da1d1ec30330319e535bb1064b8f27b1d080 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 4 Jan 2022 12:16:17 -0500 Subject: [PATCH 248/775] Use rustix for cmd extension This is safer and may actually fix a race condition I've seen sometimes in CI runs. Part of investigating using rustix (and cap-std) in our section of the ecosystem (xref https://github.com/rust-lang/rfcs/issues/2610). --- lib/Cargo.toml | 1 + lib/src/cmdext.rs | 18 +++++++++++------- lib/src/tar/write.rs | 6 ++++-- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index c89eb468c..1f22ef0b3 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -25,6 +25,7 @@ indicatif = "0.16.0" once_cell = "1.9" libc = "0.2.92" nix = "0.23" +rustix = "0.31.3" oci-spec = "0.5.0" openat = "0.1.20" openat-ext = "0.2.0" diff --git a/lib/src/cmdext.rs b/lib/src/cmdext.rs index bd1da4ea1..65bdb096e 100644 --- a/lib/src/cmdext.rs +++ b/lib/src/cmdext.rs @@ -1,19 +1,23 @@ -use std::os::unix::prelude::{CommandExt, RawFd}; +use rustix::fd::{FromRawFd, IntoRawFd}; +use rustix::io::OwnedFd; +use std::os::unix::prelude::CommandExt; +use std::sync::Arc; pub(crate) trait CommandRedirectionExt { /// Pass a file descriptor into the target process. - /// IMPORTANT: `fd` must be valid (i.e. cannot be closed) until after [`std::Process::Command::spawn`] or equivalent is invoked. 
- fn take_fd_n(&mut self, fd: i32, target: i32) -> &mut Self; + fn take_fd_n(&mut self, fd: Arc, target: i32) -> &mut Self; } #[allow(unsafe_code)] impl CommandRedirectionExt for std::process::Command { - fn take_fd_n(&mut self, fd: i32, target: i32) -> &mut Self { + fn take_fd_n(&mut self, fd: Arc, target: i32) -> &mut Self { unsafe { self.pre_exec(move || { - nix::unistd::dup2(fd, target as RawFd) - .map(|_r| ()) - .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, format!("{}", e))) + let target = rustix::io::OwnedFd::from_raw_fd(target); + rustix::io::dup2(&*fd, &target)?; + // Intentionally leak into the child. + let _ = target.into_raw_fd(); + Ok(()) }); } self diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index 8af16cb80..6e048d1e8 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -13,12 +13,13 @@ use anyhow::{anyhow, Context}; use camino::{Utf8Component, Utf8Path, Utf8PathBuf}; use ostree::gio; use ostree::prelude::FileExt; +use rustix::fd::FromFd; use std::collections::BTreeMap; use std::convert::TryInto; use std::io::{BufWriter, Write}; -use std::os::unix::prelude::AsRawFd; use std::path::Path; use std::process::Stdio; +use std::sync::Arc; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite}; use tracing::instrument; @@ -197,13 +198,14 @@ pub async fn write_tar( }; let mut c = std::process::Command::new("ostree"); let repofd = repo.dfd_as_file()?; + let repofd = Arc::new(rustix::io::OwnedFd::from_into_fd(repofd)); { let c = c .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .args(&["commit"]); - c.take_fd_n(repofd.as_raw_fd(), 3); + c.take_fd_n(repofd.clone(), 3); c.arg("--repo=/proc/self/fd/3"); if let Some(sepolicy) = sepolicy.as_ref() { c.arg("--selinux-policy"); From b3974f597c5e17e70deadc071bf877a7dca7df8b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 4 Jan 2022 12:22:30 -0500 Subject: [PATCH 249/775] Drop remaining use of `nix` By using rustix for uname. --- lib/Cargo.toml | 1 - lib/src/container/ociwriter.rs | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 1f22ef0b3..bb6964f9c 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -24,7 +24,6 @@ hex = "0.4.3" indicatif = "0.16.0" once_cell = "1.9" libc = "0.2.92" -nix = "0.23" rustix = "0.31.3" oci-spec = "0.5.0" openat = "0.1.20" diff --git a/lib/src/container/ociwriter.rs b/lib/src/container/ociwriter.rs index b2429af1b..6a459d030 100644 --- a/lib/src/container/ociwriter.rs +++ b/lib/src/container/ociwriter.rs @@ -169,8 +169,8 @@ impl<'a> OciWriter<'a> { #[context("Writing OCI")] pub(crate) fn complete(self) -> Result<()> { - let utsname = nix::sys::utsname::uname(); - let machine = utsname.machine(); + let uname = rustix::process::uname(); + let machine = uname.machine().to_str().unwrap(); let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine); let arch = oci_image::Arch::from(*arch); From 6d14d708bf4b0f9732118776ce7d351136862905 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 6 Jan 2022 16:13:23 -0500 Subject: [PATCH 250/775] Switch to published ostree crate In prep for releasing a new version. 
--- lib/Cargo.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index bb6964f9c..119a15c34 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -29,8 +29,7 @@ oci-spec = "0.5.0" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -# ostree = { features = ["v2021_5"], version = "0.13.3" } -ostree = { git = "https://github.com/ostreedev/ostree-rs", features = ["v2021_5"] } +ostree = { features = ["v2021_5"], version = "0.13.4" } phf = { features = ["macros"], version = "0.10" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } From 359201e192b4a7cdcc95ff69dd2f511bf9936e62 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 6 Jan 2022 16:50:20 -0500 Subject: [PATCH 251/775] Release 0.6.0 Semver bump because of various API changes. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 119a15c34..c45f15604 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.5.1" +version = "0.6.0" [dependencies] anyhow = "1.0" From f08de47da7d8684d02739d8fd67a2ea47c622002 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 20 Dec 2021 17:45:19 -0500 Subject: [PATCH 252/775] tests: Dynamically generate containers Currently we have a hardcoded "exampleos.tar" fixture, as well as two `.ociarchive` containers that also hardcode its content. Having pre-generated container fixtures on one hand is good because we're testing compatibility of import from a frozen format. But it also means that we can't easily test changing our output format. And I'd like to introduce some changes to the output format. I'll reintroduce compat testing another way. Add some internal code to extend an OCI directory. --- lib/Cargo.toml | 2 +- lib/src/container/mod.rs | 2 +- lib/src/container/ocidir.rs | 449 ++++++++++++++++++ lib/src/container/ociwriter.rs | 2 +- lib/src/container/store.rs | 2 +- lib/src/integrationtest.rs | 110 +++++ lib/src/lib.rs | 3 +- .../fixtures/exampleos-derive-v2.ociarchive | Bin 14336 -> 0 bytes .../it/fixtures/exampleos-derive.ociarchive | Bin 14336 -> 0 bytes lib/tests/it/main.rs | 134 ++++-- 10 files changed, 653 insertions(+), 51 deletions(-) create mode 100644 lib/src/container/ocidir.rs delete mode 100644 lib/tests/it/fixtures/exampleos-derive-v2.ociarchive delete mode 100644 lib/tests/it/fixtures/exampleos-derive.ociarchive diff --git a/lib/Cargo.toml b/lib/Cargo.toml index c45f15604..368b8b9bd 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -25,7 +25,7 @@ indicatif = "0.16.0" once_cell = "1.9" libc = "0.2.92" rustix = "0.31.3" -oci-spec = "0.5.0" +oci-spec = "0.5.3" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 39445ba3e..a1c388250 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -218,7 +218,7 @@ mod encapsulate; pub use encapsulate::*; mod unencapsulate; pub use unencapsulate::*; -mod ociwriter; +pub(crate) mod ociwriter; mod skopeo; pub mod store; diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs new file mode 100644 index 000000000..b25030c59 --- /dev/null +++ b/lib/src/container/ocidir.rs @@ -0,0 +1,449 @@ +//! Internal API to interact with Open Container Images; mostly +//! oriented towards generating images. 
+
+use anyhow::{anyhow, Context, Result};
+use camino::Utf8Path;
+use flate2::write::GzEncoder;
+use fn_error_context::context;
+use oci_image::MediaType;
+use oci_spec::image as oci_image;
+use once_cell::sync::Lazy;
+use openat_ext::*;
+use openssl::hash::{Hasher, MessageDigest};
+use phf::phf_map;
+use std::collections::HashMap;
+use std::io::prelude::*;
+use std::path::Path;
+use std::rc::Rc;
+
+/// Map the value from `uname -m` to the Go architecture.
+/// TODO find a more canonical home for this.
+static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! {
+    "x86_64" => "amd64",
+    "aarch64" => "arm64",
+};
+
+static THIS_OCI_ARCH: Lazy<oci_image::Arch> = Lazy::new(|| {
+    let uname = rustix::process::uname();
+    let machine = uname.machine().to_str().unwrap();
+    let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine);
+    oci_image::Arch::from(*arch)
+});
+
+/// Path inside an OCI directory to the blobs
+const BLOBDIR: &str = "blobs/sha256";
+
+/// Completed blob metadata
+#[derive(Debug)]
+pub(crate) struct Blob {
+    pub(crate) sha256: String,
+    pub(crate) size: u64,
+}
+
+impl Blob {
+    pub(crate) fn digest_id(&self) -> String {
+        format!("sha256:{}", self.sha256)
+    }
+
+    pub(crate) fn descriptor(&self) -> oci_image::DescriptorBuilder {
+        oci_image::DescriptorBuilder::default()
+            .digest(self.digest_id())
+            .size(self.size as i64)
+    }
+}
+
+/// Completed layer metadata
+#[derive(Debug)]
+pub(crate) struct Layer {
+    pub(crate) blob: Blob,
+    pub(crate) uncompressed_sha256: String,
+}
+
+impl Layer {
+    pub(crate) fn descriptor(&self) -> oci_image::DescriptorBuilder {
+        self.blob.descriptor()
+    }
+}
+
+/// Create an OCI blob.
+pub(crate) struct BlobWriter<'a> {
+    pub(crate) hash: Hasher,
+    pub(crate) target: Option<FileWriter<'a>>,
+    size: u64,
+}
+
+/// Create an OCI layer (also a blob).
+pub(crate) struct RawLayerWriter<'a> {
+    bw: BlobWriter<'a>,
+    uncompressed_hash: Hasher,
+    compressor: GzEncoder<Vec<u8>>,
+}
+
+pub(crate) struct OciDir {
+    pub(crate) dir: Rc<openat::Dir>,
+}
+
+/// Write a serializable data (JSON) as an OCI blob
+#[context("Writing json blob")]
+pub(crate) fn write_json_blob<S: serde::Serialize>(
+    ocidir: &openat::Dir,
+    v: &S,
+    media_type: oci_image::MediaType,
+) -> Result<oci_image::DescriptorBuilder> {
+    let mut w = BlobWriter::new(ocidir)?;
+    cjson::to_writer(&mut w, v).map_err(|e| anyhow!("{:?}", e))?;
+    let blob = w.complete()?;
+    Ok(blob.descriptor().media_type(media_type))
+}
+
+fn deserialize_json_path<T: serde::de::DeserializeOwned>(
+    d: &openat::Dir,
+    p: impl AsRef<Path>,
+) -> Result<T> {
+    let p = p.as_ref();
+    let ctx = || format!("Parsing {:?}", p);
+    let f = std::io::BufReader::new(d.open_file(p).with_context(ctx)?);
+    serde_json::from_reader(f).with_context(ctx)
+}
+
+// Parse a filename from a string; this will ignore any directory components, and error out on `/` and `..` for example.
+fn parse_one_filename(s: &str) -> Result<&str> {
+    Utf8Path::new(s)
+        .file_name()
+        .ok_or_else(|| anyhow!("Invalid filename {}", s))
+}
+
+// Sadly the builder bits in the OCI spec don't offer mutable access to fields
+// https://github.com/containers/oci-spec-rs/issues/86
+fn vec_clone_append<T: Clone>(s: &[T], i: T) -> Vec<T> {
+    s.iter().cloned().chain(std::iter::once(i)).collect()
+}
+
+/// Create a dummy config descriptor.
+/// Our API right now always mutates a manifest, which means we need
+/// a "valid" manifest, which requires a "valid" config descriptor.
+/// This digest should never actually be used for anything.
+fn empty_config_descriptor() -> oci_image::Descriptor { + oci_image::DescriptorBuilder::default() + .media_type(MediaType::ImageConfig) + .size(7023) + .digest("sha256:a5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7") + .build() + .unwrap() +} + +/// Generate a "valid" empty manifest. See above. +pub(crate) fn new_empty_manifest() -> oci_image::ImageManifestBuilder { + oci_image::ImageManifestBuilder::default() + .schema_version(oci_image::SCHEMA_VERSION) + .config(empty_config_descriptor()) + .layers(Vec::new()) +} + +/// Generate an image configuration targeting Linux for this architecture. +pub(crate) fn new_config() -> oci_image::ImageConfigurationBuilder { + oci_image::ImageConfigurationBuilder::default() + .architecture(THIS_OCI_ARCH.clone()) + .os(oci_image::Os::Linux) +} + +/// Return a Platform object for Linux for this architecture. +pub(crate) fn this_platform() -> oci_image::Platform { + oci_image::PlatformBuilder::default() + .os(oci_image::Os::Linux) + .architecture(THIS_OCI_ARCH.clone()) + .build() + .unwrap() +} + +impl OciDir { + /// Create a new, empty OCI directory at the target path, which should be empty. + pub(crate) fn create(dir: impl Into>) -> Result { + let dir = dir.into(); + dir.ensure_dir_all(BLOBDIR, 0o755)?; + dir.write_file_contents("oci-layout", 0o644, r#"{"imageLayoutVersion":"1.0.0"}"#)?; + Self::open(dir) + } + + #[allow(dead_code)] + /// Clone an OCI directory, using reflinks for blobs. + pub(crate) fn clone_to(&self, destdir: &openat::Dir, p: impl AsRef) -> Result { + let p = p.as_ref(); + destdir.ensure_dir(p, 0o755)?; + let cloned = Self::create(destdir.sub_dir(p)?)?; + for blob in self.dir.list_dir(BLOBDIR)? { + let blob = blob?; + let path = Path::new(BLOBDIR).join(blob.file_name()); + self.dir.copy_file_at(&path, destdir, &path)?; + } + Ok(cloned) + } + + /// Open an existing OCI directory. + pub(crate) fn open(dir: impl Into>) -> Result { + Ok(Self { dir: dir.into() }) + } + + /// Create a writer for a new blob (expected to be a tar stream) + pub(crate) fn create_raw_layer( + &self, + c: Option, + ) -> Result { + RawLayerWriter::new(&self.dir, c) + } + + #[allow(dead_code)] + /// Create a tar output stream, backed by a blob + pub(crate) fn create_layer( + &self, + c: Option, + ) -> Result> { + Ok(tar::Builder::new(self.create_raw_layer(c)?)) + } + + /// Add a layer to the top of the image stack. The firsh pushed layer becomes the root. + #[allow(dead_code)] + pub(crate) fn push_layer( + &self, + manifest: &mut oci_image::ImageManifest, + config: &mut oci_image::ImageConfiguration, + layer: Layer, + description: &str, + ) { + let annotations: Option> = None; + self.push_layer_annotated(manifest, config, layer, annotations, description); + } + + /// Add a layer to the top of the image stack with optional annotations. + /// + /// This is otherwise equivalent to [`Self::push_layer`]. 
+ pub(crate) fn push_layer_annotated( + &self, + manifest: &mut oci_image::ImageManifest, + config: &mut oci_image::ImageConfiguration, + layer: Layer, + annotations: Option>>, + description: &str, + ) { + let mut builder = layer.descriptor().media_type(MediaType::ImageLayerGzip); + if let Some(annotations) = annotations { + builder = builder.annotations(annotations); + } + let blobdesc = builder.build().unwrap(); + manifest.set_layers(vec_clone_append(manifest.layers(), blobdesc)); + let mut rootfs = config.rootfs().clone(); + rootfs.set_diff_ids(vec_clone_append( + rootfs.diff_ids(), + format!("sha256:{}", layer.uncompressed_sha256), + )); + config.set_rootfs(rootfs); + let h = oci_image::HistoryBuilder::default() + .created_by(description.to_string()) + .build() + .unwrap(); + config.set_history(vec_clone_append(config.history(), h)); + } + + /// Read a JSON blob. + pub(crate) fn read_json_blob( + &self, + desc: &oci_spec::image::Descriptor, + ) -> Result { + let (alg, hash) = desc + .digest() + .split_once(':') + .ok_or_else(|| anyhow!("Invalid digest {}", desc.digest()))?; + let alg = parse_one_filename(alg)?; + if alg != "sha256" { + anyhow::bail!("Unsupported digest algorithm {}", desc.digest()); + } + let hash = parse_one_filename(hash)?; + deserialize_json_path(&self.dir, Path::new(BLOBDIR).join(hash)) + } + + /// Write a configuration blob. + pub(crate) fn write_config( + &self, + config: oci_image::ImageConfiguration, + ) -> Result { + Ok(write_json_blob(&self.dir, &config, MediaType::ImageConfig)? + .build() + .unwrap()) + } + + /// Write a manifest as a blob, and replace the index with a reference to it. + pub(crate) fn write_manifest( + &self, + manifest: oci_image::ImageManifest, + platform: oci_image::Platform, + ) -> Result<()> { + let manifest = write_json_blob(&self.dir, &manifest, MediaType::ImageManifest)? + .platform(platform) + .build() + .unwrap(); + + let index_data = oci_image::ImageIndexBuilder::default() + .schema_version(oci_image::SCHEMA_VERSION) + .manifests(vec![manifest]) + .build() + .unwrap(); + self.dir + .write_file_with("index.json", 0o644, |w| -> Result<()> { + cjson::to_writer(w, &index_data).map_err(|e| anyhow::anyhow!("{:?}", e))?; + Ok(()) + })?; + Ok(()) + } + + /// If this OCI directory has a single manifest, return it. Otherwise, an error is returned. + pub(crate) fn read_manifest(&self) -> Result { + let idx: oci_image::ImageIndex = deserialize_json_path(&self.dir, "index.json")?; + let desc = match idx.manifests().as_slice() { + [] => anyhow::bail!("No manifests found"), + [desc] => desc, + manifests => anyhow::bail!("Expected exactly 1 manifest, found {}", manifests.len()), + }; + self.read_json_blob(desc) + } +} + +impl<'a> BlobWriter<'a> { + #[context("Creating blob writer")] + fn new(ocidir: &'a openat::Dir) -> Result { + Ok(Self { + hash: Hasher::new(MessageDigest::sha256())?, + // FIXME add ability to choose filename after completion + target: Some(ocidir.new_file_writer(0o644)?), + size: 0, + }) + } + + #[context("Completing blob")] + /// Finish writing this blob object. 
+ pub(crate) fn complete(mut self) -> Result { + let sha256 = hex::encode(self.hash.finish()?); + let target = &format!("{}/{}", BLOBDIR, sha256); + self.target.take().unwrap().complete(target)?; + Ok(Blob { + sha256, + size: self.size, + }) + } +} + +impl<'a> std::io::Write for BlobWriter<'a> { + fn write(&mut self, srcbuf: &[u8]) -> std::io::Result { + self.hash.update(srcbuf)?; + self.target.as_mut().unwrap().writer.write_all(srcbuf)?; + self.size += srcbuf.len() as u64; + Ok(srcbuf.len()) + } + + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } +} + +impl<'a> RawLayerWriter<'a> { + /// Create a writer for a gzip compressed layer blob. + fn new(ocidir: &'a openat::Dir, c: Option) -> Result { + let bw = BlobWriter::new(ocidir)?; + Ok(Self { + bw, + uncompressed_hash: Hasher::new(MessageDigest::sha256())?, + compressor: GzEncoder::new(Vec::with_capacity(8192), c.unwrap_or_default()), + }) + } + + #[context("Completing layer")] + /// Consume this writer, flushing buffered data and put the blob in place. + pub(crate) fn complete(mut self) -> Result { + self.compressor.get_mut().clear(); + let buf = self.compressor.finish()?; + self.bw.write_all(&buf)?; + let blob = self.bw.complete()?; + let uncompressed_sha256 = hex::encode(self.uncompressed_hash.finish()?); + Ok(Layer { + blob, + uncompressed_sha256, + }) + } +} + +impl<'a> std::io::Write for RawLayerWriter<'a> { + fn write(&mut self, srcbuf: &[u8]) -> std::io::Result { + self.compressor.get_mut().clear(); + self.compressor.write_all(srcbuf).unwrap(); + self.uncompressed_hash.update(srcbuf)?; + let compressed_buf = self.compressor.get_mut().as_slice(); + self.bw.write_all(compressed_buf)?; + Ok(srcbuf.len()) + } + + fn flush(&mut self) -> std::io::Result<()> { + self.bw.flush() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + const MANIFEST_DERIVE: &str = r#"{ + "schemaVersion": 2, + "config": { + "mediaType": "application/vnd.oci.image.config.v1+json", + "digest": "sha256:54977ab597b345c2238ba28fe18aad751e5c59dc38b9393f6f349255f0daa7fc", + "size": 754 + }, + "layers": [ + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:ee02768e65e6fb2bb7058282338896282910f3560de3e0d6cd9b1d5985e8360d", + "size": 5462 + }, + { + "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", + "digest": "sha256:d203cef7e598fa167cb9e8b703f9f20f746397eca49b51491da158d64968b429", + "size": 214 + } + ], + "annotations": { + "ostree.commit": "3cb6170b6945065c2475bc16d7bebcc84f96b4c677811a6751e479b89f8c3770", + "ostree.version": "42.0" + } + } + "#; + + #[test] + fn manifest() -> Result<()> { + let m: oci_image::ImageManifest = serde_json::from_str(MANIFEST_DERIVE)?; + assert_eq!( + m.layers()[0].digest().as_str(), + "sha256:ee02768e65e6fb2bb7058282338896282910f3560de3e0d6cd9b1d5985e8360d" + ); + Ok(()) + } + + #[test] + fn test_build() -> Result<()> { + let td = tempfile::tempdir()?; + let td = openat::Dir::open(td.path())?; + let w = OciDir::create(td)?; + let mut layerw = w.create_raw_layer(None)?; + layerw.write_all(b"pretend this is a tarball")?; + let root_layer = layerw.complete()?; + assert_eq!( + root_layer.uncompressed_sha256, + "349438e5faf763e8875b43de4d7101540ef4d865190336c2cc549a11f33f8d7c" + ); + let mut manifest = new_empty_manifest().build().unwrap(); + let mut config = oci_image::ImageConfigurationBuilder::default() + .build() + .unwrap(); + w.push_layer(&mut manifest, &mut config, root_layer, "root"); + let config = w.write_config(config)?; + manifest.set_config(config); + 
w.write_manifest(manifest, this_platform())?; + Ok(()) + } +} diff --git a/lib/src/container/ociwriter.rs b/lib/src/container/ociwriter.rs index 6a459d030..8626ae9a9 100644 --- a/lib/src/container/ociwriter.rs +++ b/lib/src/container/ociwriter.rs @@ -82,7 +82,7 @@ pub(crate) struct OciWriter<'a> { /// Write a serializable data (JSON) as an OCI blob #[context("Writing json blob")] -fn write_json_blob( +pub(crate) fn write_json_blob( ocidir: &openat::Dir, v: &S, media_type: oci_image::MediaType, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 7b7d6193c..3b6331212 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -475,7 +475,7 @@ pub async fn copy( let layer_refs = manifest .layers() .iter() - .map(|layer| ref_for_layer(layer)) + .map(ref_for_layer) .chain(std::iter::once(Ok(ostree_ref))); for ostree_ref in layer_refs { let ostree_ref = ostree_ref?; diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index e5a8f0b2d..0385931a6 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -1,5 +1,10 @@ //! Module used for integration tests; should not be public. +use anyhow::{Context, Result}; +use camino::Utf8Path; +use fn_error_context::context; +use std::path::Path; + fn has_ostree() -> bool { std::path::Path::new("/sysroot/ostree/repo").exists() } @@ -12,3 +17,108 @@ pub(crate) fn detectenv() -> &'static str { (false, false) => "none", } } + +fn deserialize_json_path( + p: impl AsRef, +) -> Result { + let p = p.as_ref(); + let ctx = || format!("Parsing {:?}", p); + let f = std::io::BufReader::new(std::fs::File::open(p).with_context(ctx)?); + serde_json::from_reader(f).with_context(ctx) +} + +fn deserialize_json_blob( + ocidir: impl AsRef, + desc: &oci_spec::image::Descriptor, +) -> Result { + let ocidir = ocidir.as_ref(); + let blobpath = desc.digest().replace(':', "/"); + deserialize_json_path(&ocidir.join(&format!("blobs/{}", blobpath))) +} + +/// Using `src` as a base, take append `dir` into OCI image. +/// Should only be enabled for testing. +#[cfg(feature = "internal-testing-api")] +#[context("Generating derived oci")] +pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef) -> Result<()> { + let src = src.as_ref(); + let dir = dir.as_ref(); + let index_path = &src.join("index.json"); + let mut idx: oci_spec::image::ImageIndex = deserialize_json_path(index_path)?; + let mut manifest: oci_spec::image::ImageManifest = { + let manifest_desc = idx + .manifests() + .get(0) + .ok_or_else(|| anyhow::anyhow!("No manifests found"))?; + deserialize_json_blob(src, manifest_desc)? 
+ }; + let mut config: oci_spec::image::ImageConfiguration = + deserialize_json_blob(src, manifest.config())?; + + let srcdir = &openat::Dir::open(src.as_std_path())?; + + let bw = crate::container::ociwriter::RawLayerWriter::new(srcdir, None)?; + let mut layer_tar = tar::Builder::new(bw); + layer_tar.append_dir_all("./", dir.as_std_path())?; + let bw = layer_tar.into_inner()?; + let new_layer = bw.complete()?; + + let layers: Vec<_> = manifest + .layers() + .iter() + .cloned() + .chain(std::iter::once( + new_layer + .blob + .descriptor() + .media_type(oci_spec::image::MediaType::ImageLayerGzip) + .build() + .unwrap(), + )) + .collect(); + manifest.set_layers(layers); + let history: Vec<_> = config + .history() + .iter() + .cloned() + .chain(std::iter::once( + oci_spec::image::HistoryBuilder::default() + .created_by("generate_derived_oci") + .build() + .unwrap(), + )) + .collect(); + config.set_history(history); + let diffids: Vec<_> = config + .rootfs() + .diff_ids() + .iter() + .cloned() + .chain(std::iter::once(new_layer.uncompressed_sha256)) + .collect(); + config.set_rootfs( + oci_spec::image::RootFsBuilder::default() + .diff_ids(diffids) + .build() + .unwrap(), + ); + let new_config_desc = crate::container::ociwriter::write_json_blob( + srcdir, + &config, + oci_spec::image::MediaType::ImageConfig, + )? + .build() + .unwrap(); + manifest.set_config(new_config_desc); + + let new_manifest_desc = crate::container::ociwriter::write_json_blob( + srcdir, + &manifest, + oci_spec::image::MediaType::ImageManifest, + )? + .build() + .unwrap(); + idx.set_manifests(vec![new_manifest_desc]); + idx.to_file(index_path.as_std_path())?; + Ok(()) +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 254394d17..d1695dc66 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -7,7 +7,6 @@ // See https://doc.rust-lang.org/rustc/lints/listing/allowed-by-default.html #![deny(missing_docs)] #![deny(missing_debug_implementations)] -#![deny(unreachable_pub)] #![forbid(unused_must_use)] #![deny(unsafe_code)] #![cfg_attr(feature = "dox", feature(doc_cfg))] @@ -43,4 +42,4 @@ pub mod prelude { } #[cfg(feature = "internal-testing-api")] -mod integrationtest; +pub mod integrationtest; diff --git a/lib/tests/it/fixtures/exampleos-derive-v2.ociarchive b/lib/tests/it/fixtures/exampleos-derive-v2.ociarchive deleted file mode 100644 index 42b91b1187a133f27dac1a11a4c29d757ee0ab20..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14336 zcmeHNcU%)$*N&h9mc@o*4F(hugh`+HsHBxr|9Ep>lJ!668eijonKzHdWPK|d{73$0AkhDGDvS2r|Nk8{`JMmiA{w71;)uj- zI*){O-y475s{G35HZDU0Ry44sX`8w$rK9_l*XcqIZU2V#G;EBH1=l8iDcv#MFN#~otLgddUSps+T15I z0xP!|*YAKd<<`KC)6yBf)AwZ**qu7rIfS=xfhc~Jo&FEq5-{=f^n;r^Y@EOB@};bz zsi)@N*s^JazgmHfFttuBRRo}YSrB(`=9uCJ)!VCGr3<9 zc+>Vo+M>kE60c>Q>tJMBU2*sL$XhRmVWw+@z6p7@Q|7j+x$@i#hUArXkz1}lRzFH3 z*H1i}lr<}8F9-_o=;EBbaNE-UbIxBjIpM!P%m+YP0H9CW%N2DXb^Av-@_$&^AL5@c zWHVU|geK&(1Z*A^Md@@gDyE5rECHRx=JDtZ8i&E6iTMJU75FSFhbI)UiGO@5Di(YO zr+*XwR4SA8G5(4CPh(K&-{b!?X#TUqVHBCbTja}|FA9`Nq<)mqG#iRQ>L-?XQAUSR zd__VDZ*Hi+h%%bO^Y{0W2zWtomfdncp{-OPv6cApyhOGi=G!i(S}&DB5jGT|#7iU# zf@?nPD@HTai=O+!)I1+*wMv=;b0!4%ueSIZ_)93;i*ldK);xL%74hx`v zn0x`1C1mqOe1QOEh&e1iL%?FQQ7VggvpAeZFsylj0&hu>NDvepsHq6PLKZ_4?QpS= z#4mUS?0hU94t~pFj;73>Fn8)$yI@(MosU$&^RbihC4Qqn>HpUKctJDtp$nT7n6f7M zLId3>nq_dvj?a^M6XP>@e36ePi%F!m{9uWXkmqgtG3`)oX*65<$At7x=D3eqw8_ne zLcAjoYpe3q{#XRLJ5`g$NB5(U}Y} z6{R9fo`A+vzQhXD z7NJuaOa_y#VTE?8Cm$}R*b&~5d7F(An3<38llohkO|Z8&6H9zVqfsHkpu;-Cp@~q0 zPerJFzEC6(u=pG*3XKu+MI1yx6$t4<_?!v12q?TQjsSt0pv~}wkeS5KOhW*8+kqlG 
zo}Va$kV`Yuu+G<4Q;w1VgB?<11RM*BdP!%a!};_p@HlMM2^ z{QtFkpiks}+Wd0s|D(HCpH9zHeZ!W8 zBzgKidGKcW%jJ)E9`Sv0)^}h!{L8eP<>wJNE3na^x2kOBa>dxveg1QXCSNz9XHKLh z-xN1KS{As|y{hWbQU1`1r?sfaYEVJIZnT@_*wLHm6%{rYUbUx_x37;*7VJ%|>yjU{ zDm1il*R~?}>iXgJi(-B8^j4Ksb^Ug~$FUMT!h9SS)22+>)hyU0wU|pk^8|B?KCBMGhVYVZ(b8zp99Kju~X5(HK?58z~bFtgVHV%WQ_;?6rfLAX{UDi3j9zZu)PKHo45{m$ejKW zLy0*XG;>-!yMW*b-0qFM4_>FI{$OPQPL8PugOuDNkgtxvf-T+ypq4>uV<>|#$)FaL ztBt1s{dZUZsCw(ExeSenZ2HhQhS%GYDIiQ|gud2@AzC9^T>3}cBx?OlM_=3l*mw4Q%wT*&#E;@b%2ryFi2_$(B4{X6||)Wi&53e$<+Vf)?nZ7~Ha>R@G3QDrkHXuxi8%wM3bu1&Vx- zruXv)&itgsp}Uq319g1(n>c9Ll80$vn*EO3Q>!`_8mCd+QLEZqr@Bn#`4r=**wd@z zdwx#`DV$5tq15p2;qY85N$(GI;OkUr=>SZ%J}E#6pZ@?h>2TS7&CrLilha60O&xz3 z6Tr1fyY=8IgryX4nlaedLa-z zMp{r>bRG+X4%tPJc(V$6TLS{IoCwftOQ>2G6gqaa$!?IfF%t$Irb!bzIzVk7nc7FK z@X)EZ)2Z*IQ%@jAxtz=pgilU^-63Ln5u~sZRBM4g9iQ~L;RVq zFic0z#>wc3R@^Baxb9JL{l_$@WBK6N_%`hMigG16A9syt1Q#3fTDURlG`%ViQ4w8_ z^-+2{j5J4DKbeCyw1Df4IA%Q?x5Jy07nb0%PS-%bCy1|luNizJx53>yG-1U(H9#bQ z?9G~daZyzooTst08k{iKgf#*}25XFsXp`4PDY@%`YxxISo$IA#jVlCL6WY_Evxzk0 zsdJw-OsgL$+**gt$-zU0K{Pz^UFB7BYb`)-#Qgsto;3~}(Vjd~r%&gvW4fR+fjWr1 zo~Co)ydxVgRuehT96-`P#jI54x@h?^4~qK|lfUG=NwDDP0@zNctDlZ68?@#C2Q9UK*yME7 z@w%g47h1wg{$+dY{ECIcYLA{=G^*7@bUCy31}CuhK)VbN>QH{(z(vTz>5h31SyNM3 zlQ+7YalZRFZ_SQ-j`!vcxIKUWO@o>TvL)62jfpQd*4HO{`HxiMO1=f<;PiJ3Wzr}Q!t!eYz5$5x@>@bYzayIwOnTK(m zw$FJyxB`!jT2j)~l+>z!#U_4l!ON%|y#80^M91`0*@WfmO^}gG9(6M`={t2?4yT90Y=jl#sa{JqgZ0W*1JEUVn_ALy5 zn-P3Sk>h_s6+ak+D6rK5Z(A)rA2mFPos#Oq*_y#x9A2FXnr~DT6yA;oUQjZxZb| zdU#2Xf(JbYjtQO>c3^vXU;Uv^hg$XMG%^737OT{@cWHq zqdRsWqQgzmi-p1VNgy|&-TBdzBA0XlZ||#?+K9%>fsxY0yPQx()WzL*Dqm&O4haJL z51Z0wrAL3xfB|t$Sf$EyC(u8z+v=bdbrwZJ(tEFWUO2<5D)+sIv(iOTY zIQIVW){6_~gVRr%!><%NOAkA!j=jX5x^>M`V`uTS;ZbA2zpkg`dCC-Lr>v$o^a6Jn zk{!>AUrhcvuD5tou5W=gJN3QisW{`4O25)9{?l!#1!|vSvfpA_MR|xgQMERBPV$P1 zGOu&K58pq1I?Y`1Mh2l=615wc*MMuPG@t1SckV1WW_mDh;fBPxh%m}_|9kZEh=5y* zB#!BqHcN!*T|4WY+jhNBjqO*0oN~`+;0g)vhsLKIy5BPtuRjFpZQ`?DVKPNm^C<%J zKsheP@ z)yr*dPAN``q>Pu>1$dU~w$*+X*1qR_Q%Iy*a8&oomTE^2*op3tlChr4{$$&&DR-Yr8)HQ%rIj zaLI_y%w$^xA__zpjHQUf5r6xnU4gIgMQLmT2;fZOLxcs&n^JpN)0Iw1856m?f)~MuG zXC2-E^A-T{IIQBHK5}-+@vElmFK#kfheVEa-{XcsUxYg39WEqpcfA+%%A!(0+Ar z2zPAY3ERvaFxbS2HUnB(eYx#^$|=^xxsO&=Sq|h%lR>l5E=%{O!g|-4I~%S9Wsf^h zlXC%t$vQsb>FKAZ) zuXgg36QFK9*frPtRtI-dqy&Mr?;6l66QtDwSv0oQ%smxc84n^er!L(`GLOZo z!Q}TiJuok_DA0Y@@NfKx@e2K+ZxKlFts8Zb?^W!^Fj8A`CTRJQaq@J zMTXrHNEKU`7G-wrV!f=V>2br;)6q%WTnkADXSj5J{Jy#V@HKE8v~o%I7r>$UO007k zFi&n$R^M#Q0QUQz-`A0Y1dpxaZZOC*vD%W+jbu>ql~WxYY6+OrZ* zI|cN&E`2*4y*Ml2K<&Oe!!8%{bo)cyi~`gSo^ zZ32e2a&IiGY~ltZ;AbZl9gjS|C#2v!s3^IL_4l%gD0AP|H&k7J0z7M%qTtq~C~iL? 
zxh=URp5EA6ydK}6QYEh(3kqtt@t2Lcob9x_tH+$8KCUHQL`DxTIY$rfJ9%Xpqk9)O z{>;MmyhRZ`_ME`&;*!DLlQp2y1o)|ox0k8OEhQknSon_Jt}0bDVT)phH{#s577(2S zoaU;r5soPxtcMN1BpggW%$qoR*4imoJJxp?dTjz{%Vq0fMuwZuT)gJ^!w~<<8o*Gs zaW#omFvef)^`Q3wqL#QlS?smJ&eHte`P*sX%G_$>Az5{&calMi<*|Zfe-P>RXz;SG zcBLzuxPzJLWBl&Dvg|up-jIrotK)I8#-qk(w=T~D`(U$d3e zgVF*vy#Y-z*gU+{5g1ISaKWK*3h-iUS6No)j?LXV6<#BqGJnaUEI$+O> zIc|O<)Tn4#mB?@nwLaQ-L2sXk5f!Ot*1h&Kc$FGI-NgdYk9rE&SwLZp&ub&Eto|u` zweiO2LIpT-yN!GNN9PXL#p4g39N);+ z8iid@?Tch@S`E(D9P8d`OFCVdGokWuh`YZ$EMT{{!i1l84Xo?-9Qe!}KeI> zqR(t4l~a^E!sZ<&&U zV`JcxUhq9&*GQMkL%miPC*rX=0LyI#t~BQS2&Bu&Z8sK7-(G1N@ixY&kN=V%Z4j&6 zjuo|U7ArTqjIs&o)eWYlW4BwdDid&2RqEC|>!n`4`zryEmXup8$H~nqMKhS&j2X71 zf$>(CAh{3E1hGRvzfk=x3kLouR07EQ*S|b;GAJ@QfN|}Sj&pB<4)2p^U!2#@t(-LL zcAY%S6vVoN;$wLHaHdBu`yi52M!&+a0-v{cjGkYbZhdJ@gxMsLNsi(8r~`ec9qq%5 zus;C?E(SF)!fUY}Cu8w6m|YgV1gpxqhU(bPC%~TFupTVi4{F+6c1fe%RRtq)gF_&@ zM)lLip)bJF8mwyGZb}-6GY_=iTo_j-g>Pqxvqwh3M(Lysy>Ilc_FIUR_z6WTY>9Il z-$MEanO}bYkHB*mpPc{uaQ+ivQoq0d|8^evDEY6>|M>F!Bx3ljjQCplch9lt)_ma< z5MA_jr^|>Bo|;oYEE?_mSGF-(j=*;Wz9aCz4uSsyDUIH* diff --git a/lib/tests/it/fixtures/exampleos-derive.ociarchive b/lib/tests/it/fixtures/exampleos-derive.ociarchive deleted file mode 100644 index 5b4162f9a161aed6963d9c006bc20ddc5454c64a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14336 zcmeHN30RD4`yYi$IoV~Mnj(pseeuOuDvl6YqSdT5RFkG8$$PS#HY5k-XtQKbI>=I) zEU7~^B(j$F6jIT&%)HP4dB+ULIfw6j*Kz*Wf6;Z(v)=dp{_fxXd){Uqu8){2vBv$Q zKSUyx$e_`1#P>hH|Ba2}NK_J;M4}VP6b6n6n>0GknD&oGr8XWU3FHL8kOIWwz%Tjx zck{o0xG&lIA2i2RA=Bvp6b=5P{8NcA{}jlCZ~6c4pvfosr%_o928T;yF}M^e zjYlR^m|PB-DHM>H91fpBBME3c8jH_^!z>DmBBTo`R2G>=6B79x4nxTMOu$4Ekw~Xf ze_!{SK8Zpne_H=!Dv?aZ852Ka@2d{~Kd=8#0xv+o2^8=NQwU@tnKYTmnoMFWAu*>= zm{W)hD>8*dW)SHlk_%xHffK;<76l4;fk6QRxSr$7r&HmuSOPmfBEO(@u)`Dk2}NFn zDWL>AzqN!Zu7rg%mdu%I9V7{`_7U?qKGqVh$ZyJT`X9OnjR`6BHhoPGS(bbQYCHq(iDx88j}BMCUWO0xploqzYMdE|o`TFqk9~hmKL7%3yJs zEFqIeVK9i$X zk;&)9T= zpV;5Rc!sU5u~6hAm_p-FX;eCeE?`p0TppJ}6)?$Mh%t>vC&A~WvbYomhbH6^X%reu z$Rm@OJibuCqcQ{>s7rjlvB=L@m4Eou0Rn4|pCA~sYm9{e(<<=8@@ntoINw;sGjFjZ z&|0vL3HlZo z{HnvD1PRYu;LBO2(MaL#R%}x=0`+&_D=?`Ot%rijpi!YzsZ|v!72&T1sM-MIvx2YsmR{>c z{(lmqKtLoj=u82fCZG$sP?;D+8k5WW_wN#qNlZu03oK8s7@(^yQJ zfJuR)A7X?eN5&NN>xITA6Dd4_kRgEHgd7r`!Q-+7Oc)>(av_;0WI$PEF$6qVpj;Xh zQWBp-qA}sw%%U^7R5I&BXk-#K3?7;sKR!yivnz|2)QoL*z^s5#w#EK-VQpf8f)FZO?iS(>;Zd4==xVZNP~6?%Eae znsph^X?xev68j6!hvg(Af^)-? zK~vY0L;n&7R|tw3GiH_*$&c<^VtaiBYA~nBezQwm>AV31{$C0s22~?34QcyJz5M zu=;ZSlgP9`)<2a-`Bwj*f%B^lf2{wP&$NGV^BU6UdAe`t>fmHg-zN`VuWefUc;9i~ zCt1FuGU5L;>ji!u0Sf}!bOtM`=C74atvu+z*d*olP|Bs5q?BS|+oRP1``qg49-ZKt z)I4ou3M@t!tVv`Vm`|OugHls7>B`IQO#Gg$Q7OCwNzMK8qc?zQrZh z7tQQcSk$-d@p~LALc>j`A<@W^0{s_xF}za>`+PxYwm9_Xg3_~ya65Eo-q&3+*3?# zMI#P@B2(0da{C2h-8oYQ!xCyosb zL(pa|<@+etp!nz(FuI3b0`isd*Ae$r0KJS>8bTXbNIH$6T4^{NXun0)fVwxHs%aQR zMEm=`A)N0@A%IX#5ZdY>#;SvmJBU2vl0;kRl9n4nR8&AUqP>tTMl446S zlIkFHfn^@5f$FmOr;(1Au|1B|5siB_~|D?M$%;#vgTkHylXi3K`T0kUzT z>6tFrY$yb93d7;u8VrZgXfxca$`-`s3Xr_S=2O@O3fQdx{%_f^+k!B=@qdbn9T*H_ z!CV8J+O>|_H5;dP-AfIF;YSz>x*!YzonqX37%H*7J?sM15av~x!UHhsP6ewT>tY1? 
z3oa!2F!M0d963(>}X|%RviehiE2TH^my7$G$nTaW)0FR2e;c$ z#Bw2OjdrB0s6ZusZh?GH5MTFBHTYUO33ct&j+75n0g(W*cc}CtPf;0kLltQ~IBlvb zYa$39qY5^>OWGXS!`=#vl&^Og9eX- zXn3OgtLvneYJgm^_5VRVs}eY&J7uEApC(@?HAOWP`oMHOLzBRz$G7FxW143QAnBhX z7K#h~)cjZq&3%cTrOanqlDJ68n|y@Jy1$0rj(oVuFAb z7Ph?;m1Sb_X>@iPvZD)jKhIux`jh~7n4g2-S4|cssh?f;*#RHO7qzkugE>SUs-!C* zlvr5{VNq$+T?2|Ng@>vj{}Xk`Fm$MvYBCa|Dd~L86Kk{vA*sWX8mVy~ezeW=&Zjw% zW2><+JPdxr2n{IkeE>VAJ{)XN;r-vAO`(|a9{frm;n`&-Sesc*e!lN9C|-ADe$HDl z_m)ya$NR*Y*r zamHnGr-$I$rN$ywz~E8V=R8Oz+#91@h!5vF+;~WzlSa4S=5&71y~j5;?JaXCTQc(Q zvO~o>4G$zM>;2o3UTkY=Nnvg9s^4E_({3ZJY{|6|*L6GktYQu;^tT&PU8FU%Xn9c* zv0}NUXyMa^x3}&zT5V<2v1Eej(w%#C!~13Bo>x0 zM^8$eGI^W>FXfHlZPx-K#7Xy9A+pH4#M0WA+2o_VfDz+n4_WUqf;DnvTsu;$@Z1Nq z4<}k2u^=rVh;Ul(v@Y`JS=8me^H^lD)ogC~`6XKZcg#+{d%P=e`7&_!Nk`cA!bRd^ zc8Zft$WvGS%St2*%@`kP1O9b8?S`jBmNk1LrF9S}rHb}GD}Q1CQ`}(T_FUfrOGf%T z&tKvU&-D0JUgkdConD~yDaZS{lWVGjg-MFdL5ow?)l_+1@O}91>C^8`Wv?X=%9W9c zz_bC}Qe^ndO(-p0e$wd3jTJFTap9qaJ^p2s>hLvpTtp6;S9gf`nfiUTF6_Qts6-C+ zfShX2XW%*x9nOqTJ9>X$2-{1TDK^i5~;Oasu^L}1y@_Bx&yot>IF7`H7z z*gwrSV5f)ZD${Q70NY-q9Z_Y;kBy^-CH8SJIvCdj?ByUNUpf+K4^t}Hb!Kg?VB~hN zpX(Zse|fQoP4yY~{kLA;XVcv)0&mBp4VEV4Dyxsu6-~Rl9Mc?S$>*AG^U%wRyB3G& zNO$VR_TUJmsJP3uB*(k%v3X0FpRW)VtS+OZ)UH3;KvRSTU^O`-SRpl}9lUKCIw}SH_w;oLX zbqhrC%ApqXn9;PEwF*27IM*Yo$NOF7jyb1Q969Upmb~4jGp8I~q&sKvs#B?X(=8?& zEGl)-8l-K6zsFdvsRwEX|y$O z`_L`K2%FivMwq3YbHTk?nIMlGc->&Rj=k=6W25UEG}bb1g5L6%{hPX{60Z_cK*n8# z^lsgyU+ZEPR9s5G^rF(UqV3*5zbzH5jhoLlRNb$*()PHI41f7el(S*KSFe6OmUc3B zRM7f0<^9EuF2TUd!B7!c)eIc0TtU+IQTT-Jn`466Qv*(0UD^waO_(%kq`cFY-R;*i z+p;|O(S|znQEYJv=+N75?$%z|;yiy}>-E6wX@?sw9qE?bmi#pM3P5+&ROSRcJ2;{# zvoy2dH1N6p@D)!e`+2U_#W&Y1V-;=91I?4!$h*DiVW?*u@6JP4wB-@lGturB-{rQ8 znojB6BI}p%pM)CNCp*oXw8);c<6PDPGTK5u4MA zS_2mhta`Y(csDP3)FxWhw!x!DAGjJ&yM_Glz|09*oii(*M48UMf>mf6BA(xPLVt+l z#vnKSHG?jJj7A`dLUtLurGx9!LBypws}AB!W087b{|==D+=wU%u%P8{J?TB0LOL%Cebu9zebfWKUS8FWQpkLDDVJHAukK>{?ZFNxz@v>VZb5bkELZ z&f4u{N3N-jc`#Eu&jeDQibuqQ|$P>^M1y$^ni`l*j~!v|g6u#$V-+!5`zyoU&l^ z?3=w?dYRms!Pl1gc7;AIW zvWs^!!g_M+4aZ(?KD!SO!UHN^{p${x3kC4GHv|IUYZXZBW+DbrZsce zNZSd+tUbxD9WNtvrky2(p`qX`W$N)4In#o-kM6$(5((~`j)_)WWT0|;`k{WWNhMwX(L4nW7Azy?RB$-s+U z`jX3idv_T0DZFwaYV`IW3%l3>Xhso;JZ#H}K4n@IqF1uIPN2Jq)DmU5e6UaWgqrm8 zTVDC;yiAXu>tsgMj(iFjmx0U@UD}0TU;ktFM#F7Ug)(sbZWsI14~u%;7EV8QW_lYd zaeRT>+P6QaH7IH?-qpG2;I~2XMj}kva7X0nI2pO3I2gg$z7b?Kob2CcXC|d5XGZO@ zVA~P^k9GlCFh$VMVg9&7*Zg1Zv`biQKhAsez@bH(OGbi->6ykeCaz44%Pd?!?tEQb zEMRnZLc!0t1-2MG2R`$s&mRzi4u29oX56!px3+Sdm;Rc&f3LO2#5mHY4%T*f`HjrHLhO1*!n?QLPfF_;?!kPw z`|atZlNHat_e1lGEgz-qFLjXK?-?I=D0vYl>ijO;4bkfJc5_D954{$?rr`L@)Tx=< z`lgn8XQb)f5;f`GTO!#yKz=y*zH=}tpPhyxHt;XKpfX_HMv+QGyf&66p|Lpt$?X8n zWZJTDV!t!HiaG5iN3i*EGI%@7durc~bBo11L);@5+(7q2Pp~ z(sl6VCarw8mpmY@s5V=R;yV0g&~t zetzhvQ=)SiVcQaWFDVAS-lZ(eTiVU8oweX@v-GkNh;;+yC(-!vG><{HfjGx=!wW+T zeBPAmJ-<5F^6I8=<5{?&Il9v$4-fnP#1Kxn?P)N|9W=lSZ$t*1iA6J@y0|of4b^O2 zWo+NmAT<%52dfW(hAyZ5;wU#o!9-N&D9CP5{J72J1z6R9)GbXUWPmu+0NWjfaaCgY znJo7GgIc!2}Et@c;%F=+OD;TFi>y(r: Result, s: impl AsRef) { let s = s.as_ref(); let msg = format!("{:#}", r.err().unwrap()); @@ -471,30 +467,32 @@ async fn test_container_import_export() -> Result<()> { // Test without signature verification // Create a new repo - let fixture = Fixture::new()?; - let import = ostree_ext::container::unencapsulate(&fixture.destrepo, &srcoci_unverified, None) - .await - .context("importing")?; - assert_eq!(import.ostree_commit, testrev.as_str()); + { + let fixture = Fixture::new()?; + let import = + ostree_ext::container::unencapsulate(&fixture.destrepo, &srcoci_unverified, None) + .await + .context("importing")?; + 
assert_eq!(import.ostree_commit, testrev.as_str()); + } Ok(()) } -/// We should reject an image with multiple layers when doing an "import" - i.e. a direct un-encapsulation. -#[tokio::test] -async fn test_container_import_derive() -> Result<()> { - let fixture = Fixture::new()?; - let exampleos_path = &fixture.path.join("exampleos.ociarchive"); - std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_OCI)?; - let exampleos_ref = OstreeImageReference { - sigverify: SignatureSource::ContainerPolicyAllowInsecure, - imgref: ImageReference { - transport: Transport::OciArchive, - name: exampleos_path.to_string(), - }, - }; - let r = ostree_ext::container::unencapsulate(&fixture.destrepo, &exampleos_ref, None).await; - assert_err_contains(r, "Expected 1 layer, found 2"); +/// Copy an OCI directory. +async fn oci_clone(src: impl AsRef, dest: impl AsRef) -> Result<()> { + let src = src.as_ref(); + let dest = dest.as_ref(); + // For now we just fork off `cp` and rely on reflinks, but we could and should + // explicitly hardlink blobs/sha256 e.g. + let cmd = tokio::process::Command::new("cp") + .args(&["-a", "--reflink=auto"]) + .args(&[src, dest]) + .status() + .await?; + if !cmd.success() { + anyhow::bail!("cp failed"); + } Ok(()) } @@ -502,24 +500,66 @@ async fn test_container_import_derive() -> Result<()> { #[tokio::test] async fn test_container_write_derive() -> Result<()> { let fixture = Fixture::new()?; - let exampleos_path = &fixture.path.join("exampleos-derive.ociarchive"); - std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_OCI)?; - let exampleos_ref = OstreeImageReference { + let base_oci_path = &fixture.path.join("exampleos.oci"); + let _digest = ostree_ext::container::encapsulate( + &fixture.srcrepo, + TESTREF, + &Config { + cmd: Some(vec!["/bin/bash".to_string()]), + ..Default::default() + }, + None, + &ImageReference { + transport: Transport::OciDir, + name: base_oci_path.to_string(), + }, + ) + .await + .context("exporting")?; + assert!(base_oci_path.exists()); + + // Build the derived images + let derived_path = &fixture.path.join("derived.oci"); + oci_clone(base_oci_path, derived_path).await?; + let temproot = &fixture.path.join("temproot"); + std::fs::create_dir_all(&temproot.join("usr/bin"))?; + std::fs::write(temproot.join("usr/bin/newderivedfile"), "newderivedfile v0")?; + std::fs::write( + temproot.join("usr/bin/newderivedfile3"), + "newderivedfile3 v0", + )?; + ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot)?; + // And v2 + let derived2_path = &fixture.path.join("derived2.oci"); + oci_clone(base_oci_path, derived2_path).await?; + std::fs::remove_dir_all(temproot)?; + std::fs::create_dir_all(&temproot.join("usr/bin"))?; + std::fs::write(temproot.join("usr/bin/newderivedfile"), "newderivedfile v1")?; + std::fs::write( + temproot.join("usr/bin/newderivedfile2"), + "newderivedfile2 v0", + )?; + ostree_ext::integrationtest::generate_derived_oci(derived2_path, temproot)?; + + let derived_ref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: ImageReference { - transport: Transport::OciArchive, - name: exampleos_path.to_string(), + transport: Transport::OciDir, + name: derived_path.to_string(), }, }; - // There shouldn't be any container images stored yet. 
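The `oci_clone` helper above shells out to `cp --reflink=auto`, and its comment notes that explicitly hardlinking `blobs/sha256` would be the better long-term approach. A minimal sketch of that idea, assuming a plain OCI directory layout; the function name and error handling are illustrative only, not part of this series:

    use anyhow::Result;
    use std::path::Path;

    // Hypothetical hardlink-based clone: OCI blobs are content-addressed and
    // immutable, so sharing them between layouts via hardlinks is safe.
    fn oci_clone_hardlinked(src: &Path, dest: &Path) -> Result<()> {
        let blobs = "blobs/sha256";
        std::fs::create_dir_all(dest.join(blobs))?;
        // index.json and oci-layout are small; copy them outright.
        for name in ["index.json", "oci-layout"] {
            std::fs::copy(src.join(name), dest.join(name))?;
        }
        for entry in std::fs::read_dir(src.join(blobs))? {
            let entry = entry?;
            std::fs::hard_link(entry.path(), dest.join(blobs).join(entry.file_name()))?;
        }
        Ok(())
    }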
let images = ostree_ext::container::store::list_images(&fixture.destrepo)?;
     assert!(images.is_empty());
 
+    // Verify importing a derived image fails
+    let r = ostree_ext::container::unencapsulate(&fixture.destrepo, &derived_ref, None).await;
+    assert_err_contains(r, "Expected 1 layer, found 2");
+
     // Pull a derived image - two layers, new base plus one layer.
     let mut imp = ostree_ext::container::store::LayeredImageImporter::new(
         &fixture.destrepo,
-        &exampleos_ref,
+        &derived_ref,
         Default::default(),
     )
     .await?;
@@ -529,6 +569,7 @@ async fn test_container_write_derive() -> Result<()> {
     };
     let expected_digest = prep.manifest_digest.clone();
     assert!(prep.base_layer.commit.is_none());
+    assert_eq!(prep.layers.len(), 1);
     for layer in prep.layers.iter() {
         assert!(layer.commit.is_none());
     }
@@ -536,7 +577,7 @@ async fn test_container_write_derive() -> Result<()> {
     // We should have exactly one image stored.
     let images = ostree_ext::container::store::list_images(&fixture.destrepo)?;
     assert_eq!(images.len(), 1);
-    assert_eq!(images[0], exampleos_ref.imgref.to_string());
+    assert_eq!(images[0], derived_ref.imgref.to_string());
 
     let imported_commit = &fixture
         .destrepo
@@ -560,7 +601,7 @@ async fn test_container_write_derive() -> Result<()> {
 
     // Parse the commit and verify we pulled the derived content.
     bash!(
-        "ostree --repo={repo} ls {r} /usr/share/anewfile",
+        "ostree --repo={repo} ls {r} /usr/bin/newderivedfile",
         repo = fixture.destrepo_path.as_str(),
         r = import.merge_commit.as_str()
     )?;
@@ -568,23 +609,24 @@ async fn test_container_write_derive() -> Result<()> {
     // Import again, but there should be no changes.
     let mut imp = ostree_ext::container::store::LayeredImageImporter::new(
         &fixture.destrepo,
-        &exampleos_ref,
+        &derived_ref,
         Default::default(),
     )
     .await?;
     let already_present = match imp.prepare().await? {
         PrepareResult::AlreadyPresent(c) => c,
         PrepareResult::Ready(_) => {
-            panic!("Should have already imported {}", &exampleos_ref)
+            panic!("Should have already imported {}", &derived_ref)
         }
     };
     assert_eq!(import.merge_commit, already_present.merge_commit);
 
     // Test upgrades; replace the oci-archive with new content.
-    std::fs::write(exampleos_path, EXAMPLEOS_DERIVED_V2_OCI)?;
+    std::fs::remove_dir_all(derived_path)?;
+    std::fs::rename(derived2_path, derived_path)?;
     let mut imp = ostree_ext::container::store::LayeredImageImporter::new(
         &fixture.destrepo,
-        &exampleos_ref,
+        &derived_ref,
         Default::default(),
     )
     .await?;
@@ -604,16 +646,18 @@ async fn test_container_write_derive() -> Result<()> {
     assert_ne!(import.merge_commit, already_present.merge_commit);
     // We should still have exactly one image stored.
     let images = ostree_ext::container::store::list_images(&fixture.destrepo)?;
+    assert_eq!(images[0], derived_ref.imgref.to_string());
     assert_eq!(images.len(), 1);
-    assert_eq!(images[0], exampleos_ref.imgref.to_string());
 
     // Verify we have the new file and *not* the old one
     bash!(
-        "ostree --repo={repo} ls {r} /usr/share/anewfile2 >/dev/null
-         if ostree --repo={repo} ls {r} /usr/share/anewfile 2>/dev/null; then
+        r#"set -x;
+        ostree --repo={repo} ls {r} /usr/bin/newderivedfile2 >/dev/null
+        test "$(ostree --repo={repo} cat {r} /usr/bin/newderivedfile)" = "newderivedfile v1"
+        if ostree --repo={repo} ls {r} /usr/bin/newderivedfile3 2>/dev/null; then
           echo oops; exit 1
         fi
-        ",
+        "#,
         repo = fixture.destrepo_path.as_str(),
         r = import.merge_commit.as_str()
     )?;
@@ -621,14 +665,14 @@ async fn test_container_write_derive() -> Result<()> {
 
     // And there should be no changes on upgrade again.
let mut imp = ostree_ext::container::store::LayeredImageImporter::new( &fixture.destrepo, - &exampleos_ref, + &derived_ref, Default::default(), ) .await?; let already_present = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => c, PrepareResult::Ready(_) => { - panic!("Should have already imported {}", &exampleos_ref) + panic!("Should have already imported {}", &derived_ref) } }; assert_eq!(import.merge_commit, already_present.merge_commit); @@ -641,11 +685,11 @@ async fn test_container_write_derive() -> Result<()> { None, gio::NONE_CANCELLABLE, )?; - ostree_ext::container::store::copy(&fixture.destrepo, &destrepo2, &exampleos_ref).await?; + ostree_ext::container::store::copy(&fixture.destrepo, &destrepo2, &derived_ref).await?; let images = ostree_ext::container::store::list_images(&destrepo2)?; assert_eq!(images.len(), 1); - assert_eq!(images[0], exampleos_ref.imgref.to_string()); + assert_eq!(images[0], derived_ref.imgref.to_string()); Ok(()) } From 90c6556c6e629c6b6be313f8903cc27c3c06010c Mon Sep 17 00:00:00 2001 From: Joseph Marrero Date: Tue, 4 Jan 2022 16:52:49 -0500 Subject: [PATCH 253/775] lib/src/cli: Add commit verb that validates /var --- lib/src/cli.rs | 5 +++++ lib/src/commit.rs | 56 +++++++++++++++++++++++++++++++++++++++++++++++ lib/src/lib.rs | 1 + 3 files changed, 62 insertions(+) create mode 100644 lib/src/commit.rs diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 6466779f2..d60a8d309 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -13,6 +13,7 @@ use std::ffi::OsString; use std::path::PathBuf; use structopt::StructOpt; +use crate::commit::container_commit; use crate::container as ostree_container; use crate::container::store::{LayeredImageImporter, PrepareResult}; use crate::container::{Config, ImageReference, OstreeImageReference, UnencapsulateOptions}; @@ -114,6 +115,9 @@ enum ContainerOpts { cmd: Option>, }, + #[structopt(alias = "commit")] + Commit, + /// Commands for working with (possibly layered, non-encapsulated) container images. Image(ContainerImageOpts), } @@ -468,6 +472,7 @@ where Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt), Opt::Container(o) => match o { ContainerOpts::Info { imgref } => container_info(&imgref).await, + ContainerOpts::Commit {} => container_commit().await, ContainerOpts::Unencapsulate { repo, imgref, diff --git a/lib/src/commit.rs b/lib/src/commit.rs new file mode 100644 index 000000000..02317df82 --- /dev/null +++ b/lib/src/commit.rs @@ -0,0 +1,56 @@ +/// This module contains the functions to implement the commit +/// procedures as part of building an ostree container image. +/// https://github.com/ostreedev/ostree-rs-ext/issues/159 +use anyhow::Context; +use anyhow::Result; +use std::fs; +use std::path::Path; +use tokio::task; + +use crate::container_utils::is_ostree_container; + +/// Check if there are any files that are not directories and error out if +/// we find any, /var should not contain any files to commit in a container +/// as it is where we expect user data to reside. +fn validate_directories_only(path: &Path, error_count: &mut i32) -> Result<()> { + let context = || format!("Validating file: {:?}", path); + for entry in fs::read_dir(path).with_context(context)? 
{ + let entry = entry?; + let path = entry.path(); + + let metadata = path.symlink_metadata()?; + + if metadata.is_dir() { + validate_directories_only(&path, error_count)?; + } else { + *error_count += 1; + if *error_count < 20 { + eprintln!("Found file: {:?}", path) + } + } + } + Ok(()) +} + +/// Entrypoint to the commit procedures, initially we just +/// have one validation but we expect more in the future. +pub(crate) async fn container_commit() -> Result<()> { + if is_ostree_container()? { + println!("Checking /var for files"); + let var_path = Path::new("/var"); + + let mut error_count = 0; + + task::spawn_blocking(move || -> Result<()> { + validate_directories_only(var_path, &mut error_count) + }) + .await??; + + if error_count != 0 { + anyhow::bail!("Found content in /var"); + } + } else { + anyhow::bail!("Not a container can't commit"); + } + Ok(()) +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 254394d17..e9630f709 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -35,6 +35,7 @@ pub mod tar; pub mod tokio_util; mod cmdext; +pub(crate) mod commit; pub(crate) mod objgv; /// Prelude, intended for glob import. pub mod prelude { From 8220c01d60d576c53c01372a64a09be35af0e586 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 12 Jan 2022 15:04:41 -0500 Subject: [PATCH 254/775] container: Drop `use super::*` Came across this in a refactoring. Glob imports should be used sparingly. --- lib/src/container/encapsulate.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index a28325ecc..90188ecd8 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -1,12 +1,16 @@ //! APIs for creating container images from OSTree commits use super::ociwriter::OciWriter; -use super::*; +use super::{ociwriter, OstreeImageReference, Transport}; +use super::{ImageReference, SignatureSource, OSTREE_COMMIT_LABEL}; +use crate::container::skopeo; use crate::tar as ostree_tar; use anyhow::Context; +use anyhow::Result; use fn_error_context::context; use gio::glib; use ostree::gio; +use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; use std::path::Path; use tracing::{instrument, Level}; From e0c4c849d9901e0de03744a6200117424f1b7158 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 11 Jan 2022 12:52:36 -0500 Subject: [PATCH 255/775] integrationtest: Use mut setters from oci-spec Depends https://github.com/containers/oci-spec-rs/pull/87 --- lib/Cargo.toml | 4 ++- lib/src/integrationtest.rs | 51 +++++++++++--------------------------- 2 files changed, 18 insertions(+), 37 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 368b8b9bd..297a49add 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -25,7 +25,9 @@ indicatif = "0.16.0" once_cell = "1.9" libc = "0.2.92" rustix = "0.31.3" -oci-spec = "0.5.3" +# oci-spec = "0.5.3" +# Until the next release +oci-spec = { git = "https://github.com/containers/oci-spec-rs" } openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 0385931a6..5f043b455 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -63,45 +63,24 @@ pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef let bw = layer_tar.into_inner()?; let new_layer = bw.complete()?; - let layers: Vec<_> = manifest - .layers() - .iter() - .cloned() - .chain(std::iter::once( - new_layer - .blob - .descriptor() - 
.media_type(oci_spec::image::MediaType::ImageLayerGzip)
-            .build()
-            .unwrap(),
-        ))
-        .collect();
-    manifest.set_layers(layers);
-    let history: Vec<_> = config
-        .history()
-        .iter()
-        .cloned()
-        .chain(std::iter::once(
-            oci_spec::image::HistoryBuilder::default()
-                .created_by("generate_derived_oci")
-                .build()
-                .unwrap(),
-        ))
-        .collect();
-    config.set_history(history);
-    let diffids: Vec<_> = config
-        .rootfs()
-        .diff_ids()
-        .iter()
-        .cloned()
-        .chain(std::iter::once(new_layer.uncompressed_sha256))
-        .collect();
-    config.set_rootfs(
-        oci_spec::image::RootFsBuilder::default()
-            .diff_ids(diffids)
+    manifest.layers_mut().push(
+        new_layer
+            .blob
+            .descriptor()
+            .media_type(oci_spec::image::MediaType::ImageLayerGzip)
             .build()
             .unwrap(),
     );
+    config.history_mut().push(
+        oci_spec::image::HistoryBuilder::default()
+            .created_by("generate_derived_oci")
+            .build()
+            .unwrap(),
+    );
+    config
+        .rootfs_mut()
+        .diff_ids_mut()
+        .push(new_layer.uncompressed_sha256);
     let new_config_desc = crate::container::ociwriter::write_json_blob(
         srcdir,
         &config,

From 5a05ff780c63692954347d71e9de711ad3fade56 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 12 Jan 2022 15:25:34 -0500
Subject: [PATCH 256/775] Port to ocidir, drop ociwriter

See
https://github.com/ostreedev/ostree-rs-ext/pull/198#issuecomment-1010139066

Basically I messed up a merge.  We want to drop `ociwriter.rs`.  Port
all code to the new `ocidir.rs` which supports derivation and uses the
oci-spec types.
---
 lib/src/container/encapsulate.rs |  45 ++--
 lib/src/container/mod.rs         |   2 +-
 lib/src/container/ocidir.rs      |  14 +-
 lib/src/container/ociwriter.rs   | 384 -------------------------------
 lib/src/integrationtest.rs       |  60 +----
 5 files changed, 49 insertions(+), 456 deletions(-)
 delete mode 100644 lib/src/container/ociwriter.rs

diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index 90188ecd8..86ed26104 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -1,7 +1,7 @@
 //! APIs for creating container images from OSTree commits

-use super::ociwriter::OciWriter;
-use super::{ociwriter, OstreeImageReference, Transport};
+use super::ocidir::OciDir;
+use super::{ocidir, OstreeImageReference, Transport};
 use super::{ImageReference, SignatureSource, OSTREE_COMMIT_LABEL};
 use crate::container::skopeo;
 use crate::tar as ostree_tar;
@@ -9,10 +9,12 @@ use anyhow::Context;
 use anyhow::Result;
 use fn_error_context::context;
 use gio::glib;
+use oci_spec::image as oci_image;
 use ostree::gio;
 use std::borrow::Cow;
 use std::collections::{BTreeMap, HashMap};
 use std::path::Path;
+use std::rc::Rc;
 use tracing::{instrument, Level};

 /// Annotation injected into the layer to say that this is an ostree commit.
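Note the pattern change in the diffs above: instead of cloning each list, chaining one element, and re-setting it via builders, the new oci-spec mutable accessors (`layers_mut()`, `history_mut()`, `rootfs_mut().diff_ids_mut()`) extend the structures in place. The same style applies to labels; a small sketch using only accessors visible in this series (the helper name is illustrative):

    use oci_spec::image as oci_image;

    // The labels field is Option<HashMap<String, String>>, so default it
    // first, then mutate in place rather than rebuilding the whole config.
    fn add_label(ctrcfg: &mut oci_image::Config, k: &str, v: &str) {
        ctrcfg
            .labels_mut()
            .get_or_insert_with(Default::default)
            .insert(k.to_string(), v.to_string());
    }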
@@ -35,9 +37,9 @@ pub struct Config { fn export_ostree_ref( repo: &ostree::Repo, rev: &str, - writer: &mut OciWriter, + writer: &mut OciDir, compression: Option, -) -> Result { +) -> Result { let commit = repo.resolve_rev(rev, false)?.unwrap(); let mut w = writer.create_raw_layer(compression)?; ostree_tar::export_commit(repo, commit.as_str(), &mut w, None)?; @@ -55,8 +57,8 @@ fn build_oci( ) -> Result { // Explicitly error if the target exists std::fs::create_dir(ocidir_path).context("Creating OCI dir")?; - let ocidir = &openat::Dir::open(ocidir_path)?; - let mut writer = ociwriter::OciWriter::new(ocidir)?; + let ocidir = Rc::new(openat::Dir::open(ocidir_path)?); + let mut writer = ocidir::OciDir::create(ocidir)?; let commit = repo.resolve_rev(rev, false)?.unwrap(); let commit = commit.as_str(); @@ -71,24 +73,29 @@ fn build_oci( let commit_meta = &commit_v.child_value(0); let commit_meta = glib::VariantDict::new(Some(commit_meta)); + let mut ctrcfg = oci_image::Config::default(); + let mut imgcfg = ocidir::new_config_thisarch_linux(); + let labels = ctrcfg.labels_mut().get_or_insert_with(Default::default); + let mut manifest = ocidir::new_empty_manifest().build().unwrap(); + if let Some(version) = commit_meta.lookup_value("version", Some(glib::VariantTy::new("s").unwrap())) { let version = version.str().unwrap(); - writer.add_config_annotation("version", version); - writer.add_manifest_annotation("ostree.version", version); + labels.insert("version".into(), version.into()); } - writer.add_config_annotation(OSTREE_COMMIT_LABEL, commit); - writer.add_manifest_annotation(OSTREE_COMMIT_LABEL, commit); + labels.insert(OSTREE_COMMIT_LABEL.into(), commit.into()); for (k, v) in config.labels.iter().map(|k| k.iter()).flatten() { - writer.add_config_annotation(k, v); + labels.insert(k.into(), v.into()); } if let Some(cmd) = config.cmd.as_ref() { - let cmd: Vec<_> = cmd.iter().map(|s| s.as_str()).collect(); - writer.set_cmd(&cmd); + ctrcfg.set_cmd(Some(cmd.clone())); } + + imgcfg.set_config(Some(ctrcfg)); + let compression = if opts.compress { flate2::Compression::default() } else { @@ -103,8 +110,16 @@ fn build_oci( }; let mut annos = HashMap::new(); annos.insert(BLOB_OSTREE_ANNOTATION.to_string(), "true".to_string()); - writer.push_layer_annotated(rootfs_blob, Some(annos), &description); - writer.complete()?; + writer.push_layer_annotated( + &mut manifest, + &mut imgcfg, + rootfs_blob, + Some(annos), + &description, + ); + let ctrcfg = writer.write_config(imgcfg)?; + manifest.set_config(ctrcfg); + writer.write_manifest(manifest, ocidir::this_platform())?; Ok(ImageReference { transport: Transport::OciDir, diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index a1c388250..666b7ec4a 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -218,7 +218,7 @@ mod encapsulate; pub use encapsulate::*; mod unencapsulate; pub use unencapsulate::*; -pub(crate) mod ociwriter; +pub(crate) mod ocidir; mod skopeo; pub mod store; diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index b25030c59..f76031c38 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -7,7 +7,7 @@ use flate2::write::GzEncoder; use fn_error_context::context; use oci_image::MediaType; use oci_spec::image as oci_image; -use once_cell::sync::OnceCell; +use once_cell::sync::Lazy; use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; use phf::phf_map; @@ -24,7 +24,8 @@ static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! 
{ }; static THIS_OCI_ARCH: Lazy = Lazy::new(|| { - let machine = rustix::process::uname().machine(); + let uname = rustix::process::uname(); + let machine = uname.machine().to_str().unwrap(); let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine); oci_image::Arch::from(*arch) }); @@ -140,10 +141,11 @@ pub(crate) fn new_empty_manifest() -> oci_image::ImageManifestBuilder { } /// Generate an image configuration targeting Linux for this architecture. -pub(crate) fn new_config() -> oci_image::ImageConfigurationBuilder { - oci_image::ImageConfigurationBuilder::default() - .architecture(THIS_OCI_ARCH.clone()) - .os(oci_image::Os::Linux) +pub(crate) fn new_config_thisarch_linux() -> oci_image::ImageConfiguration { + let mut r = oci_image::ImageConfiguration::default(); + r.set_architecture(THIS_OCI_ARCH.clone()); + r.set_os(oci_image::Os::Linux); + r } /// Return a Platform object for Linux for this architecture. diff --git a/lib/src/container/ociwriter.rs b/lib/src/container/ociwriter.rs deleted file mode 100644 index 8626ae9a9..000000000 --- a/lib/src/container/ociwriter.rs +++ /dev/null @@ -1,384 +0,0 @@ -//! Internal API to interact with Open Container Images; mostly -//! oriented towards generating images. - -use anyhow::{anyhow, Result}; -use flate2::write::GzEncoder; -use fn_error_context::context; -use oci_image::{Descriptor, MediaType}; -use oci_spec::image as oci_image; -use openat_ext::*; -use openssl::hash::{Hasher, MessageDigest}; -use phf::phf_map; -use std::collections::HashMap; -use std::io::prelude::*; - -/// Map the value from `uname -m` to the Go architecture. -/// TODO find a more canonical home for this. -static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! { - "x86_64" => "amd64", - "aarch64" => "arm64", -}; - -/// Path inside an OCI directory to the blobs -const BLOBDIR: &str = "blobs/sha256"; - -/// Completed blob metadata -#[derive(Debug)] -pub(crate) struct Blob { - pub(crate) sha256: String, - pub(crate) size: u64, -} - -impl Blob { - pub(crate) fn digest_id(&self) -> String { - format!("sha256:{}", self.sha256) - } - - pub(crate) fn descriptor(&self) -> oci_image::DescriptorBuilder { - oci_image::DescriptorBuilder::default() - .digest(self.digest_id()) - .size(self.size as i64) - } -} - -/// Completed layer metadata -#[derive(Debug)] -pub(crate) struct Layer { - pub(crate) blob: Blob, - pub(crate) uncompressed_sha256: String, -} - -impl Layer { - pub(crate) fn descriptor(&self) -> oci_image::DescriptorBuilder { - self.blob.descriptor() - } -} - -/// Create an OCI blob. -pub(crate) struct BlobWriter<'a> { - pub(crate) hash: Hasher, - pub(crate) target: Option>, - size: u64, -} - -/// Create an OCI layer (also a blob). 
-pub(crate) struct RawLayerWriter<'a> { - bw: BlobWriter<'a>, - uncompressed_hash: Hasher, - compressor: GzEncoder>, -} - -pub(crate) struct OciWriter<'a> { - pub(crate) dir: &'a openat::Dir, - - config_annotations: HashMap, - manifest_annotations: HashMap, - - cmd: Option>, - - layers: Vec<(oci_image::Descriptor, String)>, - history: Vec, -} - -/// Write a serializable data (JSON) as an OCI blob -#[context("Writing json blob")] -pub(crate) fn write_json_blob( - ocidir: &openat::Dir, - v: &S, - media_type: oci_image::MediaType, -) -> Result { - let mut w = BlobWriter::new(ocidir)?; - cjson::to_writer(&mut w, v).map_err(|e| anyhow!("{:?}", e))?; - let blob = w.complete()?; - Ok(blob.descriptor().media_type(media_type)) -} - -impl<'a> OciWriter<'a> { - pub(crate) fn new(dir: &'a openat::Dir) -> Result { - dir.ensure_dir_all(BLOBDIR, 0o755)?; - dir.write_file_contents("oci-layout", 0o644, r#"{"imageLayoutVersion":"1.0.0"}"#)?; - - Ok(Self { - dir, - config_annotations: Default::default(), - manifest_annotations: Default::default(), - layers: Vec::new(), - history: Vec::new(), - cmd: None, - }) - } - - /// Create a writer for a new blob (expected to be a tar stream) - pub(crate) fn create_raw_layer( - &self, - c: Option, - ) -> Result { - RawLayerWriter::new(self.dir, c) - } - - #[allow(dead_code)] - /// Create a tar output stream, backed by a blob - pub(crate) fn create_layer( - &self, - c: Option, - ) -> Result> { - Ok(tar::Builder::new(self.create_raw_layer(c)?)) - } - - /// Add a layer to the top of the image stack. The firsh pushed layer becomes the root. - #[allow(dead_code)] - pub(crate) fn push_layer(&mut self, layer: Layer, description: &str) { - let annotations: Option> = None; - self.push_layer_annotated(layer, annotations, description); - } - - /// Add a layer to the top of the image stack with optional annotations. - /// - /// This is otherwise equivalent to [`Self::push_layer`]. 
- pub(crate) fn push_layer_annotated( - &mut self, - layer: Layer, - annotations: Option>>, - description: &str, - ) { - let mut builder = layer.descriptor().media_type(MediaType::ImageLayerGzip); - if let Some(annotations) = annotations { - builder = builder.annotations(annotations); - } - self.layers - .push((builder.build().unwrap(), layer.uncompressed_sha256)); - self.history.push(description.to_string()); - } - - pub(crate) fn set_cmd(&mut self, e: &[&str]) { - self.cmd = Some(e.iter().map(|s| s.to_string()).collect()); - } - - pub(crate) fn add_manifest_annotation, V: AsRef>(&mut self, k: K, v: V) { - let k = k.as_ref(); - let v = v.as_ref(); - self.manifest_annotations - .insert(k.to_string(), v.to_string()); - } - - pub(crate) fn add_config_annotation, V: AsRef>(&mut self, k: K, v: V) { - let k = k.as_ref(); - let v = v.as_ref(); - self.config_annotations.insert(k.to_string(), v.to_string()); - } - - #[context("Writing OCI")] - pub(crate) fn complete(self) -> Result<()> { - let uname = rustix::process::uname(); - let machine = uname.machine().to_str().unwrap(); - let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine); - let arch = oci_image::Arch::from(*arch); - - if self.layers.is_empty() { - return Err(anyhow!("No layers specified")); - } - - let diffids: Vec = self - .layers - .iter() - .map(|(_, diffid)| format!("sha256:{}", diffid)) - .collect(); - let rootfs = oci_image::RootFsBuilder::default() - .diff_ids(diffids) - .build() - .unwrap(); - - let ctrconfig_builder = oci_image::ConfigBuilder::default().labels(self.config_annotations); - let ctrconfig = if let Some(cmd) = self.cmd { - ctrconfig_builder.cmd(cmd) - } else { - ctrconfig_builder - } - .build() - .unwrap(); - let history: Vec<_> = self - .history - .into_iter() - .map(|h| { - oci_image::HistoryBuilder::default() - .created_by(h) - .build() - .unwrap() - }) - .collect(); - let config = oci_image::ImageConfigurationBuilder::default() - .architecture(arch.clone()) - .os(oci_image::Os::Linux) - .config(ctrconfig) - .rootfs(rootfs) - .history(history) - .build() - .unwrap(); - let config_blob = write_json_blob(self.dir, &config, MediaType::ImageConfig)?; - - let layers: Vec = self.layers.into_iter().map(|v| v.0).collect(); - let manifest_data = oci_image::ImageManifestBuilder::default() - .schema_version(oci_image::SCHEMA_VERSION) - .config(config_blob.build().unwrap()) - .layers(layers) - .annotations(self.manifest_annotations) - .build() - .unwrap(); - let manifest = write_json_blob(self.dir, &manifest_data, MediaType::ImageManifest)? 
- .platform( - oci_image::PlatformBuilder::default() - .architecture(arch) - .os(oci_spec::image::Os::Linux) - .build() - .unwrap(), - ) - .build() - .unwrap(); - - let index_data = oci_image::ImageIndexBuilder::default() - .schema_version(oci_image::SCHEMA_VERSION) - .manifests(vec![manifest]) - .build() - .unwrap(); - self.dir - .write_file_with("index.json", 0o644, |w| -> Result<()> { - cjson::to_writer(w, &index_data).map_err(|e| anyhow::anyhow!("{:?}", e))?; - Ok(()) - })?; - - Ok(()) - } -} - -impl<'a> BlobWriter<'a> { - #[context("Creating blob writer")] - pub(crate) fn new(ocidir: &'a openat::Dir) -> Result { - Ok(Self { - hash: Hasher::new(MessageDigest::sha256())?, - // FIXME add ability to choose filename after completion - target: Some(ocidir.new_file_writer(0o644)?), - size: 0, - }) - } - - #[context("Completing blob")] - pub(crate) fn complete(mut self) -> Result { - let sha256 = hex::encode(self.hash.finish()?); - let target = &format!("{}/{}", BLOBDIR, sha256); - self.target.take().unwrap().complete(target)?; - Ok(Blob { - sha256, - size: self.size, - }) - } -} - -impl<'a> std::io::Write for BlobWriter<'a> { - fn write(&mut self, srcbuf: &[u8]) -> std::io::Result { - self.hash.update(srcbuf)?; - self.target.as_mut().unwrap().writer.write_all(srcbuf)?; - self.size += srcbuf.len() as u64; - Ok(srcbuf.len()) - } - - fn flush(&mut self) -> std::io::Result<()> { - Ok(()) - } -} - -impl<'a> RawLayerWriter<'a> { - pub(crate) fn new(ocidir: &'a openat::Dir, c: Option) -> Result { - let bw = BlobWriter::new(ocidir)?; - Ok(Self { - bw, - uncompressed_hash: Hasher::new(MessageDigest::sha256())?, - compressor: GzEncoder::new(Vec::with_capacity(8192), c.unwrap_or_default()), - }) - } - - #[context("Completing layer")] - pub(crate) fn complete(mut self) -> Result { - self.compressor.get_mut().clear(); - let buf = self.compressor.finish()?; - self.bw.write_all(&buf)?; - let blob = self.bw.complete()?; - let uncompressed_sha256 = hex::encode(self.uncompressed_hash.finish()?); - Ok(Layer { - blob, - uncompressed_sha256, - }) - } -} - -impl<'a> std::io::Write for RawLayerWriter<'a> { - fn write(&mut self, srcbuf: &[u8]) -> std::io::Result { - self.compressor.get_mut().clear(); - self.compressor.write_all(srcbuf).unwrap(); - self.uncompressed_hash.update(srcbuf)?; - let compressed_buf = self.compressor.get_mut().as_slice(); - self.bw.write_all(compressed_buf)?; - Ok(srcbuf.len()) - } - - fn flush(&mut self) -> std::io::Result<()> { - self.bw.flush() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - const MANIFEST_DERIVE: &str = r#"{ - "schemaVersion": 2, - "config": { - "mediaType": "application/vnd.oci.image.config.v1+json", - "digest": "sha256:54977ab597b345c2238ba28fe18aad751e5c59dc38b9393f6f349255f0daa7fc", - "size": 754 - }, - "layers": [ - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "digest": "sha256:ee02768e65e6fb2bb7058282338896282910f3560de3e0d6cd9b1d5985e8360d", - "size": 5462 - }, - { - "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", - "digest": "sha256:d203cef7e598fa167cb9e8b703f9f20f746397eca49b51491da158d64968b429", - "size": 214 - } - ], - "annotations": { - "ostree.commit": "3cb6170b6945065c2475bc16d7bebcc84f96b4c677811a6751e479b89f8c3770", - "ostree.version": "42.0" - } - } - "#; - - #[test] - fn manifest() -> Result<()> { - let m: oci_image::ImageManifest = serde_json::from_str(MANIFEST_DERIVE)?; - assert_eq!( - m.layers()[0].digest().as_str(), - "sha256:ee02768e65e6fb2bb7058282338896282910f3560de3e0d6cd9b1d5985e8360d" - ); - Ok(()) - 
} - - #[test] - fn test_build() -> Result<()> { - let td = tempfile::tempdir()?; - let td = &openat::Dir::open(td.path())?; - let mut w = OciWriter::new(td)?; - let mut layerw = w.create_raw_layer(None)?; - layerw.write_all(b"pretend this is a tarball")?; - let root_layer = layerw.complete()?; - assert_eq!( - root_layer.uncompressed_sha256, - "349438e5faf763e8875b43de4d7101540ef4d865190336c2cc549a11f33f8d7c" - ); - w.push_layer(root_layer, "root"); - w.complete()?; - Ok(()) - } -} diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 5f043b455..c8f59e94c 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -1,9 +1,9 @@ //! Module used for integration tests; should not be public. -use anyhow::{Context, Result}; +use crate::container::ocidir; +use anyhow::Result; use camino::Utf8Path; use fn_error_context::context; -use std::path::Path; fn has_ostree() -> bool { std::path::Path::new("/sysroot/ostree/repo").exists() @@ -18,46 +18,20 @@ pub(crate) fn detectenv() -> &'static str { } } -fn deserialize_json_path( - p: impl AsRef, -) -> Result { - let p = p.as_ref(); - let ctx = || format!("Parsing {:?}", p); - let f = std::io::BufReader::new(std::fs::File::open(p).with_context(ctx)?); - serde_json::from_reader(f).with_context(ctx) -} - -fn deserialize_json_blob( - ocidir: impl AsRef, - desc: &oci_spec::image::Descriptor, -) -> Result { - let ocidir = ocidir.as_ref(); - let blobpath = desc.digest().replace(':', "/"); - deserialize_json_path(&ocidir.join(&format!("blobs/{}", blobpath))) -} - /// Using `src` as a base, take append `dir` into OCI image. /// Should only be enabled for testing. #[cfg(feature = "internal-testing-api")] #[context("Generating derived oci")] pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef) -> Result<()> { + use std::rc::Rc; let src = src.as_ref(); + let src = Rc::new(openat::Dir::open(src.as_std_path())?); + let src = ocidir::OciDir::open(src)?; let dir = dir.as_ref(); - let index_path = &src.join("index.json"); - let mut idx: oci_spec::image::ImageIndex = deserialize_json_path(index_path)?; - let mut manifest: oci_spec::image::ImageManifest = { - let manifest_desc = idx - .manifests() - .get(0) - .ok_or_else(|| anyhow::anyhow!("No manifests found"))?; - deserialize_json_blob(src, manifest_desc)? - }; - let mut config: oci_spec::image::ImageConfiguration = - deserialize_json_blob(src, manifest.config())?; - - let srcdir = &openat::Dir::open(src.as_std_path())?; + let mut manifest = src.read_manifest()?; + let mut config: oci_spec::image::ImageConfiguration = src.read_json_blob(manifest.config())?; - let bw = crate::container::ociwriter::RawLayerWriter::new(srcdir, None)?; + let bw = src.create_raw_layer(None)?; let mut layer_tar = tar::Builder::new(bw); layer_tar.append_dir_all("./", dir.as_std_path())?; let bw = layer_tar.into_inner()?; @@ -81,23 +55,9 @@ pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef .rootfs_mut() .diff_ids_mut() .push(new_layer.uncompressed_sha256); - let new_config_desc = crate::container::ociwriter::write_json_blob( - srcdir, - &config, - oci_spec::image::MediaType::ImageConfig, - )? - .build() - .unwrap(); + let new_config_desc = src.write_config(config)?; manifest.set_config(new_config_desc); - let new_manifest_desc = crate::container::ociwriter::write_json_blob( - srcdir, - &manifest, - oci_spec::image::MediaType::ImageManifest, - )? 
- .build()
-    .unwrap();
-    idx.set_manifests(vec![new_manifest_desc]);
-    idx.to_file(index_path.as_std_path())?;
+    src.write_manifest(manifest, ocidir::this_platform())?;
     Ok(())
 }

From 92c3303537c42c7d443719c8474775535ca83171 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 19 Jan 2022 13:23:41 -0500
Subject: [PATCH 257/775] Add a helper to flatten `Result<Result<T>>` with
 tokio_util

I think this is nicer than having `??` which still looks odd to me, and
we can also drop a `map(anyhow::Error::msg)` in one place too.

Prep for further work.
---
 lib/src/container/store.rs |  8 ++++----
 lib/src/tar/import.rs      |  8 +++-----
 lib/src/tokio_util.rs      | 25 ++++++++++++++++++++++++-
 3 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 3b6331212..9196ccf00 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -363,7 +363,7 @@ impl LayeredImageImporter {
         // Destructure to transfer ownership to thread
         let repo = self.repo;
         let imgref = self.target_imgref.unwrap_or(self.imgref);
-        let state = crate::tokio_util::spawn_blocking_cancellable(
+        let state = crate::tokio_util::spawn_blocking_cancellable_flatten(
             move |cancellable| -> Result {
                 let cancellable = Some(cancellable);
                 let repo = &repo;
@@ -401,7 +401,7 @@ impl LayeredImageImporter {
                 Ok(state)
             },
         )
-        .await??;
+        .await?;
         Ok(state)
     }
 }
@@ -481,7 +481,7 @@ pub async fn copy(
         let ostree_ref = ostree_ref?;
         let src_repo = src_repo.clone();
         let dest_repo = dest_repo.clone();
-        crate::tokio_util::spawn_blocking_cancellable(move |cancellable| -> Result<_> {
+        crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| -> Result<_> {
             let cancellable = Some(cancellable);
             let srcfd = &format!("file:///proc/self/fd/{}", src_repo.dfd());
             let flags = ostree::RepoPullFlags::MIRROR;
@@ -495,7 +495,7 @@ pub async fn copy(
             dest_repo.pull_with_options(srcfd, &options, None, cancellable)?;
             Ok(())
         })
-        .await??;
+        .await?;
     }
     Ok(())
 }
diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index aa768d079..69941b553 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -5,7 +5,6 @@ use anyhow::{anyhow, Context};
 use camino::Utf8Path;
 use camino::Utf8PathBuf;
 use fn_error_context::context;
-use futures_util::TryFutureExt;
 use gio::glib;
 use gio::prelude::*;
 use glib::Variant;
@@ -599,7 +598,7 @@ pub async fn import_tar(
     let options = options.unwrap_or_default();
     let src = tokio_util::io::SyncIoBridge::new(src);
     let repo = repo.clone();
-    let import = crate::tokio_util::spawn_blocking_cancellable(move |cancellable| {
+    let import = crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| {
         let mut archive = tar::Archive::new(src);
         let txn = repo.auto_transaction(Some(cancellable))?;
         let importer = Importer::new(&repo, options.remote);
@@ -607,9 +606,8 @@ pub async fn import_tar(
         txn.commit(Some(cancellable))?;
         repo.mark_commit_partial(&checksum, false)?;
         Ok::<_, anyhow::Error>(checksum)
-    })
-    .map_err(anyhow::Error::msg);
-    let import: String = import.await??;
+    });
+    let import: String = import.await?;
     Ok(import)
 }
diff --git a/lib/src/tokio_util.rs b/lib/src/tokio_util.rs
index e842b8f2e..443901ff8 100644
--- a/lib/src/tokio_util.rs
+++ b/lib/src/tokio_util.rs
@@ -1,7 +1,8 @@
 //! Helpers for bridging GLib async/mainloop with Tokio.
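For callers, the helper introduced by this patch collapses the old double-`?` (task join error, then inner error) into a single `?`. A self-contained usage sketch, with a purely illustrative return value:

    use ostree_ext::prelude::*;

    async fn example() -> anyhow::Result<u32> {
        let n = ostree_ext::tokio_util::spawn_blocking_cancellable_flatten(|cancellable| {
            // The closure runs on a blocking thread and can honor cancellation.
            cancellable.set_error_if_cancelled()?;
            Ok(42u32)
        })
        .await?;
        Ok(n)
    }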
use anyhow::Result;
-use futures_util::Future;
+use core::fmt::{Debug, Display};
+use futures_util::{Future, FutureExt};
 use ostree::gio;
 use ostree::prelude::CancellableExt;
@@ -48,6 +49,28 @@ where
         f(&dropper.0)
     })
 }
+
+/// Flatten a nested Result<Result<T>>, defaulting to converting the error type to an `anyhow::Error`.
+/// See https://doc.rust-lang.org/std/result/enum.Result.html#method.flatten
+pub(crate) fn flatten_anyhow<T, E>(r: std::result::Result<Result<T>, E>) -> Result<T>
+where
+    E: Display + Debug + Send + Sync + 'static,
+{
+    match r {
+        Ok(x) => x,
+        Err(e) => Err(anyhow::anyhow!(e)),
+    }
+}
+
+/// A wrapper around [`spawn_blocking_cancellable`] that flattens nested results.
+pub fn spawn_blocking_cancellable_flatten<T, F>(f: F) -> impl Future<Output = Result<T>>
+where
+    F: FnOnce(&gio::Cancellable) -> Result<T> + Send + 'static,
+    T: Send + 'static,
+{
+    spawn_blocking_cancellable(f).map(flatten_anyhow)
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;

From 48f0a9d94710c4053d0ed4ffd1dbe5f269d881b3 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 19 Jan 2022 17:38:14 -0500
Subject: [PATCH 258/775] ocidir: Add creation time into history

Turns out that `podman history` segfaults if this is missing.

Closes: https://github.com/ostreedev/ostree-rs-ext/issues/211
---
 lib/src/container/ocidir.rs | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs
index f76031c38..c0cb2d0c7 100644
--- a/lib/src/container/ocidir.rs
+++ b/lib/src/container/ocidir.rs
@@ -238,7 +238,18 @@ impl OciDir {
             format!("sha256:{}", layer.uncompressed_sha256),
         ));
         config.set_rootfs(rootfs);
+        // There is e.g. https://docs.rs/chrono/latest/chrono/struct.DateTime.html#method.to_rfc3339_opts
+        // and chrono is already in our dependency chain, just indirectly because of tracing-subscriber.
+        // glib actually also has https://docs.rs/glib/latest/glib/struct.DateTime.html#method.format_iso8601
+        // but that requires a newer glib.
+        // Since glib is going to be required by ostree for the foreseeable future, for now
+        // let's use that instead of adding chrono.
+        let now = ostree::glib::DateTime::new_now_utc()
+            .unwrap()
+            .format("%Y-%m-%dT%H:%M:%S.%fZ")
+            .unwrap();
         let h = oci_image::HistoryBuilder::default()
+            .created(now)
             .created_by(description.to_string())
             .build()
             .unwrap();

From f83f6d78cabd2fe0034e306fa07b628e80c75810 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 20 Jan 2022 11:19:36 -0500
Subject: [PATCH 259/775] container: Support `/etc/ostree/auth.json` and in
 `/run` too

This is related to https://github.com/ostreedev/ostree-rs-ext/issues/121
as well as https://github.com/containers/containers-image-proxy-rs/pull/8
etc.

The CLI code here supports `--authfile`.  However, passing it
on the CLI each time for production use cases pushes complexity
to users.

Add support for global persistent and runtime config files in
`/etc/ostree/auth.json` and `/run/ostree/auth.json`.

Change the default constructor for image pulls to use that by default.
Note that the CLI options override the config defaults.

While we're here, also add `--auth-anonymous` to the CLI, which
is needed to ensure we don't use a config file if present.
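The lookup order this implements is: honor an explicit `--authfile` if given, otherwise consult the runtime file, then the persistent one. For the system (root) case the effective search reduces to something like the following sketch (standalone, with the paths hardcoded; the real code in `globals.rs` below also derives per-user equivalents):

    use std::path::PathBuf;

    // /run/ostree/auth.json (runtime) takes precedence over
    // /etc/ostree/auth.json (persistent); None means anonymous pulls.
    fn find_system_authfile() -> Option<PathBuf> {
        ["/run/ostree/auth.json", "/etc/ostree/auth.json"]
            .iter()
            .map(PathBuf::from)
            .find(|p| p.exists())
    }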
--- ci/integration.sh | 5 ++- lib/src/cli.rs | 8 +++++ lib/src/container/mod.rs | 11 +++++++ lib/src/container/store.rs | 4 ++- lib/src/container_utils.rs | 2 +- lib/src/globals.rs | 62 ++++++++++++++++++++++++++++++++++++++ lib/src/integrationtest.rs | 35 +++++++++++++++++++++ lib/src/lib.rs | 3 ++ 8 files changed, 127 insertions(+), 3 deletions(-) create mode 100644 lib/src/globals.rs diff --git a/ci/integration.sh b/ci/integration.sh index 8d0104e13..342207cdf 100755 --- a/ci/integration.sh +++ b/ci/integration.sh @@ -19,4 +19,7 @@ env=$(ostree-ext-cli internal-only-for-testing detect-env) test "${env}" = ostree-container tap_ok environment -tap_end \ No newline at end of file +ostree-ext-cli internal-only-for-testing run +tap_ok integrationtests + +tap_end diff --git a/lib/src/cli.rs b/lib/src/cli.rs index d60a8d309..dbb24a3d4 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -125,6 +125,10 @@ enum ContainerOpts { /// Options for container image fetching. #[derive(Debug, StructOpt)] struct ContainerProxyOpts { + #[structopt(long)] + /// Do not use default authentication files. + auth_anonymous: bool, + #[structopt(long)] /// Path to Docker-formatted authentication file. authfile: Option, @@ -232,6 +236,8 @@ struct ImaSignOpts { enum TestingOpts { // Detect the current environment DetectEnv, + /// Execute integration tests, assuming mutable environment + Run, } /// Toplevel options for extended ostree functionality. @@ -255,6 +261,7 @@ enum Opt { impl Into for ContainerProxyOpts { fn into(self) -> ostree_container::store::ImageProxyConfig { ostree_container::store::ImageProxyConfig { + auth_anonymous: self.auth_anonymous, authfile: self.authfile, certificate_directory: self.cert_dir, insecure_skip_tls_verification: Some(self.insecure_skip_tls_verification), @@ -456,6 +463,7 @@ fn testing(opts: &TestingOpts) -> Result<()> { println!("{}", s); Ok(()) } + TestingOpts::Run => crate::integrationtest::run_tests(), } } diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 666b7ec4a..db20ab4fd 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -213,6 +213,17 @@ impl std::fmt::Display for OstreeImageReference { } } +/// Apply default configuration for container image pulls to an existing configuration. +/// For example, if `authfile` is not set, and `auth_anonymous` is `false`, and a global configuration file exists, it will be used. 
+pub fn merge_default_container_proxy_opts( + config: &mut containers_image_proxy::ImageProxyConfig, +) -> Result<()> { + if !config.auth_anonymous && config.authfile.is_none() { + config.authfile = crate::globals::get_global_authfile_path()?; + } + Ok(()) +} + pub mod deploy; mod encapsulate; pub use encapsulate::*; diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 9196ccf00..3d9c51b66 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -182,8 +182,10 @@ impl LayeredImageImporter { pub async fn new( repo: &ostree::Repo, imgref: &OstreeImageReference, - config: ImageProxyConfig, + mut config: ImageProxyConfig, ) -> Result { + // Apply our defaults to the proxy config + merge_default_container_proxy_opts(&mut config)?; let proxy = ImageProxy::new_with_config(config).await?; let proxy_img = proxy.open_image(&imgref.imgref.to_string()).await?; let repo = repo.clone(); diff --git a/lib/src/container_utils.rs b/lib/src/container_utils.rs index b42c1e977..2256dedc9 100644 --- a/lib/src/container_utils.rs +++ b/lib/src/container_utils.rs @@ -30,7 +30,7 @@ pub fn running_in_container() -> bool { // https://docs.rs/openat-ext/0.1.10/openat_ext/trait.OpenatDirExt.html#tymethod.open_file_optional // https://users.rust-lang.org/t/why-i-use-anyhow-error-even-in-libraries/68592 -fn open_optional(path: impl AsRef) -> std::io::Result> { +pub(crate) fn open_optional(path: impl AsRef) -> std::io::Result> { match std::fs::File::open(path.as_ref()) { Ok(r) => Ok(Some(r)), Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None), diff --git a/lib/src/globals.rs b/lib/src/globals.rs new file mode 100644 index 000000000..228e2a680 --- /dev/null +++ b/lib/src/globals.rs @@ -0,0 +1,62 @@ +//! Global functions. + +use super::Result; +use once_cell::sync::OnceCell; +use ostree::glib; +use std::fs::File; +use std::path::{Path, PathBuf}; + +struct ConfigPaths { + persistent: PathBuf, + runtime: PathBuf, +} + +/// Get the runtime and persistent config directories. In the system (root) case, these +/// system(root) case: /run/ostree /etc/ostree +/// user(nonroot) case: /run/user/$uid/ostree ~/.config/ostree +fn get_config_paths() -> &'static ConfigPaths { + static PATHS: OnceCell = OnceCell::new(); + PATHS.get_or_init(|| { + let mut r = if rustix::process::getuid() == rustix::process::Uid::ROOT { + ConfigPaths { + persistent: PathBuf::from("/etc"), + runtime: PathBuf::from("/run"), + } + } else { + ConfigPaths { + persistent: glib::user_config_dir(), + runtime: glib::user_runtime_dir(), + } + }; + let path = "ostree"; + r.persistent.push(path); + r.runtime.push(path); + r + }) +} + +impl ConfigPaths { + /// Return the path and an open fd for a config file, if it exists. + pub(crate) fn open_file(&self, p: impl AsRef) -> Result> { + let p = p.as_ref(); + let mut runtime = self.runtime.clone(); + runtime.push(p); + if let Some(f) = crate::container_utils::open_optional(&runtime)? { + return Ok(Some((runtime, f))); + } + let mut persistent = self.persistent.clone(); + persistent.push(p); + if let Some(f) = crate::container_utils::open_optional(&persistent)? { + return Ok(Some((persistent, f))); + } + Ok(None) + } +} + +/// Return the path to the global container authentication file, if it exists. 
+pub(crate) fn get_global_authfile_path() -> Result> { + let paths = get_config_paths(); + let r = paths.open_file("auth.json")?; + // TODO pass the file descriptor to the proxy, not a global path + Ok(r.map(|v| v.0)) +} diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index c8f59e94c..6a6d5fc1e 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -1,5 +1,7 @@ //! Module used for integration tests; should not be public. +use std::path::Path; + use crate::container::ocidir; use anyhow::Result; use camino::Utf8Path; @@ -61,3 +63,36 @@ pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef src.write_manifest(manifest, ocidir::this_platform())?; Ok(()) } + +fn test_proxy_auth() -> Result<()> { + use containers_image_proxy::ImageProxyConfig; + let merge = crate::container::merge_default_container_proxy_opts; + let mut c = ImageProxyConfig::default(); + merge(&mut c)?; + assert_eq!(c.authfile, None); + std::fs::create_dir_all("/etc/ostree")?; + let authpath = Path::new("/etc/ostree/auth.json"); + std::fs::write(authpath, "{}")?; + let mut c = ImageProxyConfig::default(); + merge(&mut c)?; + assert_eq!(c.authfile.unwrap().as_path(), authpath,); + let c = ImageProxyConfig { + auth_anonymous: true, + ..Default::default() + }; + assert_eq!(c.authfile, None); + std::fs::remove_file(authpath)?; + let mut c = ImageProxyConfig::default(); + merge(&mut c)?; + assert_eq!(c.authfile, None); + Ok(()) +} + +#[cfg(feature = "internal-testing-api")] +#[context("Running integration tests")] +pub(crate) fn run_tests() -> Result<()> { + // When there's a new integration test to run, add it here. + test_proxy_auth()?; + println!("integration tests succeeded."); + Ok(()) +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 630641b15..1ed799d01 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -23,6 +23,9 @@ pub use ostree::gio::glib; /// to a string to output to a terminal or logs. type Result = anyhow::Result; +// Import global functions. +mod globals; + pub mod cli; pub mod container; pub mod container_utils; From 485358b0531d8b04dbf66e5db7bcddaba134a5b3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 22 Jan 2022 09:20:34 -0500 Subject: [PATCH 260/775] utils: Add an API to require being in an ostree container Most of our callers actually want this. --- lib/src/container_utils.rs | 10 ++++++++++ lib/src/integrationtest.rs | 1 + 2 files changed, 11 insertions(+) diff --git a/lib/src/container_utils.rs b/lib/src/container_utils.rs index 2256dedc9..388fe50a1 100644 --- a/lib/src/container_utils.rs +++ b/lib/src/container_utils.rs @@ -68,3 +68,13 @@ pub fn is_bare_split_xattrs() -> Result { pub fn is_ostree_container() -> Result { Ok(running_in_container() && is_bare_split_xattrs()?) } + +/// Returns an error unless the current filesystem is an ostree-based container +/// +/// This just wraps [`is_ostree_container`]. +pub fn require_ostree_container() -> Result<()> { + if !is_ostree_container()? { + anyhow::bail!("Not in an ostree-based container environment"); + } + Ok(()) +} diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 6a6d5fc1e..f9b383d7d 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -91,6 +91,7 @@ fn test_proxy_auth() -> Result<()> { #[cfg(feature = "internal-testing-api")] #[context("Running integration tests")] pub(crate) fn run_tests() -> Result<()> { + crate::container_utils::require_ostree_container()?; // When there's a new integration test to run, add it here. 
test_proxy_auth()?; println!("integration tests succeeded."); From ca48c6007d61c5fadd57f9e2bfce1823ac263426 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 22 Jan 2022 09:25:12 -0500 Subject: [PATCH 261/775] lib/commit: Use `require_ostree_container()` API --- lib/src/commit.rs | 33 +++++++++++++++------------------ 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/lib/src/commit.rs b/lib/src/commit.rs index 02317df82..70aed4b22 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -1,14 +1,14 @@ -/// This module contains the functions to implement the commit -/// procedures as part of building an ostree container image. -/// https://github.com/ostreedev/ostree-rs-ext/issues/159 +//! This module contains the functions to implement the commit +//! procedures as part of building an ostree container image. +//! https://github.com/ostreedev/ostree-rs-ext/issues/159 + +use crate::container_utils::require_ostree_container; use anyhow::Context; use anyhow::Result; use std::fs; use std::path::Path; use tokio::task; -use crate::container_utils::is_ostree_container; - /// Check if there are any files that are not directories and error out if /// we find any, /var should not contain any files to commit in a container /// as it is where we expect user data to reside. @@ -35,22 +35,19 @@ fn validate_directories_only(path: &Path, error_count: &mut i32) -> Result<()> { /// Entrypoint to the commit procedures, initially we just /// have one validation but we expect more in the future. pub(crate) async fn container_commit() -> Result<()> { - if is_ostree_container()? { - println!("Checking /var for files"); - let var_path = Path::new("/var"); + require_ostree_container()?; + println!("Checking /var for files"); + let var_path = Path::new("/var"); - let mut error_count = 0; + let mut error_count = 0; - task::spawn_blocking(move || -> Result<()> { - validate_directories_only(var_path, &mut error_count) - }) - .await??; + task::spawn_blocking(move || -> Result<()> { + validate_directories_only(var_path, &mut error_count) + }) + .await??; - if error_count != 0 { - anyhow::bail!("Found content in /var"); - } - } else { - anyhow::bail!("Not a container can't commit"); + if error_count != 0 { + anyhow::bail!("Found content in /var"); } Ok(()) } From dd568714d92aff5f6c710a013dd6056cc5a8bf68 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 12 Jan 2022 16:48:34 -0500 Subject: [PATCH 262/775] Use cap-std-ext We want to use this code in rpm-ostree too, so let's consider creating a new shared crate. --- lib/Cargo.toml | 1 + lib/src/cmdext.rs | 25 ------------------------- lib/src/lib.rs | 1 - lib/src/tar/write.rs | 2 +- 4 files changed, 2 insertions(+), 27 deletions(-) delete mode 100644 lib/src/cmdext.rs diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 297a49add..3649c418d 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -16,6 +16,7 @@ async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" cjson = "0.1.1" +cap-std-ext = "0.1.0" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures-util = "0.3.13" diff --git a/lib/src/cmdext.rs b/lib/src/cmdext.rs deleted file mode 100644 index 65bdb096e..000000000 --- a/lib/src/cmdext.rs +++ /dev/null @@ -1,25 +0,0 @@ -use rustix::fd::{FromRawFd, IntoRawFd}; -use rustix::io::OwnedFd; -use std::os::unix::prelude::CommandExt; -use std::sync::Arc; - -pub(crate) trait CommandRedirectionExt { - /// Pass a file descriptor into the target process. 
- fn take_fd_n(&mut self, fd: Arc, target: i32) -> &mut Self; -} - -#[allow(unsafe_code)] -impl CommandRedirectionExt for std::process::Command { - fn take_fd_n(&mut self, fd: Arc, target: i32) -> &mut Self { - unsafe { - self.pre_exec(move || { - let target = rustix::io::OwnedFd::from_raw_fd(target); - rustix::io::dup2(&*fd, &target)?; - // Intentionally leak into the child. - let _ = target.into_raw_fd(); - Ok(()) - }); - } - self - } -} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 1ed799d01..5c64ea684 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -36,7 +36,6 @@ pub mod refescape; pub mod tar; pub mod tokio_util; -mod cmdext; pub(crate) mod commit; pub(crate) mod objgv; /// Prelude, intended for glob import. diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index 6e048d1e8..abf799610 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -7,10 +7,10 @@ //! In the future, this may also evolve into parsing the tar //! stream in Rust, not in C. -use crate::cmdext::CommandRedirectionExt; use crate::Result; use anyhow::{anyhow, Context}; use camino::{Utf8Component, Utf8Path, Utf8PathBuf}; +use cap_std_ext::cmdext::CapStdExtCommandExt; use ostree::gio; use ostree::prelude::FileExt; use rustix::fd::FromFd; From 1f1271852874b908ba39fa5ef5f1d078faab762c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 24 Jan 2022 16:13:32 -0500 Subject: [PATCH 263/775] Use new correct `impl Default` for oci bits Now that https://github.com/containers/oci-spec-rs/pull/90/commits/6e65562e3d70b66656598e6a567dc71e80dd0f56 landed, we can just depend on it and not have a hardcoded architecture mapping here. --- lib/Cargo.toml | 1 - lib/src/container/encapsulate.rs | 4 ++-- lib/src/container/ocidir.rs | 35 +------------------------------- lib/src/integrationtest.rs | 3 ++- 4 files changed, 5 insertions(+), 38 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 297a49add..309cbd73c 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -32,7 +32,6 @@ openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" ostree = { features = ["v2021_5"], version = "0.13.4" } -phf = { features = ["macros"], version = "0.10" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 86ed26104..db8d6ff96 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -74,7 +74,7 @@ fn build_oci( let commit_meta = glib::VariantDict::new(Some(commit_meta)); let mut ctrcfg = oci_image::Config::default(); - let mut imgcfg = ocidir::new_config_thisarch_linux(); + let mut imgcfg = oci_image::ImageConfiguration::default(); let labels = ctrcfg.labels_mut().get_or_insert_with(Default::default); let mut manifest = ocidir::new_empty_manifest().build().unwrap(); @@ -119,7 +119,7 @@ fn build_oci( ); let ctrcfg = writer.write_config(imgcfg)?; manifest.set_config(ctrcfg); - writer.write_manifest(manifest, ocidir::this_platform())?; + writer.write_manifest(manifest, oci_image::Platform::default())?; Ok(ImageReference { transport: Transport::OciDir, diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index c0cb2d0c7..b8a59f945 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -7,29 +7,13 @@ use flate2::write::GzEncoder; use fn_error_context::context; use oci_image::MediaType; use oci_spec::image as oci_image; -use once_cell::sync::Lazy; use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; -use 
phf::phf_map; use std::collections::HashMap; use std::io::prelude::*; use std::path::Path; use std::rc::Rc; -/// Map the value from `uname -m` to the Go architecture. -/// TODO find a more canonical home for this. -static MACHINE_TO_OCI: phf::Map<&str, &str> = phf_map! { - "x86_64" => "amd64", - "aarch64" => "arm64", -}; - -static THIS_OCI_ARCH: Lazy = Lazy::new(|| { - let uname = rustix::process::uname(); - let machine = uname.machine().to_str().unwrap(); - let arch = MACHINE_TO_OCI.get(machine).unwrap_or(&machine); - oci_image::Arch::from(*arch) -}); - /// Path inside an OCI directory to the blobs const BLOBDIR: &str = "blobs/sha256"; @@ -140,23 +124,6 @@ pub(crate) fn new_empty_manifest() -> oci_image::ImageManifestBuilder { .layers(Vec::new()) } -/// Generate an image configuration targeting Linux for this architecture. -pub(crate) fn new_config_thisarch_linux() -> oci_image::ImageConfiguration { - let mut r = oci_image::ImageConfiguration::default(); - r.set_architecture(THIS_OCI_ARCH.clone()); - r.set_os(oci_image::Os::Linux); - r -} - -/// Return a Platform object for Linux for this architecture. -pub(crate) fn this_platform() -> oci_image::Platform { - oci_image::PlatformBuilder::default() - .os(oci_image::Os::Linux) - .architecture(THIS_OCI_ARCH.clone()) - .build() - .unwrap() -} - impl OciDir { /// Create a new, empty OCI directory at the target path, which should be empty. pub(crate) fn create(dir: impl Into>) -> Result { @@ -456,7 +423,7 @@ mod tests { w.push_layer(&mut manifest, &mut config, root_layer, "root"); let config = w.write_config(config)?; manifest.set_config(config); - w.write_manifest(manifest, this_platform())?; + w.write_manifest(manifest, oci_image::Platform::default())?; Ok(()) } } diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 6a6d5fc1e..3cfe95584 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -6,6 +6,7 @@ use crate::container::ocidir; use anyhow::Result; use camino::Utf8Path; use fn_error_context::context; +use oci_spec::image as oci_image; fn has_ostree() -> bool { std::path::Path::new("/sysroot/ostree/repo").exists() @@ -60,7 +61,7 @@ pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef let new_config_desc = src.write_config(config)?; manifest.set_config(new_config_desc); - src.write_manifest(manifest, ocidir::this_platform())?; + src.write_manifest(manifest, oci_image::Platform::default())?; Ok(()) } From 27c2c41bcb20ebd120e95d2bcf6d691c2a3157b3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 24 Jan 2022 18:09:45 -0500 Subject: [PATCH 264/775] Switch to new `require_rev` API I added this back in https://github.com/ostreedev/ostree-rs/pull/35/commits/f8aa658d17af9c13702a20ea8d4030cc34121bc8 and only recently remembered about it when modifying some other code. We need some sort of automatic reminder system for "remember to use this new API from your dependency". 
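
To illustrate the ergonomic difference (a standalone sketch, not part of this
patch; it assumes an already-opened ostree::Repo):

    use anyhow::Result;

    fn lookup_commit(repo: &ostree::Repo, refspec: &str) -> Result<String> {
        // Before: resolve_rev() returns Result<Option<GString>>; even with
        // allow_noent=false (where a missing ref is already an error), the
        // caller still has to unwrap() an Option.
        let old_style = repo.resolve_rev(refspec, false)?.unwrap();
        // After: require_rev() folds the missing-ref case into the error
        // path, so there is no Option left to unwrap.
        let new_style = repo.require_rev(refspec)?;
        assert_eq!(old_style, new_style);
        Ok(new_style.to_string())
    }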
--- lib/src/container/encapsulate.rs | 4 ++-- lib/src/container/store.rs | 2 +- lib/src/ima.rs | 2 +- lib/src/tar/export.rs | 4 ++-- lib/tests/it/main.rs | 10 ++++------ 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 86ed26104..0724d6a9f 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -40,7 +40,7 @@ fn export_ostree_ref( writer: &mut OciDir, compression: Option, ) -> Result { - let commit = repo.resolve_rev(rev, false)?.unwrap(); + let commit = repo.require_rev(rev)?; let mut w = writer.create_raw_layer(compression)?; ostree_tar::export_commit(repo, commit.as_str(), &mut w, None)?; w.complete() @@ -60,7 +60,7 @@ fn build_oci( let ocidir = Rc::new(openat::Dir::open(ocidir_path)?); let mut writer = ocidir::OciDir::create(ocidir)?; - let commit = repo.resolve_rev(rev, false)?.unwrap(); + let commit = repo.require_rev(rev)?; let commit = commit.as_str(); let (commit_v, _) = repo.load_commit(commit)?; let commit_subject = commit_v.child_value(3); diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 3d9c51b66..b3dd11da3 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -469,7 +469,7 @@ pub async fn copy( imgref: &OstreeImageReference, ) -> Result<()> { let ostree_ref = ref_for_image(&imgref.imgref)?; - let rev = src_repo.resolve_rev(&ostree_ref, false)?.unwrap(); + let rev = src_repo.require_rev(&ostree_ref)?; let (commit_obj, _) = src_repo.load_commit(rev.as_str())?; let commit_meta = &glib::VariantDict::new(Some(&commit_obj.child_value(0))); let (manifest, _) = manifest_data_from_commitmeta(commit_meta)?; diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 121a861bb..ba964a1e1 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -271,7 +271,7 @@ impl<'a> CommitRewriter<'a> { /// Write a commit object. #[context("Mapping {}", rev)] fn map_commit(&mut self, rev: &str) -> Result { - let checksum = self.repo.resolve_rev(rev, false)?.unwrap(); + let checksum = self.repo.require_rev(rev)?; let cancellable = gio::NONE_CANCELLABLE; let (commit_v, _) = self.repo.load_commit(&checksum)?; let commit_v = &commit_v; diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 0bf37c5b3..b2af36817 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -423,10 +423,10 @@ pub fn export_commit( out: impl std::io::Write, options: Option, ) -> Result<()> { - let commit = repo.resolve_rev(rev, false)?; + let commit = repo.require_rev(rev)?; let mut tar = tar::Builder::new(out); let options = options.unwrap_or_default(); - impl_export(repo, commit.unwrap().as_str(), &mut tar, options)?; + impl_export(repo, commit.as_str(), &mut tar, options)?; tar.finish()?; Ok(()) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 7046b04b0..8a21190ea 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -390,9 +390,8 @@ async fn test_container_import_export() -> Result<()> { let fixture = Fixture::new()?; let testrev = fixture .srcrepo - .resolve_rev(TESTREF, false) - .context("Failed to resolve ref")? - .unwrap(); + .require_rev(TESTREF) + .context("Failed to resolve ref")?; let srcoci_path = &fixture.path.join("oci"); let srcoci_imgref = ImageReference { @@ -707,9 +706,8 @@ async fn test_container_import_export_registry() -> Result<()> { let fixture = Fixture::new()?; let testrev = fixture .srcrepo - .resolve_rev(TESTREF, false) - .context("Failed to resolve ref")? 
- .unwrap(); + .require_rev(TESTREF) + .context("Failed to resolve ref")?; let src_imgref = ImageReference { transport: Transport::Registry, name: format!("{}/exampleos", tr), From 762e45871f0138e06dcef35b0deaf7c114bc0b84 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Tue, 25 Jan 2022 11:26:13 +0000 Subject: [PATCH 265/775] tar/export: add additional mandatory ostree repo directories This adds some hierarchies (e.g. 'refs/' and 'tmp/') that are expected by libostree to be present on valid repositories. --- lib/src/tar/export.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index b2af36817..1d623b0d9 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -111,7 +111,6 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { if self.wrote_initdirs { return Ok(()); } - self.wrote_initdirs = true; let objdir: Utf8PathBuf = format!("{}/repo/objects", OSTREEDIR).into(); // Add all parent directories @@ -132,6 +131,16 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let path: Utf8PathBuf = format!("{}/{:02x}", objdir, d).into(); self.append_default_dir(&path)?; } + // Tmp subdirectories + for d in ["tmp", "tmp/cache"] { + let path: Utf8PathBuf = format!("{}/repo/{}", OSTREEDIR, d).into(); + self.append_default_dir(&path)?; + } + // Refs subdirectories + for d in ["refs", "refs/heads", "refs/mirrors", "refs/remotes"] { + let path: Utf8PathBuf = format!("{}/repo/{}", OSTREEDIR, d).into(); + self.append_default_dir(&path)?; + } // The special `repo/xattrs` directory used only in our tar serialization. let path: Utf8PathBuf = format!("{}/repo/xattrs", OSTREEDIR).into(); @@ -150,6 +159,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.out .append_data(&mut h, path, std::io::Cursor::new(REPO_CONFIG))?; + self.wrote_initdirs = true; Ok(()) } From 895c35fef3d0c6efa3fa137417a939730fc499be Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Tue, 25 Jan 2022 14:37:30 +0000 Subject: [PATCH 266/775] tar/export: create 'ff' objects subdirectory This fixes an off-by-one bug, which was causing a missing objects subdirectory. 
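
To illustrate the off-by-one (a standalone sketch, not part of this patch):

    fn main() {
        // Buggy: an exclusive range stops at 0xfe, so only 255 of the 256
        // two-hex-digit object subdirectories were created.
        assert_eq!((0..0xFF).count(), 255);
        // Fixed: the inclusive range also yields 0xff.
        assert_eq!((0..=0xFF).count(), 256);
        assert_eq!(format!("{:02x}", 0xFF), "ff");
    }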
--- lib/src/tar/export.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 1d623b0d9..2865f4fa0 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -127,7 +127,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.append_default_dir(path)?; } // Object subdirectories - for d in 0..0xFF { + for d in 0..=0xFF { let path: Utf8PathBuf = format!("{}/{:02x}", objdir, d).into(); self.append_default_dir(&path)?; } From 07e9adcd01a4ca2deb3f5dd2f3afe18813596277 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Tue, 25 Jan 2022 14:55:33 +0000 Subject: [PATCH 267/775] lib/tests: check ostree repo skeleton on tar export --- lib/tests/it/main.rs | 76 ++++++++++++++++++++++++++++++++------------ 1 file changed, 55 insertions(+), 21 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 8a21190ea..f5dbe250c 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -10,6 +10,7 @@ use ostree_ext::container::{ use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; use sh_inline::bash; +use std::collections::HashMap; use std::convert::TryInto; use std::{io::Write, process::Command}; @@ -237,6 +238,7 @@ async fn test_tar_import_signed() -> Result<()> { Ok(()) } +#[derive(Debug)] struct TarExpected { path: &'static str, etype: tar::EntryType, @@ -257,33 +259,33 @@ fn validate_tar_expected( t: tar::Entries, expected: impl IntoIterator, ) -> Result<()> { - let expected = expected.into_iter(); - let mut entries = t.map(|e| e.unwrap()); + let mut expected: HashMap<&'static str, TarExpected> = + expected.into_iter().map(|exp| (exp.path, exp)).collect(); + let entries = t.map(|e| e.unwrap()); // Verify we're injecting directories, fixes the absence of `/tmp` in our // images for example. - for exp in expected { - let mut found = false; - while let Some(entry) = entries.next() { - let header = entry.header(); - let entry_path = entry.path().unwrap(); - if exp.path == entry_path.as_os_str() { - assert_eq!(header.entry_type(), exp.etype); - assert_eq!(header.mode().unwrap(), exp.mode); - found = true; - break; - } - } - if !found { - anyhow::bail!("Failed to find entry: {}", exp.path); + for entry in entries { + let header = entry.header(); + let entry_path = entry.path().unwrap().to_string_lossy().into_owned(); + if let Some(exp) = expected.remove(entry_path.as_str()) { + assert_eq!(header.entry_type(), exp.etype, "{}", entry_path); + assert_eq!(header.mode().unwrap(), exp.mode, "{}", entry_path); } } + + assert!( + expected.is_empty(), + "Expected but not found:\n{:?}", + expected + ); Ok(()) } /// Validate basic structure of the tar export. -/// Right now just checks the first entry is `sysroot` with mode 0755. 
#[test] fn test_tar_export_structure() -> Result<()> { + use tar::EntryType::{Directory, Regular}; + let mut fixture = Fixture::new()?; let src_tar = initial_export(&fixture)?; let src_tar = std::io::BufReader::new(std::fs::File::open(&src_tar)?); @@ -299,8 +301,22 @@ fn test_tar_export_structure() -> Result<()> { // Validate format version 0 let expected = [ - ("sysroot/config", tar::EntryType::Regular, 0o644), - ("usr", tar::EntryType::Directory, libc::S_IFDIR | 0o755), + ("sysroot/config", Regular, 0o644), + ("sysroot/ostree/repo", Directory, 0o755), + ("sysroot/ostree/repo/objects/00", Directory, 0o755), + ("sysroot/ostree/repo/objects/23", Directory, 0o755), + ("sysroot/ostree/repo/objects/77", Directory, 0o755), + ("sysroot/ostree/repo/objects/bc", Directory, 0o755), + ("sysroot/ostree/repo/objects/ff", Directory, 0o755), + ("sysroot/ostree/repo/refs", Directory, 0o755), + ("sysroot/ostree/repo/refs/heads", Directory, 0o755), + ("sysroot/ostree/repo/refs/mirrors", Directory, 0o755), + ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), + ("sysroot/ostree/repo/tmp", Directory, 0o755), + ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), + ("sysroot/ostree/repo/xattrs", Directory, 0o755), + ("usr", Directory, libc::S_IFDIR | 0o755), ]; validate_tar_expected(entries, expected.iter().map(Into::into))?; @@ -310,8 +326,26 @@ let src_tar = std::io::BufReader::new(std::fs::File::open(&src_tar)?); let mut src_tar = tar::Archive::new(src_tar); let expected = [ - ("sysroot/ostree/repo/config", tar::EntryType::Regular, 0o644), - ("usr", tar::EntryType::Directory, libc::S_IFDIR | 0o755), + ("sysroot/ostree/repo", Directory, 0o755), + ("sysroot/ostree/repo/config", Regular, 0o644), + ("sysroot/ostree/repo/objects/00", Directory, 0o755), + ("sysroot/ostree/repo/objects/23", Directory, 0o755), + ("sysroot/ostree/repo/objects/77", Directory, 0o755), + ("sysroot/ostree/repo/objects/bc", Directory, 0o755), + ("sysroot/ostree/repo/objects/ff", Directory, 0o755), + ("sysroot/ostree/repo/refs", Directory, 0o755), + ("sysroot/ostree/repo/refs/heads", Directory, 0o755), + ("sysroot/ostree/repo/refs/mirrors", Directory, 0o755), + ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), + ("sysroot/ostree/repo/tmp", Directory, 0o755), + ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), + ( + "sysroot/ostree/repo/xattrs", + Directory, + libc::S_IFDIR | 0o755, + ), + ("usr", Directory, libc::S_IFDIR | 0o755), ]; validate_tar_expected(src_tar.entries()?, expected.iter().map(Into::into))?; Ok(()) } From c442a967633a02b515684055e7de2e9fe9da7ec4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 25 Jan 2022 13:38:45 -0500 Subject: [PATCH 268/775] tests: Quiet output of most shell commands Making `bash!` default to `set +x` (no command tracing) was perhaps a mistake. But this at least makes the test output more legible, if not perfect.
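
The pattern used throughout is simply appending >/dev/null to the noisy
commands inside the `bash!` scripts; a sketch only (with placeholder
arguments), using the same sh-inline crate as the tests:

    use sh_inline::bash;

    fn show_quietly(repo: &str, rev: &str) -> anyhow::Result<()> {
        // A nonzero exit status still fails the test; we only suppress the
        // command's stdout, not its errors.
        bash!(
            "ostree --repo={repo} show {rev} >/dev/null",
            repo = repo,
            rev = rev
        )?;
        Ok(())
    }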
--- lib/tests/it/main.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 8a21190ea..ae49f23a7 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -54,8 +54,8 @@ fn generate_test_repo(dir: &Utf8Path) -> Result { cd {dir} ostree --repo=repo init --mode=archive ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata-string=version=42.0 --gpg-homedir={gpghome} --gpg-sign={keyid} \ - --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst - ostree --repo=repo show {testref} + --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null + ostree --repo=repo show {testref} >/dev/null "}, testref = TESTREF, gpghome = gpghome.as_str(), @@ -213,7 +213,7 @@ async fn test_tar_import_signed() -> Result<()> { // And signed correctly bash!( - "ostree --repo={repo} remote gpg-import --stdin myremote < {p}/gpghome/key1.asc", + "ostree --repo={repo} remote gpg-import --stdin myremote < {p}/gpghome/key1.asc >/dev/null", repo = fixture.destrepo_path.as_str(), p = fixture.srcdir.as_str() )?; @@ -334,7 +334,7 @@ async fn test_tar_import_export() -> Result<()> { ); bash!( r#" - ostree --repo={destrepodir} ls -R {imported_commit} + ostree --repo={destrepodir} ls -R {imported_commit} >/dev/null val=$(ostree --repo={destrepodir} show --print-detached-metadata-key=my-detached-key {imported_commit}) test "${{val}}" = "'my-detached-value'" "#, @@ -600,7 +600,7 @@ async fn test_container_write_derive() -> Result<()> { // Parse the commit and verify we pulled the derived content. bash!( - "ostree --repo={repo} ls {r} /usr/bin/newderivedfile", + "ostree --repo={repo} ls {r} /usr/bin/newderivedfile >/dev/null", repo = fixture.destrepo_path.as_str(), r = import.merge_commit.as_str() )?; From 42664283224343c94d6c5af9962c5085973b23ca Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 25 Jan 2022 14:18:08 -0500 Subject: [PATCH 269/775] lib/tests: Mask out `S_IFMT` in tests We should fix the generation code, but for now let's just ignore it because I believe it's harmless. --- lib/tests/it/main.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index f5dbe250c..de201c675 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -269,7 +269,15 @@ fn validate_tar_expected( let entry_path = entry.path().unwrap().to_string_lossy().into_owned(); if let Some(exp) = expected.remove(entry_path.as_str()) { assert_eq!(header.entry_type(), exp.etype, "{}", entry_path); - assert_eq!(header.mode().unwrap(), exp.mode, "{}", entry_path); + // FIXME: change the generation code to not inject the format bits into the mode, + // because tar doesn't need/use it. 
+ // https://github.com/ostreedev/ostree-rs-ext/pull/217/files#r791942496 + assert_eq!( + header.mode().unwrap() & !libc::S_IFMT, + exp.mode, + "{}", + entry_path + ); } } @@ -316,7 +324,7 @@ fn test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/tmp", Directory, 0o755), ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), ("sysroot/ostree/repo/xattrs", Directory, 0o755), - ("usr", Directory, libc::S_IFDIR | 0o755), + ("usr", Directory, 0o755), ]; validate_tar_expected(entries, expected.iter().map(Into::into))?; @@ -340,12 +348,8 @@ fn test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), ("sysroot/ostree/repo/tmp", Directory, 0o755), ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), - ( - "sysroot/ostree/repo/xattrs", - Directory, - libc::S_IFDIR | 0o755, - ), - ("usr", Directory, libc::S_IFDIR | 0o755), + ("sysroot/ostree/repo/xattrs", Directory, 0o755), + ("usr", Directory, 0o755), ]; validate_tar_expected(src_tar.entries()?, expected.iter().map(Into::into))?; From cde864b952e9625e8cebe03a96237e5a35a1fedf Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Wed, 26 Jan 2022 08:58:51 +0000 Subject: [PATCH 270/775] tar/export: fix mode bits in tar archive This fixes the tar exporting logic to avoid leaking filetype bits into mode field. The tar header already has a distinct field to identify entry type, thus mode bits should only convey permissions. --- lib/src/tar/export.rs | 4 ++-- lib/src/tar/import.rs | 2 +- lib/tests/it/main.rs | 12 ++---------- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 2865f4fa0..07469836d 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -281,7 +281,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { h.set_uid(meta.attribute_uint32("unix::uid") as u64); h.set_gid(meta.attribute_uint32("unix::gid") as u64); let mode = meta.attribute_uint32("unix::mode"); - h.set_mode(mode); + h.set_mode(mode & !libc::S_IFMT); let mut target_header = h.clone(); target_header.set_size(0); @@ -335,7 +335,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { header.set_size(0); header.set_uid(meta.uid as u64); header.set_gid(meta.gid as u64); - header.set_mode(meta.mode); + header.set_mode(meta.mode & !libc::S_IFMT); self.out .append_data(&mut header, dirpath, std::io::empty())?; Ok(()) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 69941b553..5fd8d31ac 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -266,7 +266,7 @@ impl Importer { Some(checksum), uid, gid, - mode, + libc::S_IFREG | mode, xattrs.as_ref(), &buf, cancellable, diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 005251759..9c68194a4 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -269,15 +269,7 @@ fn validate_tar_expected( let entry_path = entry.path().unwrap().to_string_lossy().into_owned(); if let Some(exp) = expected.remove(entry_path.as_str()) { assert_eq!(header.entry_type(), exp.etype, "{}", entry_path); - // FIXME: change the generation code to not inject the format bits into the mode, - // because tar doesn't need/use it. 
- // https://github.com/ostreedev/ostree-rs-ext/pull/217/files#r791942496 - assert_eq!( - header.mode().unwrap() & !libc::S_IFMT, - exp.mode, - "{}", - entry_path - ); + assert_eq!(header.mode().unwrap(), exp.mode, "{}", entry_path); } } @@ -303,7 +295,7 @@ fn test_tar_export_structure() -> Result<()> { let first = entries.next().unwrap()?; let firstpath = first.path()?; assert_eq!(firstpath.to_str().unwrap(), "./"); - assert_eq!(first.header().mode()?, libc::S_IFDIR | 0o755); + assert_eq!(first.header().mode()?, 0o755); let next = entries.next().unwrap().unwrap(); assert_eq!(next.path().unwrap().as_os_str(), "sysroot"); From 1757c0c745a0166be28380dd6676a159b17ba2ea Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 25 Jan 2022 17:58:30 -0500 Subject: [PATCH 271/775] build: Use released oci-spec See https://github.com/containers/oci-spec-rs/pull/94 --- lib/Cargo.toml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 6dfe452f8..70010697f 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -26,9 +26,7 @@ indicatif = "0.16.0" once_cell = "1.9" libc = "0.2.92" rustix = "0.31.3" -# oci-spec = "0.5.3" -# Until the next release -oci-spec = { git = "https://github.com/containers/oci-spec-rs" } +oci-spec = "0.5.4" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" From 5ebbcd384b4ee0798cf9b14e3c16100e01e372b1 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 25 Jan 2022 18:17:48 -0500 Subject: [PATCH 272/775] lib: Bump rustix and cap-std-ext cap-std-ext needs the latest rustix. --- lib/Cargo.toml | 3 +-- lib/src/globals.rs | 1 + lib/src/tar/write.rs | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 70010697f..742d6ab9e 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -16,7 +16,7 @@ async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" cjson = "0.1.1" -cap-std-ext = "0.1.0" +cap-std-ext = "0.23" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures-util = "0.3.13" @@ -25,7 +25,6 @@ hex = "0.4.3" indicatif = "0.16.0" once_cell = "1.9" libc = "0.2.92" -rustix = "0.31.3" oci-spec = "0.5.4" openat = "0.1.20" openat-ext = "0.2.0" diff --git a/lib/src/globals.rs b/lib/src/globals.rs index 228e2a680..e45df1bd7 100644 --- a/lib/src/globals.rs +++ b/lib/src/globals.rs @@ -1,6 +1,7 @@ //! Global functions. use super::Result; +use cap_std_ext::rustix; use once_cell::sync::OnceCell; use ostree::glib; use std::fs::File; diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index abf799610..eba87a40c 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -11,6 +11,7 @@ use crate::Result; use anyhow::{anyhow, Context}; use camino::{Utf8Component, Utf8Path, Utf8PathBuf}; use cap_std_ext::cmdext::CapStdExtCommandExt; +use cap_std_ext::rustix; use ostree::gio; use ostree::prelude::FileExt; use rustix::fd::FromFd; From 72afaafcadbcd37671a4ddf6ff8526cbc6783ee9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 25 Jan 2022 18:22:16 -0500 Subject: [PATCH 273/775] Release 0.6.1 I believe this is all semver compat; we have a big change queued up for the layer split work which should come after this. 
--- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 742d6ab9e..846da4491 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.6.0" +version = "0.6.1" [dependencies] anyhow = "1.0" From 0581c26ff3cc1b93decaabd3e65a3cb308b4596c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 26 Jan 2022 10:07:29 -0500 Subject: [PATCH 274/775] Drop use of `FileExt` I stumbled across the fact that we no longer need https://github.com/coreos/openat-ext/commit/c377a54aed87d7831eb7241483e6b11d8123fecd because https://github.com/rust-lang/rust/pull/75272 landed just a few months after! While we're here, slightly clean up the fd dance to make things a bit safer using `BorrowedFd`. It's interesting to note here that with io-lifetimes we could add a method to the glib crate to borrow the underlying fd safely. --- lib/Cargo.toml | 1 + lib/src/ima.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 846da4491..53698972a 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -23,6 +23,7 @@ futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" +io-lifetimes = "0.4" once_cell = "1.9" libc = "0.2.92" oci-spec = "0.5.4" diff --git a/lib/src/ima.rs b/lib/src/ima.rs index ba964a1e1..83aef9127 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -4,6 +4,7 @@ use crate::objgv::*; use anyhow::{Context, Result}; +use cap_std_ext::rustix::fd::BorrowedFd; use fn_error_context::context; use gio::glib; use gio::prelude::*; @@ -11,13 +12,13 @@ use glib::Cast; use glib::Variant; use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; -use openat_ext::FileExt; +use io_lifetimes::AsFilelike; use ostree::gio; use std::collections::{BTreeMap, HashMap}; use std::ffi::CString; use std::fs::File; +use std::ops::DerefMut; use std::os::unix::io::AsRawFd; -use std::os::unix::prelude::{FromRawFd, IntoRawFd}; use std::process::{Command, Stdio}; use std::rc::Rc; use std::{convert::TryInto, io::Seek}; @@ -122,10 +123,9 @@ impl<'a> CommitRewriter<'a> { // If we're operating on a bare repo, we can clone the file (copy_file_range) directly. if let Ok(instream) = instream.clone().downcast::() { // View the fd as a File - let instream_fd = unsafe { File::from_raw_fd(instream.as_raw_fd()) }; - instream_fd.copy_to(tempf.as_file_mut())?; - // Leak to avoid double close - let _ = instream_fd.into_raw_fd(); + let instream_fd = unsafe { BorrowedFd::borrow_raw_fd(instream.as_raw_fd()) }; + let instream_fd = &mut instream_fd.as_filelike_view::(); + std::io::copy(instream_fd.deref_mut(), tempf.as_file_mut())?; } else { // If we're operating on an archive repo, then we need to uncompress // and recompress... From 3e9086ac4f581a5c201d1c2145fcc3ce3cdf6391 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 26 Jan 2022 14:58:29 -0500 Subject: [PATCH 275/775] tar/export: Continue injecting mode bits in format version 0 This ensures compatibility with older clients, which we need to pass the rpm-ostree upgrade test. 
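
To make the compatibility concern concrete (a standalone sketch, not part of
this patch): an ostree mode word carries the S_IFMT file-format bits on top
of the permission bits, and format version 0 keeps them in the tar header:

    fn main() {
        // 0o100644: S_IFREG (0o100000) plus rw-r--r-- permissions.
        let ostree_mode: u32 = libc::S_IFREG | 0o644;
        // Format version 1 strips the format bits; tar already records the
        // entry type in a dedicated header field.
        assert_eq!(ostree_mode & !libc::S_IFMT, 0o644);
        // Format version 0 emits the full value for older consumers.
        assert_eq!(ostree_mode, 0o100644);
    }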
--- lib/src/tar/export.rs | 15 +++++++++++++-- lib/tests/it/main.rs | 35 +++++++++++++++++++++++++++++++---- 2 files changed, 44 insertions(+), 6 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 07469836d..829d43865 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -94,6 +94,17 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { } } + /// Convert the ostree mode to tar mode. + /// The ostree mode bits include the format, tar does not. + /// Historically in format version 0 we injected them, so we need to keep doing so. + fn filter_mode(&self, mode: u32) -> u32 { + if self.options.format_version == 0 { + mode + } else { + mode & !libc::S_IFMT + } + } + /// Add a directory entry with default permissions (root/root 0755) fn append_default_dir(&mut self, path: &Utf8Path) -> Result<()> { let mut h = tar::Header::new_gnu(); @@ -281,7 +292,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { h.set_uid(meta.attribute_uint32("unix::uid") as u64); h.set_gid(meta.attribute_uint32("unix::gid") as u64); let mode = meta.attribute_uint32("unix::mode"); - h.set_mode(mode & !libc::S_IFMT); + h.set_mode(self.filter_mode(mode)); let mut target_header = h.clone(); target_header.set_size(0); @@ -335,7 +346,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { header.set_size(0); header.set_uid(meta.uid as u64); header.set_gid(meta.gid as u64); - header.set_mode(meta.mode & !libc::S_IFMT); + header.set_mode(self.filter_mode(meta.mode)); self.out .append_data(&mut header, dirpath, std::io::empty())?; Ok(()) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 9c68194a4..fd0d5698b 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -256,6 +256,7 @@ impl Into for &(&'static str, tar::EntryType, u32) { } fn validate_tar_expected( + format_version: u32, t: tar::Entries, expected: impl IntoIterator, ) -> Result<()> { @@ -269,7 +270,25 @@ fn validate_tar_expected( let entry_path = entry.path().unwrap().to_string_lossy().into_owned(); if let Some(exp) = expected.remove(entry_path.as_str()) { assert_eq!(header.entry_type(), exp.etype, "{}", entry_path); - assert_eq!(header.mode().unwrap(), exp.mode, "{}", entry_path); + let is_old_object = format_version == 0; + let mut expected_mode = exp.mode; + if is_old_object && !entry_path.starts_with("sysroot/") { + let fmtbits = match header.entry_type() { + tar::EntryType::Regular => libc::S_IFREG, + tar::EntryType::Directory => libc::S_IFDIR, + tar::EntryType::Symlink => 0, + o => panic!("Unexpected entry type {:?}", o), + }; + expected_mode |= fmtbits; + } + assert_eq!( + header.mode().unwrap(), + expected_mode, + "fmtver: {} type: {:?} path: {}", + format_version, + header.entry_type(), + entry_path + ); } } @@ -295,7 +314,7 @@ fn test_tar_export_structure() -> Result<()> { let first = entries.next().unwrap()?; let firstpath = first.path()?; assert_eq!(firstpath.to_str().unwrap(), "./"); - assert_eq!(first.header().mode()?, 0o755); + assert_eq!(first.header().mode()?, libc::S_IFDIR | 0o755); let next = entries.next().unwrap().unwrap(); assert_eq!(next.path().unwrap().as_os_str(), "sysroot"); @@ -318,7 +337,11 @@ fn test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/xattrs", Directory, 0o755), ("usr", Directory, 0o755), ]; - validate_tar_expected(entries, expected.iter().map(Into::into))?; + validate_tar_expected( + fixture.format_version, + entries, + expected.iter().map(Into::into), + )?; // Validate format version 1 fixture.format_version = 1; @@ -343,7 +366,11 @@ fn 
test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/xattrs", Directory, 0o755), ("usr", Directory, 0o755), ]; - validate_tar_expected(src_tar.entries()?, expected.iter().map(Into::into))?; + validate_tar_expected( + fixture.format_version, + src_tar.entries()?, + expected.iter().map(Into::into), + )?; Ok(()) } From 41be29c372f9aafaeda04a453f91df1caf810b04 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 26 Jan 2022 17:09:33 -0500 Subject: [PATCH 276/775] lib/container: Quiet dead code warnings for `ocidir` We have this trick of compiling ourself with integration testing enabled, which uses a lot of the code here. See the `ostree-ext = { path = ".", features = ["internal-testing-api"] }` bit in Cargo.toml. But that isn't turned on for other crates that use this, and correctly gating all of it is a little tedious. So let's just use the big hammer for now to quiet the dead code warnings. --- lib/src/container/mod.rs | 9 +++++++++ lib/src/container/ocidir.rs | 4 +--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index db20ab4fd..713108f64 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -229,6 +229,15 @@ mod encapsulate; pub use encapsulate::*; mod unencapsulate; pub use unencapsulate::*; +// We have this trick of compiling ourself with integration testing +// enabled, which uses a lot of the code here. See the +// `ostree-ext = { path = ".", features = ["internal-testing-api"] }` +// bit in Cargo.toml. +// +// But that isn't turned on for other crates that use this, and correctly gating all +// of it is a little tedious. So let's just use the big hammer for now to +// quiet the dead code warnings. +#[allow(dead_code)] pub(crate) mod ocidir; mod skopeo; pub mod store; diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index b8a59f945..2e421aab0 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -133,7 +133,6 @@ impl OciDir { Self::open(dir) } - #[allow(dead_code)] /// Clone an OCI directory, using reflinks for blobs. pub(crate) fn clone_to(&self, destdir: &openat::Dir, p: impl AsRef) -> Result { let p = p.as_ref(); @@ -160,7 +159,6 @@ impl OciDir { RawLayerWriter::new(&self.dir, c) } - #[allow(dead_code)] /// Create a tar output stream, backed by a blob pub(crate) fn create_layer( &self, @@ -170,7 +168,7 @@ impl OciDir { } /// Add a layer to the top of the image stack. The firsh pushed layer becomes the root. - #[allow(dead_code)] + pub(crate) fn push_layer( &self, manifest: &mut oci_image::ImageManifest, From 67296c773139f5a90cc4c869797b2bc84803adb7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 27 Jan 2022 08:51:06 -0500 Subject: [PATCH 277/775] Release 0.6.2 Fixes a backwards incompatible change in the (now yanked) 0.6.1. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 53698972a..1bccc21d0 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.6.1" +version = "0.6.2" [dependencies] anyhow = "1.0" From 985d26405abb68257d818b7af435c1c98c759fcb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 31 Jan 2022 21:02:30 -0500 Subject: [PATCH 278/775] ci: Switch to more official FCOS image This one is updated automatically. 
--- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index dcb0e0b0f..c0df72b6a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -84,7 +84,7 @@ jobs: name: "Integration" needs: build runs-on: ubuntu-latest - container: quay.io/cgwalters/fcos + container: quay.io/coreos-assembler/fcos:testing-devel steps: - name: Checkout repository uses: actions/checkout@v2 From 750905272073adf4a44bc8ae02eeec36604d636a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 1 Feb 2022 10:05:30 -0500 Subject: [PATCH 279/775] lib/tar: Simplify import code, minor comment tweaks I was just reading this and noticed we can simplify it. --- lib/src/tar/import.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 5fd8d31ac..2e76f7c0b 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -588,7 +588,8 @@ pub struct TarImportOptions { pub remote: Option, } -/// Read the contents of a tarball and import the ostree commit inside. The sha56 of the imported commit will be returned. +/// Read the contents of a tarball and import the ostree commit inside. +/// Returns the sha256 of the imported commit. #[instrument(skip(repo, src))] pub async fn import_tar( repo: &ostree::Repo, @@ -598,7 +599,8 @@ pub async fn import_tar( let options = options.unwrap_or_default(); let src = tokio_util::io::SyncIoBridge::new(src); let repo = repo.clone(); - let import = crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| { + // The tar code we use today is blocking, so we spawn a thread. + crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| { let mut archive = tar::Archive::new(src); let txn = repo.auto_transaction(Some(cancellable))?; let importer = Importer::new(&repo, options.remote); @@ -606,9 +608,8 @@ pub async fn import_tar( txn.commit(Some(cancellable))?; repo.mark_commit_partial(&checksum, false)?; Ok::<_, anyhow::Error>(checksum) - }); - let import: String = import.await?; - Ok(import) + }) + .await } #[cfg(test)] From e597381a311c080c4f051d1ce987ad9572896a61 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 1 Feb 2022 12:05:13 -0500 Subject: [PATCH 280/775] Bump cap-std to 0.24 This picks up various fixes, but most critically a new rustix with https://github.com/bytecodealliance/rustix/pull/186 --- lib/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 1bccc21d0..13ea2fcbd 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -16,14 +16,14 @@ async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" cjson = "0.1.1" -cap-std-ext = "0.23" +cap-std-ext = "0.24" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" -io-lifetimes = "0.4" +io-lifetimes = "0.5" once_cell = "1.9" libc = "0.2.92" oci-spec = "0.5.4" From 43e2bcf05e3821249f72f9e770c777b226b403af Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 2 Feb 2022 11:19:04 -0500 Subject: [PATCH 281/775] ocidir: Use `chrono`, not `glib` for time formatting Sadly RHEL8's glib is too old for this. Switch to using chrono. 
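
For reference, the replacement is a single chrono call (mirroring the diff
below; the timestamp value in the comment is only an example):

    fn rfc3339_now() -> String {
        // Renders e.g. "2022-02-02T21:06:26Z", suitable for the "created"
        // field in the OCI image history.
        chrono::offset::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true)
    }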
--- lib/Cargo.toml | 1 + lib/src/container/ocidir.rs | 13 ++----------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 13ea2fcbd..5f4848518 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -15,6 +15,7 @@ containers-image-proxy = "0.4.0" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" +chrono = "0.4.19" cjson = "0.1.1" cap-std-ext = "0.24" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 2e421aab0..71eefc4f9 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -203,18 +203,9 @@ impl OciDir { format!("sha256:{}", layer.uncompressed_sha256), )); config.set_rootfs(rootfs); - // There is e.g. https://docs.rs/chrono/latest/chrono/struct.DateTime.html#method.to_rfc3339_opts - // and chrono is already in our dependency chain, just indirectly because of tracing-subscriber. - // glib actually also has https://docs.rs/glib/latest/glib/struct.DateTime.html#method.format_iso8601 - // but that requires a newer glib. - // Since glib is going to be required by ostree for the forseeable future, for now - // let's use that instead of adding chrono. - let now = ostree::glib::DateTime::new_now_utc() - .unwrap() - .format("%Y-%m-%dT%H:%M:%S.%fZ") - .unwrap(); + let now = chrono::offset::Utc::now(); let h = oci_image::HistoryBuilder::default() - .created(now) + .created(now.to_rfc3339_opts(chrono::SecondsFormat::Secs, true)) .created_by(description.to_string()) .build() .unwrap(); From 5d3a5940b18b9853a4dc698f7b2edc995f3e95b2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 2 Feb 2022 16:06:26 -0500 Subject: [PATCH 282/775] Release 0.6.3 Nothing critical, just the cap-std bump and the chrono fix; I particularly want the latter for the next rpm-ostree. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 5f4848518..e8978a2e9 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.6.2" +version = "0.6.3" [dependencies] anyhow = "1.0" From 409d937a1a8737e0a8f345b811d862dfe03cb88b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 2 Feb 2022 19:53:02 -0500 Subject: [PATCH 283/775] ocidir: More direct use of direct `_mut()` methods Followup to https://github.com/ostreedev/ostree-rs-ext/pull/206/commits/e0c4c849d9901e0de03744a6200117424f1b7158 This is notably more efficient than deep cloning all the elements. Just noticed while reading the code for an unrelated reason. --- lib/src/container/ocidir.rs | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 71eefc4f9..34df415dd 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -97,12 +97,6 @@ fn parse_one_filename(s: &str) -> Result<&str> { .ok_or_else(|| anyhow!("Invalid filename {}", s)) } -// Sadly the builder bits in the OCI spec don't offer mutable access to fields -// https://github.com/containers/oci-spec-rs/issues/86 -fn vec_clone_append(s: &[T], i: T) -> Vec { - s.iter().cloned().chain(std::iter::once(i)).collect() -} - /// Create a dummy config descriptor. 
/// Our API right now always mutates a manifest, which means we need /// a "valid" manifest, which requires a "valid" config descriptor. @@ -196,12 +190,11 @@ impl OciDir { builder = builder.annotations(annotations); } let blobdesc = builder.build().unwrap(); - manifest.set_layers(vec_clone_append(manifest.layers(), blobdesc)); + manifest.layers_mut().push(blobdesc); let mut rootfs = config.rootfs().clone(); - rootfs.set_diff_ids(vec_clone_append( - rootfs.diff_ids(), - format!("sha256:{}", layer.uncompressed_sha256), - )); + rootfs + .diff_ids_mut() + .push(format!("sha256:{}", layer.uncompressed_sha256)); config.set_rootfs(rootfs); let now = chrono::offset::Utc::now(); let h = oci_image::HistoryBuilder::default() @@ -209,7 +202,7 @@ impl OciDir { .created_by(description.to_string()) .build() .unwrap(); - config.set_history(vec_clone_append(config.history(), h)); + config.history_mut().push(h); } /// Read a JSON blob. From 4eb9203e1987f9cd8ab3754d8c40bc070bb643ff Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 3 Feb 2022 11:03:07 -0500 Subject: [PATCH 284/775] container/encapsulate: Support copying commit metadata keys For https://github.com/coreos/coreos-assembler/issues/2685 we want to copy e.g. `rpmostree.input-hash` into the container image. Extend the `ExportOpts` struct to support this, and also expose it via the CLI, e.g. `ostree container encapsulate --copymeta=rpmostree.input-hash ...`. And while I was thinking about this...we should by default copy some core ostree keys, such as `ostree.bootable` and `ostree.linux` since they are key pieces of metadata. --- lib/src/cli.rs | 15 +++++++++--- lib/src/container/encapsulate.rs | 39 +++++++++++++++++++++++++++++--- lib/tests/it/main.rs | 13 ++++++++--- 3 files changed, 58 insertions(+), 9 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index dbb24a3d4..596e708f2 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -110,6 +110,10 @@ enum ContainerOpts { #[structopt(name = "label", long, short)] labels: Vec, + /// Propagate an OSTree commit metadata key to container label + #[structopt(name = "copymeta", long)] + copy_meta_keys: Vec, + /// Corresponds to the Dockerfile `CMD` instruction. 
#[structopt(long)] cmd: Option>, @@ -365,6 +369,7 @@ async fn container_export( rev: &str, imgref: &ImageReference, labels: BTreeMap, + copy_meta_keys: Vec, cmd: Option>, ) -> Result<()> { let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; @@ -372,8 +377,11 @@ async fn container_export( labels: Some(labels), cmd, }; - let opts = Some(Default::default()); - let pushed = crate::container::encapsulate(repo, rev, &config, opts, imgref).await?; + let opts = crate::container::ExportOpts { + copy_meta_keys, + ..Default::default() + }; + let pushed = crate::container::encapsulate(repo, rev, &config, Some(opts), imgref).await?; println!("{}", pushed); Ok(()) } @@ -492,6 +500,7 @@ where rev, imgref, labels, + copy_meta_keys, cmd, } => { let labels: Result> = labels @@ -503,7 +512,7 @@ where Ok((k.to_string(), v.to_string())) }) .collect(); - container_export(&repo, &rev, &imgref, labels?, cmd).await + container_export(&repo, &rev, &imgref, labels?, copy_meta_keys, cmd).await } ContainerOpts::Image(opts) => match opts { ContainerImageOpts::List { repo } => { diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 33728aa7d..e4055f3c4 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -5,8 +5,7 @@ use super::{ocidir, OstreeImageReference, Transport}; use super::{ImageReference, SignatureSource, OSTREE_COMMIT_LABEL}; use crate::container::skopeo; use crate::tar as ostree_tar; -use anyhow::Context; -use anyhow::Result; +use anyhow::{anyhow, Context, Result}; use fn_error_context::context; use gio::glib; use oci_spec::image as oci_image; @@ -22,7 +21,6 @@ use tracing::{instrument, Level}; /// schema, it's not actually useful today. But, we keep it /// out of principle. const BLOB_OSTREE_ANNOTATION: &str = "ostree.encapsulated"; - /// Configuration for the generated container. #[derive(Debug, Default)] pub struct Config { @@ -46,6 +44,32 @@ fn export_ostree_ref( w.complete() } +fn commit_meta_to_labels<'a>( + meta: &glib::VariantDict, + keys: impl IntoIterator, + labels: &mut HashMap, +) -> Result<()> { + for k in keys { + let v = meta + .lookup::(k) + .context("Expected string for commit metadata value")? + .ok_or_else(|| anyhow!("Could not find commit metadata key: {}", k))?; + labels.insert(k.to_string(), v); + } + // Copy standard metadata keys `ostree.bootable` and `ostree.linux`. + // Bootable is an odd one out in being a boolean. + if let Some(v) = meta.lookup::(*ostree::METADATA_KEY_BOOTABLE)? { + labels.insert(ostree::METADATA_KEY_BOOTABLE.to_string(), v.to_string()); + } + // Handle any other string-typed values here. + for k in &[&ostree::METADATA_KEY_LINUX] { + if let Some(v) = meta.lookup::(k)? { + labels.insert(k.to_string(), v); + } + } + Ok(()) +} + /// Generate an OCI image from a given ostree root #[context("Building oci")] fn build_oci( @@ -76,6 +100,13 @@ fn build_oci( let mut ctrcfg = oci_image::Config::default(); let mut imgcfg = oci_image::ImageConfiguration::default(); let labels = ctrcfg.labels_mut().get_or_insert_with(Default::default); + + commit_meta_to_labels( + &commit_meta, + opts.copy_meta_keys.iter().map(|k| k.as_str()), + labels, + )?; + let mut manifest = ocidir::new_empty_manifest().build().unwrap(); if let Some(version) = @@ -198,6 +229,8 @@ async fn build_impl( pub struct ExportOpts { /// If true, perform gzip compression of the tar layers. pub compress: bool, + /// A set of commit metadata keys to copy as image labels. 
+ pub copy_meta_keys: Vec, } /// Given an OSTree repository and ref, generate a container image. diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index fd0d5698b..d81a25eca 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -54,8 +54,8 @@ fn generate_test_repo(dir: &Utf8Path) -> Result { indoc! {" cd {dir} ostree --repo=repo init --mode=archive - ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata-string=version=42.0 --gpg-homedir={gpghome} --gpg-sign={keyid} \ - --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null + ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir={gpghome} --gpg-sign={keyid} \ + --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null ostree --repo=repo show {testref} >/dev/null "}, testref = TESTREF, @@ -464,11 +464,15 @@ async fn test_container_import_export() -> Result<()> { ), cmd: Some(vec!["/bin/bash".to_string()]), }; + let opts = ostree_ext::container::ExportOpts { + copy_meta_keys: vec!["buildsys.checksum".to_string()], + ..Default::default() + }; let digest = ostree_ext::container::encapsulate( &fixture.srcrepo, TESTREF, &config, - None, + Some(opts), &srcoci_imgref, ) .await @@ -479,6 +483,9 @@ async fn test_container_import_export() -> Result<()> { assert!(inspect.contains(r#""version": "42.0""#)); assert!(inspect.contains(r#""foo": "bar""#)); assert!(inspect.contains(r#""test": "value""#)); + assert!(inspect.contains( + r#""buildsys.checksum": "41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3""# + )); let srcoci_unverified = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, From db79b435763365bcbc5abfabf98c071d34511ca2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 3 Feb 2022 20:01:50 -0500 Subject: [PATCH 285/775] lib: Bump ostree crate, enable `cap-std-apis` This way we can start making use of cap-std more. 
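
As a hypothetical sketch of the direction this enables (treat the exact
method name and signature here as an assumption, not a committed API):
operations can take a capability-scoped cap_std::fs::Dir handle instead of
an ambient global path:

    use anyhow::Result;
    use cap_std::fs::Dir;

    // `sysroot` bounds everything this function may touch; the repo path is
    // resolved relative to that handle, with no way to escape via "..".
    fn open_repo_in(sysroot: &Dir) -> Result<ostree::Repo> {
        let repo = ostree::Repo::open_at_dir(sysroot, "ostree/repo")?;
        Ok(repo)
    }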
--- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index e8978a2e9..35b310c51 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -31,7 +31,7 @@ oci-spec = "0.5.4" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { features = ["v2021_5"], version = "0.13.4" } +ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.13.5" } pin-project = "1.0" serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" From 04ecf823b7ea30a6d9b5a9bd3bd3f34b7ce843c8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 3 Feb 2022 20:10:35 -0500 Subject: [PATCH 286/775] container/encapsulate: Honor `ostree.container-cmd` Builds on https://github.com/ostreedev/ostree-rs/pull/47/commits/6d3a69cc43214a77085e98f062f9f624c4ea25db Part of https://github.com/coreos/coreos-assembler/issues/2685 --- lib/src/container/encapsulate.rs | 6 +++++- lib/tests/it/main.rs | 27 +++++++++++++++++++++++++-- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index e4055f3c4..eb816b792 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -121,7 +121,11 @@ fn build_oci( for (k, v) in config.labels.iter().map(|k| k.iter()).flatten() { labels.insert(k.into(), v.into()); } - if let Some(cmd) = config.cmd.as_ref() { + // Lookup the cmd embedded in commit metadata + let cmd = commit_meta.lookup::>(ostree::COMMIT_META_CONTAINER_CMD)?; + // But support it being overridden by CLI options + let cmd = config.cmd.as_ref().or_else(|| cmd.as_ref()); + if let Some(cmd) = cmd { ctrcfg.set_cmd(Some(cmd.clone())); } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index d81a25eca..8dfd6db2a 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -54,7 +54,7 @@ fn generate_test_repo(dir: &Utf8Path) -> Result { indoc! {" cd {dir} ostree --repo=repo init --mode=archive - ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir={gpghome} --gpg-sign={keyid} \ + ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir={gpghome} --gpg-sign={keyid} \ --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null ostree --repo=repo show {testref} >/dev/null "}, @@ -442,6 +442,14 @@ fn skopeo_inspect(imgref: &str) -> Result { Ok(String::from_utf8(out.stdout)?) } +fn skopeo_inspect_config(imgref: &str) -> Result { + let out = Command::new("skopeo") + .args(&["inspect", "--config", imgref]) + .stdout(std::process::Stdio::piped()) + .output()?; + Ok(serde_json::from_slice(&out.stdout)?) 
+} + #[tokio::test] async fn test_container_import_export() -> Result<()> { let fixture = Fixture::new()?; @@ -462,7 +470,7 @@ async fn test_container_import_export() -> Result<()> { .map(|(k, v)| (k.to_string(), v.to_string())) .collect(), ), - cmd: Some(vec!["/bin/bash".to_string()]), + ..Default::default() }; let opts = ostree_ext::container::ExportOpts { copy_meta_keys: vec!["buildsys.checksum".to_string()], @@ -486,6 +494,21 @@ async fn test_container_import_export() -> Result<()> { assert!(inspect.contains( r#""buildsys.checksum": "41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3""# )); + let cfg = skopeo_inspect_config(&srcoci_imgref.to_string())?; + // unwrap. Unwrap. UnWrap. UNWRAP!!!!!!! + assert_eq!( + cfg.config() + .as_ref() + .unwrap() + .cmd() + .as_ref() + .unwrap() + .get(0) + .as_ref() + .unwrap() + .as_str(), + "/usr/bin/bash" + ); let srcoci_unverified = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, From a54e157cf69269c8caabbe1f18b9730240c93092 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 4 Feb 2022 15:04:19 -0500 Subject: [PATCH 287/775] Hard depend on `skopeo copy --digestfile` This code predated our dependency on `experimental-image-proxy`, so we can now safely carry a hard dependency on a new enough skopeo and drop the pile of hacks here. --- lib/src/container/encapsulate.rs | 16 ++++------------ lib/src/container/skopeo.rs | 27 --------------------------- 2 files changed, 4 insertions(+), 39 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index eb816b792..d6c6a3e3a 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -188,21 +188,15 @@ async fn build_impl( let tempdir = tempfile::tempdir_in("/var/tmp")?; let tempdest = tempdir.path().join("d"); let tempdest = tempdest.to_str().unwrap(); - let digestfile = if skopeo::skopeo_has_features(skopeo::SkopeoFeatures::COPY_DIGESTFILE)? { - Some(tempdir.path().join("digestfile")) - } else { - None - }; + let digestfile = tempdir.path().join("digestfile"); let src = build_oci(repo, ostree_ref, Path::new(tempdest), config, opts)?; let mut cmd = skopeo::new_cmd(); tracing::event!(Level::DEBUG, "Copying {} to {}", src, dest); cmd.stdout(std::process::Stdio::null()).arg("copy"); - if let Some(ref digestfile) = digestfile { - cmd.arg("--digestfile"); - cmd.arg(digestfile); - } + cmd.arg("--digestfile"); + cmd.arg(&digestfile); cmd.args(&[src.to_string(), dest.to_string()]); let proc = super::skopeo::spawn(cmd)?; let output = proc.wait_with_output().await?; @@ -210,9 +204,7 @@ async fn build_impl( let stderr = String::from_utf8_lossy(&output.stderr); return Err(anyhow::anyhow!("skopeo failed: {}\n", stderr)); } - digestfile - .map(|p| -> Result { Ok(std::fs::read_to_string(p)?.trim().to_string()) }) - .transpose()? + Some(std::fs::read_to_string(digestfile)?.trim().to_string()) }; if let Some(digest) = digest { Ok(digest) diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs index 9f6718878..3aa02703b 100644 --- a/lib/src/container/skopeo.rs +++ b/lib/src/container/skopeo.rs @@ -1,7 +1,6 @@ //! Fork skopeo as a subprocess use anyhow::{Context, Result}; -use once_cell::sync::Lazy; use serde::Deserialize; use std::process::Stdio; use tokio::process::Command; @@ -13,32 +12,6 @@ use tokio::process::Command; const POLICY_PATH: &str = "/etc/containers/policy.json"; const INSECURE_ACCEPT_ANYTHING: &str = "insecureAcceptAnything"; -bitflags::bitflags! 
{ - pub(crate) struct SkopeoFeatures: u32 { - const COPY_DIGESTFILE = 0b00000001; - } -} - -static SKOPEO_FEATURES: Lazy> = Lazy::new(|| { - let mut features = SkopeoFeatures::empty(); - let c = std::process::Command::new("skopeo") - .args(&["copy", "--help"]) - .stderr(std::process::Stdio::piped()) - .output()?; - let stdout = String::from_utf8_lossy(&c.stderr); - if stdout.contains("--digestfile") { - features.insert(SkopeoFeatures::COPY_DIGESTFILE); - } - Ok(features) -}); - -pub(crate) fn skopeo_has_features(wanted: SkopeoFeatures) -> Result { - match &*SKOPEO_FEATURES { - Ok(found) => Ok(found.intersects(wanted)), - Err(e) => Err(anyhow::Error::msg(e)), - } -} - #[derive(Deserialize)] struct PolicyEntry { #[serde(rename = "type")] From 92a3800b7d541dbfd7f24a5d9de1c9e40c53e1c3 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Thu, 10 Feb 2022 08:27:01 +0000 Subject: [PATCH 288/775] ci: bump linting toolchain to latest stable (1.58.1) --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index c0df72b6a..dd6dec573 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -17,7 +17,7 @@ env: # Minimum supported Rust version (MSRV) ACTION_MSRV_TOOLCHAIN: 1.54.0 # Pinned toolchain for linting - ACTION_LINTS_TOOLCHAIN: 1.56.0 + ACTION_LINTS_TOOLCHAIN: 1.58.1 jobs: build: From 65d986d495fa40207fec4247cb5ab037a9aab786 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 10 Feb 2022 12:57:04 -0500 Subject: [PATCH 289/775] cli: Drop use of `select` I saw https://blog.yoshuawuyts.com/futures-concurrency-3/ go by and decided to audit our use of `select!` - I am definitely not an expert but my understanding is the CLI use case is buggy, but the other one is not. Rewrite the CLI case to operate on an explicit stream of values. --- lib/Cargo.toml | 1 + lib/src/cli.rs | 44 ++++++++++++++++++++++++++++--------------- lib/src/tokio_util.rs | 2 ++ 3 files changed, 32 insertions(+), 15 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 35b310c51..f3aa52f4c 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -40,6 +40,7 @@ tar = "0.4.38" tempfile = "3.2.0" tokio = { features = ["full"], version = "1" } tokio-util = { features = ["io-util"], version = "0.6.9" } +tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" [dev-dependencies] diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 596e708f2..77e5d0961 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -6,16 +6,19 @@ //! such as `rpm-ostree` can directly reuse it. use anyhow::Result; +use futures_util::FutureExt; use ostree::{gio, glib}; +use std::borrow::Borrow; use std::collections::BTreeMap; use std::convert::TryFrom; use std::ffi::OsString; use std::path::PathBuf; use structopt::StructOpt; +use tokio_stream::StreamExt; use crate::commit::container_commit; -use crate::container as ostree_container; use crate::container::store::{LayeredImageImporter, PrepareResult}; +use crate::container::{self as ostree_container, UnencapsulationProgress}; use crate::container::{Config, ImageReference, OstreeImageReference, UnencapsulateOptions}; fn parse_imgref(s: &str) -> Result { @@ -300,6 +303,11 @@ fn tar_export(opts: &ExportOpts) -> Result<()> { Ok(()) } +enum ProgressOrFinish { + Progress(UnencapsulationProgress), + Finished(Result), +} + /// Import a container image with an encapsulated ostree commit. 
async fn container_import( repo: &str, @@ -324,26 +332,32 @@ async fn container_import( let opts = UnencapsulateOptions { progress: Some(tx_progress), }; - let import = crate::container::unencapsulate(repo, imgref, Some(opts)); - tokio::pin!(import); - tokio::pin!(rx_progress); - let import = loop { - tokio::select! { - _ = rx_progress.changed() => { - let n = rx_progress.borrow().processed_bytes; + let rx_progress_stream = + tokio_stream::wrappers::WatchStream::new(rx_progress).map(ProgressOrFinish::Progress); + let import = crate::container::unencapsulate(repo, imgref, Some(opts)) + .into_stream() + .map(ProgressOrFinish::Finished); + let stream = rx_progress_stream.merge(import); + tokio::pin!(stream); + let mut import_result = None; + while let Some(value) = stream.next().await { + match value { + ProgressOrFinish::Progress(progress) => { + let n = progress.borrow().processed_bytes; if let Some(pb) = pb.as_ref() { pb.set_message(format!("Processed: {}", indicatif::HumanBytes(n))); } } - import = &mut import => { - if let Some(pb) = pb.as_ref() { - pb.finish(); - } - break import?; + ProgressOrFinish::Finished(import) => { + import_result = Some(import?); } } - }; - + } + if let Some(pb) = pb.as_ref() { + pb.finish(); + } + // It must have been set + let import = import_result.unwrap(); if let Some(write_ref) = write_ref { repo.set_ref_immediate( None, diff --git a/lib/src/tokio_util.rs b/lib/src/tokio_util.rs index 443901ff8..b11a158d5 100644 --- a/lib/src/tokio_util.rs +++ b/lib/src/tokio_util.rs @@ -16,6 +16,8 @@ where let notify2 = notify.clone(); cancellable.connect_cancelled(move |_| notify2.notify_one()); cancellable.set_error_if_cancelled()?; + // See https://blog.yoshuawuyts.com/futures-concurrency-3/ on why + // `select!` is a trap in general, but I believe this case is safe. tokio::select! { r = f => r, _ = notify.notified() => { From c6f33536650fc672d9bcdfcde00e5f63b88725a0 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Fri, 11 Feb 2022 14:09:06 +0000 Subject: [PATCH 290/775] tar/v1: introduce object types for split xattrs This reworks the v1 tar format (and the 'bare-split-xattrs' mode too), moving xattrs content under the usual sharded object store. In particular, this adds two new object types. `.file-xattrs` are regular files storing xattrs content, encoded as GVariant. Each object is keyed by the checksum of its content, allowing for multiple references. `.file-xattrs-link` are hardlinks which are associated to `.file` objects. Each object is keyed by the same checksum of the corresponding file object. The target of the hardlink is an existing `file-xattrs` object. In case of reaching the limit of too many links, this object could be a regular file too. --- lib/src/tar/export.rs | 236 +++++++++++++++++++-------- lib/src/tar/import.rs | 369 ++++++++++++++++++++++++++++++------------ lib/src/tar/mod.rs | 25 ++- lib/tests/it/main.rs | 1 - 4 files changed, 456 insertions(+), 175 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 829d43865..f672060ae 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -1,8 +1,7 @@ //! 
APIs for creating container images from OSTree commits use crate::objgv::*; -use anyhow::Context; -use anyhow::Result; +use anyhow::{anyhow, bail, ensure, Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use gio::glib; @@ -65,17 +64,41 @@ fn object_path(objtype: ostree::ObjectType, checksum: &str) -> Utf8PathBuf { format!("{}/repo/objects/{}/{}.{}", OSTREEDIR, first, rest, suffix).into() } -fn xattrs_path(checksum: &str) -> Utf8PathBuf { +fn v0_xattrs_path(checksum: &str) -> Utf8PathBuf { format!("{}/repo/xattrs/{}", OSTREEDIR, checksum).into() } +fn v0_xattrs_object_path(checksum: &str) -> Utf8PathBuf { + let (first, rest) = checksum.split_at(2); + format!("{}/repo/objects/{}/{}.file.xattrs", OSTREEDIR, first, rest).into() +} + +fn v1_xattrs_object_path(checksum: &str) -> Utf8PathBuf { + let (first, rest) = checksum.split_at(2); + format!("{}/repo/objects/{}/{}.file-xattrs", OSTREEDIR, first, rest).into() +} + +fn v1_xattrs_link_object_path(checksum: &str) -> Utf8PathBuf { + let (first, rest) = checksum.split_at(2); + format!( + "{}/repo/objects/{}/{}.file-xattrs-link", + OSTREEDIR, first, rest + ) + .into() +} + +fn v1_xattrs_link_target(checksum: &str) -> Utf8PathBuf { + let (first, rest) = checksum.split_at(2); + format!("../{}/{}.file-xattrs", first, rest).into() +} + /// Check for "denormal" symlinks which contain "//" -/// See https://github.com/fedora-sysv/chkconfig/pull/67 -/// [root@cosa-devsh ~]# rpm -qf /usr/lib/systemd/systemd-sysv-install -/// chkconfig-1.13-2.el8.x86_64 -/// [root@cosa-devsh ~]# ll /usr/lib/systemd/systemd-sysv-install -/// lrwxrwxrwx. 2 root root 24 Nov 29 18:08 /usr/lib/systemd/systemd-sysv-install -> ../../..//sbin/chkconfig -/// [root@cosa-devsh ~]# +// See https://github.com/fedora-sysv/chkconfig/pull/67 +// [root@cosa-devsh ~]# rpm -qf /usr/lib/systemd/systemd-sysv-install +// chkconfig-1.13-2.el8.x86_64 +// [root@cosa-devsh ~]# ll /usr/lib/systemd/systemd-sysv-install +// lrwxrwxrwx. 2 root root 24 Nov 29 18:08 /usr/lib/systemd/systemd-sysv-install -> ../../..//sbin/chkconfig +// [root@cosa-devsh ~]# fn symlink_is_denormal(target: &str) -> bool { target.contains("//") } @@ -117,6 +140,31 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { Ok(()) } + /// Add a regular file entry with default permissions (root/root 0644) + fn append_default_data(&mut self, path: &Utf8Path, data: &[u8]) -> Result<()> { + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Regular); + h.set_uid(0); + h.set_gid(0); + h.set_mode(0o644); + h.set_size(data.len() as u64); + self.out.append_data(&mut h, &path, data)?; + Ok(()) + } + + /// Add an hardlink entry with default permissions (root/root 0644) + fn append_default_hardlink(&mut self, path: &Utf8Path, link_target: &Utf8Path) -> Result<()> { + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Link); + h.set_uid(0); + h.set_gid(0); + h.set_mode(0o644); + h.set_size(0); + h.set_link_name(&link_target)?; + self.out.append_data(&mut h, &path, &mut std::io::empty())?; + Ok(()) + } + /// Write the initial /sysroot/ostree/repo structure. fn write_repo_structure(&mut self) -> Result<()> { if self.wrote_initdirs { @@ -153,22 +201,21 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.append_default_dir(&path)?; } - // The special `repo/xattrs` directory used only in our tar serialization. 
- let path: Utf8PathBuf = format!("{}/repo/xattrs", OSTREEDIR).into(); - self.append_default_dir(&path)?; - let mut h = tar::Header::new_gnu(); - h.set_entry_type(tar::EntryType::Regular); - h.set_uid(0); - h.set_gid(0); - h.set_mode(0o644); - h.set_size(REPO_CONFIG.as_bytes().len() as u64); - let path = match self.options.format_version { - 0 => format!("{}/config", SYSROOT), - 1 => format!("{}/repo/config", OSTREEDIR), - n => anyhow::bail!("Unsupported ostree tar format version {}", n), - }; - self.out - .append_data(&mut h, path, std::io::Cursor::new(REPO_CONFIG))?; + // The special `repo/xattrs` directory used in v0 format. + if self.options.format_version == 0 { + let path: Utf8PathBuf = format!("{}/repo/xattrs", OSTREEDIR).into(); + self.append_default_dir(&path)?; + } + + // Repository configuration file. + { + let path = match self.options.format_version { + 0 => format!("{}/config", SYSROOT), + 1 => format!("{}/repo/config", OSTREEDIR), + n => anyhow::bail!("Unsupported ostree tar format version {}", n), + }; + self.append_default_data(Utf8Path::new(&path), REPO_CONFIG.as_bytes())?; + } self.wrote_initdirs = true; Ok(()) @@ -237,56 +284,72 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { debug_assert!(inserted); } - let mut h = tar::Header::new_gnu(); - h.set_uid(0); - h.set_gid(0); - h.set_mode(0o644); let data = v.data_as_bytes(); let data = data.as_ref(); - h.set_size(data.len() as u64); - self.out - .append_data(&mut h, &object_path(objtype, checksum), data) + self.append_default_data(&object_path(objtype, checksum), data) .with_context(|| format!("Writing object {}", checksum))?; Ok(()) } + /// Export xattrs to the tar stream, return whether content was written. #[context("Writing xattrs")] - fn append_xattrs( - &mut self, - xattrs: &glib::Variant, - ) -> Result> { + fn append_xattrs(&mut self, checksum: &str, xattrs: &glib::Variant) -> Result { let xattrs_data = xattrs.data_as_bytes(); let xattrs_data = xattrs_data.as_ref(); if xattrs_data.is_empty() { - return Ok(None); + return Ok(false); } - let mut h = tar::Header::new_gnu(); - h.set_mode(0o644); - h.set_size(0); - let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), xattrs_data)?; - let checksum = &hex::encode(digest); - let path = xattrs_path(checksum); + let xattrs_checksum = { + let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), xattrs_data)?; + &hex::encode(digest) + }; - if !self.wrote_xattrs.contains(checksum) { - let inserted = self.wrote_xattrs.insert(checksum.to_string()); - debug_assert!(inserted); - let mut target_header = h.clone(); - target_header.set_size(xattrs_data.len() as u64); - self.out - .append_data(&mut target_header, &path, xattrs_data)?; + if self.options.format_version == 0 { + let path = v0_xattrs_path(xattrs_checksum); + + // Write xattrs content into a separate directory. + if !self.wrote_xattrs.contains(xattrs_checksum) { + let inserted = self.wrote_xattrs.insert(checksum.to_string()); + debug_assert!(inserted); + self.append_default_data(&path, xattrs_data)?; + } + // Hardlink the object in the repo. + { + let objpath = v0_xattrs_object_path(checksum); + self.append_default_hardlink(&objpath, &path)?; + } + } else if self.options.format_version == 1 { + // Write xattrs content into a separate `.file-xattrs` object. 
+ if !self.wrote_xattrs.contains(xattrs_checksum) { + let inserted = self.wrote_xattrs.insert(checksum.to_string()); + debug_assert!(inserted); + + let objpath = v1_xattrs_object_path(xattrs_checksum); + self.append_default_data(&objpath, xattrs_data)?; + } + // Write a `.file-xattrs-link` which links the file object to + // the corresponding detached xattrs. + { + let objpath = v1_xattrs_link_object_path(checksum); + let target_path = v1_xattrs_link_target(xattrs_checksum); + self.append_default_hardlink(&objpath, &target_path)?; + } + } else { + bail!("Unknown format version '{}'", self.options.format_version); } - Ok(Some((path, h))) + + Ok(true) } /// Write a content object, returning the path/header that should be used - /// as a hard link to it in the target path. This matches how ostree checkouts work. + /// as a hard link to it in the target path. This matches how ostree checkouts work. fn append_content(&mut self, checksum: &str) -> Result<(Utf8PathBuf, tar::Header)> { let path = object_path(ostree::ObjectType::File, checksum); let (instream, meta, xattrs) = self.repo.load_file(checksum, gio::NONE_CANCELLABLE)?; - let meta = meta.unwrap(); - let xattrs = xattrs.unwrap(); + let meta = meta.ok_or_else(|| anyhow!("Missing metadata for object {}", checksum))?; + let xattrs = xattrs.ok_or_else(|| anyhow!("Missing xattrs for object {}", checksum))?; let mut h = tar::Header::new_gnu(); h.set_uid(meta.attribute_uint32("unix::uid") as u64); @@ -300,15 +363,14 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let inserted = self.wrote_content.insert(checksum.to_string()); debug_assert!(inserted); - if let Some((xattrspath, mut xattrsheader)) = self.append_xattrs(&xattrs)? { - xattrsheader.set_entry_type(tar::EntryType::Link); - xattrsheader.set_link_name(xattrspath)?; - let subpath = format!("{}.xattrs", path); - self.out - .append_data(&mut xattrsheader, subpath, &mut std::io::empty())?; - } + // The xattrs objects need to be exported before the regular object they + // refer to. Otherwise the importing logic won't have the xattrs available + // when importing file content. 
+ self.append_xattrs(checksum, &xattrs)?; if let Some(instream) = instream { + ensure!(meta.file_type() == gio::FileType::Regular); + h.set_entry_type(tar::EntryType::Regular); h.set_size(meta.size() as u64); let mut instream = BufReader::with_capacity(BUF_CAPACITY, instream.into_read()); @@ -316,13 +378,16 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { .append_data(&mut h, &path, &mut instream) .with_context(|| format!("Writing regfile {}", checksum))?; } else { - let target = meta.symlink_target().unwrap(); - let target = target.as_str(); + ensure!(meta.file_type() == gio::FileType::SymbolicLink); + + let target = meta + .symlink_target() + .ok_or_else(|| anyhow!("Missing symlink target"))?; let context = || format!("Writing content symlink: {}", checksum); h.set_entry_type(tar::EntryType::Symlink); h.set_size(0); // Handle //chkconfig, see above - if symlink_is_denormal(target) { + if symlink_is_denormal(&target) { h.set_link_name_literal(meta.symlink_target().unwrap().as_str()) .with_context(context)?; self.out @@ -330,7 +395,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { .with_context(context)?; } else { self.out - .append_link(&mut h, &path, target) + .append_link(&mut h, &path, target.as_str()) .with_context(context)?; } } @@ -476,4 +541,45 @@ mod tests { assert!(symlink_is_denormal(path)); } } + + #[test] + fn test_v0_xattrs_path() { + let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; + let expected = "sysroot/ostree/repo/xattrs/b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; + let output = v0_xattrs_path(checksum); + assert_eq!(&output, expected); + } + + #[test] + fn test_v0_xattrs_object_path() { + let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; + let expected = "sysroot/ostree/repo/objects/b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file.xattrs"; + let output = v0_xattrs_object_path(checksum); + assert_eq!(&output, expected); + } + + #[test] + fn test_v1_xattrs_object_path() { + let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; + let expected = "sysroot/ostree/repo/objects/b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs"; + let output = v1_xattrs_object_path(checksum); + assert_eq!(&output, expected); + } + + #[test] + fn test_v1_xattrs_link_object_path() { + let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; + let expected = "sysroot/ostree/repo/objects/b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs-link"; + let output = v1_xattrs_link_object_path(checksum); + assert_eq!(&output, expected); + } + + #[test] + fn test_v1_xattrs_link_target() { + let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; + let expected = + "../b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs"; + let output = v1_xattrs_link_target(checksum); + assert_eq!(&output, expected); + } } diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 2e76f7c0b..5f0cee89c 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -1,7 +1,7 @@ //! 
APIs for extracting OSTree commits from container images use crate::Result; -use anyhow::{anyhow, Context}; +use anyhow::{anyhow, bail, ensure, Context}; use camino::Utf8Path; use camino::Utf8PathBuf; use fn_error_context::context; @@ -15,14 +15,15 @@ use std::io::prelude::*; use tracing::{event, instrument, Level}; /// Arbitrary limit on xattrs to avoid RAM exhaustion attacks. The actual filesystem limits are often much smaller. -/// See https://en.wikipedia.org/wiki/Extended_file_attributes -/// For example, XFS limits to 614 KiB. +// See https://en.wikipedia.org/wiki/Extended_file_attributes +// For example, XFS limits to 614 KiB. const MAX_XATTR_SIZE: u32 = 1024 * 1024; /// Limit on metadata objects (dirtree/dirmeta); this is copied /// from ostree-core.h. TODO: Bind this in introspection const MAX_METADATA_SIZE: u32 = 10 * 1024 * 1024; -/// https://stackoverflow.com/questions/258091/when-should-i-use-mmap-for-file-access +/// Upper size limit for "small" regular files. +// https://stackoverflow.com/questions/258091/when-should-i-use-mmap-for-file-access pub(crate) const SMALL_REGFILE_SIZE: usize = 127 * 1024; // The prefix for filenames that contain content we actually look at. @@ -41,9 +42,11 @@ struct ImportStats { struct Importer { repo: ostree::Repo, remote: Option, + // Cache of xattrs, keyed by their content checksum. xattrs: HashMap, + // Reusable buffer for xattrs references. It maps a file checksum (.0) + // to an xattrs checksum (.1) in the `xattrs` cache above. next_xattrs: Option<(String, String)>, - // Reusable buffer for reads. See also https://github.com/rust-lang/rust/issues/78485 buf: Vec, @@ -74,8 +77,8 @@ fn header_attrs(header: &tar::Header) -> Result<(u32, u32, u32)> { Ok((uid, gid, mode)) } -/// The C function ostree_object_type_from_string aborts on -/// unknown strings, so we have a safe version here. +// The C function ostree_object_type_from_string aborts on +// unknown strings, so we have a safe version here. fn objtype_from_string(t: &str) -> Option { Some(match t { "commit" => ostree::ObjectType::Commit, @@ -104,6 +107,7 @@ fn entry_to_variant( } /// Parse an object path into (parent, rest, objtype). +/// /// Normal ostree object paths look like 00/1234.commit. /// In the tar format, we may also see 00/1234.file.xattrs. fn parse_object_entry_path(path: &Utf8Path) -> Result<(&str, &Utf8Path, &str)> { @@ -123,6 +127,7 @@ fn parse_object_entry_path(path: &Utf8Path) -> Result<(&str, &Utf8Path, &str)> { let objtype = name .extension() .ok_or_else(|| anyhow!("Invalid objpath {}", path))?; + Ok((parentname, name, objtype)) } @@ -130,12 +135,50 @@ fn parse_checksum(parent: &str, name: &Utf8Path) -> Result { let checksum_rest = name .file_stem() .ok_or_else(|| anyhow!("Invalid object path part {}", name))?; + // Also take care of the double extension on `.file.xattrs`. + let checksum_rest = checksum_rest.trim_end_matches(".file"); if checksum_rest.len() != 62 { return Err(anyhow!("Invalid checksum part {}", checksum_rest)); } - let checksum = format!("{}{}", parent, checksum_rest); - validate_sha256(&checksum)?; + let reassembled = format!("{}{}", parent, checksum_rest); + validate_sha256(reassembled) +} + +/// Parse a `.file-xattrs-link` link target into the corresponding checksum. +fn parse_xattrs_link_target(path: &Utf8Path) -> Result { + // Discard the relative parent. + let path = path.strip_prefix("..")?; + + // Split the sharded checksum directory. 
+ let parentname = path + .parent() + .map(|p| p.file_name()) + .flatten() + .ok_or_else(|| anyhow!("Invalid path (no parent) {}", path))?; + if parentname.len() != 2 { + return Err(anyhow!("Invalid checksum parent {}", parentname)); + } + + // Split the filename (basename + objtype). + let fname = path + .file_name() + .map(Utf8Path::new) + .ok_or_else(|| anyhow!("Invalid filename {}", path))?; + + // Ensure the link points to the correct object type. + let objtype = fname + .extension() + .ok_or_else(|| anyhow!("Invalid path (extension) {}", path))?; + if objtype != "file-xattrs" { + bail!("Invalid objpath {} for {}", objtype, path); + } + + // Reassemble the target checksum and validate it. + let basename = fname.as_str().trim_end_matches(".file-xattrs"); + let target = format!("{}{}", parentname, basename); + let checksum = validate_sha256(target)?; + Ok(checksum) } @@ -212,13 +255,13 @@ impl Importer { Ok(()) } - /// Import a content object. + /// Import a content object, large regular file flavour. fn import_large_regfile_object( &mut self, mut entry: tar::Entry, size: usize, checksum: &str, - xattrs: Option, + xattrs: glib::Variant, cancellable: Option<&gio::Cancellable>, ) -> Result<()> { let (uid, gid, mode) = header_attrs(entry.header())?; @@ -228,7 +271,7 @@ impl Importer { gid, libc::S_IFREG | mode, size as u64, - xattrs.as_ref(), + Some(&xattrs), )?; { let w = w.clone().upcast::(); @@ -249,13 +292,13 @@ impl Importer { Ok(()) } - /// Import a content object. + /// Import a content object, small regular file flavour. fn import_small_regfile_object( &mut self, mut entry: tar::Entry, size: usize, checksum: &str, - xattrs: Option, + xattrs: glib::Variant, cancellable: Option<&gio::Cancellable>, ) -> Result<()> { let (uid, gid, mode) = header_attrs(entry.header())?; @@ -267,7 +310,7 @@ impl Importer { uid, gid, libc::S_IFREG | mode, - xattrs.as_ref(), + Some(&xattrs), &buf, cancellable, )?; @@ -276,12 +319,12 @@ impl Importer { Ok(()) } - /// Import a content object. + /// Import a content object, symlink flavour. fn import_symlink_object( &mut self, entry: tar::Entry, checksum: &str, - xattrs: Option, + xattrs: glib::Variant, ) -> Result<()> { let (uid, gid, _) = header_attrs(entry.header())?; let target = entry @@ -295,7 +338,7 @@ impl Importer { Some(checksum), uid, gid, - xattrs.as_ref(), + Some(&xattrs), target, gio::NONE_CANCELLABLE, )?; @@ -310,7 +353,6 @@ impl Importer { &mut self, entry: tar::Entry, checksum: &str, - xattrs: Option, cancellable: Option<&gio::Cancellable>, ) -> Result<()> { if self @@ -320,6 +362,23 @@ impl Importer { return Ok(()); } let size: usize = entry.header().size()?.try_into()?; + + // Pop the queued xattrs reference. + let (file_csum, xattrs_csum) = self + .next_xattrs + .take() + .ok_or_else(|| anyhow!("Missing xattrs reference"))?; + if checksum != file_csum { + return Err(anyhow!("Object mismatch, found xattrs for {}", file_csum)); + } + + // Retrieve xattrs content from the cache. + let xattrs = self + .xattrs + .get(&xattrs_csum) + .cloned() + .ok_or_else(|| anyhow!("Failed to find xattrs content {}", xattrs_csum,))?; + match entry.header().entry_type() { tar::EntryType::Regular => { if size > SMALL_REGFILE_SIZE { @@ -335,103 +394,152 @@ impl Importer { /// Given a tar entry that looks like an object (its path is under ostree/repo/objects/), /// determine its type and import it. 
- #[context("object {}", path)] + #[context("Importing object {}", path)] fn import_object<'b, R: std::io::Read>( &mut self, entry: tar::Entry<'b, R>, path: &Utf8Path, cancellable: Option<&gio::Cancellable>, ) -> Result<()> { - let (parentname, mut name, mut objtype) = parse_object_entry_path(path)?; + let (parentname, name, suffix) = parse_object_entry_path(path)?; + let checksum = parse_checksum(parentname, name)?; - let is_xattrs = objtype == "xattrs"; - let xattrs = self.next_xattrs.take(); - if is_xattrs { - if xattrs.is_some() { - return Err(anyhow!("Found multiple xattrs")); + match suffix { + "commit" => Err(anyhow!("Found multiple commit objects")), + "file" => self.import_content_object(entry, &checksum, cancellable), + "file-xattrs" => self.process_file_xattrs(entry, checksum), + "file-xattrs-link" => self.process_file_xattrs_link(entry, checksum), + "xattrs" => self.process_xattr_ref(entry, checksum), + kind => { + let objtype = objtype_from_string(kind) + .ok_or_else(|| anyhow!("Invalid object type {}", kind))?; + self.import_metadata(entry, &checksum, objtype) } - name = name - .file_stem() - .map(Utf8Path::new) - .ok_or_else(|| anyhow!("Invalid xattrs {}", path))?; - objtype = name - .extension() - .ok_or_else(|| anyhow!("Invalid objpath {}", path))?; } - let checksum = parse_checksum(parentname, name)?; - let xattr_ref = if let Some((xattr_target, xattr_objref)) = xattrs { - if xattr_target.as_str() != checksum.as_str() { - return Err(anyhow!( - "Found object {} but previous xattr was {}", - checksum, - xattr_target - )); - } - let v = self - .xattrs - .get(&xattr_objref) - .ok_or_else(|| anyhow!("Failed to find xattr {}", xattr_objref))?; - Some(v.clone()) - } else { - None - }; - let objtype = objtype_from_string(objtype) - .ok_or_else(|| anyhow!("Invalid object type {}", objtype))?; - if is_xattrs && objtype != ostree::ObjectType::File { - return Err(anyhow!("Found xattrs for non-file object type {}", objtype)); + } + + /// Process a `.file-xattrs` object (v1). + #[context("Processing file xattrs")] + fn process_file_xattrs( + &mut self, + entry: tar::Entry, + checksum: String, + ) -> Result<()> { + self.cache_xattrs_content(entry, Some(checksum))?; + Ok(()) + } + + /// Process a `.file-xattrs-link` object (v1). + /// + /// This is an hardlink that contains extended attributes for a content object. + /// When the max hardlink count is reached, this object may also be encoded as + /// a regular file instead. + #[context("Processing xattrs link")] + fn process_file_xattrs_link( + &mut self, + entry: tar::Entry, + checksum: String, + ) -> Result<()> { + use tar::EntryType::{Link, Regular}; + if let Some(prev) = &self.next_xattrs { + bail!( + "Found previous dangling xattrs for file object '{}'", + prev.0 + ); } - match objtype { - ostree::ObjectType::Commit => Err(anyhow!("Found multiple commit objects")), - ostree::ObjectType::File => { - if is_xattrs { - self.import_xattr_ref(entry, checksum) - } else { - self.import_content_object(entry, &checksum, xattr_ref, cancellable) - } + + // Extract the xattrs checksum from the link target or from the content (v1). + // Later, it will be used as the key for a lookup into the `self.xattrs` cache. + let xattrs_checksum; + match entry.header().entry_type() { + Link => { + let link_target = entry + .link_name()? 
+ .ok_or_else(|| anyhow!("No xattrs link content for {}", checksum))?; + let xattr_target = Utf8Path::from_path(&*link_target) + .ok_or_else(|| anyhow!("Invalid non-UTF8 xattrs link {}", checksum))?; + xattrs_checksum = parse_xattrs_link_target(xattr_target)?; + } + Regular => { + xattrs_checksum = self.cache_xattrs_content(entry, None)?; } - objtype => self.import_metadata(entry, &checksum, objtype), + x => bail!("Unexpected xattrs type '{:?}' found for {}", x, checksum), } + + // Now xattrs are properly cached for the next content object in the stream, + // which should match `checksum`. + self.next_xattrs = Some((checksum, xattrs_checksum)); + + Ok(()) } - /// Handle .xattr hardlinks that contain extended attributes for - /// a content object. - #[context("Processing xattr ref")] - fn import_xattr_ref( + /// Process a `.file.xattrs` entry (v0). + /// + /// This is an hardlink that contains extended attributes for a content object. + #[context("Processing xattrs reference")] + fn process_xattr_ref( &mut self, entry: tar::Entry, target: String, ) -> Result<()> { - assert!(self.next_xattrs.is_none()); + if let Some(prev) = &self.next_xattrs { + bail!( + "Found previous dangling xattrs for file object '{}'", + prev.0 + ); + } + + // Parse the xattrs checksum from the link target (v0). + // Later, it will be used as the key for a lookup into the `self.xattrs` cache. let header = entry.header(); if header.entry_type() != tar::EntryType::Link { - return Err(anyhow!("Non-hardlink xattr reference found for {}", target)); + bail!("Non-hardlink xattrs reference found for {}", target); } let xattr_target = entry .link_name()? - .ok_or_else(|| anyhow!("No xattr link content for {}", target))?; + .ok_or_else(|| anyhow!("No xattrs link content for {}", target))?; let xattr_target = Utf8Path::from_path(&*xattr_target) - .ok_or_else(|| anyhow!("Invalid non-UTF8 xattr link {}", target))?; + .ok_or_else(|| anyhow!("Invalid non-UTF8 xattrs link {}", target))?; let xattr_target = xattr_target .file_name() - .ok_or_else(|| anyhow!("Invalid xattr link {}", target))?; - validate_sha256(xattr_target)?; - self.next_xattrs = Some((target, xattr_target.to_string())); + .ok_or_else(|| anyhow!("Invalid xattrs link {}", target))? + .to_string(); + let xattrs_checksum = validate_sha256(xattr_target)?; + + // Now xattrs are properly cached for the next content object in the stream, + // which should match `checksum`. + self.next_xattrs = Some((target, xattrs_checksum)); + Ok(()) } - /// Process a special /xattrs/ entry (sha256 of xattr values). - fn import_xattrs(&mut self, mut entry: tar::Entry) -> Result<()> { + /// Process a special /xattrs/ entry, with checksum of xattrs content (v0). + fn process_split_xattrs_content( + &mut self, + entry: tar::Entry, + ) -> Result<()> { let checksum = { let path = entry.path()?; let name = path .file_name() - .ok_or_else(|| anyhow!("Invalid xattr dir: {:?}", path))?; + .ok_or_else(|| anyhow!("Invalid xattrs dir: {:?}", path))?; let name = name .to_str() - .ok_or_else(|| anyhow!("Invalid non-UTF8 xattr name: {:?}", name))?; - validate_sha256(name)?; - name.to_string() + .ok_or_else(|| anyhow!("Invalid non-UTF8 xattrs name: {:?}", name))?; + validate_sha256(name.to_string())? }; + self.cache_xattrs_content(entry, Some(checksum))?; + Ok(()) + } + + /// Read an xattrs entry and cache its content, optionally validating its checksum. + /// + /// This returns the computed checksum for the successfully cached content. 
+ fn cache_xattrs_content( + &mut self, + mut entry: tar::Entry, + expected_checksum: Option, + ) -> Result { let header = entry.header(); if header.entry_type() != tar::EntryType::Regular { return Err(anyhow!( @@ -446,11 +554,23 @@ impl Importer { let mut contents = vec![0u8; n as usize]; entry.read_exact(contents.as_mut_slice())?; - let contents: glib::Bytes = contents.as_slice().into(); - let contents = Variant::from_bytes::<&[(&[u8], &[u8])]>(&contents); + let data: glib::Bytes = contents.as_slice().into(); + let xattrs_checksum = { + let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), &data)?; + hex::encode(digest) + }; + if let Some(input) = expected_checksum { + ensure!( + input == xattrs_checksum, + "Checksum mismatch, expected '{}' but computed '{}'", + input, + xattrs_checksum + ); + } - self.xattrs.insert(checksum, contents); - Ok(()) + let contents = Variant::from_bytes::<&[(&[u8], &[u8])]>(&data); + self.xattrs.insert(xattrs_checksum.clone(), contents); + Ok(xattrs_checksum) } fn import( @@ -563,7 +683,7 @@ impl Importer { if let Ok(p) = path.strip_prefix("objects/") { self.import_object(entry, p, cancellable)?; } else if path.strip_prefix("xattrs/").is_ok() { - self.import_xattrs(entry)?; + self.process_split_xattrs_content(entry)?; } } @@ -571,14 +691,14 @@ impl Importer { } } -fn validate_sha256(s: &str) -> Result<()> { - if s.len() != 64 { - return Err(anyhow!("Invalid sha256 checksum (len) {}", s)); +fn validate_sha256(input: String) -> Result { + if input.len() != 64 { + return Err(anyhow!("Invalid sha256 checksum (len) {}", input)); } - if !s.chars().all(|c| matches!(c, '0'..='9' | 'a'..='f')) { - return Err(anyhow!("Invalid sha256 checksum {}", s)); + if !input.chars().all(|c| matches!(c, '0'..='9' | 'a'..='f')) { + return Err(anyhow!("Invalid sha256 checksum {}", input)); } - Ok(()) + Ok(input) } /// Configuration for tar import. 
@@ -630,17 +750,62 @@ mod tests { } #[test] - fn test_validate_sha256() -> Result<()> { - validate_sha256("a86d80a3e9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b964")?; - assert!(validate_sha256("").is_err()); - assert!(validate_sha256( - "a86d80a3e9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b9644" - ) - .is_err()); - assert!(validate_sha256( - "a86d80a3E9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b964" + fn test_validate_sha256() { + let err_cases = &[ + "a86d80a3e9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b9644", + "a86d80a3E9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b964", + ]; + for input in err_cases { + validate_sha256(input.to_string()).unwrap_err(); + } + + validate_sha256( + "a86d80a3e9ff77c2e3144c787b7769b300f91ffd770221aac27bab854960b964".to_string(), ) - .is_err()); - Ok(()) + .unwrap(); + } + + #[test] + fn test_parse_object_entry_path() { + let path = + "sysroot/ostree/repo/objects/b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file.xattrs"; + let input = Utf8PathBuf::from(path); + let expected_parent = "b8"; + let expected_rest = + "627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file.xattrs"; + let expected_objtype = "xattrs"; + let output = parse_object_entry_path(&input).unwrap(); + assert_eq!(output.0, expected_parent); + assert_eq!(output.1, expected_rest); + assert_eq!(output.2, expected_objtype); + } + + #[test] + fn test_parse_checksum() { + let parent = "b8"; + let name = "627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file.xattrs"; + let expected = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; + let output = parse_checksum(parent, &Utf8PathBuf::from(name)).unwrap(); + assert_eq!(output, expected); + } + + #[test] + fn test_parse_xattrs_link_target() { + let err_cases = &[ + "", + "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs", + "b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs", + "../b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file.xattrs", + "../b8/62.file-xattrs", + ]; + for input in err_cases { + parse_xattrs_link_target(&Utf8PathBuf::from(input)).unwrap_err(); + } + + let path = + "../b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs"; + let expected = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; + let output = parse_xattrs_link_target(&Utf8PathBuf::from(path)).unwrap(); + assert_eq!(output, expected); } } diff --git a/lib/src/tar/mod.rs b/lib/src/tar/mod.rs index bd393fbd1..2e1bbc722 100644 --- a/lib/src/tar/mod.rs +++ b/lib/src/tar/mod.rs @@ -12,8 +12,8 @@ //! # Tar stream layout //! //! In order to solve these problems, this new tar serialization format effectively -//! combines *both* a `/ostree/repo/objects` directory and a checkout in `/usr`, where -//! the latter are hardlinks to the former. +//! combines *both* a `/sysroot/ostree/repo/objects` directory and a checkout in `/usr`, +//! where the latter are hardlinks to the former. //! //! The exported stream will have the ostree metadata first; in particular the commit object. //! Following the commit object is the `.commitmeta` object, which contains any cryptographic @@ -25,12 +25,23 @@ //! The remainder of the stream is a breadth-first traversal of dirtree/dirmeta objects and the //! content objects they reference. //! -//! # Extended attributes +//! # `bare-split-xattrs` repository mode //! -//! 
Extended attributes are a complex subject for tar, which has many variants. Further, -//! when exporting bootable ostree commits to container images, it is not actually desired -//! to have the container runtime try to unpack and apply those. For this reason, this module -//! serializes extended attributes into separate `.xattr` files associated with each ostree object. +//! In format version 1, the tar stream embeds a proper ostree repository using a tailored +//! `bare-split-xattrs` mode. +//! +//! This is because extended attributes (xattrs) are a complex subject for tar, which has +//! many variants. +//! Further, when exporting bootable ostree commits to container images, it is not actually +//! desired to have the container runtime try to unpack and apply those. +//! +//! For these reasons, extended attributes (xattrs) get serialized into detached objects +//! which are associated with the relevant content objects. +//! +//! At a low level, two dedicated object types are used: +//! * `file-xattrs` as regular files storing (and de-duplicating) xattrs content. +//! * `file-xattrs-link` as hardlinks which associate a `file` object to its corresponding +//! `file-xattrs` object. mod import; pub use import::*; diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 8dfd6db2a..e86ddcb5e 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -363,7 +363,6 @@ fn test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), ("sysroot/ostree/repo/tmp", Directory, 0o755), ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), - ("sysroot/ostree/repo/xattrs", Directory, 0o755), ("usr", Directory, 0o755), ]; validate_tar_expected( From cdeb011feb7220757e9421f0898b0e6e36abde44 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Fri, 11 Feb 2022 15:47:58 +0000 Subject: [PATCH 291/775] tar/export: export empty xattrs in v1 format This makes sure that xattrs content is always exported in v1 format, even the empty attributes set. Previous behaviour is retained for v0, for compatibility. --- lib/src/tar/export.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index f672060ae..71027a3fc 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -296,7 +296,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn append_xattrs(&mut self, checksum: &str, xattrs: &glib::Variant) -> Result { let xattrs_data = xattrs.data_as_bytes(); let xattrs_data = xattrs_data.as_ref(); - if xattrs_data.is_empty() { + if xattrs_data.is_empty() && self.options.format_version == 0 { return Ok(false); } From 8ff0460c8caee4bffb96c00880d43c0ac3cd8930 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Fri, 11 Feb 2022 16:25:42 +0000 Subject: [PATCH 292/775] tar/export: use full path as link target This switches hardlinks to use full paths as link targets. Otherwise a plain tar unpacking may get really confused by the leading `../` (and may actually decide to strip it as a security concern). 
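Concretely, link targets like `../b8/62....file-xattrs` become
`sysroot/ostree/repo/objects/b8/62....file-xattrs`. A minimal standalone
sketch of writing such an entry, assuming only the `tar` crate this series
already depends on:

```rust
// Sketch: append a hardlink whose target is an archive-rooted path
// rather than a relative "../..." one that strict unpackers may strip
// or reject as traversal. `append_link` sets the link name (using GNU
// extensions for long targets) and computes the header checksum.
fn append_hardlink<W: std::io::Write>(
    out: &mut tar::Builder<W>,
    path: &str,
    link_target: &str,
) -> std::io::Result<()> {
    let mut h = tar::Header::new_gnu();
    h.set_entry_type(tar::EntryType::Link);
    h.set_uid(0);
    h.set_gid(0);
    h.set_mode(0o644);
    h.set_size(0);
    out.append_link(&mut h, path, link_target)
}
```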
--- lib/src/tar/export.rs | 28 +++++------------------- lib/src/tar/import.rs | 51 ++++++++++--------------------------------- 2 files changed, 17 insertions(+), 62 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index f672060ae..44e8b19c1 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -87,11 +87,6 @@ fn v1_xattrs_link_object_path(checksum: &str) -> Utf8PathBuf { .into() } -fn v1_xattrs_link_target(checksum: &str) -> Utf8PathBuf { - let (first, rest) = checksum.split_at(2); - format!("../{}/{}.file-xattrs", first, rest).into() -} - /// Check for "denormal" symlinks which contain "//" // See https://github.com/fedora-sysv/chkconfig/pull/67 // [root@cosa-devsh ~]# rpm -qf /usr/lib/systemd/systemd-sysv-install @@ -160,8 +155,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { h.set_gid(0); h.set_mode(0o644); h.set_size(0); - h.set_link_name(&link_target)?; - self.out.append_data(&mut h, &path, &mut std::io::empty())?; + self.out.append_link(&mut h, &path, &link_target)?; Ok(()) } @@ -320,20 +314,19 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.append_default_hardlink(&objpath, &path)?; } } else if self.options.format_version == 1 { + let path = v1_xattrs_object_path(xattrs_checksum); + // Write xattrs content into a separate `.file-xattrs` object. if !self.wrote_xattrs.contains(xattrs_checksum) { let inserted = self.wrote_xattrs.insert(checksum.to_string()); debug_assert!(inserted); - - let objpath = v1_xattrs_object_path(xattrs_checksum); - self.append_default_data(&objpath, xattrs_data)?; + self.append_default_data(&path, xattrs_data)?; } // Write a `.file-xattrs-link` which links the file object to // the corresponding detached xattrs. { - let objpath = v1_xattrs_link_object_path(checksum); - let target_path = v1_xattrs_link_target(xattrs_checksum); - self.append_default_hardlink(&objpath, &target_path)?; + let link_obj_path = v1_xattrs_link_object_path(checksum); + self.append_default_hardlink(&link_obj_path, &path)?; } } else { bail!("Unknown format version '{}'", self.options.format_version); @@ -573,13 +566,4 @@ mod tests { let output = v1_xattrs_link_object_path(checksum); assert_eq!(&output, expected); } - - #[test] - fn test_v1_xattrs_link_target() { - let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; - let expected = - "../b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs"; - let output = v1_xattrs_link_target(checksum); - assert_eq!(&output, expected); - } } diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 5f0cee89c..4b76c960b 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -147,39 +147,8 @@ fn parse_checksum(parent: &str, name: &Utf8Path) -> Result { /// Parse a `.file-xattrs-link` link target into the corresponding checksum. fn parse_xattrs_link_target(path: &Utf8Path) -> Result { - // Discard the relative parent. - let path = path.strip_prefix("..")?; - - // Split the sharded checksum directory. - let parentname = path - .parent() - .map(|p| p.file_name()) - .flatten() - .ok_or_else(|| anyhow!("Invalid path (no parent) {}", path))?; - if parentname.len() != 2 { - return Err(anyhow!("Invalid checksum parent {}", parentname)); - } - - // Split the filename (basename + objtype). - let fname = path - .file_name() - .map(Utf8Path::new) - .ok_or_else(|| anyhow!("Invalid filename {}", path))?; - - // Ensure the link points to the correct object type. 
-    let objtype = fname
-        .extension()
-        .ok_or_else(|| anyhow!("Invalid path (extension) {}", path))?;
-    if objtype != "file-xattrs" {
-        bail!("Invalid objpath {} for {}", objtype, path);
-    }
-
-    // Reassemble the target checksum and validate it.
-    let basename = fname.as_str().trim_end_matches(".file-xattrs");
-    let target = format!("{}{}", parentname, basename);
-    let checksum = validate_sha256(target)?;
-
-    Ok(checksum)
+    let (parent, rest, _objtype) = parse_object_entry_path(path)?;
+    parse_checksum(parent, rest)
 }
 
 impl Importer {
@@ -794,18 +763,20 @@ mod tests {
         let err_cases = &[
            "",
            "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs",
-            "b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs",
-            "../b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file.xattrs",
            "../b8/62.file-xattrs",
         ];
         for input in err_cases {
-            parse_xattrs_link_target(&Utf8PathBuf::from(input)).unwrap_err();
+            parse_xattrs_link_target(Utf8Path::new(input)).unwrap_err();
         }
 
-        let path =
-            "../b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs";
+        let ok_cases = &[
+            "../b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs",
+            "sysroot/ostree/repo/objects/b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file-xattrs",
+        ];
         let expected = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7";
-        let output = parse_xattrs_link_target(&Utf8PathBuf::from(path)).unwrap();
-        assert_eq!(output, expected);
+        for input in ok_cases {
+            let output = parse_xattrs_link_target(Utf8Path::new(input)).unwrap();
+            assert_eq!(output, expected);
+        }
     }
 }

From 1ee4b21a2d3aaf9940711a8f99d73b88052f9330 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 11 Feb 2022 17:04:01 -0500
Subject: [PATCH 293/775] tar/import: Don't crash on extant object

This bug may have existed before, but basically we need to handle the
case where an object already exists in the repo; we were erroring out
in this case because we retained the queued xattrs entry.

---
 lib/src/tar/import.rs | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index 4b76c960b..54a4e1b46 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -324,12 +324,6 @@ impl Importer {
         checksum: &str,
         cancellable: Option<&gio::Cancellable>,
     ) -> Result<()> {
-        if self
-            .repo
-            .has_object(ostree::ObjectType::File, checksum, cancellable)?
-        {
-            return Ok(());
-        }
         let size: usize = entry.header().size()?.try_into()?;
 
         // Pop the queued xattrs reference.
@@ -341,6 +335,13 @@ impl Importer {
             return Err(anyhow!("Object mismatch, found xattrs for {}", file_csum));
         }
 
+        if self
+            .repo
+            .has_object(ostree::ObjectType::File, checksum, cancellable)?
+        {
+            return Ok(());
+        }
+
         // Retrieve xattrs content from the cache.
         let xattrs = self
             .xattrs

From fdd938bb29104bf8a6df463fc896681985f78463 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Sun, 13 Feb 2022 10:35:43 -0500
Subject: [PATCH 294/775] tests: Factor out test fixture as a separate module

OK so...buckle up. I've been working on this ostree stuff for like 10
years now, and it turns out that I still suck at writing the test suites.

When I generated the hardcoded `exampleos-v0.tar.zstd`, I apparently
included SELinux labels (I don't think intentionally). But I didn't when
generating `exampleos-v1.tar`!
I was trying to test an upgrade path around xattrs and hit a bug in that
code, and it took me a while to understand it was because the second
update didn't have them!

The time it took to debug this was greatly extended by me reading
`man tar`, and thinking that `tar -tf -v --xattrs` would print any
SELinux label. But no - SELinux is *special cased* in tar - even though
it's an extended attribute, in order to see any embedded labels in the
tarball one needs to use `tar -tf -v --selinux`! Somehow I had apparently
avoided learning this until now. (ostree's model I think is better than
tar's, but here we're intersecting them)

Anyways. None of this is fixed right now. Yes, I can see the
disappointment. But - in preparation for making the test suite more
robust and fixing things up, factor out the fixture as a separate module.
Make things like the testref a method instead of using a constant.

Port the diff test to use this too.

---
 lib/tests/it/fixture.rs | 107 +++++++++++++++++++++++++++++++
 lib/tests/it/main.rs    | 135 +++++++---------------------------------
 2 files changed, 129 insertions(+), 113 deletions(-)
 create mode 100644 lib/tests/it/fixture.rs

diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs
new file mode 100644
index 000000000..e73e4ef8e
--- /dev/null
+++ b/lib/tests/it/fixture.rs
@@ -0,0 +1,107 @@
+use anyhow::Result;
+use camino::{Utf8Path, Utf8PathBuf};
+use fn_error_context::context;
+use indoc::indoc;
+use ostree_ext::gio;
+use sh_inline::bash;
+use std::convert::TryInto;
+
+const OSTREE_GPG_HOME: &[u8] = include_bytes!("fixtures/ostree-gpg-test-home.tar.gz");
+const TEST_GPG_KEYID_1: &str = "7FCA23D8472CDAFA";
+#[allow(dead_code)]
+const TEST_GPG_KEYFPR_1: &str = "5E65DE75AB1C501862D476347FCA23D8472CDAFA";
+pub(crate) const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
+pub(crate) const EXAMPLEOS_V1: &[u8] = include_bytes!("fixtures/exampleos-v1.tar.zst");
+const TESTREF: &str = "exampleos/x86_64/stable";
+
+pub(crate) struct Fixture {
+    // Just holds a reference
+    _tempdir: tempfile::TempDir,
+    pub(crate) path: Utf8PathBuf,
+    pub(crate) srcdir: Utf8PathBuf,
+    pub(crate) srcrepo: ostree::Repo,
+    pub(crate) destrepo: ostree::Repo,
+    pub(crate) destrepo_path: Utf8PathBuf,
+
+    pub(crate) format_version: u32,
+}
+
+impl Fixture {
+    pub(crate) fn new() -> Result<Self> {
+        let _tempdir = tempfile::tempdir_in("/var/tmp")?;
+        let path: &Utf8Path = _tempdir.path().try_into().unwrap();
+        let path = path.to_path_buf();
+
+        let srcdir = path.join("src");
+        std::fs::create_dir(&srcdir)?;
+        let srcrepo_path = generate_test_repo(&srcdir, TESTREF)?;
+        let srcrepo =
+            ostree::Repo::open_at(libc::AT_FDCWD, srcrepo_path.as_str(), gio::NONE_CANCELLABLE)?;
+
+        let destdir = &path.join("dest");
+        std::fs::create_dir(destdir)?;
+        let destrepo_path = destdir.join("repo");
+        let destrepo = ostree::Repo::new_for_path(&destrepo_path);
+        destrepo.create(ostree::RepoMode::BareUser, gio::NONE_CANCELLABLE)?;
+        Ok(Self {
+            _tempdir,
+            path,
+            srcdir,
+            srcrepo,
+            destrepo,
+            destrepo_path,
+            format_version: 0,
+        })
+    }
+
+    pub(crate) fn testref(&self) -> &'static str {
+        TESTREF
+    }
+
+    pub(crate) fn update(&mut self) -> Result<()> {
+        let repopath = &self.srcdir.join("repo");
+        let repotmp = &repopath.join("tmp");
+        let srcpath = &repotmp.join("exampleos-v1.tar.zst");
+        std::fs::write(srcpath, EXAMPLEOS_V1)?;
+        let srcpath = srcpath.as_str();
+        let repopath = repopath.as_str();
+        let testref = TESTREF;
+        bash!(
+            "ostree --repo={repopath} commit -b {testref} --no-bindings
--tree=tar={srcpath}", + testref, + repopath, + srcpath + )?; + std::fs::remove_file(srcpath)?; + Ok(()) + } +} + +#[context("Generating test repo")] +pub(crate) fn generate_test_repo(dir: &Utf8Path, testref: &str) -> Result { + let src_tarpath = &dir.join("exampleos.tar.zst"); + std::fs::write(src_tarpath, EXAMPLEOS_V0)?; + + let gpghome = dir.join("gpghome"); + { + let dec = flate2::read::GzDecoder::new(OSTREE_GPG_HOME); + let mut a = tar::Archive::new(dec); + a.unpack(&gpghome)?; + }; + + bash!( + indoc! {" + cd {dir} + ostree --repo=repo init --mode=archive + ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir={gpghome} --gpg-sign={keyid} \ + --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null + ostree --repo=repo show {testref} >/dev/null + "}, + testref = testref, + gpghome = gpghome.as_str(), + keyid = TEST_GPG_KEYID_1, + dir = dir.as_str() + )?; + std::fs::remove_file(src_tarpath)?; + Ok(dir.join("repo")) +} diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index e86ddcb5e..863d04209 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1,7 +1,8 @@ +mod fixture; + use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; -use indoc::indoc; use once_cell::sync::Lazy; use ostree_ext::container::store::PrepareResult; use ostree_ext::container::{ @@ -11,16 +12,10 @@ use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; use sh_inline::bash; use std::collections::HashMap; -use std::convert::TryInto; use std::{io::Write, process::Command}; -const OSTREE_GPG_HOME: &[u8] = include_bytes!("fixtures/ostree-gpg-test-home.tar.gz"); -const TEST_GPG_KEYID_1: &str = "7FCA23D8472CDAFA"; -#[allow(dead_code)] -const TEST_GPG_KEYFPR_1: &str = "5E65DE75AB1C501862D476347FCA23D8472CDAFA"; -const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst"); -const EXAMPLEOS_V1: &[u8] = include_bytes!("fixtures/exampleos-v1.tar.zst"); -const TESTREF: &str = "exampleos/x86_64/stable"; +use fixture::Fixture; + const EXAMPLEOS_CONTENT_CHECKSUM: &str = "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120"; const TEST_REGISTRY_DEFAULT: &str = "localhost:5000"; @@ -38,56 +33,12 @@ static TEST_REGISTRY: Lazy = Lazy::new(|| match std::env::var_os("TEST_R None => TEST_REGISTRY_DEFAULT.to_string(), }); -#[context("Generating test repo")] -fn generate_test_repo(dir: &Utf8Path) -> Result { - let src_tarpath = &dir.join("exampleos.tar.zst"); - std::fs::write(src_tarpath, EXAMPLEOS_V0)?; - - let gpghome = dir.join("gpghome"); - { - let dec = flate2::read::GzDecoder::new(OSTREE_GPG_HOME); - let mut a = tar::Archive::new(dec); - a.unpack(&gpghome)?; - }; - - bash!( - indoc! 
{" - cd {dir} - ostree --repo=repo init --mode=archive - ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir={gpghome} --gpg-sign={keyid} \ - --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null - ostree --repo=repo show {testref} >/dev/null - "}, - testref = TESTREF, - gpghome = gpghome.as_str(), - keyid = TEST_GPG_KEYID_1, - dir = dir.as_str() - )?; - std::fs::remove_file(src_tarpath)?; - Ok(dir.join("repo")) -} - -fn update_repo(repopath: &Utf8Path) -> Result<()> { - let repotmp = &repopath.join("tmp"); - let srcpath = &repotmp.join("exampleos-v1.tar.zst"); - std::fs::write(srcpath, EXAMPLEOS_V1)?; - let srcpath = srcpath.as_str(); - let repopath = repopath.as_str(); - let testref = TESTREF; - bash!( - "ostree --repo={repopath} commit -b {testref} --no-bindings --tree=tar={srcpath}", - testref, - repopath, - srcpath - )?; - std::fs::remove_file(srcpath)?; - Ok(()) -} - #[context("Generating test tarball")] fn initial_export(fixture: &Fixture) -> Result<Utf8PathBuf> { let cancellable = gio::NONE_CANCELLABLE; - let (_, rev) = fixture.srcrepo.read_commit(TESTREF, cancellable)?; + let (_, rev) = fixture + .srcrepo + .read_commit(fixture.testref(), cancellable)?; let (commitv, _) = fixture.srcrepo.load_commit(rev.as_str())?; assert_eq!( ostree::commit_get_content_checksum(&commitv) @@ -106,47 +57,6 @@ fn initial_export(fixture: &Fixture) -> Result<Utf8PathBuf> { Ok(destpath) } -struct Fixture { - // Just holds a reference - _tempdir: tempfile::TempDir, - path: Utf8PathBuf, - srcdir: Utf8PathBuf, - srcrepo: ostree::Repo, - destrepo: ostree::Repo, - destrepo_path: Utf8PathBuf, - - format_version: u32, -} - -impl Fixture { - fn new() -> Result<Self> { - let _tempdir = tempfile::tempdir_in("/var/tmp")?; - let path: &Utf8Path = _tempdir.path().try_into().unwrap(); - let path = path.to_path_buf(); - - let srcdir = path.join("src"); - std::fs::create_dir(&srcdir)?; - let srcrepo_path = generate_test_repo(&srcdir)?; - let srcrepo = - ostree::Repo::open_at(libc::AT_FDCWD, srcrepo_path.as_str(), gio::NONE_CANCELLABLE)?; - - let destdir = &path.join("dest"); - std::fs::create_dir(destdir)?; - let destrepo_path = destdir.join("repo"); - let destrepo = ostree::Repo::new_for_path(&destrepo_path); - destrepo.create(ostree::RepoMode::BareUser, gio::NONE_CANCELLABLE)?; - Ok(Self { - _tempdir, - path, - srcdir, - srcrepo, - destrepo, - destrepo_path, - format_version: 0, - }) - } -} - #[tokio::test] async fn test_tar_import_empty() -> Result<()> { let fixture = Fixture::new()?; @@ -160,7 +70,7 @@ async fn test_tar_export_reproducible() -> Result<()> { let fixture = Fixture::new()?; let (_, rev) = fixture .srcrepo - .read_commit(TESTREF, gio::NONE_CANCELLABLE)?; + .read_commit(fixture.testref(), gio::NONE_CANCELLABLE)?; let export1 = { let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; @@ -377,7 +287,7 @@ fn test_tar_export_structure() -> Result<()> { #[tokio::test] async fn test_tar_import_export() -> Result<()> { let fixture = Fixture::new()?; - let src_tar = tokio::fs::File::open(&initial_export(&fixture)?).await?; + let p = &initial_export(&fixture)?; + let src_tar = tokio::fs::File::open(p).await?; let imported_commit: String =
ostree_ext::tar::import_tar(&fixture.destrepo, src_tar, None).await?; @@ -454,7 +365,7 @@ async fn test_container_import_export() -> Result<()> { let fixture = Fixture::new()?; let testrev = fixture .srcrepo - .require_rev(TESTREF) + .require_rev(fixture.testref()) .context("Failed to resolve ref")?; let srcoci_path = &fixture.path.join("oci"); @@ -477,7 +388,7 @@ async fn test_container_import_export() -> Result<()> { }; let digest = ostree_ext::container::encapsulate( &fixture.srcrepo, - TESTREF, + fixture.testref(), &config, Some(opts), &srcoci_imgref, @@ -588,7 +499,7 @@ async fn test_container_write_derive() -> Result<()> { let base_oci_path = &fixture.path.join("exampleos.oci"); let _digest = ostree_ext::container::encapsulate( &fixture.srcrepo, - TESTREF, + fixture.testref(), &Config { cmd: Some(vec!["/bin/bash".to_string()]), ..Default::default() @@ -790,9 +701,10 @@ async fn test_container_write_derive() -> Result<()> { async fn test_container_import_export_registry() -> Result<()> { let tr = &*TEST_REGISTRY; let fixture = Fixture::new()?; + let testref = fixture.testref(); let testrev = fixture .srcrepo - .require_rev(TESTREF) + .require_rev(testref) .context("Failed to resolve ref")?; let src_imgref = ImageReference { transport: Transport::Registry, @@ -803,7 +715,7 @@ async fn test_container_import_export_registry() -> Result<()> { ..Default::default() }; let digest = - ostree_ext::container::encapsulate(&fixture.srcrepo, TESTREF, &config, None, &src_imgref) + ostree_ext::container::encapsulate(&fixture.srcrepo, testref, &config, None, &src_imgref) .await .context("exporting to registry")?; let mut digested_imgref = src_imgref.clone(); @@ -822,15 +734,12 @@ async fn test_container_import_export_registry() -> Result<()> { #[test] fn test_diff() -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; - let tempdir = tempfile::tempdir()?; - let tempdir = Utf8Path::from_path(tempdir.path()).unwrap(); - let repopath = &generate_test_repo(tempdir)?; - update_repo(repopath)?; - let from = &format!("{}^", TESTREF); - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repopath.as_str(), cancellable)?; + let mut fixture = Fixture::new()?; + fixture.update()?; + let from = &format!("{}^", fixture.testref()); + let repo = &fixture.srcrepo; let subdir: Option<&str> = None; - let diff = ostree_ext::diff::diff(repo, from, TESTREF, subdir)?; + let diff = ostree_ext::diff::diff(repo, from, fixture.testref(), subdir)?; assert!(diff.subdir.is_none()); assert_eq!(diff.added_dirs.len(), 1); assert_eq!(diff.added_dirs.iter().next().unwrap(), "/usr/share"); @@ -838,7 +747,7 @@ fn test_diff() -> Result<()> { assert_eq!(diff.added_files.iter().next().unwrap(), "/usr/bin/newbin"); assert_eq!(diff.removed_files.len(), 1); assert_eq!(diff.removed_files.iter().next().unwrap(), "/usr/bin/foo"); - let diff = ostree_ext::diff::diff(repo, from, TESTREF, Some("/usr"))?; + let diff = ostree_ext::diff::diff(repo, from, fixture.testref(), Some("/usr"))?; assert_eq!(diff.subdir.as_ref().unwrap(), "/usr"); assert_eq!(diff.added_dirs.len(), 1); assert_eq!(diff.added_dirs.iter().next().unwrap(), "/share"); From 112a69dfea6ace4238d2333b0c2fb4ecf7672fce Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 13 Feb 2022 13:39:24 -0500 Subject: [PATCH 295/775] testsuite: Port to `sh-inline` 0.2 Prep for further work. I just cut a new release of `sh-inline` - we now have a `cap-std-ext` feature which lets us use `bash_in!` which takes a reference to a directory fd. 
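To make that concrete, here is a minimal sketch of the new idiom (the helper function and script below are illustrative only; the `bash_in!` macro is used exactly as in the ports that follow):

    use anyhow::Result;
    use cap_std::fs::Dir;
    use sh_inline::bash_in;

    // Run a script with `dir` as its working directory: no `cd` at the
    // top of the script, and `rev` is expanded by the shell as ${rev}
    // rather than being spliced into the script text.
    fn show_commit(dir: &Dir, rev: &str) -> Result<()> {
        bash_in!(dir, "ostree --repo=repo show ${rev} >/dev/null", rev)?;
        Ok(())
    }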
Also of note, passed variables are now expanded shell-style as `${foo}` instead of being textually substituted. This is significantly less fragile than having our script do a `cd` at the top. This also starts us down the path to porting the framework to use `cap-std`, e.g. the fixture uses the new `open_at_dir()` API from https://github.com/ostreedev/ostree-rs/commit/e1fa1da4cafad874a6e58fbea326ded5255e0cdd --- lib/Cargo.toml | 2 +- lib/tests/it/fixture.rs | 78 ++++++++++++++++++++++++----------------- lib/tests/it/main.rs | 22 ++++++------ 3 files changed, 58 insertions(+), 44 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index f3aa52f4c..a89face5f 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -46,7 +46,7 @@ tracing = "0.1" [dev-dependencies] indoc = "1.0.3" quickcheck = "1" -sh-inline = "0.1.0" +sh-inline = { version = "0.2", features = ["cap-std-ext"] } # https://github.com/rust-lang/cargo/issues/2911 # https://github.com/rust-lang/rfcs/pull/1956 ostree-ext = { path = ".", features = ["internal-testing-api"] } diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index e73e4ef8e..599981fa2 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -1,10 +1,15 @@ -use anyhow::Result; +use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; +use cap_std::fs::Dir; +use cap_std_ext::prelude::CapStdExtCommandExt; use fn_error_context::context; use indoc::indoc; +use ostree::cap_std; use ostree_ext::gio; -use sh_inline::bash; +use sh_inline::bash_in; use std::convert::TryInto; +use std::process::Stdio; +use std::sync::Arc; const OSTREE_GPG_HOME: &[u8] = include_bytes!("fixtures/ostree-gpg-test-home.tar.gz"); const TEST_GPG_KEYID_1: &str = "7FCA23D8472CDAFA"; @@ -17,6 +22,7 @@ const TESTREF: &str = "exampleos/x86_64/stable"; pub(crate) struct Fixture { // Just holds a reference _tempdir: tempfile::TempDir, + pub(crate) dir: Arc<Dir>
, pub(crate) path: Utf8PathBuf, pub(crate) srcdir: Utf8PathBuf, pub(crate) srcrepo: ostree::Repo, @@ -28,15 +34,19 @@ pub(crate) struct Fixture { impl Fixture { pub(crate) fn new() -> Result<Self> { - let _tempdir = tempfile::tempdir_in("/var/tmp")?; - let path: &Utf8Path = _tempdir.path().try_into().unwrap(); + let tempdir = tempfile::tempdir_in("/var/tmp")?; + let dir = Arc::new(cap_std::fs::Dir::open_ambient_dir( + tempdir.path(), + cap_std::ambient_authority(), + )?); + let path: &Utf8Path = tempdir.path().try_into().unwrap(); let path = path.to_path_buf(); let srcdir = path.join("src"); std::fs::create_dir(&srcdir)?; - let srcrepo_path = generate_test_repo(&srcdir, TESTREF)?; - let srcrepo = - ostree::Repo::open_at(libc::AT_FDCWD, srcrepo_path.as_str(), gio::NONE_CANCELLABLE)?; + let srcdir_dfd = &dir.open_dir("src")?; + generate_test_repo(srcdir_dfd, TESTREF)?; + let srcrepo = ostree::Repo::open_at_dir(srcdir_dfd, "repo")?; let destdir = &path.join("dest"); std::fs::create_dir(destdir)?; @@ -44,7 +54,8 @@ impl Fixture { let destrepo = ostree::Repo::new_for_path(&destrepo_path); destrepo.create(ostree::RepoMode::BareUser, gio::NONE_CANCELLABLE)?; Ok(Self { - _tempdir, + _tempdir: tempdir, + dir, path, srcdir, srcrepo, @@ -58,18 +69,18 @@ impl Fixture { TESTREF } + #[context("Updating test repo")] pub(crate) fn update(&mut self) -> Result<()> { let repopath = &self.srcdir.join("repo"); let repotmp = &repopath.join("tmp"); let srcpath = &repotmp.join("exampleos-v1.tar.zst"); std::fs::write(srcpath, EXAMPLEOS_V1)?; let srcpath = srcpath.as_str(); - let repopath = repopath.as_str(); let testref = TESTREF; - bash!( - "ostree --repo={repopath} commit -b {testref} --no-bindings --tree=tar={srcpath}", + bash_in!( + self.dir.open_dir("src")?, + "ostree --repo=repo commit -b ${testref} --no-bindings --tree=tar=${srcpath}", testref, - repopath, srcpath )?; - std::fs::remove_file(srcpath)?; @@ -78,30 +89,33 @@ } #[context("Generating test repo")] -pub(crate) fn generate_test_repo(dir: &Utf8Path, testref: &str) -> Result<Utf8PathBuf> { - let src_tarpath = &dir.join("exampleos.tar.zst"); - std::fs::write(src_tarpath, EXAMPLEOS_V0)?; +pub(crate) fn generate_test_repo(dir: &Dir, testref: &str) -> Result<()> { + let gpgtarname = "gpghome.tgz"; + dir.write(gpgtarname, OSTREE_GPG_HOME)?; + let gpgtar = dir.open(gpgtarname)?; + dir.remove_file(gpgtarname)?; - let gpghome = dir.join("gpghome"); - { - let dec = flate2::read::GzDecoder::new(OSTREE_GPG_HOME); - let mut a = tar::Archive::new(dec); - a.unpack(&gpghome)?; - }; - - bash!( + dir.create_dir("gpghome")?; + let gpghome = dir.open_dir("gpghome")?; + let st = std::process::Command::new("tar") + .cwd_dir_owned(gpghome) + .stdin(Stdio::from(gpgtar)) + .args(&["-azxf", "-"]) + .status()?; + assert!(st.success()); + let tarname = "exampleos.tar.zst"; + dir.write(tarname, EXAMPLEOS_V0)?; + bash_in!( + dir, indoc!
{" - cd {dir} ostree --repo=repo init --mode=archive - ostree --repo=repo commit -b {testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir={gpghome} --gpg-sign={keyid} \ + ostree --repo=repo commit -b ${testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir=gpghome --gpg-sign=${keyid} \ --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null - ostree --repo=repo show {testref} >/dev/null + ostree --repo=repo show ${testref} >/dev/null "}, testref = testref, - gpghome = gpghome.as_str(), - keyid = TEST_GPG_KEYID_1, - dir = dir.as_str() - )?; - std::fs::remove_file(src_tarpath)?; - Ok(dir.join("repo")) + keyid = TEST_GPG_KEYID_1 + ).context("Writing commit")?; + dir.remove_file(tarname)?; + Ok(()) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 863d04209..28c960f14 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -124,7 +124,7 @@ async fn test_tar_import_signed() -> Result<()> { // And signed correctly bash!( - "ostree --repo={repo} remote gpg-import --stdin myremote < {p}/gpghome/key1.asc >/dev/null", + "ostree --repo=${repo} remote gpg-import --stdin myremote < ${p}/gpghome/key1.asc >/dev/null", repo = fixture.destrepo_path.as_str(), p = fixture.srcdir.as_str() )?; @@ -301,9 +301,9 @@ async fn test_tar_import_export() -> Result<()> { ); bash!( r#" - ostree --repo={destrepodir} ls -R {imported_commit} >/dev/null - val=$(ostree --repo={destrepodir} show --print-detached-metadata-key=my-detached-key {imported_commit}) - test "${{val}}" = "'my-detached-value'" + ostree --repo=${destrepodir} ls -R ${imported_commit} >/dev/null + val=$(ostree --repo=${destrepodir} show --print-detached-metadata-key=my-detached-key ${imported_commit}) + test "${val}" = "'my-detached-value'" "#, destrepodir = fixture.destrepo_path.as_str(), imported_commit = imported_commit.as_str() @@ -326,14 +326,14 @@ async fn test_tar_write() -> Result<()> { std::fs::create_dir_all(tmproot.join("boot"))?; let tmptar = fixture.path.join("testlayer.tar"); bash!( - "tar cf {tmptar} -C {tmproot} .", + "tar cf ${tmptar} -C ${tmproot} .", tmptar = tmptar.as_str(), tmproot = tmproot.as_str() )?; let src = tokio::fs::File::open(&tmptar).await?; let r = ostree_ext::tar::write_tar(&fixture.destrepo, src, "layer", None).await?; bash!( - "ostree --repo={repo} ls {layer_commit} /usr/etc/someconfig.conf >/dev/null", + "ostree --repo=${repo} ls ${layer_commit} /usr/etc/someconfig.conf >/dev/null", repo = fixture.destrepo_path.as_str(), layer_commit = r.commit.as_str() )?; @@ -446,7 +446,7 @@ async fn test_container_import_export() -> Result<()> { .destrepo .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; bash!( - "ostree --repo={repo} remote gpg-import --stdin myremote < {p}/gpghome/key1.asc", + "ostree --repo=${repo} remote gpg-import --stdin myremote < ${p}/gpghome/key1.asc", repo = fixture.destrepo_path.as_str(), p = fixture.srcdir.as_str() )?; @@ -597,7 +597,7 @@ async fn test_container_write_derive() -> Result<()> { // Parse the commit and verify we pulled the derived content. 
bash!( - "ostree --repo={repo} ls {r} /usr/bin/newderivedfile >/dev/null", + "ostree --repo=${repo} ls ${r} /usr/bin/newderivedfile >/dev/null", repo = fixture.destrepo_path.as_str(), r = import.merge_commit.as_str() )?; @@ -648,9 +648,9 @@ async fn test_container_write_derive() -> Result<()> { // Verify we have the new file and *not* the old one bash!( r#"set -x; - ostree --repo={repo} ls {r} /usr/bin/newderivedfile2 >/dev/null - test "$(ostree --repo={repo} cat {r} /usr/bin/newderivedfile)" = "newderivedfile v1" - if ostree --repo={repo} ls {r} /usr/bin/newderivedfile3 2>/dev/null; then + ostree --repo=${repo} ls ${r} /usr/bin/newderivedfile2 >/dev/null + test "$(ostree --repo=${repo} cat ${r} /usr/bin/newderivedfile)" = "newderivedfile v1" + if ostree --repo=${repo} ls ${r} /usr/bin/newderivedfile3 2>/dev/null; then echo oops; exit 1 fi "#, From 4a058c52fb2931d423507ba74f3fb124144954b7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 14 Feb 2022 17:21:53 -0500 Subject: [PATCH 296/775] tests: Split off fixture base prep from repo initialization --- lib/tests/it/fixture.rs | 46 ++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index 599981fa2..86126e05a 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -33,7 +33,9 @@ pub(crate) struct Fixture { } impl Fixture { - pub(crate) fn new() -> Result<Self> { + #[context("Initializing fixture")] + pub(crate) fn new_base() -> Result<Self> { + // Basic setup, allocate a tempdir let tempdir = tempfile::tempdir_in("/var/tmp")?; let dir = Arc::new(cap_std::fs::Dir::open_ambient_dir( tempdir.path(), @@ -42,11 +44,28 @@ impl Fixture { let path: &Utf8Path = tempdir.path().try_into().unwrap(); let path = path.to_path_buf(); + // Create the src/ directory let srcdir = path.join("src"); std::fs::create_dir(&srcdir)?; let srcdir_dfd = &dir.open_dir("src")?; + + // Initialize the src/gpghome/ directory + let gpgtarname = "gpghome.tgz"; + srcdir_dfd.write(gpgtarname, OSTREE_GPG_HOME)?; + let gpgtar = srcdir_dfd.open(gpgtarname)?; + srcdir_dfd.remove_file(gpgtarname)?; + srcdir_dfd.create_dir("gpghome")?; + let gpghome = srcdir_dfd.open_dir("gpghome")?; + let st = std::process::Command::new("tar") + .cwd_dir_owned(gpghome) + .stdin(Stdio::from(gpgtar)) + .args(&["-azxf", "-"]) + .status()?; + assert!(st.success()); + let srcrepo = + ostree::Repo::create_at_dir(srcdir_dfd, "repo", ostree::RepoMode::Archive, None) + .context("Creating src/ repo")?; let destdir = &path.join("dest"); std::fs::create_dir(destdir)?; @@ -65,6 +84,12 @@ } + pub(crate) fn new() -> Result<Self> { + let r = Self::new_base()?; + generate_test_repo(&r.dir.open_dir("src")?, TESTREF)?; + Ok(r) + } + pub(crate) fn testref(&self) -> &'static str { TESTREF } @@ -89,20 +114,7 @@ } #[context("Generating test repo")] -pub(crate) fn generate_test_repo(dir: &Dir, testref: &str) -> Result<()> { - let gpgtarname = "gpghome.tgz"; - dir.write(gpgtarname, OSTREE_GPG_HOME)?; - let gpgtar = dir.open(gpgtarname)?; - dir.remove_file(gpgtarname)?; - - dir.create_dir("gpghome")?; - let gpghome = dir.open_dir("gpghome")?; - let st = std::process::Command::new("tar") - .cwd_dir_owned(gpghome) - .stdin(Stdio::from(gpgtar)) - .args(&["-azxf", "-"]) - .status()?; - assert!(st.success()); +fn generate_test_repo(dir: &Dir, testref: &str) -> Result<()> { let tarname =
"exampleos.tar.zst"; dir.write(tarname, EXAMPLEOS_V0)?; bash_in!( From 83bd76ec502c162b40a5362b9b9b4c4042c9fa3a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 14 Feb 2022 17:27:27 -0500 Subject: [PATCH 297/775] tests: Port to use more `bash_in!` and drop absolute paths --- lib/tests/it/fixture.rs | 11 +++-------- lib/tests/it/main.rs | 43 ++++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 30 deletions(-) diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index 86126e05a..5eaa1e95c 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -5,7 +5,6 @@ use cap_std_ext::prelude::CapStdExtCommandExt; use fn_error_context::context; use indoc::indoc; use ostree::cap_std; -use ostree_ext::gio; use sh_inline::bash_in; use std::convert::TryInto; use std::process::Stdio; @@ -27,7 +26,6 @@ pub(crate) struct Fixture { pub(crate) srcdir: Utf8PathBuf, pub(crate) srcrepo: ostree::Repo, pub(crate) destrepo: ostree::Repo, - pub(crate) destrepo_path: Utf8PathBuf, pub(crate) format_version: u32, } @@ -67,11 +65,9 @@ impl Fixture { ostree::Repo::create_at_dir(srcdir_dfd, "repo", ostree::RepoMode::Archive, None) .context("Creating src/ repo")?; - let destdir = &path.join("dest"); - std::fs::create_dir(destdir)?; - let destrepo_path = destdir.join("repo"); - let destrepo = ostree::Repo::new_for_path(&destrepo_path); - destrepo.create(ostree::RepoMode::BareUser, gio::NONE_CANCELLABLE)?; + dir.create_dir("dest")?; + let destrepo = + ostree::Repo::create_at_dir(&dir, "dest/repo", ostree::RepoMode::BareUser, None)?; Ok(Self { _tempdir: tempdir, dir, @@ -79,7 +75,6 @@ impl Fixture { srcdir, srcrepo, destrepo, - destrepo_path, format_version: 0, }) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 28c960f14..fcc5e6302 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -10,7 +10,7 @@ use ostree_ext::container::{ }; use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; -use sh_inline::bash; +use sh_inline::{bash, bash_in}; use std::collections::HashMap; use std::{io::Write, process::Command}; @@ -123,9 +123,8 @@ async fn test_tar_import_signed() -> Result<()> { assert_err_contains(r, r#"Can't check signature: public key not found"#); // And signed correctly - bash!( - "ostree --repo=${repo} remote gpg-import --stdin myremote < ${p}/gpghome/key1.asc >/dev/null", - repo = fixture.destrepo_path.as_str(), + bash_in!(&fixture.dir, + "ostree --repo=dest/repo remote gpg-import --stdin myremote < ${p}/gpghome/key1.asc >/dev/null", p = fixture.srcdir.as_str() )?; let src_tar = tokio::fs::File::open(test_tar).await?; @@ -299,13 +298,13 @@ async fn test_tar_import_export() -> Result<()> { .unwrap() .as_str() ); - bash!( + bash_in!( + &fixture.dir, r#" - ostree --repo=${destrepodir} ls -R ${imported_commit} >/dev/null - val=$(ostree --repo=${destrepodir} show --print-detached-metadata-key=my-detached-key ${imported_commit}) + ostree --repo=dest/repo ls -R ${imported_commit} >/dev/null + val=$(ostree --repo=dest/repo show --print-detached-metadata-key=my-detached-key ${imported_commit}) test "${val}" = "'my-detached-value'" "#, - destrepodir = fixture.destrepo_path.as_str(), imported_commit = imported_commit.as_str() )?; Ok(()) @@ -332,9 +331,9 @@ async fn test_tar_write() -> Result<()> { )?; let src = tokio::fs::File::open(&tmptar).await?; let r = ostree_ext::tar::write_tar(&fixture.destrepo, src, "layer", None).await?; - bash!( - "ostree --repo=${repo} ls ${layer_commit} /usr/etc/someconfig.conf >/dev/null", - repo = 
fixture.destrepo_path.as_str(), + bash_in!( + &fixture.dir, + "ostree --repo=dest/repo ls ${layer_commit} /usr/etc/someconfig.conf >/dev/null", layer_commit = r.commit.as_str() )?; assert_eq!(r.filtered.len(), 2); @@ -445,9 +444,9 @@ async fn test_container_import_export() -> Result<()> { fixture .destrepo .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; - bash!( - "ostree --repo=${repo} remote gpg-import --stdin myremote < ${p}/gpghome/key1.asc", - repo = fixture.destrepo_path.as_str(), + bash_in!( + &fixture.dir, + "ostree --repo=dest/repo remote gpg-import --stdin myremote < ${p}/gpghome/key1.asc", p = fixture.srcdir.as_str() )?; @@ -596,9 +595,9 @@ async fn test_container_write_derive() -> Result<()> { } // Parse the commit and verify we pulled the derived content. - bash!( - "ostree --repo=${repo} ls ${r} /usr/bin/newderivedfile >/dev/null", - repo = fixture.destrepo_path.as_str(), + bash_in!( + &fixture.dir, + "ostree --repo=dest/repo ls ${r} /usr/bin/newderivedfile >/dev/null", r = import.merge_commit.as_str() )?; @@ -646,15 +645,15 @@ async fn test_container_write_derive() -> Result<()> { assert_eq!(images.len(), 1); // Verify we have the new file and *not* the old one - bash!( + bash_in!( + &fixture.dir, r#"set -x; - ostree --repo=${repo} ls ${r} /usr/bin/newderivedfile2 >/dev/null - test "$(ostree --repo=${repo} cat ${r} /usr/bin/newderivedfile)" = "newderivedfile v1" - if ostree --repo=${repo} ls ${r} /usr/bin/newderivedfile3 2>/dev/null; then + ostree --repo=dest/repo ls ${r} /usr/bin/newderivedfile2 >/dev/null + test "$(ostree --repo=dest/repo cat ${r} /usr/bin/newderivedfile)" = "newderivedfile v1" + if ostree --repo=dest/repo ls ${r} /usr/bin/newderivedfile3 2>/dev/null; then echo oops; exit 1 fi "#, - repo = fixture.destrepo_path.as_str(), r = import.merge_commit.as_str() )?; From b9593a2519f2d3ddf4c9b49322a6ef39dc39628f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 14 Feb 2022 17:30:42 -0500 Subject: [PATCH 298/775] tests: Drop src absolute path This patch series is starting to feel like spraying Windex on a dirty window, and watching it get cleaner.
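Spelling out the pattern, since the next several patches repeat it: every path is resolved relative to the fixture's directory fd rather than via an absolute path under the tempdir. A minimal sketch (the file names here are hypothetical; the `Dir` methods are the same cap-std ones the fixture uses):

    use anyhow::Result;
    use cap_std::fs::Dir;

    fn stage_update_tarball(dir: &Dir) -> Result<()> {
        // Every path below resolves relative to `dir`, never the process cwd.
        dir.create_dir_all("src/repo/tmp")?;
        dir.write("src/repo/tmp/update.tar.zst", b"tar-bytes-here")?;
        dir.remove_file("src/repo/tmp/update.tar.zst")?;
        Ok(())
    }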
--- lib/tests/it/fixture.rs | 20 +++++++------------- lib/tests/it/main.rs | 6 ++---- 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index 5eaa1e95c..0254401d2 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -23,7 +23,6 @@ _tempdir: tempfile::TempDir, pub(crate) dir: Arc<Dir>, pub(crate) path: Utf8PathBuf, - pub(crate) srcdir: Utf8PathBuf, pub(crate) srcrepo: ostree::Repo, pub(crate) destrepo: ostree::Repo, @@ -43,8 +42,7 @@ impl Fixture { let path = path.to_path_buf(); // Create the src/ directory - let srcdir = path.join("src"); - std::fs::create_dir(&srcdir)?; + dir.create_dir("src")?; let srcdir_dfd = &dir.open_dir("src")?; // Initialize the src/gpghome/ directory @@ -72,7 +70,6 @@ _tempdir: tempdir, dir, path, - srcdir, srcrepo, destrepo, format_version: 0, @@ -91,19 +88,16 @@ #[context("Updating test repo")] pub(crate) fn update(&mut self) -> Result<()> { - let repopath = &self.srcdir.join("repo"); - let repotmp = &repopath.join("tmp"); - let srcpath = &repotmp.join("exampleos-v1.tar.zst"); - std::fs::write(srcpath, EXAMPLEOS_V1)?; - let srcpath = srcpath.as_str(); + let tmptarpath = "src/repo/tmp/exampleos-v1.tar.zst"; + self.dir.write(tmptarpath, EXAMPLEOS_V1)?; let testref = TESTREF; bash_in!( - self.dir.open_dir("src")?, - "ostree --repo=repo commit -b ${testref} --no-bindings --tree=tar=${srcpath}", + &self.dir, + "ostree --repo=src/repo commit -b ${testref} --no-bindings --tree=tar=${tmptarpath}", testref, - srcpath + tmptarpath )?; - std::fs::remove_file(srcpath)?; + self.dir.remove_file(tmptarpath)?; Ok(()) } } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index fcc5e6302..b40a373ac 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -124,8 +124,7 @@ async fn test_tar_import_signed() -> Result<()> { // And signed correctly bash_in!(&fixture.dir, - "ostree --repo=dest/repo remote gpg-import --stdin myremote < ${p}/gpghome/key1.asc >/dev/null", - p = fixture.srcdir.as_str() + "ostree --repo=dest/repo remote gpg-import --stdin myremote < src/gpghome/key1.asc >/dev/null", )?; let src_tar = tokio::fs::File::open(test_tar).await?; @@ -446,8 +445,7 @@ async fn test_container_import_export() -> Result<()> { .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; bash_in!( &fixture.dir, - "ostree --repo=dest/repo remote gpg-import --stdin myremote < ${p}/gpghome/key1.asc", - p = fixture.srcdir.as_str() + "ostree --repo=dest/repo remote gpg-import --stdin myremote < src/gpghome/key1.asc", )?; // No remote matching From ca1f340b89e8540766e0b84107ab9931c0389bda Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 14 Feb 2022 17:53:11 -0500 Subject: [PATCH 299/775] tests: Use cap-std in more places --- lib/tests/it/main.rs | 58 +++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index b40a373ac..a782fbc0f 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1,7 +1,7 @@ mod fixture; use anyhow::{Context, Result}; -use camino::{Utf8Path, Utf8PathBuf}; +use camino::Utf8Path; use fn_error_context::context; use once_cell::sync::Lazy; use ostree_ext::container::store::PrepareResult; @@ -10,7 +10,7 @@ use ostree_ext::container::{ }; use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; -use sh_inline::{bash, bash_in}; +use sh_inline::bash_in; use
std::collections::HashMap; use std::{io::Write, process::Command}; @@ -34,7 +34,7 @@ static TEST_REGISTRY: Lazy<String> = Lazy::new(|| match std::env::var_os("TEST_R }); #[context("Generating test tarball")] -fn initial_export(fixture: &Fixture) -> Result<Utf8PathBuf> { +fn initial_export(fixture: &Fixture) -> Result<&'static Utf8Path> { let cancellable = gio::NONE_CANCELLABLE; let (_, rev) = fixture .srcrepo @@ -46,15 +46,15 @@ fn initial_export(fixture: &Fixture) -> Result<&'static Utf8Path> { .as_str(), EXAMPLEOS_CONTENT_CHECKSUM ); - let destpath = fixture.path.join("exampleos-export.tar"); - let mut outf = std::io::BufWriter::new(std::fs::File::create(&destpath)?); + let path = "exampleos-export.tar"; + let mut outf = std::io::BufWriter::new(fixture.dir.create(path)?); let options = ostree_ext::tar::ExportOptions { format_version: fixture.format_version, ..Default::default() }; ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut outf, Some(options))?; outf.flush()?; - Ok(destpath) + Ok(path.into()) } @@ -90,10 +90,10 @@ #[tokio::test] async fn test_tar_import_signed() -> Result<()> { let fixture = Fixture::new()?; - let test_tar = &initial_export(&fixture)?; + let test_tar = initial_export(&fixture)?; // Verify we fail with an unknown remote. - let src_tar = tokio::fs::File::open(test_tar).await?; + let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); let r = ostree_ext::tar::import_tar( &fixture.destrepo, src_tar, @@ -111,7 +111,7 @@ fixture .destrepo .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; - let src_tar = tokio::fs::File::open(test_tar).await?; + let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); let r = ostree_ext::tar::import_tar( &fixture.destrepo, src_tar, @@ -126,7 +126,7 @@ bash_in!(&fixture.dir, "ostree --repo=dest/repo remote gpg-import --stdin myremote < src/gpghome/key1.asc >/dev/null", )?; - let src_tar = tokio::fs::File::open(test_tar).await?; + let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); let imported = ostree_ext::tar::import_tar( &fixture.destrepo, src_tar, @@ -215,7 +215,7 @@ fn test_tar_export_structure() -> Result<()> { use tar::EntryType::{Directory, Regular}; let mut fixture = Fixture::new()?; let src_tar = initial_export(&fixture)?; - let src_tar = std::io::BufReader::new(std::fs::File::open(&src_tar)?); + let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?); let mut src_tar = tar::Archive::new(src_tar); let mut entries = src_tar.entries()?; // The first entry should be the root directory.
@@ -254,7 +254,7 @@ // Validate format version 1 fixture.format_version = 1; let src_tar = initial_export(&fixture)?; - let src_tar = std::io::BufReader::new(std::fs::File::open(&src_tar)?); + let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?); let mut src_tar = tar::Archive::new(src_tar); let expected = [ ("sysroot/ostree/repo", Directory, 0o755), @@ -285,8 +285,8 @@ #[tokio::test] async fn test_tar_import_export() -> Result<()> { let fixture = Fixture::new()?; - let p = &initial_export(&fixture)?; - let src_tar = tokio::fs::File::open(p).await?; + let p = initial_export(&fixture)?; + let src_tar = tokio::fs::File::from_std(fixture.dir.open(p)?.into_std()); let imported_commit: String = @@ -313,22 +313,20 @@ async fn test_tar_write() -> Result<()> { let fixture = Fixture::new()?; // Test translating /etc to /usr/etc - let tmpetc = fixture.path.join("tmproot/etc"); - std::fs::create_dir_all(&tmpetc)?; - std::fs::write(tmpetc.join("someconfig.conf"), b"")?; - let tmproot = tmpetc.parent().unwrap(); - let tmpvarlib = &tmproot.join("var/lib"); - std::fs::create_dir_all(tmpvarlib)?; - std::fs::write(tmpvarlib.join("foo.log"), "foolog")?; - std::fs::write(tmpvarlib.join("bar.log"), "barlog")?; - std::fs::create_dir_all(tmproot.join("boot"))?; - let tmptar = fixture.path.join("testlayer.tar"); - bash!( - "tar cf ${tmptar} -C ${tmproot} .", - tmptar = tmptar.as_str(), - tmproot = tmproot.as_str() - )?; - let src = tokio::fs::File::open(&tmptar).await?; + fixture.dir.create_dir_all("tmproot/etc")?; + let tmproot = &fixture.dir.open_dir("tmproot")?; + let tmpetc = tmproot.open_dir("etc")?; + tmpetc.write("someconfig.conf", b"some config")?; + tmproot.create_dir_all("var/log")?; + let tmpvarlog = tmproot.open_dir("var/log")?; + tmpvarlog.write("foo.log", "foolog")?; + tmpvarlog.write("bar.log", "barlog")?; + tmproot.create_dir("boot")?; + let tmptar = "testlayer.tar"; + bash_in!(fixture.dir, "tar cf ${tmptar} -C tmproot .", tmptar)?; + let src = fixture.dir.open(tmptar)?; + fixture.dir.remove_file(tmptar)?; + let src = tokio::fs::File::from_std(src.into_std()); let r = ostree_ext::tar::write_tar(&fixture.destrepo, src, "layer", None).await?; From 828c0e6768986df20b3f01b4933a5666568ea3bf Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 14 Feb 2022 18:22:12 -0500 Subject: [PATCH 300/775] tests: Move initial commit into fixture --- lib/tests/it/fixture.rs | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index 0254401d2..903ffeae6 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -78,7 +78,19 @@ impl Fixture { pub(crate) fn new() -> Result<Self> { let r = Self::new_base()?; - generate_test_repo(&r.dir.open_dir("src")?, TESTREF)?; + let tarname = "exampleos.tar.zst"; + r.dir.write(tarname, EXAMPLEOS_V0)?; + bash_in!( + r.dir, + indoc!
{" + ostree --repo=src/repo commit -b ${testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir=src/gpghome --gpg-sign=${keyid} \ + --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null + ostree --repo=src/repo show ${testref} >/dev/null + "}, + testref = r.testref(), + keyid = TEST_GPG_KEYID_1 + ).context("Writing commit")?; + r.dir.remove_file(tarname)?; Ok(r) } @@ -101,22 +113,3 @@ impl Fixture { Ok(()) } } - -#[context("Generating test repo")] -fn generate_test_repo(dir: &Dir, testref: &str) -> Result<()> { - let tarname = "exampleos.tar.zst"; - dir.write(tarname, EXAMPLEOS_V0)?; - bash_in!( - dir, - indoc! {" - ostree --repo=repo init --mode=archive - ostree --repo=repo commit -b ${testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir=gpghome --gpg-sign=${keyid} \ - --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null - ostree --repo=repo show ${testref} >/dev/null - "}, - testref = testref, - keyid = TEST_GPG_KEYID_1 - ).context("Writing commit")?; - dir.remove_file(tarname)?; - Ok(()) -} From 34e6dca4b945cca73db9e135544908c911938bb9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 15 Feb 2022 18:28:02 -0500 Subject: [PATCH 301/775] tests: Move tar export as method on fixture It's cleaner. --- lib/tests/it/fixture.rs | 17 ++++++++++++++++ lib/tests/it/main.rs | 44 +++++++++++++---------------------------- 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index 903ffeae6..b0c2b3698 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -5,8 +5,10 @@ use cap_std_ext::prelude::CapStdExtCommandExt; use fn_error_context::context; use indoc::indoc; use ostree::cap_std; +use ostree_ext::gio; use sh_inline::bash_in; use std::convert::TryInto; +use std::io::Write; use std::process::Stdio; use std::sync::Arc; @@ -112,4 +114,19 @@ impl Fixture { self.dir.remove_file(tmptarpath)?; Ok(()) } + + #[context("Exporting tar")] + pub(crate) fn export_tar(&self) -> Result<&'static Utf8Path> { + let cancellable = gio::NONE_CANCELLABLE; + let (_, rev) = self.srcrepo.read_commit(self.testref(), cancellable)?; + let path = "exampleos-export.tar"; + let mut outf = std::io::BufWriter::new(self.dir.create(path)?); + let options = ostree_ext::tar::ExportOptions { + format_version: self.format_version, + ..Default::default() + }; + ostree_ext::tar::export_commit(&self.srcrepo, rev.as_str(), &mut outf, Some(options))?; + outf.flush()?; + Ok(path.into()) + } } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index a782fbc0f..77200d32c 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -2,7 +2,6 @@ mod fixture; use anyhow::{Context, Result}; use camino::Utf8Path; -use fn_error_context::context; use once_cell::sync::Lazy; use ostree_ext::container::store::PrepareResult; use ostree_ext::container::{ @@ -12,7 +11,7 @@ use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; use sh_inline::bash_in; use std::collections::HashMap; -use std::{io::Write, process::Command}; +use std::process::Command; use fixture::Fixture; @@ -33,30 
+32,6 @@ static TEST_REGISTRY: Lazy<String> = Lazy::new(|| match std::env::var_os("TEST_R None => TEST_REGISTRY_DEFAULT.to_string(), }); -#[context("Generating test tarball")] -fn initial_export(fixture: &Fixture) -> Result<&'static Utf8Path> { - let cancellable = gio::NONE_CANCELLABLE; - let (_, rev) = fixture - .srcrepo - .read_commit(fixture.testref(), cancellable)?; - let (commitv, _) = fixture.srcrepo.load_commit(rev.as_str())?; - assert_eq!( - ostree::commit_get_content_checksum(&commitv) - .unwrap() - .as_str(), - EXAMPLEOS_CONTENT_CHECKSUM - ); - let path = "exampleos-export.tar"; - let mut outf = std::io::BufWriter::new(fixture.dir.create(path)?); - let options = ostree_ext::tar::ExportOptions { - format_version: fixture.format_version, - ..Default::default() - }; - ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut outf, Some(options))?; - outf.flush()?; - Ok(path.into()) -} - #[tokio::test] async fn test_tar_import_empty() -> Result<()> { let fixture = Fixture::new()?; @@ -90,7 +65,16 @@ async fn test_tar_export_reproducible() -> Result<()> { #[tokio::test] async fn test_tar_import_signed() -> Result<()> { let fixture = Fixture::new()?; - let test_tar = initial_export(&fixture)?; + let test_tar = fixture.export_tar()?; + + let rev = fixture.srcrepo.require_rev(fixture.testref())?; + let (commitv, _) = fixture.srcrepo.load_commit(rev.as_str())?; + assert_eq!( + ostree::commit_get_content_checksum(&commitv) + .unwrap() + .as_str(), + EXAMPLEOS_CONTENT_CHECKSUM + ); // Verify we fail with an unknown remote. let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); @@ -214,7 +198,7 @@ fn test_tar_export_structure() -> Result<()> { use tar::EntryType::{Directory, Regular}; let mut fixture = Fixture::new()?; - let src_tar = initial_export(&fixture)?; + let src_tar = fixture.export_tar()?; let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?); let mut src_tar = tar::Archive::new(src_tar); let mut entries = src_tar.entries()?; @@ -253,7 +237,7 @@ // Validate format version 1 fixture.format_version = 1; - let src_tar = initial_export(&fixture)?; + let src_tar = fixture.export_tar()?; let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?); let mut src_tar = tar::Archive::new(src_tar); let expected = [ @@ -285,7 +269,7 @@ #[tokio::test] async fn test_tar_import_export() -> Result<()> { let fixture = Fixture::new()?; - let p = initial_export(&fixture)?; + let p = fixture.export_tar()?; let src_tar = tokio::fs::File::from_std(fixture.dir.open(p)?.into_std()); let imported_commit: String = From 0702b0debaa080830673a1c38b672d4f4444a550 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 15 Feb 2022 18:41:25 -0500 Subject: [PATCH 302/775] tests: Drop the usage of indoc for commit It's mostly just obscuring things. --- lib/tests/it/fixture.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index b0c2b3698..5afff771b 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -3,7 +3,6 @@ use camino::{Utf8Path, Utf8PathBuf}; use cap_std::fs::Dir; use cap_std_ext::prelude::CapStdExtCommandExt; use fn_error_context::context; -use indoc::indoc; use ostree::cap_std; use ostree_ext::gio; use sh_inline::bash_in; @@ -84,11 +83,11 @@ impl Fixture { r.dir.write(tarname, EXAMPLEOS_V0)?; bash_in!( r.dir, - indoc!
{" - ostree --repo=src/repo commit -b ${testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 --gpg-homedir=src/gpghome --gpg-sign=${keyid} \ - --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null - ostree --repo=src/repo show ${testref} >/dev/null - "}, + "ostree --repo=src/repo commit -b ${testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' \ + --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 \ + --gpg-homedir=src/gpghome --gpg-sign=${keyid} \ + --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null && \ + ostree --repo=src/repo show ${testref} >/dev/null", testref = r.testref(), keyid = TEST_GPG_KEYID_1 ).context("Writing commit")?; From 1b77f316c97b30cafa0b1877d57be57322f1dd84 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 15 Feb 2022 16:10:00 -0500 Subject: [PATCH 303/775] cli: Use structopt's `TryFrom` support for parsing `ostree::Repo` It's stuff like this that is just *so elegant* in Rust. --- lib/src/cli.rs | 75 ++++++++++++++++++++++++++------------------------ 1 file changed, 39 insertions(+), 36 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 77e5d0961..d683ef99f 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -7,7 +7,7 @@ use anyhow::Result; use futures_util::FutureExt; -use ostree::{gio, glib}; +use ostree::{cap_std, gio, glib}; use std::borrow::Borrow; use std::collections::BTreeMap; use std::convert::TryFrom; @@ -29,12 +29,18 @@ fn parse_base_imgref(s: &str) -> Result<ImageReference> { ImageReference::try_from(s) } +fn parse_repo(s: &str) -> Result<ostree::Repo> { + let repofd = cap_std::fs::Dir::open_ambient_dir(s, cap_std::ambient_authority())?; + Ok(ostree::Repo::open_at_dir(&repofd, ".")?) +} + /// Options for importing a tar archive. #[derive(Debug, StructOpt)] struct ImportOpts { /// Path to the repository #[structopt(long)] - repo: String, + #[structopt(parse(try_from_str = parse_repo))] + repo: ostree::Repo, /// Path to a tar archive; if unspecified, will be stdin. Currently the tar archive must not be compressed. path: Option<String>, } @@ -45,7 +51,8 @@ struct ExportOpts { /// Path to the repository #[structopt(long)] - repo: String, + #[structopt(parse(try_from_str = parse_repo))] + repo: ostree::Repo, /// The format version. Must be 0 or 1. #[structopt(long)] @@ -73,7 +80,8 @@ enum ContainerOpts { Unencapsulate { /// Path to the repository #[structopt(long)] - repo: String, + #[structopt(parse(try_from_str = parse_repo))] + repo: ostree::Repo, /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest #[structopt(parse(try_from_str = parse_imgref))] @@ -100,7 +108,8 @@ enum ContainerOpts { Encapsulate { /// Path to the repository #[structopt(long)] - repo: String, + #[structopt(parse(try_from_str = parse_repo))] + repo: ostree::Repo, /// The ostree ref or commit to export rev: String, @@ -157,14 +166,15 @@ enum ContainerImageOpts { List { /// Path to the repository #[structopt(long)] - repo: String, + #[structopt(parse(try_from_str = parse_repo))] + repo: ostree::Repo, }, /// Pull (or update) a container image.
Pull { /// Path to the repository - #[structopt(long)] - repo: String, + #[structopt(parse(try_from_str = parse_repo))] + repo: ostree::Repo, /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest #[structopt(parse(try_from_str = parse_imgref))] @@ -178,11 +188,13 @@ enum ContainerImageOpts { Copy { /// Path to the source repository #[structopt(long)] - src_repo: String, + #[structopt(parse(try_from_str = parse_repo))] + src_repo: ostree::Repo, /// Path to the destination repository #[structopt(long)] - dest_repo: String, + #[structopt(parse(try_from_str = parse_repo))] + dest_repo: ostree::Repo, /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest #[structopt(parse(try_from_str = parse_imgref))] @@ -226,7 +238,8 @@ struct ImaSignOpts { /// Path to the repository #[structopt(long)] - repo: String, + #[structopt(parse(try_from_str = parse_repo))] + repo: ostree::Repo, /// The ostree ref or commit to use as a base src_rev: String, /// The ostree ref to use for writing the signed commit @@ -279,13 +292,12 @@ impl Into for ContainerProxyOpts { /// Import a tar archive containing an ostree commit. async fn tar_import(opts: &ImportOpts) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; let imported = if let Some(path) = opts.path.as_ref() { let instream = tokio::fs::File::open(path).await?; - crate::tar::import_tar(repo, instream, None).await? + crate::tar::import_tar(&opts.repo, instream, None).await? } else { let stdin = tokio::io::stdin(); - crate::tar::import_tar(repo, stdin, None).await? + crate::tar::import_tar(&opts.repo, stdin, None).await? }; println!("Imported: {}", imported); Ok(()) @@ -293,13 +305,17 @@ async fn tar_import(opts: &ImportOpts) -> Result<()> { /// Export a tar archive containing an ostree commit. fn tar_export(opts: &ExportOpts) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, opts.repo.as_str(), gio::NONE_CANCELLABLE)?; #[allow(clippy::needless_update)] let subopts = crate::tar::ExportOptions { format_version: opts.format_version, ..Default::default() }; - crate::tar::export_commit(repo, opts.rev.as_str(), std::io::stdout(), Some(subopts))?; + crate::tar::export_commit( + &opts.repo, + opts.rev.as_str(), + std::io::stdout(), + Some(subopts), + )?; Ok(()) } @@ -310,12 +326,11 @@ enum ProgressOrFinish { /// Import a container image with an encapsulated ostree commit. async fn container_import( - repo: &str, + repo: &ostree::Repo, imgref: &OstreeImageReference, write_ref: Option<&str>, quiet: bool, ) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default()); let target = indicatif::ProgressDrawTarget::stdout(); let style = indicatif::ProgressStyle::default_bar(); @@ -379,14 +394,13 @@ async fn container_import( /// Export a container image with an encapsulated ostree commit. async fn container_export( - repo: &str, + repo: &ostree::Repo, rev: &str, imgref: &ImageReference, labels: BTreeMap<String, String>, copy_meta_keys: Vec<String>, cmd: Option<Vec<String>>, ) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; let config = Config { labels: Some(labels), cmd, @@ -409,11 +423,10 @@ async fn container_info(imgref: &OstreeImageReference) -> Result<()> { /// Write a layered container image into an OSTree commit.
async fn container_store( - repo: &str, + repo: &ostree::Repo, imgref: &OstreeImageReference, proxyopts: ContainerProxyOpts, ) -> Result<()> { - let repo = &ostree::Repo::open_at(libc::AT_FDCWD, repo, gio::NONE_CANCELLABLE)?; let mut imp = LayeredImageImporter::new(repo, imgref, proxyopts.into()).await?; let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { @@ -460,14 +473,12 @@ async fn container_store( /// Add IMA signatures to an ostree commit, generating a new commit. fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { - let repo = - &ostree::Repo::open_at(libc::AT_FDCWD, cmdopts.repo.as_str(), gio::NONE_CANCELLABLE)?; let signopts = crate::ima::ImaOpts { algorithm: cmdopts.algorithm.clone(), key: cmdopts.key.clone(), }; - let signed_commit = crate::ima::ima_sign(repo, cmdopts.src_rev.as_str(), &signopts)?; - repo.set_ref_immediate( + let signed_commit = crate::ima::ima_sign(&cmdopts.repo, cmdopts.src_rev.as_str(), &signopts)?; + cmdopts.repo.set_ref_immediate( None, cmdopts.target_ref.as_str(), Some(signed_commit.as_str()), @@ -530,9 +541,7 @@ where } ContainerOpts::Image(opts) => match opts { ContainerImageOpts::List { repo } => { - let repo = - &ostree::Repo::open_at(libc::AT_FDCWD, &repo, gio::NONE_CANCELLABLE)?; - for image in crate::container::store::list_images(repo)? { + for image in crate::container::store::list_images(&repo)? { println!("{}", image); } Ok(()) @@ -546,13 +555,7 @@ where src_repo, dest_repo, imgref, - } => { - let src_repo = - &ostree::Repo::open_at(libc::AT_FDCWD, &src_repo, gio::NONE_CANCELLABLE)?; - let dest_repo = - &ostree::Repo::open_at(libc::AT_FDCWD, &dest_repo, gio::NONE_CANCELLABLE)?; - crate::container::store::copy(src_repo, dest_repo, &imgref).await - } + } => crate::container::store::copy(&src_repo, &dest_repo, &imgref).await, ContainerImageOpts::Deploy { sysroot, stateroot, From 1319373941bcace44ae8d621c46a270633ae3dbb Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 16 Feb 2022 16:25:14 -0500 Subject: [PATCH 304/775] tar: Add a debug function to filter tar To help us debug. --- lib/src/cli.rs | 4 ++++ lib/src/tar/write.rs | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index d683ef99f..8b34bb291 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -258,6 +258,7 @@ enum TestingOpts { DetectEnv, /// Execute integration tests, assuming mutable environment Run, + FilterTar, } /// Toplevel options for extended ostree functionality. @@ -497,6 +498,9 @@ fn testing(opts: &TestingOpts) -> Result<()> { Ok(()) } TestingOpts::Run => crate::integrationtest::run_tests(), + TestingOpts::FilterTar => { + crate::tar::filter_tar(std::io::stdin(), std::io::stdout()).map(|_| {}) + } } } diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index eba87a40c..7d251b983 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -127,7 +127,10 @@ fn normalize_validate_path(path: &Utf8Path) -> Result> /// Remember that we're parsing this while we're downloading it, and in order /// to verify integrity we rely on the total sha256 of the blob, so all content /// written before then must be considered untrusted. 
-fn filter_tar(src: impl std::io::Read, dest: impl std::io::Write) -> Result<BTreeMap<String, u32>> { +pub(crate) fn filter_tar( + src: impl std::io::Read, + dest: impl std::io::Write, +) -> Result<BTreeMap<String, u32>> { let src = std::io::BufReader::new(src); let mut src = tar::Archive::new(src); let dest = BufWriter::new(dest); From b5276a0889c7bd312b08dd8f773d51ef42d351e0 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 16 Feb 2022 07:33:38 -0500 Subject: [PATCH 305/775] tests: Add much beefed up alternative path for test framework The main thing I want to get away from is hardcoded tarball fixtures. I do want the commit digests we write to be reproducible, and I also want the unit tests to work on a non-SELinux system. This patch adds an alternative flow to the test fixture via `new_v1()` that uses the native ostree APIs to write and generate commits. We mock up the SELinux labeling (and support labeling being disabled). I plan to drop the old path and hardcoded tar fixtures once this lands. --- lib/tests/it/fixture.rs | 302 +++++++++++++++++++++++++++++++++- lib/tests/it/main.rs | 35 ++--- 2 files changed, 316 insertions(+), 21 deletions(-) diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index 5afff771b..491f329d9 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -1,12 +1,14 @@ -use anyhow::{Context, Result}; -use camino::{Utf8Path, Utf8PathBuf}; +use anyhow::{anyhow, Context, Result}; +use camino::{Utf8Component, Utf8Path, Utf8PathBuf}; use cap_std::fs::Dir; use cap_std_ext::prelude::CapStdExtCommandExt; use fn_error_context::context; use ostree::cap_std; -use ostree_ext::gio; +use ostree_ext::prelude::*; +use ostree_ext::{gio, glib}; use sh_inline::bash_in; -use std::convert::TryInto; +use std::borrow::Cow; +use std::convert::{TryFrom, TryInto}; use std::io::Write; use std::process::Stdio; use std::sync::Arc; @@ -19,6 +21,214 @@ pub(crate) const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zs pub(crate) const EXAMPLEOS_V1: &[u8] = include_bytes!("fixtures/exampleos-v1.tar.zst"); const TESTREF: &str = "exampleos/x86_64/stable"; +#[derive(Debug)] +enum FileDefType { + Regular(Cow<'static, str>), + Symlink(Cow<'static, Utf8Path>), + Directory, +} + +#[derive(Debug)] +pub(crate) struct FileDef { + uid: u32, + gid: u32, + mode: u32, + path: Cow<'static, Utf8Path>, + ty: FileDefType, +} + +impl TryFrom<&'static str> for FileDef { + type Error = anyhow::Error; + + fn try_from(value: &'static str) -> Result<Self, Self::Error> { + let mut parts = value.split(" "); + let tydef = parts + .next() + .ok_or_else(|| anyhow!("Missing type definition"))?; + let name = parts.next().ok_or_else(|| anyhow!("Missing file name"))?; + let contents = parts.next(); + let contents = move || contents.ok_or_else(|| anyhow!("Missing file contents: {}", value)); + if parts.next().is_some() { + anyhow::bail!("Invalid filedef: {}", value); + } + let ty = match tydef { + "r" => FileDefType::Regular(contents()?.into()), + "l" => FileDefType::Symlink(Cow::Borrowed(contents()?.into())), + "d" => FileDefType::Directory, + _ => anyhow::bail!("Invalid filedef type: {}", value), + }; + Ok(FileDef { + uid: 0, + gid: 0, + mode: 0o644, + path: Cow::Borrowed(name.into()), + ty, + }) + } +} + +fn parse_mode(line: &str) -> Result<(u32, u32, u32)> { + let mut parts = line.split(" ").skip(1); + // An empty mode resets to defaults + let uid = if let Some(u) = parts.next() { + u + } else { + return Ok((0, 0, 0o644)); + }; + let gid = parts.next().ok_or_else(|| anyhow!("Missing gid"))?; + let mode = parts.next().ok_or_else(||
anyhow!("Missing mode"))?; + if parts.next().is_some() { + anyhow::bail!("Invalid mode: {}", line); + } + Ok((uid.parse()?, gid.parse()?, u32::from_str_radix(mode, 8)?)) +} + +impl FileDef { + /// Parse a list of newline-separated file definitions. + fn iter_from(defs: &'static str) -> impl Iterator<Item = Result<FileDef>> { + let mut uid = 0; + let mut gid = 0; + let mut mode = 0o644; + defs.lines() + .filter(|v| !(v.is_empty() || v.starts_with("#"))) + .filter_map(move |line| { + if line.starts_with("m") { + match parse_mode(line) { + Ok(r) => { + uid = r.0; + gid = r.1; + mode = r.2; + None + } + Err(e) => Some(Err(e)), + } + } else { + Some(FileDef::try_from(line).map(|mut def| { + def.uid = uid; + def.gid = gid; + def.mode = mode; + def + })) + } + }) + } +} + +static CONTENTS_V0: &str = indoc::indoc! { r##" +r usr/lib/modules/5.10.18-200.x86_64/vmlinuz this-is-a-kernel +r usr/lib/modules/5.10.18-200.x86_64/initramfs this-is-an-initramfs +m 0 0 755 +r usr/bin/bash the-bash-shell +l usr/bin/sh bash +m 0 0 644 +# Should be the same object +r usr/bin/hardlink-a testlink +r usr/bin/hardlink-b testlink +r usr/etc/someconfig.conf someconfig +m 10 10 644 +r usr/etc/polkit.conf a-polkit-config +m +d boot +d run +m 0 0 1755 +d tmp +"## }; + +#[derive(Debug, PartialEq, Eq)] +enum SeLabel { + Root, + Usr, + UsrLibSystemd, + Boot, + Etc, + EtcSystemConf, +} + +impl SeLabel { + pub(crate) fn from_path(p: &Utf8Path) -> Self { + let rootdir = p.components().find_map(|v| { + if let Utf8Component::Normal(name) = v { + Some(name) + } else { + None + } + }); + let rootdir = if let Some(r) = rootdir { + r + } else { + return SeLabel::Root; + }; + if rootdir == "usr" { + if p.as_str().contains("systemd") { + SeLabel::UsrLibSystemd + } else { + SeLabel::Usr + } + } else if rootdir == "boot" { + SeLabel::Boot + } else if rootdir == "etc" { + if p.as_str().len() % 2 == 0 { + SeLabel::Etc + } else { + SeLabel::EtcSystemConf + } + } else { + SeLabel::Usr + } + } + + pub(crate) fn to_str(&self) -> &'static str { + match self { + SeLabel::Root => "system_u:object_r:root_t:s0", + SeLabel::Usr => "system_u:object_r:usr_t:s0", + SeLabel::UsrLibSystemd => "system_u:object_r:systemd_unit_file_t:s0", + SeLabel::Boot => "system_u:object_r:boot_t:s0", + SeLabel::Etc => "system_u:object_r:etc_t:s0", + SeLabel::EtcSystemConf => "system_u:object_r:system_conf_t:s0", + } + } + + pub(crate) fn new_xattrs(&self) -> glib::Variant { + vec![(b"security.selinux".as_slice(), self.to_str().as_bytes())].to_variant() + } +} + +/// Generate directory metadata variant for root/root 0755 directory with an optional SELinux label +pub(crate) fn create_dirmeta(path: &Utf8Path, selinux: bool) -> glib::Variant { + let finfo = gio::FileInfo::new(); + finfo.set_attribute_uint32("unix::uid", 0); + finfo.set_attribute_uint32("unix::gid", 0); + finfo.set_attribute_uint32("unix::mode", libc::S_IFDIR | 0o755); + let label = if selinux { + Some(SeLabel::from_path(path)) + } else { + None + }; + let xattrs = label.map(|v| v.new_xattrs()); + ostree::create_directory_metadata(&finfo, xattrs.as_ref()).unwrap() +} + +/// Wraps [`create_dirmeta`] and commits it.
+pub(crate) fn require_dirmeta(
+    repo: &ostree::Repo,
+    path: &Utf8Path,
+    selinux: bool,
+) -> Result<String> {
+    let v = create_dirmeta(path, selinux);
+    let r = repo.write_metadata(ostree::ObjectType::DirMeta, None, &v, gio::NONE_CANCELLABLE)?;
+    Ok(r.to_hex())
+}
+
+fn ensure_parent_dirs(
+    mt: &ostree::MutableTree,
+    path: &Utf8Path,
+    metadata_checksum: &str,
+) -> Result<ostree::MutableTree> {
+    let parts = path.components().map(|s| s.as_str()).collect::<Vec<_>>();
+    mt.ensure_parent_dirs(&parts, metadata_checksum)
+        .map_err(Into::into)
+}
+
 pub(crate) struct Fixture {
     // Just holds a reference
     _tempdir: tempfile::TempDir,
@@ -28,6 +238,7 @@ pub(crate) struct Fixture {
     pub(crate) destrepo: ostree::Repo,
 
     pub(crate) format_version: u32,
+    pub(crate) selinux: bool,
 }
 
 impl Fixture {
@@ -74,6 +285,7 @@ impl Fixture {
             srcrepo,
             destrepo,
             format_version: 0,
+            selinux: true,
         })
     }
 
@@ -95,6 +307,88 @@ impl Fixture {
         Ok(r)
     }
 
+    pub(crate) fn write_filedef(&self, root: &ostree::MutableTree, def: &FileDef) -> Result<()> {
+        let parent_path = def.path.parent();
+        let parent = if let Some(parent_path) = parent_path {
+            let meta = require_dirmeta(&self.srcrepo, parent_path, self.selinux)?;
+            Some(ensure_parent_dirs(root, &def.path, meta.as_str())?)
+        } else {
+            None
+        };
+        let parent = parent.as_ref().unwrap_or(root);
+        let name = def.path.file_name().expect("file name");
+        let label = if self.selinux {
+            Some(SeLabel::from_path(&def.path))
+        } else {
+            None
+        };
+        let xattrs = label.map(|v| v.new_xattrs());
+        let xattrs = xattrs.as_ref();
+        let checksum = match &def.ty {
+            FileDefType::Regular(contents) => self.srcrepo.write_regfile_inline(
+                None,
+                0,
+                0,
+                libc::S_IFREG | def.mode,
+                xattrs,
+                contents.as_bytes(),
+                gio::NONE_CANCELLABLE,
+            )?,
+            FileDefType::Symlink(target) => self.srcrepo.write_symlink(
+                None,
+                def.uid,
+                def.gid,
+                xattrs,
+                target.as_str(),
+                gio::NONE_CANCELLABLE,
+            )?,
+            FileDefType::Directory => {
+                let d = parent.ensure_dir(name)?;
+                let meta = require_dirmeta(&self.srcrepo, &def.path, self.selinux)?;
+                d.set_metadata_checksum(meta.as_str());
+                return Ok(());
+            }
+        };
+        parent.replace_file(name, checksum.as_str())?;
+        Ok(())
+    }
+
+    pub(crate) fn commit_filedefs<'a>(
+        &self,
+        defs: impl IntoIterator<Item = Result<FileDef>>,
+    ) -> Result<()> {
+        let root = ostree::MutableTree::new();
+        let cancellable = gio::NONE_CANCELLABLE;
+        let tx = self.srcrepo.auto_transaction(cancellable)?;
+        for def in defs {
+            let def = def?;
+            self.write_filedef(&root, &def)?;
+        }
+        let root = self.srcrepo.write_mtree(&root, cancellable)?;
+        let root = root.downcast_ref::<ostree::RepoFile>().unwrap();
+        let ts = chrono::DateTime::parse_from_rfc2822("Fri, 29 Aug 1997 10:30:42 PST")?.timestamp();
+        let commit = self.srcrepo.write_commit_with_time(
+            None,
+            None,
+            None,
+            None,
+            root,
+            ts as u64,
+            cancellable,
+        )?;
+        self.srcrepo
+            .transaction_set_ref(None, self.testref(), Some(commit.as_str()));
+        tx.commit(cancellable)?;
+
+        Ok(())
+    }
+
+    pub(crate) fn new_v1() -> Result<Self> {
+        let r = Self::new_base()?;
+        r.commit_filedefs(FileDef::iter_from(CONTENTS_V0))?;
+        Ok(r)
+    }
+
     pub(crate) fn testref(&self) -> &'static str {
         TESTREF
     }
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 77200d32c..49aa93be9 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -42,23 +42,24 @@ async fn test_tar_import_empty() -> Result<()> {
 
 #[tokio::test]
 async fn test_tar_export_reproducible() -> Result<()> {
-    let fixture = Fixture::new()?;
-    let (_, rev) = fixture
-        .srcrepo
-        .read_commit(fixture.testref(), gio::NONE_CANCELLABLE)?;
-    let export1 = {
-        let mut
h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; - ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; - h.finish()? - }; - // Artificial delay to flush out mtimes (one second granularity baseline, plus another 100ms for good measure). - std::thread::sleep(std::time::Duration::from_millis(1100)); - let export2 = { - let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; - ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; - h.finish()? - }; - assert_eq!(*export1, *export2); + for fixture in [Fixture::new()?, Fixture::new_v1()?] { + let (_, rev) = fixture + .srcrepo + .read_commit(fixture.testref(), gio::NONE_CANCELLABLE)?; + let export1 = { + let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; + ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; + h.finish()? + }; + // Artificial delay to flush out mtimes (one second granularity baseline, plus another 100ms for good measure). + std::thread::sleep(std::time::Duration::from_millis(1100)); + let export2 = { + let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; + ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; + h.finish()? + }; + assert_eq!(*export1, *export2); + } Ok(()) } From c3d42d2e5b950e1951503902161ba642c3cf6a27 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 17 Feb 2022 12:03:57 -0500 Subject: [PATCH 306/775] tests: Use accessors for src/dest repo Prep for further refactoring. --- lib/tests/it/fixture.rs | 12 ++++- lib/tests/it/main.rs | 99 ++++++++++++++++++++--------------------- 2 files changed, 59 insertions(+), 52 deletions(-) diff --git a/lib/tests/it/fixture.rs b/lib/tests/it/fixture.rs index 491f329d9..1aa633deb 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/tests/it/fixture.rs @@ -234,8 +234,8 @@ pub(crate) struct Fixture { _tempdir: tempfile::TempDir, pub(crate) dir: Arc, pub(crate) path: Utf8PathBuf, - pub(crate) srcrepo: ostree::Repo, - pub(crate) destrepo: ostree::Repo, + srcrepo: ostree::Repo, + destrepo: ostree::Repo, pub(crate) format_version: u32, pub(crate) selinux: bool, @@ -307,6 +307,14 @@ impl Fixture { Ok(r) } + pub(crate) fn srcrepo(&self) -> &ostree::Repo { + &self.srcrepo + } + + pub(crate) fn destrepo(&self) -> &ostree::Repo { + &self.destrepo + } + pub(crate) fn write_filedef(&self, root: &ostree::MutableTree, def: &FileDef) -> Result<()> { let parent_path = def.path.parent(); let parent = if let Some(parent_path) = parent_path { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 49aa93be9..5247dbf7c 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -35,31 +35,30 @@ static TEST_REGISTRY: Lazy = Lazy::new(|| match std::env::var_os("TEST_R #[tokio::test] async fn test_tar_import_empty() -> Result<()> { let fixture = Fixture::new()?; - let r = ostree_ext::tar::import_tar(&fixture.destrepo, tokio::io::empty(), None).await; + let r = ostree_ext::tar::import_tar(fixture.destrepo(), tokio::io::empty(), None).await; assert_err_contains(r, "Commit object not found"); Ok(()) } #[tokio::test] async fn test_tar_export_reproducible() -> Result<()> { - for fixture in [Fixture::new()?, Fixture::new_v1()?] 
{ - let (_, rev) = fixture - .srcrepo - .read_commit(fixture.testref(), gio::NONE_CANCELLABLE)?; - let export1 = { - let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; - ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; - h.finish()? - }; - // Artificial delay to flush out mtimes (one second granularity baseline, plus another 100ms for good measure). - std::thread::sleep(std::time::Duration::from_millis(1100)); - let export2 = { - let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; - ostree_ext::tar::export_commit(&fixture.srcrepo, rev.as_str(), &mut h, None)?; - h.finish()? - }; - assert_eq!(*export1, *export2); - } + let fixture = Fixture::new_v1()?; + let (_, rev) = fixture + .srcrepo() + .read_commit(fixture.testref(), gio::NONE_CANCELLABLE)?; + let export1 = { + let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; + ostree_ext::tar::export_commit(fixture.srcrepo(), rev.as_str(), &mut h, None)?; + h.finish()? + }; + // Artificial delay to flush out mtimes (one second granularity baseline, plus another 100ms for good measure). + std::thread::sleep(std::time::Duration::from_millis(1100)); + let export2 = { + let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; + ostree_ext::tar::export_commit(fixture.srcrepo(), rev.as_str(), &mut h, None)?; + h.finish()? + }; + assert_eq!(*export1, *export2); Ok(()) } @@ -68,8 +67,8 @@ async fn test_tar_import_signed() -> Result<()> { let fixture = Fixture::new()?; let test_tar = fixture.export_tar()?; - let rev = fixture.srcrepo.require_rev(fixture.testref())?; - let (commitv, _) = fixture.srcrepo.load_commit(rev.as_str())?; + let rev = fixture.srcrepo().require_rev(fixture.testref())?; + let (commitv, _) = fixture.srcrepo().load_commit(rev.as_str())?; assert_eq!( ostree::commit_get_content_checksum(&commitv) .unwrap() @@ -80,7 +79,7 @@ async fn test_tar_import_signed() -> Result<()> { // Verify we fail with an unknown remote. 
let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); let r = ostree_ext::tar::import_tar( - &fixture.destrepo, + fixture.destrepo(), src_tar, Some(TarImportOptions { remote: Some("nosuchremote".to_string()), @@ -94,11 +93,11 @@ async fn test_tar_import_signed() -> Result<()> { opts.insert("gpg-verify", &true); opts.insert("custom-backend", &"ostree-rs-ext"); fixture - .destrepo + .destrepo() .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); let r = ostree_ext::tar::import_tar( - &fixture.destrepo, + fixture.destrepo(), src_tar, Some(TarImportOptions { remote: Some("myremote".to_string()), @@ -113,14 +112,14 @@ async fn test_tar_import_signed() -> Result<()> { )?; let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); let imported = ostree_ext::tar::import_tar( - &fixture.destrepo, + fixture.destrepo(), src_tar, Some(TarImportOptions { remote: Some("myremote".to_string()), }), ) .await?; - let (commitdata, state) = fixture.destrepo.load_commit(&imported)?; + let (commitdata, state) = fixture.destrepo().load_commit(&imported)?; assert_eq!( EXAMPLEOS_CONTENT_CHECKSUM, ostree::commit_get_content_checksum(&commitdata) @@ -274,8 +273,8 @@ async fn test_tar_import_export() -> Result<()> { let src_tar = tokio::fs::File::from_std(fixture.dir.open(p)?.into_std()); let imported_commit: String = - ostree_ext::tar::import_tar(&fixture.destrepo, src_tar, None).await?; - let (commitdata, _) = fixture.destrepo.load_commit(&imported_commit)?; + ostree_ext::tar::import_tar(fixture.destrepo(), src_tar, None).await?; + let (commitdata, _) = fixture.destrepo().load_commit(&imported_commit)?; assert_eq!( EXAMPLEOS_CONTENT_CHECKSUM, ostree::commit_get_content_checksum(&commitdata) @@ -312,7 +311,7 @@ async fn test_tar_write() -> Result<()> { let src = fixture.dir.open(tmptar)?; fixture.dir.remove_file(tmptar)?; let src = tokio::fs::File::from_std(src.into_std()); - let r = ostree_ext::tar::write_tar(&fixture.destrepo, src, "layer", None).await?; + let r = ostree_ext::tar::write_tar(fixture.destrepo(), src, "layer", None).await?; bash_in!( &fixture.dir, "ostree --repo=dest/repo ls ${layer_commit} /usr/etc/someconfig.conf >/dev/null", @@ -345,7 +344,7 @@ fn skopeo_inspect_config(imgref: &str) -> Result Result<()> { let fixture = Fixture::new()?; let testrev = fixture - .srcrepo + .srcrepo() .require_rev(fixture.testref()) .context("Failed to resolve ref")?; @@ -368,7 +367,7 @@ async fn test_container_import_export() -> Result<()> { ..Default::default() }; let digest = ostree_ext::container::encapsulate( - &fixture.srcrepo, + fixture.srcrepo(), fixture.testref(), &config, Some(opts), @@ -414,7 +413,7 @@ async fn test_container_import_export() -> Result<()> { sigverify: SignatureSource::OstreeRemote("unknownremote".to_string()), imgref: srcoci_imgref.clone(), }; - let r = ostree_ext::container::unencapsulate(&fixture.destrepo, &srcoci_unknownremote, None) + let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_unknownremote, None) .await .context("importing"); assert_err_contains(r, r#"Remote "unknownremote" not found"#); @@ -424,7 +423,7 @@ async fn test_container_import_export() -> Result<()> { opts.insert("gpg-verify", &true); opts.insert("custom-backend", &"ostree-rs-ext"); fixture - .destrepo + .destrepo() .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; bash_in!( &fixture.dir, @@ -436,7 +435,7 @@ async fn 
test_container_import_export() -> Result<()> { sigverify: SignatureSource::OstreeRemote("myremote".to_string()), imgref: srcoci_imgref.clone(), }; - let import = ostree_ext::container::unencapsulate(&fixture.destrepo, &srcoci_verified, None) + let import = ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_verified, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); @@ -446,7 +445,7 @@ async fn test_container_import_export() -> Result<()> { { let fixture = Fixture::new()?; let import = - ostree_ext::container::unencapsulate(&fixture.destrepo, &srcoci_unverified, None) + ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_unverified, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); @@ -478,7 +477,7 @@ async fn test_container_write_derive() -> Result<()> { let fixture = Fixture::new()?; let base_oci_path = &fixture.path.join("exampleos.oci"); let _digest = ostree_ext::container::encapsulate( - &fixture.srcrepo, + fixture.srcrepo(), fixture.testref(), &Config { cmd: Some(vec!["/bin/bash".to_string()]), @@ -525,16 +524,16 @@ async fn test_container_write_derive() -> Result<()> { }, }; // There shouldn't be any container images stored yet. - let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; + let images = ostree_ext::container::store::list_images(fixture.destrepo())?; assert!(images.is_empty()); // Verify importing a derive dimage fails - let r = ostree_ext::container::unencapsulate(&fixture.destrepo, &derived_ref, None).await; + let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &derived_ref, None).await; assert_err_contains(r, "Expected 1 layer, found 2"); // Pull a derived image - two layers, new base plus one layer. let mut imp = ostree_ext::container::store::LayeredImageImporter::new( - &fixture.destrepo, + fixture.destrepo(), &derived_ref, Default::default(), ) @@ -551,12 +550,12 @@ async fn test_container_write_derive() -> Result<()> { } let import = imp.import(prep).await?; // We should have exactly one image stored. - let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; + let images = ostree_ext::container::store::list_images(fixture.destrepo())?; assert_eq!(images.len(), 1); assert_eq!(images[0], derived_ref.imgref.to_string()); let imported_commit = &fixture - .destrepo + .destrepo() .load_commit(import.merge_commit.as_str())? .0; let digest = ostree_ext::container::store::manifest_digest_from_commit(imported_commit)?; @@ -584,7 +583,7 @@ async fn test_container_write_derive() -> Result<()> { // Import again, but there should be no changes. let mut imp = ostree_ext::container::store::LayeredImageImporter::new( - &fixture.destrepo, + fixture.destrepo(), &derived_ref, Default::default(), ) @@ -601,7 +600,7 @@ async fn test_container_write_derive() -> Result<()> { std::fs::remove_dir_all(derived_path)?; std::fs::rename(derived2_path, derived_path)?; let mut imp = ostree_ext::container::store::LayeredImageImporter::new( - &fixture.destrepo, + fixture.destrepo(), &derived_ref, Default::default(), ) @@ -621,7 +620,7 @@ async fn test_container_write_derive() -> Result<()> { // New commit. assert_ne!(import.merge_commit, already_present.merge_commit); // We should still have exactly one image stored. 
- let images = ostree_ext::container::store::list_images(&fixture.destrepo)?; + let images = ostree_ext::container::store::list_images(fixture.destrepo())?; assert_eq!(images[0], derived_ref.imgref.to_string()); assert_eq!(images.len(), 1); @@ -640,7 +639,7 @@ async fn test_container_write_derive() -> Result<()> { // And there should be no changes on upgrade again. let mut imp = ostree_ext::container::store::LayeredImageImporter::new( - &fixture.destrepo, + fixture.destrepo(), &derived_ref, Default::default(), ) @@ -661,7 +660,7 @@ async fn test_container_write_derive() -> Result<()> { None, gio::NONE_CANCELLABLE, )?; - ostree_ext::container::store::copy(&fixture.destrepo, &destrepo2, &derived_ref).await?; + ostree_ext::container::store::copy(fixture.destrepo(), &destrepo2, &derived_ref).await?; let images = ostree_ext::container::store::list_images(&destrepo2)?; assert_eq!(images.len(), 1); @@ -683,7 +682,7 @@ async fn test_container_import_export_registry() -> Result<()> { let fixture = Fixture::new()?; let testref = fixture.testref(); let testrev = fixture - .srcrepo + .srcrepo() .require_rev(testref) .context("Failed to resolve ref")?; let src_imgref = ImageReference { @@ -695,7 +694,7 @@ async fn test_container_import_export_registry() -> Result<()> { ..Default::default() }; let digest = - ostree_ext::container::encapsulate(&fixture.srcrepo, testref, &config, None, &src_imgref) + ostree_ext::container::encapsulate(fixture.srcrepo(), testref, &config, None, &src_imgref) .await .context("exporting to registry")?; let mut digested_imgref = src_imgref.clone(); @@ -705,7 +704,7 @@ async fn test_container_import_export_registry() -> Result<()> { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: digested_imgref, }; - let import = ostree_ext::container::unencapsulate(&fixture.destrepo, &import_ref, None) + let import = ostree_ext::container::unencapsulate(fixture.destrepo(), &import_ref, None) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); @@ -717,7 +716,7 @@ fn test_diff() -> Result<()> { let mut fixture = Fixture::new()?; fixture.update()?; let from = &format!("{}^", fixture.testref()); - let repo = &fixture.srcrepo; + let repo = fixture.srcrepo(); let subdir: Option<&str> = None; let diff = ostree_ext::diff::diff(repo, from, fixture.testref(), subdir)?; assert!(diff.subdir.is_none()); From b8dc59e8d2c0a007a3ecd3a09706e857331dc0ec Mon Sep 17 00:00:00 2001 From: Joseph Marrero Date: Tue, 15 Feb 2022 10:32:29 -0500 Subject: [PATCH 307/775] lib/src/tar/write: make sure we add the links when filtering the tar Co-authored-by: Colin Walters --- lib/src/tar/write.rs | 18 +++++++++++++++++- lib/tests/it/fixtures/hlinks.tar.gz | Bin 0 -> 9981 bytes lib/tests/it/main.rs | 11 +++++++++++ 3 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 lib/tests/it/fixtures/hlinks.tar.gz diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index 7d251b983..34ab7944d 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -156,7 +156,23 @@ pub(crate) fn filter_tar( }; let mut header = entry.header().clone(); - dest.append_data(&mut header, normalized, entry)?; + + // Need to use the entry.link_name() not the header.link_name() + // api as the header api does not handle long paths: + // https://github.com/alexcrichton/tar-rs/issues/192 + match entry.header().entry_type() { + tar::EntryType::Link | tar::EntryType::Symlink => { + let target = entry.link_name()?.ok_or_else(|| anyhow!("Invalid link"))?; + let target = target + .as_os_str() + 
.to_str()
+                    .ok_or_else(|| anyhow!("Non-utf8 link"))?;
+                dest.append_link(&mut header, &normalized, target)?;
+            }
+            _ => {
+                dest.append_data(&mut header, normalized, entry)?;
+            }
+        }
     }
     dest.into_inner()?.flush()?;
     Ok(filtered)
diff --git a/lib/tests/it/fixtures/hlinks.tar.gz b/lib/tests/it/fixtures/hlinks.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..0bbc06d4960d198f2639e6bafc815ffc186b115f
GIT binary patch
literal 9981
[9981 bytes of base85-encoded binary data elided]

literal 0
HcmV?d00001

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 77200d32c..cf0abcfda 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -15,6 +15,7 @@ use std::process::Command;
 
 use fixture::Fixture;
 
+const EXAMPLE_TAR_LAYER: &[u8] = include_bytes!("fixtures/hlinks.tar.gz");
 const EXAMPLEOS_CONTENT_CHECKSUM: &str =
     "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
 const TEST_REGISTRY_DEFAULT: &str = "localhost:5000";
@@ -324,6 +325,16 @@ async fn test_tar_write() -> Result<()> {
     Ok(())
 }
 
+#[tokio::test]
+async fn test_tar_write_tar_layer() -> Result<()> {
+    let fixture = Fixture::new()?;
+    let uncompressed_tar = tokio::io::BufReader::new(
+        async_compression::tokio::bufread::GzipDecoder::new(EXAMPLE_TAR_LAYER),
+    );
+    ostree_ext::tar::write_tar(&fixture.destrepo, uncompressed_tar, "test", None).await?;
+    Ok(())
+}
+
 fn skopeo_inspect(imgref: &str) -> Result<String> {
     let out = Command::new("skopeo")
         .args(&["inspect", imgref])

From e39d380fe90da144a937e360247caba11c56fe04 Mon Sep 17 00:00:00 2001
From: Luca BRUNO
Date: Wed, 23 Feb 2022 09:16:00 +0000
Subject: [PATCH 308/775] tar/tests: fix build failure

---
 lib/tests/it/main.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 5930f287a..68ce7dcc6 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -331,7 +331,7 @@ async fn test_tar_write_tar_layer() -> Result<()> {
     let uncompressed_tar = tokio::io::BufReader::new(
         async_compression::tokio::bufread::GzipDecoder::new(EXAMPLE_TAR_LAYER),
     );
-    ostree_ext::tar::write_tar(&fixture.destrepo, uncompressed_tar, "test", None).await?;
+    ostree_ext::tar::write_tar(&fixture.destrepo(), uncompressed_tar, "test", None).await?;
     Ok(())
 }

From b86dd9b4f29a4b7a2b4ac440db1d1fc33db3b29c Mon Sep 17 00:00:00 2001
From: Luca BRUNO
Date: Wed, 23 Feb 2022 09:17:38 +0000
Subject: [PATCH 309/775] tar/export: add 'state' and 'extensions' subdirs

---
 lib/src/tar/export.rs | 11 +++++++++++
 lib/tests/it/main.rs  |  4 ++++
 2 files changed, 15 insertions(+)

diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 0c7237e7f..0e2d3e9c5 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -184,6 +184,12 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
             let path: Utf8PathBuf = format!("{}/{:02x}", objdir, d).into();
             self.append_default_dir(&path)?;
         }
+        // Extensions subdirectory
+        {
+            let path: Utf8PathBuf = format!("{}/repo/extensions", OSTREEDIR).into();
+            self.append_default_dir(&path)?;
+        }
+
         // Tmp subdirectories
         for d in ["tmp", "tmp/cache"] {
             let path: Utf8PathBuf = format!("{}/repo/{}", OSTREEDIR, d).into();
@@ -194,6 +200,11 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
             let path: Utf8PathBuf = format!("{}/repo/{}", OSTREEDIR, d).into();
             self.append_default_dir(&path)?;
         }
+        // State subdirectory
+        {
+            let path: Utf8PathBuf = format!("{}/repo/state", OSTREEDIR).into();
+            self.append_default_dir(&path)?;
+        }
         // The special `repo/xattrs` directory used in v0 format.
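        // (Editorial aside, not part of this patch: in the v0 format, each
        // distinct xattr set is content-addressed -- serialized once under
        //   sysroot/ostree/repo/xattrs/<sha256-of-serialized-xattrs>
        // and referenced from individual objects via hardlinks; see
        // `v0_xattrs_path` and the duplicate-xattrs fix later in this series.)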
if self.options.format_version == 0 { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 68ce7dcc6..047e52880 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -215,6 +215,7 @@ fn test_tar_export_structure() -> Result<()> { let expected = [ ("sysroot/config", Regular, 0o644), ("sysroot/ostree/repo", Directory, 0o755), + ("sysroot/ostree/repo/extensions", Directory, 0o755), ("sysroot/ostree/repo/objects/00", Directory, 0o755), ("sysroot/ostree/repo/objects/23", Directory, 0o755), ("sysroot/ostree/repo/objects/77", Directory, 0o755), @@ -225,6 +226,7 @@ fn test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/refs/heads", Directory, 0o755), ("sysroot/ostree/repo/refs/mirrors", Directory, 0o755), ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), + ("sysroot/ostree/repo/state", Directory, 0o755), ("sysroot/ostree/repo/tmp", Directory, 0o755), ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), ("sysroot/ostree/repo/xattrs", Directory, 0o755), @@ -244,6 +246,7 @@ fn test_tar_export_structure() -> Result<()> { let expected = [ ("sysroot/ostree/repo", Directory, 0o755), ("sysroot/ostree/repo/config", Regular, 0o644), + ("sysroot/ostree/repo/extensions", Directory, 0o755), ("sysroot/ostree/repo/objects/00", Directory, 0o755), ("sysroot/ostree/repo/objects/23", Directory, 0o755), ("sysroot/ostree/repo/objects/77", Directory, 0o755), @@ -254,6 +257,7 @@ fn test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/refs/heads", Directory, 0o755), ("sysroot/ostree/repo/refs/mirrors", Directory, 0o755), ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), + ("sysroot/ostree/repo/state", Directory, 0o755), ("sysroot/ostree/repo/tmp", Directory, 0o755), ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), ("usr", Directory, 0o755), From 0b8c595e5460d2d1c3474b98e5222ea1ab96eade Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Wed, 23 Feb 2022 09:22:21 +0000 Subject: [PATCH 310/775] tar/export: unify creation of all standard repo dirs --- lib/src/tar/export.rs | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 0e2d3e9c5..f402a86cf 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -184,27 +184,21 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let path: Utf8PathBuf = format!("{}/{:02x}", objdir, d).into(); self.append_default_dir(&path)?; } - // Extensions subdirectory - { - let path: Utf8PathBuf = format!("{}/repo/extensions", OSTREEDIR).into(); - self.append_default_dir(&path)?; - } - - // Tmp subdirectories - for d in ["tmp", "tmp/cache"] { - let path: Utf8PathBuf = format!("{}/repo/{}", OSTREEDIR, d).into(); - self.append_default_dir(&path)?; - } - // Refs subdirectories - for d in ["refs", "refs/heads", "refs/mirrors", "refs/remotes"] { + // Standard repo subdirectories. + let subdirs = [ + "extensions", + "refs", + "refs/heads", + "refs/mirrors", + "refs/remotes", + "state", + "tmp", + "tmp/cache", + ]; + for d in subdirs { let path: Utf8PathBuf = format!("{}/repo/{}", OSTREEDIR, d).into(); self.append_default_dir(&path)?; } - // State subdirectory - { - let path: Utf8PathBuf = format!("{}/repo/state", OSTREEDIR).into(); - self.append_default_dir(&path)?; - } // The special `repo/xattrs` directory used in v0 format. 
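        // (Editorial aside, not part of this patch: format version 1 drops the
        // special `repo/xattrs` directory and instead writes each xattr set as
        // a content-addressed `.file-xattrs` object under objects/, which the
        // consuming objects then reference -- see `v1_xattrs_object_path` in a
        // later patch of this series.)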
if self.options.format_version == 0 { From 066e4d99bb441d60c503eb493ff4058969851ef5 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Thu, 24 Feb 2022 09:02:19 +0000 Subject: [PATCH 311/775] ostree-ext: release 0.6.4 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index a89face5f..b8f7ca397 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.6.3" +version = "0.6.4" [dependencies] anyhow = "1.0" From a77a57069280c102b05323cff3179ba3243cf36d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 24 Feb 2022 16:11:16 -0500 Subject: [PATCH 312/775] tar/export: Fix duplicate xattrs in tar stream The containers/storage stack fails if it sees duplicate files. Fix the bug introduced in the format version rework that passed the object checksum instead of the xattrs checksum into the "seen" map. While we have the patient open, I noticed that we can optimize things a bit and pass ownership of the checksum into the map. --- lib/src/tar/export.rs | 14 +++++++------- lib/tests/it/main.rs | 8 +++++++- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index f402a86cf..504f39871 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -301,15 +301,15 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let xattrs_checksum = { let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), xattrs_data)?; - &hex::encode(digest) + hex::encode(digest) }; if self.options.format_version == 0 { - let path = v0_xattrs_path(xattrs_checksum); + let path = v0_xattrs_path(&xattrs_checksum); // Write xattrs content into a separate directory. - if !self.wrote_xattrs.contains(xattrs_checksum) { - let inserted = self.wrote_xattrs.insert(checksum.to_string()); + if !self.wrote_xattrs.contains(&xattrs_checksum) { + let inserted = self.wrote_xattrs.insert(xattrs_checksum); debug_assert!(inserted); self.append_default_data(&path, xattrs_data)?; } @@ -319,11 +319,11 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.append_default_hardlink(&objpath, &path)?; } } else if self.options.format_version == 1 { - let path = v1_xattrs_object_path(xattrs_checksum); + let path = v1_xattrs_object_path(&xattrs_checksum); // Write xattrs content into a separate `.file-xattrs` object. - if !self.wrote_xattrs.contains(xattrs_checksum) { - let inserted = self.wrote_xattrs.insert(checksum.to_string()); + if !self.wrote_xattrs.contains(&xattrs_checksum) { + let inserted = self.wrote_xattrs.insert(xattrs_checksum); debug_assert!(inserted); self.append_default_data(&path, xattrs_data)?; } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 047e52880..57b565773 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -10,7 +10,7 @@ use ostree_ext::container::{ use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; use sh_inline::bash_in; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::process::Command; use fixture::Fixture; @@ -156,11 +156,16 @@ fn validate_tar_expected( let mut expected: HashMap<&'static str, TarExpected> = expected.into_iter().map(|exp| (exp.path, exp)).collect(); let entries = t.map(|e| e.unwrap()); + let mut seen_paths = HashSet::new(); // Verify we're injecting directories, fixes the absence of `/tmp` in our // images for example. 
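    // (Context, added editorially: per the commit message above, the
    // containers/storage stack rejects layer tarballs that contain the same
    // path twice, so the loop below now records every path it has seen in a
    // HashSet and fails on any duplicate.)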
for entry in entries { let header = entry.header(); let entry_path = entry.path().unwrap().to_string_lossy().into_owned(); + if seen_paths.contains(&entry_path) { + anyhow::bail!("Duplicate path: {}", entry_path); + } + seen_paths.insert(entry_path.clone()); if let Some(exp) = expected.remove(entry_path.as_str()) { assert_eq!(header.entry_type(), exp.etype, "{}", entry_path); let is_old_object = format_version == 0; @@ -230,6 +235,7 @@ fn test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/tmp", Directory, 0o755), ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), ("sysroot/ostree/repo/xattrs", Directory, 0o755), + ("sysroot/ostree/repo/xattrs/44299b6a1738aab86de5966507fbe369af2ab421e1c6eb6e797054831430d92c", Regular, 0o644), ("usr", Directory, 0o755), ]; validate_tar_expected( From a06f1fbf5de34f11aa4514c32646d7c01efa35db Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 24 Feb 2022 16:30:29 -0500 Subject: [PATCH 313/775] Release 0.6.5 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index b8f7ca397..fc2b8a097 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.6.4" +version = "0.6.5" [dependencies] anyhow = "1.0" From 344cdb132d50d6c3cba1403fcb45f0a9a6fc6062 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 24 Feb 2022 17:06:10 -0500 Subject: [PATCH 314/775] Fix a few clippy lints from new Rust 1.59 version --- lib/src/container/encapsulate.rs | 2 +- lib/src/refescape.rs | 3 +-- lib/src/tar/import.rs | 14 +++++--------- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index d6c6a3e3a..e1b3ff17f 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -118,7 +118,7 @@ fn build_oci( labels.insert(OSTREE_COMMIT_LABEL.into(), commit.into()); - for (k, v) in config.labels.iter().map(|k| k.iter()).flatten() { + for (k, v) in config.labels.iter().flat_map(|k| k.iter()) { labels.insert(k.into(), v.into()); } // Lookup the cmd embedded in commit metadata diff --git a/lib/src/refescape.rs b/lib/src/refescape.rs index a472a98ac..f8bd8f6d2 100644 --- a/lib/src/refescape.rs +++ b/lib/src/refescape.rs @@ -129,8 +129,7 @@ fn unescape_for_ref(s: &str) -> Result { pub fn unprefix_unescape_ref(prefix: &str, ostree_ref: &str) -> Result { let rest = ostree_ref .strip_prefix(prefix) - .map(|s| s.strip_prefix('/')) - .flatten() + .and_then(|s| s.strip_prefix('/')) .ok_or_else(|| { anyhow::anyhow!( "ref does not match expected prefix {}/: {}", diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 54a4e1b46..a5ecc349d 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -114,8 +114,7 @@ fn parse_object_entry_path(path: &Utf8Path) -> Result<(&str, &Utf8Path, &str)> { // The "sharded" commit directory. let parentname = path .parent() - .map(|p| p.file_name()) - .flatten() + .and_then(|p| p.file_name()) .ok_or_else(|| anyhow!("Invalid path (no parent) {}", path))?; if parentname.len() != 2 { return Err(anyhow!("Invalid checksum parent {}", parentname)); @@ -420,21 +419,18 @@ impl Importer { // Extract the xattrs checksum from the link target or from the content (v1). // Later, it will be used as the key for a lookup into the `self.xattrs` cache. 
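        // (Editorial aside: the hunk below is mechanical -- it folds the
        // "declare, then assign in every arm" form into a single
        // `let xattrs_checksum = match ... ;` expression as part of the
        // Rust 1.59 clippy cleanup. The `Link` arm still resolves the xattrs
        // object named by the link target, while the `Regular` arm caches the
        // inline xattrs content.)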
- let xattrs_checksum; - match entry.header().entry_type() { + let xattrs_checksum = match entry.header().entry_type() { Link => { let link_target = entry .link_name()? .ok_or_else(|| anyhow!("No xattrs link content for {}", checksum))?; let xattr_target = Utf8Path::from_path(&*link_target) .ok_or_else(|| anyhow!("Invalid non-UTF8 xattrs link {}", checksum))?; - xattrs_checksum = parse_xattrs_link_target(xattr_target)?; - } - Regular => { - xattrs_checksum = self.cache_xattrs_content(entry, None)?; + parse_xattrs_link_target(xattr_target)? } + Regular => self.cache_xattrs_content(entry, None)?, x => bail!("Unexpected xattrs type '{:?}' found for {}", x, checksum), - } + }; // Now xattrs are properly cached for the next content object in the stream, // which should match `checksum`. From 9bc80510e1f680f340849170fdd344301b2cd677 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 28 Feb 2022 09:59:08 -0500 Subject: [PATCH 315/775] ci: Drop hack to build skopeo A new enough version is now in Fedora. --- ci/installdeps.sh | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/ci/installdeps.sh b/ci/installdeps.sh index 15e668dfe..ad19d06ed 100755 --- a/ci/installdeps.sh +++ b/ci/installdeps.sh @@ -4,14 +4,5 @@ set -xeuo pipefail # Always pull ostree from updates-testing to avoid the bodhi wait dnf -y --enablerepo=updates-testing update ostree-devel -# Pull the code from https://github.com/containers/skopeo/pull/1476 -# if necessary. -if ! skopeo experimental-image-proxy --help &>/dev/null; then - dnf -y install dnf-utils - dnf builddep -y skopeo - git clone --depth=1 https://github.com/containers/skopeo - cd skopeo - make - install -m 0755 bin/skopeo /usr/bin/ -fi - +# Our tests depend on this +dnf -y install skopeo From 0c16fd5b52bd01968a67e69a12a2a241a5372b4b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 28 Feb 2022 10:04:56 -0500 Subject: [PATCH 316/775] ci: Install ostree from coreos/continuous copr Mainly to pick up https://github.com/ostreedev/ostree/pull/2548 What we really want of course is to have FCOS and build containers that consume the continuous directly. This is just dipping our toes in that water. --- ci/installdeps.sh | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/ci/installdeps.sh b/ci/installdeps.sh index ad19d06ed..a0dcc53df 100755 --- a/ci/installdeps.sh +++ b/ci/installdeps.sh @@ -6,3 +6,20 @@ dnf -y --enablerepo=updates-testing update ostree-devel # Our tests depend on this dnf -y install skopeo + +# For some reason dnf copr enable -y says there are no builds? +cat >/etc/yum.repos.d/coreos-continuous.repo << 'EOF' +[copr:copr.fedorainfracloud.org:group_CoreOS:continuous] +name=Copr repo for continuous owned by @CoreOS +baseurl=https://download.copr.fedorainfracloud.org/results/@CoreOS/continuous/fedora-$releasever-$basearch/ +type=rpm-md +skip_if_unavailable=True +gpgcheck=1 +gpgkey=https://download.copr.fedorainfracloud.org/results/@CoreOS/continuous/pubkey.gpg +repo_gpgcheck=0 +enabled=1 +enabled_metadata=1 +EOF + +# For now pull ostree from git +dnf update -y ostree From b516c247ce36d3627b6979c964028cceb2b1133b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 17 Feb 2022 17:33:24 -0500 Subject: [PATCH 317/775] tests: More fixture rework, port test-diff to new path Currently the fixture is just built during `cargo test`; to help debug it I wanted to support it being part of our main shared library in the "integrationtest" path. 
For example I want to add a CLI method to stand up the fixture directory (but not delete it). But the other big thing going on here is that we now support updating commits fully in memory, and so the diff API is ported to use this. This requires https://github.com/ostreedev/ostree/pull/2548 --- lib/Cargo.toml | 7 +- lib/{tests/it => src}/fixture.rs | 142 +++++++++++------- .../it => src}/fixtures/exampleos-v1.tar.zst | Bin .../it => src}/fixtures/exampleos.tar.zst | Bin .../fixtures/ostree-gpg-test-home.tar.gz | Bin lib/src/integrationtest.rs | 1 - lib/src/lib.rs | 2 + lib/tests/it/main.rs | 22 ++- 8 files changed, 112 insertions(+), 62 deletions(-) rename lib/{tests/it => src}/fixture.rs (74%) rename lib/{tests/it => src}/fixtures/exampleos-v1.tar.zst (100%) rename lib/{tests/it => src}/fixtures/exampleos.tar.zst (100%) rename lib/{tests/it => src}/fixtures/ostree-gpg-test-home.tar.gz (100%) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index fc2b8a097..e5fba266c 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -43,10 +43,11 @@ tokio-util = { features = ["io-util"], version = "0.6.9" } tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" +indoc = { version = "1.0.3", optional = true } +sh-inline = { version = "0.2", features = ["cap-std-ext"], optional = true } + [dev-dependencies] -indoc = "1.0.3" quickcheck = "1" -sh-inline = { version = "0.2", features = ["cap-std-ext"] } # https://github.com/rust-lang/cargo/issues/2911 # https://github.com/rust-lang/rfcs/pull/1956 ostree-ext = { path = ".", features = ["internal-testing-api"] } @@ -56,5 +57,5 @@ features = ["dox"] [features] dox = ["ostree/dox"] -internal-testing-api = [] +internal-testing-api = ["sh-inline", "indoc"] proxy_v0_2_3 = ["containers-image-proxy/proxy_v0_2_3"] diff --git a/lib/tests/it/fixture.rs b/lib/src/fixture.rs similarity index 74% rename from lib/tests/it/fixture.rs rename to lib/src/fixture.rs index 1aa633deb..e4e1fd748 100644 --- a/lib/tests/it/fixture.rs +++ b/lib/src/fixture.rs @@ -1,15 +1,21 @@ +//! Test suite fixture. Should only be used by this library. 
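//! (Editorial summary, not in the patch: after this move the fixture builds
//! its source/destination repos in a tempdir, writes reproducible commits
//! from static file definitions, and mocks SELinux labeling, so it can back
//! both the unit tests and the "integrationtest" path described above.)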
+ +#![allow(missing_docs)] + +use crate::prelude::*; +use crate::{gio, glib}; use anyhow::{anyhow, Context, Result}; use camino::{Utf8Component, Utf8Path, Utf8PathBuf}; use cap_std::fs::Dir; use cap_std_ext::prelude::CapStdExtCommandExt; +use chrono::TimeZone; use fn_error_context::context; use ostree::cap_std; -use ostree_ext::prelude::*; -use ostree_ext::{gio, glib}; use sh_inline::bash_in; use std::borrow::Cow; use std::convert::{TryFrom, TryInto}; use std::io::Write; +use std::ops::Add; use std::process::Stdio; use std::sync::Arc; @@ -17,8 +23,7 @@ const OSTREE_GPG_HOME: &[u8] = include_bytes!("fixtures/ostree-gpg-test-home.tar const TEST_GPG_KEYID_1: &str = "7FCA23D8472CDAFA"; #[allow(dead_code)] const TEST_GPG_KEYFPR_1: &str = "5E65DE75AB1C501862D476347FCA23D8472CDAFA"; -pub(crate) const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst"); -pub(crate) const EXAMPLEOS_V1: &[u8] = include_bytes!("fixtures/exampleos-v1.tar.zst"); +pub const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst"); const TESTREF: &str = "exampleos/x86_64/stable"; #[derive(Debug)] @@ -29,7 +34,7 @@ enum FileDefType { } #[derive(Debug)] -pub(crate) struct FileDef { +pub struct FileDef { uid: u32, gid: u32, mode: u32, @@ -41,7 +46,7 @@ impl TryFrom<&'static str> for FileDef { type Error = anyhow::Error; fn try_from(value: &'static str) -> Result { - let mut parts = value.split(" "); + let mut parts = value.split(' '); let tydef = parts .next() .ok_or_else(|| anyhow!("Missing type definition"))?; @@ -68,7 +73,7 @@ impl TryFrom<&'static str> for FileDef { } fn parse_mode(line: &str) -> Result<(u32, u32, u32)> { - let mut parts = line.split(" ").skip(1); + let mut parts = line.split(' ').skip(1); // An empty mode resets to defaults let uid = if let Some(u) = parts.next() { u @@ -85,14 +90,14 @@ fn parse_mode(line: &str) -> Result<(u32, u32, u32)> { impl FileDef { /// Parse a list of newline-separated file definitions. 
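    /// (Illustrative sketch, added editorially, of the grammar implemented
    /// above: `r <path> <contents>` defines a regular file, `l <path> <target>`
    /// a symlink, and `d <path>` a directory, while an `m [<uid> <gid> <mode>]`
    /// line updates the defaults applied to subsequent entries -- e.g.
    /// `m 0 0 755` followed by `r usr/bin/bash the-bash-shell` yields a
    /// root-owned, mode 0o755 regular file.)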
- fn iter_from(defs: &'static str) -> impl Iterator> { + pub fn iter_from(defs: &'static str) -> impl Iterator> { let mut uid = 0; let mut gid = 0; let mut mode = 0o644; defs.lines() - .filter(|v| !(v.is_empty() || v.starts_with("#"))) + .filter(|v| !(v.is_empty() || v.starts_with('#'))) .filter_map(move |line| { - if line.starts_with("m") { + if line.starts_with('m') { match parse_mode(line) { Ok(r) => { uid = r.0; @@ -145,7 +150,7 @@ enum SeLabel { } impl SeLabel { - pub(crate) fn from_path(p: &Utf8Path) -> Self { + pub fn from_path(p: &Utf8Path) -> Self { let rootdir = p.components().find_map(|v| { if let Utf8Component::Normal(name) = v { Some(name) @@ -177,7 +182,7 @@ impl SeLabel { } } - pub(crate) fn to_str(&self) -> &'static str { + pub fn to_str(&self) -> &'static str { match self { SeLabel::Root => "system_u:object_r:root_t:s0", SeLabel::Usr => "system_u:object_r:usr_t:s0", @@ -188,13 +193,13 @@ impl SeLabel { } } - pub(crate) fn new_xattrs(&self) -> glib::Variant { - vec![(b"security.selinux".as_slice(), self.to_str().as_bytes())].to_variant() + pub fn new_xattrs(&self) -> glib::Variant { + vec![("security.selinux".as_bytes(), self.to_str().as_bytes())].to_variant() } } /// Generate directory metadata variant for root/root 0755 directory with an optional SELinux label -pub(crate) fn create_dirmeta(path: &Utf8Path, selinux: bool) -> glib::Variant { +pub fn create_dirmeta(path: &Utf8Path, selinux: bool) -> glib::Variant { let finfo = gio::FileInfo::new(); finfo.set_attribute_uint32("unix::uid", 0); finfo.set_attribute_uint32("unix::gid", 0); @@ -209,11 +214,7 @@ pub(crate) fn create_dirmeta(path: &Utf8Path, selinux: bool) -> glib::Variant { } /// Wraps [`create_dirmeta`] and commits it. -pub(crate) fn require_dirmeta( - repo: &ostree::Repo, - path: &Utf8Path, - selinux: bool, -) -> Result { +pub fn require_dirmeta(repo: &ostree::Repo, path: &Utf8Path, selinux: bool) -> Result { let v = create_dirmeta(path, selinux); let r = repo.write_metadata(ostree::ObjectType::DirMeta, None, &v, gio::NONE_CANCELLABLE)?; Ok(r.to_hex()) @@ -224,26 +225,34 @@ fn ensure_parent_dirs( path: &Utf8Path, metadata_checksum: &str, ) -> Result { - let parts = path.components().map(|s| s.as_str()).collect::>(); + let parts = relative_path_components(path) + .map(|s| s.as_str()) + .collect::>(); mt.ensure_parent_dirs(&parts, metadata_checksum) .map_err(Into::into) } -pub(crate) struct Fixture { +fn relative_path_components(p: &Utf8Path) -> impl Iterator { + p.components() + .filter(|p| matches!(p, Utf8Component::Normal(_))) +} + +#[derive(Debug)] +pub struct Fixture { // Just holds a reference _tempdir: tempfile::TempDir, - pub(crate) dir: Arc, - pub(crate) path: Utf8PathBuf, + pub dir: Arc, + pub path: Utf8PathBuf, srcrepo: ostree::Repo, destrepo: ostree::Repo, - pub(crate) format_version: u32, - pub(crate) selinux: bool, + pub format_version: u32, + pub selinux: bool, } impl Fixture { #[context("Initializing fixture")] - pub(crate) fn new_base() -> Result { + pub fn new_base() -> Result { // Basic setup, allocate a tempdir let tempdir = tempfile::tempdir_in("/var/tmp")?; let dir = Arc::new(cap_std::fs::Dir::open_ambient_dir( @@ -289,7 +298,7 @@ impl Fixture { }) } - pub(crate) fn new() -> Result { + pub fn new() -> Result { let r = Self::new_base()?; let tarname = "exampleos.tar.zst"; r.dir.write(tarname, EXAMPLEOS_V0)?; @@ -307,15 +316,15 @@ impl Fixture { Ok(r) } - pub(crate) fn srcrepo(&self) -> &ostree::Repo { + pub fn srcrepo(&self) -> &ostree::Repo { &self.srcrepo } - pub(crate) fn destrepo(&self) -> 
&ostree::Repo { + pub fn destrepo(&self) -> &ostree::Repo { &self.destrepo } - pub(crate) fn write_filedef(&self, root: &ostree::MutableTree, def: &FileDef) -> Result<()> { + pub fn write_filedef(&self, root: &ostree::MutableTree, def: &FileDef) -> Result<()> { let parent_path = def.path.parent(); let parent = if let Some(parent_path) = parent_path { let meta = require_dirmeta(&self.srcrepo, parent_path, self.selinux)?; @@ -361,10 +370,7 @@ impl Fixture { Ok(()) } - pub(crate) fn commit_filedefs<'a>( - &self, - defs: impl IntoIterator>, - ) -> Result<()> { + pub fn commit_filedefs(&self, defs: impl IntoIterator>) -> Result<()> { let root = ostree::MutableTree::new(); let cancellable = gio::NONE_CANCELLABLE; let tx = self.srcrepo.auto_transaction(cancellable)?; @@ -391,42 +397,76 @@ impl Fixture { Ok(()) } - pub(crate) fn new_v1() -> Result { + pub fn new_v1() -> Result { let r = Self::new_base()?; r.commit_filedefs(FileDef::iter_from(CONTENTS_V0))?; Ok(r) } - pub(crate) fn testref(&self) -> &'static str { + pub fn testref(&self) -> &'static str { TESTREF } #[context("Updating test repo")] - pub(crate) fn update(&mut self) -> Result<()> { - let tmptarpath = "src/repo/tmp/exampleos-v1.tar.zst"; - self.dir.write(tmptarpath, EXAMPLEOS_V1)?; - let testref = TESTREF; - bash_in!( - &self.dir, - "ostree --repo=src/repo commit -b ${testref} --no-bindings --tree=tar=${tmptarpath}", - testref, - tmptarpath - )?; - self.dir.remove_file(tmptarpath)?; + pub fn update( + &mut self, + additions: impl Iterator>, + removals: impl Iterator>, + ) -> Result<()> { + let cancellable = gio::NONE_CANCELLABLE; + + // Load our base commit + let rev = &self.srcrepo().require_rev(self.testref())?; + let (commit, _) = self.srcrepo.load_commit(rev)?; + let root = ostree::MutableTree::from_commit(self.srcrepo(), rev)?; + // Bump the commit timestamp by one day + let ts = chrono::Utc.timestamp(ostree::commit_get_timestamp(&commit) as i64, 0); + let new_ts = ts.add(chrono::Duration::days(1)).timestamp() as u64; + + // Prepare a transaction + let tx = self.srcrepo.auto_transaction(cancellable)?; + for def in additions { + let def = def?; + self.write_filedef(&root, &def)?; + } + for removal in removals { + let filename = removal + .file_name() + .ok_or_else(|| anyhow!("Invalid path {}", removal))?; + // Notice that we're traversing the whole path, because that's how the walk() API works. 
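            // (Hedged illustration, added editorially: for a removal such as
            // "/usr/bin/bash" from the test below, `relative_path_components`
            // keeps only the Normal components, so `parts` is
            // ["usr", "bin", "bash"] and `filename` is "bash".)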
+ let p = relative_path_components(&removal); + let parts = p.map(|s| s.as_str()).collect::>(); + let parent = &root.walk(&parts, 0)?; + parent.remove(filename, false)?; + self.srcrepo.write_mtree(parent, cancellable)?; + } + let root = self + .srcrepo + .write_mtree(&root, cancellable) + .context("Writing mtree")?; + let root = root.downcast_ref::().unwrap(); + let commit = self + .srcrepo + .write_commit_with_time(Some(rev), None, None, None, root, new_ts, cancellable) + .context("Writing commit")?; + self.srcrepo + .transaction_set_ref(None, self.testref(), Some(commit.as_str())); + tx.commit(cancellable)?; Ok(()) } #[context("Exporting tar")] - pub(crate) fn export_tar(&self) -> Result<&'static Utf8Path> { + pub fn export_tar(&self) -> Result<&'static Utf8Path> { let cancellable = gio::NONE_CANCELLABLE; let (_, rev) = self.srcrepo.read_commit(self.testref(), cancellable)?; let path = "exampleos-export.tar"; let mut outf = std::io::BufWriter::new(self.dir.create(path)?); - let options = ostree_ext::tar::ExportOptions { + #[allow(clippy::needless_update)] + let options = crate::tar::ExportOptions { format_version: self.format_version, ..Default::default() }; - ostree_ext::tar::export_commit(&self.srcrepo, rev.as_str(), &mut outf, Some(options))?; + crate::tar::export_commit(&self.srcrepo, rev.as_str(), &mut outf, Some(options))?; outf.flush()?; Ok(path.into()) } diff --git a/lib/tests/it/fixtures/exampleos-v1.tar.zst b/lib/src/fixtures/exampleos-v1.tar.zst similarity index 100% rename from lib/tests/it/fixtures/exampleos-v1.tar.zst rename to lib/src/fixtures/exampleos-v1.tar.zst diff --git a/lib/tests/it/fixtures/exampleos.tar.zst b/lib/src/fixtures/exampleos.tar.zst similarity index 100% rename from lib/tests/it/fixtures/exampleos.tar.zst rename to lib/src/fixtures/exampleos.tar.zst diff --git a/lib/tests/it/fixtures/ostree-gpg-test-home.tar.gz b/lib/src/fixtures/ostree-gpg-test-home.tar.gz similarity index 100% rename from lib/tests/it/fixtures/ostree-gpg-test-home.tar.gz rename to lib/src/fixtures/ostree-gpg-test-home.tar.gz diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 562a15950..badf244ec 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -23,7 +23,6 @@ pub(crate) fn detectenv() -> &'static str { /// Using `src` as a base, take append `dir` into OCI image. /// Should only be enabled for testing. 
-#[cfg(feature = "internal-testing-api")]
 #[context("Generating derived oci")]
 pub fn generate_derived_oci(src: impl AsRef<Utf8Path>, dir: impl AsRef<Utf8Path>) -> Result<()> {
     use std::rc::Rc;
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index 5c64ea684..032bf040d 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -44,5 +44,7 @@ pub mod prelude {
     pub use ostree::prelude::*;
 }
 
+#[cfg(feature = "internal-testing-api")]
+pub mod fixture;
 #[cfg(feature = "internal-testing-api")]
 pub mod integrationtest;
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 57b565773..e5a9fd7de 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -1,5 +1,3 @@
-mod fixture;
-
 use anyhow::{Context, Result};
 use camino::Utf8Path;
 use once_cell::sync::Lazy;
@@ -10,10 +8,11 @@ use ostree_ext::container::{
 use ostree_ext::tar::TarImportOptions;
 use ostree_ext::{gio, glib};
 use sh_inline::bash_in;
+use std::borrow::Cow;
 use std::collections::{HashMap, HashSet};
 use std::process::Command;
 
-use fixture::Fixture;
+use ostree_ext::fixture::{FileDef, Fixture};
 
 const EXAMPLE_TAR_LAYER: &[u8] = include_bytes!("fixtures/hlinks.tar.gz");
 const EXAMPLEOS_CONTENT_CHECKSUM: &str =
@@ -734,8 +733,17 @@ async fn test_container_import_export_registry() -> Result<()> {
 
 #[test]
 fn test_diff() -> Result<()> {
-    let mut fixture = Fixture::new()?;
-    fixture.update()?;
+    let mut fixture = Fixture::new_v1()?;
+    const ADDITIONS: &str = indoc::indoc! { "
+r /usr/bin/newbin some-new-binary
+d /usr/share
+"};
+    fixture
+        .update(
+            FileDef::iter_from(ADDITIONS),
+            IntoIterator::into_iter([Cow::Borrowed("/usr/bin/bash".into())]),
+        )
+        .context("Failed to update")?;
     let from = &format!("{}^", fixture.testref());
     let repo = fixture.srcrepo();
     let subdir: Option<&str> = None;
@@ -746,7 +754,7 @@ fn test_diff() -> Result<()> {
     assert_eq!(diff.added_files.len(), 1);
     assert_eq!(diff.added_files.iter().next().unwrap(), "/usr/bin/newbin");
     assert_eq!(diff.removed_files.len(), 1);
-    assert_eq!(diff.removed_files.iter().next().unwrap(), "/usr/bin/foo");
+    assert_eq!(diff.removed_files.iter().next().unwrap(), "/usr/bin/bash");
     let diff = ostree_ext::diff::diff(repo, from, fixture.testref(), Some("/usr"))?;
     assert_eq!(diff.subdir.as_ref().unwrap(), "/usr");
     assert_eq!(diff.added_dirs.len(), 1);
@@ -754,6 +762,6 @@ fn test_diff() -> Result<()> {
     assert_eq!(diff.added_files.len(), 1);
     assert_eq!(diff.added_files.iter().next().unwrap(), "/bin/newbin");
     assert_eq!(diff.removed_files.len(), 1);
-    assert_eq!(diff.removed_files.iter().next().unwrap(), "/bin/foo");
+    assert_eq!(diff.removed_files.iter().next().unwrap(), "/bin/bash");
     Ok(())
 }

From 7a0462e149d1a9f21d5b6847b2e53edc1f36f8fb Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 28 Feb 2022 11:08:44 -0500
Subject: [PATCH 318/775] ci/installdeps.sh: Clean up to enable repos
 consistently

We should have a clear two-step flow where we enable repos
(updates-testing and the CoreOS/continuous COPR) and then install
packages.  This way we also get skopeo from updates-testing which
a future PR will need.
---
 ci/installdeps.sh | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/ci/installdeps.sh b/ci/installdeps.sh
index a0dcc53df..73a6b5f58 100755
--- a/ci/installdeps.sh
+++ b/ci/installdeps.sh
@@ -1,12 +1,6 @@
 #!/bin/bash
 set -xeuo pipefail
 
-# Always pull ostree from updates-testing to avoid the bodhi wait
-dnf -y --enablerepo=updates-testing update ostree-devel
-
-# Our tests depend on this
-dnf -y install skopeo
-
 # For some reason dnf copr enable -y says there are no builds?
 cat >/etc/yum.repos.d/coreos-continuous.repo << 'EOF'
 [copr:copr.fedorainfracloud.org:group_CoreOS:continuous]
@@ -21,5 +15,11 @@ enabled=1
 enabled_metadata=1
 EOF
 
-# For now pull ostree from git
-dnf update -y ostree
+# Pull skopeo and ostree from updates-testing, since we depend on new features in our git main
+dnf config-manager --set-enabled updates-testing
+
+# Our tests depend on this
+dnf -y install skopeo
+
+# Always pull ostree from updates-testing to avoid the bodhi wait
+dnf -y update ostree

From 52d67fd9db433c46da7a4c3e27f8440097382270 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 7 Mar 2022 11:39:08 -0500
Subject: [PATCH 319/775] Add new `objectsource` module

In ostree we aim to provide generic mechanisms that can be consumed
by any package or build system.  Hence we often use the term
"component" instead of "package".

This new `objectsource` module is an abstraction over basic metadata
for a component/package, currently name, identifier, and last change time.

This will be used for splitting up a single ostree commit back into
"chunks" or container image layers, grouping objects that come from the
same component together.

https://github.com/ostreedev/ostree-rs-ext/issues/69
---
 lib/src/lib.rs          |  2 +
 lib/src/objectsource.rs | 87 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 89 insertions(+)
 create mode 100644 lib/src/objectsource.rs

diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index 032bf040d..38d4e8223 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -37,7 +37,9 @@ pub mod tar;
 pub mod tokio_util;
 
 pub(crate) mod commit;
+pub mod objectsource;
 pub(crate) mod objgv;
+
 /// Prelude, intended for glob import.
 pub mod prelude {
     #[doc(hidden)]
diff --git a/lib/src/objectsource.rs b/lib/src/objectsource.rs
new file mode 100644
index 000000000..9faa26b92
--- /dev/null
+++ b/lib/src/objectsource.rs
@@ -0,0 +1,87 @@
+//! Metadata about the source of an object: a component or package.
+//!
+//! This is used to help split up containers into distinct layers.
+
+use std::borrow::Borrow;
+use std::collections::{BTreeMap, HashSet};
+use std::hash::Hash;
+use std::rc::Rc;
+
+use serde::{Deserialize, Serialize, Serializer};
+
+mod rcstr_serialize {
+    use serde::Deserializer;
+
+    use super::*;
+
+    pub(crate) fn serialize<S>(v: &Rc<str>, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        serializer.serialize_str(&*v)
+    }
+
+    pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result<Rc<str>, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let v = String::deserialize(deserializer)?;
+        Ok(Rc::from(v.into_boxed_str()))
+    }
+}
+
+/// Identifier for content (e.g. package/layer).  Not necessarily human readable.
+pub type ContentID = Rc<str>;
+
+/// Metadata about a component/package.
+#[derive(Debug, Eq, Deserialize, Serialize)]
+pub struct ObjectSourceMeta {
+    /// Unique identifier, does not need to be human readable, but can be.
+    #[serde(with = "rcstr_serialize")]
+    pub identifier: ContentID,
+    /// Identifier for this source (e.g. package name-version, git repo).
+    /// Unlike the [`ContentID`], this should be human readable.
+    #[serde(with = "rcstr_serialize")]
+    pub name: Rc<str>,
+    /// Identifier for the *source* of this content; for example, if multiple binary
+    /// packages derive from a single git repository or source package.
+    #[serde(with = "rcstr_serialize")]
+    pub srcid: Rc<str>,
+    /// Unitless, relative offset of last change time.
+    /// One suggested way to generate this number is to have it be in units of hours or days
+    /// since the earliest changed item.
+    pub change_time_offset: u32,
+}
+
+impl PartialEq for ObjectSourceMeta {
+    fn eq(&self, other: &Self) -> bool {
+        *self.identifier == *other.identifier
+    }
+}
+
+impl Hash for ObjectSourceMeta {
+    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        self.identifier.hash(state);
+    }
+}
+
+impl Borrow<str> for ObjectSourceMeta {
+    fn borrow(&self) -> &str {
+        &*self.identifier
+    }
+}
+
+/// Maps from e.g. "bash" or "kernel" to metadata about that content
+pub type ObjectMetaSet = HashSet<ObjectSourceMeta>;
+
+/// Maps from an ostree content object digest to the `ContentSet` key.
+pub type ObjectMetaMap = BTreeMap<String, ContentID>;
+
+/// Grouping of metadata about an object.
+#[derive(Debug, Default)]
+pub struct ObjectMeta {
+    /// The set of object sources with their metadata.
+    pub set: ObjectMetaSet,
+    /// Mapping from content object to source.
+    pub map: ObjectMetaMap,
+}

From 7b8f483d5ae56e7f4bd603e8196c3343fd3a2355 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 7 Mar 2022 12:27:35 -0500
Subject: [PATCH 320/775] bootabletree: New module with helpers for bootable
 ostree commits

Right now this only offers an API to find the kernel directory,
but I could imagine we do more stuff here in the future.

I think this will be generally useful (e.g. ostree and rpm-ostree
have duplicate code for this) as is, but it's specifically
prep for using this in container layer splitting.
---
 lib/src/bootabletree.rs | 29 +++++++++++++++++++++++++++++
 lib/src/lib.rs          |  1 +
 lib/tests/it/main.rs    | 12 ++++++++++++
 3 files changed, 42 insertions(+)
 create mode 100644 lib/src/bootabletree.rs

diff --git a/lib/src/bootabletree.rs b/lib/src/bootabletree.rs
new file mode 100644
index 000000000..6be01cb90
--- /dev/null
+++ b/lib/src/bootabletree.rs
@@ -0,0 +1,29 @@
+//! Helper functions for bootable OSTrees.
+
+use anyhow::Result;
+use ostree::gio;
+use ostree::prelude::*;
+
+const MODULES: &str = "/usr/lib/modules";
+
+/// Find the kernel modules directory in a bootable OSTree commit.
+pub fn find_kernel_dir(
+    root: &gio::File,
+    cancellable: Option<&gio::Cancellable>,
+) -> Result<Option<gio::File>> {
+    let moddir = root.resolve_relative_path(MODULES);
+    let e = moddir.enumerate_children(
+        "standard::name",
+        gio::FileQueryInfoFlags::NOFOLLOW_SYMLINKS,
+        cancellable,
+    )?;
+    let mut r = None;
+    for child in e.clone() {
+        let child = &child?;
+        let childpath = e.child(child);
+        if child.file_type() == gio::FileType::Directory && r.replace(childpath).is_some() {
+            anyhow::bail!("Found multiple subdirectories in {}", MODULES);
+        }
+    }
+    Ok(r)
+}
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index 38d4e8223..c0b9b8e8b 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -26,6 +26,7 @@ type Result<T> = anyhow::Result<T>;
 // Import global functions.
 mod globals;
 
+pub mod bootabletree;
 pub mod cli;
 pub mod container;
 pub mod container_utils;
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index e5a9fd7de..699d5034b 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -5,6 +5,7 @@ use ostree_ext::container::store::PrepareResult;
 use ostree_ext::container::{
     Config, ImageReference, OstreeImageReference, SignatureSource, Transport,
 };
+use ostree_ext::prelude::FileExt;
 use ostree_ext::tar::TarImportOptions;
 use ostree_ext::{gio, glib};
 use sh_inline::bash_in;
@@ -300,6 +301,17 @@ async fn test_tar_import_export() -> Result<()> {
 "#,
         imported_commit = imported_commit.as_str()
     )?;
+
+    let (root, _) = fixture
+        .destrepo()
+        .read_commit(&imported_commit, gio::NONE_CANCELLABLE)?;
+    let kdir = ostree_ext::bootabletree::find_kernel_dir(&root, gio::NONE_CANCELLABLE)?;
+    let kdir = kdir.unwrap();
+    assert_eq!(
+        kdir.basename().unwrap().to_str().unwrap(),
+        "5.10.18-200.x86_64"
+    );
+
     Ok(())
 }

From ed48ba411917920f4f81aa5188c23df381874626 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 7 Mar 2022 17:46:49 -0500
Subject: [PATCH 321/775] Fix clippy lints with `cargo 1.60.0-beta.1 (ea2a21c
 2022-02-15)`

I decided to suppress the ref-vs-not ref one for now, hopefully
clippy gets fixed.
---
 lib/src/container/encapsulate.rs | 3 +++
 lib/src/tar/write.rs             | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index e1b3ff17f..6a01897ff 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -124,6 +124,9 @@ fn build_oci(
     // Lookup the cmd embedded in commit metadata
     let cmd = commit_meta.lookup::<Vec<String>>(ostree::COMMIT_META_CONTAINER_CMD)?;
     // But support it being overridden by CLI options
+
+    // https://github.com/rust-lang/rust-clippy/pull/7639#issuecomment-1050340564
+    #[allow(clippy::unnecessary_lazy_evaluations)]
     let cmd = config.cmd.as_ref().or_else(|| cmd.as_ref());
     if let Some(cmd) = cmd {
         ctrcfg.set_cmd(Some(cmd.clone()));
diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs
index 34ab7944d..b196597be 100644
--- a/lib/src/tar/write.rs
+++ b/lib/src/tar/write.rs
@@ -193,7 +193,7 @@ async fn filter_tar_async(
     let copier = tokio::io::copy(&mut rx_buf, &mut dest);
     let (r, v) = tokio::join!(tar_transformer, copier);
     let _v: u64 = v?;
-    Ok(r??)
+    r?
 }
 
 /// Write the contents of a tarball as an ostree commit.

From db8f7abeb6cb1103777535f6ef040e9a046643db Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 7 Mar 2022 20:09:06 -0500
Subject: [PATCH 322/775] tests: Port two tests to v1 fixture

Add GPG signature on the commit object to match the current bits.
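For reference, the verifying import path these tests exercise looks roughly
like this (a sketch, assuming a destination repo with a GPG-verifying OSTree
remote named "myremote" whose keyring trusts the test key; `fixture` and
`src_tar` as in the tests):

    let mut opts = TarImportOptions::default();
    // Signature verification is delegated to the named OSTree remote.
    opts.remote = Some("myremote".to_string());
    let commit = ostree_ext::tar::import_tar(fixture.destrepo(), src_tar, Some(opts)).await?;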
---
 lib/src/fixture.rs   | 10 ++++++++++
 lib/tests/it/main.rs | 10 +++++-----
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index e4e1fd748..169c06f5b 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -138,6 +138,8 @@ d run
 m 0 0 1755
 d tmp
 "## };
+pub const CONTENTS_CHECKSUM_V0: &str =
+    "76f0d5ec8814bc2a1d7868dbe8d3783535dc0cc9c7dcfdf37fa3512f8e276f6c";
 
 #[derive(Debug, PartialEq, Eq)]
 enum SeLabel {
@@ -394,6 +396,14 @@ impl Fixture {
             .transaction_set_ref(None, self.testref(), Some(commit.as_str()));
         tx.commit(cancellable)?;
 
+        let gpghome = self.path.join("src/gpghome");
+        self.srcrepo.sign_commit(
+            &commit,
+            TEST_GPG_KEYID_1,
+            Some(gpghome.as_str()),
+            gio::NONE_CANCELLABLE,
+        )?;
+
         Ok(())
     }
 
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 699d5034b..518e6f8e7 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -13,7 +13,7 @@ use std::borrow::Cow;
 use std::collections::{HashMap, HashSet};
 use std::process::Command;
 
-use ostree_ext::fixture::{FileDef, Fixture};
+use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0};
 
 const EXAMPLE_TAR_LAYER: &[u8] = include_bytes!("fixtures/hlinks.tar.gz");
 const EXAMPLEOS_CONTENT_CHECKSUM: &str =
@@ -35,7 +35,7 @@ static TEST_REGISTRY: Lazy<String> = Lazy::new(|| match std::env::var_os("TEST_R
 
 #[tokio::test]
 async fn test_tar_import_empty() -> Result<()> {
-    let fixture = Fixture::new()?;
+    let fixture = Fixture::new_v1()?;
     let r = ostree_ext::tar::import_tar(fixture.destrepo(), tokio::io::empty(), None).await;
     assert_err_contains(r, "Commit object not found");
     Ok(())
@@ -65,7 +65,7 @@ async fn test_tar_export_reproducible() -> Result<()> {
 
 #[tokio::test]
 async fn test_tar_import_signed() -> Result<()> {
-    let fixture = Fixture::new()?;
+    let fixture = Fixture::new_v1()?;
     let test_tar = fixture.export_tar()?;
 
     let rev = fixture.srcrepo().require_rev(fixture.testref())?;
@@ -74,7 +74,7 @@ async fn test_tar_import_signed() -> Result<()> {
         ostree::commit_get_content_checksum(&commitv)
             .unwrap()
             .as_str(),
-        EXAMPLEOS_CONTENT_CHECKSUM
+        CONTENTS_CHECKSUM_V0
     );
 
     // Verify we fail with an unknown remote.
@@ -122,7 +122,7 @@ async fn test_tar_import_signed() -> Result<()> {
         .await?;
     let (commitdata, state) = fixture.destrepo().load_commit(&imported)?;
     assert_eq!(
-        EXAMPLEOS_CONTENT_CHECKSUM,
+        CONTENTS_CHECKSUM_V0,
         ostree::commit_get_content_checksum(&commitdata)
             .unwrap()
             .as_str()

From 0de40a27089449d860b7fab8ab788a99547813d2 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 7 Mar 2022 20:09:53 -0500
Subject: [PATCH 323/775] tar/import: Improve error for unsigned commit

Move the check for object type earlier, so we provide a clearer
error message.  Hit this when trying to pull an unsigned commit
in the tests.
---
 lib/src/tar/import.rs | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index a5ecc349d..9109ce1a0 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -573,13 +573,6 @@ impl Importer {
         let (next_checksum, next_objtype) = Self::parse_metadata_entry(&nextent_path)?;
 
         if let Some(remote) = self.remote.as_deref() {
-            if next_checksum != checksum {
-                return Err(anyhow!(
-                    "Expected commitmeta checksum {}, found {}",
-                    checksum,
-                    next_checksum
-                ));
-            }
             if next_objtype != ostree::ObjectType::CommitMeta {
                 return Err(anyhow!(
                     "Using remote {} for verification; Expected commitmeta object, not {:?}",
@@ -587,6 +580,13 @@ impl Importer {
                     objtype
                 ));
             }
+            if next_checksum != checksum {
+                return Err(anyhow!(
+                    "Expected commitmeta checksum {}, found {}",
+                    checksum,
+                    next_checksum
+                ));
+            }
             let commitmeta = entry_to_variant::<_, std::collections::HashMap<String, glib::Variant>>(
                 next_ent,
                 &next_checksum,

From b0a2955112c0b90f9390525ca6dcde165aba202d Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 7 Mar 2022 20:44:28 -0500
Subject: [PATCH 324/775] tests: Port two more tests to v1 fixture

Ongoing work to use the updated fixture.
---
 lib/src/fixture.rs   |  9 +++++++++
 lib/tests/it/main.rs | 10 ++++------
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index 169c06f5b..c6d965cd7 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -396,6 +396,15 @@ impl Fixture {
             .transaction_set_ref(None, self.testref(), Some(commit.as_str()));
         tx.commit(cancellable)?;
 
+        let detached = glib::VariantDict::new(None);
+        detached.insert("my-detached-key", &"my-detached-value");
+        let detached = detached.to_variant();
+        self.srcrepo.write_commit_detached_metadata(
+            commit.as_str(),
+            Some(&detached),
+            gio::NONE_CANCELLABLE,
+        )?;
+
         let gpghome = self.path.join("src/gpghome");
         self.srcrepo.sign_commit(
             &commit,
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 518e6f8e7..4e87d98cd 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -16,8 +16,6 @@ use std::process::Command;
 use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0};
 
 const EXAMPLE_TAR_LAYER: &[u8] = include_bytes!("fixtures/hlinks.tar.gz");
-const EXAMPLEOS_CONTENT_CHECKSUM: &str =
-    "0ef7461f9db15e1d8bd8921abf20694225fbaa4462cadf7deed8ea0e43162120";
 const TEST_REGISTRY_DEFAULT: &str = "localhost:5000";
 
 fn assert_err_contains<T>(r: Result<T>, s: impl AsRef<str>) {
@@ -203,7 +201,7 @@ fn validate_tar_expected(
 fn test_tar_export_structure() -> Result<()> {
     use tar::EntryType::{Directory, Regular};
 
-    let mut fixture = Fixture::new()?;
+    let mut fixture = Fixture::new_v1()?;
     let src_tar = fixture.export_tar()?;
     let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?);
     let mut src_tar = tar::Archive::new(src_tar);
@@ -235,7 +233,7 @@ fn test_tar_export_structure() -> Result<()> {
         ("sysroot/ostree/repo/tmp", Directory, 0o755),
         ("sysroot/ostree/repo/tmp/cache", Directory, 0o755),
         ("sysroot/ostree/repo/xattrs", Directory, 0o755),
        ("sysroot/ostree/repo/xattrs/d67db507c5a6e7bfd078f0f3ded0a5669479a902e812931fc65c6f5e01831ef5", Regular, 0o644),
         ("usr", Directory, 0o755),
     ];
     validate_tar_expected(
@@ -279,7 +277,7 @@ fn test_tar_export_structure() -> Result<()> {
 
 #[tokio::test]
 async fn test_tar_import_export() -> Result<()> {
-    let fixture = Fixture::new()?;
+    let fixture = Fixture::new_v1()?;
     let p = fixture.export_tar()?;
     let src_tar = tokio::fs::File::from_std(fixture.dir.open(p)?.into_std());
@@ -287,7 +285,7 @@ async fn test_tar_import_export() -> Result<()> {
         ostree_ext::tar::import_tar(fixture.destrepo(), src_tar, None).await?;
     let (commitdata, _) = fixture.destrepo().load_commit(&imported_commit)?;
     assert_eq!(
-        EXAMPLEOS_CONTENT_CHECKSUM,
+        CONTENTS_CHECKSUM_V0,
         ostree::commit_get_content_checksum(&commitdata)
             .unwrap()
             .as_str()

From 3ad28ad9eabeda03122c3a3c959a8fc2e467ccce Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 7 Mar 2022 20:44:28 -0500
Subject: [PATCH 325/775] tests: Port another two tests to v1 fixture

Ongoing work to use the updated fixture.
---
 lib/tests/it/main.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 4e87d98cd..369b381c3 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -315,7 +315,7 @@ async fn test_tar_import_export() -> Result<()> {
 
 #[tokio::test]
 async fn test_tar_write() -> Result<()> {
-    let fixture = Fixture::new()?;
+    let fixture = Fixture::new_v1()?;
     // Test translating /etc to /usr/etc
     fixture.dir.create_dir_all("tmproot/etc")?;
     let tmproot = &fixture.dir.open_dir("tmproot")?;
@@ -346,7 +346,7 @@ async fn test_tar_write() -> Result<()> {
 
 #[tokio::test]
 async fn test_tar_write_tar_layer() -> Result<()> {
-    let fixture = Fixture::new()?;
+    let fixture = Fixture::new_v1()?;
     let uncompressed_tar = tokio::io::BufReader::new(
         async_compression::tokio::bufread::GzipDecoder::new(EXAMPLE_TAR_LAYER),
     );

From 5eae4af32c9b833803b0e26a8f078791e550b127 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Tue, 8 Mar 2022 15:37:24 -0500
Subject: [PATCH 326/775] tests: Port all remaining tests to v1 fixture

Will remove the old one in a followup.
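As a sketch of what the fixture's new commit metadata enables (the keys are
set in the hunk below; this read-back snippet is illustrative, not code from
this series):

    let repo = fixture.srcrepo();
    let rev = repo.require_rev(fixture.testref())?;
    let (commit, _) = repo.load_commit(&rev)?;
    // The first field of an OSTree commit variant is its metadata (an a{sv}).
    let meta = glib::VariantDict::new(Some(&commit.child_value(0)));
    assert_eq!(meta.lookup::<String>("version")?, Some("42.0".into()));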
---
 lib/src/fixture.rs   | 13 ++++++++++++-
 lib/tests/it/main.rs |  8 ++++----
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index c6d965cd7..f7127646a 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -382,12 +382,22 @@ impl Fixture {
         }
         let root = self.srcrepo.write_mtree(&root, cancellable)?;
         let root = root.downcast_ref::<ostree::RepoFile>().unwrap();
+        // You win internet points if you understand this date reference
         let ts = chrono::DateTime::parse_from_rfc2822("Fri, 29 Aug 1997 10:30:42 PST")?.timestamp();
+        // Some default metadata fixtures
+        let metadata = glib::VariantDict::new(None);
+        metadata.insert(
+            "buildsys.checksum",
+            &"41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3",
+        );
+        metadata.insert("ostree.container-cmd", &vec!["/usr/bin/bash"]);
+        metadata.insert("version", &"42.0");
+        let metadata = metadata.to_variant();
         let commit = self.srcrepo.write_commit_with_time(
             None,
             None,
             None,
-            None,
+            Some(&metadata),
             root,
             ts as u64,
             cancellable,
@@ -396,6 +406,7 @@ impl Fixture {
             .transaction_set_ref(None, self.testref(), Some(commit.as_str()));
         tx.commit(cancellable)?;
 
+        // Add detached metadata so we can verify it makes it through
         let detached = glib::VariantDict::new(None);
         detached.insert("my-detached-key", &"my-detached-value");
         let detached = detached.to_variant();
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 369b381c3..aae848790 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -372,7 +372,7 @@ fn skopeo_inspect_config(imgref: &str) -> Result<oci_spec::image::ImageConfiguration> {
 
 #[tokio::test]
 async fn test_container_import_export() -> Result<()> {
-    let fixture = Fixture::new()?;
+    let fixture = Fixture::new_v1()?;
     let testrev = fixture
         .srcrepo()
         .require_rev(fixture.testref())
@@ -473,7 +473,7 @@ async fn test_container_import_export() -> Result<()> {
     // Test without signature verification
     // Create a new repo
     {
-        let fixture = Fixture::new()?;
+        let fixture = Fixture::new_v1()?;
         let import =
             ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_unverified, None)
                 .await
@@ -504,7 +504,7 @@ async fn oci_clone(src: impl AsRef<Utf8Path>, dest: impl AsRef<Utf8Path>) -> Res
 /// But layers work via the container::write module.
 #[tokio::test]
 async fn test_container_write_derive() -> Result<()> {
-    let fixture = Fixture::new()?;
+    let fixture = Fixture::new_v1()?;
     let base_oci_path = &fixture.path.join("exampleos.oci");
     let _digest = ostree_ext::container::encapsulate(
         fixture.srcrepo(),
@@ -709,7 +709,7 @@ async fn test_container_write_derive() -> Result<()> {
 // Then you can run this test via `env TEST_REGISTRY=quay.io/$myuser cargo test -- --ignored`.
 async fn test_container_import_export_registry() -> Result<()> {
     let tr = &*TEST_REGISTRY;
-    let fixture = Fixture::new()?;
+    let fixture = Fixture::new_v1()?;
     let testref = fixture.testref();
     let testrev = fixture
         .srcrepo()

From 3ee37bc41b72f98f46be788da40ac43c982daab5 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Tue, 8 Mar 2022 18:11:50 -0500
Subject: [PATCH 327/775] tests: Remove v0 fixture

No more hardcoded tarballs!
---
 lib/src/fixture.rs                    | 20 --------------------
 lib/src/fixtures/exampleos-v1.tar.zst | Bin 492 -> 0 bytes
 lib/src/fixtures/exampleos.tar.zst    | Bin 1052 -> 0 bytes
 3 files changed, 20 deletions(-)
 delete mode 100644 lib/src/fixtures/exampleos-v1.tar.zst
 delete mode 100644 lib/src/fixtures/exampleos.tar.zst

diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index f7127646a..480dc7baa 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -11,7 +11,6 @@ use cap_std_ext::prelude::CapStdExtCommandExt;
 use chrono::TimeZone;
 use fn_error_context::context;
 use ostree::cap_std;
-use sh_inline::bash_in;
 use std::borrow::Cow;
 use std::convert::{TryFrom, TryInto};
 use std::io::Write;
@@ -23,7 +22,6 @@ const OSTREE_GPG_HOME: &[u8] = include_bytes!("fixtures/ostree-gpg-test-home.tar
 const TEST_GPG_KEYID_1: &str = "7FCA23D8472CDAFA";
 #[allow(dead_code)]
 const TEST_GPG_KEYFPR_1: &str = "5E65DE75AB1C501862D476347FCA23D8472CDAFA";
-pub const EXAMPLEOS_V0: &[u8] = include_bytes!("fixtures/exampleos.tar.zst");
 const TESTREF: &str = "exampleos/x86_64/stable";
 
 #[derive(Debug)]
@@ -300,24 +298,6 @@ impl Fixture {
         })
     }
 
-    pub fn new() -> Result<Self> {
-        let r = Self::new_base()?;
-        let tarname = "exampleos.tar.zst";
-        r.dir.write(tarname, EXAMPLEOS_V0)?;
-        bash_in!(
-            r.dir,
-            "ostree --repo=src/repo commit -b ${testref} --bootable --no-bindings --add-metadata=ostree.container-cmd='[\"/usr/bin/bash\"]' \
-            --add-metadata-string=version=42.0 --add-metadata-string=buildsys.checksum=41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3 \
-            --gpg-homedir=src/gpghome --gpg-sign=${keyid} \
-            --add-detached-metadata-string=my-detached-key=my-detached-value --tree=tar=exampleos.tar.zst >/dev/null && \
-            ostree --repo=src/repo show ${testref} >/dev/null",
-            testref = r.testref(),
-            keyid = TEST_GPG_KEYID_1
-        ).context("Writing commit")?;
-        r.dir.remove_file(tarname)?;
-        Ok(r)
-    }
-
     pub fn srcrepo(&self) -> &ostree::Repo {
         &self.srcrepo
     }
 
diff --git a/lib/src/fixtures/exampleos-v1.tar.zst b/lib/src/fixtures/exampleos-v1.tar.zst
deleted file mode 100644
index de20d2dce4bf76cc51067a9267c79308817f9eda..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 492
[base85 binary payload omitted]

diff --git a/lib/src/fixtures/exampleos.tar.zst b/lib/src/fixtures/exampleos.tar.zst
deleted file mode 100644
index 8e8969d838ae96e1575fc2c9d3773325bed2bfc0..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 1052
[base85 binary payload omitted]

From: Colin Walters <walters@verbum.org>
Date: Wed, 9 Mar 2022 18:09:34 -0500
Subject: [PATCH 328/775] tests/fixture: Add concept of "owner"

Prep for chunk splitting - this is a bit like the dpkg/rpm database.
---
 lib/Cargo.toml       |   1 +
 lib/src/fixture.rs   | 107 +++++++++++++++++++++++++++++++++++++++++++
 lib/tests/it/main.rs |   3 ++
 3 files changed, 111 insertions(+)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index e5fba266c..067b67795 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -33,6 +33,7 @@ openat-ext = "0.2.0"
 openssl = "0.10.33"
 ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.13.5" }
 pin-project = "1.0"
+regex = "1.5.4"
 serde = { features = ["derive"], version = "1.0.125" }
 serde_json = "1.0.64"
 structopt = "0.3.21"
diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index 480dc7baa..819191980 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -2,6 +2,7 @@
 
 #![allow(missing_docs)]
 
+use crate::objectsource::{ObjectMeta, ObjectSourceMeta};
 use crate::prelude::*;
 use crate::{gio, glib};
 use anyhow::{anyhow, Context, Result};
@@ -10,12 +11,15 @@ use cap_std::fs::Dir;
 use cap_std_ext::prelude::CapStdExtCommandExt;
 use chrono::TimeZone;
 use fn_error_context::context;
+use once_cell::sync::Lazy;
 use ostree::cap_std;
+use regex::Regex;
 use std::borrow::Cow;
 use std::convert::{TryFrom, TryInto};
 use std::io::Write;
 use std::ops::Add;
 use std::process::Stdio;
+use std::rc::Rc;
 use std::sync::Arc;
@@ -117,6 +121,21 @@ impl FileDef {
     }
 }
 
+/// This is like a package database, mapping our test fixture files to package names
+static OWNERS: Lazy<Vec<(Regex, &str)>> = Lazy::new(|| {
+    [
+        ("usr/lib/modules/.*/initramfs", "initramfs"),
+        ("usr/lib/modules", "kernel"),
+        ("usr/bin/(ba)?sh", "bash"),
+        ("usr/bin/hardlink.*", "testlink"),
+        ("usr/etc/someconfig.conf", "someconfig"),
+        ("usr/etc/polkit.conf", "a-polkit-config"),
+    ]
+    .iter()
+    .map(|(k, v)| (Regex::new(k).unwrap(), *v))
+    .collect()
+});
+
 static CONTENTS_V0: &str = indoc::indoc! { r##"
 r usr/lib/modules/5.10.18-200.x86_64/vmlinuz this-is-a-kernel
 r usr/lib/modules/5.10.18-200.x86_64/initramfs this-is-an-initramfs
@@ -237,6 +256,81 @@ fn relative_path_components(p: &Utf8Path) -> impl Iterator<Item = Utf8Component>
         .filter(|p| matches!(p, Utf8Component::Normal(_)))
 }
 
+/// Walk over the whole filesystem, and generate mappings from content object checksums
+/// to the package that owns them.
+///
+/// In the future, we could compute this much more efficiently by walking that
+/// instead.  But this design is currently oriented towards accepting a single ostree
+/// commit as input.
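+/// The resulting [`ObjectMeta`] maps each content object checksum back to the component that owns it.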
+fn build_mapping_recurse(
+    path: &mut Utf8PathBuf,
+    dir: &gio::File,
+    ret: &mut ObjectMeta,
+) -> Result<()> {
+    use std::collections::btree_map::Entry;
+    let cancellable = gio::NONE_CANCELLABLE;
+    let e = dir.enumerate_children(
+        "standard::name,standard::type",
+        gio::FileQueryInfoFlags::NOFOLLOW_SYMLINKS,
+        cancellable,
+    )?;
+    for child in e {
+        let childi = child?;
+        let name: Utf8PathBuf = childi.name().try_into()?;
+        let child = dir.child(&name);
+        path.push(&name);
+        match childi.file_type() {
+            gio::FileType::Regular | gio::FileType::SymbolicLink => {
+                let child = child.downcast::<ostree::RepoFile>().unwrap();
+
+                let owner = OWNERS
+                    .iter()
+                    .find_map(|(r, owner)| {
+                        if r.is_match(path.as_str()) {
+                            Some(Rc::from(*owner))
+                        } else {
+                            None
+                        }
+                    })
+                    .ok_or_else(|| anyhow!("Unowned path {}", path))?;
+
+                if !ret.set.contains(&*owner) {
+                    ret.set.insert(ObjectSourceMeta {
+                        identifier: Rc::clone(&owner),
+                        name: Rc::clone(&owner),
+                        srcid: Rc::clone(&owner),
+                        change_time_offset: u32::MAX,
+                    });
+                }
+
+                let checksum = child.checksum().unwrap().to_string();
+                match ret.map.entry(checksum) {
+                    Entry::Vacant(v) => {
+                        v.insert(owner);
+                    }
+                    Entry::Occupied(v) => {
+                        let prev_owner = v.get();
+                        if **prev_owner != *owner {
+                            anyhow::bail!(
+                                "Duplicate object ownership {} ({} and {})",
+                                path.as_str(),
+                                prev_owner,
+                                owner
+                            );
+                        }
+                    }
+                }
+            }
+            gio::FileType::Directory => {
+                build_mapping_recurse(path, &child, ret)?;
+            }
+            o => anyhow::bail!("Unhandled file type: {}", o),
+        }
+        path.pop();
+    }
+    Ok(())
+}
+
 #[derive(Debug)]
 pub struct Fixture {
     // Just holds a reference
@@ -465,6 +559,19 @@ impl Fixture {
         Ok(())
     }
 
+    /// Gather object metadata for the current commit.
+    pub fn get_object_meta(&self) -> Result<ObjectMeta> {
+        let cancellable = gio::NONE_CANCELLABLE;
+
+        // Load our base commit
+        let root = self.srcrepo.read_commit(self.testref(), cancellable)?.0;
+
+        let mut ret = ObjectMeta::default();
+        build_mapping_recurse(&mut Utf8PathBuf::from("/"), &root, &mut ret)?;
+
+        Ok(ret)
+    }
+
     #[context("Exporting tar")]
     pub fn export_tar(&self) -> Result<&'static Utf8Path> {
         let cancellable = gio::NONE_CANCELLABLE;
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index aae848790..c21aecb9a 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -202,6 +202,9 @@ fn test_tar_export_structure() -> Result<()> {
     use tar::EntryType::{Directory, Regular};
 
     let mut fixture = Fixture::new_v1()?;
+    // Just test that we can retrieve ownership for all objects
+    let _objmeta = fixture.get_object_meta()?;
+
     let src_tar = fixture.export_tar()?;
     let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?);
     let mut src_tar = tar::Archive::new(src_tar);

From a3fff4df72737efd472a23075cee3290bd58be8a Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Fri, 11 Mar 2022 13:52:19 -0500
Subject: [PATCH 329/775] cli: Make parse functions public

I want to use them in rpm-ostree too.
---
 lib/src/cli.rs       |  9 ++++++---
 lib/tests/it/main.rs | 18 ++++++++++++++++++
 2 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 8b34bb291..ad61e45bd 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -21,15 +21,18 @@ use crate::container::store::{LayeredImageImporter, PrepareResult};
 use crate::container::{self as ostree_container, UnencapsulationProgress};
 use crate::container::{Config, ImageReference, OstreeImageReference, UnencapsulateOptions};
 
-fn parse_imgref(s: &str) -> Result<OstreeImageReference> {
+/// Parse an [`OstreeImageReference`] from a CLI argument.
+pub fn parse_imgref(s: &str) -> Result<OstreeImageReference> {
     OstreeImageReference::try_from(s)
 }
 
-fn parse_base_imgref(s: &str) -> Result<ImageReference> {
+/// Parse a base [`ImageReference`] from a CLI argument.
+pub fn parse_base_imgref(s: &str) -> Result<ImageReference> {
     ImageReference::try_from(s)
 }
 
-fn parse_repo(s: &str) -> Result<ostree::Repo> {
+/// Parse an [`ostree::Repo`] from a CLI argument.
+pub fn parse_repo(s: &str) -> Result<ostree::Repo> {
     let repofd = cap_std::fs::Dir::open_ambient_dir(s, cap_std::ambient_authority())?;
     Ok(ostree::Repo::open_at_dir(&repofd, ".")?)
 }
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index c21aecb9a..78a1f371b 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -31,6 +31,24 @@ static TEST_REGISTRY: Lazy<String> = Lazy::new(|| match std::env::var_os("TEST_R
     None => TEST_REGISTRY_DEFAULT.to_string(),
 });
 
+// This is mostly just sanity checking these functions are publicly accessible
+#[test]
+fn test_cli_fns() -> Result<()> {
+    let fixture = Fixture::new_v1()?;
+    let srcpath = fixture.path.join("src/repo");
+    let srcrepo_parsed = ostree_ext::cli::parse_repo(srcpath.as_str()).unwrap();
+    assert_eq!(srcrepo_parsed.mode(), fixture.srcrepo().mode());
+
+    let ir =
+        ostree_ext::cli::parse_imgref("ostree-unverified-registry:quay.io/examplens/exampleos")
+            .unwrap();
+    assert_eq!(ir.imgref.transport, Transport::Registry);
+
+    let ir = ostree_ext::cli::parse_base_imgref("docker://quay.io/examplens/exampleos").unwrap();
+    assert_eq!(ir.transport, Transport::Registry);
+    Ok(())
+}
+
 #[tokio::test]
 async fn test_tar_import_empty() -> Result<()> {
     let fixture = Fixture::new_v1()?;

From 2b4f66f8bebed5001bc14a0797b1b8a6c6a63a3a Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Thu, 17 Mar 2022 09:21:07 -0400
Subject: [PATCH 330/775] tar/export: Extract a helper for writing content
 hardlink

Prep for tar-split work.
---
 lib/src/tar/export.rs | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 504f39871..2d188b1ce 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -415,6 +415,20 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
         Ok(())
     }
 
+    /// Given a source object (in e.g. ostree/repo/objects/...), write a hardlink to it
+    /// in its expected target path (e.g. `usr/bin/bash`).
+    fn append_content_hardlink(
+        &mut self,
+        srcpath: &Utf8Path,
+        mut h: tar::Header,
+        dest: &Utf8Path,
+    ) -> Result<()> {
+        h.set_entry_type(tar::EntryType::Link);
+        h.set_link_name(srcpath)?;
+        self.out.append_data(&mut h, dest, &mut std::io::empty())?;
+        Ok(())
+    }
+
     /// Write a dirtree object.
     fn append_dirtree<C: IsA<gio::Cancellable>>(
         &mut self,
@@ -441,13 +455,10 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
                 let (name, csum) = file.to_tuple();
                 let name = name.to_str();
                 let checksum = &hex::encode(csum);
-                let (objpath, mut h) = self.append_content(checksum)?;
-                h.set_entry_type(tar::EntryType::Link);
-                h.set_link_name(&objpath)?;
+                let (objpath, h) = self.append_content(checksum)?;
                 let subpath = &dirpath.join(name);
                 let subpath = map_path(subpath);
-                self.out
-                    .append_data(&mut h, &*subpath, &mut std::io::empty())?;
+                self.append_content_hardlink(&objpath, h, &*subpath)?;
             }
 
         for item in dirs {

From 20c279df01562f3328acb0941ed2f87468bfb0df Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 21 Mar 2022 10:33:41 -0400
Subject: [PATCH 331/775] Update to latest containers-image-proxy API

This works on top of
https://github.com/containers/containers-image-proxy-rs/pull/29
---
 lib/Cargo.toml                     | 3 +--
 lib/src/container/store.rs         | 4 +---
 lib/src/container/unencapsulate.rs | 7 +++----
 3 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 067b67795..dc24fdded 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -10,7 +10,7 @@ version = "0.6.5"
 
 [dependencies]
 anyhow = "1.0"
-containers-image-proxy = "0.4.0"
+containers-image-proxy = "0.5.0"
 async-compression = { version = "0.3", features = ["gzip", "tokio"] }
 bitflags = "1"
@@ -59,4 +59,3 @@ features = ["dox"]
 [features]
 dox = ["ostree/dox"]
 internal-testing-api = ["sh-inline", "indoc"]
-proxy_v0_2_3 = ["containers-image-proxy/proxy_v0_2_3"]
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index b3dd11da3..c3dc6f783 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -218,9 +218,7 @@ impl LayeredImageImporter {
             _ => {}
         }
 
-        let (manifest_digest, manifest_bytes) = self.proxy.fetch_manifest(&self.proxy_img).await?;
-        let manifest: oci_image::ImageManifest =
-            serde_json::from_slice(&manifest_bytes).context("Parsing image manifest")?;
+        let (manifest_digest, manifest) = self.proxy.fetch_manifest(&self.proxy_img).await?;
         let new_imageid = manifest.config().digest().as_str();
 
         // Query for previous stored state
diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs
index 0f728b7ad..321349a0b 100644
--- a/lib/src/container/unencapsulate.rs
+++ b/lib/src/container/unencapsulate.rs
@@ -93,9 +93,9 @@ async fn fetch_manifest_impl(
     imgref: &OstreeImageReference,
 ) -> Result<(oci_spec::image::ImageManifest, String)> {
     let oi = &proxy.open_image(&imgref.imgref.to_string()).await?;
-    let (digest, raw_manifest) = proxy.fetch_manifest(oi).await?;
+    let (digest, manifest) = proxy.fetch_manifest(oi).await?;
     proxy.close_image(oi).await?;
-    Ok((serde_json::from_slice(&raw_manifest)?, digest))
+    Ok((manifest, digest))
 }
 
 /// Download the manifest for a target image and its sha256 digest.
@@ -182,8 +182,7 @@ pub async fn unencapsulate(
 ) -> Result<Import> {
     let mut proxy = ImageProxy::new().await?;
     let oi = &proxy.open_image(&imgref.imgref.to_string()).await?;
-    let (image_digest, raw_manifest) = proxy.fetch_manifest(oi).await?;
-    let manifest = serde_json::from_slice(&raw_manifest)?;
+    let (image_digest, manifest) = proxy.fetch_manifest(oi).await?;
     let ostree_commit =
         unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, oi, &manifest, options, false)
             .await?;

From e19978da4aaac696bd98a567abe0627db40c44d1 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Thu, 17 Mar 2022 18:21:30 -0400
Subject: [PATCH 332/775] tar: Add an API to import an object set

---
 lib/src/tar/import.rs | 179 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 162 insertions(+), 17 deletions(-)

diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index 9109ce1a0..c99c5c334 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -9,6 +9,7 @@ use gio::glib;
 use gio::prelude::*;
 use glib::Variant;
 use ostree::gio;
+use std::collections::BTreeSet;
 use std::collections::HashMap;
 use std::convert::TryInto;
 use std::io::prelude::*;
@@ -38,8 +39,13 @@ struct ImportStats {
     symlinks: u32,
 }
 
+enum ImporterMode {
+    Commit(Option<String>),
+    ObjectSet(BTreeSet<String>),
+}
+
 /// Importer machine.
-struct Importer {
+pub(crate) struct Importer {
     repo: ostree::Repo,
     remote: Option<String>,
     // Cache of xattrs, keyed by their content checksum.
     xattrs: HashMap<String, glib::Variant>,
     // Reusable buffer for xattrs references. It maps a file checksum (.0)
     // to an xattrs checksum (.1) in the `xattrs` cache above.
     next_xattrs: Option<(String, String)>,
+
     // Reusable buffer for reads.  See also https://github.com/rust-lang/rust/issues/78485
     buf: Vec<u8>,
 
     stats: ImportStats,
+
+    /// Additional state depending on whether we're importing an object set or a commit.
+    data: ImporterMode,
 }
 
 /// Validate size/type of a tar header for OSTree metadata object.
@@ -151,7 +161,8 @@ fn parse_xattrs_link_target(path: &Utf8Path) -> Result<String> {
 }
 
 impl Importer {
-    fn new(repo: &ostree::Repo, remote: Option<String>) -> Self {
+    /// Create an importer which will import an OSTree commit object.
+    pub(crate) fn new_for_commit(repo: &ostree::Repo, remote: Option<String>) -> Self {
         Self {
             repo: repo.clone(),
             remote,
             buf: vec![0u8; 16384],
             xattrs: Default::default(),
             next_xattrs: None,
             stats: Default::default(),
+            data: ImporterMode::Commit(None),
         }
     }
 
+    /// Create an importer to write an "object set"; a chunk of objects which is
+    /// usually streamed from a separate storage system, such as an OCI container image layer.
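+    /// No commit object is expected in this mode; `finish_import_object_set` later synthesizes one.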
+    pub(crate) fn new_for_object_set(repo: &ostree::Repo) -> Self {
+        Self {
+            repo: repo.clone(),
+            remote: None,
+            buf: vec![0u8; 16384],
+            xattrs: Default::default(),
+            next_xattrs: None,
+            stats: Default::default(),
+            data: ImporterMode::ObjectSet(Default::default()),
+        }
+    }
@@ -375,13 +401,35 @@ impl Importer {
 
         match suffix {
             "commit" => Err(anyhow!("Found multiple commit objects")),
-            "file" => self.import_content_object(entry, &checksum, cancellable),
+            "file" => {
+                self.import_content_object(entry, &checksum, cancellable)?;
+                // Track the objects we wrote
+                match &mut self.data {
+                    ImporterMode::ObjectSet(imported) => {
+                        if let Some(p) = imported.replace(checksum) {
+                            anyhow::bail!("Duplicate object: {}", p);
+                        }
+                    }
+                    ImporterMode::Commit(_) => {}
+                }
+                Ok(())
+            }
             "file-xattrs" => self.process_file_xattrs(entry, checksum),
             "file-xattrs-link" => self.process_file_xattrs_link(entry, checksum),
             "xattrs" => self.process_xattr_ref(entry, checksum),
             kind => {
                 let objtype = objtype_from_string(kind)
                     .ok_or_else(|| anyhow!("Invalid object type {}", kind))?;
+                match &mut self.data {
+                    ImporterMode::ObjectSet(_) => {
+                        anyhow::bail!(
+                            "Found metadata object {}.{} in object set mode",
+                            checksum,
+                            objtype
+                        );
+                    }
+                    ImporterMode::Commit(_) => {}
+                }
                 self.import_metadata(entry, &checksum, objtype)
             }
         }
@@ -539,17 +587,46 @@ impl Importer {
         Ok(xattrs_checksum)
     }
 
-    fn import(
-        mut self,
+    fn import_objects_impl<'a>(
+        &mut self,
+        ents: impl Iterator<Item = Result<(tar::Entry<'a, impl Read + Send + Unpin + 'a>, Utf8PathBuf)>>,
+        cancellable: Option<&gio::Cancellable>,
+    ) -> Result<()> {
+        for entry in ents {
+            let (entry, path) = entry?;
+            if let Ok(p) = path.strip_prefix("objects/") {
+                self.import_object(entry, p, cancellable)?;
+            } else if path.strip_prefix("xattrs/").is_ok() {
+                self.process_split_xattrs_content(entry)?;
+            }
+        }
+        Ok(())
+    }
+
+    pub(crate) fn import_objects(
+        &mut self,
         archive: &mut tar::Archive<impl Read + Send + Unpin>,
         cancellable: Option<&gio::Cancellable>,
-    ) -> Result<String> {
+    ) -> Result<()> {
+        let ents = archive.entries()?.filter_map(|e| match e {
+            Ok(e) => Self::filter_entry(e).transpose(),
+            Err(e) => Some(Err(anyhow::Error::msg(e))),
+        });
+        self.import_objects_impl(ents, cancellable)
+    }
+
+    pub(crate) fn import_commit(
+        &mut self,
+        archive: &mut tar::Archive<impl Read + Send + Unpin>,
+        cancellable: Option<&gio::Cancellable>,
+    ) -> Result<()> {
+        // This can only be invoked once
+        assert!(matches!(self.data, ImporterMode::Commit(None)));
         // Create an iterator that skips over directories; we just care about the file names.
         let mut ents = archive.entries()?.filter_map(|e| match e {
            Ok(e) => Self::filter_entry(e).transpose(),
            Err(e) => Some(Err(anyhow::Error::msg(e))),
         });
 
         // Read the commit object.
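         // It must be the first entry in the tar stream.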
         let (commit_ent, commit_path) = ents
             .next()
@@ -642,18 +719,63 @@ impl Importer {
             }
         }
+        match &mut self.data {
+            ImporterMode::Commit(c) => {
+                c.replace(checksum);
+            }
+            ImporterMode::ObjectSet(_) => unreachable!(),
+        }
 
-        for entry in ents {
-            let (entry, path) = entry?;
+        self.import_objects_impl(ents, cancellable)?;
 
-            if let Ok(p) = path.strip_prefix("objects/") {
-                self.import_object(entry, p, cancellable)?;
-            } else if path.strip_prefix("xattrs/").is_ok() {
-                self.process_split_xattrs_content(entry)?;
-            }
+        Ok(())
+    }
+
+    pub(crate) fn finish_import_commit(self) -> String {
+        tracing::debug!("Import stats: {:?}", self.stats);
+        match self.data {
+            ImporterMode::Commit(c) => c.unwrap(),
+            ImporterMode::ObjectSet(_) => unreachable!(),
         }
+    }
 
-        Ok(checksum)
+    pub(crate) fn default_dirmeta() -> glib::Variant {
+        let finfo = gio::FileInfo::new();
+        finfo.set_attribute_uint32("unix::uid", 0);
+        finfo.set_attribute_uint32("unix::gid", 0);
+        finfo.set_attribute_uint32("unix::mode", libc::S_IFDIR | 0o755);
+        // SAFETY: TODO: This is not a nullable return, fix it in ostree
+        ostree::create_directory_metadata(&finfo, None).unwrap()
+    }
+
+    pub(crate) fn finish_import_object_set(self) -> Result<String> {
+        let objset = match self.data {
+            ImporterMode::Commit(_) => unreachable!(),
+            ImporterMode::ObjectSet(s) => s,
+        };
+        tracing::debug!("Imported {} content objects", objset.len());
+        let mtree = ostree::MutableTree::new();
+        for checksum in objset.into_iter() {
+            mtree.replace_file(&checksum, &checksum)?;
+        }
+        let dirmeta = self.repo.write_metadata(
+            ostree::ObjectType::DirMeta,
+            None,
+            &Self::default_dirmeta(),
+            gio::NONE_CANCELLABLE,
+        )?;
+        mtree.set_metadata_checksum(&dirmeta.to_hex());
+        let tree = self.repo.write_mtree(&mtree, gio::NONE_CANCELLABLE)?;
+        let commit = self.repo.write_commit_with_time(
+            None,
+            None,
+            None,
+            None,
+            tree.downcast_ref().unwrap(),
+            0,
+            gio::NONE_CANCELLABLE,
+        )?;
+        Ok(commit.to_string())
     }
 }
@@ -689,8 +811,9 @@ pub async fn import_tar(
     crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| {
         let mut archive = tar::Archive::new(src);
         let txn = repo.auto_transaction(Some(cancellable))?;
-        let importer = Importer::new(&repo, options.remote);
-        let checksum = importer.import(&mut archive, Some(cancellable))?;
+        let mut importer = Importer::new_for_commit(&repo, options.remote);
+        importer.import_commit(&mut archive, Some(cancellable))?;
+        let checksum = importer.finish_import_commit();
         txn.commit(Some(cancellable))?;
         repo.mark_commit_partial(&checksum, false)?;
         Ok::<_, anyhow::Error>(checksum)
     })
     .await
 }
 
+/// Read the contents of a tarball and import the content objects inside.
+/// Generates a synthetic commit object referencing them.
+#[instrument(skip(repo, src))]
+pub async fn import_tar_objects(
+    repo: &ostree::Repo,
+    src: impl tokio::io::AsyncRead + Send + Unpin + 'static,
+) -> Result<String> {
+    let src = tokio_util::io::SyncIoBridge::new(src);
+    let repo = repo.clone();
+    // The tar code we use today is blocking, so we spawn a thread.
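+    // SyncIoBridge adapts the async source for the blocking tar reader on that thread.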
+    crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| {
+        let mut archive = tar::Archive::new(src);
+        let mut importer = Importer::new_for_object_set(&repo);
+        let txn = repo.auto_transaction(Some(cancellable))?;
+        importer.import_objects(&mut archive, Some(cancellable))?;
+        let r = importer.finish_import_object_set()?;
+        txn.commit(Some(cancellable))?;
+        Ok::<_, anyhow::Error>(r)
+    })
+    .await
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;

From 77b40dbcba83c054b7cd16f76f14060b7d724637 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 13 Dec 2021 20:43:13 -0500
Subject: [PATCH 333/775] Support for split layers

Closes: https://github.com/ostreedev/ostree-rs-ext/issues/69

This is initial basic support for splitting files (objects) from a
commit into separate container image layers, and reassembling those
layers into a commit on the client.

We retain our present logic around e.g. GPG signature verification.

There's a new `chunking.rs` file which has logic to automatically
factor out things like the kernel/initramfs and large files.

In order to fetch these images client side, we now heavily
intermix/cross the previous code for fetching non-ostree layers.
---
 lib/src/chunking.rs                           | 505 ++++++++++++++++++
 lib/src/cli.rs                                |  24 +-
 lib/src/container/deploy.rs                   |   9 +-
 lib/src/container/encapsulate.rs              | 129 ++++-
 lib/src/container/mod.rs                      |   2 +
 lib/src/container/store.rs                    | 303 ++++++++---
 lib/src/container/unencapsulate.rs            | 115 +---
 lib/src/fixture.rs                            |  40 ++
 .../fedora-coreos-contentmeta.json.gz         | Bin 0 -> 10233 bytes
 lib/src/lib.rs                                |   1 +
 lib/src/tar/export.rs                         |  70 +++
 lib/tests/it/main.rs                          | 197 +++++--
 12 files changed, 1155 insertions(+), 240 deletions(-)
 create mode 100644 lib/src/chunking.rs
 create mode 100644 lib/src/fixtures/fedora-coreos-contentmeta.json.gz

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
new file mode 100644
index 000000000..7a128baee
--- /dev/null
+++ b/lib/src/chunking.rs
@@ -0,0 +1,505 @@
+//! Split an OSTree commit into separate chunks
+
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use std::borrow::{Borrow, Cow};
+use std::collections::{BTreeMap, BTreeSet, HashMap};
+use std::convert::TryInto;
+use std::fmt::Write;
+use std::num::NonZeroU32;
+use std::rc::Rc;
+
+use crate::objectsource::{ContentID, ObjectMeta, ObjectMetaMap, ObjectSourceMeta};
+use crate::objgv::*;
+use anyhow::{anyhow, Result};
+use camino::Utf8PathBuf;
+use gvariant::aligned_bytes::TryAsAligned;
+use gvariant::{Marker, Structure};
+use ostree::{gio, glib};
+use serde::{Deserialize, Serialize};
+
+/// Maximum number of layers (chunks) we will use.
+// We take half the limit of 128.
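+// Staying well under that limit also leaves room for additional layers, e.g. in derived images.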
+// https://github.com/ostreedev/ostree-rs-ext/issues/69
+pub(crate) const MAX_CHUNKS: u32 = 64;
+
+type RcStr = Rc<str>;
+
+#[derive(Debug, Default)]
+pub(crate) struct Chunk {
+    pub(crate) name: String,
+    pub(crate) content: BTreeMap<RcStr, (u64, Vec<Utf8PathBuf>)>,
+    pub(crate) size: u64,
+}
+
+#[derive(Debug)]
+pub(crate) enum Meta {
+    DirTree(RcStr),
+    DirMeta(RcStr),
+}
+
+impl Meta {
+    pub(crate) fn objtype(&self) -> ostree::ObjectType {
+        match self {
+            Meta::DirTree(_) => ostree::ObjectType::DirTree,
+            Meta::DirMeta(_) => ostree::ObjectType::DirMeta,
+        }
+    }
+
+    pub(crate) fn checksum(&self) -> &str {
+        match self {
+            Meta::DirTree(v) => v,
+            Meta::DirMeta(v) => v,
+        }
+    }
+}
+
+#[derive(Debug, Deserialize, Serialize)]
+/// Object metadata, but with additional size data
+pub struct ObjectSourceMetaSized {
+    /// The original metadata
+    #[serde(flatten)]
+    meta: ObjectSourceMeta,
+    /// Total size of associated objects
+    size: u64,
+}
+
+/// Extend content source metadata with sizes.
+#[derive(Debug)]
+pub struct ObjectMetaSized {
+    /// Mapping from content object to source.
+    pub map: ObjectMetaMap,
+    /// Computed sizes of each content source
+    pub sizes: Vec<ObjectSourceMetaSized>,
+}
+
+impl ObjectMetaSized {
+    /// Given object metadata and a repo, compute the size of each content source.
+    pub fn compute_sizes(repo: &ostree::Repo, meta: ObjectMeta) -> Result<ObjectMetaSized> {
+        let cancellable = gio::NONE_CANCELLABLE;
+        // Destructure into component parts; we'll create the version with sizes
+        let map = meta.map;
+        let mut set = meta.set;
+        // Maps content id -> total size of associated objects
+        let mut sizes = HashMap::<&str, u64>::new();
+        // Populate two mappings above, iterating over the object -> contentid mapping
+        for (checksum, contentid) in map.iter() {
+            let (_, finfo, _) = repo.load_file(checksum, cancellable)?;
+            let finfo = finfo.unwrap();
+            let sz = sizes.entry(contentid).or_default();
+            *sz += finfo.size() as u64;
+        }
+        // Combine data from sizes and the content mapping.
+        let sized: Result<Vec<_>> = sizes
+            .into_iter()
+            .map(|(id, size)| -> Result<ObjectSourceMetaSized> {
+                set.take(id)
+                    .ok_or_else(|| anyhow!("Failed to find {} in content set", id))
+                    .map(|meta| ObjectSourceMetaSized { meta, size })
+            })
+            .collect();
+        let mut sizes = sized?;
+        sizes.sort_by(|a, b| b.size.cmp(&a.size));
+        Ok(ObjectMetaSized { map, sizes })
+    }
+}
+
+/// How to split up an ostree commit into "chunks" - designed to map to container image layers.
+#[derive(Debug, Default)]
+pub struct Chunking {
+    pub(crate) metadata_size: u64,
+    pub(crate) commit: Box<str>,
+    pub(crate) meta: Vec<Meta>,
+    pub(crate) remainder: Chunk,
+    pub(crate) chunks: Vec<Chunk>,
+
+    pub(crate) max: u32,
+
+    processed_mapping: bool,
+    /// Number of components (e.g. packages) provided originally
+    pub(crate) n_provided_components: u32,
+    /// The above, but only ones with non-zero size
+    pub(crate) n_sized_components: u32,
+}
+
+#[derive(Default)]
+struct Generation {
+    path: Utf8PathBuf,
+    metadata_size: u64,
+    meta: Vec<Meta>,
+    dirtree_found: BTreeSet<RcStr>,
+    dirmeta_found: BTreeSet<RcStr>,
+}
+
+fn push_dirmeta(repo: &ostree::Repo, gen: &mut Generation, checksum: &str) -> Result<()> {
+    if gen.dirtree_found.contains(checksum) {
+        return Ok(());
+    }
+    let checksum = RcStr::from(checksum);
+    gen.dirmeta_found.insert(RcStr::clone(&checksum));
+    let child_v = repo.load_variant(ostree::ObjectType::DirMeta, checksum.borrow())?;
+    gen.metadata_size += child_v.data_as_bytes().as_ref().len() as u64;
+    gen.meta.push(Meta::DirMeta(checksum));
+    Ok(())
+}
+
+fn push_dirtree(
+    repo: &ostree::Repo,
+    gen: &mut Generation,
+    checksum: &str,
+) -> Result<Option<glib::Variant>> {
+    if gen.dirtree_found.contains(checksum) {
+        return Ok(None);
+    }
+    let child_v = repo.load_variant(ostree::ObjectType::DirTree, checksum)?;
+    let checksum = RcStr::from(checksum);
+    gen.dirtree_found.insert(RcStr::clone(&checksum));
+    gen.meta.push(Meta::DirTree(checksum));
+    gen.metadata_size += child_v.data_as_bytes().as_ref().len() as u64;
+    Ok(Some(child_v))
+}
+
+fn generate_chunking_recurse(
+    repo: &ostree::Repo,
+    gen: &mut Generation,
+    chunk: &mut Chunk,
+    dt: &glib::Variant,
+) -> Result<()> {
+    let dt = dt.data_as_bytes();
+    let dt = dt.try_as_aligned()?;
+    let dt = gv_dirtree!().cast(dt);
+    let (files, dirs) = dt.to_tuple();
+    // A reusable buffer to avoid heap allocating these
+    let mut hexbuf = [0u8; 64];
+    for file in files {
+        let (name, csum) = file.to_tuple();
+        let fpath = gen.path.join(name.to_str());
+        hex::encode_to_slice(csum, &mut hexbuf)?;
+        let checksum = std::str::from_utf8(&hexbuf)?;
+        let (_, meta, _) = repo.load_file(checksum, gio::NONE_CANCELLABLE)?;
+        // SAFETY: We know this API returns this value; it only has a return nullable because the
+        // caller can pass NULL to skip it.
+        let meta = meta.unwrap();
+        let size = meta.size() as u64;
+        let entry = chunk.content.entry(RcStr::from(checksum)).or_default();
+        entry.0 = size;
+        let first = entry.1.is_empty();
+        if first {
+            chunk.size += size;
+        }
+        entry.1.push(fpath);
+    }
+    for item in dirs {
+        let (name, contents_csum, meta_csum) = item.to_tuple();
+        let name = name.to_str();
+        // Extend our current path
+        gen.path.push(name);
+        hex::encode_to_slice(contents_csum, &mut hexbuf)?;
+        let checksum_s = std::str::from_utf8(&hexbuf)?;
+        if let Some(child_v) = push_dirtree(repo, gen, checksum_s)? {
+            generate_chunking_recurse(repo, gen, chunk, &child_v)?;
+        }
+        hex::encode_to_slice(meta_csum, &mut hexbuf)?;
+        let checksum_s = std::str::from_utf8(&hexbuf)?;
+        push_dirmeta(repo, gen, checksum_s)?;
+        // We did a push above, so pop must succeed.
+        assert!(gen.path.pop());
+    }
+    Ok(())
+}
+
+impl Chunk {
+    fn new(name: &str) -> Self {
+        Chunk {
+            name: name.to_string(),
+            ..Default::default()
+        }
+    }
+
+    fn move_obj(&mut self, dest: &mut Self, checksum: &str) -> bool {
+        // In most cases, we expect the object to exist in the source.  However, it's
+        // convenient here to simply ignore objects which were already moved into
+        // a chunk.
+        if let Some((name, (size, paths))) = self.content.remove_entry(checksum) {
+            let v = dest.content.insert(name, (size, paths));
+            debug_assert!(v.is_none());
+            self.size -= size;
+            dest.size += size;
+            true
+        } else {
+            false
+        }
+    }
+}
+
+impl Chunking {
+    /// Generate an initial single chunk.
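+    /// Everything initially lands in the single "remainder" chunk; process_mapping() then splits it up.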
+    pub fn new(repo: &ostree::Repo, rev: &str) -> Result<Self> {
+        // Find the target commit
+        let rev = repo.require_rev(rev)?;
+
+        // Load and parse the commit object
+        let (commit_v, _) = repo.load_commit(&rev)?;
+        let commit_v = commit_v.data_as_bytes();
+        let commit_v = commit_v.try_as_aligned()?;
+        let commit = gv_commit!().cast(commit_v);
+        let commit = commit.to_tuple();
+
+        // Load it all into a single chunk
+        let mut gen = Generation {
+            path: Utf8PathBuf::from("/"),
+            ..Default::default()
+        };
+        let mut chunk: Chunk = Default::default();
+
+        // Find the root directory tree
+        let contents_checksum = &hex::encode(commit.6);
+        let contents_v = repo.load_variant(ostree::ObjectType::DirTree, contents_checksum)?;
+        push_dirtree(repo, &mut gen, contents_checksum)?;
+        let meta_checksum = &hex::encode(commit.7);
+        push_dirmeta(repo, &mut gen, meta_checksum.as_str())?;
+
+        generate_chunking_recurse(repo, &mut gen, &mut chunk, &contents_v)?;
+
+        let chunking = Chunking {
+            commit: Box::from(rev.as_str()),
+            metadata_size: gen.metadata_size,
+            meta: gen.meta,
+            remainder: chunk,
+            ..Default::default()
+        };
+        Ok(chunking)
+    }
+
+    /// Generate a chunking from an object mapping.
+    pub fn from_mapping(
+        repo: &ostree::Repo,
+        rev: &str,
+        meta: ObjectMetaSized,
+        max_layers: Option<NonZeroU32>,
+    ) -> Result<Self> {
+        let mut r = Self::new(repo, rev)?;
+        r.process_mapping(meta, max_layers)?;
+        Ok(r)
+    }
+
+    fn remaining(&self) -> u32 {
+        self.max.saturating_sub(self.chunks.len() as u32)
+    }
+
+    /// Given metadata about which objects are owned by a particular content source,
+    /// generate chunks that group together those objects.
+    #[allow(clippy::or_fun_call)]
+    pub fn process_mapping(
+        &mut self,
+        meta: ObjectMetaSized,
+        max_layers: Option<NonZeroU32>,
+    ) -> Result<()> {
+        self.max = max_layers
+            .unwrap_or(NonZeroU32::new(MAX_CHUNKS).unwrap())
+            .get();
+
+        let sizes = &meta.sizes;
+        // It doesn't make sense to handle multiple mappings
+        assert!(!self.processed_mapping);
+        self.processed_mapping = true;
+        let remaining = self.remaining();
+        if remaining == 0 {
+            return Ok(());
+        }
+
+        // Reverses `contentmeta.map` i.e. contentid -> Vec<checksum>
+        let mut rmap = HashMap::<ContentID, Vec<&String>>::new();
+        for (checksum, contentid) in meta.map.iter() {
+            rmap.entry(Rc::clone(contentid)).or_default().push(checksum);
+        }
+
+        // Safety: Let's assume no one has over 4 billion components.
+        // Safety: Let's assume no one has over 4 billion components.
+        self.n_provided_components = meta.sizes.len().try_into().unwrap();
+        self.n_sized_components = sizes
+            .iter()
+            .filter(|v| v.size > 0)
+            .count()
+            .try_into()
+            .unwrap();
+
+        // TODO: Compute bin packing in a better way
+        let packing = basic_packing(sizes, NonZeroU32::new(self.max).unwrap());
+
+        for bin in packing.into_iter() {
+            let first = bin[0];
+            let first_name = &*first.meta.name;
+            let name = match bin.len() {
+                0 => unreachable!(),
+                1 => Cow::Borrowed(first_name),
+                2..=5 => {
+                    let r = bin.iter().map(|v| &*v.meta.name).skip(1).fold(
+                        String::from(first_name),
+                        |mut acc, v| {
+                            write!(acc, " and {}", v).unwrap();
+                            acc
+                        },
+                    );
+                    Cow::Owned(r)
+                }
+                n => Cow::Owned(format!("{} components", n)),
+            };
+            let mut chunk = Chunk::new(&*name);
+            for szmeta in bin {
+                for &obj in rmap.get(&szmeta.meta.identifier).unwrap() {
+                    self.remainder.move_obj(&mut chunk, obj.as_str());
+                }
+            }
+            if !chunk.content.is_empty() {
+                self.chunks.push(chunk);
+            }
+        }
+
+        assert_eq!(self.remainder.content.len(), 0);
+
+        Ok(())
+    }
+
+    pub(crate) fn take_chunks(&mut self) -> Vec<Chunk> {
+        let mut r = Vec::new();
+        std::mem::swap(&mut self.chunks, &mut r);
+        r
+    }
+
+    /// Print information about chunking to standard output.
+    pub fn print(&self) {
+        println!("Metadata: {}", glib::format_size(self.metadata_size));
+        if self.n_provided_components > 0 {
+            println!(
+                "Components: provided={} sized={}",
+                self.n_provided_components, self.n_sized_components
+            );
+        }
+        for (n, chunk) in self.chunks.iter().enumerate() {
+            let sz = glib::format_size(chunk.size);
+            println!(
+                "Chunk {}: \"{}\": objects:{} size:{}",
+                n,
+                chunk.name,
+                chunk.content.len(),
+                sz
+            );
+        }
+        if !self.remainder.content.is_empty() {
+            let sz = glib::format_size(self.remainder.size);
+            println!(
+                "Remainder: \"{}\": objects:{} size:{}",
+                self.remainder.name,
+                self.remainder.content.len(),
+                sz
+            );
+        }
+    }
+}
+
+type ChunkedComponents<'a> = Vec<&'a ObjectSourceMetaSized>;
+
+fn components_size(components: &[&ObjectSourceMetaSized]) -> u64 {
+    components.iter().map(|k| k.size).sum()
+}
+
+/// Compute the total size of a packing
+#[cfg(test)]
+fn packing_size(packing: &[ChunkedComponents]) -> u64 {
+    packing.iter().map(|v| components_size(&v)).sum()
+}
+
+fn sort_packing(packing: &mut [ChunkedComponents]) {
+    packing.sort_by(|a, b| {
+        let a: u64 = components_size(a);
+        let b: u64 = components_size(b);
+        b.cmp(&a)
+    });
+}
+
+/// Given a set of components with size metadata (e.g. boxes of a certain size)
+/// and a number of bins (possible container layers) to use, determine which components
+/// go in which bin. This algorithm is pretty simple:
+///
+/// - Order by size
+/// - If we have fewer components than bins, we're done
+/// - Take the "tail" (all components past the maximum), and group it by source package
+/// - If we now have fewer components than bins, we're done
+/// - Otherwise, merge the whole tail together (this is the overly simplistic part)
+fn basic_packing(components: &[ObjectSourceMetaSized], bins: NonZeroU32) -> Vec<ChunkedComponents> {
+    // let total_size: u64 = components.iter().map(|v| v.size).sum();
+    // let avg_size: u64 = total_size / components.len() as u64;
+    let mut r = Vec::new();
+    // And handle the easy case of enough bins for all components
+    // TODO: Possibly try to split off large files?
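+    // Worked example with made-up sizes (assuming size-ordered input, per the
+    // doc comment above): components [10, 9, 8, 3, 2, 1] with bins = 3 first
+    // yields singleton bins [10] [9] [8] and a tail [3, 2, 1]; the tail is
+    // regrouped by srcid and, if the count still exceeds the bin count,
+    // everything from the last allowed bin onward is merged into one final
+    // bin, giving [10] [9] [8, 3, 2, 1].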
+    if components.len() <= bins.get() as usize {
+        r.extend(components.iter().map(|v| vec![v]));
+        return r;
+    }
+    // Create a mutable copy
+    let mut components: Vec<_> = components.iter().collect();
+    // Iterate over the component tail, folding by source id
+    let mut by_src = HashMap::<_, Vec<&ObjectSourceMetaSized>>::new();
+    // Take the tail off components, then build up mapping from srcid -> Vec<component>
+    for component in components.split_off(bins.get() as usize) {
+        by_src
+            .entry(&component.meta.srcid)
+            .or_default()
+            .push(component);
+    }
+    // Take all the non-tail (largest) components, and append them first
+    r.extend(components.into_iter().map(|v| vec![v]));
+    // Add the tail
+    r.extend(by_src.into_values());
+    // And order the new list
+    sort_packing(&mut r);
+    // It's possible that merging components gave us enough space; if so
+    // we're done!
+    if r.len() <= bins.get() as usize {
+        return r;
+    }
+
+    let last = (bins.get().checked_sub(1).unwrap()) as usize;
+    // The "tail" is components past our maximum. For now, we simply group all of that together as a single unit.
+    if let Some(tail) = r.drain(last..).reduce(|mut a, b| {
+        a.extend(b.into_iter());
+        a
+    }) {
+        r.push(tail);
+    }
+
+    assert!(r.len() <= bins.get() as usize);
+    r
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    const FCOS_CONTENTMETA: &[u8] = include_bytes!("fixtures/fedora-coreos-contentmeta.json.gz");
+
+    #[test]
+    fn test_packing_basics() -> Result<()> {
+        // null cases
+        for v in [1u32, 7].map(|v| NonZeroU32::new(v).unwrap()) {
+            assert_eq!(basic_packing(&[], v).len(), 0);
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn test_packing_fcos() -> Result<()> {
+        let contentmeta: Vec<ObjectSourceMetaSized> =
+            serde_json::from_reader(flate2::read::GzDecoder::new(FCOS_CONTENTMETA))?;
+        let total_size = contentmeta.iter().map(|v| v.size).sum::<u64>();
+
+        let packing = basic_packing(&contentmeta, NonZeroU32::new(MAX_CHUNKS).unwrap());
+        assert!(!contentmeta.is_empty());
+        // We should fit into the assigned chunk size
+        assert_eq!(packing.len() as u32, MAX_CHUNKS);
+        // And verify that the sizes match
+        let packed_total_size = packing_size(&packing);
+        assert_eq!(total_size, packed_total_size);
+        Ok(())
+    }
+}
diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index ad61e45bd..fffa2a562 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -17,9 +17,10 @@ use structopt::StructOpt;
 use tokio_stream::StreamExt;
 
 use crate::commit::container_commit;
-use crate::container::store::{LayeredImageImporter, PrepareResult};
-use crate::container::{self as ostree_container, UnencapsulationProgress};
+use crate::container as ostree_container;
 use crate::container::{Config, ImageReference, OstreeImageReference, UnencapsulateOptions};
+use ostree_container::store::{ImageImporter, PrepareResult};
+use ostree_container::UnencapsulationProgress;
 
 /// Parse an [`OstreeImageReference`] from a CLI argument.
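 ///
 /// For example, the form used elsewhere in this series (the image itself is
 /// hypothetical):
 ///
 ///     ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest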
pub fn parse_imgref(s: &str) -> Result { @@ -257,7 +258,7 @@ struct ImaSignOpts { /// Options for internal testing #[derive(Debug, StructOpt)] enum TestingOpts { - // Detect the current environment + /// Detect the current environment DetectEnv, /// Execute integration tests, assuming mutable environment Run, @@ -413,7 +414,8 @@ async fn container_export( copy_meta_keys, ..Default::default() }; - let pushed = crate::container::encapsulate(repo, rev, &config, Some(opts), imgref).await?; + let pushed = + crate::container::encapsulate(repo, rev, &config, Some(opts), None, imgref).await?; println!("{}", pushed); Ok(()) } @@ -431,7 +433,7 @@ async fn container_store( imgref: &OstreeImageReference, proxyopts: ContainerProxyOpts, ) -> Result<()> { - let mut imp = LayeredImageImporter::new(repo, imgref, proxyopts.into()).await?; + let mut imp = ImageImporter::new(repo, imgref, proxyopts.into()).await?; let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { println!("No changes in {} => {}", imgref, c.merge_commit); @@ -439,17 +441,7 @@ async fn container_store( } PrepareResult::Ready(r) => r, }; - if prep.base_layer.commit.is_none() { - let size = crate::glib::format_size(prep.base_layer.size()); - println!( - "Downloading base layer: {} ({})", - prep.base_layer.digest(), - size - ); - } else { - println!("Using base: {}", prep.base_layer.digest()); - } - for layer in prep.layers.iter() { + for layer in prep.all_layers() { if layer.commit.is_some() { println!("Using layer: {}", layer.digest()); } else { diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 39b2b688a..0137ab795 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -41,12 +41,9 @@ pub async fn deploy( let cancellable = ostree::gio::NONE_CANCELLABLE; let options = options.unwrap_or_default(); let repo = &sysroot.repo().unwrap(); - let mut imp = super::store::LayeredImageImporter::new( - repo, - imgref, - options.proxy_cfg.unwrap_or_default(), - ) - .await?; + let mut imp = + super::store::ImageImporter::new(repo, imgref, options.proxy_cfg.unwrap_or_default()) + .await?; if let Some(target) = options.target_imgref { imp.set_target(target); } diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 6a01897ff..efc7d7f6a 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -3,6 +3,7 @@ use super::ocidir::OciDir; use super::{ocidir, OstreeImageReference, Transport}; use super::{ImageReference, SignatureSource, OSTREE_COMMIT_LABEL}; +use crate::chunking::{Chunking, ObjectMetaSized}; use crate::container::skopeo; use crate::tar as ostree_tar; use anyhow::{anyhow, Context, Result}; @@ -12,6 +13,7 @@ use oci_spec::image as oci_image; use ostree::gio; use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; +use std::num::NonZeroU32; use std::path::Path; use std::rc::Rc; use tracing::{instrument, Level}; @@ -70,6 +72,46 @@ fn commit_meta_to_labels<'a>( Ok(()) } +/// Write an ostree commit to an OCI blob +#[context("Writing ostree root to blob")] +#[allow(clippy::too_many_arguments)] +fn export_chunked( + repo: &ostree::Repo, + ociw: &mut OciDir, + manifest: &mut oci_image::ImageManifest, + imgcfg: &mut oci_image::ImageConfiguration, + labels: &mut HashMap, + mut chunking: Chunking, + compression: Option, + description: &str, +) -> Result<()> { + let layers: Result> = chunking + .take_chunks() + .into_iter() + .enumerate() + .map(|(i, chunk)| -> Result<_> { + let mut w = ociw.create_layer(compression)?; 
+ ostree_tar::export_chunk(repo, &chunk, &mut w) + .with_context(|| format!("Exporting chunk {}", i))?; + let w = w.into_inner()?; + Ok((w.complete()?, chunk.name)) + }) + .collect(); + for (layer, name) in layers? { + ociw.push_layer(manifest, imgcfg, layer, &name); + } + let mut w = ociw.create_layer(compression)?; + ostree_tar::export_final_chunk(repo, &chunking, &mut w)?; + let w = w.into_inner()?; + let final_layer = w.complete()?; + labels.insert( + crate::container::OSTREE_DIFFID_LABEL.into(), + format!("sha256:{}", final_layer.uncompressed_sha256), + ); + ociw.push_layer(manifest, imgcfg, final_layer, description); + Ok(()) +} + /// Generate an OCI image from a given ostree root #[context("Building oci")] fn build_oci( @@ -78,6 +120,7 @@ fn build_oci( ocidir_path: &Path, config: &Config, opts: ExportOpts, + contentmeta: Option, ) -> Result { // Explicitly error if the target exists std::fs::create_dir(ocidir_path).context("Creating OCI dir")?; @@ -109,30 +152,21 @@ fn build_oci( let mut manifest = ocidir::new_empty_manifest().build().unwrap(); + let chunking = contentmeta + .map(|meta| crate::chunking::Chunking::from_mapping(repo, commit, meta, opts.max_layers)) + .transpose()?; + if let Some(version) = commit_meta.lookup_value("version", Some(glib::VariantTy::new("s").unwrap())) { let version = version.str().unwrap(); labels.insert("version".into(), version.into()); } - labels.insert(OSTREE_COMMIT_LABEL.into(), commit.into()); for (k, v) in config.labels.iter().flat_map(|k| k.iter()) { labels.insert(k.into(), v.into()); } - // Lookup the cmd embedded in commit metadata - let cmd = commit_meta.lookup::>(ostree::COMMIT_META_CONTAINER_CMD)?; - // But support it being overridden by CLI options - - // https://github.com/rust-lang/rust-clippy/pull/7639#issuecomment-1050340564 - #[allow(clippy::unnecessary_lazy_evaluations)] - let cmd = config.cmd.as_ref().or_else(|| cmd.as_ref()); - if let Some(cmd) = cmd { - ctrcfg.set_cmd(Some(cmd.clone())); - } - - imgcfg.set_config(Some(ctrcfg)); let compression = if opts.compress { flate2::Compression::default() @@ -140,21 +174,52 @@ fn build_oci( flate2::Compression::none() }; - let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, Some(compression))?; + let mut annos = HashMap::new(); + annos.insert(BLOB_OSTREE_ANNOTATION.to_string(), "true".to_string()); let description = if commit_subject.is_empty() { Cow::Owned(format!("ostree export of commit {}", commit)) } else { Cow::Borrowed(commit_subject) }; - let mut annos = HashMap::new(); - annos.insert(BLOB_OSTREE_ANNOTATION.to_string(), "true".to_string()); - writer.push_layer_annotated( - &mut manifest, - &mut imgcfg, - rootfs_blob, - Some(annos), - &description, - ); + + if let Some(chunking) = chunking { + export_chunked( + repo, + &mut writer, + &mut manifest, + &mut imgcfg, + labels, + chunking, + Some(compression), + &description, + )?; + } else { + let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, Some(compression))?; + labels.insert( + crate::container::OSTREE_DIFFID_LABEL.into(), + format!("sha256:{}", rootfs_blob.uncompressed_sha256), + ); + writer.push_layer_annotated( + &mut manifest, + &mut imgcfg, + rootfs_blob, + Some(annos), + &description, + ); + } + + // Lookup the cmd embedded in commit metadata + let cmd = commit_meta.lookup::>(ostree::COMMIT_META_CONTAINER_CMD)?; + // But support it being overridden by CLI options + + // https://github.com/rust-lang/rust-clippy/pull/7639#issuecomment-1050340564 + #[allow(clippy::unnecessary_lazy_evaluations)] + let cmd = 
config.cmd.as_ref().or_else(|| cmd.as_ref()); + if let Some(cmd) = cmd { + ctrcfg.set_cmd(Some(cmd.clone())); + } + + imgcfg.set_config(Some(ctrcfg)); let ctrcfg = writer.write_config(imgcfg)?; manifest.set_config(ctrcfg); writer.write_manifest(manifest, oci_image::Platform::default())?; @@ -166,12 +231,13 @@ fn build_oci( } /// Helper for `build()` that avoids generics -#[instrument(skip(repo))] +#[instrument(skip(repo, contentmeta))] async fn build_impl( repo: &ostree::Repo, ostree_ref: &str, config: &Config, opts: Option, + contentmeta: Option, dest: &ImageReference, ) -> Result { let mut opts = opts.unwrap_or_default(); @@ -185,6 +251,7 @@ async fn build_impl( Path::new(dest.name.as_str()), config, opts, + contentmeta, )?; None } else { @@ -193,7 +260,14 @@ async fn build_impl( let tempdest = tempdest.to_str().unwrap(); let digestfile = tempdir.path().join("digestfile"); - let src = build_oci(repo, ostree_ref, Path::new(tempdest), config, opts)?; + let src = build_oci( + repo, + ostree_ref, + Path::new(tempdest), + config, + opts, + contentmeta, + )?; let mut cmd = skopeo::new_cmd(); tracing::event!(Level::DEBUG, "Copying {} to {}", src, dest); @@ -230,6 +304,8 @@ pub struct ExportOpts { pub compress: bool, /// A set of commit metadata keys to copy as image labels. pub copy_meta_keys: Vec, + /// Maximum number of layers to use + pub max_layers: Option, } /// Given an OSTree repository and ref, generate a container image. @@ -240,7 +316,8 @@ pub async fn encapsulate>( ostree_ref: S, config: &Config, opts: Option, + contentmeta: Option, dest: &ImageReference, ) -> Result { - build_impl(repo, ostree_ref.as_ref(), config, opts, dest).await + build_impl(repo, ostree_ref.as_ref(), config, opts, contentmeta, dest).await } diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 713108f64..99ac102a8 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -32,6 +32,8 @@ use std::ops::Deref; /// The label injected into a container image that contains the ostree commit SHA-256. pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit"; +/// The label/annotation which contains the sha256 of the final commit. +const OSTREE_DIFFID_LABEL: &str = "ostree.diffid"; /// Our generic catchall fatal error, expected to be converted /// to a string to output to a terminal or logs. diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index c3dc6f783..0eff16880 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -10,11 +10,12 @@ use crate::refescape; use anyhow::{anyhow, Context}; use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; -use oci_spec::image::{self as oci_image, ImageManifest}; +use oci_spec::image::{self as oci_image, Descriptor, History, ImageConfiguration, ImageManifest}; use ostree::prelude::{Cast, ToVariant}; use ostree::{gio, glib}; use std::collections::HashMap; use std::iter::FromIterator; +use std::sync::{Arc, Mutex}; /// Configuration for the proxy. /// @@ -84,12 +85,12 @@ impl LayeredImageState { /// Context for importing a container image. #[derive(Debug)] -pub struct LayeredImageImporter { +pub struct ImageImporter { repo: ostree::Repo, - proxy: ImageProxy, + pub(crate) proxy: ImageProxy, imgref: OstreeImageReference, target_imgref: Option, - proxy_img: OpenedImage, + pub(crate) proxy_img: OpenedImage, } /// Result of invoking [`LayeredImageImporter::prepare`]. @@ -104,7 +105,7 @@ pub enum PrepareResult { /// A container image layer with associated downloaded-or-not state. 
#[derive(Debug)] pub struct ManifestLayerState { - layer: oci_image::Descriptor, + pub(crate) layer: oci_image::Descriptor, /// The ostree ref name for this layer. pub ostree_ref: String, /// The ostree commit that caches this layer, if present. @@ -131,19 +132,60 @@ pub struct PreparedImport { /// The deserialized manifest. pub manifest: oci_image::ImageManifest, /// The deserialized configuration. - pub config: Option, + pub config: oci_image::ImageConfiguration, /// The previously stored manifest digest. pub previous_manifest_digest: Option, /// The previously stored image ID. pub previous_imageid: Option, - /// The required base layer. - pub base_layer: ManifestLayerState, - /// Any further layers. + /// The layers containing split objects + pub ostree_layers: Vec, + /// The layer for the ostree commit. + pub ostree_commit_layer: ManifestLayerState, + /// Any further non-ostree (derived) layers. pub layers: Vec, } +impl PreparedImport { + /// Iterate over all layers; the ostree split object layers, the commit layer, and any non-ostree layers. + pub fn all_layers(&self) -> impl Iterator { + self.ostree_layers + .iter() + .chain(std::iter::once(&self.ostree_commit_layer)) + .chain(self.layers.iter()) + } + + /// Iterate over all layers paired with their history entry. + /// An error will be returned if the history does not cover all entries. + pub fn layers_with_history( + &self, + ) -> impl Iterator> { + // FIXME use .filter(|h| h.empty_layer.unwrap_or_default()) after https://github.com/containers/oci-spec-rs/pull/100 lands. + let truncated = std::iter::once(Err(anyhow::anyhow!("Truncated history"))); + let history = self.config.history().iter().map(Ok).chain(truncated); + self.all_layers() + .zip(history) + .map(|(s, h)| h.map(|h| (s, h))) + } + + /// Iterate over all layers that are not present, along with their history description. + pub fn layers_to_fetch(&self) -> impl Iterator> { + self.layers_with_history().filter_map(|r| { + r.map(|(l, h)| { + l.commit.is_none().then(|| { + let comment = h.created_by().as_deref().unwrap_or(""); + (l, comment) + }) + }) + .transpose() + }) + } +} + // Given a manifest, compute its ostree ref name and cached ostree commit -fn query_layer(repo: &ostree::Repo, layer: oci_image::Descriptor) -> Result { +pub(crate) fn query_layer( + repo: &ostree::Repo, + layer: oci_image::Descriptor, +) -> Result { let ostree_ref = ref_for_layer(&layer)?; let commit = repo.resolve_rev(&ostree_ref, true)?.map(|s| s.to_string()); Ok(ManifestLayerState { @@ -177,7 +219,30 @@ pub fn manifest_digest_from_commit(commit: &glib::Variant) -> Result { Ok(manifest_data_from_commitmeta(commit_meta)?.1) } -impl LayeredImageImporter { +/// Given a target diffid, return its corresponding layer. In our current model, +/// we require a 1-to-1 mapping between the two up until the ostree level. +/// For a bit more information on this, see https://github.com/opencontainers/image-spec/blob/main/config.md +fn layer_from_diffid<'a>( + manifest: &'a ImageManifest, + config: &ImageConfiguration, + diffid: &str, +) -> Result<&'a Descriptor> { + let idx = config + .rootfs() + .diff_ids() + .iter() + .position(|x| x.as_str() == diffid) + .ok_or_else(|| anyhow!("Missing {} {}", OSTREE_DIFFID_LABEL, diffid))?; + manifest.layers().get(idx).ok_or_else(|| { + anyhow!( + "diffid position {} exceeds layer count {}", + idx, + manifest.layers().len() + ) + }) +} + +impl ImageImporter { /// Create a new importer. 
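 ///
 /// A sketch of the intended call sequence (error handling elided; the test
 /// suite later in this series exercises the real flow):
 ///
 ///     let mut imp = ImageImporter::new(repo, &imgref, Default::default()).await?;
 ///     match imp.prepare().await? {
 ///         PrepareResult::AlreadyPresent(state) => { /* nothing to fetch */ }
 ///         PrepareResult::Ready(prep) => { let state = imp.import(prep).await?; }
 ///     }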
pub async fn new( repo: &ostree::Repo, @@ -189,7 +254,7 @@ impl LayeredImageImporter { let proxy = ImageProxy::new_with_config(config).await?; let proxy_img = proxy.open_image(&imgref.imgref.to_string()).await?; let repo = repo.clone(); - Ok(LayeredImageImporter { + Ok(ImageImporter { repo, proxy, proxy_img, @@ -202,15 +267,19 @@ impl LayeredImageImporter { pub fn set_target(&mut self, target: &OstreeImageReference) { self.target_imgref = Some(target.clone()) } + /// Determine if there is a new manifest, and if so return its digest. + pub async fn prepare(&mut self) -> Result { + self.prepare_internal(false).await + } /// Determine if there is a new manifest, and if so return its digest. #[context("Fetching manifest")] - pub async fn prepare(&mut self) -> Result { + pub(crate) async fn prepare_internal(&mut self, verify_layers: bool) -> Result { match &self.imgref.sigverify { SignatureSource::ContainerPolicy if skopeo::container_policy_is_default_insecure()? => { return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); } - SignatureSource::OstreeRemote(_) => { + SignatureSource::OstreeRemote(_) if verify_layers => { return Err(anyhow!( "Cannot currently verify layered containers via ostree remote" )); @@ -244,25 +313,46 @@ impl LayeredImageImporter { (None, None) }; - #[cfg(feature = "proxy_v0_2_3")] - let config = { - let config_bytes = self.proxy.fetch_config(&self.proxy_img).await?; - let config: oci_image::ImageConfiguration = - serde_json::from_slice(&config_bytes).context("Parsing image configuration")?; - Some(config) - }; - #[cfg(not(feature = "proxy_v0_2_3"))] - let config = None; - - let mut layers = manifest.layers().iter().cloned(); - // We require a base layer. - let base_layer = layers.next().ok_or_else(|| anyhow!("No layers found"))?; - let base_layer = query_layer(&self.repo, base_layer)?; + let config = self.proxy.fetch_config(&self.proxy_img).await?; - let layers: Result> = layers - .map(|layer| -> Result<_> { query_layer(&self.repo, layer) }) - .collect(); - let layers = layers?; + let label = crate::container::OSTREE_DIFFID_LABEL; + let config_labels = config.config().as_ref().and_then(|c| c.labels().as_ref()); + // For backwards compatibility, if there's only 1 layer, don't require the label. + // This can be dropped when we drop format version 0 support. 
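+        // Illustration (hypothetical digests): with rootfs.diff_ids
+        // [sha256:aaa..., sha256:bbb...] and the ostree.diffid label naming
+        // sha256:bbb..., the commit layer is manifest.layers()[1]; everything
+        // before it is a split-object layer, everything after it is derived.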
+ let commit_layer_digest = if config.rootfs().diff_ids().len() == 1 { + manifest.layers()[0].digest() + } else { + let diffid = config_labels + .and_then(|labels| labels.get(label)) + .ok_or_else(|| { + anyhow!( + "Missing annotation {} (not an ostree-exported container?)", + label + ) + })?; + + let layer = layer_from_diffid(&manifest, &config, diffid.as_str())?; + layer.digest() + }; + let mut component_layers = Vec::new(); + let mut commit_layer = None; + let mut remaining_layers = Vec::new(); + let query = |l: &Descriptor| query_layer(&self.repo, l.clone()); + for layer in manifest.layers() { + if layer.digest() == commit_layer_digest { + commit_layer = Some(query(layer)?); + } else if commit_layer.is_none() { + component_layers.push(query(layer)?); + } else { + remaining_layers.push(query(layer)?); + } + } + let commit_layer = commit_layer.ok_or_else(|| { + anyhow!( + "Image does not contain ostree-exported layer {}", + commit_layer_digest + ) + })?; let imp = PreparedImport { manifest, @@ -270,43 +360,132 @@ impl LayeredImageImporter { config, previous_manifest_digest, previous_imageid, - base_layer, - layers, + ostree_layers: component_layers, + ostree_commit_layer: commit_layer, + layers: remaining_layers, }; Ok(PrepareResult::Ready(Box::new(imp))) } - /// Import a layered container image - pub async fn import(self, import: Box) -> Result { - let mut proxy = self.proxy; - let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref); + /// Extract the base ostree commit. + pub(crate) async fn unencapsulate_base( + &mut self, + import: &mut store::PreparedImport, + options: Option, + write_refs: bool, + ) -> Result<()> { + tracing::debug!("Fetching base"); + if matches!(self.imgref.sigverify, SignatureSource::ContainerPolicy) + && skopeo::container_policy_is_default_insecure()? + { + return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); + } + let options = options.unwrap_or_default(); + let remote = match &self.imgref.sigverify { + SignatureSource::OstreeRemote(remote) => Some(remote.clone()), + SignatureSource::ContainerPolicy | SignatureSource::ContainerPolicyAllowInsecure => { + None + } + }; - // First download the base image (if necessary) - we need the SELinux policy - // there to label all following layers. 
- let base_layer = import.base_layer; - let base_commit = if let Some(c) = base_layer.commit { - c - } else { - let base_commit = super::unencapsulate_from_manifest_impl( - &self.repo, - &mut proxy, - target_imgref, + let progress = options.progress.map(|v| Arc::new(Mutex::new(v))); + for layer in import.ostree_layers.iter_mut() { + if layer.commit.is_some() { + continue; + } + let (blob, driver) = + fetch_layer_decompress(&mut self.proxy, &self.proxy_img, &layer.layer).await?; + let blob = super::unencapsulate::ProgressReader { + reader: blob, + progress: progress.as_ref().map(Arc::clone), + }; + let repo = self.repo.clone(); + let target_ref = layer.ostree_ref.clone(); + let import_task = + crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| { + let txn = repo.auto_transaction(Some(cancellable))?; + let mut importer = crate::tar::Importer::new_for_object_set(&repo); + let blob = tokio_util::io::SyncIoBridge::new(blob); + let mut archive = tar::Archive::new(blob); + importer.import_objects(&mut archive, Some(cancellable))?; + let commit = if write_refs { + let commit = importer.finish_import_object_set()?; + repo.transaction_set_ref(None, &target_ref, Some(commit.as_str())); + tracing::debug!("Wrote {} => {}", target_ref, commit); + Some(commit) + } else { + None + }; + txn.commit(Some(cancellable))?; + Ok::<_, anyhow::Error>(commit) + }); + let commit = super::unencapsulate::join_fetch(import_task, driver).await?; + layer.commit = commit; + } + if import.ostree_commit_layer.commit.is_none() { + let (blob, driver) = fetch_layer_decompress( + &mut self.proxy, &self.proxy_img, - &import.manifest, - None, - true, + &import.ostree_commit_layer.layer, ) .await?; - // Write the ostree ref for that single layer; TODO - // handle this as part of the overall transaction. - self.repo.set_ref_immediate( - None, - base_layer.ostree_ref.as_str(), - Some(base_commit.as_str()), - gio::NONE_CANCELLABLE, - )?; - base_commit + let blob = ProgressReader { + reader: blob, + progress: progress.as_ref().map(Arc::clone), + }; + let repo = self.repo.clone(); + let target_ref = import.ostree_commit_layer.ostree_ref.clone(); + let import_task = + crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| { + let txn = repo.auto_transaction(Some(cancellable))?; + let mut importer = crate::tar::Importer::new_for_commit(&repo, remote); + let blob = tokio_util::io::SyncIoBridge::new(blob); + let mut archive = tar::Archive::new(blob); + importer.import_commit(&mut archive, Some(cancellable))?; + let commit = importer.finish_import_commit(); + if write_refs { + repo.transaction_set_ref(None, &target_ref, Some(commit.as_str())); + tracing::debug!("Wrote {} => {}", target_ref, commit); + } + repo.mark_commit_partial(&commit, false)?; + txn.commit(Some(cancellable))?; + Ok::<_, anyhow::Error>(commit) + }); + let commit = super::unencapsulate::join_fetch(import_task, driver).await?; + import.ostree_commit_layer.commit = Some(commit); }; + Ok(()) + } + + /// Retrieve an inner ostree commit. + /// + /// This does not write cached references for each blob, and errors out if + /// the image has any non-ostree layers. 
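+    ///
+    /// A sketch of use (names hypothetical), mirroring the rewritten
+    /// `unencapsulate()` entrypoint in the unencapsulate module below:
+    ///
+    ///     let import = importer.unencapsulate(prep, options).await?;
+    ///     println!("{}", import.ostree_commit);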
+ pub async fn unencapsulate( + mut self, + mut import: Box, + options: Option, + ) -> Result { + if !import.layers.is_empty() { + anyhow::bail!("Image has {} non-ostree layers", import.layers.len()); + } + self.unencapsulate_base(&mut import, options, false).await?; + let ostree_commit = import.ostree_commit_layer.commit.unwrap(); + let image_digest = import.manifest_digest; + Ok(Import { + ostree_commit, + image_digest, + }) + } + + /// Import a layered container image + pub async fn import(mut self, mut import: Box) -> Result { + // First download all layers for the base image (if necessary) - we need the SELinux policy + // there to label all following layers. + self.unencapsulate_base(&mut import, None, true).await?; + let mut proxy = self.proxy; + let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref); + let base_commit = import.ostree_commit_layer.commit.clone().unwrap(); let ostree_ref = ref_for_image(&target_imgref.imgref)?; @@ -329,9 +508,9 @@ impl LayeredImageImporter { base: Some(base_commit.clone()), selinux: true, }; - let w = + let r = crate::tar::write_tar(&self.repo, blob, layer.ostree_ref.as_str(), Some(opts)); - let r = super::unencapsulate::join_fetch(w, driver) + let r = super::unencapsulate::join_fetch(r, driver) .await .with_context(|| format!("Parsing layer blob {}", layer.digest()))?; layer_commits.push(r.commit); diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 321349a0b..e0f3ca793 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -32,13 +32,13 @@ // which is exactly what is exported by the [`crate::tar::export`] process. use super::*; -use anyhow::{anyhow, Context}; use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; use futures_util::Future; use oci_spec::image as oci_image; +use std::sync::{Arc, Mutex}; use tokio::io::{AsyncBufRead, AsyncRead}; -use tracing::{event, instrument, Level}; +use tracing::instrument; /// The result of an import operation #[derive(Copy, Clone, Debug, Default)] @@ -52,11 +52,11 @@ type Progress = tokio::sync::watch::Sender; /// A read wrapper that updates the download progress. #[pin_project::pin_project] #[derive(Debug)] -struct ProgressReader { +pub(crate) struct ProgressReader { #[pin] - reader: T, + pub(crate) reader: T, #[pin] - progress: Option, + pub(crate) progress: Option>>, } impl AsyncRead for ProgressReader { @@ -70,6 +70,7 @@ impl AsyncRead for ProgressReader { match this.reader.poll_read(cx, buf) { v @ std::task::Poll::Ready(Ok(_)) => { if let Some(progress) = this.progress.as_ref().get_ref() { + let progress = progress.lock().unwrap(); let state = { let mut state = *progress.borrow(); let newlen = buf.filled().len(); @@ -116,20 +117,6 @@ pub struct Import { pub image_digest: String, } -fn require_one_layer_blob(manifest: &oci_image::ImageManifest) -> Result<&oci_image::Descriptor> { - let n = manifest.layers().len(); - if let Some(layer) = manifest.layers().get(0) { - if n > 1 { - Err(anyhow!("Expected 1 layer, found {}", n)) - } else { - Ok(layer) - } - } else { - // Validated by find_layer_blobids() - unreachable!() - } -} - /// Use this to process potential errors from a worker and a driver. /// This is really a brutal hack around the fact that an error can occur /// on either our side or in the proxy. 
But if an error occurs on our @@ -180,17 +167,17 @@ pub async fn unencapsulate( imgref: &OstreeImageReference, options: Option, ) -> Result { - let mut proxy = ImageProxy::new().await?; - let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; - let (image_digest, manifest) = proxy.fetch_manifest(oi).await?; - let ostree_commit = - unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, oi, &manifest, options, false) - .await?; - proxy.close_image(oi).await?; - Ok(Import { - ostree_commit, - image_digest, - }) + let mut importer = super::store::ImageImporter::new(repo, imgref, Default::default()).await?; + let prep = match importer.prepare().await? { + store::PrepareResult::AlreadyPresent(r) => { + return Ok(Import { + ostree_commit: r.base_commit, + image_digest: r.manifest_digest, + }); + } + store::PrepareResult::Ready(r) => r, + }; + importer.unencapsulate(prep, options).await } /// Create a decompressor for this MIME type, given a stream of input. @@ -224,71 +211,3 @@ pub(crate) async fn fetch_layer_decompress<'a>( let blob = new_async_decompressor(layer.media_type(), blob)?; Ok((blob, driver)) } - -pub(crate) async fn unencapsulate_from_manifest_impl( - repo: &ostree::Repo, - proxy: &mut ImageProxy, - imgref: &OstreeImageReference, - oi: &containers_image_proxy::OpenedImage, - manifest: &oci_spec::image::ImageManifest, - options: Option, - ignore_layered: bool, -) -> Result { - if matches!(imgref.sigverify, SignatureSource::ContainerPolicy) - && skopeo::container_policy_is_default_insecure()? - { - return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage")); - } - let options = options.unwrap_or_default(); - let layer = if ignore_layered { - manifest - .layers() - .get(0) - .ok_or_else(|| anyhow!("No layers in image"))? - } else { - require_one_layer_blob(manifest)? - }; - event!( - Level::DEBUG, - "target blob digest:{} size: {}", - layer.digest().as_str(), - layer.size() - ); - let (blob, driver) = fetch_layer_decompress(proxy, oi, layer).await?; - let blob = ProgressReader { - reader: blob, - progress: options.progress, - }; - let mut taropts: crate::tar::TarImportOptions = Default::default(); - match &imgref.sigverify { - SignatureSource::OstreeRemote(remote) => taropts.remote = Some(remote.clone()), - SignatureSource::ContainerPolicy | SignatureSource::ContainerPolicyAllowInsecure => {} - } - let import = crate::tar::import_tar(repo, blob, Some(taropts)); - let ostree_commit = join_fetch(import, driver) - .await - .with_context(|| format!("Parsing blob {}", layer.digest()))?; - - event!(Level::DEBUG, "created commit {}", ostree_commit); - Ok(ostree_commit) -} - -/// Fetch a container image using an in-memory manifest and import its embedded OSTree commit. 
-#[context("Importing {}", imgref)] -#[instrument(skip(repo, options, manifest))] -pub async fn unencapsulate_from_manifest( - repo: &ostree::Repo, - imgref: &OstreeImageReference, - manifest: &oci_spec::image::ImageManifest, - options: Option, -) -> Result { - let mut proxy = ImageProxy::new().await?; - let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; - let r = - unencapsulate_from_manifest_impl(repo, &mut proxy, imgref, oi, manifest, options, false) - .await?; - proxy.close_image(oi).await?; - // FIXME write ostree commit after proxy finalization - proxy.finalize().await?; - Ok(r) -} diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 819191980..802f3eff6 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -2,6 +2,8 @@ #![allow(missing_docs)] +use crate::chunking::ObjectMetaSized; +use crate::container::{Config, ExportOpts, ImageReference, Transport}; use crate::objectsource::{ObjectMeta, ObjectSourceMeta}; use crate::prelude::*; use crate::{gio, glib}; @@ -587,4 +589,42 @@ impl Fixture { outf.flush()?; Ok(path.into()) } + + /// Export the current ref as a container image. + /// This defaults to using chunking. + #[context("Exporting container")] + pub async fn export_container(&self) -> Result<(ImageReference, String)> { + let container_path = &self.path.join("oci"); + if container_path.exists() { + std::fs::remove_dir_all(container_path)?; + } + let imgref = ImageReference { + transport: Transport::OciDir, + name: container_path.as_str().to_string(), + }; + let config = Config { + labels: Some( + [("foo", "bar"), ("test", "value")] + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())) + .collect(), + ), + ..Default::default() + }; + let contentmeta = self.get_object_meta().context("Computing object meta")?; + let contentmeta = ObjectMetaSized::compute_sizes(self.srcrepo(), contentmeta) + .context("Computing sizes")?; + let opts = ExportOpts::default(); + let digest = crate::container::encapsulate( + self.srcrepo(), + self.testref(), + &config, + Some(opts), + Some(contentmeta), + &imgref, + ) + .await + .context("exporting")?; + Ok((imgref, digest)) + } } diff --git a/lib/src/fixtures/fedora-coreos-contentmeta.json.gz b/lib/src/fixtures/fedora-coreos-contentmeta.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..a1276a3f34046a9ecd83fbc96c1c3f7fee818f6e GIT binary patch literal 10233 zcmVyqlYc7R{y)c%#!Ii_^A ztbT{&1@dFcfT2ANhOtfWODcKxNCry=Eoq5qcB& z`fqRF4sCb&QS93O_`U9%f)Qr^A-bSUuXYlgf}%0Eise?uNT%b%nX*-N3E>?PMFrKJdDKc3bBL~?=Z8oqUIlF zz6umQi=8D;!HD2Q5-&JoOzEl$1aG@`?Caxh_{YC}G=0|`{vlV?{8O24BTJe7-T#26 z#>_wDsX@^15ljnRDyHT9t5ijWDk~XZNnJC3KlLANw=d3zi5e4o!_21v$iNVzn;TFVwhX1Fo!&KR&pmO)4J3w zG=f8YT-U|s(3<~Nm|Zgz>`mAwCX2|LePV>0>yzG*fl?b)hz8$aHh^@>A z(%4GCuvtpW-6NQLR5I_{395w(?eX5skaf2diQ9lA&1%=Ki#$;Sy%qzdP-zjPb( z0O^Xxw|tUu#?(T02qGQT-bZ7?Xj2ZX&Y#2B9Jd7%dZl8zJ`K4w2vNvRFTL8NjB3VJ zsEy3<;^sxz*dVTuaB$(5z$CUF{hpyzNWqB;#J!B|p}590X05CEqQt>j+$f1H&xMmt zDn=l2?Y=z?g>fav`fzA^qdklQjBfQ@+%MYUsv8o6uwUV=0VR|v0)$96?`?L9^ISrV zevNBrWo8sG40cxRlO4?b9FNBIR4dL3o|yOy+J_7?e#c=RSn1a{G0IE=@z1=?FWcrb zPi-W~7wHZyjI!h5viQ$yy!0P+XCjZ@+S2YgIV_ynP&Xp?lcVO}@G$HFM}7?Rmd-d4 zCYlbzoqcyX@9pRDgrQ3@&d>o6vYsV(u6ZS8sc2~DcI~0LdMk1zNTFc?5o#83!j=TK zaD8N32`RM-*TTNP81p`!PG(QgH?|^WK_d=W4(cXzV(%DCHz@b0D=tB+Y`=Pa`eF1W zS!uoEv6YH)vy?Fq5!JY~PE>04m<&bTeZQP-;!7FFGqNaCh{O?!DKu-h#|}1;DN#7EA1aIRh25kWJbhD$4&ol zw)rLD6{F!C4#TjW-43=YjrxeC1W?c!d`sRT$aHeA;UzQMMTM%U-i=MazVw}0Lq;LT z@~{|ow_hSK_Uwtv3rJd)oP~b(@Lu=Lwm6z`#xdTVY~mr7#K>aZ%qC!DDQN1{y(CH# 
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index c0b9b8e8b..23a41ac95 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -37,6 +37,7 @@ pub mod refescape;
 pub mod tar;
 pub mod tokio_util;
 
+pub mod chunking;
 pub(crate) mod commit;
 pub mod objectsource;
 pub(crate) mod objgv;
diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 2d188b1ce..646722d1a 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -1,5 +1,7 @@
 //! APIs for creating container images from OSTree commits
 
+use crate::chunking;
+use crate::chunking::Chunking;
 use crate::objgv::*;
 use anyhow::{anyhow, bail, ensure, Context, Result};
 use camino::{Utf8Path, Utf8PathBuf};
@@ -9,6 +11,7 @@ use gio::prelude::*;
 use gvariant::aligned_bytes::TryAsAligned;
 use gvariant::{Marker, Structure};
 use ostree::gio;
+use std::borrow::Borrow;
 use std::borrow::Cow;
 use std::collections::HashSet;
 use std::io::BufReader;
@@ -526,6 +529,73 @@ pub fn export_commit(
     Ok(())
 }
 
+/// Output a chunk.
+pub(crate) fn export_chunk<W: std::io::Write>(
+    repo: &ostree::Repo,
+    chunk: &chunking::Chunk,
+    out: &mut tar::Builder<W>,
+) -> Result<()> {
+    let writer = &mut OstreeTarWriter::new(repo, out, ExportOptions::default());
+    writer.write_repo_structure()?;
+    for (checksum, (_size, paths)) in chunk.content.iter() {
+        let (objpath, h) = writer.append_content(checksum.borrow())?;
+        for path in paths.iter() {
+            let path = path.strip_prefix("/").unwrap_or(path);
+            let h = h.clone();
+            writer.append_content_hardlink(&objpath, h, path)?;
+        }
+    }
+    Ok(())
+}
+
+/// Output the last chunk in a chunking.
+#[context("Exporting final chunk")]
+pub(crate) fn export_final_chunk<W: std::io::Write>(
+    repo: &ostree::Repo,
+    chunking: &Chunking,
+    out: &mut tar::Builder<W>,
+) -> Result<()> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    // For chunking, we default to format version 1
+    #[allow(clippy::needless_update)]
+    let options = ExportOptions {
+        format_version: 1,
+        ..Default::default()
+    };
+    let writer = &mut OstreeTarWriter::new(repo, out, options);
+    writer.write_repo_structure()?;
+
+    let (commit_v, _) = repo.load_commit(&chunking.commit)?;
+    let commit_v = &commit_v;
+    writer.append(ostree::ObjectType::Commit, &chunking.commit, commit_v)?;
+    if let Some(commitmeta) = repo.read_commit_detached_metadata(&chunking.commit, cancellable)? {
+        writer.append(
+            ostree::ObjectType::CommitMeta,
+            &chunking.commit,
+            &commitmeta,
+        )?;
+    }
+
+    // In the chunked case, the final layer has all ostree metadata objects.
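+    // Illustrative layout of the finished image (N component chunks, then
+    // this final layer; cf. export_chunked in encapsulate.rs above):
+    //   layers 0..N  one tar stream per component chunk (export_chunk)
+    //   layer  N     commit, commitmeta, the dirtree/dirmeta objects, and any
+    //                remainder content not claimed by a chunk (this function)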
+ for meta in &chunking.meta { + let objtype = meta.objtype(); + let checksum = meta.checksum(); + let v = repo.load_variant(objtype, checksum)?; + writer.append(objtype, checksum, &v)?; + } + + for (checksum, (_size, paths)) in chunking.remainder.content.iter() { + let (objpath, h) = writer.append_content(checksum.borrow())?; + for path in paths.iter() { + let path = path.strip_prefix("/").unwrap_or(path); + let h = h.clone(); + writer.append_content_hardlink(&objpath, h, path)?; + } + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 78a1f371b..87741995d 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1,9 +1,12 @@ use anyhow::{Context, Result}; use camino::Utf8Path; +use cap_std::fs::{Dir, DirBuilder}; use once_cell::sync::Lazy; +use ostree::cap_std; +use ostree_ext::chunking::ObjectMetaSized; use ostree_ext::container::store::PrepareResult; use ostree_ext::container::{ - Config, ImageReference, OstreeImageReference, SignatureSource, Transport, + Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport, }; use ostree_ext::prelude::FileExt; use ostree_ext::tar::TarImportOptions; @@ -11,6 +14,7 @@ use ostree_ext::{gio, glib}; use sh_inline::bash_in; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::os::unix::fs::DirBuilderExt; use std::process::Command; use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0}; @@ -20,7 +24,7 @@ const TEST_REGISTRY_DEFAULT: &str = "localhost:5000"; fn assert_err_contains(r: Result, s: impl AsRef) { let s = s.as_ref(); - let msg = format!("{:#}", r.err().unwrap()); + let msg = format!("{:#}", r.err().expect("Expecting an error")); if !msg.contains(s) { panic!(r#"Error message "{}" did not contain "{}""#, msg, s); } @@ -220,8 +224,6 @@ fn test_tar_export_structure() -> Result<()> { use tar::EntryType::{Directory, Regular}; let mut fixture = Fixture::new_v1()?; - // Just test that we can retrieve ownership for all objects - let _objmeta = fixture.get_object_meta()?; let src_tar = fixture.export_tar()?; let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?); @@ -391,8 +393,7 @@ fn skopeo_inspect_config(imgref: &str) -> Result Result<()> { +async fn impl_test_container_import_export(chunked: bool) -> Result<()> { let fixture = Fixture::new_v1()?; let testrev = fixture .srcrepo() @@ -413,7 +414,14 @@ async fn test_container_import_export() -> Result<()> { ), ..Default::default() }; - let opts = ostree_ext::container::ExportOpts { + // If chunking is requested, compute object ownership and size mappings + let contentmeta = chunked + .then(|| { + let meta = fixture.get_object_meta().context("Computing object meta")?; + ObjectMetaSized::compute_sizes(fixture.srcrepo(), meta).context("Computing sizes") + }) + .transpose()?; + let opts = ExportOpts { copy_meta_keys: vec!["buildsys.checksum".to_string()], ..Default::default() }; @@ -422,6 +430,7 @@ async fn test_container_import_export() -> Result<()> { fixture.testref(), &config, Some(opts), + contentmeta, &srcoci_imgref, ) .await @@ -451,6 +460,10 @@ async fn test_container_import_export() -> Result<()> { "/usr/bin/bash" ); + let n_chunks = if chunked { 7 } else { 1 }; + assert_eq!(cfg.rootfs().diff_ids().len(), n_chunks); + assert_eq!(cfg.history().len(), n_chunks); + let srcoci_unverified = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: srcoci_imgref.clone(), @@ -505,6 +518,116 @@ async fn 
test_container_import_export() -> Result<()> { Ok(()) } +#[tokio::test] +async fn impl_test_container_chunked() -> Result<()> { + let nlayers = 6u32; + let mut fixture = Fixture::new_v1()?; + + let (imgref, expected_digest) = fixture.export_container().await.unwrap(); + let imgref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: imgref, + }; + + let mut imp = ostree_ext::container::store::ImageImporter::new( + fixture.destrepo(), + &imgref, + Default::default(), + ) + .await?; + let prep = match imp.prepare().await.context("Init prep derived")? { + PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + PrepareResult::Ready(r) => r, + }; + let digest = prep.manifest_digest.clone(); + assert!(prep.ostree_commit_layer.commit.is_none()); + assert_eq!(prep.ostree_layers.len(), nlayers as usize); + assert_eq!(prep.layers.len(), 0); + for layer in prep.layers.iter() { + assert!(layer.commit.is_none()); + } + assert_eq!(digest, expected_digest); + let _import = imp.import(prep).await.context("Init pull derived").unwrap(); + + const ADDITIONS: &str = indoc::indoc! { " +r usr/bin/bash bash-v0 +"}; + fixture + .update(FileDef::iter_from(ADDITIONS), std::iter::empty()) + .context("Failed to update")?; + + let expected_digest = fixture.export_container().await.unwrap().1; + assert_ne!(digest, expected_digest); + + let mut imp = ostree_ext::container::store::ImageImporter::new( + fixture.destrepo(), + &imgref, + Default::default(), + ) + .await?; + let prep = match imp.prepare().await.context("Init prep derived")? { + PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + PrepareResult::Ready(r) => r, + }; + let to_fetch = prep.layers_to_fetch().collect::>>()?; + assert_eq!(to_fetch.len(), 2); + assert_eq!(expected_digest, prep.manifest_digest.as_str()); + assert!(prep.ostree_commit_layer.commit.is_none()); + assert_eq!(prep.ostree_layers.len(), nlayers as usize); + let (first, second) = (to_fetch[0], to_fetch[1]); + assert_eq!(first.1, "bash"); + assert!(first.0.commit.is_none()); + assert!(second.1.starts_with("ostree export of commit")); + assert!(second.0.commit.is_none()); + + let _import = imp.import(prep).await.unwrap(); + + // Build a derived image + let derived_path = &fixture.path.join("derived.oci"); + let srcpath = imgref.imgref.name.as_str(); + oci_clone(srcpath, derived_path).await.unwrap(); + let temproot = &fixture.path.join("temproot"); + || -> Result<_> { + std::fs::create_dir(temproot)?; + let temprootd = Dir::open_ambient_dir(temproot, cap_std::ambient_authority())?; + let mut db = DirBuilder::new(); + db.mode(0o755); + db.recursive(true); + temprootd.create_dir_with("usr/bin", &db)?; + temprootd.write("usr/bin/newderivedfile", "newderivedfile v0")?; + temprootd.write("usr/bin/newderivedfile3", "newderivedfile3 v0")?; + Ok(()) + }() + .context("generating temp content")?; + ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot)?; + + let derived_imgref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: ImageReference { + transport: Transport::OciDir, + name: derived_path.to_string(), + }, + }; + let mut imp = ostree_ext::container::store::ImageImporter::new( + fixture.destrepo(), + &derived_imgref, + Default::default(), + ) + .await?; + let prep = match imp.prepare().await.unwrap() { + PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + PrepareResult::Ready(r) => r, + }; + let to_fetch = 
prep.layers_to_fetch().collect::>>()?; + assert_eq!(to_fetch.len(), 1); + assert!(prep.ostree_commit_layer.commit.is_some()); + assert_eq!(prep.ostree_layers.len(), nlayers as usize); + + let _import = imp.import(prep).await.unwrap(); + + Ok(()) +} + /// Copy an OCI directory. async fn oci_clone(src: impl AsRef, dest: impl AsRef) -> Result<()> { let src = src.as_ref(); @@ -522,6 +645,13 @@ async fn oci_clone(src: impl AsRef, dest: impl AsRef) -> Res Ok(()) } +#[tokio::test] +async fn test_container_import_export() -> Result<()> { + impl_test_container_import_export(false).await?; + impl_test_container_import_export(true).await?; + Ok(()) +} + /// But layers work via the container::write module. #[tokio::test] async fn test_container_write_derive() -> Result<()> { @@ -535,6 +665,7 @@ async fn test_container_write_derive() -> Result<()> { ..Default::default() }, None, + None, &ImageReference { transport: Transport::OciDir, name: base_oci_path.to_string(), @@ -578,28 +709,28 @@ async fn test_container_write_derive() -> Result<()> { let images = ostree_ext::container::store::list_images(fixture.destrepo())?; assert!(images.is_empty()); - // Verify importing a derive dimage fails + // Verify importing a derived image fails let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &derived_ref, None).await; - assert_err_contains(r, "Expected 1 layer, found 2"); + assert_err_contains(r, "Image has 1 non-ostree layers"); // Pull a derived image - two layers, new base plus one layer. - let mut imp = ostree_ext::container::store::LayeredImageImporter::new( + let mut imp = ostree_ext::container::store::ImageImporter::new( fixture.destrepo(), &derived_ref, Default::default(), ) .await?; - let prep = match imp.prepare().await? { + let prep = match imp.prepare().await.context("Init prep derived")? { PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), PrepareResult::Ready(r) => r, }; let expected_digest = prep.manifest_digest.clone(); - assert!(prep.base_layer.commit.is_none()); + assert!(prep.ostree_commit_layer.commit.is_none()); assert_eq!(prep.layers.len(), 1); for layer in prep.layers.iter() { assert!(layer.commit.is_none()); } - let import = imp.import(prep).await?; + let import = imp.import(prep).await.context("Init pull derived")?; // We should have exactly one image stored. let images = ostree_ext::container::store::list_images(fixture.destrepo())?; assert_eq!(images.len(), 1); @@ -613,17 +744,13 @@ async fn test_container_write_derive() -> Result<()> { assert!(digest.starts_with("sha256:")); assert_eq!(digest, expected_digest); - #[cfg(feature = "proxy_v0_2_3")] - { - let commit_meta = &imported_commit.child_value(0); - let proxy = containers_image_proxy::ImageProxy::new().await?; - let commit_meta = glib::VariantDict::new(Some(commit_meta)); - let config = commit_meta - .lookup::("ostree.container.image-config")? - .unwrap(); - let config: oci_spec::image::ImageConfiguration = serde_json::from_str(&config)?; - assert_eq!(config.os(), &oci_spec::image::Os::Linux); - } + let commit_meta = &imported_commit.child_value(0); + let commit_meta = glib::VariantDict::new(Some(commit_meta)); + let config = commit_meta + .lookup::("ostree.container.image-config")? + .unwrap(); + let config: oci_spec::image::ImageConfiguration = serde_json::from_str(&config)?; + assert_eq!(config.os(), &oci_spec::image::Os::Linux); // Parse the commit and verify we pulled the derived content. 
bash_in!( @@ -633,7 +760,7 @@ async fn test_container_write_derive() -> Result<()> { )?; // Import again, but there should be no changes. - let mut imp = ostree_ext::container::store::LayeredImageImporter::new( + let mut imp = ostree_ext::container::store::ImageImporter::new( fixture.destrepo(), &derived_ref, Default::default(), @@ -650,7 +777,7 @@ async fn test_container_write_derive() -> Result<()> { // Test upgrades; replace the oci-archive with new content. std::fs::remove_dir_all(derived_path)?; std::fs::rename(derived2_path, derived_path)?; - let mut imp = ostree_ext::container::store::LayeredImageImporter::new( + let mut imp = ostree_ext::container::store::ImageImporter::new( fixture.destrepo(), &derived_ref, Default::default(), @@ -661,7 +788,7 @@ async fn test_container_write_derive() -> Result<()> { PrepareResult::Ready(r) => r, }; // We *should* already have the base layer. - assert!(prep.base_layer.commit.is_some()); + assert!(prep.ostree_commit_layer.commit.is_some()); // One new layer assert_eq!(prep.layers.len(), 1); for layer in prep.layers.iter() { @@ -689,7 +816,7 @@ async fn test_container_write_derive() -> Result<()> { )?; // And there should be no changes on upgrade again. - let mut imp = ostree_ext::container::store::LayeredImageImporter::new( + let mut imp = ostree_ext::container::store::ImageImporter::new( fixture.destrepo(), &derived_ref, Default::default(), @@ -744,10 +871,16 @@ async fn test_container_import_export_registry() -> Result<()> { cmd: Some(vec!["/bin/bash".to_string()]), ..Default::default() }; - let digest = - ostree_ext::container::encapsulate(fixture.srcrepo(), testref, &config, None, &src_imgref) - .await - .context("exporting to registry")?; + let digest = ostree_ext::container::encapsulate( + fixture.srcrepo(), + testref, + &config, + None, + None, + &src_imgref, + ) + .await + .context("exporting to registry")?; let mut digested_imgref = src_imgref.clone(); digested_imgref.name = format!("{}@{}", src_imgref.name, digest); From 2626dbf9b71d8ab67b723bc4f2fae5d179ca3822 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 22 Mar 2022 20:54:51 -0400 Subject: [PATCH 334/775] container: Add manifest/config to image query, add `image history` CLI Extend our image state struct to include the manifest and image configuration (if available, only in v1). Add a `container image history` CLI verb which prints it. --- lib/Cargo.toml | 1 + lib/src/cli.rs | 69 ++++++++++++++++++++++++++++++++++++++ lib/src/container/store.rs | 38 ++++++++++++--------- 3 files changed, 93 insertions(+), 15 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index dc24fdded..f7c763e04 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -39,6 +39,7 @@ serde_json = "1.0.64" structopt = "0.3.21" tar = "0.4.38" tempfile = "3.2.0" +term_size = "0.3.2" tokio = { features = ["full"], version = "1" } tokio-util = { features = ["io-util"], version = "0.6.9" } tokio-stream = { features = ["sync"], version = "0.1.8" } diff --git a/lib/src/cli.rs b/lib/src/cli.rs index fffa2a562..a599a45fb 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -188,6 +188,17 @@ enum ContainerImageOpts { proxyopts: ContainerProxyOpts, }, + /// Pull (or update) a container image. + History { + /// Path to the repository + #[structopt(long, parse(try_from_str = parse_repo))] + repo: ostree::Repo, + + /// Image reference, e.g. 
ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest + #[structopt(parse(try_from_str = parse_imgref))] + imgref: OstreeImageReference, + }, + /// Copy a pulled container image from one repo to another. Copy { /// Path to the source repository @@ -467,6 +478,61 @@ async fn container_store( Ok(()) } +fn print_column(s: &str, clen: usize, remaining: &mut usize) { + let l = s.len().min(*remaining); + print!("{}", &s[0..l]); + if clen > 0 { + // We always want two trailing spaces + let pad = clen.saturating_sub(l) + 2; + for _ in 0..pad { + print!(" "); + } + *remaining = remaining.checked_sub(l + pad).unwrap(); + } +} + +/// Output the container image history +async fn container_history(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<()> { + let img = crate::container::store::query_image(repo, imgref)? + .ok_or_else(|| anyhow::anyhow!("No such image: {}", imgref))?; + let columns = [("ID", 20), ("SIZE", 10), ("CREATED BY", 0usize)]; + let width = term_size::dimensions().map(|x| x.0).unwrap_or(80); + if let Some(config) = img.configuration.as_ref() { + { + let mut remaining = width; + for (name, width) in columns.iter() { + print_column(name, *width as usize, &mut remaining); + } + println!(); + } + + let mut history = config.history().iter(); + let layers = img.manifest.layers().iter(); + for layer in layers { + let histent = history.next(); + let created_by = histent + .and_then(|s| s.created_by().as_deref()) + .unwrap_or(""); + + let mut remaining = width; + + let digest = layer.digest().as_str(); + // Verify it's OK to slice, this should all be ASCII + assert!(digest.chars().all(|c| c.is_ascii())); + let digest_max = columns[0].1; + let digest = &digest[0..digest_max]; + print_column(digest, digest_max, &mut remaining); + let size = glib::format_size(layer.size() as u64); + print_column(size.as_str(), columns[1].1, &mut remaining); + print_column(created_by, columns[2].1, &mut remaining); + println!(); + } + Ok(()) + } else { + anyhow::bail!("v0 image does not have fetched configuration"); + } +} + /// Add IMA signatures to an ostree commit, generating a new commit. fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { let signopts = crate::ima::ImaOpts { @@ -550,6 +616,9 @@ where imgref, proxyopts, } => container_store(&repo, &imgref, proxyopts).await, + ContainerImageOpts::History { repo, imgref } => { + container_history(&repo, &imgref).await + } ContainerImageOpts::Copy { src_repo, dest_repo, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 0eff16880..9da849010 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -65,6 +65,10 @@ pub struct LayeredImageState { pub is_layered: bool, /// The digest of the original manifest pub manifest_digest: String, + /// The image manifest + pub manifest: ImageManifest, + /// The image configuration; for v0 images, may not be available. + pub configuration: Option<ImageConfiguration>, } impl LayeredImageState { @@ -208,6 +212,16 @@ fn manifest_data_from_commitmeta( Ok((r, digest)) } +fn image_config_from_commitmeta( + commit_meta: &glib::VariantDict, +) -> Result<Option<ImageConfiguration>> { + commit_meta + .lookup::<String>(META_CONFIG)? + .filter(|v| v != "null") // Format v0: apparently old versions injected `null` here, sadly... + .map(|v| serde_json::from_str(&v).map_err(anyhow::Error::msg)) + .transpose() +} + /// Return the original digest of the manifest stored in the commit metadata. /// This will be a string of the form e.g. `sha256:<digest>`. 
/// @@ -293,15 +307,13 @@ impl ImageImporter { // Query for previous stored state let (previous_manifest_digest, previous_imageid) = - if let Some((previous_manifest, previous_state)) = - query_image_impl(&self.repo, &self.imgref)? - { + if let Some(previous_state) = query_image(&self.repo, &self.imgref)? { // If the manifest digests match, we're done. if previous_state.manifest_digest == manifest_digest { return Ok(PrepareResult::AlreadyPresent(previous_state)); } // Failing that, if they have the same imageID, we're also done. - let previous_imageid = previous_manifest.config().digest().as_str(); + let previous_imageid = previous_state.manifest.config().digest().as_str(); if previous_imageid == new_imageid { return Ok(PrepareResult::AlreadyPresent(previous_state)); } @@ -598,10 +610,11 @@ pub fn list_images(repo: &ostree::Repo) -> Result<Vec<String>> { .collect() } -fn query_image_impl( +/// Query metadata for a pulled image. +pub fn query_image( repo: &ostree::Repo, imgref: &OstreeImageReference, -) -> Result<Option<(ImageManifest, LayeredImageState)>> { +) -> Result<Option<LayeredImageState>> { let ostree_ref = &ref_for_image(&imgref.imgref)?; let merge_rev = repo.resolve_rev(ostree_ref, true)?; let (merge_commit, merge_commit_obj) = if let Some(r) = merge_rev { (r.to_string(), repo.load_commit(r.as_str())?.0) @@ -612,6 +625,7 @@ fn query_image_impl( let commit_meta = &merge_commit_obj.child_value(0); let commit_meta = &ostree::glib::VariantDict::new(Some(commit_meta)); let (manifest, manifest_digest) = manifest_data_from_commitmeta(commit_meta)?; + let configuration = image_config_from_commitmeta(commit_meta)?; let mut layers = manifest.layers().iter().cloned(); // We require a base layer. let base_layer = layers.next().ok_or_else(|| anyhow!("No layers found"))?; @@ -626,17 +640,11 @@ fn query_image_impl( merge_commit, is_layered, manifest_digest, + manifest, + configuration, }; tracing::debug!(state = ?state); - Ok(Some((manifest, state))) -} - -/// Query metadata for a pulled image. -pub fn query_image( - repo: &ostree::Repo, - imgref: &OstreeImageReference, -) -> Result<Option<LayeredImageState>> { - Ok(query_image_impl(repo, imgref)?.map(|v| v.1)) + Ok(Some(state)) } /// Copy a downloaded image from one repository to another. From 2626dbf9b71d8ab67b723bc4f2fae5d179ca3822 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 27 Mar 2022 14:40:02 -0400 Subject: [PATCH 335/775] container: Also `Box` image state Adding the manifest and config directly to the struct greatly inflates the size of one enum variant as clippy points out. Let's `Box` this to avoid putting ~1k bytes on the stack. --- lib/src/container/store.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 9da849010..57f5967e8 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -101,7 +101,7 @@ pub struct ImageImporter { #[derive(Debug)] pub enum PrepareResult { /// The image reference is already present; the contained string is the OSTree commit. - AlreadyPresent(LayeredImageState), + AlreadyPresent(Box<LayeredImageState>), /// The image needs to be downloaded Ready(Box<PreparedImport>), } @@ -491,7 +491,10 @@ impl ImageImporter { } /// Import a layered container image - pub async fn import(mut self, mut import: Box<PreparedImport>) -> Result<LayeredImageState> { + pub async fn import( + mut self, + mut import: Box<PreparedImport>, + ) -> Result<Box<LayeredImageState>> { // First download all layers for the base image (if necessary) - we need the SELinux policy // there to label all following layers. 
self.unencapsulate_base(&mut import, None, true).await?; @@ -555,7 +558,7 @@ impl ImageImporter { let repo = self.repo; let imgref = self.target_imgref.unwrap_or(self.imgref); let state = crate::tokio_util::spawn_blocking_cancellable_flatten( - move |cancellable| -> Result<LayeredImageState> { + move |cancellable| -> Result<Box<LayeredImageState>> { let cancellable = Some(cancellable); let repo = &repo; let txn = repo.auto_transaction(cancellable)?; @@ -614,7 +617,7 @@ pub fn list_images(repo: &ostree::Repo) -> Result<Vec<String>> { pub fn query_image( repo: &ostree::Repo, imgref: &OstreeImageReference, -) -> Result<Option<LayeredImageState>> { +) -> Result<Option<Box<LayeredImageState>>> { let ostree_ref = &ref_for_image(&imgref.imgref)?; let merge_rev = repo.resolve_rev(ostree_ref, true)?; let (merge_commit, merge_commit_obj) = if let Some(r) = merge_rev { @@ -635,14 +638,14 @@ pub fn query_image( .ok_or_else(|| anyhow!("Missing base image ref"))?; // If there are more layers after the base, then we're layered. let is_layered = layers.count() > 0; - let state = LayeredImageState { + let state = Box::new(LayeredImageState { base_commit, merge_commit, is_layered, manifest_digest, manifest, configuration, - }; + }); tracing::debug!(state = ?state); Ok(Some(state)) } From 2eb297072a913d11136514782db1ae8877866011 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 31 Mar 2022 18:31:58 -0400 Subject: [PATCH 336/775] ci: Update MSRV to 1.58.1 To use the new inline format bits. --- .github/workflows/rust.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index dd6dec573..24a79fa80 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -15,7 +15,7 @@ on: env: CARGO_TERM_COLOR: always # Minimum supported Rust version (MSRV) - ACTION_MSRV_TOOLCHAIN: 1.54.0 + ACTION_MSRV_TOOLCHAIN: 1.58.1 # Pinned toolchain for linting ACTION_LINTS_TOOLCHAIN: 1.58.1 From aed62719f35586d85b96fa6ec351aea29f61050b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 29 Mar 2022 20:37:16 -0400 Subject: [PATCH 337/775] cli: Add more docstrings Specifically for `commit`. --- lib/src/cli.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index a599a45fb..ff8c21c0e 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -136,6 +136,8 @@ enum ContainerOpts { }, #[structopt(alias = "commit")] + /// Perform build-time checking and canonicalization. + /// This is presently an optional command, but may become required in the future. Commit, /// Commands for working with (possibly layered, non-encapsulated) container images. @@ -288,6 +290,7 @@ enum Opt { Container(ContainerOpts), /// IMA signatures ImaSign(ImaSignOpts), + /// Internal integration testing helpers. #[structopt(setting(structopt::clap::AppSettings::Hidden))] #[cfg(feature = "internal-testing-api")] InternalOnlyForTesting(TestingOpts), From 18baae0ac37a388c8010a54904329091b732329e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 29 Mar 2022 20:39:02 -0400 Subject: [PATCH 338/775] cli: Use `bool::then` Minor drive by code improvement. 
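To illustrate the idiom for readers who haven't used it (a minimal standalone sketch, not taken from this patch):

    // Illustrative only: bool::then runs the closure and wraps its result in
    // Some(..) when the receiver is true, and yields None otherwise. It
    // replaces the common `if cond { Some(x) } else { None }` shape.
    fn maybe_message(quiet: bool) -> Option<String> {
        // Equivalent to: if !quiet { Some("Downloading...".to_string()) } else { None }
        (!quiet).then(|| "Downloading...".to_string())
    }

The diff below applies exactly this transformation to the progress-bar setup.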
--- lib/src/cli.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index ff8c21c0e..ec753e65b 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -353,16 +353,14 @@ async fn container_import( let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default()); let target = indicatif::ProgressDrawTarget::stdout(); let style = indicatif::ProgressStyle::default_bar(); - let pb = if !quiet { + let pb = (!quiet).then(|| { let pb = indicatif::ProgressBar::new_spinner(); pb.set_draw_target(target); pb.set_style(style.template("{spinner} {prefix} {msg}")); pb.enable_steady_tick(200); pb.set_message("Downloading..."); - Some(pb) - } else { - None - }; + pb + }); let opts = UnencapsulateOptions { progress: Some(tx_progress), }; From d8e6be86dc2746e31cc0ffa1ea9174fcca44580c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 29 Mar 2022 20:43:20 -0400 Subject: [PATCH 339/775] tree-wide: Use some captured identifiers in format strings xref https://blog.rust-lang.org/2022/01/13/Rust-1.58.0.html#captured-identifiers-in-format-strings Another drive by code improvement, no immediate motivation. I've just been using it in other places and I think we're fine to hard depend on Rust 1.58 - it's in RHEL8.6 for example. --- lib/src/chunking.rs | 2 +- lib/src/container/encapsulate.rs | 2 +- lib/src/container/mod.rs | 4 ++-- lib/src/diff.rs | 4 ++-- lib/src/tar/export.rs | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 7a128baee..2f7ffa338 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -342,7 +342,7 @@ impl Chunking { ); Cow::Owned(r) } - n => Cow::Owned(format!("{} components", n)), + n => Cow::Owned(format!("{n} components")), }; let mut chunk = Chunk::new(&*name); for szmeta in bin { diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index efc7d7f6a..1a79e8fbb 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -92,7 +92,7 @@ fn export_chunked( .map(|(i, chunk)| -> Result<_> { let mut w = ociw.create_layer(compression)?; ostree_tar::export_chunk(repo, &chunk, &mut w) - .with_context(|| format!("Exporting chunk {}", i))?; + .with_context(|| format!("Exporting chunk {i}"))?; let w = w.into_inner()?; Ok((w.complete()?, chunk.name)) }) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 99ac102a8..cd83ab013 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -152,7 +152,7 @@ impl TryFrom<&str> for OstreeImageReference { // Shorthand for ostree-unverified-image:registry: "ostree-unverified-registry" => ( SignatureSource::ContainerPolicyAllowInsecure, - Cow::Owned(format!("registry:{}", second)), + Cow::Owned(format!("registry:{second}")), ), // This is a shorthand for ostree-remote-image with registry: "ostree-remote-registry" => { @@ -161,7 +161,7 @@ impl TryFrom<&str> for OstreeImageReference { .ok_or_else(|| anyhow!("Missing second ':' in {}", value))?; ( SignatureSource::OstreeRemote(remote.to_string()), - Cow::Owned(format!("registry:{}", rest)), + Cow::Owned(format!("registry:{rest}")), ) } "ostree-remote-image" => { diff --git a/lib/src/diff.rs b/lib/src/diff.rs index f965d25b2..6f7c5a62b 100644 --- a/lib/src/diff.rs +++ b/lib/src/diff.rs @@ -89,7 +89,7 @@ fn diff_recurse( let from_child = from_iter.child(&from_info); let name = from_info.name(); let name = name.to_str().expect("UTF-8 ostree name"); - let path = format!("{}{}", prefix, name); + let 
path = format!("{prefix}{name}"); let to_child = to.child(&name); let to_info = query_info_optional(&to_child, queryattrs, queryflags) .context("querying optional to")?; @@ -132,7 +132,7 @@ fn diff_recurse( while let Some(to_info) = to_iter.next_file(cancellable)? { let name = to_info.name(); let name = name.to_str().expect("UTF-8 ostree name"); - let path = format!("{}{}", prefix, name); + let path = format!("{prefix}{name}"); let from_child = from.child(name); let from_info = query_info_optional(&from_child, queryattrs, queryflags) .context("querying optional from")?; diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 646722d1a..2579e25ed 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -289,7 +289,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let data = v.data_as_bytes(); let data = data.as_ref(); self.append_default_data(&object_path(objtype, checksum), data) - .with_context(|| format!("Writing object {}", checksum))?; + .with_context(|| format!("Writing object {checksum}"))?; Ok(()) } From f91a180132a5120adc2fed4bfe40619377339212 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 2 Apr 2022 14:04:19 -0400 Subject: [PATCH 340/775] container: Add `--write-commitid-to` for `image deploy` Right now in coreos-assembler we have code that wants to know the exact deploy root, and for that with layered images we need the merge commit ID. First, change the deploy API to return the image state. There's no reason to just throw it away. Add an option to the CLI which writes the commitid to a file. --- lib/src/cli.rs | 22 +++++++++++++++++--- lib/src/container/deploy.rs | 6 ++++-- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index ec753e65b..2ff4f99ae 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -5,7 +5,8 @@ //! also exported as a library too, so that projects //! such as `rpm-ostree` can directly reuse it. -use anyhow::Result; +use anyhow::{Context, Result}; +use camino::Utf8PathBuf; use futures_util::FutureExt; use ostree::{cap_std, gio, glib}; use std::borrow::Borrow; @@ -247,6 +248,10 @@ enum ContainerImageOpts { #[structopt(long)] /// Add a kernel argument karg: Option<Vec<String>>, + + /// Write the deployed checksum to this file + #[structopt(long)] + write_commitid_to: Option<Utf8PathBuf>, }, } @@ -632,6 +637,7 @@ where target_imgref, karg, proxyopts, + write_commitid_to, } => { let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); sysroot.load(gio::NONE_CANCELLABLE)?; @@ -645,8 +651,18 @@ where target_imgref: target_imgref.as_ref(), proxy_cfg: Some(proxyopts.into()), }; - crate::container::deploy::deploy(sysroot, &stateroot, &imgref, Some(options)) - .await + let state = crate::container::deploy::deploy( + sysroot, + &stateroot, + &imgref, + Some(options), + ) + .await?; + if let Some(p) = write_commitid_to { + std::fs::write(&p, state.merge_commit.as_bytes()) + .with_context(|| format!("Failed to write commitid to {}", p))?; + } + Ok(()) } }, }, diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 0137ab795..855f11866 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -1,5 +1,6 @@ //! 
Perform initial setup for a container image based system root +use super::store::LayeredImageState; use super::OstreeImageReference; use crate::container::store::PrepareResult; use anyhow::Result; @@ -37,7 +38,7 @@ pub async fn deploy( stateroot: &str, imgref: &OstreeImageReference, options: Option<DeployOpts<'_>>, -) -> Result<()> { +) -> Result<Box<LayeredImageState>> { let cancellable = ostree::gio::NONE_CANCELLABLE; let options = options.unwrap_or_default(); let repo = &sysroot.repo().unwrap(); @@ -66,5 +67,6 @@ pub async fn deploy( let flags = ostree::SysrootSimpleWriteDeploymentFlags::NONE; sysroot.simple_write_deployment(Some(stateroot), deployment, None, flags, cancellable)?; sysroot.cleanup(cancellable)?; - Ok(()) + + Ok(state) } From 44cdd5ccf599904fa69a02455c7caff10263ccc9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 4 Apr 2022 12:33:11 -0400 Subject: [PATCH 341/775] Bump to 0.7.0 (lots of semver-incompat API changes) The biggest thing here was the merge of "chunked ostree" support in https://github.com/ostreedev/ostree-rs-ext/pull/123 cascading into various API changes. But I think it's working, let's cut 0.7 so the next rpm-ostree can roll in the support. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index f7c763e04..78621e0eb 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.6.5" +version = "0.7.0" [dependencies] anyhow = "1.0" From 2d547face7107b090845995c9b0422ead7fc0c64 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 10 Apr 2022 18:10:02 -0400 Subject: [PATCH 342/775] container: Add support for layer fetch notifications Part of https://github.com/ostreedev/ostree-rs-ext/issues/277 When we only supported one big ostree tarball, our current single u64 progress notification was OK. But clients (including our CLI) really want more dynamic progress notifications. This adds support for an optional channel that receives started/finished notifications for layers. Note that the client *must* read from this channel if they request it. The channel is not lossy. --- lib/src/cli.rs | 58 ++++++++++++++++++++++++++++++++------ lib/src/container/store.rs | 53 ++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 9 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 2ff4f99ae..5a4cc46b1 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -15,10 +15,12 @@ use std::convert::TryFrom; use std::ffi::OsString; use std::path::PathBuf; use structopt::StructOpt; +use tokio::sync::mpsc::Receiver; use tokio_stream::StreamExt; use crate::commit::container_commit; use crate::container as ostree_container; +use crate::container::store::{ImportProgress, PreparedImport}; use crate::container::{Config, ImageReference, OstreeImageReference, UnencapsulateOptions}; use ostree_container::store::{ImageImporter, PrepareResult}; use ostree_container::UnencapsulationProgress; @@ -348,6 +350,46 @@ enum ProgressOrFinish { Finished(Result), } +/// Render an import progress notification as a string. 
+pub fn layer_progress_format(p: &ImportProgress) -> String { + let (starting, s, layer) = match p { + ImportProgress::OstreeChunkStarted(v) => (true, "ostree chunk", v), + ImportProgress::OstreeChunkCompleted(v) => (false, "ostree chunk", v), + ImportProgress::DerivedLayerStarted(v) => (true, "layer", v), + ImportProgress::DerivedLayerCompleted(v) => (false, "layer", v), + }; + // podman outputs 12 characters of digest, let's add 7 for `sha256:`. + let short_digest = layer.digest().chars().take(12 + 7).collect::<String>(); + if starting { + let size = glib::format_size(layer.size() as u64); + format!("Fetching {s} {short_digest} ({size})") + } else { + format!("Fetched {s} {short_digest}") + } +} + +async fn handle_layer_progress_print(mut r: Receiver<ImportProgress>) { + while let Some(v) = r.recv().await { + println!("{}", layer_progress_format(&v)); + } +} + +fn print_layer_status(prep: &PreparedImport) { + let (stored, to_fetch, to_fetch_size) = + prep.all_layers() + .fold((0u32, 0u32, 0u64), |(stored, to_fetch, sz), v| { + if v.commit.is_some() { + (stored + 1, to_fetch, sz) + } else { + (stored, to_fetch + 1, sz + v.size()) + } + }); + if to_fetch > 0 { + let size = crate::glib::format_size(to_fetch_size); + println!("layers stored: {stored} needed: {to_fetch} ({size})"); + } +} + /// Import a container image with an encapsulated ostree commit. async fn container_import( repo: &ostree::Repo, @@ -451,6 +493,7 @@ async fn container_store( proxyopts: ContainerProxyOpts, ) -> Result<()> { let mut imp = ImageImporter::new(repo, imgref, proxyopts.into()).await?; + let layer_progress = imp.request_progress(); let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { println!("No changes in {} => {}", imgref, c.merge_commit); } PrepareResult::Ready(r) => r, }; - for layer in prep.all_layers() { - if layer.commit.is_some() { - println!("Using layer: {}", layer.digest()); - } else { - let size = crate::glib::format_size(layer.size()); - println!("Downloading layer: {} ({})", layer.digest(), size); - } - } - let import = imp.import(prep).await?; + print_layer_status(&prep); + let progress_printer = + tokio::task::spawn(async move { handle_layer_progress_print(layer_progress).await }); + let import = imp.import(prep).await; + let _ = progress_printer.await; + let import = import?; let commit = &repo.load_commit(&import.merge_commit)?.0; let commit_meta = &glib::VariantDict::new(Some(&commit.child_value(0))); let filtered = commit_meta.lookup::<String>( diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 57f5967e8..4c0c63b3d 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -16,6 +16,7 @@ use ostree::{gio, glib}; use std::collections::HashMap; use std::iter::FromIterator; use std::sync::{Arc, Mutex}; +use tokio::sync::mpsc::{Receiver, Sender}; /// Configuration for the proxy. /// @@ -54,6 +55,19 @@ fn ref_for_image(l: &ImageReference) -> Result<String> { refescape::prefix_escape_for_ref(IMAGE_PREFIX, &l.to_string()) } +/// Sent across a channel to track start and end of a container fetch. +#[derive(Debug)] +pub enum ImportProgress { + /// Started fetching this layer. + OstreeChunkStarted(Descriptor), + /// Successfully completed the fetch of this layer. + OstreeChunkCompleted(Descriptor), + /// Started fetching this layer. + DerivedLayerStarted(Descriptor), + /// Successfully completed the fetch of this layer. + DerivedLayerCompleted(Descriptor), +} + /// State of an already pulled layered image. 
#[derive(Debug, PartialEq, Eq)] pub struct LayeredImageState { @@ -95,6 +109,8 @@ pub struct ImageImporter { imgref: OstreeImageReference, target_imgref: Option<OstreeImageReference>, pub(crate) proxy_img: OpenedImage, + + layer_progress: Option<Sender<ImportProgress>>, } /// Result of invoking [`LayeredImageImporter::prepare`]. @@ -274,6 +290,7 @@ impl ImageImporter { proxy_img, target_imgref: None, imgref: imgref.clone(), + layer_progress: None, }) } @@ -286,6 +303,14 @@ impl ImageImporter { self.prepare_internal(false).await } + /// Create a channel receiver that will get notifications for layer fetches. + pub fn request_progress(&mut self) -> Receiver<ImportProgress> { + assert!(self.layer_progress.is_none()); + let (s, r) = tokio::sync::mpsc::channel(2); + self.layer_progress = Some(s); + r + } + /// Determine if there is a new manifest, and if so return its digest. #[context("Fetching manifest")] pub(crate) async fn prepare_internal(&mut self, verify_layers: bool) -> Result<PrepareResult> { @@ -405,6 +430,10 @@ impl ImageImporter { if layer.commit.is_some() { continue; } + if let Some(p) = self.layer_progress.as_ref() { + p.send(ImportProgress::OstreeChunkStarted(layer.layer.clone())) + .await?; + } let (blob, driver) = fetch_layer_decompress(&mut self.proxy, &self.proxy_img, &layer.layer).await?; let blob = super::unencapsulate::ProgressReader { blob, @@ -433,8 +462,18 @@ impl ImageImporter { }); let commit = super::unencapsulate::join_fetch(import_task, driver).await?; layer.commit = commit; + if let Some(p) = self.layer_progress.as_ref() { + p.send(ImportProgress::OstreeChunkCompleted(layer.layer.clone())) + .await?; + } } if import.ostree_commit_layer.commit.is_none() { + if let Some(p) = self.layer_progress.as_ref() { + p.send(ImportProgress::OstreeChunkStarted( + import.ostree_commit_layer.layer.clone(), + )) + .await?; + } let (blob, driver) = fetch_layer_decompress( + &mut self.proxy, + &self.proxy_img, + &import.ostree_commit_layer.layer, + ) + .await?; @@ -465,6 +504,12 @@ impl ImageImporter { }); let commit = super::unencapsulate::join_fetch(import_task, driver).await?; import.ostree_commit_layer.commit = Some(commit); + if let Some(p) = self.layer_progress.as_ref() { + p.send(ImportProgress::OstreeChunkCompleted( + import.ostree_commit_layer.layer.clone(), + )) + .await?; + } }; Ok(()) } @@ -511,6 +556,10 @@ impl ImageImporter { tracing::debug!("Reusing fetched commit {}", c); layer_commits.push(c.to_string()); } else { + if let Some(p) = self.layer_progress.as_ref() { + p.send(ImportProgress::DerivedLayerStarted(layer.layer.clone())) + .await?; + } let (blob, driver) = super::unencapsulate::fetch_layer_decompress( + &mut proxy, + &self.proxy_img, + &layer.layer, + ) + .await?; @@ -533,6 +582,10 @@ impl ImageImporter { let filtered = HashMap::from_iter(r.filtered.into_iter()); layer_filtered_content.insert(layer.digest().to_string(), filtered); } + if let Some(p) = self.layer_progress.as_ref() { + p.send(ImportProgress::DerivedLayerCompleted(layer.layer.clone())) + .await?; + } } } From e69225ebf1eda901b4498acda43789e9b1156580 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 14 Apr 2022 14:12:26 -0400 Subject: [PATCH 343/775] Fix pulling format 0 layered images Yeah...another big bug that snuck through in the last release. We can't hard-require the new format label; that breaks compatibility with old-format layered images. 
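Condensed, the fallback this patch introduces looks like the following (a restatement of the hunk below, not additional code):

    // Prefer the diffid label when present; otherwise (format version 0)
    // fall back to treating the first layer as the ostree commit layer.
    let commit_layer_digest = if let Some(diffid) = diffid {
        layer_from_diffid(&manifest, &config, diffid.as_str())?.digest()
    } else {
        manifest.layers()[0].digest()
    };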
--- lib/src/container/store.rs | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 57f5967e8..251f25566 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -329,22 +329,14 @@ impl ImageImporter { let label = crate::container::OSTREE_DIFFID_LABEL; let config_labels = config.config().as_ref().and_then(|c| c.labels().as_ref()); + let diffid = config_labels.and_then(|labels| labels.get(label)); // For backwards compatibility, if there's only 1 layer, don't require the label. // This can be dropped when we drop format version 0 support. - let commit_layer_digest = if config.rootfs().diff_ids().len() == 1 { - manifest.layers()[0].digest() - } else { - let diffid = config_labels - .and_then(|labels| labels.get(label)) - .ok_or_else(|| { - anyhow!( - "Missing annotation {} (not an ostree-exported container?)", - label - ) - })?; - + let commit_layer_digest = if let Some(diffid) = diffid { let layer = layer_from_diffid(&manifest, &config, diffid.as_str())?; layer.digest() + } else { + manifest.layers()[0].digest() }; let mut component_layers = Vec::new(); let mut commit_layer = None; From d87d8bf4eb67278d33500b0ba5408f8694eebbbc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 12 Apr 2022 19:06:28 -0400 Subject: [PATCH 344/775] ci: Add a test flow that does privileged integration testing Trying to close the gap we have on this repository around integration testing. I am hopeful that by merging at least the ostree{,-rs,-ext} repositories together we will get this naturally in the future. This first test crucially also uses the *existing* images as fixtures, so we'll test compatibility with current ones. I do still plan to set up OCP Prow for this repo to have reliable nested virt CI, but...I realized a pretty useful pattern: we can run a privileged container that "owns" the host and execute code there too. This current CI test doesn't actually run code directly on the host; it uses ostree's `--sysroot` bits to just write files. There are interesting aspects to this; for example, while Ubuntu doesn't enable SELinux by default, it does have the kernel code enabled, and we can write the `security.selinux` xattrs just fine from FCOS. 
--- .github/workflows/rust.yml | 18 ++++++++++++++++++ ci/priv-integration.sh | 27 +++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100755 ci/priv-integration.sh diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 24a79fa80..c99d2ccc2 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -96,3 +96,21 @@ jobs: run: install ostree-ext-cli /usr/bin && rm -v ostree-ext-cli - name: Integration tests run: ./ci/integration.sh + privtest: + name: "Privileged testing" + needs: build + runs-on: ubuntu-latest + container: + image: quay.io/coreos-assembler/fcos:testing-devel + options: "--privileged -v /:/run/host" + steps: + - name: Checkout repository + uses: actions/checkout@v2 + - name: Download + uses: actions/download-artifact@v2 + with: + name: ostree-ext-cli + - name: Install + run: install ostree-ext-cli /usr/bin && rm -v ostree-ext-cli + - name: Integration tests + run: ./ci/priv-integration.sh diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh new file mode 100755 index 000000000..6e0f402c7 --- /dev/null +++ b/ci/priv-integration.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Assumes that the current environment is a privileged container +# with the host mounted at /run/host. We can basically write +# whatever we want, however we can't actually *reboot* the host. +set -euo pipefail + +sysroot=/run/host +# Current stable image fixture +image=quay.io/coreos-assembler/fcos:testing-devel +# My hand-uploaded chunked images +chunked_image=quay.io/cgwalters/fcos-chunked:latest +imgref=ostree-unverified-registry:${image} +stateroot=testos + +set -x + +if test '!' -e "${sysroot}/ostree"; then + ostree admin init-fs --modern "${sysroot}" + ostree config --repo $sysroot/ostree/repo set sysroot.bootloader none +fi +ostree admin os-init "${stateroot}" --sysroot "${sysroot}" +ostree-ext-cli container image deploy --sysroot "${sysroot}" \ + --stateroot "${stateroot}" --imgref "${imgref}" +ostree admin --sysroot="${sysroot}" status +ostree-ext-cli container image deploy --sysroot "${sysroot}" \ + --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${chunked_image}" +ostree admin --sysroot="${sysroot}" status From 76c07bd8c47bd623d1bd38d767f5fce9a27eedf8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 17 Apr 2022 13:32:40 -0400 Subject: [PATCH 345/775] ima: Clarify that key is a path No functional changes, it's just clearer. 
--- lib/src/cli.rs | 2 +- lib/src/ima.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 2ff4f99ae..7a3ecfee9 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -270,7 +270,7 @@ struct ImaSignOpts { /// Digest algorithm algorithm: String, /// Path to IMA key - key: String, + key: Utf8PathBuf, } /// Options for internal testing diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 83aef9127..7a99ad2c1 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -4,6 +4,7 @@ use crate::objgv::*; use anyhow::{Context, Result}; +use camino::Utf8PathBuf; use cap_std_ext::rustix::fd::BorrowedFd; use fn_error_context::context; use gio::glib; @@ -34,7 +35,7 @@ pub struct ImaOpts { pub algorithm: String, /// Path to IMA key - pub key: String, + pub key: Utf8PathBuf, } /// Convert a GVariant of type `a(ayay)` to a mutable map From 07e1c6864d0071c02e299e81678b932fd3dd6819 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 17 Apr 2022 13:49:28 -0400 Subject: [PATCH 346/775] ima: Only do IMA signatures, not EVM Now, there is high alignment between ostree and EVM around ensuring that the security xattrs are immutable/signed too; but as I understand things, this attribute is really intended to be machine-local. --- lib/src/ima.rs | 50 +++++++++++++------------------------------------- 1 file changed, 13 insertions(+), 37 deletions(-) diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 7a99ad2c1..3449c3b16 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -25,8 +25,7 @@ use std::rc::Rc; use std::{convert::TryInto, io::Seek}; /// Extended attribute keys used for IMA. -const IMA_XATTRS: &[&str] = &["security.ima", "security.evm"]; -const SELINUX_XATTR: &[u8] = b"security.selinux\0"; +const IMA_XATTR: &str = "security.ima"; /// Attributes to configure IMA signatures. #[derive(Debug, Clone)] @@ -114,12 +113,8 @@ impl<'a> CommitRewriter<'a> { /// evmctl can write a separate file but it picks the name...so /// we do this hacky dance of `--xattr-user` instead. #[allow(unsafe_code)] - #[context("Invoking evmctl")] - fn ima_sign( - &self, - instream: &gio::InputStream, - selinux: Option<&Vec<u8>>, - ) -> Result<HashMap<Vec<u8>, Vec<u8>>> { + #[context("IMA signing object")] + fn ima_sign(&self, instream: &gio::InputStream) -> Result<HashMap<Vec<u8>, Vec<u8>>> { let mut tempf = tempfile::NamedTempFile::new_in(self.tempdir.path())?; // If we're operating on a bare repo, we can clone the file (copy_file_range) directly. if let Ok(instream) = instream.clone().downcast::<gio::UnixInputStream>() { let mut proc = Command::new("evmctl"); proc.current_dir(self.tempdir.path()) - .args(&[ - "sign", - "--portable", - "--xattr-user", - "--key", - self.ima.key.as_str(), - ]) - .args(&["--hashalgo", self.ima.algorithm.as_str()]); - if let Some(selinux) = selinux { - let selinux = std::str::from_utf8(selinux) - .context("Non-UTF8 selinux value")? 
- .trim_end_matches('\0'); - proc.args(&["--selinux", selinux]); - } - - let proc = proc - .arg("--imasig") - .arg(tempf.path().file_name().unwrap()) .stdout(Stdio::null()) - .stderr(Stdio::piped()); + .stderr(Stdio::piped()) + .args(&["ima_sign", "--xattr-user", "--key", self.ima.key.as_str()]) + .args(&["--hashalgo", self.ima.algorithm.as_str()]) + .arg(tempf.path().file_name().unwrap()); let status = proc.output().context("Spawning evmctl")?; if !status.status.success() { return Err(anyhow::anyhow!( @@ -166,13 +146,11 @@ impl<'a> CommitRewriter<'a> { )); } let mut r = HashMap::new(); - for &k in IMA_XATTRS { - let user_k = k.replace("security.", "user."); - let v = steal_xattr(tempf.as_file(), user_k.as_str())?; - // NUL terminate the key - let k = CString::new(k)?.into_bytes_with_nul(); - r.insert(k, v); - } + let user_k = IMA_XATTR.replace("security.", "user."); + let v = steal_xattr(tempf.as_file(), user_k.as_str())?; + // NUL terminate the key + let k = CString::new(IMA_XATTR)?.into_bytes_with_nul(); + r.insert(k, v); Ok(r) } @@ -195,11 +173,9 @@ impl<'a> CommitRewriter<'a> { let meta = meta.unwrap(); let mut xattrs = xattrs_to_map(&xattrs.unwrap()); - let selinux = xattrs.get(SELINUX_XATTR); - // Now inject the IMA xattr let xattrs = { - let signed = self.ima_sign(&instream, selinux)?; + let signed = self.ima_sign(&instream)?; xattrs.extend(signed); new_variant_a_ayay(&xattrs) }; From 85af74c7be1e24200b6fc03e255be8e4ad8b513f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 17 Apr 2022 14:37:41 -0400 Subject: [PATCH 347/775] ima: Don't overwrite existing signatures by default A use case here is where rpm-ostree may propagate existing IMA signatures from RPMs, but one wants to add signatures for other files that aren't signed (for example, the RPM database and non-packaged files). --- lib/src/cli.rs | 5 +++++ lib/src/ima.rs | 11 +++++++++++ 2 files changed, 16 insertions(+) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 7a3ecfee9..4d7ec6d13 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -271,6 +271,10 @@ struct ImaSignOpts { algorithm: String, /// Path to IMA key key: Utf8PathBuf, + + #[structopt(long)] + /// Overwrite any existing signatures + overwrite: bool, } /// Options for internal testing @@ -544,6 +548,7 @@ fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { let signopts = crate::ima::ImaOpts { algorithm: cmdopts.algorithm.clone(), key: cmdopts.key.clone(), + overwrite: cmdopts.overwrite, }; let signed_commit = crate::ima::ima_sign(&cmdopts.repo, cmdopts.src_rev.as_str(), &signopts)?; cmdopts.repo.set_ref_immediate( diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 3449c3b16..84b815b8f 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -26,6 +26,7 @@ use std::{convert::TryInto, io::Seek}; /// Extended attribute keys used for IMA. const IMA_XATTR: &str = "security.ima"; +const IMA_XATTR_C: &[u8] = b"security.ima\0"; /// Attributes to configure IMA signatures. #[derive(Debug, Clone)] @@ -35,6 +36,9 @@ pub struct ImaOpts { /// Path to IMA key pub key: Utf8PathBuf, + + /// Replace any existing IMA signatures. 
+ pub overwrite: bool, } /// Convert a GVariant of type `a(ayay)` to a mutable map From a5bd124273cd5c29ee763ab2796a0d269d344393 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 17 Apr 2022 14:53:10 -0400 Subject: [PATCH 348/775] ima: Use an ostree transaction It's cleaner and more efficient. --- lib/src/cli.rs | 8 +++++--- lib/src/ima.rs | 3 +++ 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 4d7ec6d13..f0f81a601 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -545,18 +545,20 @@ async fn container_history(repo: &ostree::Repo, imgref: &OstreeImageReference) - /// Add IMA signatures to an ostree commit, generating a new commit. fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { + let cancellable = gio::NONE_CANCELLABLE; let signopts = crate::ima::ImaOpts { algorithm: cmdopts.algorithm.clone(), key: cmdopts.key.clone(), overwrite: cmdopts.overwrite, }; + let tx = cmdopts.repo.auto_transaction(cancellable)?; let signed_commit = crate::ima::ima_sign(&cmdopts.repo, cmdopts.src_rev.as_str(), &signopts)?; - cmdopts.repo.set_ref_immediate( + cmdopts.repo.transaction_set_ref( None, cmdopts.target_ref.as_str(), Some(signed_commit.as_str()), - gio::NONE_CANCELLABLE, - )?; + ); + let _stats = tx.commit(cancellable)?; println!("{} => {}", cmdopts.target_ref, signed_commit); Ok(()) } diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 84b815b8f..5ac3b1a56 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -294,6 +294,9 @@ impl<'a> CommitRewriter<'a> { /// /// The generated commit object will inherit all metadata from the existing commit object /// such as version, etc. +/// +/// This function does not create an ostree transaction; it's recommended to use one around +/// the call to this function. pub fn ima_sign(repo: &ostree::Repo, ostree_ref: &str, opts: &ImaOpts) -> Result<String> { let writer = &mut CommitRewriter::new(repo, opts)?; writer.map_commit(ostree_ref) } From 06583e73271ce1cecf2e45ec0acd5cbdf2cd3187 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 18 Apr 2022 16:29:51 -0400 Subject: [PATCH 349/775] ima: Remove unnecessary Rc and mutable state in signing Change the mapping function to be "pure" and not have side effects on a cache, moving the cache mutation outside. Prep for possible parallelization. --- lib/src/ima.rs | 38 +++++++++++++++----------------------- 1 file changed, 15 insertions(+), 23 deletions(-) diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 5ac3b1a56..f2f807020 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -21,7 +21,6 @@ use std::fs::File; use std::ops::DerefMut; use std::os::unix::io::AsRawFd; use std::process::{Command, Stdio}; -use std::rc::Rc; use std::{convert::TryInto, io::Seek}; /// Extended attribute keys used for IMA. 
@@ -73,8 +72,8 @@ struct CommitRewriter<'a> { repo: &'a ostree::Repo, ima: &'a ImaOpts, tempdir: tempfile::TempDir, - /// Files that we already changed - rewritten_files: HashMap<String, Rc<str>>, + /// Maps content object sha256 hex string to a signed object sha256 hex string + rewritten_files: HashMap<String, String>, } #[allow(unsafe_code)] @@ -159,29 +158,19 @@ impl<'a> CommitRewriter<'a> { } #[context("Content object {}", checksum)] - fn map_file(&mut self, checksum: &str) -> Result<Rc<str>> { - if let Some(r) = self.rewritten_files.get(checksum) { - return Ok(Rc::clone(r)); - } + fn map_file(&mut self, checksum: &str) -> Result<Option<String>> { let cancellable = gio::NONE_CANCELLABLE; let (instream, meta, xattrs) = self.repo.load_file(checksum, cancellable)?; let instream = if let Some(i) = instream { i } else { - // If there's no input stream, it must be a symlink. Skip it. - let r: Rc<str> = checksum.into(); - self.rewritten_files - .insert(checksum.to_string(), Rc::clone(&r)); - return Ok(r); + return Ok(None); }; let meta = meta.unwrap(); let mut xattrs = xattrs_to_map(&xattrs.unwrap()); let existing_sig = xattrs.remove(IMA_XATTR_C); if existing_sig.is_some() && !self.ima.overwrite { - let r: Rc<str> = checksum.into(); - self.rewritten_files - .insert(checksum.to_string(), Rc::clone(&r)); - return Ok(r); + return Ok(None); } // Now inject the IMA xattr let xattrs = { @@ -200,10 +189,7 @@ impl<'a> CommitRewriter<'a> { .write_content(None, &ostream, size, cancellable)? .to_hex(); - let r: Rc<str> = new_checksum.into(); - self.rewritten_files - .insert(checksum.to_string(), Rc::clone(&r)); - Ok(r) + Ok(Some(new_checksum)) } /// Write a dirtree object. @@ -225,9 +211,15 @@ impl<'a> CommitRewriter<'a> { let name = name.to_str(); hex::encode_to_slice(csum, &mut hexbuf)?; let checksum = std::str::from_utf8(&hexbuf)?; - let mapped = self.map_file(checksum)?; - let mapped = hex::decode(&*mapped)?; - new_files.push((name, mapped)); + if let Some(mapped) = self.rewritten_files.get(checksum) { + new_files.push((name, hex::decode(mapped)?)); + } else if let Some(mapped) = self.map_file(checksum)? { + let mapped_bytes = hex::decode(&mapped)?; + self.rewritten_files.insert(checksum.into(), mapped); + new_files.push((name, mapped_bytes)); + } else { + new_files.push((name, Vec::from(csum))); + } } let mut new_dirs = Vec::new(); From 9fa759380147266230daf1fed69b3c06c9faf752 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 19 Apr 2022 09:53:35 -0400 Subject: [PATCH 350/775] tar/export: Deduplicate chunk writing code Prep for further fixes. --- lib/src/tar/export.rs | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 2579e25ed..55d136473 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -529,14 +529,12 @@ pub fn export_commit( Ok(()) } -/// Output a chunk. -pub(crate) fn export_chunk<W: std::io::Write>( - repo: &ostree::Repo, +/// Implementation of chunk writing, assumes that the preliminary structure +/// has been written to the tar stream. +fn write_chunk<W: std::io::Write>( + writer: &mut OstreeTarWriter<W>, chunk: &chunking::Chunk, - out: &mut tar::Builder<W>, ) -> Result<()> { - let writer = &mut OstreeTarWriter::new(repo, out, ExportOptions::default()); - writer.write_repo_structure()?; for (checksum, (_size, paths)) in chunk.content.iter() { let (objpath, h) = writer.append_content(checksum.borrow())?; for path in paths.iter() { let path = path.strip_prefix("/").unwrap_or(path); let h = h.clone(); writer.append_content_hardlink(&objpath, h, path)?; } } Ok(()) } +/// Output a chunk to a tar stream. 
+pub(crate) fn export_chunk<W: std::io::Write>( + repo: &ostree::Repo, + chunk: &chunking::Chunk, + out: &mut tar::Builder<W>, +) -> Result<()> { + let writer = &mut OstreeTarWriter::new(repo, out, ExportOptions::default()); + writer.write_repo_structure()?; + write_chunk(writer, chunk) +} + /// Output the last chunk in a chunking. #[context("Exporting final chunk")] pub(crate) fn export_final_chunk( @@ -584,16 +593,7 @@ pub(crate) fn export_final_chunk( writer.append(objtype, checksum, &v)?; } - for (checksum, (_size, paths)) in chunking.remainder.content.iter() { - let (objpath, h) = writer.append_content(checksum.borrow())?; - for path in paths.iter() { - let path = path.strip_prefix("/").unwrap_or(path); - let h = h.clone(); - writer.append_content_hardlink(&objpath, h, path)?; - } - } - - Ok(()) + write_chunk(writer, &chunking.remainder) } #[cfg(test)] From 67daa43a525c0e64db622a036e4b0c135f49c9f9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 14 Apr 2022 14:59:22 -0400 Subject: [PATCH 351/775] export: Fix `/etc` for chunked archives In our current chunked archives, we were missing the code to map `usr/etc` back to `etc`, which breaks running it as a container image in various ways. Yeah, the fact that this got past CI shows a big gap. I am working on integration tests here that would cover this. Unit testing is possible, just a little annoying right now because we need to manually unpack the container image and inspect the tarballs (can't rely on a container runtime in our unit tests). Now, there's a whole further mess I realized when working on this: we are inconsistent about using `./` versus the empty string when writing paths into the tar stream today. I would like to clean that up, but doing so adds more risk. Let's get this targeted fix out, and revisit that in subsequent work. --- lib/src/tar/export.rs | 43 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 55d136473..2e8f5a2bf 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -24,6 +24,13 @@ const SYSROOT: &str = "sysroot"; // This way the default ostree -> sysroot/ostree symlink works. const OSTREEDIR: &str = "sysroot/ostree"; +/// In v0 format, we use this relative path prefix. I think I chose this by looking +/// at the current Fedora base image tar stream. However, several others don't do +/// this and have paths be relative by simply omitting `./`, i.e. the tar stream +/// contains `usr/bin/bash` and not `./usr/bin/bash`. The former looks cleaner +/// to me, so in v1 we drop it. +const TAR_PATH_PREFIX_V0: &str = "./"; + /// The base repository configuration that identifies this is a tar export. // See https://github.com/ostreedev/ostree/issues/2499 const REPO_CONFIG: &str = r#"[core] @@ -43,6 +50,16 @@ fn map_path(p: &Utf8Path) -> std::borrow::Cow<Utf8Path> { } } +/// Convert usr/etc back to etc for the tar stream. 
+fn map_path_v1(p: &Utf8Path) -> &Utf8Path { + debug_assert!(!p.starts_with("/") && !p.starts_with(".")); + if p.starts_with("usr/etc") { + p.strip_prefix("usr/").unwrap() + } else { + p + } +} + struct OstreeTarWriter<'a, W: std::io::Write> { repo: &'a ostree::Repo, out: &'a mut tar::Builder<W>, @@ -241,7 +258,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { .load_variant(ostree::ObjectType::DirMeta, metadata_checksum)?; // Safety: We passed the correct variant type just above let metadata = &ostree::DirMetaParsed::from_variant(&metadata_v).unwrap(); - let rootpath = Utf8Path::new("./"); + let rootpath = Utf8Path::new(TAR_PATH_PREFIX_V0); // We need to write the root directory, before we write any objects. This should be the very // first thing. @@ -262,7 +279,12 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.append(ostree::ObjectType::DirMeta, metadata_checksum, &metadata_v)?; // Recurse and write everything else. - self.append_dirtree(Utf8Path::new("./"), contents, true, cancellable)?; + self.append_dirtree( + Utf8Path::new(TAR_PATH_PREFIX_V0), + contents, + true, + cancellable, + )?; Ok(()) } @@ -529,6 +551,12 @@ pub fn export_commit( Ok(()) } +/// Chunked (or version 1) tar streams don't have a leading `./`. +fn path_for_tar_v1(p: &Utf8Path) -> &Utf8Path { + debug_assert!(!p.starts_with(".")); + map_path_v1(p.strip_prefix("/").unwrap_or(p)) +} + /// Implementation of chunk writing, assumes that the preliminary structure /// has been written to the tar stream. fn write_chunk<W: std::io::Write>( @@ -538,7 +566,7 @@ fn write_chunk<W: std::io::Write>( for (checksum, (_size, paths)) in chunk.content.iter() { let (objpath, h) = writer.append_content(checksum.borrow())?; for path in paths.iter() { - let path = path.strip_prefix("/").unwrap_or(path); + let path = path_for_tar_v1(path); let h = h.clone(); writer.append_content_hardlink(&objpath, h, path)?; } @@ -607,6 +635,15 @@ mod tests { map_path("./usr/etc/blah".into()), Utf8Path::new("./etc/blah") ); + for unchanged in ["boot", "usr/bin", "usr/lib/foo"].iter().map(Utf8Path::new) { + assert_eq!(unchanged, map_path_v1(unchanged)); + } + + assert_eq!(Utf8Path::new("etc"), map_path_v1(Utf8Path::new("usr/etc"))); + assert_eq!( + Utf8Path::new("etc/foo"), + map_path_v1(Utf8Path::new("usr/etc/foo")) + ); } #[test] From 402aa7d1e59056532167e10a5bf5e5225c2fdc16 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 19 Apr 2022 11:41:34 -0400 Subject: [PATCH 352/775] Release 0.7.1 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 78621e0eb..18c3052a6 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.7.0" +version = "0.7.1" From c9b62e9922372f469b41393c6b4c87d6bd3e21b3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 27 Apr 2022 10:35:38 -0400 Subject: [PATCH 353/775] Fix `history` command and `query_image` to accept plain `ImageReference` This is a big, confusing topic, because ostree tries to impose higher level signing semantics on top of container image references. For images that are already stored, we don't actually store that signing state today, though we probably should. Anyways, change this CLI to accept plain image references, and also add a new query API that takes that directly. Prep for adding other CLI verbs which should do the same. 
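To illustrate the intended calling convention (a hypothetical usage sketch, not part of this patch; the function and field names come from this series, while the literal reference string is just an example):

    // Look up stored image state via a plain container image reference;
    // no ostree signature-policy prefix is needed for a local query.
    let imgref = ImageReference::try_from("registry:quay.io/exampleos/exampleos:latest")?;
    if let Some(state) = crate::container::store::query_image_ref(repo, &imgref)? {
        println!("merge commit: {}", state.merge_commit);
    }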
--- lib/src/cli.rs | 12 ++++++------ lib/src/container/store.rs | 17 ++++++++++++++--- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index a09ae1682..d5a38a003 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -193,15 +193,15 @@ enum ContainerImageOpts { proxyopts: ContainerProxyOpts, }, - /// Pull (or update) a container image. + /// Output metadata about an already stored container image. History { /// Path to the repository #[structopt(long, parse(try_from_str = parse_repo))] repo: ostree::Repo, - /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest - #[structopt(parse(try_from_str = parse_imgref))] - imgref: OstreeImageReference, + /// Container image reference, e.g. registry:quay.io/exampleos/exampleos:latest + #[structopt(parse(try_from_str = parse_base_imgref))] + imgref: ImageReference, }, /// Copy a pulled container image from one repo to another. @@ -542,8 +542,8 @@ fn print_column(s: &str, clen: usize, remaining: &mut usize) { } /// Output the container image history -async fn container_history(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<()> { - let img = crate::container::store::query_image(repo, imgref)? +async fn container_history(repo: &ostree::Repo, imgref: &ImageReference) -> Result<()> { - let img = crate::container::store::query_image_ref(repo, imgref)? .ok_or_else(|| anyhow::anyhow!("No such image: {}", imgref))?; diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index b0c40a97c..7ce2c97b7 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -659,11 +659,11 @@ pub fn list_images(repo: &ostree::Repo) -> Result<Vec<String>> { } /// Query metadata for a pulled image. -pub fn query_image( +pub fn query_image_ref( repo: &ostree::Repo, - imgref: &OstreeImageReference, + imgref: &ImageReference, ) -> Result<Option<Box<LayeredImageState>>> { - let ostree_ref = &ref_for_image(&imgref.imgref)?; + let ostree_ref = &ref_for_image(imgref)?; let merge_rev = repo.resolve_rev(ostree_ref, true)?; let (merge_commit, merge_commit_obj) = if let Some(r) = merge_rev { (r.to_string(), repo.load_commit(r.as_str())?.0) @@ -695,6 +695,17 @@ Ok(Some(state)) } +/// Query metadata for a pulled image. +/// +/// This is a thin wrapper for [`query_image_ref`] and should +/// be considered deprecated. +pub fn query_image( + repo: &ostree::Repo, + imgref: &OstreeImageReference, +) -> Result<Option<Box<LayeredImageState>>> { + query_image_ref(repo, &imgref.imgref) +} + /// Copy a downloaded image from one repository to another. pub async fn copy( src_repo: &ostree::Repo, From ec4ec42b0f6fb1edaed473acaace61ef2055748e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 27 Apr 2022 12:34:10 -0400 Subject: [PATCH 354/775] tests: Consistently use `store::` Seeing the full `ostree_ext::container::store::` a lot is too verbose. Consistently shorten this to just `store::`. --- lib/tests/it/main.rs | 98 ++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 63 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 87741995d..b553a2725 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -4,7 +4,7 @@ use cap_std::fs::{Dir, DirBuilder}; use once_cell::sync::Lazy; use ostree::cap_std; use ostree_ext::chunking::ObjectMetaSized; -use ostree_ext::container::store::PrepareResult; +use ostree_ext::container::store; use ostree_ext::container::{ Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport, }; @@ -529,15 +529,11 @@ async fn impl_test_container_chunked() -> Result<()> { imgref: imgref, }; - let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &imgref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &imgref, Default::default()).await?; let prep = match imp.prepare().await.context("Init prep derived")? { - PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), - PrepareResult::Ready(r) => r, + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, }; let digest = prep.manifest_digest.clone(); @@ -559,15 +555,11 @@ r usr/bin/bash bash-v0 let expected_digest = fixture.export_container().await.unwrap().1; assert_ne!(digest, expected_digest); - let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &imgref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &imgref, Default::default()).await?; let prep = match imp.prepare().await.context("Init prep derived")? { - PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), - PrepareResult::Ready(r) => r, + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, }; let to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?; assert_eq!(to_fetch.len(), 2); @@ -608,15 +600,11 @@ r usr/bin/bash bash-v0 name: derived_path.to_string(), }, }; - let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &derived_imgref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &derived_imgref, Default::default()).await?; let prep = match imp.prepare().await.unwrap() { - PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), - PrepareResult::Ready(r) => r, + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, }; let to_fetch = prep.layers_to_fetch().collect::<Result<Vec<_>>>()?; assert_eq!(to_fetch.len(), 1); @@ -706,7 +694,7 @@ async fn test_container_write_derive() -> Result<()> { }, }; // There shouldn't be any container images stored yet. - let images = ostree_ext::container::store::list_images(fixture.destrepo())?; + let images = store::list_images(fixture.destrepo())?; assert!(images.is_empty()); // Verify importing a derived image fails @@ -714,15 +702,11 @@ async fn test_container_write_derive() -> Result<()> { assert_err_contains(r, "Image has 1 non-ostree layers"); // Pull a derived image - two layers, new base plus one layer. 
--- lib/tests/it/main.rs | 98 ++++++++++++++++---------------------------- 1 file changed, 35 insertions(+), 63 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 87741995d..b553a2725 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -4,7 +4,7 @@ use cap_std::fs::{Dir, DirBuilder}; use once_cell::sync::Lazy; use ostree::cap_std; use ostree_ext::chunking::ObjectMetaSized; -use ostree_ext::container::store::PrepareResult; +use ostree_ext::container::store; use ostree_ext::container::{ Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport, }; @@ -529,15 +529,11 @@ async fn impl_test_container_chunked() -> Result<()> { imgref: imgref, }; - let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &imgref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &imgref, Default::default()).await?; let prep = match imp.prepare().await.context("Init prep derived")? { - PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), - PrepareResult::Ready(r) => r, + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, }; let digest = prep.manifest_digest.clone(); assert!(prep.ostree_commit_layer.commit.is_none()); @@ -559,15 +555,11 @@ r usr/bin/bash bash-v0 let expected_digest = fixture.export_container().await.unwrap().1; assert_ne!(digest, expected_digest); - let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &imgref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &imgref, Default::default()).await?; let prep = match imp.prepare().await.context("Init prep derived")? { - PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), - PrepareResult::Ready(r) => r, + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, }; let to_fetch = prep.layers_to_fetch().collect::>>()?; assert_eq!(to_fetch.len(), 2); @@ -608,15 +600,11 @@ r usr/bin/bash bash-v0 name: derived_path.to_string(), }, }; - let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &derived_imgref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &derived_imgref, Default::default()).await?; let prep = match imp.prepare().await.unwrap() { - PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), - PrepareResult::Ready(r) => r, + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, }; let to_fetch = prep.layers_to_fetch().collect::>>()?; assert_eq!(to_fetch.len(), 1); @@ -706,7 +694,7 @@ async fn test_container_write_derive() -> Result<()> { }, }; // There shouldn't be any container images stored yet. - let images = ostree_ext::container::store::list_images(fixture.destrepo())?; + let images = store::list_images(fixture.destrepo())?; assert!(images.is_empty()); // Verify importing a derived image fails @@ -714,15 +702,11 @@ async fn test_container_write_derive() -> Result<()> { assert_err_contains(r, "Image has 1 non-ostree layers"); // Pull a derived image - two layers, new base plus one layer. 
- let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &derived_ref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &derived_ref, Default::default()).await?; let prep = match imp.prepare().await.context("Init prep derived")? { - PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), - PrepareResult::Ready(r) => r, + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, }; let expected_digest = prep.manifest_digest.clone(); assert!(prep.ostree_commit_layer.commit.is_none()); @@ -732,7 +716,7 @@ async fn test_container_write_derive() -> Result<()> { } let import = imp.import(prep).await.context("Init pull derived")?; // We should have exactly one image stored. - let images = ostree_ext::container::store::list_images(fixture.destrepo())?; + let images = store::list_images(fixture.destrepo())?; assert_eq!(images.len(), 1); assert_eq!(images[0], derived_ref.imgref.to_string()); @@ -740,7 +724,7 @@ async fn test_container_write_derive() -> Result<()> { .destrepo() .load_commit(import.merge_commit.as_str())? .0; - let digest = ostree_ext::container::store::manifest_digest_from_commit(imported_commit)?; + let digest = store::manifest_digest_from_commit(imported_commit)?; assert!(digest.starts_with("sha256:")); assert_eq!(digest, expected_digest); @@ -760,15 +744,11 @@ async fn test_container_write_derive() -> Result<()> { )?; // Import again, but there should be no changes. - let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &derived_ref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &derived_ref, Default::default()).await?; let already_present = match imp.prepare().await? { - PrepareResult::AlreadyPresent(c) => c, - PrepareResult::Ready(_) => { + store::PrepareResult::AlreadyPresent(c) => c, + store::PrepareResult::Ready(_) => { panic!("Should have already imported {}", &derived_ref) } }; @@ -777,15 +757,11 @@ async fn test_container_write_derive() -> Result<()> { // Test upgrades; replace the oci-archive with new content. std::fs::remove_dir_all(derived_path)?; std::fs::rename(derived2_path, derived_path)?; - let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &derived_ref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &derived_ref, Default::default()).await?; let prep = match imp.prepare().await? { - PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), - PrepareResult::Ready(r) => r, + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, }; // We *should* already have the base layer. assert!(prep.ostree_commit_layer.commit.is_some()); @@ -798,7 +774,7 @@ async fn test_container_write_derive() -> Result<()> { // New commit. assert_ne!(import.merge_commit, already_present.merge_commit); // We should still have exactly one image stored. - let images = ostree_ext::container::store::list_images(fixture.destrepo())?; + let images = store::list_images(fixture.destrepo())?; assert_eq!(images[0], derived_ref.imgref.to_string()); assert_eq!(images.len(), 1); @@ -816,15 +792,11 @@ async fn test_container_write_derive() -> Result<()> { )?; // And there should be no changes on upgrade again. 
- let mut imp = ostree_ext::container::store::ImageImporter::new( - fixture.destrepo(), - &derived_ref, - Default::default(), - ) - .await?; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &derived_ref, Default::default()).await?; let already_present = match imp.prepare().await? { - PrepareResult::AlreadyPresent(c) => c, - PrepareResult::Ready(_) => { + store::PrepareResult::AlreadyPresent(c) => c, + store::PrepareResult::Ready(_) => { panic!("Should have already imported {}", &derived_ref) } }; @@ -838,9 +810,9 @@ async fn test_container_write_derive() -> Result<()> { None, gio::NONE_CANCELLABLE, )?; - ostree_ext::container::store::copy(fixture.destrepo(), &destrepo2, &derived_ref).await?; + store::copy(fixture.destrepo(), &destrepo2, &derived_ref).await?; - let images = ostree_ext::container::store::list_images(&destrepo2)?; + let images = store::list_images(&destrepo2)?; assert_eq!(images.len(), 1); assert_eq!(images[0], derived_ref.imgref.to_string()); From f58bcbb4ac93a998ed1c97078297c0bcdc33812f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 26 Apr 2022 20:06:36 -0400 Subject: [PATCH 355/775] store: Implement container image removal and layer garbage collection This is a kind of important thing for production usage. Implementation was pretty straightforward. Closes: https://github.com/ostreedev/ostree-rs-ext/issues/146 --- ci/priv-integration.sh | 14 +++++- lib/src/cli.rs | 31 ++++++++++++++ lib/src/container/store.rs | 87 ++++++++++++++++++++++++++++++++------ lib/tests/it/main.rs | 34 +++++++++++++++ 4 files changed, 153 insertions(+), 13 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 6e0f402c7..aa55ef30a 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -18,10 +18,22 @@ if test '!' -e "${sysroot}/ostree"; then ostree admin init-fs --modern "${sysroot}" ostree config --repo $sysroot/ostree/repo set sysroot.bootloader none fi -ostree admin os-init "${stateroot}" --sysroot "${sysroot}" +if test '!' -d "${sysroot}/ostree/deploy/${stateroot}"; then + ostree admin os-init "${stateroot}" --sysroot "${sysroot}" +fi ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref "${imgref}" ostree admin --sysroot="${sysroot}" status ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${chunked_image}" ostree admin --sysroot="${sysroot}" status +ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" registry:"${chunked_image}" +ostree admin --sysroot="${sysroot}" undeploy 0 +ostree --repo="${sysroot}/ostree/repo" refs > refs.txt +if test "$(wc -l < refs.txt)" -ne 0; then + echo "found refs" + cat refs.txt + exit 1 +fi + +echo ok privileged integration diff --git a/lib/src/cli.rs b/lib/src/cli.rs index d5a38a003..bc5b92b85 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -221,6 +221,22 @@ enum ContainerImageOpts { imgref: OstreeImageReference, }, + /// Unreference one or more pulled container images and perform a garbage collection. + Remove { + /// Path to the repository + #[structopt(long)] + #[structopt(parse(try_from_str = parse_repo))] + repo: ostree::Repo, + + /// Image reference, e.g. 
quay.io/exampleos/exampleos:latest + #[structopt(parse(try_from_str = parse_base_imgref))] + imgrefs: Vec, + + /// Do not garbage collect unused layers + #[structopt(long)] + skip_gc: bool, + }, + /// Perform initial deployment for a container image Deploy { /// Path to the system root @@ -672,6 +688,21 @@ where ContainerImageOpts::History { repo, imgref } => { container_history(&repo, &imgref).await } + ContainerImageOpts::Remove { + repo, + imgrefs, + skip_gc, + } => { + let nimgs = imgrefs.len(); + crate::container::store::remove_images(&repo, imgrefs.iter())?; + if !skip_gc { + let nlayers = crate::container::store::gc_image_layers(&repo)?; + println!("Removed images: {nimgs} layers: {nlayers}"); + } else { + println!("Removed images: {nimgs}"); + } + Ok(()) + } ContainerImageOpts::Copy { src_repo, dest_repo, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 7ce2c97b7..1174198a1 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -13,7 +13,7 @@ use fn_error_context::context; use oci_spec::image::{self as oci_image, Descriptor, History, ImageConfiguration, ImageManifest}; use ostree::prelude::{Cast, ToVariant}; use ostree::{gio, glib}; -use std::collections::HashMap; +use std::collections::{BTreeSet, HashMap}; use std::iter::FromIterator; use std::sync::{Arc, Mutex}; use tokio::sync::mpsc::{Receiver, Sender}; @@ -706,6 +706,14 @@ pub fn query_image( query_image_ref(repo, &imgref.imgref) } +fn manifest_for_image(repo: &ostree::Repo, imgref: &ImageReference) -> Result { + let ostree_ref = ref_for_image(imgref)?; + let rev = repo.require_rev(&ostree_ref)?; + let (commit_obj, _) = repo.load_commit(rev.as_str())?; + let commit_meta = &glib::VariantDict::new(Some(&commit_obj.child_value(0))); + Ok(manifest_data_from_commitmeta(commit_meta)?.0) +} + /// Copy a downloaded image from one repository to another. pub async fn copy( src_repo: &ostree::Repo, @@ -713,10 +721,7 @@ pub async fn copy( imgref: &OstreeImageReference, ) -> Result<()> { let ostree_ref = ref_for_image(&imgref.imgref)?; - let rev = src_repo.require_rev(&ostree_ref)?; - let (commit_obj, _) = src_repo.load_commit(rev.as_str())?; - let commit_meta = &glib::VariantDict::new(Some(&commit_obj.child_value(0))); - let (manifest, _) = manifest_data_from_commitmeta(commit_meta)?; + let manifest = manifest_for_image(src_repo, &imgref.imgref)?; // Create a task to copy each layer, plus the final ref let layer_refs = manifest .layers() @@ -746,11 +751,69 @@ pub async fn copy( Ok(()) } -/// Remove the specified images and their corresponding blobs. -pub fn prune_images(_repo: &ostree::Repo, _imgs: &[&str]) -> Result<()> { - // Most robust approach is to iterate over all known images, load the - // manifest and build the set of reachable blobs, then compute the set - // Set(unreachable) = Set(all) - Set(reachable) - // And remove the unreachable ones. - unimplemented!() +/// Garbage collect unused image layer references. +/// +/// This function assumes no transaction is active on the repository. +/// The underlying objects are *not* pruned; that requires a separate invocation +/// of [`ostree::Repo::prune`]. 
+pub fn gc_image_layers(repo: &ostree::Repo) -> Result { + let cancellable = gio::NONE_CANCELLABLE; + let all_images = list_images(repo)?; + let all_manifests = all_images + .into_iter() + .map(|img| { + ImageReference::try_from(img.as_str()).and_then(|ir| manifest_for_image(repo, &ir)) + }) + .collect::>>()?; + let mut referenced_layers = BTreeSet::new(); + for m in all_manifests.iter() { + for layer in m.layers() { + referenced_layers.insert(layer.digest().as_str()); + } + } + let found_layers = repo + .list_refs_ext( + Some(LAYER_PREFIX), + ostree::RepoListRefsExtFlags::empty(), + cancellable, + )? + .into_iter() + .map(|v| v.0); + let mut pruned = 0u32; + for layer_ref in found_layers { + let layer_digest = refescape::unprefix_unescape_ref(LAYER_PREFIX, &layer_ref)?; + if referenced_layers.remove(layer_digest.as_str()) { + continue; + } + pruned += 1; + repo.set_ref_immediate(None, layer_ref.as_str(), None, cancellable)?; + } + + Ok(pruned) +} + +#[context("Pruning {}", image)] +fn prune_image(repo: &ostree::Repo, image: &ImageReference) -> Result<()> { + let ostree_ref = &ref_for_image(image)?; + + if repo.resolve_rev(ostree_ref, true)?.is_none() { + anyhow::bail!("No such image"); + } + repo.set_ref_immediate(None, ostree_ref, None, gio::NONE_CANCELLABLE)?; + Ok(()) +} + +/// Remove the specified image references. +/// +/// This function assumes no transaction is active on the repository. +/// The underlying layers are *not* pruned; that requires a separate invocation +/// of [`gc_image_layers`]. +pub fn remove_images<'a>( + repo: &ostree::Repo, + imgs: impl IntoIterator, +) -> Result<()> { + for img in imgs.into_iter() { + prune_image(repo, img)?; + } + Ok(()) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index b553a2725..76a9e9c93 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -545,6 +545,8 @@ async fn impl_test_container_chunked() -> Result<()> { assert_eq!(digest, expected_digest); let _import = imp.import(prep).await.context("Init pull derived").unwrap(); + assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1); + const ADDITIONS: &str = indoc::indoc! 
{ " r usr/bin/bash bash-v0 "}; @@ -574,6 +576,14 @@ r usr/bin/bash bash-v0 let _import = imp.import(prep).await.unwrap(); + assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1); + + let n_removed = store::gc_image_layers(&fixture.destrepo())?; + assert_eq!(n_removed, 2); + fixture + .destrepo() + .prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::NONE_CANCELLABLE)?; + // Build a derived image let derived_path = &fixture.path.join("derived.oci"); let srcpath = imgref.imgref.name.as_str(); @@ -612,6 +622,30 @@ r usr/bin/bash bash-v0 assert_eq!(prep.ostree_layers.len(), nlayers as usize); let _import = imp.import(prep).await.unwrap(); + assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 2); + + // Should only be new layers + let n_removed = store::gc_image_layers(&fixture.destrepo())?; + assert_eq!(n_removed, 0); + store::remove_images(fixture.destrepo(), [&imgref.imgref]).unwrap(); + assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1); + // Still no removed layers after removing the base image + let n_removed = store::gc_image_layers(&fixture.destrepo())?; + assert_eq!(n_removed, 0); + store::remove_images(fixture.destrepo(), [&derived_imgref.imgref]).unwrap(); + assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 0); + let n_removed = store::gc_image_layers(&fixture.destrepo())?; + assert_eq!(n_removed, 8); + + // Repo should be clean now + assert_eq!( + fixture + .destrepo() + .list_refs(None, gio::NONE_CANCELLABLE) + .unwrap() + .len(), + 0 + ); Ok(()) } From 86bfe99d2fbe01142d769f4297c04ee30eb35586 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 2 May 2022 09:46:46 -0400 Subject: [PATCH 356/775] ci: Split build vs test, try cache keys Some follow up from seeing https://www.reddit.com/r/rust/comments/ug4utz/tips_for_speeding_up_rust_builds_times_in_ci_we/ --- .github/workflows/rust.yml | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index c99d2ccc2..2ed0fd373 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -20,7 +20,7 @@ env: ACTION_LINTS_TOOLCHAIN: 1.58.1 jobs: - build: + tests: runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: @@ -29,18 +29,33 @@ jobs: run: ./ci/installdeps.sh # xref containers/containers-image-proxy-rs - name: Cache Dependencies - uses: Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 + uses: Swatinem/rust-cache@v1 + with: + key: "tests" - name: Build - run: cargo test --no-run && cargo build + run: cargo test --no-run - name: Run tests run: cargo test -- --nocapture --quiet + build: + runs-on: ubuntu-latest + container: quay.io/coreos-assembler/fcos-buildroot:testing-devel + steps: + - uses: actions/checkout@v2 + - name: Install deps + run: ./ci/installdeps.sh + - name: Cache Dependencies + uses: Swatinem/rust-cache@v1 + with: + key: "build" + - name: Build + run: cargo build --release - name: Upload binary uses: actions/upload-artifact@v2 with: name: ostree-ext-cli - path: target/debug/ostree-ext-cli + path: target/release/ostree-ext-cli build-minimum-toolchain: - name: "Build, minimum supported toolchain (MSRV)" + name: "Build using MSRV" runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: @@ -56,9 +71,11 @@ jobs: toolchain: ${{ env['ACTION_MSRV_TOOLCHAIN'] }} default: true - name: Cache Dependencies - uses: 
Swatinem/rust-cache@ce325b60658c1b38465c06cc965b79baf32c1e72 - - name: cargo build (release) - run: cargo build --release + uses: Swatinem/rust-cache@v1 + with: + key: "min" + - name: cargo check + run: cargo check linting: name: "Lints, pinned toolchain" runs-on: ubuntu-latest From bcb98f81466e7bc7c41cdd87d60dd816165c919c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 4 May 2022 16:10:11 -0400 Subject: [PATCH 357/775] build: Be compatible with cap-std-ext 0.25 We only use the `cmdext` bits right now, not the atomic writes. Widen our compatibility matrix so we can be built with both. --- lib/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 18c3052a6..79d3dbdd0 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -17,7 +17,7 @@ bitflags = "1" camino = "1.0.4" chrono = "0.4.19" cjson = "0.1.1" -cap-std-ext = "0.24" +cap-std-ext = ">= 0.24, <= 0.25" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures-util = "0.3.13" @@ -46,7 +46,7 @@ tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" indoc = { version = "1.0.3", optional = true } -sh-inline = { version = "0.2", features = ["cap-std-ext"], optional = true } +sh-inline = { version = "0.2.2", features = ["cap-std-ext"], optional = true } [dev-dependencies] quickcheck = "1" From 239e6d7c16b427d630f48c02685db7143791d55c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 6 May 2022 10:32:13 -0400 Subject: [PATCH 358/775] ci: make use of `cargo-deny` for baseline license check and crate dups See https://crates.io/crates/cargo-deny This looks like a really nice tool, let's use it to validate license compatibility mainly. But I also added `sources` and `bans`. --- .github/workflows/rust.yml | 8 ++++++++ deny.toml | 10 ++++++++++ 2 files changed, 18 insertions(+) create mode 100644 deny.toml diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2ed0fd373..279088c24 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -76,6 +76,14 @@ jobs: key: "min" - name: cargo check run: cargo check + cargo-deny: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: EmbarkStudios/cargo-deny-action@v1 + with: + log-level: warn + command: check bans sources licenses linting: name: "Lints, pinned toolchain" runs-on: ubuntu-latest diff --git a/deny.toml b/deny.toml new file mode 100644 index 000000000..75b6ac9b2 --- /dev/null +++ b/deny.toml @@ -0,0 +1,10 @@ +[licenses] +unlicensed = "deny" +allow = ["Apache-2.0", "Apache-2.0 WITH LLVM-exception", "MIT", "BSD-3-Clause", "BSD-2-Clause"] + +[bans] + +[sources] +unknown-registry = "deny" +unknown-git = "deny" +allow-git = [] From cb3124002b3f015125d6a3487e97b154c23fdf1d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 6 May 2022 15:37:43 -0400 Subject: [PATCH 359/775] Fix extra NUL char in IMA xattr, add tests I went to add testing for our IMA bits and I tripped over a bug; we are going out of our way to store a trailing `NUL` character in the ostree xattrs for the key name. We should not do this, because it will break fsck. What we end up passing to the kernel looks like `security.ima\0\0`, and the kernel being C will happily take that to mean `security.ima`. But the checksum we computed originally is using `security.ima\0`, not what we will read back from disk as `security.ima`. 
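A minimal standalone sketch of the byte-level mismatch described above (using only std; illustrative, not part of the patch):

use std::ffi::CString;

fn main() {
    const IMA_XATTR: &str = "security.ima";
    // What the old code stored as the xattr key: "security.ima\0"
    let with_nul = CString::new(IMA_XATTR).unwrap().into_bytes_with_nul();
    // What the kernel hands back when the xattr is read from disk: "security.ima"
    let from_disk = Vec::from(IMA_XATTR.as_bytes());
    // The original checksum covered the NUL-terminated form, so verification
    // against the on-disk form can never match.
    assert_ne!(with_nul, from_disk);
}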
--- .github/workflows/rust.yml | 16 +++++++++ ci/ima.sh | 12 +++++++ lib/src/cli.rs | 3 ++ lib/src/ima.rs | 7 ++-- lib/src/integrationtest.rs | 68 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 101 insertions(+), 5 deletions(-) create mode 100755 ci/ima.sh diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 279088c24..06efa0fb4 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -121,6 +121,22 @@ jobs: run: install ostree-ext-cli /usr/bin && rm -v ostree-ext-cli - name: Integration tests run: ./ci/integration.sh + ima: + name: "Integration (IMA)" + needs: build + runs-on: ubuntu-latest + container: quay.io/coreos-assembler/fcos:testing-devel + steps: + - name: Checkout repository + uses: actions/checkout@v2 + - name: Download ostree-ext-cli + uses: actions/download-artifact@v2 + with: + name: ostree-ext-cli + - name: Install + run: install ostree-ext-cli /usr/bin && rm -v ostree-ext-cli + - name: Integration tests + run: ./ci/ima.sh privtest: name: "Privileged testing" needs: build diff --git a/ci/ima.sh b/ci/ima.sh new file mode 100755 index 000000000..be7480173 --- /dev/null +++ b/ci/ima.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# Assumes that the current environment is a mutable ostree-container +# with ostree-ext-cli installed in /usr/bin. +# Runs IMA tests. +set -xeuo pipefail + +if test '!' -x /usr/bin/evmctl; then + rpm-ostree install ima-evm-utils +fi + +ostree-ext-cli internal-only-for-testing run-ima +echo ok "ima" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index bc5b92b85..41f167f6b 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -302,6 +302,8 @@ enum TestingOpts { DetectEnv, /// Execute integration tests, assuming mutable environment Run, + /// Execute IMA tests + RunIMA, FilterTar, } @@ -628,6 +630,7 @@ fn testing(opts: &TestingOpts) -> Result<()> { Ok(()) } TestingOpts::Run => crate::integrationtest::run_tests(), + TestingOpts::RunIMA => crate::integrationtest::test_ima(), TestingOpts::FilterTar => { crate::tar::filter_tar(std::io::stdin(), std::io::stdout()).map(|_| {}) } diff --git a/lib/src/ima.rs b/lib/src/ima.rs index f2f807020..18d1f8ec9 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -25,7 +25,6 @@ use std::{convert::TryInto, io::Seek}; /// Extended attribute keys used for IMA. const IMA_XATTR: &str = "security.ima"; -const IMA_XATTR_C: &[u8] = b"security.ima\0"; /// Attributes to configure IMA signatures. 
#[derive(Debug, Clone)] @@ -151,9 +150,7 @@ impl<'a> CommitRewriter<'a> { let mut r = HashMap::new(); let user_k = IMA_XATTR.replace("security.", "user."); let v = steal_xattr(tempf.as_file(), user_k.as_str())?; - // NUL terminate the key - let k = CString::new(IMA_XATTR)?.into_bytes_with_nul(); - r.insert(k, v); + r.insert(Vec::from(IMA_XATTR.as_bytes()), v); Ok(r) } @@ -168,7 +165,7 @@ impl<'a> CommitRewriter<'a> { }; let meta = meta.unwrap(); let mut xattrs = xattrs_to_map(&xattrs.unwrap()); - let existing_sig = xattrs.remove(IMA_XATTR_C); + let existing_sig = xattrs.remove(IMA_XATTR.as_bytes()); if existing_sig.is_some() && !self.ima.overwrite { return Ok(None); } diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index badf244ec..49ffd2dd1 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -6,7 +6,9 @@ use crate::container::ocidir; use anyhow::Result; use camino::Utf8Path; use fn_error_context::context; +use gio::prelude::*; use oci_spec::image as oci_image; +use ostree::gio; fn has_ostree() -> bool { std::path::Path::new("/sysroot/ostree/repo").exists() @@ -88,6 +90,72 @@ fn test_proxy_auth() -> Result<()> { Ok(()) } +pub(crate) fn test_ima() -> Result<()> { + use gvariant::aligned_bytes::TryAsAligned; + use gvariant::{gv, Marker, Structure}; + + let cancellable = gio::NONE_CANCELLABLE; + let fixture = crate::fixture::Fixture::new_v1()?; + + let config = indoc::indoc! { r#" + [ req ] + default_bits = 3048 + distinguished_name = req_distinguished_name + prompt = no + string_mask = utf8only + x509_extensions = myexts + [ req_distinguished_name ] + O = Test + CN = Test key + emailAddress = example@example.com + [ myexts ] + basicConstraints=critical,CA:FALSE + keyUsage=digitalSignature + subjectKeyIdentifier=hash + authorityKeyIdentifier=keyid + "#}; + std::fs::write(fixture.path.join("genkey.config"), config)?; + sh_inline::bash_in!( + &fixture.dir, + "openssl req -new -nodes -utf8 -sha256 -days 36500 -batch \ + -x509 -config genkey.config \ + -outform DER -out ima.der -keyout privkey_ima.pem &>/dev/null" + )?; + + let imaopts = crate::ima::ImaOpts { + algorithm: "sha256".into(), + key: fixture.path.join("privkey_ima.pem"), + overwrite: false, + }; + let rewritten_commit = + crate::ima::ima_sign(fixture.srcrepo(), fixture.testref(), &imaopts).unwrap(); + + let root = fixture + .srcrepo() + .read_commit(&rewritten_commit, cancellable)? 
+ .0; + let bash = root.resolve_relative_path("/usr/bin/bash"); + let bash = bash.downcast_ref::().unwrap(); + let xattrs = bash.xattrs(cancellable).unwrap(); + let v = xattrs.data_as_bytes(); + let v = v.try_as_aligned().unwrap(); + let v = gv!("a(ayay)").cast(v); + let mut found_ima = false; + for xattr in v.iter() { + let k = xattr.to_tuple().0; + if k != b"security.ima" { + continue; + } + found_ima = true; + break; + } + if !found_ima { + anyhow::bail!("Failed to find IMA xattr"); + } + println!("ok IMA"); + Ok(()) +} + #[cfg(feature = "internal-testing-api")] #[context("Running integration tests")] pub(crate) fn run_tests() -> Result<()> { From d6f1ddbf948ff0b3f1b3441c3a55f32e2fb9212a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 9 May 2022 13:28:37 -0400 Subject: [PATCH 360/775] Release 0.7.2 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 79d3dbdd0..9be23ef3a 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.7.1" +version = "0.7.2" [dependencies] anyhow = "1.0" From 109fa28589a6ff43c338c406065d7d7e226fa666 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 12 May 2022 14:14:25 -0400 Subject: [PATCH 361/775] tests: Add a case for exporting to ociarchive Since this exercises our "build oci, then copy" bits. --- lib/tests/it/main.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 76a9e9c93..27c18965b 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -464,6 +464,27 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { assert_eq!(cfg.rootfs().diff_ids().len(), n_chunks); assert_eq!(cfg.history().len(), n_chunks); + // Verify exporting to ociarchive + { + let archivepath = &fixture.path.join("export.ociarchive"); + let ociarchive_dest = ImageReference { + transport: Transport::OciArchive, + name: archivepath.as_str().to_string(), + }; + let _: String = ostree_ext::container::encapsulate( + fixture.srcrepo(), + fixture.testref(), + &config, + None, + None, + &ociarchive_dest, + ) + .await + .context("exporting to ociarchive") + .unwrap(); + assert!(archivepath.is_file()); + } + let srcoci_unverified = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: srcoci_imgref.clone(), From 21def6d3fbf0c803ee656098a209295829d90a3c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 10 May 2022 20:07:49 -0400 Subject: [PATCH 362/775] container: Factor out helper to copy image reference Prep for some signing work, but this also may be generally useful in the future. --- lib/src/container/encapsulate.rs | 20 ++++---------------- lib/src/container/skopeo.rs | 22 ++++++++++++++++++++++ 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 1a79e8fbb..0b6e4fd59 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -16,7 +16,7 @@ use std::collections::{BTreeMap, HashMap}; use std::num::NonZeroU32; use std::path::Path; use std::rc::Rc; -use tracing::{instrument, Level}; +use tracing::instrument; /// Annotation injected into the layer to say that this is an ostree commit. 
/// However, because this gets lost when converted to D2S2 https://docs.docker.com/registry/spec/manifest-v2-2/ @@ -258,9 +258,8 @@ async fn build_impl( let tempdir = tempfile::tempdir_in("/var/tmp")?; let tempdest = tempdir.path().join("d"); let tempdest = tempdest.to_str().unwrap(); - let digestfile = tempdir.path().join("digestfile"); - let src = build_oci( + let tempoci = build_oci( repo, ostree_ref, Path::new(tempdest), @@ -269,19 +268,8 @@ async fn build_impl( contentmeta, )?; - let mut cmd = skopeo::new_cmd(); - tracing::event!(Level::DEBUG, "Copying {} to {}", src, dest); - cmd.stdout(std::process::Stdio::null()).arg("copy"); - cmd.arg("--digestfile"); - cmd.arg(&digestfile); - cmd.args(&[src.to_string(), dest.to_string()]); - let proc = super::skopeo::spawn(cmd)?; - let output = proc.wait_with_output().await?; - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - return Err(anyhow::anyhow!("skopeo failed: {}\n", stderr)); - } - Some(std::fs::read_to_string(digestfile)?.trim().to_string()) + let digest = skopeo::copy(&tempoci, dest).await?; + Some(digest) }; if let Some(digest) = digest { Ok(digest) diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs index 3aa02703b..2ae9210cd 100644 --- a/lib/src/container/skopeo.rs +++ b/lib/src/container/skopeo.rs @@ -1,7 +1,9 @@ //! Fork skopeo as a subprocess +use super::ImageReference; use anyhow::{Context, Result}; use serde::Deserialize; +use std::io::Read; use std::process::Stdio; use tokio::process::Command; @@ -55,6 +57,26 @@ pub(crate) fn spawn(mut cmd: Command) -> Result { cmd.spawn().context("Failed to exec skopeo") } +/// Use skopeo to copy a container image. +pub(crate) async fn copy(src: &ImageReference, dest: &ImageReference) -> Result { + let digestfile = tempfile::NamedTempFile::new()?; + let mut cmd = new_cmd(); + cmd.stdout(std::process::Stdio::null()).arg("copy"); + cmd.arg("--digestfile"); + cmd.arg(digestfile.path()); + cmd.args(&[src.to_string(), dest.to_string()]); + let proc = super::skopeo::spawn(cmd)?; + let output = proc.wait_with_output().await?; + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(anyhow::anyhow!("skopeo failed: {}\n", stderr)); + } + let mut digestfile = digestfile.into_file(); + let mut r = String::new(); + digestfile.read_to_string(&mut r)?; + Ok(r.trim().to_string()) +} + #[cfg(test)] mod tests { use super::*; From 9abd49b111c448771d55082b6b04db54b1dc1175 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 13 May 2022 10:54:41 -0400 Subject: [PATCH 363/775] tar: Factor out a helper to copy a `tar::Entry` The need to handle symlinks specially is a big trap. I need this for other code relating to injecting signatures. --- lib/src/tar/write.rs | 46 +++++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index b196597be..be20eeee4 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -24,6 +24,33 @@ use std::sync::Arc; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite}; use tracing::instrument; +/// Copy a tar entry to a new tar archive, optionally using a different filesystem path. 
+pub(crate) fn copy_entry( + entry: tar::Entry, + dest: &mut tar::Builder, + path: Option<&Path>, +) -> Result<()> { + // Make copies of both the header and path, since that's required for the append APIs + let path = if let Some(path) = path { + path.to_owned() + } else { + (&*entry.path()?).to_owned() + }; + let mut header = entry.header().clone(); + + // Need to use the entry.link_name() not the header.link_name() + // api as the header api does not handle long paths: + // https://github.com/alexcrichton/tar-rs/issues/192 + match entry.header().entry_type() { + tar::EntryType::Link | tar::EntryType::Symlink => { + let target = entry.link_name()?.ok_or_else(|| anyhow!("Invalid link"))?; + dest.append_link(&mut header, path, target) + } + _ => dest.append_data(&mut header, path, entry), + } + .map_err(Into::into) +} + /// Configuration for tar layer commits. #[derive(Debug, Default)] pub struct WriteTarOptions { @@ -155,24 +182,7 @@ pub(crate) fn filter_tar( NormalizedPathResult::Normal(path) => path, }; - let mut header = entry.header().clone(); - - // Need to use the entry.link_name() not the header.link_name() - // api as the header api does not handle long paths: - // https://github.com/alexcrichton/tar-rs/issues/192 - match entry.header().entry_type() { - tar::EntryType::Link | tar::EntryType::Symlink => { - let target = entry.link_name()?.ok_or_else(|| anyhow!("Invalid link"))?; - let target = target - .as_os_str() - .to_str() - .ok_or_else(|| anyhow!("Non-utf8 link"))?; - dest.append_link(&mut header, &normalized, target)?; - } - _ => { - dest.append_data(&mut header, normalized, entry)?; - } - } + copy_entry(entry, &mut dest, Some(normalized.as_std_path()))?; } dest.into_inner()?.flush()?; Ok(filtered) From febb76a9bf76eb57e11b28ebfd1303dde315c33d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 13 May 2022 17:36:50 -0400 Subject: [PATCH 364/775] import: Fix error message for missing commit metadata I'm writing some code around changing commit metadata, and it took me an embarassingly long amount of time to figure out that it wasn't my code that was broken - it was this error message. In this case, output the actual next object type we found instead of saying "commit" which we already did find. --- lib/src/tar/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index c99c5c334..908fc6b95 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -654,7 +654,7 @@ impl Importer { return Err(anyhow!( "Using remote {} for verification; Expected commitmeta object, not {:?}", remote, - objtype + next_objtype )); } if next_checksum != checksum { From 3a29ff46c01ebe2a8fa00558a09fe7ad4af670ee Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 13 May 2022 17:42:59 -0400 Subject: [PATCH 365/775] ocidir: Various new internal API improvements Prep for signature injection work. 
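Concretely, the diff below adds direct blob access plus a manifest-with-descriptor reader. A hedged usage sketch (these are crate-internal helpers on the OciDir type from ocidir.rs, so this only illustrates the shape of the calls):

// Assumes an already-opened OciDir, e.g. via OciDir::open on an openat::Dir.
fn inspect(oci: &OciDir) -> anyhow::Result<()> {
    // New: returns the manifest together with its descriptor from index.json.
    let (manifest, descriptor) = oci.read_manifest_and_descriptor()?;
    println!("manifest digest: {}", descriptor.digest());
    // New: open any blob by descriptor as a plain std::fs::File.
    let _config = oci.read_blob(manifest.config())?;
    Ok(())
}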
--- lib/src/container/ocidir.rs | 56 ++++++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 34df415dd..4f6ee862f 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -6,12 +6,13 @@ use camino::Utf8Path; use flate2::write::GzEncoder; use fn_error_context::context; use oci_image::MediaType; -use oci_spec::image as oci_image; +use oci_spec::image::{self as oci_image, Descriptor}; use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; use std::collections::HashMap; -use std::io::prelude::*; -use std::path::Path; +use std::fs::File; +use std::io::{prelude::*, BufReader}; +use std::path::{Path, PathBuf}; use std::rc::Rc; /// Path inside an OCI directory to the blobs @@ -80,16 +81,6 @@ pub(crate) fn write_json_blob( Ok(blob.descriptor().media_type(media_type)) } -fn deserialize_json_path( - d: &openat::Dir, - p: impl AsRef, -) -> Result { - let p = p.as_ref(); - let ctx = || format!("Parsing {:?}", p); - let f = std::io::BufReader::new(d.open_file(p).with_context(ctx)?); - serde_json::from_reader(f).with_context(ctx) -} - // Parse a filename from a string; this will ignore any directory components, and error out on `/` and `..` for example. fn parse_one_filename(s: &str) -> Result<&str> { Utf8Path::new(s) @@ -205,11 +196,7 @@ impl OciDir { config.history_mut().push(h); } - /// Read a JSON blob. - pub(crate) fn read_json_blob( - &self, - desc: &oci_spec::image::Descriptor, - ) -> Result { + fn parse_descriptor_to_path(desc: &oci_spec::image::Descriptor) -> Result { let (alg, hash) = desc .digest() .split_once(':') @@ -219,7 +206,21 @@ impl OciDir { anyhow::bail!("Unsupported digest algorithm {}", desc.digest()); } let hash = parse_one_filename(hash)?; - deserialize_json_path(&self.dir, Path::new(BLOBDIR).join(hash)) + Ok(Path::new(BLOBDIR).join(hash)) + } + + pub(crate) fn read_blob(&self, desc: &oci_spec::image::Descriptor) -> Result { + let path = Self::parse_descriptor_to_path(desc)?; + self.dir.open_file(&path).map_err(Into::into) + } + + /// Read a JSON blob. + pub(crate) fn read_json_blob( + &self, + desc: &oci_spec::image::Descriptor, + ) -> Result { + let blob = BufReader::new(self.read_blob(desc)?); + serde_json::from_reader(blob).with_context(|| format!("Parsing object {}", desc.digest())) } /// Write a configuration blob. @@ -258,13 +259,24 @@ impl OciDir { /// If this OCI directory has a single manifest, return it. Otherwise, an error is returned. pub(crate) fn read_manifest(&self) -> Result { - let idx: oci_image::ImageIndex = deserialize_json_path(&self.dir, "index.json")?; + self.read_manifest_and_descriptor().map(|r| r.0) + } + + /// If this OCI directory has a single manifest, return it. Otherwise, an error is returned. 
+ pub(crate) fn read_manifest_and_descriptor( + &self, + ) -> Result<(oci_image::ImageManifest, Descriptor)> { + let f = self + .dir + .open_file("index.json") + .context("Failed to open index.json")?; + let idx: oci_image::ImageIndex = serde_json::from_reader(BufReader::new(f))?; let desc = match idx.manifests().as_slice() { [] => anyhow::bail!("No manifests found"), - [desc] => desc, + [desc] => desc.clone(), manifests => anyhow::bail!("Expected exactly 1 manifest, found {}", manifests.len()), }; - self.read_json_blob(desc) + Ok((self.read_json_blob(&desc)?, desc)) } } From 21d92dfb391f1091c162982ca5a48bf7bcf625ff Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 13 May 2022 18:16:01 -0400 Subject: [PATCH 366/775] store: Extract a helper function to find ostree layer Prep for further work around signature injection. --- lib/src/container/store.rs | 30 +++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 1174198a1..3c60c8770 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -272,6 +272,23 @@ fn layer_from_diffid<'a>( }) } +pub(crate) fn ostree_layer<'a>( + manifest: &'a ImageManifest, + config: &ImageConfiguration, +) -> Result<&'a Descriptor> { + let label = crate::container::OSTREE_DIFFID_LABEL; + let config_labels = config.config().as_ref().and_then(|c| c.labels().as_ref()); + let diffid = config_labels.and_then(|labels| labels.get(label)); + // For backwards compatibility, if there's only 1 layer, don't require the label. + // This can be dropped when we drop format version 0 support. + let r = if let Some(diffid) = diffid { + layer_from_diffid(manifest, config, diffid.as_str())? + } else { + &manifest.layers()[0] + }; + Ok(r) +} + impl ImageImporter { /// Create a new importer. pub async fn new( @@ -352,17 +369,8 @@ impl ImageImporter { let config = self.proxy.fetch_config(&self.proxy_img).await?; - let label = crate::container::OSTREE_DIFFID_LABEL; - let config_labels = config.config().as_ref().and_then(|c| c.labels().as_ref()); - let diffid = config_labels.and_then(|labels| labels.get(label)); - // For backwards compatibility, if there's only 1 layer, don't require the label. - // This can be dropped when we drop format version 0 support. - let commit_layer_digest = if let Some(diffid) = diffid { - let layer = layer_from_diffid(&manifest, &config, diffid.as_str())?; - layer.digest() - } else { - manifest.layers()[0].digest() - }; + let commit_layer_digest = ostree_layer(&manifest, &config)?.digest(); + let mut component_layers = Vec::new(); let mut commit_layer = None; let mut remaining_layers = Vec::new(); From f044271ecef58d306207fa96ccc1437326334e40 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 13 May 2022 18:17:05 -0400 Subject: [PATCH 367/775] tar/export: Extract helper functions Prep for signature rewriting. 
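A short sketch composing the two helpers extracted below (the wrapper function here is illustrative, not in the patch; both helpers are crate-internal to lib/src/tar): appending a detached-metadata object to a tar stream at its object path.

fn append_commitmeta<W: std::io::Write>(
    out: &mut tar::Builder<W>,
    checksum: &str,
    buf: &[u8],
) -> anyhow::Result<()> {
    // object_path() maps (objtype, checksum) to the object's path in the stream,
    // using the ".commitmeta" suffix for this object type.
    let path = object_path(ostree::ObjectType::CommitMeta, checksum);
    // tar_append_default_data() writes it as a regular file, root/root 0644.
    tar_append_default_data(out, &path, buf)
}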
--- lib/src/tar/export.rs | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 2e8f5a2bf..743b58365 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -71,7 +71,7 @@ struct OstreeTarWriter<'a, W: std::io::Write> { wrote_xattrs: HashSet, } -fn object_path(objtype: ostree::ObjectType, checksum: &str) -> Utf8PathBuf { +pub(crate) fn object_path(objtype: ostree::ObjectType, checksum: &str) -> Utf8PathBuf { let suffix = match objtype { ostree::ObjectType::Commit => "commit", ostree::ObjectType::CommitMeta => "commitmeta", @@ -118,6 +118,20 @@ fn symlink_is_denormal(target: &str) -> bool { target.contains("//") } +pub(crate) fn tar_append_default_data( + out: &mut tar::Builder, + path: &Utf8Path, + buf: &[u8], +) -> Result<()> { + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Regular); + h.set_uid(0); + h.set_gid(0); + h.set_mode(0o644); + h.set_size(buf.len() as u64); + out.append_data(&mut h, path, buf).map_err(Into::into) +} + impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn new(repo: &'a ostree::Repo, out: &'a mut tar::Builder, options: ExportOptions) -> Self { Self { @@ -156,15 +170,8 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { } /// Add a regular file entry with default permissions (root/root 0644) - fn append_default_data(&mut self, path: &Utf8Path, data: &[u8]) -> Result<()> { - let mut h = tar::Header::new_gnu(); - h.set_entry_type(tar::EntryType::Regular); - h.set_uid(0); - h.set_gid(0); - h.set_mode(0o644); - h.set_size(data.len() as u64); - self.out.append_data(&mut h, &path, data)?; - Ok(()) + fn append_default_data(&mut self, path: &Utf8Path, buf: &[u8]) -> Result<()> { + tar_append_default_data(self.out, path, buf) } /// Add an hardlink entry with default permissions (root/root 0644) From d4e8e029c7c01a470101ec58e4710e9494b396e9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 10 May 2022 19:45:02 -0400 Subject: [PATCH 368/775] Add API and CLI to update detached metadata In the FCOS use case and I'm sure others, we want a flow where we do a build (including a container image) and then once it's ready, we sign it by passing the commit metadata to a separate system. Basically what we want is the ability to update the detached metadata object in an exported container image. Now, I'm regretting the design choice to have the container flow reuse the tar path of having the signature be part of the tar stream instead of part of the container metadata, because it *greatly* complicates things here, particularly in terms of handling chunked images. We want to preserve all metadata and other layers in the image; we just need to add/replace a single entry in the layer that has the ostree metadata. Except, because this ostree layer gets its own special label in the container image metadata, we need to update that label. What would make this a lot easier is if we had write support via skopeo/containers-image-proxy. Because we don't, given an image on a remote registry, right now we pull the whole thing down into a temporary OCI directory, even though we only want to mutate one layer. 
Closes: https://github.com/ostreedev/ostree-rs-ext/issues/295 --- lib/src/cli.rs | 32 ++++++ lib/src/container/mod.rs | 2 + lib/src/container/update_detachedmeta.rs | 126 +++++++++++++++++++++++ lib/src/fixture.rs | 9 ++ lib/src/tar/export.rs | 79 ++++++++++++++ lib/src/tar/import.rs | 2 +- lib/tests/it/main.rs | 79 +++++++++++++- 7 files changed, 324 insertions(+), 5 deletions(-) create mode 100644 lib/src/container/update_detachedmeta.rs diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 41f167f6b..996e337d6 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -221,6 +221,23 @@ enum ContainerImageOpts { imgref: OstreeImageReference, }, + /// Replace the detached metadata (e.g. to add a signature) + ReplaceDetachedMetadata { + /// Path to the source repository + #[structopt(long)] + #[structopt(parse(try_from_str = parse_base_imgref))] + src: ImageReference, + + /// Target image + #[structopt(long)] + #[structopt(parse(try_from_str = parse_base_imgref))] + dest: ImageReference, + + /// Path to file containing new detached metadata; if not provided, + /// any existing detached metadata will be deleted. + contents: Option, + }, + /// Unreference one or more pulled container images and perform a garbage collection. Remove { /// Path to the repository @@ -711,6 +728,21 @@ where dest_repo, imgref, } => crate::container::store::copy(&src_repo, &dest_repo, &imgref).await, + ContainerImageOpts::ReplaceDetachedMetadata { + src, + dest, + contents, + } => { + let contents = contents.map(std::fs::read).transpose()?; + let digest = crate::container::update_detached_metadata( + &src, + &dest, + contents.as_deref(), + ) + .await?; + println!("Pushed: {}", digest); + Ok(()) + } ContainerImageOpts::Deploy { sysroot, stateroot, diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index cd83ab013..b2fbad6bd 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -243,6 +243,8 @@ pub use unencapsulate::*; pub(crate) mod ocidir; mod skopeo; pub mod store; +mod update_detachedmeta; +pub use update_detachedmeta::*; #[cfg(test)] mod tests { diff --git a/lib/src/container/update_detachedmeta.rs b/lib/src/container/update_detachedmeta.rs new file mode 100644 index 000000000..35d8d6c45 --- /dev/null +++ b/lib/src/container/update_detachedmeta.rs @@ -0,0 +1,126 @@ +use super::ImageReference; +use crate::container::{ocidir, skopeo}; +use crate::container::{store as container_store, Transport}; +use anyhow::{anyhow, Context, Result}; +use camino::Utf8Path; +use std::io::{BufReader, BufWriter}; +use std::rc::Rc; + +/// Given an OSTree container image reference, update the detached metadata (e.g. GPG signature) +/// while preserving all other container image metadata. +/// +/// The return value is the manifest digest of (e.g. `@sha256:`) the image. +pub async fn update_detached_metadata( + src: &ImageReference, + dest: &ImageReference, + detached_buf: Option<&[u8]>, +) -> Result { + // For now, convert the source to a temporary OCI directory, so we can directly + // parse and manipulate it. In the future this will be replaced by https://github.com/ostreedev/ostree-rs-ext/issues/153 + // and other work to directly use the containers/image API via containers-image-proxy. 
+ let tempdir = tempfile::tempdir_in("/var/tmp")?; + let tempsrc = tempdir.path().join("src"); + let tempsrc_utf8 = Utf8Path::from_path(&tempsrc).ok_or_else(|| anyhow!("Invalid tempdir"))?; + let tempsrc_ref = ImageReference { + transport: Transport::OciDir, + name: tempsrc_utf8.to_string(), + }; + + // Full copy of the source image + let pulled_digest: String = skopeo::copy(src, &tempsrc_ref) + .await + .context("Creating temporary copy to OCI dir")?; + + // Copy to the thread + let detached_buf = detached_buf.map(Vec::from); + let tempsrc_ref_path = tempsrc_ref.name.clone(); + // Fork a thread to do the heavy lifting of filtering the tar stream, rewriting the manifest/config. + crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| { + // Open the temporary OCI directory. + let tempsrc = Rc::new(openat::Dir::open(tempsrc_ref_path).context("Opening src")?); + let tempsrc = ocidir::OciDir::open(tempsrc)?; + + // Load the manifest, platform, and config + let (mut manifest, manifest_descriptor) = tempsrc + .read_manifest_and_descriptor() + .context("Reading manifest from source")?; + anyhow::ensure!(manifest_descriptor.digest().as_str() == pulled_digest.as_str()); + let platform = manifest_descriptor + .platform() + .as_ref() + .cloned() + .unwrap_or_default(); + let mut config: oci_spec::image::ImageConfiguration = + tempsrc.read_json_blob(manifest.config())?; + let mut ctrcfg = config + .config() + .as_ref() + .cloned() + .ok_or_else(|| anyhow!("Image is missing container configuration"))?; + + // Find the OSTree commit layer we want to replace + let commit_layer = container_store::ostree_layer(&manifest, &config)?; + let commit_layer_idx = manifest + .layers() + .iter() + .position(|x| x == commit_layer) + .unwrap(); + + // Create a new layer + let out_layer = { + // Create tar streams for source and destination + let src_layer = BufReader::new(tempsrc.read_blob(commit_layer)?); + let mut src_layer = flate2::read::GzDecoder::new(src_layer); + let mut out_layer = BufWriter::new(tempsrc.create_raw_layer(None)?); + + // Process the tar stream and inject our new detached metadata + crate::tar::update_detached_metadata( + &mut src_layer, + &mut out_layer, + detached_buf.as_deref(), + Some(cancellable), + )?; + + // Flush all wrappers, and finalize the layer + out_layer + .into_inner() + .map_err(|_| anyhow!("Failed to flush buffer"))? + .complete()? + }; + // Get the diffid and descriptor for our new tar layer + let out_layer_diffid = format!("sha256:{}", out_layer.uncompressed_sha256); + let out_layer_descriptor = out_layer + .descriptor() + .media_type(oci_spec::image::MediaType::ImageLayerGzip) + .build() + .unwrap(); // SAFETY: We pass all required fields + + // Splice it into both the manifest and config + manifest.layers_mut()[commit_layer_idx] = out_layer_descriptor; + config.rootfs_mut().diff_ids_mut()[commit_layer_idx] = out_layer_diffid.clone(); + + let labels = ctrcfg.labels_mut().get_or_insert_with(Default::default); + labels.insert( + crate::container::OSTREE_DIFFID_LABEL.into(), + out_layer_diffid, + ); + config.set_config(Some(ctrcfg)); + + // Write the config and manifest + let new_config_descriptor = tempsrc.write_config(config)?; + manifest.set_config(new_config_descriptor); + // This entirely replaces the single entry in the OCI directory, which skopeo will find by default. 
+ tempsrc + .write_manifest(manifest, platform) + .context("Writing manifest")?; + Ok(()) + }) + .await + .context("Regenerating commit layer")?; + + // Finally, copy the mutated image back to the target. For chunked images, + // because we only changed one layer, skopeo should know not to re-upload shared blobs. + crate::container::skopeo::copy(&tempsrc_ref, dest) + .await + .context("Copying to destination") +} diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 802f3eff6..ceb6f553f 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -402,6 +402,15 @@ impl Fixture { &self.destrepo } + // Delete all objects in the destrepo + pub fn clear_destrepo(&self) -> Result<()> { + self.destrepo() + .set_ref_immediate(None, self.testref(), None, gio::NONE_CANCELLABLE)?; + self.destrepo() + .prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::NONE_CANCELLABLE)?; + Ok(()) + } + pub fn write_filedef(&self, root: &ostree::MutableTree, def: &FileDef) -> Result<()> { let parent_path = def.path.parent(); let parent = if let Some(parent_path) = parent_path { diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 743b58365..4e8b81bf4 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -14,6 +14,7 @@ use ostree::gio; use std::borrow::Borrow; use std::borrow::Cow; use std::collections::HashSet; +use std::convert::TryInto; use std::io::BufReader; /// The repository mode generated by a tar export stream. @@ -631,6 +632,84 @@ pub(crate) fn export_final_chunk( write_chunk(writer, &chunking.remainder) } +/// Process an exported tar stream, and update the detached metadata. +#[allow(clippy::while_let_on_iterator)] +#[context("Replacing detached metadata")] +pub(crate) fn reinject_detached_metadata>( + src: &mut tar::Archive, + dest: &mut tar::Builder, + detached_buf: Option<&[u8]>, + cancellable: Option<&C>, +) -> Result<()> { + let mut entries = src.entries()?; + let mut commit_ent = None; + // Loop through the tar stream until we find the commit object; copy all prior entries + // such as the baseline directory structure. 
+ while let Some(entry) = entries.next() { + if let Some(c) = cancellable { + c.set_error_if_cancelled()?; + } + let entry = entry?; + let header = entry.header(); + let path = entry.path()?; + let path: &Utf8Path = (&*path).try_into()?; + if !(header.entry_type() == tar::EntryType::Regular && path.as_str().ends_with(".commit")) { + crate::tar::write::copy_entry(entry, dest, None)?; + } else { + commit_ent = Some(entry); + break; + } + } + let commit_ent = commit_ent.ok_or_else(|| anyhow!("Missing commit object"))?; + let commit_path = commit_ent.path()?; + let commit_path = Utf8Path::from_path(&*commit_path) + .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", commit_path))?; + let (checksum, objtype) = crate::tar::import::Importer::parse_metadata_entry(commit_path)?; + assert_eq!(objtype, ostree::ObjectType::Commit); // Should have been verified above + crate::tar::write::copy_entry(commit_ent, dest, None)?; + + // If provided, inject our new detached metadata object + if let Some(detached_buf) = detached_buf { + let detached_path = object_path(ostree::ObjectType::CommitMeta, &checksum); + tar_append_default_data(dest, &detached_path, detached_buf)?; + } + + // If the next entry is detached metadata, then drop it since we wrote a new one + let next_ent = entries + .next() + .ok_or_else(|| anyhow!("Expected metadata object after commit"))??; + let next_ent_path = next_ent.path()?; + let next_ent_path: &Utf8Path = (&*next_ent_path).try_into()?; + let objtype = crate::tar::import::Importer::parse_metadata_entry(next_ent_path)?.1; + if objtype != ostree::ObjectType::CommitMeta { + dbg!(objtype); + crate::tar::write::copy_entry(next_ent, dest, None)?; + } + + // Finally, copy all remaining entries. + while let Some(entry) = entries.next() { + if let Some(c) = cancellable { + c.set_error_if_cancelled()?; + } + crate::tar::write::copy_entry(entry?, dest, None)?; + } + + Ok(()) +} + +/// Replace the detached metadata in an tar stream which is an export of an OSTree commit. 
+pub fn update_detached_metadata>( + src: impl std::io::Read, + dest: D, + detached_buf: Option<&[u8]>, + cancellable: Option<&C>, +) -> Result { + let mut src = tar::Archive::new(src); + let mut dest = tar::Builder::new(dest); + reinject_detached_metadata(&mut src, &mut dest, detached_buf, cancellable)?; + dest.into_inner().map_err(Into::into) +} + #[cfg(test)] mod tests { use super::*; diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 908fc6b95..71974add1 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -214,7 +214,7 @@ impl Importer { } } - fn parse_metadata_entry(path: &Utf8Path) -> Result<(String, ostree::ObjectType)> { + pub(crate) fn parse_metadata_entry(path: &Utf8Path) -> Result<(String, ostree::ObjectType)> { let (parentname, name, objtype) = parse_object_entry_path(path)?; let checksum = parse_checksum(parentname, name)?; let objtype = objtype_from_string(objtype) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 27c18965b..f1f233bf6 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -14,6 +14,7 @@ use ostree_ext::{gio, glib}; use sh_inline::bash_in; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; +use std::io::{BufReader, BufWriter}; use std::os::unix::fs::DirBuilderExt; use std::process::Command; @@ -148,6 +149,62 @@ async fn test_tar_import_signed() -> Result<()> { .as_str() ); assert_eq!(state, ostree::RepoCommitState::NORMAL); + + // Drop the commit metadata, and verify that import fails + fixture.clear_destrepo()?; + let nometa = "test-no-commitmeta.tar"; + let srcf = fixture.dir.open(test_tar)?; + let destf = fixture.dir.create(nometa)?; + tokio::task::spawn_blocking(move || -> Result<_> { + let src = BufReader::new(srcf); + let f = BufWriter::new(destf); + ostree_ext::tar::update_detached_metadata(src, f, None, gio::NONE_CANCELLABLE).unwrap(); + Ok(()) + }) + .await??; + let src_tar = tokio::fs::File::from_std(fixture.dir.open(nometa)?.into_std()); + let r = ostree_ext::tar::import_tar( + fixture.destrepo(), + src_tar, + Some(TarImportOptions { + remote: Some("myremote".to_string()), + }), + ) + .await; + assert_err_contains(r, "Expected commitmeta object"); + + // Now inject garbage into the commitmeta by flipping some bits in the signature + let rev = fixture.srcrepo().require_rev(fixture.testref())?; + let commitmeta = fixture + .srcrepo() + .read_commit_detached_metadata(&rev, gio::NONE_CANCELLABLE)? 
+ .unwrap(); + let mut commitmeta = Vec::from(&*commitmeta.data_as_bytes()); + let len = commitmeta.len() / 2; + let last = commitmeta.get_mut(len).unwrap(); + (*last) = last.wrapping_add(1); + + let srcf = fixture.dir.open(test_tar)?; + let destf = fixture.dir.create(nometa)?; + tokio::task::spawn_blocking(move || -> Result<_> { + let src = BufReader::new(srcf); + let f = BufWriter::new(destf); + ostree_ext::tar::update_detached_metadata(src, f, Some(&commitmeta), gio::NONE_CANCELLABLE) + .unwrap(); + Ok(()) + }) + .await??; + let src_tar = tokio::fs::File::from_std(fixture.dir.open(nometa)?.into_std()); + let r = ostree_ext::tar::import_tar( + fixture.destrepo(), + src_tar, + Some(TarImportOptions { + remote: Some("myremote".to_string()), + }), + ) + .await; + assert_err_contains(r, "BAD signature"); + Ok(()) } @@ -514,8 +571,6 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { &fixture.dir, "ostree --repo=dest/repo remote gpg-import --stdin myremote < src/gpghome/key1.asc", )?; - - // No remote matching let srcoci_verified = OstreeImageReference { sigverify: SignatureSource::OstreeRemote("myremote".to_string()), imgref: srcoci_imgref.clone(), @@ -525,6 +580,22 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); + let temp_unsigned = ImageReference { + transport: Transport::OciDir, + name: fixture.path.join("unsigned.ocidir").to_string(), + }; + let _: String = + ostree_ext::container::update_detached_metadata(&srcoci_imgref, &temp_unsigned, None) + .await + .unwrap(); + let temp_unsigned = OstreeImageReference { + sigverify: SignatureSource::OstreeRemote("myremote".to_string()), + imgref: temp_unsigned, + }; + fixture.clear_destrepo()?; + let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &temp_unsigned, None).await; + assert_err_contains(r, "Expected commitmeta object"); + // Test without signature verification // Create a new repo { @@ -690,8 +761,8 @@ async fn oci_clone(src: impl AsRef, dest: impl AsRef) -> Res #[tokio::test] async fn test_container_import_export() -> Result<()> { - impl_test_container_import_export(false).await?; - impl_test_container_import_export(true).await?; + impl_test_container_import_export(false).await.unwrap(); + impl_test_container_import_export(true).await.unwrap(); Ok(()) } From 0e8a1762c84a877e84a8f73ba1c19cf22dcd91f0 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 24 May 2022 05:51:59 -0400 Subject: [PATCH 369/775] lib: Release 0.7.3 Basically just picks up the new detached metadata bits. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 9be23ef3a..812c67877 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.7.2" +version = "0.7.3" [dependencies] anyhow = "1.0" From e3c6771499bdf9218980a8d2304929fa89254f11 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 15 Apr 2022 13:52:49 -0400 Subject: [PATCH 370/775] container: Remove current byte-level progress option We need to rework this to have useful per-layer information. 
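After this change, the minimal consumer-side flow looks like the following sketch (derived from the cli.rs diff below; the function name is hypothetical):

async fn pull_unencapsulated(
    repo: &ostree::Repo,
    imgref: &OstreeImageReference,
) -> anyhow::Result<()> {
    // No byte-level progress channel any more; construct the importer and
    // let unencapsulate() drive prepare + fetch internally.
    let importer = ImageImporter::new(repo, imgref, Default::default()).await?;
    let import = importer.unencapsulate().await?;
    println!("Imported: {} digest: {}", import.ostree_commit, import.image_digest);
    Ok(())
}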
---
 lib/src/cli.rs                     | 40 +++---------------------
 lib/src/container/store.rs         | 37 +++++++++-------------
 lib/src/container/unencapsulate.rs | 39 +++++------------------
 lib/tests/it/main.rs               | 17 ++++++-----
 4 files changed, 32 insertions(+), 101 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 996e337d6..2565c7f6a 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -7,23 +7,19 @@
 use anyhow::{Context, Result};
 use camino::Utf8PathBuf;
-use futures_util::FutureExt;
 use ostree::{cap_std, gio, glib};
-use std::borrow::Borrow;
 use std::collections::BTreeMap;
 use std::convert::TryFrom;
 use std::ffi::OsString;
 use std::path::PathBuf;
 use structopt::StructOpt;
 use tokio::sync::mpsc::Receiver;
-use tokio_stream::StreamExt;
 
 use crate::commit::container_commit;
-use crate::container as ostree_container;
 use crate::container::store::{ImportProgress, PreparedImport};
-use crate::container::{Config, ImageReference, OstreeImageReference, UnencapsulateOptions};
+use crate::container::{self as ostree_container};
+use crate::container::{Config, ImageReference, OstreeImageReference};
 use ostree_container::store::{ImageImporter, PrepareResult};
-use ostree_container::UnencapsulationProgress;
 
 /// Parse an [`OstreeImageReference`] from a CLI argument.
 pub fn parse_imgref(s: &str) -> Result<OstreeImageReference> {
@@ -384,11 +380,6 @@ fn tar_export(opts: &ExportOpts) -> Result<()> {
     Ok(())
 }
 
-enum ProgressOrFinish {
-    Progress(UnencapsulationProgress),
-    Finished(Result<Import>),
-}
-
 /// Render an import progress notification as a string.
 pub fn layer_progress_format(p: &ImportProgress) -> String {
     let (starting, s, layer) = match p {
@@ -436,7 +427,6 @@ async fn container_import(
     write_ref: Option<&str>,
     quiet: bool,
 ) -> Result<()> {
-    let (tx_progress, rx_progress) = tokio::sync::watch::channel(Default::default());
     let target = indicatif::ProgressDrawTarget::stdout();
     let style = indicatif::ProgressStyle::default_bar();
     let pb = (!quiet).then(|| {
@@ -447,30 +437,8 @@ async fn container_import(
         pb.set_message("Downloading...");
         pb
     });
-    let opts = UnencapsulateOptions {
-        progress: Some(tx_progress),
-    };
-    let rx_progress_stream =
-        tokio_stream::wrappers::WatchStream::new(rx_progress).map(ProgressOrFinish::Progress);
-    let import = crate::container::unencapsulate(repo, imgref, Some(opts))
-        .into_stream()
-        .map(ProgressOrFinish::Finished);
-    let stream = rx_progress_stream.merge(import);
-    tokio::pin!(stream);
-    let mut import_result = None;
-    while let Some(value) = stream.next().await {
-        match value {
-            ProgressOrFinish::Progress(progress) => {
-                let n = progress.borrow().processed_bytes;
-                if let Some(pb) = pb.as_ref() {
-                    pb.set_message(format!("Processed: {}", indicatif::HumanBytes(n)));
-                }
-            }
-            ProgressOrFinish::Finished(import) => {
-                import_result = Some(import?);
-            }
-        }
-    }
+    let importer = ImageImporter::new(repo, imgref, Default::default()).await?;
+    let import_result = importer.unencapsulate().await;
     if let Some(pb) = pb.as_ref() {
         pb.finish();
     }
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 3c60c8770..c0a53a7c0 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -15,7 +15,6 @@ use ostree::prelude::{Cast, ToVariant};
 use ostree::{gio, glib};
 use std::collections::{BTreeSet, HashMap};
 use std::iter::FromIterator;
-use std::sync::{Arc, Mutex};
 use tokio::sync::mpsc::{Receiver, Sender};
 
 /// Configuration for the proxy.
@@ -315,6 +314,7 @@ impl ImageImporter {
     pub fn set_target(&mut self, target: &OstreeImageReference) {
         self.target_imgref = Some(target.clone())
     }
+
     /// Determine if there is a new manifest, and if so return its digest.
     pub async fn prepare(&mut self) -> Result<PrepareResult> {
         self.prepare_internal(false).await
@@ -408,7 +408,6 @@ impl ImageImporter {
     pub(crate) async fn unencapsulate_base(
         &mut self,
         import: &mut store::PreparedImport,
-        options: Option<UnencapsulateOptions>,
         write_refs: bool,
     ) -> Result<()> {
         tracing::debug!("Fetching base");
@@ -417,7 +416,6 @@ impl ImageImporter {
         {
             return Err(anyhow!("containers-policy.json specifies a default of `insecureAcceptAnything`; refusing usage"));
         }
-        let options = options.unwrap_or_default();
         let remote = match &self.imgref.sigverify {
             SignatureSource::OstreeRemote(remote) => Some(remote.clone()),
             SignatureSource::ContainerPolicy | SignatureSource::ContainerPolicyAllowInsecure => {
@@ -425,7 +423,6 @@ impl ImageImporter {
             }
         };
 
-        let progress = options.progress.map(|v| Arc::new(Mutex::new(v)));
         for layer in import.ostree_layers.iter_mut() {
             if layer.commit.is_some() {
                 continue;
@@ -436,10 +433,6 @@ impl ImageImporter {
             }
             let (blob, driver) =
                 fetch_layer_decompress(&mut self.proxy, &self.proxy_img, &layer.layer).await?;
-            let blob = super::unencapsulate::ProgressReader {
-                reader: blob,
-                progress: progress.as_ref().map(Arc::clone),
-            };
             let repo = self.repo.clone();
             let target_ref = layer.ostree_ref.clone();
             let import_task =
@@ -480,10 +473,6 @@ impl ImageImporter {
             &import.ostree_commit_layer.layer,
         )
         .await?;
-        let blob = ProgressReader {
-            reader: blob,
-            progress: progress.as_ref().map(Arc::clone),
-        };
         let repo = self.repo.clone();
         let target_ref = import.ostree_commit_layer.ostree_ref.clone();
         let import_task =
@@ -518,17 +507,19 @@ impl ImageImporter {
     ///
     /// This does not write cached references for each blob, and errors out if
     /// the image has any non-ostree layers.
-    pub async fn unencapsulate(
-        mut self,
-        mut import: Box<PreparedImport>,
-        options: Option<UnencapsulateOptions>,
-    ) -> Result<Import> {
-        if !import.layers.is_empty() {
-            anyhow::bail!("Image has {} non-ostree layers", import.layers.len());
+    pub async fn unencapsulate(mut self) -> Result<Import> {
+        let mut prep = match self.prepare_internal(false).await? {
+            PrepareResult::AlreadyPresent(_) => {
+                panic!("Should not have image present for unencapsulation")
+            }
+            PrepareResult::Ready(r) => r,
+        };
+        if !prep.layers.is_empty() {
+            anyhow::bail!("Image has {} non-ostree layers", prep.layers.len());
         }
-        self.unencapsulate_base(&mut import, options, false).await?;
-        let ostree_commit = import.ostree_commit_layer.commit.unwrap();
-        let image_digest = import.manifest_digest;
+        self.unencapsulate_base(&mut prep, false).await?;
+        let ostree_commit = prep.ostree_commit_layer.commit.unwrap();
+        let image_digest = prep.manifest_digest;
         Ok(Import {
             ostree_commit,
             image_digest,
@@ -542,7 +533,7 @@ impl ImageImporter {
     ) -> Result<Box<LayeredImageState>> {
         // First download all layers for the base image (if necessary) - we need the SELinux policy
         // there to label all following layers.
-        self.unencapsulate_base(&mut import, None, true).await?;
+        self.unencapsulate_base(&mut import, true).await?;
         let mut proxy = self.proxy;
         let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref);
         let base_commit = import.ostree_commit_layer.commit.clone().unwrap();
diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs
index e0f3ca793..279af5513 100644
--- a/lib/src/container/unencapsulate.rs
+++ b/lib/src/container/unencapsulate.rs
@@ -40,14 +40,7 @@ use std::sync::{Arc, Mutex};
 use tokio::io::{AsyncBufRead, AsyncRead};
 use tracing::instrument;
 
-/// The result of an import operation
-#[derive(Copy, Clone, Debug, Default)]
-pub struct UnencapsulationProgress {
-    /// Number of bytes downloaded (approximate)
-    pub processed_bytes: u64,
-}
-
-type Progress = tokio::sync::watch::Sender<UnencapsulationProgress>;
+type Progress = tokio::sync::watch::Sender<u64>;
 
 /// A read wrapper that updates the download progress.
 #[pin_project::pin_project]
@@ -76,7 +69,7 @@ impl<T: AsyncRead> AsyncRead for ProgressReader<T> {
             let newlen = buf.filled().len();
             debug_assert!(newlen >= len);
             let read = (newlen - len) as u64;
-            state.processed_bytes += read;
+            state += read;
             state
         };
         // Ignore errors, if the caller disconnected from progress that's OK.
@@ -152,32 +145,12 @@ pub(crate) async fn join_fetch(
     }
 }
 
-/// Configuration for container fetches.
-#[derive(Debug, Default)]
-pub struct UnencapsulateOptions {
-    /// Channel which will receive progress updates
-    pub progress: Option<tokio::sync::watch::Sender<UnencapsulationProgress>>,
-}
-
 /// Fetch a container image and import its embedded OSTree commit.
 #[context("Importing {}", imgref)]
-#[instrument(skip(repo, options))]
-pub async fn unencapsulate(
-    repo: &ostree::Repo,
-    imgref: &OstreeImageReference,
-    options: Option<UnencapsulateOptions>,
-) -> Result<Import> {
-    let mut importer = super::store::ImageImporter::new(repo, imgref, Default::default()).await?;
-    let prep = match importer.prepare().await? {
-        store::PrepareResult::AlreadyPresent(r) => {
-            return Ok(Import {
-                ostree_commit: r.base_commit,
-                image_digest: r.manifest_digest,
-            });
-        }
-        store::PrepareResult::Ready(r) => r,
-    };
-    importer.unencapsulate(prep, options).await
+#[instrument(skip(repo))]
+pub async fn unencapsulate(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<Import> {
+    let importer = super::store::ImageImporter::new(repo, imgref, Default::default()).await?;
+    importer.unencapsulate().await
 }
 
 /// Create a decompressor for this MIME type, given a stream of input.
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index f1f233bf6..dbff8a460 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -555,7 +555,7 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { sigverify: SignatureSource::OstreeRemote("unknownremote".to_string()), imgref: srcoci_imgref.clone(), }; - let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_unknownremote, None) + let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_unknownremote) .await .context("importing"); assert_err_contains(r, r#"Remote "unknownremote" not found"#); @@ -575,7 +575,7 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { sigverify: SignatureSource::OstreeRemote("myremote".to_string()), imgref: srcoci_imgref.clone(), }; - let import = ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_verified, None) + let import = ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_verified) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); @@ -593,17 +593,16 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { imgref: temp_unsigned, }; fixture.clear_destrepo()?; - let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &temp_unsigned, None).await; + let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &temp_unsigned).await; assert_err_contains(r, "Expected commitmeta object"); // Test without signature verification // Create a new repo { let fixture = Fixture::new_v1()?; - let import = - ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_unverified, None) - .await - .context("importing")?; + let import = ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_unverified) + .await + .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); } @@ -824,7 +823,7 @@ async fn test_container_write_derive() -> Result<()> { assert!(images.is_empty()); // Verify importing a derived image fails - let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &derived_ref, None).await; + let r = ostree_ext::container::unencapsulate(fixture.destrepo(), &derived_ref).await; assert_err_contains(r, "Image has 1 non-ostree layers"); // Pull a derived image - two layers, new base plus one layer. @@ -986,7 +985,7 @@ async fn test_container_import_export_registry() -> Result<()> { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: digested_imgref, }; - let import = ostree_ext::container::unencapsulate(fixture.destrepo(), &import_ref, None) + let import = ostree_ext::container::unencapsulate(fixture.destrepo(), &import_ref) .await .context("importing")?; assert_eq!(import.ostree_commit, testrev.as_str()); From 8469aa1c230f23247762fd9124e60a2ef00979e4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 17 May 2022 16:55:58 -0400 Subject: [PATCH 371/775] container: Add new byte-level progress This new byte-level progress API allows clients to render fine-grained download progress of individual layers. Notably, it replaces the previous broken API which operated on the *decompressed* stream - but we don't know the total size of that. This one operates on the compressed stream, and we have the total in the manifest so we can render an accurate progress bar. 
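
As a sketch of the consuming side (error handling elided; the importer
APIs shown here are the ones added in this patch), a client can watch
the per-layer byte counts like so:

    let mut imp = ImageImporter::new(repo, imgref, Default::default()).await?;
    let mut byte_progress = imp.request_layer_progress();
    tokio::task::spawn(async move {
        while byte_progress.changed().await.is_ok() {
            // The channel carries Option<LayerProgress>; None until a fetch starts.
            if let Some(p) = &*byte_progress.borrow_and_update() {
                eprintln!("layer {}: {}/{} bytes", p.layer_index, p.fetched, p.total);
            }
        }
    });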
---
 lib/src/container/store.rs         | 49 ++++++++++++++++++-
 lib/src/container/unencapsulate.rs | 75 ++++++++++++++++++++++--------
 2 files changed, 102 insertions(+), 22 deletions(-)

diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index c0a53a7c0..d561ed554 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -67,6 +67,29 @@ pub enum ImportProgress {
     DerivedLayerCompleted(Descriptor),
 }
 
+impl ImportProgress {
+    /// Returns `true` if this message signifies the start of a new layer being fetched.
+    pub fn is_starting(&self) -> bool {
+        match self {
+            ImportProgress::OstreeChunkStarted(_) => true,
+            ImportProgress::OstreeChunkCompleted(_) => false,
+            ImportProgress::DerivedLayerStarted(_) => true,
+            ImportProgress::DerivedLayerCompleted(_) => false,
+        }
+    }
+}
+
+/// Sent across a channel to track the byte-level progress of a layer fetch.
+#[derive(Debug)]
+pub struct LayerProgress {
+    /// Index of the layer in the manifest
+    pub layer_index: usize,
+    /// Number of bytes downloaded
+    pub fetched: u64,
+    /// Total number of bytes outstanding
+    pub total: u64,
+}
+
 /// State of an already pulled layered image.
 #[derive(Debug, PartialEq, Eq)]
 pub struct LayeredImageState {
@@ -110,6 +133,7 @@ pub struct ImageImporter {
     pub(crate) proxy_img: OpenedImage,
 
     layer_progress: Option<Sender<ImportProgress>>,
+    layer_byte_progress: Option<tokio::sync::watch::Sender<Option<LayerProgress>>>,
 }
 
 /// Result of invoking [`LayeredImageImporter::prepare`].
@@ -307,6 +331,7 @@ impl ImageImporter {
             target_imgref: None,
             imgref: imgref.clone(),
             layer_progress: None,
+            layer_byte_progress: None,
         })
     }
 
@@ -328,6 +353,16 @@ impl ImageImporter {
         r
     }
 
+    /// Create a channel receiver that will get notifications for byte-level progress of layer fetches.
+    pub fn request_layer_progress(
+        &mut self,
+    ) -> tokio::sync::watch::Receiver<Option<LayerProgress>> {
+        assert!(self.layer_byte_progress.is_none());
+        let (s, r) = tokio::sync::watch::channel(None);
+        self.layer_byte_progress = Some(s);
+        r
+    }
+
     /// Determine if there is a new manifest, and if so return its digest.
#[context("Fetching manifest")] pub(crate) async fn prepare_internal(&mut self, verify_layers: bool) -> Result { @@ -431,8 +466,14 @@ impl ImageImporter { p.send(ImportProgress::OstreeChunkStarted(layer.layer.clone())) .await?; } - let (blob, driver) = - fetch_layer_decompress(&mut self.proxy, &self.proxy_img, &layer.layer).await?; + let (blob, driver) = fetch_layer_decompress( + &mut self.proxy, + &self.proxy_img, + &import.manifest, + &layer.layer, + self.layer_byte_progress.as_ref(), + ) + .await?; let repo = self.repo.clone(); let target_ref = layer.ostree_ref.clone(); let import_task = @@ -470,7 +511,9 @@ impl ImageImporter { let (blob, driver) = fetch_layer_decompress( &mut self.proxy, &self.proxy_img, + &import.manifest, &import.ostree_commit_layer.layer, + self.layer_byte_progress.as_ref(), ) .await?; let repo = self.repo.clone(); @@ -554,7 +597,9 @@ impl ImageImporter { let (blob, driver) = super::unencapsulate::fetch_layer_decompress( &mut proxy, &self.proxy_img, + &import.manifest, &layer.layer, + self.layer_byte_progress.as_ref(), ) .await?; // An important aspect of this is that we SELinux label the derived layers using diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 279af5513..7d2fb75ff 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -31,13 +31,18 @@ // Once we have the manifest, we expect it to point to a single `application/vnd.oci.image.layer.v1.tar+gzip` layer, // which is exactly what is exported by the [`crate::tar::export`] process. +use crate::container::store::LayerProgress; + use super::*; use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; -use futures_util::Future; +use futures_util::{Future, FutureExt}; use oci_spec::image as oci_image; use std::sync::{Arc, Mutex}; -use tokio::io::{AsyncBufRead, AsyncRead}; +use tokio::{ + io::{AsyncBufRead, AsyncRead}, + sync::watch::{Receiver, Sender}, +}; use tracing::instrument; type Progress = tokio::sync::watch::Sender; @@ -49,7 +54,15 @@ pub(crate) struct ProgressReader { #[pin] pub(crate) reader: T, #[pin] - pub(crate) progress: Option>>, + pub(crate) progress: Arc>, +} + +impl ProgressReader { + pub(crate) fn new(reader: T) -> (Self, Receiver) { + let (progress, r) = tokio::sync::watch::channel(1); + let progress = Arc::new(Mutex::new(progress)); + (ProgressReader { reader, progress }, r) + } } impl AsyncRead for ProgressReader { @@ -62,19 +75,17 @@ impl AsyncRead for ProgressReader { let len = buf.filled().len(); match this.reader.poll_read(cx, buf) { v @ std::task::Poll::Ready(Ok(_)) => { - if let Some(progress) = this.progress.as_ref().get_ref() { - let progress = progress.lock().unwrap(); - let state = { - let mut state = *progress.borrow(); - let newlen = buf.filled().len(); - debug_assert!(newlen >= len); - let read = (newlen - len) as u64; - state += read; - state - }; - // Ignore errors, if the caller disconnected from progress that's OK. - let _ = progress.send(state); - } + let progress = this.progress.lock().unwrap(); + let state = { + let mut state = *progress.borrow(); + let newlen = buf.filled().len(); + debug_assert!(newlen >= len); + let read = (newlen - len) as u64; + state += read; + state + }; + // Ignore errors, if the caller disconnected from progress that's OK. + let _ = progress.send(state); v } o => o, @@ -168,19 +179,43 @@ fn new_async_decompressor<'a>( } /// A wrapper for [`get_blob`] which fetches a layer and decompresses it. 
-#[instrument(skip(proxy, img, layer))]
+//#[instrument(skip(proxy, img, layer))]
 pub(crate) async fn fetch_layer_decompress<'a>(
     proxy: &'a mut ImageProxy,
     img: &OpenedImage,
-    layer: &oci_image::Descriptor,
+    manifest: &oci_image::ImageManifest,
+    layer: &'a oci_image::Descriptor,
+    progress: Option<&'a Sender<Option<LayerProgress>>>,
 ) -> Result<(
     Box<dyn AsyncBufRead + Send + Unpin>,
     impl Future<Output = Result<()>> + 'a,
)> {
+    use futures_util::future::Either;
     tracing::debug!("fetching {}", layer.digest());
+    let layer_index = manifest.layers().iter().position(|x| x == layer).unwrap();
+
     let (blob, driver) = proxy
         .get_blob(img, layer.digest().as_str(), layer.size() as u64)
         .await?;
-    let blob = new_async_decompressor(layer.media_type(), blob)?;
-    Ok((blob, driver))
+    if let Some(progress) = progress {
+        let (readprogress, mut readwatch) = ProgressReader::new(blob);
+        let readprogress = tokio::io::BufReader::new(readprogress);
+        let readproxy = async move {
+            while let Ok(()) = readwatch.changed().await {
+                let fetched = readwatch.borrow_and_update();
+                let status = LayerProgress {
+                    layer_index,
+                    fetched: *fetched,
+                    total: layer.size() as u64,
+                };
+                progress.send_replace(Some(status));
+            }
+        };
+        let reader = new_async_decompressor(layer.media_type(), readprogress)?;
+        let driver = futures_util::future::join(readproxy, driver).map(|r| r.1);
+        Ok((reader, Either::Left(driver)))
+    } else {
+        let blob = new_async_decompressor(layer.media_type(), blob)?;
+        Ok((blob, Either::Right(driver)))
+    }
 }
From 641e7a672be55d3b54ca6af15bef615734e4e362 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Sat, 21 May 2022 08:51:02 -0400
Subject: [PATCH 372/775] cli: Use byte progress for containers

We can now show accurate download progress.
---
 lib/src/cli.rs | 64 +++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 55 insertions(+), 9 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 2565c7f6a..7b1ffd9f6 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -16,7 +16,7 @@ use structopt::StructOpt;
 use tokio::sync::mpsc::Receiver;
 
 use crate::commit::container_commit;
-use crate::container::store::{ImportProgress, PreparedImport};
+use crate::container::store::{ImportProgress, LayerProgress, PreparedImport};
 use crate::container::{self as ostree_container};
 use crate::container::{Config, ImageReference, OstreeImageReference};
 use ostree_container::store::{ImageImporter, PrepareResult};
@@ -187,6 +187,10 @@ enum ContainerImageOpts {
 
         #[structopt(flatten)]
         proxyopts: ContainerProxyOpts,
+
+        /// Don't display progress
+        #[structopt(long)]
+        quiet: bool,
     },
 
     /// Output metadata about an already stored container image.
@@ -398,9 +402,43 @@ pub fn layer_progress_format(p: &ImportProgress) -> String {
     }
 }
 
-async fn handle_layer_progress_print(mut r: Receiver<ImportProgress>) {
-    while let Some(v) = r.recv().await {
-        println!("{}", layer_progress_format(&v));
+async fn handle_layer_progress_print(
+    mut layers: Receiver<ImportProgress>,
+    mut layer_bytes: tokio::sync::watch::Receiver<Option<LayerProgress>>,
+) {
+    let style = indicatif::ProgressStyle::default_bar();
+    let pb = indicatif::ProgressBar::new(100);
+    pb.set_style(style.template("{prefix} {bytes} [{bar:20}] ({eta}) {msg}"));
+    loop {
+        tokio::select! {
+            // Always handle layer changes first.
+ biased; + layer = layers.recv() => { + if let Some(l) = layer { + if l.is_starting() { + pb.set_position(0); + } else { + pb.finish(); + } + pb.set_message(layer_progress_format(&l)); + } else { + // If the receiver is disconnected, then we're done + break + }; + }, + r = layer_bytes.changed() => { + if r.is_err() { + // If the receiver is disconnected, then we're done + break + } + let bytes = layer_bytes.borrow(); + if let Some(bytes) = &*bytes { + pb.set_length(bytes.total); + pb.set_position(bytes.fetched); + } + } + + } } } @@ -498,9 +536,9 @@ async fn container_store( repo: &ostree::Repo, imgref: &OstreeImageReference, proxyopts: ContainerProxyOpts, + quiet: bool, ) -> Result<()> { let mut imp = ImageImporter::new(repo, imgref, proxyopts.into()).await?; - let layer_progress = imp.request_progress(); let prep = match imp.prepare().await? { PrepareResult::AlreadyPresent(c) => { println!("No changes in {} => {}", imgref, c.merge_commit); @@ -509,10 +547,17 @@ async fn container_store( PrepareResult::Ready(r) => r, }; print_layer_status(&prep); - let progress_printer = - tokio::task::spawn(async move { handle_layer_progress_print(layer_progress).await }); + let printer = (!quiet).then(|| { + let layer_progress = imp.request_progress(); + let layer_byte_progress = imp.request_layer_progress(); + tokio::task::spawn(async move { + handle_layer_progress_print(layer_progress, layer_byte_progress).await + }) + }); let import = imp.import(prep).await; - let _ = progress_printer.await; + if let Some(printer) = printer { + let _ = printer.await; + } let import = import?; let commit = &repo.load_commit(&import.merge_commit)?.0; let commit_meta = &glib::VariantDict::new(Some(&commit.child_value(0))); @@ -672,7 +717,8 @@ where repo, imgref, proxyopts, - } => container_store(&repo, &imgref, proxyopts).await, + quiet, + } => container_store(&repo, &imgref, proxyopts, quiet).await, ContainerImageOpts::History { repo, imgref } => { container_history(&repo, &imgref).await } From bf9002c76f86b9e71145048a5a715e04544b9b79 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 24 May 2022 08:30:28 -0400 Subject: [PATCH 373/775] Bump to 0.8 for semver-incompatible API changes --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 812c67877..4383dfd0e 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.7.3" +version = "0.8.0" [dependencies] anyhow = "1.0" From e379a14d2dcfbdfa71a5a19adb8402c6873ed3ad Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 31 May 2022 10:21:08 -0400 Subject: [PATCH 374/775] Convert to Rust 2021 edition No major changes, main thing is using the new implicit Try{From,Into} imports and one place we can use the new array `iter()`. Rust 2021 is supported by our MSRV of 1.58.1. 
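
As a concrete (if trivial) illustration of the edition change, code
like the following now compiles without the explicit
`use std::convert::TryFrom;` that the 2018 edition required:

    // Rust 2021 puts TryFrom/TryInto in the prelude.
    fn to_u32(v: u64) -> anyhow::Result<u32> {
        Ok(u32::try_from(v)?)
    }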
--- cli/Cargo.toml | 2 +- lib/Cargo.toml | 2 +- lib/src/chunking.rs | 1 - lib/src/cli.rs | 1 - lib/src/container/mod.rs | 1 - lib/src/fixture.rs | 1 - lib/src/ima.rs | 2 +- lib/src/refescape.rs | 1 - lib/src/tar/export.rs | 1 - lib/src/tar/import.rs | 1 - lib/src/tar/write.rs | 1 - lib/tests/it/main.rs | 2 +- 12 files changed, 4 insertions(+), 12 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index f102346cf..b80bacd02 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -2,7 +2,7 @@ name = "ostree-ext-cli" version = "0.1.4" authors = ["Colin Walters "] -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" repository = "https://github.com/ostreedev/ostree-rs-ext" readme = "README.md" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 4383dfd0e..a4a3512da 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -1,7 +1,7 @@ [package] authors = ["Colin Walters "] description = "Extension APIs for OSTree" -edition = "2018" +edition = "2021" license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 2f7ffa338..39ae81573 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -4,7 +4,6 @@ use std::borrow::{Borrow, Cow}; use std::collections::{BTreeMap, BTreeSet, HashMap}; -use std::convert::TryInto; use std::fmt::Write; use std::num::NonZeroU32; use std::rc::Rc; diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 7b1ffd9f6..4516973ae 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -9,7 +9,6 @@ use anyhow::{Context, Result}; use camino::Utf8PathBuf; use ostree::{cap_std, gio, glib}; use std::collections::BTreeMap; -use std::convert::TryFrom; use std::ffi::OsString; use std::path::PathBuf; use structopt::StructOpt; diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index b2fbad6bd..040719c91 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -27,7 +27,6 @@ use anyhow::anyhow; use std::borrow::Cow; -use std::convert::{TryFrom, TryInto}; use std::ops::Deref; /// The label injected into a container image that contains the ostree commit SHA-256. diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index ceb6f553f..bf9354c8a 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -17,7 +17,6 @@ use once_cell::sync::Lazy; use ostree::cap_std; use regex::Regex; use std::borrow::Cow; -use std::convert::{TryFrom, TryInto}; use std::io::Write; use std::ops::Add; use std::process::Stdio; diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 18d1f8ec9..a2399e10e 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -18,10 +18,10 @@ use ostree::gio; use std::collections::{BTreeMap, HashMap}; use std::ffi::CString; use std::fs::File; +use std::io::Seek; use std::ops::DerefMut; use std::os::unix::io::AsRawFd; use std::process::{Command, Stdio}; -use std::{convert::TryInto, io::Seek}; /// Extended attribute keys used for IMA. const IMA_XATTR: &str = "security.ima"; diff --git a/lib/src/refescape.rs b/lib/src/refescape.rs index f8bd8f6d2..fbd15e125 100644 --- a/lib/src/refescape.rs +++ b/lib/src/refescape.rs @@ -10,7 +10,6 @@ //! Because the empty path is not valid, `//` is escaped as `/_2F_` (i.e. the second `/` is escaped). use anyhow::Result; -use std::convert::TryInto; use std::fmt::Write; /// Escape a single string; this is a backend of [`prefix_escape_for_ref`]. 
diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 4e8b81bf4..bf5d6dc9d 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -14,7 +14,6 @@ use ostree::gio; use std::borrow::Borrow; use std::borrow::Cow; use std::collections::HashSet; -use std::convert::TryInto; use std::io::BufReader; /// The repository mode generated by a tar export stream. diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 71974add1..c04b20d0c 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -11,7 +11,6 @@ use glib::Variant; use ostree::gio; use std::collections::BTreeSet; use std::collections::HashMap; -use std::convert::TryInto; use std::io::prelude::*; use tracing::{event, instrument, Level}; diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index be20eeee4..8fde2b755 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -16,7 +16,6 @@ use ostree::gio; use ostree::prelude::FileExt; use rustix::fd::FromFd; use std::collections::BTreeMap; -use std::convert::TryInto; use std::io::{BufWriter, Write}; use std::path::Path; use std::process::Stdio; diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index dbff8a460..807a611f9 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1002,7 +1002,7 @@ d /usr/share fixture .update( FileDef::iter_from(ADDITIONS), - IntoIterator::into_iter([Cow::Borrowed("/usr/bin/bash".into())]), + [Cow::Borrowed("/usr/bin/bash".into())].into_iter(), ) .context("Failed to update")?; let from = &format!("{}^", fixture.testref()); From ac5f0ea244d04cf672dd3a9ee9c83ed6973188b9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 9 Jun 2022 14:46:49 -0400 Subject: [PATCH 375/775] lib: Bump to ostree-rs 0.14 This bumped ostree semver, and since we re-export it also bumps ours, but we've already got that queued. When doing this I went to see if there were any new APIs that we should be using, and saw https://github.com/ostreedev/ostree-rs/pull/50 and was like "ah hah, I remember that now" and a quick grep found the place I wanted to use it. --- lib/Cargo.toml | 2 +- lib/src/chunking.rs | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index a4a3512da..1b6fa505c 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -31,7 +31,7 @@ oci-spec = "0.5.4" openat = "0.1.20" openat-ext = "0.2.0" openssl = "0.10.33" -ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.13.5" } +ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.14.0" } pin-project = "1.0" regex = "1.5.4" serde = { features = ["derive"], version = "1.0.125" } diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 39ae81573..d7b8cf99e 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -83,8 +83,7 @@ impl ObjectMetaSized { let mut sizes = HashMap::<&str, u64>::new(); // Populate two mappings above, iterating over the object -> contentid mapping for (checksum, contentid) in map.iter() { - let (_, finfo, _) = repo.load_file(checksum, cancellable)?; - let finfo = finfo.unwrap(); + let finfo = repo.query_file(checksum, cancellable)?.0; let sz = sizes.entry(contentid).or_default(); *sz += finfo.size() as u64; } From fb2adf862f73d489c526bfccfc39c6e4eb802d3b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 13 Jun 2022 10:46:14 -0400 Subject: [PATCH 376/775] chunking: Use `query_file` This drops an `unwrap()`. 
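
Side by side, the change in the diff below is (sketch; `checksum` is a
content object checksum and `cancellable` a gio cancellable):

    // Before: load_file() has a nullable return, forcing an unwrap().
    let (_, finfo, _) = repo.load_file(checksum, cancellable)?;
    let finfo = finfo.unwrap();

    // After: query_file() returns the file info directly.
    let finfo = repo.query_file(checksum, cancellable)?.0;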
---
 lib/src/chunking.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index d7b8cf99e..2266c2774 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -174,10 +174,7 @@ fn generate_chunking_recurse(
         let fpath = gen.path.join(name.to_str());
         hex::encode_to_slice(csum, &mut hexbuf)?;
         let checksum = std::str::from_utf8(&hexbuf)?;
-        let (_, meta, _) = repo.load_file(checksum, gio::NONE_CANCELLABLE)?;
-        // SAFETY: We know this API returns this value; it only has a return nullable because the
-        // caller can pass NULL to skip it.
-        let meta = meta.unwrap();
+        let meta = repo.query_file(checksum, gio::NONE_CANCELLABLE)?.0;
         let size = meta.size() as u64;
         let entry = chunk.content.entry(RcStr::from(checksum)).or_default();
         entry.0 = size;
From 3e40c4a9067b9323aa4917e8c627ef6155008b84 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 23 Jun 2022 17:49:13 -0400
Subject: [PATCH 377/775] container/encapsulate: Drop two `unwrap()`

I happened to be looking at this code and realized we didn't need to
do either `unwrap()` here.
---
 lib/src/container/encapsulate.rs | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index 0b6e4fd59..254ac5186 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -156,11 +156,8 @@ fn build_oci(
         .map(|meta| crate::chunking::Chunking::from_mapping(repo, commit, meta, opts.max_layers))
         .transpose()?;
 
-    if let Some(version) =
-        commit_meta.lookup_value("version", Some(glib::VariantTy::new("s").unwrap()))
-    {
-        let version = version.str().unwrap();
-        labels.insert("version".into(), version.into());
+    if let Some(version) = commit_meta.lookup::<String>("version")? {
+        labels.insert("version".into(), version);
     }
 
     labels.insert(OSTREE_COMMIT_LABEL.into(), commit.into());
From e6bc8c3999c7a909c014efe06773c3ee367b4339 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 23 Jun 2022 18:09:15 -0400
Subject: [PATCH 378/775] Add a helper to generate a test fixture externally

To aid debugging.
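
A rough interactive-use sketch (the fixture API is crate-internal, so
details may drift; this mirrors the new `create_fixture` plumbing in
the diff below):

    // Build the same fixture the unit tests use, and keep the tempdir around.
    let fixture = crate::fixture::Fixture::new_v1()?;
    let path = fixture.into_tempdir().into_path();
    println!("fixture at {:?}", path);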
--- lib/src/cli.rs | 3 +++ lib/src/fixture.rs | 9 +++++++-- lib/src/integrationtest.rs | 16 ++++++++++++++++ 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 4516973ae..3dbefab1a 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -316,6 +316,8 @@ struct ImaSignOpts { enum TestingOpts { /// Detect the current environment DetectEnv, + /// Generate a test fixture + CreateFixture, /// Execute integration tests, assuming mutable environment Run, /// Execute IMA tests @@ -658,6 +660,7 @@ fn testing(opts: &TestingOpts) -> Result<()> { println!("{}", s); Ok(()) } + TestingOpts::CreateFixture => crate::integrationtest::create_fixture(), TestingOpts::Run => crate::integrationtest::run_tests(), TestingOpts::RunIMA => crate::integrationtest::test_ima(), TestingOpts::FilterTar => { diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index bf9354c8a..3d9267965 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -335,7 +335,7 @@ fn build_mapping_recurse( #[derive(Debug)] pub struct Fixture { // Just holds a reference - _tempdir: tempfile::TempDir, + tempdir: tempfile::TempDir, pub dir: Arc, pub path: Utf8PathBuf, srcrepo: ostree::Repo, @@ -383,7 +383,7 @@ impl Fixture { let destrepo = ostree::Repo::create_at_dir(&dir, "dest/repo", ostree::RepoMode::BareUser, None)?; Ok(Self { - _tempdir: tempdir, + tempdir, dir, path, srcrepo, @@ -582,6 +582,11 @@ impl Fixture { Ok(ret) } + /// Unload all in-memory data, and return the underlying temporary directory without deleting it. + pub fn into_tempdir(self) -> tempfile::TempDir { + self.tempdir + } + #[context("Exporting tar")] pub fn export_tar(&self) -> Result<&'static Utf8Path> { let cancellable = gio::NONE_CANCELLABLE; diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 49ffd2dd1..1cadca034 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -90,6 +90,22 @@ fn test_proxy_auth() -> Result<()> { Ok(()) } +/// Create a test fixture in the same way our unit tests does, and print +/// the location of the temporary directory. Also export a chunked image. +/// Useful for debugging things interactively. +pub(crate) fn create_fixture() -> Result<()> { + let fixture = crate::fixture::Fixture::new_v1()?; + let imgref = tokio::task::block_in_place(|| { + tokio::runtime::Handle::current() + .block_on(fixture.export_container()) + .map(|v| v.0) + })?; + println!("Wrote: {:?}", imgref); + let path = fixture.into_tempdir().into_path(); + println!("Wrote: {:?}", path); + Ok(()) +} + pub(crate) fn test_ima() -> Result<()> { use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; From 764ce5b2544688eb5107bc56bd5a768060957f78 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 23 Jun 2022 21:36:02 -0400 Subject: [PATCH 379/775] container/tar: Thread commit checksum down into tar writer Prep for future refactoring where we will want to cache things related to it. We don't have any design for emitting multiple commits into a single tar stream, so drop that implicit ability. 
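
Concretely, the (internal) writer is now constructed against a single
commit up front; a sketch based on the new signature in the diff below:

    // One writer maps to exactly one commit; the checksum is bound at
    // construction time rather than passed to write_commit().
    let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options);
    writer.write_commit()?;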
--- lib/src/container/encapsulate.rs | 6 ++++-- lib/src/tar/export.rs | 22 ++++++++++++++++------ 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 254ac5186..a7b175b38 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -77,6 +77,7 @@ fn commit_meta_to_labels<'a>( #[allow(clippy::too_many_arguments)] fn export_chunked( repo: &ostree::Repo, + commit: &str, ociw: &mut OciDir, manifest: &mut oci_image::ImageManifest, imgcfg: &mut oci_image::ImageConfiguration, @@ -91,7 +92,7 @@ fn export_chunked( .enumerate() .map(|(i, chunk)| -> Result<_> { let mut w = ociw.create_layer(compression)?; - ostree_tar::export_chunk(repo, &chunk, &mut w) + ostree_tar::export_chunk(repo, commit, &chunk, &mut w) .with_context(|| format!("Exporting chunk {i}"))?; let w = w.into_inner()?; Ok((w.complete()?, chunk.name)) @@ -101,7 +102,7 @@ fn export_chunked( ociw.push_layer(manifest, imgcfg, layer, &name); } let mut w = ociw.create_layer(compression)?; - ostree_tar::export_final_chunk(repo, &chunking, &mut w)?; + ostree_tar::export_final_chunk(repo, commit, &chunking, &mut w)?; let w = w.into_inner()?; let final_layer = w.complete()?; labels.insert( @@ -182,6 +183,7 @@ fn build_oci( if let Some(chunking) = chunking { export_chunked( repo, + commit, &mut writer, &mut manifest, &mut imgcfg, diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index bf5d6dc9d..5222dfc75 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -62,6 +62,7 @@ fn map_path_v1(p: &Utf8Path) -> &Utf8Path { struct OstreeTarWriter<'a, W: std::io::Write> { repo: &'a ostree::Repo, + commit_checksum: &'a str, out: &'a mut tar::Builder, options: ExportOptions, wrote_initdirs: bool, @@ -133,9 +134,15 @@ pub(crate) fn tar_append_default_data( } impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { - fn new(repo: &'a ostree::Repo, out: &'a mut tar::Builder, options: ExportOptions) -> Self { + fn new( + repo: &'a ostree::Repo, + commit_checksum: &'a str, + out: &'a mut tar::Builder, + options: ExportOptions, + ) -> Self { Self { repo, + commit_checksum, out, options, wrote_initdirs: false, @@ -248,9 +255,10 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { } /// Recursively serialize a commit object to the target tar stream. - fn write_commit(&mut self, checksum: &str) -> Result<()> { + fn write_commit(&mut self) -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; + let checksum = self.commit_checksum; let (commit_v, _) = self.repo.load_commit(checksum)?; let commit_v = &commit_v; @@ -530,8 +538,8 @@ fn impl_export( out: &mut tar::Builder, options: ExportOptions, ) -> Result<()> { - let writer = &mut OstreeTarWriter::new(repo, out, options); - writer.write_commit(commit_checksum)?; + let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options); + writer.write_commit()?; Ok(()) } @@ -584,10 +592,11 @@ fn write_chunk( /// Output a chunk to a tar stream. 
 pub(crate) fn export_chunk<W: std::io::Write>(
     repo: &ostree::Repo,
+    commit: &str,
     chunk: &chunking::Chunk,
     out: &mut tar::Builder<W>,
 ) -> Result<()> {
-    let writer = &mut OstreeTarWriter::new(repo, out, ExportOptions::default());
+    let writer = &mut OstreeTarWriter::new(repo, commit, out, ExportOptions::default());
     writer.write_repo_structure()?;
     write_chunk(writer, chunk)
 }
@@ -596,6 +605,7 @@ pub(crate) fn export_chunk<W: std::io::Write>(
 #[context("Exporting final chunk")]
 pub(crate) fn export_final_chunk<W: std::io::Write>(
     repo: &ostree::Repo,
+    commit_checksum: &str,
     chunking: &Chunking,
     out: &mut tar::Builder<W>,
 ) -> Result<()> {
@@ -606,7 +616,7 @@ pub(crate) fn export_final_chunk<W: std::io::Write>(
         format_version: 1,
         ..Default::default()
     };
-    let writer = &mut OstreeTarWriter::new(repo, out, options);
+    let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options);
     writer.write_repo_structure()?;
 
     let (commit_v, _) = repo.load_commit(&chunking.commit)?;
From 1ec1e96b5b3e3f53a5708b7b71adb3d45fef0edb Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 23 Jun 2022 21:40:28 -0400
Subject: [PATCH 380/775] container/tar: Pass ownership of chunks down into
 export

There's no reason to keep all the data alive until the export is done,
and it may allow us to optimize things in the future.
---
 lib/src/chunking.rs              |  3 ++-
 lib/src/container/encapsulate.rs |  4 ++--
 lib/src/tar/export.rs            | 10 +++++-----
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 2266c2774..5b70e52cb 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -23,11 +23,12 @@ use serde::{Deserialize, Serialize};
 pub(crate) const MAX_CHUNKS: u32 = 64;
 
 type RcStr = Rc<str>;
+pub(crate) type ChunkMapping = BTreeMap<RcStr, (u64, Vec<Utf8PathBuf>)>;
 
 #[derive(Debug, Default)]
 pub(crate) struct Chunk {
     pub(crate) name: String,
-    pub(crate) content: BTreeMap<RcStr, (u64, Vec<Utf8PathBuf>)>,
+    pub(crate) content: ChunkMapping,
     pub(crate) size: u64,
 }
 
diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index a7b175b38..3c8f8213a 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -92,7 +92,7 @@ fn export_chunked(
         .enumerate()
         .map(|(i, chunk)| -> Result<_> {
             let mut w = ociw.create_layer(compression)?;
-            ostree_tar::export_chunk(repo, commit, &chunk, &mut w)
+            ostree_tar::export_chunk(repo, commit, chunk.content, &mut w)
                 .with_context(|| format!("Exporting chunk {i}"))?;
             let w = w.into_inner()?;
             Ok((w.complete()?, chunk.name))
@@ -102,7 +102,7 @@ fn export_chunked(
         ociw.push_layer(manifest, imgcfg, layer, &name);
     }
     let mut w = ociw.create_layer(compression)?;
-    ostree_tar::export_final_chunk(repo, commit, &chunking, &mut w)?;
+    ostree_tar::export_final_chunk(repo, commit, chunking, &mut w)?;
     let w = w.into_inner()?;
     let final_layer = w.complete()?;
     labels.insert(
diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 5222dfc75..37cd5d2ff 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -576,9 +576,9 @@
 /// has been written to the tar stream.
 fn write_chunk<W: std::io::Write>(
     writer: &mut OstreeTarWriter<W>,
-    chunk: &chunking::Chunk,
+    chunk: chunking::ChunkMapping,
 ) -> Result<()> {
-    for (checksum, (_size, paths)) in chunk.content.iter() {
+    for (checksum, (_size, paths)) in chunk.into_iter() {
         let (objpath, h) = writer.append_content(checksum.borrow())?;
         for path in paths.iter() {
             let path = path_for_tar_v1(path);
@@ -593,7 +593,7 @@ fn write_chunk<W: std::io::Write>(
 pub(crate) fn export_chunk<W: std::io::Write>(
     repo: &ostree::Repo,
     commit: &str,
-    chunk: &chunking::Chunk,
+    chunk: chunking::ChunkMapping,
     out: &mut tar::Builder<W>,
 ) -> Result<()> {
     let writer = &mut OstreeTarWriter::new(repo, commit, out, ExportOptions::default());
@@ -606,7 +606,7 @@ pub(crate) fn export_chunk<W: std::io::Write>(
 pub(crate) fn export_final_chunk<W: std::io::Write>(
     repo: &ostree::Repo,
     commit_checksum: &str,
-    chunking: &Chunking,
+    chunking: Chunking,
     out: &mut tar::Builder<W>,
 ) -> Result<()> {
     let cancellable = gio::NONE_CANCELLABLE;
@@ -638,7 +638,7 @@ pub(crate) fn export_final_chunk<W: std::io::Write>(
         writer.append(objtype, checksum, &v)?;
     }
 
-    write_chunk(writer, &chunking.remainder)
+    write_chunk(writer, chunking.remainder.content)
 }
 
 /// Process an exported tar stream, and update the detached metadata.
From 0c35f9f310cf3ac8bb3c16f1e205784e781c396e Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 24 Jun 2022 10:52:37 -0400
Subject: [PATCH 381/775] tar/export: Make constructor fallible

Prep for a future patch which will add actually fallible code there.
---
 lib/src/tar/export.rs | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 37cd5d2ff..e43a134a0 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -139,8 +139,8 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
         commit_checksum: &'a str,
         out: &'a mut tar::Builder<W>,
         options: ExportOptions,
-    ) -> Self {
-        Self {
+    ) -> Result<Self> {
+        let r = Self {
             repo,
             commit_checksum,
             out,
@@ -150,7 +150,8 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
             wrote_dirtree: HashSet::new(),
             wrote_content: HashSet::new(),
             wrote_xattrs: HashSet::new(),
-        }
+        };
+        Ok(r)
     }
 
     /// Convert the ostree mode to tar mode.
@@ -538,7 +539,7 @@ fn impl_export<W: std::io::Write>(
     out: &mut tar::Builder<W>,
     options: ExportOptions,
 ) -> Result<()> {
-    let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options);
+    let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options)?;
     writer.write_commit()?;
     Ok(())
 }
@@ -596,7 +597,7 @@ pub(crate) fn export_chunk<W: std::io::Write>(
     chunk: chunking::ChunkMapping,
     out: &mut tar::Builder<W>,
 ) -> Result<()> {
-    let writer = &mut OstreeTarWriter::new(repo, commit, out, ExportOptions::default());
+    let writer = &mut OstreeTarWriter::new(repo, commit, out, ExportOptions::default())?;
     writer.write_repo_structure()?;
     write_chunk(writer, chunk)
 }
@@ -616,7 +617,7 @@ pub(crate) fn export_final_chunk<W: std::io::Write>(
     format_version: 1,
     ..Default::default()
 };
-    let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options);
+    let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options)?;
     writer.write_repo_structure()?;
From e1513a3ab1f6fbb20cbfdab965148689a95edea2 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 24 Jun 2022 11:01:12 -0400
Subject: [PATCH 382/775] tar/export: Load and cache commit object

The writer can only write a single commit. Having this data already
loaded will help with future patches for the chunked export.
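
Since the commit variant is then held on the writer, later export
paths can reuse it without another repo read; e.g. (sketch of the
cached field introduced in the diff below):

    // self.commit_object is loaded once in new() via repo.load_commit().
    let commit_bytes = self.commit_object.data_as_bytes();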
--- lib/src/tar/export.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index e43a134a0..34fa237eb 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -63,6 +63,7 @@ fn map_path_v1(p: &Utf8Path) -> &Utf8Path { struct OstreeTarWriter<'a, W: std::io::Write> { repo: &'a ostree::Repo, commit_checksum: &'a str, + commit_object: glib::Variant, out: &'a mut tar::Builder, options: ExportOptions, wrote_initdirs: bool, @@ -140,9 +141,11 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { out: &'a mut tar::Builder, options: ExportOptions, ) -> Result { + let commit_object = repo.load_commit(commit_checksum)?.0; let r = Self { repo, commit_checksum, + commit_object, out, options, wrote_initdirs: false, @@ -260,10 +263,8 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let cancellable = gio::NONE_CANCELLABLE; let checksum = self.commit_checksum; - let (commit_v, _) = self.repo.load_commit(checksum)?; - let commit_v = &commit_v; - let commit_bytes = commit_v.data_as_bytes(); + let commit_bytes = self.commit_object.data_as_bytes(); let commit_bytes = commit_bytes.try_as_aligned()?; let commit = gv_commit!().cast(commit_bytes); let commit = commit.to_tuple(); @@ -283,7 +284,11 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { // Now, we create sysroot/ and everything under it self.write_repo_structure()?; - self.append(ostree::ObjectType::Commit, checksum, commit_v)?; + self.append( + ostree::ObjectType::Commit, + checksum, + &self.commit_object.clone(), + )?; if let Some(commitmeta) = self .repo .read_commit_detached_metadata(checksum, cancellable)? From 05ffff6f987780994108b39b554c767b00a532c0 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 12:49:51 -0400 Subject: [PATCH 383/775] tar/export: Minor import cleanup --- lib/src/tar/export.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 34fa237eb..068f8711e 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -1,7 +1,6 @@ //! APIs for creating container images from OSTree commits use crate::chunking; -use crate::chunking::Chunking; use crate::objgv::*; use anyhow::{anyhow, bail, ensure, Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; @@ -612,7 +611,7 @@ pub(crate) fn export_chunk( pub(crate) fn export_final_chunk( repo: &ostree::Repo, commit_checksum: &str, - chunking: Chunking, + chunking: chunking::Chunking, out: &mut tar::Builder, ) -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; From ec9dab68dae1479279dd1c930dd410507db1a97a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 12:56:35 -0400 Subject: [PATCH 384/775] tar: Add a common helper to write commit object and detached metadata General cleanup, prep for further work. 
--- lib/src/chunking.rs | 1 + lib/src/tar/export.rs | 46 +++++++++++++++++++++---------------------- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 5b70e52cb..e2226bdbd 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -107,6 +107,7 @@ impl ObjectMetaSized { #[derive(Debug, Default)] pub struct Chunking { pub(crate) metadata_size: u64, + #[allow(dead_code)] pub(crate) commit: Box, pub(crate) meta: Vec, pub(crate) remainder: Chunk, diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 068f8711e..19af5d354 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -261,8 +261,6 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn write_commit(&mut self) -> Result<()> { let cancellable = gio::NONE_CANCELLABLE; - let checksum = self.commit_checksum; - let commit_bytes = self.commit_object.data_as_bytes(); let commit_bytes = commit_bytes.try_as_aligned()?; let commit = gv_commit!().cast(commit_bytes); @@ -283,17 +281,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { // Now, we create sysroot/ and everything under it self.write_repo_structure()?; - self.append( - ostree::ObjectType::Commit, - checksum, - &self.commit_object.clone(), - )?; - if let Some(commitmeta) = self - .repo - .read_commit_detached_metadata(checksum, cancellable)? - { - self.append(ostree::ObjectType::CommitMeta, checksum, &commitmeta)?; - } + self.append_commit_object()?; // The ostree dirmeta object for the root. self.append(ostree::ObjectType::DirMeta, metadata_checksum, &metadata_v)?; @@ -308,6 +296,25 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { Ok(()) } + fn append_commit_object(&mut self) -> Result<()> { + self.append( + ostree::ObjectType::Commit, + self.commit_checksum, + &self.commit_object.clone(), + )?; + if let Some(commitmeta) = self + .repo + .read_commit_detached_metadata(self.commit_checksum, gio::NONE_CANCELLABLE)? + { + self.append( + ostree::ObjectType::CommitMeta, + self.commit_checksum, + &commitmeta, + )?; + } + Ok(()) + } + fn append( &mut self, objtype: ostree::ObjectType, @@ -614,7 +621,6 @@ pub(crate) fn export_final_chunk( chunking: chunking::Chunking, out: &mut tar::Builder, ) -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; // For chunking, we default to format version 1 #[allow(clippy::needless_update)] let options = ExportOptions { @@ -624,16 +630,8 @@ pub(crate) fn export_final_chunk( let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options)?; writer.write_repo_structure()?; - let (commit_v, _) = repo.load_commit(&chunking.commit)?; - let commit_v = &commit_v; - writer.append(ostree::ObjectType::Commit, &chunking.commit, commit_v)?; - if let Some(commitmeta) = repo.read_commit_detached_metadata(&chunking.commit, cancellable)? { - writer.append( - ostree::ObjectType::CommitMeta, - &chunking.commit, - &commitmeta, - )?; - } + // Write the commit + writer.append_commit_object()?; // In the chunked case, the final layer has all ostree metadata objects. for meta in &chunking.meta { From c672a8de9843360abcb8d5588274fb28ac7f342e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 12:57:19 -0400 Subject: [PATCH 385/775] chunking: Drop unnecessary cached commit digest The callers are already storing this, no need to do so anymore. 
--- lib/src/chunking.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index e2226bdbd..40a6eeccf 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -107,8 +107,6 @@ impl ObjectMetaSized { #[derive(Debug, Default)] pub struct Chunking { pub(crate) metadata_size: u64, - #[allow(dead_code)] - pub(crate) commit: Box, pub(crate) meta: Vec, pub(crate) remainder: Chunk, pub(crate) chunks: Vec, @@ -259,7 +257,6 @@ impl Chunking { generate_chunking_recurse(repo, &mut gen, &mut chunk, &contents_v)?; let chunking = Chunking { - commit: Box::from(rev.as_str()), metadata_size: gen.metadata_size, meta: gen.meta, remainder: chunk, From 61291627b9e9856984d6929a5192729dddd8360f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 14:26:04 -0400 Subject: [PATCH 386/775] ci: Allow manually invoking tests I want to be able to run them manually too to sanity check things. https://docs.github.com/en/actions/using-workflows/triggering-a-workflow#defining-inputs-for-manually-triggered-workflows --- .github/workflows/rust.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 06efa0fb4..336088d9e 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -11,6 +11,7 @@ on: branches: [main] pull_request: branches: [main] + workflow_dispatch: {} env: CARGO_TERM_COLOR: always From 290ebfa2d3e49f1cdf875485e5c03f2ceb93854d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 14:58:01 -0400 Subject: [PATCH 387/775] ocidir: Port to cap-std, drop openat Part of an ongoing effort. --- lib/Cargo.toml | 3 +- lib/src/container/encapsulate.rs | 7 +-- lib/src/container/ocidir.rs | 69 ++++++++++++++---------- lib/src/container/update_detachedmeta.rs | 8 +-- lib/src/integrationtest.rs | 7 +-- 5 files changed, 55 insertions(+), 39 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 1b6fa505c..56c5085b6 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -18,6 +18,7 @@ camino = "1.0.4" chrono = "0.4.19" cjson = "0.1.1" cap-std-ext = ">= 0.24, <= 0.25" +cap-tempfile = "0.24" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures-util = "0.3.13" @@ -28,8 +29,6 @@ io-lifetimes = "0.5" once_cell = "1.9" libc = "0.2.92" oci-spec = "0.5.4" -openat = "0.1.20" -openat-ext = "0.2.0" openssl = "0.10.33" ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.14.0" } pin-project = "1.0" diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 254ac5186..6b0a9255c 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -7,6 +7,8 @@ use crate::chunking::{Chunking, ObjectMetaSized}; use crate::container::skopeo; use crate::tar as ostree_tar; use anyhow::{anyhow, Context, Result}; +use cap_std::fs::Dir; +use cap_std_ext::cap_std; use fn_error_context::context; use gio::glib; use oci_spec::image as oci_image; @@ -15,7 +17,6 @@ use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; use std::num::NonZeroU32; use std::path::Path; -use std::rc::Rc; use tracing::instrument; /// Annotation injected into the layer to say that this is an ostree commit. 
@@ -124,8 +125,8 @@ fn build_oci( ) -> Result { // Explicitly error if the target exists std::fs::create_dir(ocidir_path).context("Creating OCI dir")?; - let ocidir = Rc::new(openat::Dir::open(ocidir_path)?); - let mut writer = ocidir::OciDir::create(ocidir)?; + let ocidir = Dir::open_ambient_dir(ocidir_path, cap_std::ambient_authority())?; + let mut writer = ocidir::OciDir::create(&ocidir)?; let commit = repo.require_rev(rev)?; let commit = commit.as_str(); diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 4f6ee862f..20260fd57 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -3,17 +3,19 @@ use anyhow::{anyhow, Context, Result}; use camino::Utf8Path; +use cap_std::fs::Dir; +use cap_std_ext::cap_std; +use cap_std_ext::dirext::CapStdExtDirExt; use flate2::write::GzEncoder; use fn_error_context::context; use oci_image::MediaType; use oci_spec::image::{self as oci_image, Descriptor}; -use openat_ext::*; use openssl::hash::{Hasher, MessageDigest}; use std::collections::HashMap; use std::fs::File; use std::io::{prelude::*, BufReader}; +use std::os::unix::fs::DirBuilderExt; use std::path::{Path, PathBuf}; -use std::rc::Rc; /// Path inside an OCI directory to the blobs const BLOBDIR: &str = "blobs/sha256"; @@ -53,7 +55,7 @@ impl Layer { /// Create an OCI blob. pub(crate) struct BlobWriter<'a> { pub(crate) hash: Hasher, - pub(crate) target: Option>, + pub(crate) target: Option>, size: u64, } @@ -65,13 +67,13 @@ pub(crate) struct RawLayerWriter<'a> { } pub(crate) struct OciDir { - pub(crate) dir: Rc, + pub(crate) dir: std::sync::Arc, } /// Write a serializable data (JSON) as an OCI blob #[context("Writing json blob")] pub(crate) fn write_json_blob( - ocidir: &openat::Dir, + ocidir: &Dir, v: &S, media_type: oci_image::MediaType, ) -> Result { @@ -111,29 +113,33 @@ pub(crate) fn new_empty_manifest() -> oci_image::ImageManifestBuilder { impl OciDir { /// Create a new, empty OCI directory at the target path, which should be empty. - pub(crate) fn create(dir: impl Into>) -> Result { - let dir = dir.into(); - dir.ensure_dir_all(BLOBDIR, 0o755)?; - dir.write_file_contents("oci-layout", 0o644, r#"{"imageLayoutVersion":"1.0.0"}"#)?; + pub(crate) fn create(dir: &Dir) -> Result { + let mut db = cap_std::fs::DirBuilder::new(); + db.recursive(true).mode(0o755); + dir.ensure_dir_with(BLOBDIR, &db)?; + dir.atomic_write("oci-layout", r#"{"imageLayoutVersion":"1.0.0"}"#)?; Self::open(dir) } /// Clone an OCI directory, using reflinks for blobs. - pub(crate) fn clone_to(&self, destdir: &openat::Dir, p: impl AsRef) -> Result { + pub(crate) fn clone_to(&self, destdir: &Dir, p: impl AsRef) -> Result { let p = p.as_ref(); - destdir.ensure_dir(p, 0o755)?; - let cloned = Self::create(destdir.sub_dir(p)?)?; - for blob in self.dir.list_dir(BLOBDIR)? { + destdir.create_dir(p)?; + let cloned = Self::create(&destdir.open_dir(p)?)?; + for blob in self.dir.read_dir(BLOBDIR)? { let blob = blob?; let path = Path::new(BLOBDIR).join(blob.file_name()); - self.dir.copy_file_at(&path, destdir, &path)?; + let mut src = self.dir.open(&path).map(BufReader::new)?; + self.dir + .atomic_replace_with(&path, |w| std::io::copy(&mut src, w))?; } Ok(cloned) } /// Open an existing OCI directory. 
- pub(crate) fn open(dir: impl Into>) -> Result { - Ok(Self { dir: dir.into() }) + pub(crate) fn open(dir: &Dir) -> Result { + let dir = std::sync::Arc::new(dir.try_clone()?); + Ok(Self { dir }) } /// Create a writer for a new blob (expected to be a tar stream) @@ -211,7 +217,10 @@ impl OciDir { pub(crate) fn read_blob(&self, desc: &oci_spec::image::Descriptor) -> Result { let path = Self::parse_descriptor_to_path(desc)?; - self.dir.open_file(&path).map_err(Into::into) + self.dir + .open(&path) + .map_err(Into::into) + .map(|f| f.into_std()) } /// Read a JSON blob. @@ -250,7 +259,7 @@ impl OciDir { .build() .unwrap(); self.dir - .write_file_with("index.json", 0o644, |w| -> Result<()> { + .atomic_replace_with("index.json", |w| -> Result<()> { cjson::to_writer(w, &index_data).map_err(|e| anyhow::anyhow!("{:?}", e))?; Ok(()) })?; @@ -268,7 +277,7 @@ impl OciDir { ) -> Result<(oci_image::ImageManifest, Descriptor)> { let f = self .dir - .open_file("index.json") + .open("index.json") .context("Failed to open index.json")?; let idx: oci_image::ImageIndex = serde_json::from_reader(BufReader::new(f))?; let desc = match idx.manifests().as_slice() { @@ -282,11 +291,11 @@ impl OciDir { impl<'a> BlobWriter<'a> { #[context("Creating blob writer")] - fn new(ocidir: &'a openat::Dir) -> Result { + fn new(ocidir: &'a Dir) -> Result { Ok(Self { hash: Hasher::new(MessageDigest::sha256())?, // FIXME add ability to choose filename after completion - target: Some(ocidir.new_file_writer(0o644)?), + target: Some(cap_tempfile::TempFile::new(ocidir)?), size: 0, }) } @@ -295,8 +304,9 @@ impl<'a> BlobWriter<'a> { /// Finish writing this blob object. pub(crate) fn complete(mut self) -> Result { let sha256 = hex::encode(self.hash.finish()?); - let target = &format!("{}/{}", BLOBDIR, sha256); - self.target.take().unwrap().complete(target)?; + let destname = &format!("{}/{}", BLOBDIR, sha256); + let target = self.target.take().unwrap(); + target.replace(destname)?; Ok(Blob { sha256, size: self.size, @@ -307,7 +317,11 @@ impl<'a> BlobWriter<'a> { impl<'a> std::io::Write for BlobWriter<'a> { fn write(&mut self, srcbuf: &[u8]) -> std::io::Result { self.hash.update(srcbuf)?; - self.target.as_mut().unwrap().writer.write_all(srcbuf)?; + self.target + .as_mut() + .unwrap() + .as_file_mut() + .write_all(srcbuf)?; self.size += srcbuf.len() as u64; Ok(srcbuf.len()) } @@ -319,7 +333,7 @@ impl<'a> std::io::Write for BlobWriter<'a> { impl<'a> RawLayerWriter<'a> { /// Create a writer for a gzip compressed layer blob. 
- fn new(ocidir: &'a openat::Dir, c: Option) -> Result { + fn new(ocidir: &'a Dir, c: Option) -> Result { let bw = BlobWriter::new(ocidir)?; Ok(Self { bw, @@ -400,9 +414,8 @@ mod tests { #[test] fn test_build() -> Result<()> { - let td = tempfile::tempdir()?; - let td = openat::Dir::open(td.path())?; - let w = OciDir::create(td)?; + let td = cap_tempfile::tempdir(cap_std::ambient_authority())?; + let w = OciDir::create(&td)?; let mut layerw = w.create_raw_layer(None)?; layerw.write_all(b"pretend this is a tarball")?; let root_layer = layerw.complete()?; diff --git a/lib/src/container/update_detachedmeta.rs b/lib/src/container/update_detachedmeta.rs index 35d8d6c45..cef2bdce9 100644 --- a/lib/src/container/update_detachedmeta.rs +++ b/lib/src/container/update_detachedmeta.rs @@ -3,8 +3,9 @@ use crate::container::{ocidir, skopeo}; use crate::container::{store as container_store, Transport}; use anyhow::{anyhow, Context, Result}; use camino::Utf8Path; +use cap_std::fs::Dir; +use cap_std_ext::cap_std; use std::io::{BufReader, BufWriter}; -use std::rc::Rc; /// Given an OSTree container image reference, update the detached metadata (e.g. GPG signature) /// while preserving all other container image metadata. @@ -37,8 +38,9 @@ pub async fn update_detached_metadata( // Fork a thread to do the heavy lifting of filtering the tar stream, rewriting the manifest/config. crate::tokio_util::spawn_blocking_cancellable_flatten(move |cancellable| { // Open the temporary OCI directory. - let tempsrc = Rc::new(openat::Dir::open(tempsrc_ref_path).context("Opening src")?); - let tempsrc = ocidir::OciDir::open(tempsrc)?; + let tempsrc = Dir::open_ambient_dir(tempsrc_ref_path, cap_std::ambient_authority()) + .context("Opening src")?; + let tempsrc = ocidir::OciDir::open(&tempsrc)?; // Load the manifest, platform, and config let (mut manifest, manifest_descriptor) = tempsrc diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 1cadca034..d1acfe614 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -5,6 +5,8 @@ use std::path::Path; use crate::container::ocidir; use anyhow::Result; use camino::Utf8Path; +use cap_std::fs::Dir; +use cap_std_ext::cap_std; use fn_error_context::context; use gio::prelude::*; use oci_spec::image as oci_image; @@ -27,10 +29,9 @@ pub(crate) fn detectenv() -> &'static str { /// Should only be enabled for testing. #[context("Generating derived oci")] pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef) -> Result<()> { - use std::rc::Rc; let src = src.as_ref(); - let src = Rc::new(openat::Dir::open(src.as_std_path())?); - let src = ocidir::OciDir::open(src)?; + let src = Dir::open_ambient_dir(src, cap_std::ambient_authority())?; + let src = ocidir::OciDir::open(&src)?; let dir = dir.as_ref(); let mut manifest = src.read_manifest()?; let mut config: oci_spec::image::ImageConfiguration = src.read_json_blob(manifest.config())?; From 95ba5e941b2b0436a451b89ae591f77c751431d2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 14:11:45 -0400 Subject: [PATCH 388/775] container/encapsulate: Pass down options farther Pass the `ExportOpts` farther down into processing; prep for adding more fields and using them there. Add a helper method that gives us the compression from the options, instead of open-coding it. 
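For illustration, the helper-method shape this introduces, as a compilable sketch (a simplified `Opts` stands in for the real `ExportOpts`; only `flate2::Compression` is the real API here):

    use flate2::Compression;

    /// A simplified stand-in for the real `ExportOpts`.
    #[derive(Debug, Default)]
    struct Opts {
        compress: bool,
    }

    impl Opts {
        /// Centralize the options -> compression-level mapping so call
        /// sites take `&Opts` instead of open-coding the if/else.
        fn compression(&self) -> Compression {
            if self.compress {
                Compression::default()
            } else {
                Compression::none()
            }
        }
    }

    fn main() {
        let opts = Opts::default();
        assert_eq!(opts.compression().level(), Compression::none().level());
    }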
--- lib/src/container/encapsulate.rs | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 70b5add02..a3858e576 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -9,6 +9,7 @@ use crate::tar as ostree_tar; use anyhow::{anyhow, Context, Result}; use cap_std::fs::Dir; use cap_std_ext::cap_std; +use flate2::Compression; use fn_error_context::context; use gio::glib; use oci_spec::image as oci_image; @@ -39,10 +40,10 @@ fn export_ostree_ref( repo: &ostree::Repo, rev: &str, writer: &mut OciDir, - compression: Option, + opts: &ExportOpts, ) -> Result { let commit = repo.require_rev(rev)?; - let mut w = writer.create_raw_layer(compression)?; + let mut w = writer.create_raw_layer(Some(opts.compression()))?; ostree_tar::export_commit(repo, commit.as_str(), &mut w, None)?; w.complete() } @@ -84,7 +85,7 @@ fn export_chunked( imgcfg: &mut oci_image::ImageConfiguration, labels: &mut HashMap, mut chunking: Chunking, - compression: Option, + opts: &ExportOpts, description: &str, ) -> Result<()> { let layers: Result> = chunking @@ -92,7 +93,7 @@ fn export_chunked( .into_iter() .enumerate() .map(|(i, chunk)| -> Result<_> { - let mut w = ociw.create_layer(compression)?; + let mut w = ociw.create_layer(Some(opts.compression()))?; ostree_tar::export_chunk(repo, commit, chunk.content, &mut w) .with_context(|| format!("Exporting chunk {i}"))?; let w = w.into_inner()?; @@ -102,7 +103,7 @@ fn export_chunked( for (layer, name) in layers? { ociw.push_layer(manifest, imgcfg, layer, &name); } - let mut w = ociw.create_layer(compression)?; + let mut w = ociw.create_layer(Some(opts.compression()))?; ostree_tar::export_final_chunk(repo, commit, chunking, &mut w)?; let w = w.into_inner()?; let final_layer = w.complete()?; @@ -167,12 +168,6 @@ fn build_oci( labels.insert(k.into(), v.into()); } - let compression = if opts.compress { - flate2::Compression::default() - } else { - flate2::Compression::none() - }; - let mut annos = HashMap::new(); annos.insert(BLOB_OSTREE_ANNOTATION.to_string(), "true".to_string()); let description = if commit_subject.is_empty() { @@ -190,11 +185,11 @@ fn build_oci( &mut imgcfg, labels, chunking, - Some(compression), + &opts, &description, )?; } else { - let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, Some(compression))?; + let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, &opts)?; labels.insert( crate::container::OSTREE_DIFFID_LABEL.into(), format!("sha256:{}", rootfs_blob.uncompressed_sha256), @@ -296,6 +291,17 @@ pub struct ExportOpts { pub max_layers: Option, } +impl ExportOpts { + /// Return the gzip compression level to use, as configured by the export options. + fn compression(&self) -> Compression { + if self.compress { + Compression::default() + } else { + Compression::none() + } + } +} + /// Given an OSTree repository and ref, generate a container image. /// /// The returned `ImageReference` will contain a digested (e.g. `@sha256:`) version of the destination. From 8db927499773d038b98f0acd076a59d6683e6b90 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 16:19:35 -0400 Subject: [PATCH 389/775] lib/tar: Add public constant `FORMAT_VERSIONS`, validate more Prep for expanding this; validate it upfront in more places. 
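A self-contained sketch of the upfront validation (names mirror the patch, but the error type here is simplified to `String`):

    use std::ops::RangeInclusive;

    /// The allowed format versions, as a public constant.
    const FORMAT_VERSIONS: RangeInclusive<u32> = 0..=1;

    fn validate(format_version: u32) -> Result<(), String> {
        // Reject unsupported versions before doing any work.
        if !FORMAT_VERSIONS.contains(&format_version) {
            return Err(format!("Invalid format version: {format_version}"));
        }
        Ok(())
    }

    fn main() {
        assert!(validate(1).is_ok());
        assert!(validate(2).is_err());
    }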
--- lib/src/cli.rs | 3 +++ lib/src/tar/export.rs | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 3dbefab1a..a3f5ca2d5 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -371,6 +371,9 @@ async fn tar_import(opts: &ImportOpts) -> Result<()> { /// Export a tar archive containing an ostree commit. fn tar_export(opts: &ExportOpts) -> Result<()> { + if !crate::tar::FORMAT_VERSIONS.contains(&opts.format_version) { + anyhow::bail!("Invalid format version: {}", opts.format_version); + } #[allow(clippy::needless_update)] let subopts = crate::tar::ExportOptions { format_version: opts.format_version, diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 19af5d354..0d26322dd 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -14,10 +14,14 @@ use std::borrow::Borrow; use std::borrow::Cow; use std::collections::HashSet; use std::io::BufReader; +use std::ops::RangeInclusive; /// The repository mode generated by a tar export stream. pub const BARE_SPLIT_XATTRS_MODE: &str = "bare-split-xattrs"; +/// The set of allowed format versions; ranges from zero to 1, inclusive. +pub const FORMAT_VERSIONS: RangeInclusive = 0..=1; + // This is both special in the tar stream *and* it's in the ostree commit. const SYSROOT: &str = "sysroot"; // This way the default ostree -> sysroot/ostree symlink works. @@ -140,6 +144,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { out: &'a mut tar::Builder, options: ExportOptions, ) -> Result { + anyhow::ensure!(FORMAT_VERSIONS.contains(&options.format_version)); let commit_object = repo.load_commit(commit_checksum)?.0; let r = Self { repo, @@ -558,7 +563,7 @@ fn impl_export( /// Configuration for tar export. #[derive(Debug, Default, PartialEq, Eq)] pub struct ExportOptions { - /// Format version; must be 0 or 1. + /// Format version; must be in [`FORMAT_VERSIONS`]. pub format_version: u32, } From 285ff38da2b38f81d9241884c81262f15d094e11 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 16:21:11 -0400 Subject: [PATCH 390/775] lib: Fix two warnings from `cargo doc` --- lib/src/commit.rs | 2 +- lib/src/container/store.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/commit.rs b/lib/src/commit.rs index 70aed4b22..3f3084291 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -1,6 +1,6 @@ //! This module contains the functions to implement the commit //! procedures as part of building an ostree container image. -//! https://github.com/ostreedev/ostree-rs-ext/issues/159 +//! use crate::container_utils::require_ostree_container; use anyhow::Context; diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index d561ed554..c4a1886fc 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -136,7 +136,7 @@ pub struct ImageImporter { layer_byte_progress: Option>>, } -/// Result of invoking [`LayeredImageImporter::prepare`]. +/// Result of invoking [`ImageImporter::prepare`]. #[derive(Debug)] pub enum PrepareResult { /// The image reference is already present; the contained string is the OSTree commit. 
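Both of those fixes follow standard rustdoc rules; a sketch of the two patterns (the `Demo` item is hypothetical):

    /// Bare URLs in doc comments trip rustdoc's `bare_urls` lint; wrapping
    /// them in angle brackets turns them into real links and silences it:
    /// <https://github.com/ostreedev/ostree-rs-ext/issues/159>
    pub struct Demo;

    /// Intra-doc links such as [`Demo`] must name an item that exists; a
    /// bracketed reference to the renamed `LayeredImageImporter` type
    /// would warn as unresolved under `cargo doc`.
    pub fn uses_demo(_d: Demo) {}

    fn main() {
        uses_demo(Demo);
    }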
From c3d0bd0ad2ebdc70b8ab3f6e6692d0cffb1ce345 Mon Sep 17 00:00:00 2001 From: Jonathan Lebon Date: Fri, 24 Jun 2022 16:21:16 -0400 Subject: [PATCH 391/775] lib: Bump tokio minimal version to 1.13.0 We use `Sender::send_replace` which was added in 1.13.0: https://github.com/tokio-rs/tokio/pull/3962 Closes: #316 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 56c5085b6..f092d33b0 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -39,7 +39,7 @@ structopt = "0.3.21" tar = "0.4.38" tempfile = "3.2.0" term_size = "0.3.2" -tokio = { features = ["full"], version = "1" } +tokio = { features = ["full"], version = ">= 1.13.0" } tokio-util = { features = ["io-util"], version = "0.6.9" } tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" From bec51bd51f9bbfa931746b05dc39abd1ce561edb Mon Sep 17 00:00:00 2001 From: Jonathan Lebon Date: Fri, 24 Jun 2022 16:28:40 -0400 Subject: [PATCH 392/775] lib: Bump cap-std-ext minimal version to 0.25 We use `atomic_write` which was added in 0.25: https://github.com/coreos/cap-std-ext/pull/12 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index f092d33b0..7a48f376b 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -17,7 +17,7 @@ bitflags = "1" camino = "1.0.4" chrono = "0.4.19" cjson = "0.1.1" -cap-std-ext = ">= 0.24, <= 0.25" +cap-std-ext = ">= 0.25" cap-tempfile = "0.24" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" From ed8f8e1e0e9f6746d644e241003e1c97641aa039 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 18:10:44 -0400 Subject: [PATCH 393/775] integrationtest: Properly thread `async` through to test helper In various places we're lazy about properly using async; e.g. some of the test fixtures do a ton of blocking I/O. None of that really matters honestly as long as the *core* library does things properly. This bit of code in the test case was very hackily re-entering the async context that we already have from `main`; but it only works on the multithreaded tokio and we're aiming to switch to single threaded. That was just lazy of me; it was actually not at all hard to just properly add a few `async fn` and `await`. 
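The shape of the change, as a runnable sketch (hypothetical helpers standing in for the fixture code):

    use anyhow::Result;

    // Stand-in for the fixture's export; the point is that it is async.
    async fn export_container() -> Result<String> {
        Ok("oci:/tmp/fixture".into())
    }

    // Before: block_in_place + Handle::current().block_on(...), which only
    // works on the multi-threaded runtime. After: just await the helper.
    async fn create_fixture() -> Result<()> {
        let imgref = export_container().await?;
        println!("Wrote: {imgref:?}");
        Ok(())
    }

    #[tokio::main(flavor = "current_thread")]
    async fn main() -> Result<()> {
        create_fixture().await
    }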
--- lib/src/cli.rs | 6 +++--- lib/src/integrationtest.rs | 8 ++------ 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 3dbefab1a..0feee1fe4 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -653,14 +653,14 @@ fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { } #[cfg(feature = "internal-testing-api")] -fn testing(opts: &TestingOpts) -> Result<()> { +async fn testing(opts: &TestingOpts) -> Result<()> { match opts { TestingOpts::DetectEnv => { let s = crate::integrationtest::detectenv(); println!("{}", s); Ok(()) } - TestingOpts::CreateFixture => crate::integrationtest::create_fixture(), + TestingOpts::CreateFixture => crate::integrationtest::create_fixture().await, TestingOpts::Run => crate::integrationtest::run_tests(), TestingOpts::RunIMA => crate::integrationtest::test_ima(), TestingOpts::FilterTar => { @@ -797,6 +797,6 @@ where }, Opt::ImaSign(ref opts) => ima_sign(opts), #[cfg(feature = "internal-testing-api")] - Opt::InternalOnlyForTesting(ref opts) => testing(opts), + Opt::InternalOnlyForTesting(ref opts) => testing(opts).await, } } diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index d1acfe614..1cbc4d050 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -94,13 +94,9 @@ fn test_proxy_auth() -> Result<()> { /// Create a test fixture in the same way our unit tests does, and print /// the location of the temporary directory. Also export a chunked image. /// Useful for debugging things interactively. -pub(crate) fn create_fixture() -> Result<()> { +pub(crate) async fn create_fixture() -> Result<()> { let fixture = crate::fixture::Fixture::new_v1()?; - let imgref = tokio::task::block_in_place(|| { - tokio::runtime::Handle::current() - .block_on(fixture.export_container()) - .map(|v| v.0) - })?; + let imgref = fixture.export_container().await?.0; println!("Wrote: {:?}", imgref); let path = fixture.into_tempdir().into_path(); println!("Wrote: {:?}", path); From 191ffed55a647fec4d1ec7ec677cbcdead047f0c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 26 Jun 2022 15:32:45 -0400 Subject: [PATCH 394/775] container: Enable compression by default I'd noticed that some of our images were large but hadn't looked closely. I now really wish I had... I think this regressed quite a while ago during a refactoring. --- lib/src/container/encapsulate.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index a3858e576..0a39b17a1 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -237,7 +237,7 @@ async fn build_impl( ) -> Result { let mut opts = opts.unwrap_or_default(); if dest.transport == Transport::ContainerStorage { - opts.compress = false; + opts.skip_compression = false; } let digest = if dest.transport == Transport::OciDir { let _copied: ImageReference = build_oci( @@ -283,8 +283,8 @@ async fn build_impl( /// Options controlling commit export into OCI #[derive(Debug, Default)] pub struct ExportOpts { - /// If true, perform gzip compression of the tar layers. - pub compress: bool, + /// If false, do not perform gzip compression of the tar layers. + pub skip_compression: bool, /// A set of commit metadata keys to copy as image labels. pub copy_meta_keys: Vec, /// Maximum number of layers to use @@ -294,10 +294,10 @@ pub struct ExportOpts { impl ExportOpts { /// Return the gzip compression level to use, as configured by the export options. 
fn compression(&self) -> Compression { - if self.compress { - Compression::default() - } else { + if self.skip_compression { Compression::none() + } else { + Compression::default() } } } From ba97ffe10c9236e20ca61b7604c6228a8a6acd8d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 10:54:50 -0400 Subject: [PATCH 395/775] tests: Deduplicate tar structure validation code Most of the expected entries are common between v0 and v1. Prep for some further code refactoring. --- lib/tests/it/main.rs | 49 ++++++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 807a611f9..7cbe9a3f3 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -215,7 +215,7 @@ struct TarExpected { mode: u32, } -impl Into for &(&'static str, tar::EntryType, u32) { +impl Into for (&'static str, tar::EntryType, u32) { fn into(self) -> TarExpected { TarExpected { path: self.0, @@ -294,11 +294,7 @@ fn test_tar_export_structure() -> Result<()> { let next = entries.next().unwrap().unwrap(); assert_eq!(next.path().unwrap().as_os_str(), "sysroot"); - // Validate format version 0 - let expected = [ - ("sysroot/config", Regular, 0o644), - ("sysroot/ostree/repo", Directory, 0o755), - ("sysroot/ostree/repo/extensions", Directory, 0o755), + let common_structure = [ ("sysroot/ostree/repo/objects/00", Directory, 0o755), ("sysroot/ostree/repo/objects/23", Directory, 0o755), ("sysroot/ostree/repo/objects/77", Directory, 0o755), @@ -312,15 +308,21 @@ fn test_tar_export_structure() -> Result<()> { ("sysroot/ostree/repo/state", Directory, 0o755), ("sysroot/ostree/repo/tmp", Directory, 0o755), ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), + ] + .into_iter(); + + // Validate format version 0 + let expected = [ + ("sysroot/config", Regular, 0o644), + ("sysroot/ostree/repo", Directory, 0o755), + ("sysroot/ostree/repo/extensions", Directory, 0o755)] + .into_iter().chain(common_structure.clone()) + .chain([ ("sysroot/ostree/repo/xattrs", Directory, 0o755), ("sysroot/ostree/repo/xattrs/d67db507c5a6e7bfd078f0f3ded0a5669479a902e812931fc65c6f5e01831ef5", Regular, 0o644), ("usr", Directory, 0o755), - ]; - validate_tar_expected( - fixture.format_version, - entries, - expected.iter().map(Into::into), - )?; + ]).into_iter(); + validate_tar_expected(fixture.format_version, entries, expected.map(Into::into))?; // Validate format version 1 fixture.format_version = 1; @@ -330,26 +332,15 @@ fn test_tar_export_structure() -> Result<()> { let expected = [ ("sysroot/ostree/repo", Directory, 0o755), ("sysroot/ostree/repo/config", Regular, 0o644), - ("sysroot/ostree/repo/extensions", Directory, 0o755), - ("sysroot/ostree/repo/objects/00", Directory, 0o755), - ("sysroot/ostree/repo/objects/23", Directory, 0o755), - ("sysroot/ostree/repo/objects/77", Directory, 0o755), - ("sysroot/ostree/repo/objects/bc", Directory, 0o755), - ("sysroot/ostree/repo/objects/ff", Directory, 0o755), - ("sysroot/ostree/repo/refs", Directory, 0o755), - ("sysroot/ostree/repo/refs", Directory, 0o755), - ("sysroot/ostree/repo/refs/heads", Directory, 0o755), - ("sysroot/ostree/repo/refs/mirrors", Directory, 0o755), - ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), - ("sysroot/ostree/repo/state", Directory, 0o755), - ("sysroot/ostree/repo/tmp", Directory, 0o755), - ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), - ("usr", Directory, 0o755), - ]; + ] + .into_iter() + .chain(common_structure.clone()) + .chain([("usr", Directory, 0o755)].into_iter()) + 
.into_iter(); validate_tar_expected( fixture.format_version, src_tar.entries()?, - expected.iter().map(Into::into), + expected.map(Into::into), )?; Ok(()) From 41807ad39d4441c4042828cf24e3eb49353a921a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 11:19:31 -0400 Subject: [PATCH 396/775] tests: Expose `ocidir`, but only when testing I want to do some more unit-test-style validation of our exported container images. Now, we could have the tests directly use the [imageproxy][imageproxy] but it's very async-oriented which is heavy for the tests. [imageproxy]: https://github.com/containers/containers-image-proxy-rs --- lib/src/container/mod.rs | 6 ++- lib/src/container/ocidir.rs | 104 +++++++++++++++++++++++------------- lib/src/lib.rs | 2 + 3 files changed, 73 insertions(+), 39 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 040719c91..5d5ae59e9 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -238,8 +238,10 @@ pub use unencapsulate::*; // But that isn't turned on for other crates that use this, and correctly gating all // of it is a little tedious. So let's just use the big hammer for now to // quiet the dead code warnings. -#[allow(dead_code)] -pub(crate) mod ocidir; +#[cfg(feature = "internal-testing-api")] +pub mod ocidir; +#[cfg(not(feature = "internal-testing-api"))] +mod ocidir; mod skopeo; pub mod store; mod update_detachedmeta; diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 20260fd57..f450ce70b 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -1,5 +1,8 @@ //! Internal API to interact with Open Container Images; mostly //! oriented towards generating images. +//! +//! NOTE: Everything in here is `pub`, but that's only used to +//! expose this API when we're running our own tests. use anyhow::{anyhow, Context, Result}; use camino::Utf8Path; @@ -12,6 +15,7 @@ use oci_image::MediaType; use oci_spec::image::{self as oci_image, Descriptor}; use openssl::hash::{Hasher, MessageDigest}; use std::collections::HashMap; +use std::fmt::Debug; use std::fs::File; use std::io::{prelude::*, BufReader}; use std::os::unix::fs::DirBuilderExt; @@ -22,17 +26,21 @@ const BLOBDIR: &str = "blobs/sha256"; /// Completed blob metadata #[derive(Debug)] -pub(crate) struct Blob { - pub(crate) sha256: String, - pub(crate) size: u64, +pub struct Blob { + /// SHA-256 digest + pub sha256: String, + /// Size + pub size: u64, } impl Blob { - pub(crate) fn digest_id(&self) -> String { + /// The OCI standard checksum-type:checksum + pub fn digest_id(&self) -> String { format!("sha256:{}", self.sha256) } - pub(crate) fn descriptor(&self) -> oci_image::DescriptorBuilder { + /// Descriptor + pub fn descriptor(&self) -> oci_image::DescriptorBuilder { oci_image::DescriptorBuilder::default() .digest(self.digest_id()) .size(self.size as i64) @@ -41,38 +49,64 @@ impl Blob { /// Completed layer metadata #[derive(Debug)] -pub(crate) struct Layer { - pub(crate) blob: Blob, - pub(crate) uncompressed_sha256: String, +pub struct Layer { + /// The underlying blob (usually compressed) + pub blob: Blob, + /// The uncompressed digest, which will be used for "diffid"s + pub uncompressed_sha256: String, } impl Layer { - pub(crate) fn descriptor(&self) -> oci_image::DescriptorBuilder { + /// Return the descriptor for this layer + pub fn descriptor(&self) -> oci_image::DescriptorBuilder { self.blob.descriptor() } } /// Create an OCI blob. 
-pub(crate) struct BlobWriter<'a> { - pub(crate) hash: Hasher, - pub(crate) target: Option>, +pub struct BlobWriter<'a> { + /// Compute checksum + pub hash: Hasher, + /// Target file + pub target: Option>, size: u64, } +impl<'a> Debug for BlobWriter<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("BlobWriter") + .field("target", &self.target) + .field("size", &self.size) + .finish() + } +} + /// Create an OCI layer (also a blob). -pub(crate) struct RawLayerWriter<'a> { +pub struct RawLayerWriter<'a> { bw: BlobWriter<'a>, uncompressed_hash: Hasher, compressor: GzEncoder>, } -pub(crate) struct OciDir { - pub(crate) dir: std::sync::Arc, +impl<'a> Debug for RawLayerWriter<'a> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RawLayerWriter") + .field("bw", &self.bw) + .field("compressor", &self.compressor) + .finish() + } +} + +#[derive(Debug)] +/// An opened OCI directory. +pub struct OciDir { + /// The underlying directory. + pub dir: std::sync::Arc, } /// Write a serializable data (JSON) as an OCI blob #[context("Writing json blob")] -pub(crate) fn write_json_blob( +pub fn write_json_blob( ocidir: &Dir, v: &S, media_type: oci_image::MediaType, @@ -104,7 +138,7 @@ fn empty_config_descriptor() -> oci_image::Descriptor { } /// Generate a "valid" empty manifest. See above. -pub(crate) fn new_empty_manifest() -> oci_image::ImageManifestBuilder { +pub fn new_empty_manifest() -> oci_image::ImageManifestBuilder { oci_image::ImageManifestBuilder::default() .schema_version(oci_image::SCHEMA_VERSION) .config(empty_config_descriptor()) @@ -113,7 +147,7 @@ pub(crate) fn new_empty_manifest() -> oci_image::ImageManifestBuilder { impl OciDir { /// Create a new, empty OCI directory at the target path, which should be empty. - pub(crate) fn create(dir: &Dir) -> Result { + pub fn create(dir: &Dir) -> Result { let mut db = cap_std::fs::DirBuilder::new(); db.recursive(true).mode(0o755); dir.ensure_dir_with(BLOBDIR, &db)?; @@ -122,7 +156,7 @@ impl OciDir { } /// Clone an OCI directory, using reflinks for blobs. - pub(crate) fn clone_to(&self, destdir: &Dir, p: impl AsRef) -> Result { + pub fn clone_to(&self, destdir: &Dir, p: impl AsRef) -> Result { let p = p.as_ref(); destdir.create_dir(p)?; let cloned = Self::create(&destdir.open_dir(p)?)?; @@ -137,21 +171,18 @@ impl OciDir { } /// Open an existing OCI directory. - pub(crate) fn open(dir: &Dir) -> Result { + pub fn open(dir: &Dir) -> Result { let dir = std::sync::Arc::new(dir.try_clone()?); Ok(Self { dir }) } /// Create a writer for a new blob (expected to be a tar stream) - pub(crate) fn create_raw_layer( - &self, - c: Option, - ) -> Result { + pub fn create_raw_layer(&self, c: Option) -> Result { RawLayerWriter::new(&self.dir, c) } /// Create a tar output stream, backed by a blob - pub(crate) fn create_layer( + pub fn create_layer( &self, c: Option, ) -> Result> { @@ -160,7 +191,7 @@ impl OciDir { /// Add a layer to the top of the image stack. The firsh pushed layer becomes the root. - pub(crate) fn push_layer( + pub fn push_layer( &self, manifest: &mut oci_image::ImageManifest, config: &mut oci_image::ImageConfiguration, @@ -174,7 +205,7 @@ impl OciDir { /// Add a layer to the top of the image stack with optional annotations. /// /// This is otherwise equivalent to [`Self::push_layer`]. 
- pub(crate) fn push_layer_annotated( + pub fn push_layer_annotated( &self, manifest: &mut oci_image::ImageManifest, config: &mut oci_image::ImageConfiguration, @@ -215,7 +246,8 @@ impl OciDir { Ok(Path::new(BLOBDIR).join(hash)) } - pub(crate) fn read_blob(&self, desc: &oci_spec::image::Descriptor) -> Result { + /// Open a blob + pub fn read_blob(&self, desc: &oci_spec::image::Descriptor) -> Result { let path = Self::parse_descriptor_to_path(desc)?; self.dir .open(&path) @@ -224,7 +256,7 @@ impl OciDir { } /// Read a JSON blob. - pub(crate) fn read_json_blob( + pub fn read_json_blob( &self, desc: &oci_spec::image::Descriptor, ) -> Result { @@ -233,7 +265,7 @@ impl OciDir { } /// Write a configuration blob. - pub(crate) fn write_config( + pub fn write_config( &self, config: oci_image::ImageConfiguration, ) -> Result { @@ -243,7 +275,7 @@ impl OciDir { } /// Write a manifest as a blob, and replace the index with a reference to it. - pub(crate) fn write_manifest( + pub fn write_manifest( &self, manifest: oci_image::ImageManifest, platform: oci_image::Platform, @@ -267,14 +299,12 @@ impl OciDir { } /// If this OCI directory has a single manifest, return it. Otherwise, an error is returned. - pub(crate) fn read_manifest(&self) -> Result { + pub fn read_manifest(&self) -> Result { self.read_manifest_and_descriptor().map(|r| r.0) } /// If this OCI directory has a single manifest, return it. Otherwise, an error is returned. - pub(crate) fn read_manifest_and_descriptor( - &self, - ) -> Result<(oci_image::ImageManifest, Descriptor)> { + pub fn read_manifest_and_descriptor(&self) -> Result<(oci_image::ImageManifest, Descriptor)> { let f = self .dir .open("index.json") @@ -302,7 +332,7 @@ impl<'a> BlobWriter<'a> { #[context("Completing blob")] /// Finish writing this blob object. - pub(crate) fn complete(mut self) -> Result { + pub fn complete(mut self) -> Result { let sha256 = hex::encode(self.hash.finish()?); let destname = &format!("{}/{}", BLOBDIR, sha256); let target = self.target.take().unwrap(); @@ -344,7 +374,7 @@ impl<'a> RawLayerWriter<'a> { #[context("Completing layer")] /// Consume this writer, flushing buffered data and put the blob in place. - pub(crate) fn complete(mut self) -> Result { + pub fn complete(mut self) -> Result { self.compressor.get_mut().clear(); let buf = self.compressor.finish()?; self.bw.write_all(&buf)?; diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 23a41ac95..e7323c920 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -52,3 +52,5 @@ pub mod prelude { pub mod fixture; #[cfg(feature = "internal-testing-api")] pub mod integrationtest; +#[cfg(feature = "internal-testing-api")] +pub use container::ocidir; From 0da48a6c8ab3c2a6f9a931115432bcfbfeb5d778 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 13:23:29 -0400 Subject: [PATCH 397/775] tar/export: Drop a leftover `dbg!` Oops. 
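As an aside, the crate's existing `tracing` dependency is the durable alternative to ad-hoc `dbg!` calls; a sketch (hypothetical function, with `tracing-subscriber` assumed for output):

    use tracing::debug;

    // Stand-in for the real parsing code; the point is the logging call.
    fn parse_metadata_entry(objtype: &str) {
        // Unlike a stray dbg!(), this is filterable at runtime and is
        // safe to leave in committed code.
        debug!(?objtype, "parsed metadata entry");
    }

    fn main() {
        tracing_subscriber::fmt::init();
        parse_metadata_entry("commit-meta");
    }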
--- lib/src/tar/export.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 0d26322dd..8dba09415 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -699,7 +699,6 @@ pub(crate) fn reinject_detached_metadata>( let next_ent_path: &Utf8Path = (&*next_ent_path).try_into()?; let objtype = crate::tar::import::Importer::parse_metadata_entry(next_ent_path)?.1; if objtype != ostree::ObjectType::CommitMeta { - dbg!(objtype); crate::tar::write::copy_entry(next_ent, dest, None)?; } From 1eaa016cb27591092b41749c36da4568536e93d4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 13:28:28 -0400 Subject: [PATCH 398/775] ci: Add a custom lint check for leftover `dbg!` It's tempting to add this to clippy. --- .github/workflows/rust.yml | 2 ++ ci/lints.sh | 9 +++++++++ 2 files changed, 11 insertions(+) create mode 100755 ci/lints.sh diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 336088d9e..926d4eb6b 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -26,6 +26,8 @@ jobs: container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - uses: actions/checkout@v2 + - name: Code lints + run: ./ci/lints.sh - name: Install deps run: ./ci/installdeps.sh # xref containers/containers-image-proxy-rs diff --git a/ci/lints.sh b/ci/lints.sh new file mode 100755 index 000000000..4a07f6693 --- /dev/null +++ b/ci/lints.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -xeuo pipefail +tmpf=$(mktemp) +git grep 'dbg!' '*.rs' > ${tmpf} || true +if test -s ${tmpf}; then + echo "Found dbg!" 1>&2 + cat "${tmpf}" + exit 1 +fi \ No newline at end of file From f1becff3dc56e4737263bdeb68bd2fd2aaff8d17 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 24 Jun 2022 16:51:39 -0400 Subject: [PATCH 399/775] Use single-threaded tokio by default I saw https://www.reddit.com/r/rust/comments/v8e9fa/local_async_executors_and_why_they_should_be_the/ go by and I think it's a good argument why one should use single-threaded tokio by default. There's more to do around potentially using https://docs.rs/tokio/latest/tokio/task/struct.LocalSet.html etc. 
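A minimal sketch of that direction: the current-thread flavor plus a `LocalSet`, which permits `spawn_local` for futures that are not `Send`:

    use tokio::task::LocalSet;

    #[tokio::main(flavor = "current_thread")]
    async fn main() {
        let local = LocalSet::new();
        local
            .run_until(async {
                // spawn_local accepts futures that are not Send.
                tokio::task::spawn_local(async {
                    println!("running on the single-threaded runtime");
                })
                .await
                .unwrap();
            })
            .await;
    }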
--- cli/Cargo.toml | 2 +- cli/src/main.rs | 2 +- lib/Cargo.toml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index b80bacd02..be624c8a0 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -13,7 +13,7 @@ ostree-ext = { path = "../lib" } clap = "2.33.3" structopt = "0.3.21" libc = "0.2.92" -tokio = { version = "1", features = ["full"] } +tokio = { version = "1", features = ["io-std", "macros"] } log = "0.4.0" tracing = "0.1" tracing-subscriber = "0.2.17" diff --git a/cli/src/main.rs b/cli/src/main.rs index f80554811..5f3e9e034 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -10,7 +10,7 @@ async fn run() -> Result<()> { ostree_ext::cli::run_from_iter(std::env::args_os()).await } -#[tokio::main] +#[tokio::main(flavor = "current_thread")] async fn main() { if let Err(e) = run().await { eprintln!("error: {:#}", e); diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 7a48f376b..f21db5164 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -10,7 +10,7 @@ version = "0.8.0" [dependencies] anyhow = "1.0" -containers-image-proxy = "0.5.0" +containers-image-proxy = "0.5.1" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" @@ -39,7 +39,7 @@ structopt = "0.3.21" tar = "0.4.38" tempfile = "3.2.0" term_size = "0.3.2" -tokio = { features = ["full"], version = ">= 1.13.0" } +tokio = { features = ["time", "process", "rt", "net"], version = ">= 1.13.0" } tokio-util = { features = ["io-util"], version = "0.6.9" } tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" From f5d772156b6497400292e92e3c7e10119ebc0a5d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 12:57:55 -0400 Subject: [PATCH 400/775] tests: Validate structure of chunked image ostree layer Chunked images are format v1 tar; let's add some unit-test style code that validates the tar structure there in the same way we verify direct tar exports. 
Prep for fixing https://github.com/ostreedev/ostree-rs-ext/issues/309 --- lib/tests/it/main.rs | 109 +++++++++++++++++++++++++++++-------------- 1 file changed, 73 insertions(+), 36 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 7cbe9a3f3..11c38bbfa 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -8,6 +8,7 @@ use ostree_ext::container::store; use ostree_ext::container::{ Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport, }; +use ostree_ext::ocidir; use ostree_ext::prelude::FileExt; use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; @@ -275,6 +276,42 @@ fn validate_tar_expected( Ok(()) } +fn common_tar_structure() -> impl Iterator { + use tar::EntryType::Directory; + [ + ("sysroot/ostree/repo/objects/00", Directory, 0o755), + ("sysroot/ostree/repo/objects/23", Directory, 0o755), + ("sysroot/ostree/repo/objects/77", Directory, 0o755), + ("sysroot/ostree/repo/objects/bc", Directory, 0o755), + ("sysroot/ostree/repo/objects/ff", Directory, 0o755), + ("sysroot/ostree/repo/refs", Directory, 0o755), + ("sysroot/ostree/repo/refs", Directory, 0o755), + ("sysroot/ostree/repo/refs/heads", Directory, 0o755), + ("sysroot/ostree/repo/refs/mirrors", Directory, 0o755), + ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), + ("sysroot/ostree/repo/state", Directory, 0o755), + ("sysroot/ostree/repo/tmp", Directory, 0o755), + ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), + ] + .into_iter() + .map(Into::into) +} + +fn validate_tar_v1(mut src: tar::Archive) -> Result<()> { + use tar::EntryType::{Directory, Regular}; + let prelude = [ + ("sysroot/ostree/repo", Directory, 0o755), + ("sysroot/ostree/repo/config", Regular, 0o644), + ] + .into_iter() + .map(Into::into); + + let expected = prelude.chain(common_tar_structure()); + validate_tar_expected(1, src.entries()?, expected)?; + + Ok(()) +} + /// Validate basic structure of the tar export. 
#[test] fn test_tar_export_structure() -> Result<()> { @@ -294,54 +331,32 @@ fn test_tar_export_structure() -> Result<()> { let next = entries.next().unwrap().unwrap(); assert_eq!(next.path().unwrap().as_os_str(), "sysroot"); - let common_structure = [ - ("sysroot/ostree/repo/objects/00", Directory, 0o755), - ("sysroot/ostree/repo/objects/23", Directory, 0o755), - ("sysroot/ostree/repo/objects/77", Directory, 0o755), - ("sysroot/ostree/repo/objects/bc", Directory, 0o755), - ("sysroot/ostree/repo/objects/ff", Directory, 0o755), - ("sysroot/ostree/repo/refs", Directory, 0o755), - ("sysroot/ostree/repo/refs", Directory, 0o755), - ("sysroot/ostree/repo/refs/heads", Directory, 0o755), - ("sysroot/ostree/repo/refs/mirrors", Directory, 0o755), - ("sysroot/ostree/repo/refs/remotes", Directory, 0o755), - ("sysroot/ostree/repo/state", Directory, 0o755), - ("sysroot/ostree/repo/tmp", Directory, 0o755), - ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), + let v0_prelude = [ + ("sysroot/config", Regular, 0o644), + ("sysroot/ostree/repo", Directory, 0o755), + ("sysroot/ostree/repo/extensions", Directory, 0o755), ] - .into_iter(); + .into_iter() + .map(Into::into); // Validate format version 0 - let expected = [ - ("sysroot/config", Regular, 0o644), - ("sysroot/ostree/repo", Directory, 0o755), - ("sysroot/ostree/repo/extensions", Directory, 0o755)] - .into_iter().chain(common_structure.clone()) + let expected = v0_prelude.chain(common_tar_structure()) .chain([ ("sysroot/ostree/repo/xattrs", Directory, 0o755), ("sysroot/ostree/repo/xattrs/d67db507c5a6e7bfd078f0f3ded0a5669479a902e812931fc65c6f5e01831ef5", Regular, 0o644), ("usr", Directory, 0o755), - ]).into_iter(); + ].into_iter().map(Into::into)); validate_tar_expected(fixture.format_version, entries, expected.map(Into::into))?; // Validate format version 1 fixture.format_version = 1; let src_tar = fixture.export_tar()?; - let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?); - let mut src_tar = tar::Archive::new(src_tar); - let expected = [ - ("sysroot/ostree/repo", Directory, 0o755), - ("sysroot/ostree/repo/config", Regular, 0o644), - ] - .into_iter() - .chain(common_structure.clone()) - .chain([("usr", Directory, 0o755)].into_iter()) - .into_iter(); - validate_tar_expected( - fixture.format_version, - src_tar.entries()?, - expected.map(Into::into), - )?; + let src_tar = fixture + .dir + .open(src_tar) + .map(BufReader::new) + .map(tar::Archive::new)?; + validate_tar_v1(src_tar).unwrap(); Ok(()) } @@ -600,6 +615,20 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { Ok(()) } +/// Parse a chunked container image and validate its structure; particularly +fn validate_chunked_structure(oci_path: &Utf8Path) -> Result<()> { + let d = Dir::open_ambient_dir(oci_path, cap_std::ambient_authority())?; + let d = ocidir::OciDir::open(&d)?; + let manifest = d.read_manifest()?; + let ostree_layer = manifest.layers().last().unwrap(); + let ostree_layer_blob = d + .read_blob(ostree_layer) + .map(BufReader::new) + .map(flate2::read::GzDecoder::new) + .map(tar::Archive::new)?; + validate_tar_v1(ostree_layer_blob) +} + #[tokio::test] async fn impl_test_container_chunked() -> Result<()> { let nlayers = 6u32; @@ -610,6 +639,14 @@ async fn impl_test_container_chunked() -> Result<()> { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: imgref, }; + // Validate the structure of the image + match &imgref.imgref { + ImageReference { + transport: Transport::OciDir, + name, + } => 
validate_chunked_structure(Utf8Path::new(name)).unwrap(), + _ => unreachable!(), + }; let mut imp = store::ImageImporter::new(fixture.destrepo(), &imgref, Default::default()).await?; From b2265f3bf1df39c3223312c49579159010063229 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 15:20:09 -0400 Subject: [PATCH 401/775] container: Do continue to skip compression for `containers-storage:` Fixes previous commit. --- lib/src/container/encapsulate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 0a39b17a1..a02f125f8 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -237,7 +237,7 @@ async fn build_impl( ) -> Result { let mut opts = opts.unwrap_or_default(); if dest.transport == Transport::ContainerStorage { - opts.skip_compression = false; + opts.skip_compression = true; } let digest = if dest.transport == Transport::OciDir { let _copied: ImageReference = build_oci( From a123581d869d397920ea1e4ca057821a5bfafc6f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 15:20:46 -0400 Subject: [PATCH 402/775] container/encapsulate: Fix `skip_compression` docstring Remove an accidental double negative. --- lib/src/container/encapsulate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index a02f125f8..76a8f1f08 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -283,7 +283,7 @@ async fn build_impl( /// Options controlling commit export into OCI #[derive(Debug, Default)] pub struct ExportOpts { - /// If false, do not perform gzip compression of the tar layers. + /// If true, do not perform gzip compression of the tar layers. pub skip_compression: bool, /// A set of commit metadata keys to copy as image labels. pub copy_meta_keys: Vec, From b6b998bfad950d1562c43574f1568d68370a5051 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 13:04:28 -0400 Subject: [PATCH 403/775] chunking: Include all directories in ostree layer This is prep for fixing https://github.com/ostreedev/ostree-rs-ext/issues/309 but it will not fix it on its own; to do that we need to introduce a new chunked format. That is forthcoming. In the default tar export, we walk a commit from the "root". When exporting chunked images (what will soon be called "chunkedv0") we instead export content objects, then a final layer which contains the ostree metadata (commit and dirtree/dirmeta). But in that "chunkedv0" flow, nothing creates the *tar* equivalent of directories. This has a number of problems; primarily it will implicitly omit any directories which do not have content in them (for example `/tmp`). We also won't have the proper ownership/directories reflected in the tar stream. In this commit, change how we generate the "ostree layer" which contains the ostree metadata to walk from the ostree-commit as a root; except we just don't emit any content objects in that flow to start, assuming they were emitted earlier. (It is common for chunkedv0 images to have content objects, they are just emitted as a second phase) Note that we no longer need to gather all metadata objects when processing chunking, because we'll end up walking everything in the export phase instead. 
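The core idea, reduced to a toy sketch (a hypothetical `Walker`, not the real `OstreeTarWriter`): the walk always emits directory entries, and a flag suppresses only the content objects:

    struct Walker {
        structure_only: bool,
    }

    impl Walker {
        fn emit_dir(&self, dirpath: &str, files: &[&str]) {
            // Directories are always written, so empty ones like /tmp
            // (and their ownership/permissions) survive in the stream.
            println!("dir  {dirpath}");
            if !self.structure_only {
                for name in files {
                    println!("file {dirpath}/{name}");
                }
            }
        }
    }

    fn main() {
        let metadata_layer = Walker { structure_only: true };
        metadata_layer.emit_dir("tmp", &[]);
        metadata_layer.emit_dir("usr/bin", &["true"]);
    }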
--- lib/src/chunking.rs | 27 --------------------------- lib/src/tar/export.rs | 39 ++++++++++++++++++--------------------- lib/tests/it/main.rs | 5 ++++- 3 files changed, 22 insertions(+), 49 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 40a6eeccf..0cb1aa457 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -32,28 +32,6 @@ pub(crate) struct Chunk { pub(crate) size: u64, } -#[derive(Debug)] -pub(crate) enum Meta { - DirTree(RcStr), - DirMeta(RcStr), -} - -impl Meta { - pub(crate) fn objtype(&self) -> ostree::ObjectType { - match self { - Meta::DirTree(_) => ostree::ObjectType::DirTree, - Meta::DirMeta(_) => ostree::ObjectType::DirMeta, - } - } - - pub(crate) fn checksum(&self) -> &str { - match self { - Meta::DirTree(v) => v, - Meta::DirMeta(v) => v, - } - } -} - #[derive(Debug, Deserialize, Serialize)] /// Object metadata, but with additional size data pub struct ObjectSourceMetaSized { @@ -107,7 +85,6 @@ impl ObjectMetaSized { #[derive(Debug, Default)] pub struct Chunking { pub(crate) metadata_size: u64, - pub(crate) meta: Vec, pub(crate) remainder: Chunk, pub(crate) chunks: Vec, @@ -124,7 +101,6 @@ pub struct Chunking { struct Generation { path: Utf8PathBuf, metadata_size: u64, - meta: Vec, dirtree_found: BTreeSet, dirmeta_found: BTreeSet, } @@ -137,7 +113,6 @@ fn push_dirmeta(repo: &ostree::Repo, gen: &mut Generation, checksum: &str) -> Re gen.dirmeta_found.insert(RcStr::clone(&checksum)); let child_v = repo.load_variant(ostree::ObjectType::DirMeta, checksum.borrow())?; gen.metadata_size += child_v.data_as_bytes().as_ref().len() as u64; - gen.meta.push(Meta::DirMeta(checksum)); Ok(()) } @@ -152,7 +127,6 @@ fn push_dirtree( let child_v = repo.load_variant(ostree::ObjectType::DirTree, checksum)?; let checksum = RcStr::from(checksum); gen.dirtree_found.insert(RcStr::clone(&checksum)); - gen.meta.push(Meta::DirTree(checksum)); gen.metadata_size += child_v.data_as_bytes().as_ref().len() as u64; Ok(Some(child_v)) } @@ -258,7 +232,6 @@ impl Chunking { let chunking = Chunking { metadata_size: gen.metadata_size, - meta: gen.meta, remainder: chunk, ..Default::default() }; diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 8dba09415..01b26edf2 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -70,6 +70,8 @@ struct OstreeTarWriter<'a, W: std::io::Write> { out: &'a mut tar::Builder, options: ExportOptions, wrote_initdirs: bool, + /// True if we're only writing directories + structure_only: bool, wrote_dirtree: HashSet, wrote_dirmeta: HashSet, wrote_content: HashSet, @@ -153,6 +155,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { out, options, wrote_initdirs: false, + structure_only: false, wrote_dirmeta: HashSet::new(), wrote_dirtree: HashSet::new(), wrote_content: HashSet::new(), @@ -508,14 +511,16 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { c.set_error_if_cancelled()?; } - for file in files { - let (name, csum) = file.to_tuple(); - let name = name.to_str(); - let checksum = &hex::encode(csum); - let (objpath, h) = self.append_content(checksum)?; - let subpath = &dirpath.join(name); - let subpath = map_path(subpath); - self.append_content_hardlink(&objpath, h, &*subpath)?; + if !self.structure_only { + for file in files { + let (name, csum) = file.to_tuple(); + let name = name.to_str(); + let checksum = &hex::encode(csum); + let (objpath, h) = self.append_content(checksum)?; + let subpath = &dirpath.join(name); + let subpath = map_path(subpath); + self.append_content_hardlink(&objpath, h, &*subpath)?; + } } 
for item in dirs { @@ -633,19 +638,11 @@ pub(crate) fn export_final_chunk( ..Default::default() }; let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options)?; - writer.write_repo_structure()?; - - // Write the commit - writer.append_commit_object()?; - - // In the chunked case, the final layer has all ostree metadata objects. - for meta in &chunking.meta { - let objtype = meta.objtype(); - let checksum = meta.checksum(); - let v = repo.load_variant(objtype, checksum)?; - writer.append(objtype, checksum, &v)?; - } - + // For the final chunk, output the commit object, plus all ostree metadata objects along with + // the containing directories. + writer.structure_only = true; + writer.write_commit()?; + writer.structure_only = false; write_chunk(writer, chunking.remainder.content) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 11c38bbfa..3a501b498 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -306,7 +306,10 @@ fn validate_tar_v1(mut src: tar::Archive) -> Result<()> { .into_iter() .map(Into::into); - let expected = prelude.chain(common_tar_structure()); + let content = [("usr", Directory, 0o755), ("boot", Directory, 0o755)]; + let content = content.into_iter().map(Into::into); + + let expected = prelude.chain(common_tar_structure()).chain(content); validate_tar_expected(1, src.entries()?, expected)?; Ok(()) From 871969b029189aa8652c2b145c64ff7dd1fe646a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 23 Jun 2022 16:48:20 -0400 Subject: [PATCH 404/775] Port to cap-std 0.25, ostree 0.15, sh-inline 0.3 Our semver train is fun; updating cap-std for us involves a semver bump for ostree because it exports public types from that. We also have our utility crate `cap-std-ext` in the mix that needs bumping. And we have `sh-inline` which depends on all this. I had to bump semver for all those at once. I'm now regretting `sh-inline`'s existence, or at least its dependency on cap-std. 
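For orientation, the capability-oriented pattern these ports converge on, as a sketch (assuming cap-std's `Dir::read_to_string`; the repo path is illustrative):

    use anyhow::Result;
    use cap_std::fs::Dir;

    fn read_repo_config(repo: &Dir) -> Result<String> {
        // All I/O happens relative to the passed-in directory handle.
        Ok(repo.read_to_string("config")?)
    }

    fn main() -> Result<()> {
        // Ambient authority (absolute-path access) is requested
        // explicitly, once, at the edge of the program.
        let repo = Dir::open_ambient_dir("/ostree/repo", cap_std::ambient_authority())?;
        println!("{}", read_repo_config(&repo)?);
        Ok(())
    }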
--- lib/Cargo.toml | 10 +++++----- lib/src/fixture.rs | 2 +- lib/src/ima.rs | 12 +++++------- lib/src/tar/export.rs | 2 -- 4 files changed, 11 insertions(+), 15 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 7a48f376b..b1a9d5133 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -17,20 +17,20 @@ bitflags = "1" camino = "1.0.4" chrono = "0.4.19" cjson = "0.1.1" -cap-std-ext = ">= 0.25" -cap-tempfile = "0.24" +cap-std-ext = "0.26" +cap-tempfile = "0.25" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" -io-lifetimes = "0.5" +io-lifetimes = "0.7" once_cell = "1.9" libc = "0.2.92" oci-spec = "0.5.4" openssl = "0.10.33" -ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.14.0" } +ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.15.0" } pin-project = "1.0" regex = "1.5.4" serde = { features = ["derive"], version = "1.0.125" } @@ -45,7 +45,7 @@ tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" indoc = { version = "1.0.3", optional = true } -sh-inline = { version = "0.2.2", features = ["cap-std-ext"], optional = true } +sh-inline = { version = "0.3", features = ["cap-std-ext"], optional = true } [dev-dependencies] quickcheck = "1" diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 3d9267965..139a6a760 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -369,7 +369,7 @@ impl Fixture { srcdir_dfd.create_dir("gpghome")?; let gpghome = srcdir_dfd.open_dir("gpghome")?; let st = std::process::Command::new("tar") - .cwd_dir_owned(gpghome) + .cwd_dir(gpghome) .stdin(Stdio::from(gpgtar)) .args(&["-azxf", "-"]) .status()?; diff --git a/lib/src/ima.rs b/lib/src/ima.rs index a2399e10e..9425482a5 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -13,13 +13,11 @@ use glib::Cast; use glib::Variant; use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; -use io_lifetimes::AsFilelike; use ostree::gio; use std::collections::{BTreeMap, HashMap}; use std::ffi::CString; use std::fs::File; use std::io::Seek; -use std::ops::DerefMut; use std::os::unix::io::AsRawFd; use std::process::{Command, Stdio}; @@ -120,10 +118,11 @@ impl<'a> CommitRewriter<'a> { let mut tempf = tempfile::NamedTempFile::new_in(self.tempdir.path())?; // If we're operating on a bare repo, we can clone the file (copy_file_range) directly. if let Ok(instream) = instream.clone().downcast::() { + use io_lifetimes::AsFilelike; // View the fd as a File - let instream_fd = unsafe { BorrowedFd::borrow_raw_fd(instream.as_raw_fd()) }; - let instream_fd = &mut instream_fd.as_filelike_view::(); - std::io::copy(instream_fd.deref_mut(), tempf.as_file_mut())?; + let instream_fd = unsafe { BorrowedFd::borrow_raw(instream.as_raw_fd()) }; + let instream_fd = instream_fd.as_filelike_view::(); + std::io::copy(&mut (&*instream_fd), tempf.as_file_mut())?; } else { // If we're operating on an archive repo, then we need to uncompress // and recompress... 
@@ -163,8 +162,7 @@ } else {
             return Ok(None);
         };
-        let meta = meta.unwrap();
-        let mut xattrs = xattrs_to_map(&xattrs.unwrap());
+        let mut xattrs = xattrs_to_map(&xattrs);
         let existing_sig = xattrs.remove(IMA_XATTR.as_bytes());
         if existing_sig.is_some() && !self.ima.overwrite {
             return Ok(None);
         }
diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 8dba09415..484f34d58 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -403,8 +403,6 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
         let path = object_path(ostree::ObjectType::File, checksum);
 
         let (instream, meta, xattrs) = self.repo.load_file(checksum, gio::NONE_CANCELLABLE)?;
-        let meta = meta.ok_or_else(|| anyhow!("Missing metadata for object {}", checksum))?;
-        let xattrs = xattrs.ok_or_else(|| anyhow!("Missing xattrs for object {}", checksum))?;
 
         let mut h = tar::Header::new_gnu();
         h.set_uid(meta.attribute_uint32("unix::uid") as u64);

From d75e55477030ea720d8d1da9e70670ed684ceaf4 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 24 Jun 2022 19:02:07 -0400
Subject: [PATCH 405/775] container: Introduce an `ExportLayout::ChunkedV1`

Closes: https://github.com/ostreedev/ostree-rs-ext/issues/309

I discovered that (what are now called) "chunkedv0" images were missing
some directories like `/tmp`. This is because the way chunking was
implemented was...basically broken.

In chunking v0 we have:

- content objects
- content objects
- ...
- ostree layer (commit, ostree metadata)
- Any derived layers

But...we really want to "mirror" in the tar stream in a proper fashion
everything that's in the ostree layer. Particularly, in order to ensure
e.g. correct permissions/ownership, the directory entries must come
first. And logically, it makes sense to have the layer bearing the
ostree commit and the ostree metadata come first.

So the new "ChunkedV1" format is:

- ostree layer (commit, ostree metadata, all directories)
- content objects
- content objects
- ...
- Any derived layers

The ChunkedV1 format can be identified by a new image label:
`const OSTREE_FINAL_LAYER_LABEL: &str = "ostree.final-diffid";`
this label points to the last content object layer.

Implementation-wise, this is mostly adding conditionals in various
places. I'm perhaps being very conservative here in *also* continuing
to support 'chunkedv0' images. Right now they're not deployed widely
(AFAIK), just shipping Fedora Rawhide this way. But on the plus side,
we'll be able to kill off all the old format code when this stuff is
all stable.
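The layer-ordering difference between the two chunked layouts, reduced to a sketch (layer names are illustrative):

    #[derive(Debug, Clone, Copy)]
    enum ExportLayout {
        SingleLayer,
        ChunkedV0,
        ChunkedV1,
    }

    fn layer_order(layout: ExportLayout, content: &[&str]) -> Vec<String> {
        let ostree = "ostree-commit-and-metadata".to_string();
        let content = content.iter().map(|s| s.to_string());
        match layout {
            // Everything in one tarball.
            ExportLayout::SingleLayer => vec![ostree],
            // v0: content chunks first, ostree metadata last -- so
            // directory entries never precede the files inside them.
            ExportLayout::ChunkedV0 => content.chain([ostree]).collect(),
            // v1: the ostree layer (with all directories) comes first.
            ExportLayout::ChunkedV1 => std::iter::once(ostree).chain(content).collect(),
        }
    }

    fn main() {
        for layout in [
            ExportLayout::SingleLayer,
            ExportLayout::ChunkedV0,
            ExportLayout::ChunkedV1,
        ] {
            println!("{layout:?} -> {:?}", layer_order(layout, &["usr", "boot"]));
        }
    }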
--- ci/priv-integration.sh | 26 +-- lib/src/container/encapsulate.rs | 198 +++++++++++++++++------ lib/src/container/mod.rs | 4 +- lib/src/container/store.rs | 127 +++++++++++---- lib/src/container/update_detachedmeta.rs | 27 +++- lib/src/fixture.rs | 19 ++- lib/src/integrationtest.rs | 8 +- lib/src/tar/export.rs | 4 +- lib/tests/it/main.rs | 72 +++++++-- 9 files changed, 362 insertions(+), 123 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index aa55ef30a..1fa75d1f9 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -9,6 +9,7 @@ sysroot=/run/host image=quay.io/coreos-assembler/fcos:testing-devel # My hand-uploaded chunked images chunked_image=quay.io/cgwalters/fcos-chunked:latest +chunked_image_v1=quay.io/cgwalters/fcos-chunked:v1 imgref=ostree-unverified-registry:${image} stateroot=testos @@ -24,16 +25,19 @@ fi ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref "${imgref}" ostree admin --sysroot="${sysroot}" status -ostree-ext-cli container image deploy --sysroot "${sysroot}" \ - --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${chunked_image}" -ostree admin --sysroot="${sysroot}" status -ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" registry:"${chunked_image}" -ostree admin --sysroot="${sysroot}" undeploy 0 -ostree --repo="${sysroot}/ostree/repo" refs > refs.txt -if test "$(wc -l < refs.txt)" -ne 0; then - echo "found refs" - cat refs.txt - exit 1 -fi +ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" +for img in "${chunked_image}" "${chunked_image_v1}"; do + ostree-ext-cli container image deploy --sysroot "${sysroot}" \ + --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${img}" + ostree admin --sysroot="${sysroot}" status + ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${img}" + ostree admin --sysroot="${sysroot}" undeploy 0 + ostree --repo="${sysroot}/ostree/repo" refs > refs.txt + if test "$(wc -l < refs.txt)" -ne 0; then + echo "found refs" + cat refs.txt + exit 1 + fi +done echo ok privileged integration diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 76a8f1f08..80b461438 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -1,9 +1,9 @@ //! APIs for creating container images from OSTree commits -use super::ocidir::OciDir; +use super::ocidir::{Layer, OciDir}; use super::{ocidir, OstreeImageReference, Transport}; use super::{ImageReference, SignatureSource, OSTREE_COMMIT_LABEL}; -use crate::chunking::{Chunking, ObjectMetaSized}; +use crate::chunking::{Chunk, Chunking, ObjectMetaSized}; use crate::container::skopeo; use crate::tar as ostree_tar; use anyhow::{anyhow, Context, Result}; @@ -20,6 +20,23 @@ use std::num::NonZeroU32; use std::path::Path; use tracing::instrument; +/// Type of container image generated +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum ExportLayout { + /// The original, very simplistic "export into single tarball" + SingleLayer, + /// The first attempt at chunked images, which has some bugs + ChunkedV0, + /// The second and hopefully final chunked image format + ChunkedV1, +} + +impl Default for ExportLayout { + fn default() -> Self { + Self::SingleLayer + } +} + /// Annotation injected into the layer to say that this is an ostree commit. 
 /// However, because this gets lost when converted to D2S2 https://docs.docker.com/registry/spec/manifest-v2-2/
 /// schema, it's not actually useful today. But, we keep it
@@ -74,6 +91,26 @@ fn commit_meta_to_labels<'a>(
     Ok(())
 }

+fn export_chunks(
+    repo: &ostree::Repo,
+    commit: &str,
+    ociw: &mut OciDir,
+    chunks: Vec<Chunk>,
+    opts: &ExportOpts,
+) -> Result<Vec<(Layer, String)>> {
+    chunks
+        .into_iter()
+        .enumerate()
+        .map(|(i, chunk)| -> Result<_> {
+            let mut w = ociw.create_layer(Some(opts.compression()))?;
+            ostree_tar::export_chunk(repo, commit, chunk.content, &mut w)
+                .with_context(|| format!("Exporting chunk {i}"))?;
+            let w = w.into_inner()?;
+            Ok((w.complete()?, chunk.name))
+        })
+        .collect()
+}
+
 /// Write an ostree commit to an OCI blob
 #[context("Writing ostree root to blob")]
 #[allow(clippy::too_many_arguments)]
@@ -88,31 +125,59 @@ fn export_chunked(
     opts: &ExportOpts,
     description: &str,
 ) -> Result<()> {
-    let layers: Result<Vec<_>> = chunking
-        .take_chunks()
-        .into_iter()
-        .enumerate()
-        .map(|(i, chunk)| -> Result<_> {
-            let mut w = ociw.create_layer(Some(opts.compression()))?;
-            ostree_tar::export_chunk(repo, commit, chunk.content, &mut w)
-                .with_context(|| format!("Exporting chunk {i}"))?;
+    let layers = export_chunks(repo, commit, ociw, chunking.take_chunks(), opts)?;
+    let compression = Some(opts.compression());
+
+    match opts.format {
+        ExportLayout::SingleLayer => unreachable!(),
+        ExportLayout::ChunkedV0 => {
+            // In ChunkedV0, the component/content chunks come first.
+            for (layer, name) in layers {
+                ociw.push_layer(manifest, imgcfg, layer, name.as_str());
+            }
+            // Then, export the final layer
+            let mut w = ociw.create_layer(compression)?;
+            ostree_tar::export_final_chunk(repo, commit, chunking.remainder, &mut w)?;
             let w = w.into_inner()?;
-            Ok((w.complete()?, chunk.name))
-        })
-        .collect();
-    for (layer, name) in layers? {
-        ociw.push_layer(manifest, imgcfg, layer, &name);
+            let final_layer = w.complete()?;
+            labels.insert(
+                crate::container::OSTREE_DIFFID_LABEL.into(),
+                format!("sha256:{}", final_layer.uncompressed_sha256),
+            );
+            ociw.push_layer(manifest, imgcfg, final_layer, description);
+            Ok(())
+        }
+        ExportLayout::ChunkedV1 => {
+            // In ChunkedV1, the ostree layer comes first
+            let mut w = ociw.create_layer(compression)?;
+            ostree_tar::export_final_chunk(repo, commit, chunking.remainder, &mut w)?;
+            let w = w.into_inner()?;
+            let ostree_layer = w.complete()?;
+
+            // Then, we have a label that points to the last chunk.
+            // Note in the pathological case of a single layer chunked v1 image, this could be the ostree layer.
+            let last_digest = layers
+                .last()
+                .map(|v| &v.0)
+                .unwrap_or(&ostree_layer)
+                .uncompressed_sha256
+                .clone();
+
+            // Add the ostree layer
+            ociw.push_layer(manifest, imgcfg, ostree_layer, description);
+            // Add the component/content layers
+            for (layer, name) in layers {
+                ociw.push_layer(manifest, imgcfg, layer, name.as_str());
+            }
+            // This label (mentioned above) points to the last layer that is part of
+            // the ostree commit.
+            labels.insert(
+                crate::container::OSTREE_FINAL_LAYER_LABEL.into(),
+                format!("sha256:{}", last_digest),
+            );
+            Ok(())
+        }
     }
-    let mut w = ociw.create_layer(Some(opts.compression()))?;
-    ostree_tar::export_final_chunk(repo, commit, chunking, &mut w)?;
-    let w = w.into_inner()?;
-    let final_layer = w.complete()?;
-    labels.insert(
-        crate::container::OSTREE_DIFFID_LABEL.into(),
-        format!("sha256:{}", final_layer.uncompressed_sha256),
-    );
-    ociw.push_layer(manifest, imgcfg, final_layer, description);
-    Ok(())
 }

 /// Generate an OCI image from a given ostree root
@@ -176,31 +241,48 @@ fn build_oci(
         Cow::Borrowed(commit_subject)
     };

-    if let Some(chunking) = chunking {
-        export_chunked(
-            repo,
-            commit,
-            &mut writer,
-            &mut manifest,
-            &mut imgcfg,
-            labels,
-            chunking,
-            &opts,
-            &description,
-        )?;
-    } else {
-        let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, &opts)?;
-        labels.insert(
-            crate::container::OSTREE_DIFFID_LABEL.into(),
-            format!("sha256:{}", rootfs_blob.uncompressed_sha256),
-        );
-        writer.push_layer_annotated(
-            &mut manifest,
-            &mut imgcfg,
-            rootfs_blob,
-            Some(annos),
-            &description,
-        );
+    match (&opts.format, chunking) {
+        (ExportLayout::SingleLayer, Some(_)) => {
+            anyhow::bail!("Chunking cannot be used with (legacy) single layer images")
+        }
+        (ExportLayout::ChunkedV0 | ExportLayout::ChunkedV1, None) => {
+            anyhow::bail!("Chunked layout requires object ownership metadata")
+        }
+        (ExportLayout::SingleLayer, None) => {
+            let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, &opts)?;
+            // In the legacy single layer case, insert both the diffid and final
+            // layer labels, because they mean the same thing.
+            let label_index_keys = [
+                crate::container::OSTREE_DIFFID_LABEL,
+                crate::container::OSTREE_FINAL_LAYER_LABEL,
+            ];
+            for v in label_index_keys {
+                labels.insert(
+                    v.into(),
+                    format!("sha256:{}", rootfs_blob.uncompressed_sha256),
+                );
+            }
+            writer.push_layer_annotated(
+                &mut manifest,
+                &mut imgcfg,
+                rootfs_blob,
+                Some(annos),
+                &description,
+            );
+        }
+        (ExportLayout::ChunkedV0 | ExportLayout::ChunkedV1, Some(chunking)) => {
+            export_chunked(
+                repo,
+                commit,
+                &mut writer,
+                &mut manifest,
+                &mut imgcfg,
+                labels,
+                chunking,
+                &opts,
+                &description,
+            )?;
+        }
+    }

     // Lookup the cmd embedded in commit metadata
@@ -235,7 +317,19 @@ async fn build_impl(
     contentmeta: Option<ObjectMetaSized>,
     dest: &ImageReference,
 ) -> Result<String> {
-    let mut opts = opts.unwrap_or_default();
+    let mut opts = opts.unwrap_or_else(|| {
+        // For backwards compatibility, if content meta is specified
+        // but no options, assume v0 chunked.
+        let format = if contentmeta.is_some() {
+            ExportLayout::ChunkedV0
+        } else {
+            ExportLayout::default()
+        };
+        ExportOpts {
+            format,
+            ..Default::default()
+        }
+    });
     if dest.transport == Transport::ContainerStorage {
         opts.skip_compression = true;
     }
@@ -289,6 +383,8 @@ pub struct ExportOpts {
     pub copy_meta_keys: Vec<String>,
     /// Maximum number of layers to use
     pub max_layers: Option<NonZeroU32>,
+    /// The container image layout
+    pub format: ExportLayout,
 }

 impl ExportOpts {
diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index 5d5ae59e9..a1f661073 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -31,8 +31,10 @@ use std::ops::Deref;
 /// The label injected into a container image that contains the ostree commit SHA-256.
 pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit";
-/// The label/annotation which contains the sha256 of the final commit.
+/// The label/annotation which contains the sha256 of the final commit in chunked v1 format. const OSTREE_DIFFID_LABEL: &str = "ostree.diffid"; +/// The label/annotation which contains the sha256 of the final layer in chunked v2 format. +const OSTREE_FINAL_LAYER_LABEL: &str = "ostree.final-diffid"; /// Our generic catchall fatal error, expected to be converted /// to a string to output to a terminal or logs. diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index c4a1886fc..3488be281 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -10,6 +10,7 @@ use crate::refescape; use anyhow::{anyhow, Context}; use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; +use futures_util::TryFutureExt; use oci_spec::image::{self as oci_image, Descriptor, History, ImageConfiguration, ImageManifest}; use ostree::prelude::{Cast, ToVariant}; use ostree::{gio, glib}; @@ -170,6 +171,9 @@ impl ManifestLayerState { /// Information about which layers need to be downloaded. #[derive(Debug)] pub struct PreparedImport { + /// The format we found from metadata + #[allow(dead_code)] + pub(crate) export_layout: ExportLayout, /// The manifest digest that was found pub manifest_digest: String, /// The deserialized manifest. @@ -295,21 +299,88 @@ fn layer_from_diffid<'a>( }) } -pub(crate) fn ostree_layer<'a>( +pub(crate) fn parse_manifest_layout<'a>( manifest: &'a ImageManifest, config: &ImageConfiguration, -) -> Result<&'a Descriptor> { - let label = crate::container::OSTREE_DIFFID_LABEL; +) -> Result<( + ExportLayout, + &'a Descriptor, + Vec<&'a Descriptor>, + Vec<&'a Descriptor>, +)> { let config_labels = config.config().as_ref().and_then(|c| c.labels().as_ref()); - let diffid = config_labels.and_then(|labels| labels.get(label)); - // For backwards compatibility, if there's only 1 layer, don't require the label. 
+ + let first_layer = manifest + .layers() + .get(0) + .ok_or_else(|| anyhow!("No layers in manifest"))?; + let info = config_labels.and_then(|labels| { + labels + .get(OSTREE_FINAL_LAYER_LABEL) + .map(|v| (ExportLayout::ChunkedV1, v)) + .or_else(|| { + labels + .get(OSTREE_DIFFID_LABEL) + .map(|v| (ExportLayout::ChunkedV0, v)) + }) + }); + + // Look for the format v1 label + if let Some((layout, target_diffid)) = info { + let target_layer = layer_from_diffid(manifest, config, target_diffid.as_str())?; + let mut chunk_layers = Vec::new(); + let mut derived_layers = Vec::new(); + let mut after_target = false; + // Gather the ostree layer + let ostree_layer = match layout { + ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => target_layer, + ExportLayout::ChunkedV1 => first_layer, + }; + // Now, we need to handle the split differently in chunked v1 vs v0 + match layout { + ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => { + for layer in manifest.layers() { + if layer == target_layer { + if after_target { + anyhow::bail!("Multiple entries for {}", layer.digest()); + } + after_target = true; + } else if !after_target { + chunk_layers.push(layer); + } else { + derived_layers.push(layer); + } + } + } + ExportLayout::ChunkedV1 => { + for layer in manifest.layers() { + if layer == target_layer { + if after_target { + anyhow::bail!("Multiple entries for {}", layer.digest()); + } + after_target = true; + if layer != ostree_layer { + chunk_layers.push(layer); + } + } else if !after_target { + if layer != ostree_layer { + chunk_layers.push(layer); + } + } else { + derived_layers.push(layer); + } + } + } + } + + let r = (layout, ostree_layer, chunk_layers, derived_layers); + return Ok(r); + } + + // For backwards compatibility, if there's only 1 layer, don't require labels. // This can be dropped when we drop format version 0 support. - let r = if let Some(diffid) = diffid { - layer_from_diffid(manifest, config, diffid.as_str())? 
-    } else {
-        &manifest.layers()[0]
-    };
-    Ok(r)
+    let rest = manifest.layers().iter().skip(1).collect();
+    Ok((ExportLayout::SingleLayer, first_layer, Vec::new(), rest))
 }

 impl ImageImporter {
@@ -404,29 +475,22 @@ impl ImageImporter {

         let config = self.proxy.fetch_config(&self.proxy_img).await?;

-        let commit_layer_digest = ostree_layer(&manifest, &config)?.digest();
+        let (export_layout, commit_layer, component_layers, remaining_layers) =
+            parse_manifest_layout(&manifest, &config)?;

-        let mut component_layers = Vec::new();
-        let mut commit_layer = None;
-        let mut remaining_layers = Vec::new();
         let query = |l: &Descriptor| query_layer(&self.repo, l.clone());
-        for layer in manifest.layers() {
-            if layer.digest() == commit_layer_digest {
-                commit_layer = Some(query(layer)?);
-            } else if commit_layer.is_none() {
-                component_layers.push(query(layer)?);
-            } else {
-                remaining_layers.push(query(layer)?);
-            }
-        }
-        let commit_layer = commit_layer.ok_or_else(|| {
-            anyhow!(
-                "Image does not contain ostree-exported layer {}",
-                commit_layer_digest
-            )
-        })?;
+        let commit_layer = query(commit_layer)?;
+        let component_layers = component_layers
+            .into_iter()
+            .map(query)
+            .collect::<Result<Vec<_>>>()?;
+        let remaining_layers = remaining_layers
+            .into_iter()
+            .map(query)
+            .collect::<Result<Vec<_>>>()?;

         let imp = PreparedImport {
+            export_layout,
             manifest,
             manifest_digest,
             config,
@@ -493,7 +557,8 @@ impl ImageImporter {
                 };
                 txn.commit(Some(cancellable))?;
                 Ok::<_, anyhow::Error>(commit)
-            });
+            })
+            .map_err(|e| e.context(format!("Layer {}", layer.digest())));
             let commit = super::unencapsulate::join_fetch(import_task, driver).await?;
             layer.commit = commit;
             if let Some(p) = self.layer_progress.as_ref() {
diff --git a/lib/src/container/update_detachedmeta.rs b/lib/src/container/update_detachedmeta.rs
index cef2bdce9..e7a9e258c 100644
--- a/lib/src/container/update_detachedmeta.rs
+++ b/lib/src/container/update_detachedmeta.rs
@@ -1,5 +1,5 @@
 use super::ImageReference;
-use crate::container::{ocidir, skopeo};
+use crate::container::{ocidir, skopeo, ExportLayout};
 use crate::container::{store as container_store, Transport};
 use anyhow::{anyhow, Context, Result};
 use camino::Utf8Path;
@@ -61,7 +61,8 @@ pub async fn update_detached_metadata(
         .ok_or_else(|| anyhow!("Image is missing container configuration"))?;

     // Find the OSTree commit layer we want to replace
-    let commit_layer = container_store::ostree_layer(&manifest, &config)?;
+    let (export_layout, commit_layer, _, _) =
+        container_store::parse_manifest_layout(&manifest, &config)?;
     let commit_layer_idx = manifest
         .layers()
         .iter()
@@ -102,10 +103,24 @@ pub async fn update_detached_metadata(
     config.rootfs_mut().diff_ids_mut()[commit_layer_idx] = out_layer_diffid.clone();

     let labels = ctrcfg.labels_mut().get_or_insert_with(Default::default);
-    labels.insert(
-        crate::container::OSTREE_DIFFID_LABEL.into(),
-        out_layer_diffid,
-    );
+    match export_layout {
+        ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => {
+            labels.insert(
+                crate::container::OSTREE_DIFFID_LABEL.into(),
+                out_layer_diffid,
+            );
+        }
+        ExportLayout::ChunkedV1 => {
+            // Nothing to do except in the special case where there's somehow only one
+            // chunked layer.
+ if manifest.layers().len() == 1 { + labels.insert( + crate::container::OSTREE_FINAL_LAYER_LABEL.into(), + out_layer_diffid, + ); + } + } + } config.set_config(Some(ctrcfg)); // Write the config and manifest diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 139a6a760..4cd648467 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -3,7 +3,7 @@ #![allow(missing_docs)] use crate::chunking::ObjectMetaSized; -use crate::container::{Config, ExportOpts, ImageReference, Transport}; +use crate::container::{Config, ExportLayout, ExportOpts, ImageReference, Transport}; use crate::objectsource::{ObjectMeta, ObjectSourceMeta}; use crate::prelude::*; use crate::{gio, glib}; @@ -606,8 +606,16 @@ impl Fixture { /// Export the current ref as a container image. /// This defaults to using chunking. #[context("Exporting container")] - pub async fn export_container(&self) -> Result<(ImageReference, String)> { - let container_path = &self.path.join("oci"); + pub async fn export_container( + &self, + export_format: ExportLayout, + ) -> Result<(ImageReference, String)> { + let name = match export_format { + ExportLayout::SingleLayer => "oci-single", + ExportLayout::ChunkedV0 => "oci-chunked-v0", + ExportLayout::ChunkedV1 => "oci-chunked-v1", + }; + let container_path = &self.path.join(name); if container_path.exists() { std::fs::remove_dir_all(container_path)?; } @@ -627,7 +635,10 @@ impl Fixture { let contentmeta = self.get_object_meta().context("Computing object meta")?; let contentmeta = ObjectMetaSized::compute_sizes(self.srcrepo(), contentmeta) .context("Computing sizes")?; - let opts = ExportOpts::default(); + let opts = ExportOpts { + format: export_format, + ..Default::default() + }; let digest = crate::container::encapsulate( self.srcrepo(), self.testref(), diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 1cbc4d050..b6cada131 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -2,7 +2,7 @@ use std::path::Path; -use crate::container::ocidir; +use crate::container::{ocidir, ExportLayout}; use anyhow::Result; use camino::Utf8Path; use cap_std::fs::Dir; @@ -96,8 +96,10 @@ fn test_proxy_auth() -> Result<()> { /// Useful for debugging things interactively. pub(crate) async fn create_fixture() -> Result<()> { let fixture = crate::fixture::Fixture::new_v1()?; - let imgref = fixture.export_container().await?.0; - println!("Wrote: {:?}", imgref); + for format in [ExportLayout::ChunkedV0, ExportLayout::ChunkedV1] { + let imgref = fixture.export_container(format).await?.0; + println!("Wrote: {:?}", imgref); + } let path = fixture.into_tempdir().into_path(); println!("Wrote: {:?}", path); Ok(()) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 157ff4045..f7180d98e 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -626,7 +626,7 @@ pub(crate) fn export_chunk( pub(crate) fn export_final_chunk( repo: &ostree::Repo, commit_checksum: &str, - chunking: chunking::Chunking, + remainder: chunking::Chunk, out: &mut tar::Builder, ) -> Result<()> { // For chunking, we default to format version 1 @@ -641,7 +641,7 @@ pub(crate) fn export_final_chunk( writer.structure_only = true; writer.write_commit()?; writer.structure_only = false; - write_chunk(writer, chunking.remainder.content) + write_chunk(writer, remainder.content) } /// Process an exported tar stream, and update the detached metadata. 
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 3a501b498..98df9796b 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -4,7 +4,7 @@ use cap_std::fs::{Dir, DirBuilder}; use once_cell::sync::Lazy; use ostree::cap_std; use ostree_ext::chunking::ObjectMetaSized; -use ostree_ext::container::store; +use ostree_ext::container::{store, ExportLayout}; use ostree_ext::container::{ Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport, }; @@ -459,7 +459,7 @@ fn skopeo_inspect_config(imgref: &str) -> Result Result<()> { +async fn impl_test_container_import_export(export_format: ExportLayout) -> Result<()> { let fixture = Fixture::new_v1()?; let testrev = fixture .srcrepo() @@ -481,6 +481,10 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { ..Default::default() }; // If chunking is requested, compute object ownership and size mappings + let chunked = matches!( + export_format, + ExportLayout::ChunkedV0 | ExportLayout::ChunkedV1 + ); let contentmeta = chunked .then(|| { let meta = fixture.get_object_meta().context("Computing object meta")?; @@ -489,6 +493,7 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { .transpose()?; let opts = ExportOpts { copy_meta_keys: vec!["buildsys.checksum".to_string()], + format: export_format, ..Default::default() }; let digest = ostree_ext::container::encapsulate( @@ -619,11 +624,15 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { } /// Parse a chunked container image and validate its structure; particularly -fn validate_chunked_structure(oci_path: &Utf8Path) -> Result<()> { +fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Result<()> { let d = Dir::open_ambient_dir(oci_path, cap_std::ambient_authority())?; let d = ocidir::OciDir::open(&d)?; let manifest = d.read_manifest()?; - let ostree_layer = manifest.layers().last().unwrap(); + let ostree_layer = match format { + ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => manifest.layers().last(), + ExportLayout::ChunkedV1 => manifest.layers().first(), + } + .unwrap(); let ostree_layer_blob = d .read_blob(ostree_layer) .map(BufReader::new) @@ -633,11 +642,20 @@ fn validate_chunked_structure(oci_path: &Utf8Path) -> Result<()> { } #[tokio::test] -async fn impl_test_container_chunked() -> Result<()> { +async fn test_container_chunked_v0() -> Result<()> { + impl_test_container_chunked(ExportLayout::ChunkedV0).await +} + +#[tokio::test] +async fn test_container_chunked_v1() -> Result<()> { + impl_test_container_chunked(ExportLayout::ChunkedV1).await +} + +async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> { let nlayers = 6u32; let mut fixture = Fixture::new_v1()?; - let (imgref, expected_digest) = fixture.export_container().await.unwrap(); + let (imgref, expected_digest) = fixture.export_container(format).await.unwrap(); let imgref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: imgref, @@ -647,7 +665,7 @@ async fn impl_test_container_chunked() -> Result<()> { ImageReference { transport: Transport::OciDir, name, - } => validate_chunked_structure(Utf8Path::new(name)).unwrap(), + } => validate_chunked_structure(Utf8Path::new(name), format).unwrap(), _ => unreachable!(), }; @@ -676,7 +694,7 @@ r usr/bin/bash bash-v0 .update(FileDef::iter_from(ADDITIONS), std::iter::empty()) .context("Failed to update")?; - let expected_digest = fixture.export_container().await.unwrap().1; + let expected_digest = 
fixture.export_container(format).await.unwrap().1;
     assert_ne!(digest, expected_digest);

     let mut imp =
@@ -691,10 +709,22 @@ r usr/bin/bash bash-v0
     assert!(prep.ostree_commit_layer.commit.is_none());
     assert_eq!(prep.ostree_layers.len(), nlayers as usize);
     let (first, second) = (to_fetch[0], to_fetch[1]);
-    assert_eq!(first.1, "bash");
     assert!(first.0.commit.is_none());
-    assert!(second.1.starts_with("ostree export of commit"));
     assert!(second.0.commit.is_none());
+    match format {
+        ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => {
+            assert_eq!(first.1, "bash");
+            assert!(
+                second.1.starts_with("ostree export of commit"),
+                "{}",
+                second.1
+            );
+        }
+        ExportLayout::ChunkedV1 => {
+            assert_eq!(first.1, "testlink");
+            assert_eq!(second.1, "bash");
+        }
+    }

     let _import = imp.import(prep).await.unwrap();

@@ -790,10 +820,24 @@ async fn oci_clone(src: impl AsRef<Utf8Path>, dest: impl AsRef<Utf8Path>) -> Res
 }

 #[tokio::test]
-async fn test_container_import_export() -> Result<()> {
-    impl_test_container_import_export(false).await.unwrap();
-    impl_test_container_import_export(true).await.unwrap();
-    Ok(())
+async fn test_container_import_export_single_layer() {
+    impl_test_container_import_export(ExportLayout::SingleLayer)
+        .await
+        .unwrap()
+}
+
+#[tokio::test]
+async fn test_container_import_export_chunked_v0() {
+    impl_test_container_import_export(ExportLayout::ChunkedV0)
+        .await
+        .unwrap()
+}
+
+#[tokio::test]
+async fn test_container_import_export_chunked_v1() {
+    impl_test_container_import_export(ExportLayout::ChunkedV1)
+        .await
+        .unwrap()
 }

 /// But layers work via the container::write module.

From e92408c3f31b1fe3dcadfa89b7ed37ee916d4a6b Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 28 Jun 2022 10:19:33 -0400
Subject: [PATCH 406/775] container: Condense `ExportLayout` to just V0 and V1

I had a galaxy brain moment earlier today when looking at this PR; a
lot of the matching was doing `SingleLayer | ChunkedV0`.  Everything
just gets simpler if we consider the "single layer" a special case of a
"chunked" image with a single chunk - which we already support!

In the export code, if no content mapping is provided we create a
default chunking.  (This can and should be optimized later to avoid
traversing all objects up front; we can special case this.)

Another way to look at this: from the tar stream perspective, the
export now produces:

- all directories and ostree metadata
- all content objects in hash ordering

And here's a key bit: The old (<= ostree-ext 0.7) tar parser will still
parse this just fine - it doesn't currently care about object ordering.
And the same is true for container runtimes.
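As a rough sketch, the unified export path described above boils down to
something like this (illustrative only; `Chunking::from_mapping` and
`Chunking::new` are the real constructors used in the diff below, but
the surrounding control flow is simplified here):

    // Sketch: treat "no content mapping" as a default chunking whose
    // only layer is the ostree commit/metadata remainder - i.e. the old
    // single-layer image becomes a chunked image with a single chunk.
    let chunking = match contentmeta {
        // Object ownership metadata provided: split content into chunks.
        Some(meta) => Chunking::from_mapping(repo, commit, meta, opts.max_layers)?,
        // No mapping: default chunking, equivalent to the old single layer.
        None => Chunking::new(repo, commit)?,
    };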
--- lib/src/container/encapsulate.rs | 121 +++++++---------------- lib/src/container/mod.rs | 4 - lib/src/container/store.rs | 23 ++--- lib/src/container/update_detachedmeta.rs | 14 +-- lib/src/fixture.rs | 5 +- lib/src/integrationtest.rs | 2 +- lib/tests/it/main.rs | 44 ++++----- 7 files changed, 77 insertions(+), 136 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 80b461438..49205f8b9 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -23,17 +23,25 @@ use tracing::instrument; /// Type of container image generated #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ExportLayout { - /// The original, very simplistic "export into single tarball" - SingleLayer, - /// The first attempt at chunked images, which has some bugs - ChunkedV0, - /// The second and hopefully final chunked image format - ChunkedV1, + /// Actually the second layout now, but the true first one can be parsed as either + V0, + /// The hopefully final (optionally chunked) container image layout + V1, } impl Default for ExportLayout { fn default() -> Self { - Self::SingleLayer + // For now + Self::V0 + } +} + +impl ExportLayout { + pub(crate) fn label(&self) -> &'static str { + match self { + ExportLayout::V0 => "ostree.diffid", + ExportLayout::V1 => "ostree.final-diffid", + } } } @@ -51,20 +59,6 @@ pub struct Config { pub cmd: Option>, } -/// Write an ostree commit to an OCI blob -#[context("Writing ostree root to blob")] -fn export_ostree_ref( - repo: &ostree::Repo, - rev: &str, - writer: &mut OciDir, - opts: &ExportOpts, -) -> Result { - let commit = repo.require_rev(rev)?; - let mut w = writer.create_raw_layer(Some(opts.compression()))?; - ostree_tar::export_commit(repo, commit.as_str(), &mut w, None)?; - w.complete() -} - fn commit_meta_to_labels<'a>( meta: &glib::VariantDict, keys: impl IntoIterator, @@ -129,9 +123,8 @@ fn export_chunked( let compression = Some(opts.compression()); match opts.format { - ExportLayout::SingleLayer => unreachable!(), - ExportLayout::ChunkedV0 => { - // In ChunkedV0, the component/content chunks come first. + ExportLayout::V0 => { + // In V0, the component/content chunks come first. for (layer, name) in layers { ociw.push_layer(manifest, imgcfg, layer, name.as_str()); } @@ -141,14 +134,14 @@ fn export_chunked( let w = w.into_inner()?; let final_layer = w.complete()?; labels.insert( - crate::container::OSTREE_DIFFID_LABEL.into(), + opts.format.label().into(), format!("sha256:{}", final_layer.uncompressed_sha256), ); ociw.push_layer(manifest, imgcfg, final_layer, description); Ok(()) } - ExportLayout::ChunkedV1 => { - // In ChunkedV1, the ostree layer comes first + ExportLayout::V1 => { + // In V1, the ostree layer comes first let mut w = ociw.create_layer(compression)?; ostree_tar::export_final_chunk(repo, commit, chunking.remainder, &mut w)?; let w = w.into_inner()?; @@ -172,7 +165,7 @@ fn export_chunked( // This label (mentioned above) points to the last layer that is part of // the ostree commit. labels.insert( - crate::container::OSTREE_FINAL_LAYER_LABEL.into(), + opts.format.label().into(), format!("sha256:{}", last_digest), ); Ok(()) @@ -223,6 +216,10 @@ fn build_oci( let chunking = contentmeta .map(|meta| crate::chunking::Chunking::from_mapping(repo, commit, meta, opts.max_layers)) .transpose()?; + // If no chunking was provided, create a logical single chunk. 
+    let chunking = chunking
+        .map(Ok)
+        .unwrap_or_else(|| crate::chunking::Chunking::new(repo, commit))?;

     if let Some(version) = commit_meta.lookup::<String>("version")? {
         labels.insert("version".into(), version);
@@ -241,49 +238,17 @@ fn build_oci(
         Cow::Borrowed(commit_subject)
     };

-    match (&opts.format, chunking) {
-        (ExportLayout::SingleLayer, Some(_)) => {
-            anyhow::bail!("Chunking cannot be used with (legacy) single layer images")
-        }
-        (ExportLayout::ChunkedV0 | ExportLayout::ChunkedV1, None) => {
-            anyhow::bail!("Chunked layout requires object ownership metadata")
-        }
-        (ExportLayout::SingleLayer, None) => {
-            let rootfs_blob = export_ostree_ref(repo, commit, &mut writer, &opts)?;
-            // In the legacy single layer case, insert both the diffid and final
-            // layer labels, because they mean the same thing.
-            let label_index_keys = [
-                crate::container::OSTREE_DIFFID_LABEL,
-                crate::container::OSTREE_FINAL_LAYER_LABEL,
-            ];
-            for v in label_index_keys {
-                labels.insert(
-                    v.into(),
-                    format!("sha256:{}", rootfs_blob.uncompressed_sha256),
-                );
-            }
-            writer.push_layer_annotated(
-                &mut manifest,
-                &mut imgcfg,
-                rootfs_blob,
-                Some(annos),
-                &description,
-            );
-        }
-        (ExportLayout::ChunkedV0 | ExportLayout::ChunkedV1, Some(chunking)) => {
-            export_chunked(
-                repo,
-                commit,
-                &mut writer,
-                &mut manifest,
-                &mut imgcfg,
-                labels,
-                chunking,
-                &opts,
-                &description,
-            )?;
-        }
-    }
+    export_chunked(
+        repo,
+        commit,
+        &mut writer,
+        &mut manifest,
+        &mut imgcfg,
+        labels,
+        chunking,
+        &opts,
+        &description,
+    )?;

     // Lookup the cmd embedded in commit metadata
     let cmd = commit_meta.lookup::<Vec<String>>(ostree::COMMIT_META_CONTAINER_CMD)?;
@@ -317,19 +282,7 @@ async fn build_impl(
     contentmeta: Option<ObjectMetaSized>,
     dest: &ImageReference,
 ) -> Result<String> {
-    let mut opts = opts.unwrap_or_else(|| {
-        // For backwards compatibility, if content meta is specified
-        // but no options, assume v0 chunked.
-        let format = if contentmeta.is_some() {
-            ExportLayout::ChunkedV0
-        } else {
-            ExportLayout::default()
-        };
-        ExportOpts {
-            format,
-            ..Default::default()
-        }
-    });
+    let mut opts = opts.unwrap_or_default();
     if dest.transport == Transport::ContainerStorage {
         opts.skip_compression = true;
     }
diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index a1f661073..7e51ba014 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -31,10 +31,6 @@ use std::ops::Deref;
 /// The label injected into a container image that contains the ostree commit SHA-256.
 pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit";

-/// The label/annotation which contains the sha256 of the final commit in chunked v1 format.
-const OSTREE_DIFFID_LABEL: &str = "ostree.diffid";
-/// The label/annotation which contains the sha256 of the final layer in chunked v2 format.
-const OSTREE_FINAL_LAYER_LABEL: &str = "ostree.final-diffid";

 /// Our generic catchall fatal error, expected to be converted
 /// to a string to output to a terminal or logs.
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 3488be281..44a18aa17 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -280,6 +280,7 @@ pub fn manifest_digest_from_commit(commit: &glib::Variant) -> Result<String> {
 /// we require a 1-to-1 mapping between the two up until the ostree level.
/// For a bit more information on this, see https://github.com/opencontainers/image-spec/blob/main/config.md fn layer_from_diffid<'a>( + layout: ExportLayout, manifest: &'a ImageManifest, config: &ImageConfiguration, diffid: &str, @@ -289,7 +290,7 @@ fn layer_from_diffid<'a>( .diff_ids() .iter() .position(|x| x.as_str() == diffid) - .ok_or_else(|| anyhow!("Missing {} {}", OSTREE_DIFFID_LABEL, diffid))?; + .ok_or_else(|| anyhow!("Missing {} {}", layout.label(), diffid))?; manifest.layers().get(idx).ok_or_else(|| { anyhow!( "diffid position {} exceeds layer count {}", @@ -316,29 +317,29 @@ pub(crate) fn parse_manifest_layout<'a>( .ok_or_else(|| anyhow!("No layers in manifest"))?; let info = config_labels.and_then(|labels| { labels - .get(OSTREE_FINAL_LAYER_LABEL) - .map(|v| (ExportLayout::ChunkedV1, v)) + .get(ExportLayout::V1.label()) + .map(|v| (ExportLayout::V1, v)) .or_else(|| { labels - .get(OSTREE_DIFFID_LABEL) - .map(|v| (ExportLayout::ChunkedV0, v)) + .get(ExportLayout::V0.label()) + .map(|v| (ExportLayout::V0, v)) }) }); // Look for the format v1 label if let Some((layout, target_diffid)) = info { - let target_layer = layer_from_diffid(manifest, config, target_diffid.as_str())?; + let target_layer = layer_from_diffid(layout, manifest, config, target_diffid.as_str())?; let mut chunk_layers = Vec::new(); let mut derived_layers = Vec::new(); let mut after_target = false; // Gather the ostree layer let ostree_layer = match layout { - ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => target_layer, - ExportLayout::ChunkedV1 => first_layer, + ExportLayout::V0 => target_layer, + ExportLayout::V1 => first_layer, }; // Now, we need to handle the split differently in chunked v1 vs v0 match layout { - ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => { + ExportLayout::V0 => { for layer in manifest.layers() { if layer == target_layer { if after_target { @@ -352,7 +353,7 @@ pub(crate) fn parse_manifest_layout<'a>( } } } - ExportLayout::ChunkedV1 => { + ExportLayout::V1 => { for layer in manifest.layers() { if layer == target_layer { if after_target { @@ -380,7 +381,7 @@ pub(crate) fn parse_manifest_layout<'a>( // For backwards compatibility, if there's only 1 layer, don't require labels. // This can be dropped when we drop format version 0 support. let rest = manifest.layers().iter().skip(1).collect(); - Ok((ExportLayout::SingleLayer, first_layer, Vec::new(), rest)) + Ok((ExportLayout::V0, first_layer, Vec::new(), rest)) } impl ImageImporter { diff --git a/lib/src/container/update_detachedmeta.rs b/lib/src/container/update_detachedmeta.rs index e7a9e258c..3ba61dd8c 100644 --- a/lib/src/container/update_detachedmeta.rs +++ b/lib/src/container/update_detachedmeta.rs @@ -104,20 +104,14 @@ pub async fn update_detached_metadata( let labels = ctrcfg.labels_mut().get_or_insert_with(Default::default); match export_layout { - ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => { - labels.insert( - crate::container::OSTREE_DIFFID_LABEL.into(), - out_layer_diffid, - ); + ExportLayout::V0 => { + labels.insert(export_layout.label().into(), out_layer_diffid); } - ExportLayout::ChunkedV1 => { + ExportLayout::V1 => { // Nothing to do except in the special case where there's somehow only one // chunked layer. 
if manifest.layers().len() == 1 { - labels.insert( - crate::container::OSTREE_FINAL_LAYER_LABEL.into(), - out_layer_diffid, - ); + labels.insert(export_layout.label().into(), out_layer_diffid); } } } diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 4cd648467..7ee81fb0e 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -611,9 +611,8 @@ impl Fixture { export_format: ExportLayout, ) -> Result<(ImageReference, String)> { let name = match export_format { - ExportLayout::SingleLayer => "oci-single", - ExportLayout::ChunkedV0 => "oci-chunked-v0", - ExportLayout::ChunkedV1 => "oci-chunked-v1", + ExportLayout::V0 => "oci-v0", + ExportLayout::V1 => "oci-v1", }; let container_path = &self.path.join(name); if container_path.exists() { diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index b6cada131..869548c58 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -96,7 +96,7 @@ fn test_proxy_auth() -> Result<()> { /// Useful for debugging things interactively. pub(crate) async fn create_fixture() -> Result<()> { let fixture = crate::fixture::Fixture::new_v1()?; - for format in [ExportLayout::ChunkedV0, ExportLayout::ChunkedV1] { + for format in [ExportLayout::V0, ExportLayout::V1] { let imgref = fixture.export_container(format).await?.0; println!("Wrote: {:?}", imgref); } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 98df9796b..18274fbfc 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -459,7 +459,10 @@ fn skopeo_inspect_config(imgref: &str) -> Result Result<()> { +async fn impl_test_container_import_export( + export_format: ExportLayout, + chunked: bool, +) -> Result<()> { let fixture = Fixture::new_v1()?; let testrev = fixture .srcrepo() @@ -481,10 +484,6 @@ async fn impl_test_container_import_export(export_format: ExportLayout) -> Resul ..Default::default() }; // If chunking is requested, compute object ownership and size mappings - let chunked = matches!( - export_format, - ExportLayout::ChunkedV0 | ExportLayout::ChunkedV1 - ); let contentmeta = chunked .then(|| { let meta = fixture.get_object_meta().context("Computing object meta")?; @@ -629,8 +628,8 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu let d = ocidir::OciDir::open(&d)?; let manifest = d.read_manifest()?; let ostree_layer = match format { - ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => manifest.layers().last(), - ExportLayout::ChunkedV1 => manifest.layers().first(), + ExportLayout::V0 => manifest.layers().last(), + ExportLayout::V1 => manifest.layers().first(), } .unwrap(); let ostree_layer_blob = d @@ -643,12 +642,12 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu #[tokio::test] async fn test_container_chunked_v0() -> Result<()> { - impl_test_container_chunked(ExportLayout::ChunkedV0).await + impl_test_container_chunked(ExportLayout::V0).await } #[tokio::test] async fn test_container_chunked_v1() -> Result<()> { - impl_test_container_chunked(ExportLayout::ChunkedV1).await + impl_test_container_chunked(ExportLayout::V1).await } async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> { @@ -712,7 +711,7 @@ r usr/bin/bash bash-v0 assert!(first.0.commit.is_none()); assert!(second.0.commit.is_none()); match format { - ExportLayout::SingleLayer | ExportLayout::ChunkedV0 => { + ExportLayout::V0 => { assert_eq!(first.1, "bash"); assert!( second.1.starts_with("ostree export of commit"), @@ -720,7 +719,7 @@ r usr/bin/bash bash-v0 second.1 ); } - 
ExportLayout::ChunkedV1 => { + ExportLayout::V1 => { assert_eq!(first.1, "testlink"); assert_eq!(second.1, "bash"); } @@ -820,24 +819,23 @@ async fn oci_clone(src: impl AsRef, dest: impl AsRef) -> Res } #[tokio::test] -async fn test_container_import_export_single_layer() { - impl_test_container_import_export(ExportLayout::SingleLayer) +async fn test_container_import_export_v0() { + impl_test_container_import_export(ExportLayout::V0, false) .await - .unwrap() -} - -#[tokio::test] -async fn test_container_import_export_chunked_v0() { - impl_test_container_import_export(ExportLayout::ChunkedV0) + .unwrap(); + impl_test_container_import_export(ExportLayout::V0, true) .await - .unwrap() + .unwrap(); } #[tokio::test] -async fn test_container_import_export_chunked_v1() { - impl_test_container_import_export(ExportLayout::ChunkedV1) +async fn test_container_import_export_v1() { + impl_test_container_import_export(ExportLayout::V1, false) .await - .unwrap() + .unwrap(); + impl_test_container_import_export(ExportLayout::V1, true) + .await + .unwrap(); } /// But layers work via the container::write module. From 7425b4901997535f5cd93f6278f09344aae2ee5e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 28 Jun 2022 10:29:05 -0400 Subject: [PATCH 407/775] container: Make `export_layout` bit in prepared import `pub` Right now we're not using it, but I think we should support callers e.g. logging it at least. --- lib/src/container/store.rs | 3 +-- lib/tests/it/main.rs | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 44a18aa17..44b13ddbd 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -172,8 +172,7 @@ impl ManifestLayerState { #[derive(Debug)] pub struct PreparedImport { /// The format we found from metadata - #[allow(dead_code)] - pub(crate) export_layout: ExportLayout, + pub export_layout: ExportLayout, /// The manifest digest that was found pub manifest_digest: String, /// The deserialized manifest. diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 18274fbfc..83148cbb2 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -674,6 +674,7 @@ async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> { store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), store::PrepareResult::Ready(r) => r, }; + assert_eq!(prep.export_layout, format); let digest = prep.manifest_digest.clone(); assert!(prep.ostree_commit_layer.commit.is_none()); assert_eq!(prep.ostree_layers.len(), nlayers as usize); From d5bd98d03b07a1947b7ac98583c20e9e3a765cc1 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 27 Jun 2022 20:48:16 -0400 Subject: [PATCH 408/775] lib/container: `#[allow(dead_code)]` for ocidir When compiling from an external crate, dead code analysis hits a few things that we don't currently use. 
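Concretely, the resulting module declarations look roughly like this (a
sketch of the pattern, mirroring the one-line diff below):

    // Expose ocidir for the internal testing feature; otherwise keep it
    // private and silence dead-code lints for external consumers that
    // don't exercise every helper.
    #[cfg(feature = "internal-testing-api")]
    pub mod ocidir;
    #[cfg(not(feature = "internal-testing-api"))]
    #[allow(dead_code)]
    mod ocidir;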
---
 lib/src/container/mod.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index 5d5ae59e9..cc273f9a1 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -241,6 +241,7 @@ pub use unencapsulate::*;
 #[cfg(feature = "internal-testing-api")]
 pub mod ocidir;
 #[cfg(not(feature = "internal-testing-api"))]
+#[allow(dead_code)]
 mod ocidir;
 mod skopeo;
 pub mod store;

From 784562d067e754282f455014fe4c145292208a54 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 28 Jun 2022 12:36:04 -0400
Subject: [PATCH 409/775] Move `io-std` tokio feature into `lib`

Our project is a workspace, and `cargo build` will build both `cli/`
and `lib/`.  Feature unification means the build gets all tokio
features.

But, I was trying to use the rust-analyzer ability to run an individual
test, and that broke with the recent tokio feature cleanup because
we're trying to just build the library, which actually does need
`io-std`.  (For us, all the code for the CLI is in the library)

So move the feature there, and add CI coverage for individual builds.
---
 .github/workflows/rust.yml | 2 ++
 cli/Cargo.toml             | 2 +-
 lib/Cargo.toml             | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 926d4eb6b..700740857 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -37,6 +37,8 @@ jobs:
           key: "tests"
       - name: Build
         run: cargo test --no-run
+      - name: Individual checks
+        run: (cd cli && cargo check) && (cd lib && cargo check)
       - name: Run tests
         run: cargo test -- --nocapture --quiet
   build:
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index be624c8a0..ec9d0abc5 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -13,7 +13,7 @@ ostree-ext = { path = "../lib" }
 clap = "2.33.3"
 structopt = "0.3.21"
 libc = "0.2.92"
-tokio = { version = "1", features = ["io-std", "macros"] }
+tokio = { version = "1", features = ["macros"] }
 log = "0.4.0"
 tracing = "0.1"
 tracing-subscriber = "0.2.17"
diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 8e84a18f5..bf1ec77dc 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -39,7 +39,7 @@ structopt = "0.3.21"
 tar = "0.4.38"
 tempfile = "3.2.0"
 term_size = "0.3.2"
-tokio = { features = ["time", "process", "rt", "net"], version = ">= 1.13.0" }
+tokio = { features = ["io-std", "time", "process", "rt", "net"], version = ">= 1.13.0" }
 tokio-util = { features = ["io-util"], version = "0.6.9" }
 tokio-stream = { features = ["sync"], version = "0.1.8" }
 tracing = "0.1"

From 41028977d77278c187510aff2449d2a311784d59 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 28 Jun 2022 12:33:17 -0400
Subject: [PATCH 410/775] tests: Cover old ostree parsing latest code

In a recent update we changed the serialization format just for chunked
images; we need to test that the *old* ostree knows how to parse at
least our changes to the v0 format, which should be compatible.

Today rpm-ostree vendors a stable ostree-rs-ext; this test leverages
that.
---
 lib/tests/it/main.rs | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 83148cbb2..6920da8d6 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -1018,6 +1018,44 @@ async fn test_container_write_derive() -> Result<()> {
     Ok(())
 }

+#[tokio::test]
+// Today rpm-ostree vendors a stable ostree-rs-ext; this test
+// verifies that the old ostree-rs-ext code can parse the containers
+// generated by the new ostree code.
+async fn test_old_code_parses_new_export() -> Result<()> { + let rpmostree = Utf8Path::new("/usr/bin/rpm-ostree"); + if !rpmostree.exists() { + return Ok(()); + } + let fixture = Fixture::new_v1()?; + // We're testing the v0 version that was already shipped + let layout = ExportLayout::V0; + let imgref = fixture.export_container(layout).await?.0; + let imgref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref, + }; + fixture.clear_destrepo()?; + let destrepo_path = fixture.path.join("dest/repo"); + let s = Command::new("ostree") + .args(&[ + "container", + "unencapsulate", + "--repo", + destrepo_path.as_str(), + imgref.to_string().as_str(), + ]) + .output()?; + if !s.status.success() { + anyhow::bail!( + "Failed to run ostree: {:?}: {}", + s, + String::from_utf8_lossy(&s.stderr) + ); + } + Ok(()) +} + #[ignore] #[tokio::test] // Verify that we can push and pull to a registry, not just oci-archive:. From 2b3648d84d1afa3f80be637ed9585f007b91051b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 29 Jun 2022 14:28:42 -0400 Subject: [PATCH 411/775] container: Lazily create error I happened to be looking at this code and noticed we can lazily create the error, which is nicer. --- lib/src/container/store.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 44b13ddbd..a731da35f 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -206,7 +206,7 @@ impl PreparedImport { &self, ) -> impl Iterator> { // FIXME use .filter(|h| h.empty_layer.unwrap_or_default()) after https://github.com/containers/oci-spec-rs/pull/100 lands. - let truncated = std::iter::once(Err(anyhow::anyhow!("Truncated history"))); + let truncated = std::iter::once_with(|| Err(anyhow::anyhow!("Truncated history"))); let history = self.config.history().iter().map(Ok).chain(truncated); self.all_layers() .zip(history) From 97ad6e930f2117877ec22e11087db8d024d68889 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 6 Jul 2022 20:13:30 -0400 Subject: [PATCH 412/775] lib: Bump tokio-util It's the latest version of the project. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index bf1ec77dc..a7ccf513a 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -40,7 +40,7 @@ tar = "0.4.38" tempfile = "3.2.0" term_size = "0.3.2" tokio = { features = ["io-std", "time", "process", "rt", "net"], version = ">= 1.13.0" } -tokio-util = { features = ["io-util"], version = "0.6.9" } +tokio-util = { features = ["io-util"], version = "0.7" } tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" From a81530d12dc77a11d247940dc1e83e88fd30ee10 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 18 Jul 2022 12:47:08 -0400 Subject: [PATCH 413/775] deny: Allowlist `Unicode-DFS-2016` See https://github.com/dtolnay/unicode-ident/pull/9 This is a FOSS license, see e.g. 
https://fedoraproject.org/wiki/Licensing/Unicode
---
 deny.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deny.toml b/deny.toml
index 75b6ac9b2..24802969c 100644
--- a/deny.toml
+++ b/deny.toml
@@ -1,6 +1,6 @@
 [licenses]
 unlicensed = "deny"
-allow = ["Apache-2.0", "Apache-2.0 WITH LLVM-exception", "MIT", "BSD-3-Clause", "BSD-2-Clause"]
+allow = ["Apache-2.0", "Apache-2.0 WITH LLVM-exception", "MIT", "BSD-3-Clause", "BSD-2-Clause", "Unicode-DFS-2016"]

 [bans]

From cb971c689f2d51f4bbcf77727977d385e3007799 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 18 Jul 2022 10:25:01 -0400
Subject: [PATCH 414/775] tests: Emit expected modes in octal

Easier to read this way.
---
 lib/tests/it/main.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 6920da8d6..dbf7e7662 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -248,6 +248,7 @@ fn validate_tar_expected(
         assert_eq!(header.entry_type(), exp.etype, "{}", entry_path);
         let is_old_object = format_version == 0;
         let mut expected_mode = exp.mode;
+        let header_mode = header.mode().unwrap();
         if is_old_object && !entry_path.starts_with("sysroot/") {
             let fmtbits = match header.entry_type() {
                 tar::EntryType::Regular => libc::S_IFREG,
@@ -258,9 +259,9 @@ fn validate_tar_expected(
             expected_mode |= fmtbits;
         }
         assert_eq!(
-            header.mode().unwrap(),
+            header_mode,
             expected_mode,
-            "fmtver: {} type: {:?} path: {}",
+            "h={header_mode:o} e={expected_mode:o} fmtver: {} type: {:?} path: {}",
             format_version,
             header.entry_type(),
             entry_path

From 0c5165008c05113c4d3501ae43ad509d542bfaa4 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 15 Jul 2022 16:45:34 -0400
Subject: [PATCH 415/775] tests: Add two subdirectories with identical content

Prep for fixing https://github.com/coreos/fedora-coreos-tracker/issues/1258
---
 lib/src/fixture.rs   |  9 ++++++++-
 lib/tests/it/main.rs | 11 +++++++----
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index 7ee81fb0e..15aa23289 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -131,6 +131,8 @@ static OWNERS: Lazy<Vec<(Regex, &'static str)>> = Lazy::new(|| {
         ("usr/bin/hardlink.*", "testlink"),
         ("usr/etc/someconfig.conf", "someconfig"),
         ("usr/etc/polkit.conf", "a-polkit-config"),
+        ("usr/lib/pkgdb", "pkgdb"),
+        ("usr/lib/sysimage/pkgdb", "pkgdb"),
     ]
     .iter()
    .map(|(k, v)| (Regex::new(k).unwrap(), *v))
@@ -150,6 +152,10 @@ r usr/bin/hardlink-b testlink
 r usr/etc/someconfig.conf someconfig
 m 10 10 644
 r usr/etc/polkit.conf a-polkit-config
+m 0 0 644
+# See https://github.com/coreos/fedora-coreos-tracker/issues/1258
+r usr/lib/sysimage/pkgdb some-package-database
+r usr/lib/pkgdb/pkgdb some-package-database
 m
 d boot
 d run
@@ -157,7 +163,8 @@ m 0 0 1755
 d tmp
 "## };
 pub const CONTENTS_CHECKSUM_V0: &str =
-    "76f0d5ec8814bc2a1d7868dbe8d3783535dc0cc9c7dcfdf37fa3512f8e276f6c";
+    "3af747e156c34d08a3a2fb85b94de6999205a1d1c1c7b1993d6ce534a8918cd9";
+pub static CONTENTS_V0_LEN: Lazy<usize> = Lazy::new(|| OWNERS.len());

 #[derive(Debug, PartialEq, Eq)]
 enum SeLabel {
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index dbf7e7662..ae4642b13 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -19,7 +19,7 @@ use std::io::{BufReader, BufWriter};
 use std::os::unix::fs::DirBuilderExt;
 use std::process::Command;

-use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0};
+use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0, CONTENTS_V0_LEN};

 const EXAMPLE_TAR_LAYER: &[u8] =
include_bytes!("fixtures/hlinks.tar.gz");
 const TEST_REGISTRY_DEFAULT: &str = "localhost:5000";
@@ -293,6 +293,8 @@ fn common_tar_structure() -> impl Iterator<Item = TarExpected> {
         ("sysroot/ostree/repo/state", Directory, 0o755),
         ("sysroot/ostree/repo/tmp", Directory, 0o755),
         ("sysroot/ostree/repo/tmp/cache", Directory, 0o755),
+        ("usr/lib/pkgdb", Directory, 0o755),
+        ("usr/lib/sysimage", Directory, 0o755),
     ]
     .into_iter()
     .map(Into::into)
@@ -531,7 +533,7 @@ async fn impl_test_container_import_export(
         "/usr/bin/bash"
     );

-    let n_chunks = if chunked { 7 } else { 1 };
+    let n_chunks = if chunked { *CONTENTS_V0_LEN } else { 1 };
     assert_eq!(cfg.rootfs().diff_ids().len(), n_chunks);
     assert_eq!(cfg.history().len(), n_chunks);

@@ -628,6 +630,7 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu
     let d = Dir::open_ambient_dir(oci_path, cap_std::ambient_authority())?;
     let d = ocidir::OciDir::open(&d)?;
     let manifest = d.read_manifest()?;
+    assert_eq!(manifest.layers().len(), *CONTENTS_V0_LEN);
     let ostree_layer = match format {
         ExportLayout::V0 => manifest.layers().last(),
         ExportLayout::V1 => manifest.layers().first(),
@@ -652,7 +655,7 @@ async fn test_container_chunked_v1() -> Result<()> {
 }

 async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> {
-    let nlayers = 6u32;
+    let nlayers = *CONTENTS_V0_LEN - 1;
     let mut fixture = Fixture::new_v1()?;

     let (imgref, expected_digest) = fixture.export_container(format).await.unwrap();
@@ -788,7 +791,7 @@ r usr/bin/bash bash-v0
     store::remove_images(fixture.destrepo(), [&derived_imgref.imgref]).unwrap();
     assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 0);
     let n_removed = store::gc_image_layers(&fixture.destrepo())?;
-    assert_eq!(n_removed, 8);
+    assert_eq!(n_removed, (*CONTENTS_V0_LEN + 1) as u32);

     // Repo should be clean now
     assert_eq!(

From 8ac66bbe40850093f13ded88626def34df337249 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 18 Jul 2022 10:47:34 -0400
Subject: [PATCH 416/775] tests: Pass tar entries by reference

Prep for testing the full tar export vs chunked container paths better.
---
 lib/tests/it/main.rs | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index ae4642b13..99fc64e55 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -228,7 +228,7 @@ impl Into<TarExpected> for (&'static str, tar::EntryType, u32) {

 fn validate_tar_expected(
     format_version: u32,
-    t: tar::Entries<impl std::io::Read>,
+    t: &mut tar::Entries<impl std::io::Read>,
     expected: impl IntoIterator<Item = TarExpected>,
 ) -> Result<()> {
     let mut expected: HashMap<&'static str, TarExpected> =
@@ -300,7 +300,8 @@ fn common_tar_structure() -> impl Iterator<Item = TarExpected> {
     .map(Into::into)
 }

-fn validate_tar_v1(mut src: tar::Archive<impl std::io::Read>) -> Result<()> {
+/// Validate metadata (prelude) in a v1 tar.
+fn validate_tar_v1_metadata(src: &mut tar::Entries<impl std::io::Read>) -> Result<()> {
     use tar::EntryType::{Directory, Regular};
     let prelude = [
         ("sysroot/ostree/repo", Directory, 0o755),
@@ -313,7 +314,7 @@ fn validate_tar_v1(mut src: tar::Archive<impl std::io::Read>) -> Result<()> {
     let content = content.into_iter().map(Into::into);

     let expected = prelude.chain(common_tar_structure()).chain(content);
-    validate_tar_expected(1, src.entries()?, expected)?;
+    validate_tar_expected(1, src, expected)?;

     Ok(())
 }
@@ -352,17 +353,22 @@ fn test_tar_export_structure() -> Result<()> {
         ("sysroot/ostree/repo/xattrs/d67db507c5a6e7bfd078f0f3ded0a5669479a902e812931fc65c6f5e01831ef5", Regular, 0o644),
         ("usr", Directory, 0o755),
     ].into_iter().map(Into::into));
-    validate_tar_expected(fixture.format_version, entries, expected.map(Into::into))?;
+    validate_tar_expected(
+        fixture.format_version,
+        &mut entries,
+        expected.map(Into::into),
+    )?;

     // Validate format version 1
     fixture.format_version = 1;
     let src_tar = fixture.export_tar()?;
-    let src_tar = fixture
+    let mut src_tar = fixture
         .dir
         .open(src_tar)
         .map(BufReader::new)
         .map(tar::Archive::new)?;
-    validate_tar_v1(src_tar).unwrap();
+    let mut src_tar = src_tar.entries()?;
+    validate_tar_v1_metadata(&mut src_tar).unwrap();

     Ok(())
}

From 47154c0b950147a03d23efae73a99bbe736d311c Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 18 Jul 2022 11:13:35 -0400
Subject: [PATCH 417/775] tests: More refactoring of expected tar handling

Add a missing `return Ok(())` so we can split the tar stream
validation to do:

- validate prelude
- validate contents

We were also missing validation for content objects before, so fix
that!  The chunked container path in particular will need to handle
content validation differently from the "all in one" tarball.
---
 lib/tests/it/main.rs | 35 ++++++++++++++++++++++++++++---------
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 99fc64e55..da34092a3 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -238,6 +238,9 @@ fn validate_tar_expected(
     // Verify we're injecting directories, fixes the absence of `/tmp` in our
     // images for example.
for entry in entries { + if expected.is_empty() { + return Ok(()); + } let header = entry.header(); let entry_path = entry.path().unwrap().to_string_lossy().into_owned(); if seen_paths.contains(&entry_path) { @@ -251,7 +254,8 @@ fn validate_tar_expected( let header_mode = header.mode().unwrap(); if is_old_object && !entry_path.starts_with("sysroot/") { let fmtbits = match header.entry_type() { - tar::EntryType::Regular => libc::S_IFREG, + // For now assume only hardlinks to regular files + tar::EntryType::Regular | tar::EntryType::Link => libc::S_IFREG, tar::EntryType::Directory => libc::S_IFDIR, tar::EntryType::Symlink => 0, o => panic!("Unexpected entry type {:?}", o), @@ -293,8 +297,20 @@ fn common_tar_structure() -> impl Iterator { ("sysroot/ostree/repo/state", Directory, 0o755), ("sysroot/ostree/repo/tmp", Directory, 0o755), ("sysroot/ostree/repo/tmp/cache", Directory, 0o755), - ("usr/lib/pkgdb", Directory, 0o755), - ("usr/lib/sysimage", Directory, 0o755), + ] + .into_iter() + .map(Into::into) +} + +// Find various expected files +fn common_tar_contents_all() -> impl Iterator { + use tar::EntryType::{Directory, Link}; + [ + ("boot", Directory, 0o755), + ("usr", Directory, 0o755), + ("usr/bin/bash", Link, 0o755), + ("usr/bin/hardlink-a", Link, 0o644), + ("usr/bin/hardlink-b", Link, 0o644), ] .into_iter() .map(Into::into) @@ -310,11 +326,7 @@ fn validate_tar_v1_metadata(src: &mut tar::Entries) -> Resu .into_iter() .map(Into::into); - let content = [("usr", Directory, 0o755), ("boot", Directory, 0o755)]; - let content = content.into_iter().map(Into::into); - - let expected = prelude.chain(common_tar_structure()).chain(content); - validate_tar_expected(1, src, expected)?; + validate_tar_expected(1, src, prelude)?; Ok(()) } @@ -356,7 +368,7 @@ fn test_tar_export_structure() -> Result<()> { validate_tar_expected( fixture.format_version, &mut entries, - expected.map(Into::into), + expected.chain(common_tar_contents_all()), )?; // Validate format version 1 @@ -369,6 +381,11 @@ fn test_tar_export_structure() -> Result<()> { .map(tar::Archive::new)?; let mut src_tar = src_tar.entries()?; validate_tar_v1_metadata(&mut src_tar).unwrap(); + validate_tar_expected( + fixture.format_version, + &mut src_tar, + common_tar_contents_all(), + )?; Ok(()) } From 223a7b367476b22d41bd43d59e02f1f1af021048 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 18 Jul 2022 10:23:55 -0400 Subject: [PATCH 418/775] tests: Verify at least one pkgdb entry is there Prep for fixing https://github.com/ostreedev/ostree-rs-ext/issues/339 --- lib/tests/it/main.rs | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index da34092a3..a6718ddd4 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -650,6 +650,8 @@ async fn impl_test_container_import_export( /// Parse a chunked container image and validate its structure; particularly fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Result<()> { + use tar::EntryType::Link; + let d = Dir::open_ambient_dir(oci_path, cap_std::ambient_authority())?; let d = ocidir::OciDir::open(&d)?; let manifest = d.read_manifest()?; @@ -665,7 +667,28 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu .map(flate2::read::GzDecoder::new) .map(tar::Archive::new)?; let mut ostree_layer_blob = ostree_layer_blob.entries()?; - validate_tar_v1_metadata(&mut ostree_layer_blob) + validate_tar_v1_metadata(&mut ostree_layer_blob)?; + + // This layer happens 
to be first
+    let pkgdb_layer_offset = match format {
+        ExportLayout::V0 => 0,
+        ExportLayout::V1 => 1,
+    };
+    let pkgdb_layer = &manifest.layers()[pkgdb_layer_offset];
+    let mut pkgdb_blob = d
+        .read_blob(pkgdb_layer)
+        .map(BufReader::new)
+        .map(flate2::read::GzDecoder::new)
+        .map(tar::Archive::new)?;
+
+    // FIXME add usr/lib/sysimage/pkgdb here once https://github.com/ostreedev/ostree-rs-ext/issues/339 is fixed
+    let pkgdb = [("usr/lib/pkgdb/pkgdb", Link, 0o644)]
+        .into_iter()
+        .map(Into::into);
+
+    validate_tar_expected(0, &mut pkgdb_blob.entries()?, pkgdb)?;
+
+    Ok(())
 }
 
 #[tokio::test]

From ee74845e0a8dd2a511273c81327d545834028e5d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 18 Jul 2022 13:20:31 -0400
Subject: [PATCH 419/775] chunking: Recurse on duplicate directory trees

See https://github.com/coreos/fedora-coreos-tracker/issues/1258
Regression from https://github.com/ostreedev/ostree-rs-ext/pull/331

Currently rpm-ostree emits two identical directory trees at
`/usr/lib/sysimage/rpm-ostree-base-db` and `/usr/share/rpm`, and the
chunking export incorrectly skips emitting the second copy.

Closes: https://github.com/ostreedev/ostree-rs-ext/issues/339
---
 lib/src/chunking.rs  | 22 +++++++++++-----------
 lib/tests/it/main.rs | 10 ++++++----
 2 files changed, 17 insertions(+), 15 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 0cb1aa457..4e62e7994 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -120,15 +120,15 @@ fn push_dirtree(
     repo: &ostree::Repo,
     gen: &mut Generation,
     checksum: &str,
-) -> Result<Option<glib::Variant>> {
-    if gen.dirtree_found.contains(checksum) {
-        return Ok(None);
-    }
+) -> Result<glib::Variant> {
     let child_v = repo.load_variant(ostree::ObjectType::DirTree, checksum)?;
-    let checksum = RcStr::from(checksum);
-    gen.dirtree_found.insert(RcStr::clone(&checksum));
-    gen.metadata_size += child_v.data_as_bytes().as_ref().len() as u64;
-    Ok(Some(child_v))
+    if !gen.dirtree_found.contains(checksum) {
+        gen.metadata_size += child_v.data_as_bytes().as_ref().len() as u64;
+    } else {
+        let checksum = RcStr::from(checksum);
+        gen.dirtree_found.insert(checksum);
+    }
+    Ok(child_v)
 }
 
 fn generate_chunking_recurse(
@@ -165,9 +165,9 @@ fn generate_chunking_recurse(
             gen.path.push(name);
             hex::encode_to_slice(contents_csum, &mut hexbuf)?;
             let checksum_s = std::str::from_utf8(&hexbuf)?;
-            if let Some(child_v) = push_dirtree(repo, gen, checksum_s)?
{ - generate_chunking_recurse(repo, gen, chunk, &child_v)?; - } + let dirtree_v = push_dirtree(repo, gen, checksum_s)?; + generate_chunking_recurse(repo, gen, chunk, &dirtree_v)?; + drop(dirtree_v); hex::encode_to_slice(meta_csum, &mut hexbuf)?; let checksum_s = std::str::from_utf8(&hexbuf)?; push_dirmeta(repo, gen, checksum_s)?; diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index a6718ddd4..83d73eaed 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -681,10 +681,12 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu .map(flate2::read::GzDecoder::new) .map(tar::Archive::new)?; - // FIXME add usr/lib/sysimage/pkgdb here once https://github.com/ostreedev/ostree-rs-ext/issues/339 is fixed - let pkgdb = [("usr/lib/pkgdb/pkgdb", Link, 0o644)] - .into_iter() - .map(Into::into); + let pkgdb = [ + ("usr/lib/pkgdb/pkgdb", Link, 0o644), + ("usr/lib/sysimage/pkgdb", Link, 0o644), + ] + .into_iter() + .map(Into::into); validate_tar_expected(0, &mut pkgdb_blob.entries()?, pkgdb)?; From 8ee3cf611887e8cde3d19bf0f3ce4cfd0d3c89a3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 18 Jul 2022 13:41:09 -0400 Subject: [PATCH 420/775] container: Use tar format v1 for container chunks too Amazingly while I was working on a different bug I stumbled across the fact that the chunked path was using format v0 tar streams. Fix that. --- lib/src/tar/export.rs | 8 +++++++- lib/tests/it/main.rs | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index f7180d98e..8efe7d339 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -616,7 +616,13 @@ pub(crate) fn export_chunk( chunk: chunking::ChunkMapping, out: &mut tar::Builder, ) -> Result<()> { - let writer = &mut OstreeTarWriter::new(repo, commit, out, ExportOptions::default())?; + // For chunking, we default to format version 1 + #[allow(clippy::needless_update)] + let opts = ExportOptions { + format_version: 1, + ..Default::default() + }; + let writer = &mut OstreeTarWriter::new(repo, commit, out, opts)?; writer.write_repo_structure()?; write_chunk(writer, chunk) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 83d73eaed..2899615a4 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -688,7 +688,7 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu .into_iter() .map(Into::into); - validate_tar_expected(0, &mut pkgdb_blob.entries()?, pkgdb)?; + validate_tar_expected(1, &mut pkgdb_blob.entries()?, pkgdb)?; Ok(()) } From ca38b8de4fdc1f563f7872941828ce2d83b1c470 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 19 Jul 2022 06:08:45 -0400 Subject: [PATCH 421/775] Release 0.8.1 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index a7ccf513a..d3f5dc61b 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.0" +version = "0.8.1" [dependencies] anyhow = "1.0" From f02f815d0fa0b692b24ec7b5ee8299de706a3062 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 19 Jul 2022 06:45:41 -0400 Subject: [PATCH 422/775] lib: Use re-exported `io-lifetimes` See https://github.com/bytecodealliance/cap-std/pull/266/commits/76412f5dcc980c39b04eb9c3a7c15611adc67515 --- lib/Cargo.toml | 1 - lib/src/ima.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git 
a/lib/Cargo.toml b/lib/Cargo.toml index a7ccf513a..c85bac228 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -25,7 +25,6 @@ futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" indicatif = "0.16.0" -io-lifetimes = "0.7" once_cell = "1.9" libc = "0.2.92" oci-spec = "0.5.4" diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 9425482a5..5c027a4cb 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -118,7 +118,7 @@ impl<'a> CommitRewriter<'a> { let mut tempf = tempfile::NamedTempFile::new_in(self.tempdir.path())?; // If we're operating on a bare repo, we can clone the file (copy_file_range) directly. if let Ok(instream) = instream.clone().downcast::() { - use io_lifetimes::AsFilelike; + use cap_std_ext::cap_std::io_lifetimes::AsFilelike; // View the fd as a File let instream_fd = unsafe { BorrowedFd::borrow_raw(instream.as_raw_fd()) }; let instream_fd = instream_fd.as_filelike_view::(); From fe957e9cfee3f023101d2c5db9f3558e3d432408 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 20 Jul 2022 16:37:16 -0400 Subject: [PATCH 423/775] cli: Don't panic on errors from unencapsulation I hit this when doing `ostree-ext-cli container unencapsulate --repo=tmp/repo ostree-remote-registry:fedora:quay.io/coreos-assembler/fcos:testing-devel` without a configured `fedora` remote. --- lib/src/cli.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 84ae30da1..100ec78dc 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -480,12 +480,12 @@ async fn container_import( pb }); let importer = ImageImporter::new(repo, imgref, Default::default()).await?; - let import_result = importer.unencapsulate().await; + let import = importer.unencapsulate().await; + // Ensure we finish the progress bar before potentially propagating an error if let Some(pb) = pb.as_ref() { pb.finish(); } - // It must have been set - let import = import_result.unwrap(); + let import = import?; if let Some(write_ref) = write_ref { repo.set_ref_immediate( None, From beb4dddc4bbd578563123156ec6911f27a358643 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 30 Jul 2022 08:36:13 -0400 Subject: [PATCH 424/775] lib: Extend clippy lints to deny `dbg!` and `todo!` xref https://github.com/rust-lang/rust-clippy/issues/9260 --- lib/src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/src/lib.rs b/lib/src/lib.rs index e7323c920..afc42dc2d 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -10,6 +10,8 @@ #![forbid(unused_must_use)] #![deny(unsafe_code)] #![cfg_attr(feature = "dox", feature(doc_cfg))] +#![deny(clippy::dbg_macro)] +#![deny(clippy::todo)] // Re-export our dependencies. See https://gtk-rs.org/blog/2021/06/22/new-release.html // "Dependencies are re-exported". Users will need e.g. `gio::File`, so this avoids From a563bde091946b1a796c1127918641118c984770 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 2 Aug 2022 05:46:35 -0400 Subject: [PATCH 425/775] lib: Update to indicatif 0.17 Lots of improvements: https://github.com/console-rs/indicatif/releases/tag/0.17.0 We'll be doing the matching change in rpm-ostree too so it's good to get down to one vendored version. 
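For reference, a minimal sketch of the 0.17-style calls used in the diff
below; the exact template string and tick interval here are illustrative:

```rust
use std::time::Duration;

fn demo_progress() -> indicatif::ProgressBar {
    let pb = indicatif::ProgressBar::new_spinner();
    // In 0.17, template() returns a Result rather than a ProgressStyle,
    // so the value has to be unwrapped (or propagated).
    let style = indicatif::ProgressStyle::default_bar()
        .template("{spinner} {prefix} {msg}")
        .unwrap();
    pb.set_style(style);
    // enable_steady_tick() now takes a Duration instead of milliseconds as u64.
    pb.enable_steady_tick(Duration::from_millis(200));
    pb
}
```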
--- lib/Cargo.toml | 2 +- lib/src/cli.rs | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 6ccaeaf8a..56c4c3baa 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -24,7 +24,7 @@ fn-error-context = "0.2.0" futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" -indicatif = "0.16.0" +indicatif = "0.17.0" once_cell = "1.9" libc = "0.2.92" oci-spec = "0.5.4" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 100ec78dc..0a13d4166 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -412,7 +412,11 @@ async fn handle_layer_progress_print( ) { let style = indicatif::ProgressStyle::default_bar(); let pb = indicatif::ProgressBar::new(100); - pb.set_style(style.template("{prefix} {bytes} [{bar:20}] ({eta}) {msg}")); + pb.set_style( + style + .template("{prefix} {bytes} [{bar:20}] ({eta}) {msg}") + .unwrap(), + ); loop { tokio::select! { // Always handle layer changes first. @@ -474,8 +478,8 @@ async fn container_import( let pb = (!quiet).then(|| { let pb = indicatif::ProgressBar::new_spinner(); pb.set_draw_target(target); - pb.set_style(style.template("{spinner} {prefix} {msg}")); - pb.enable_steady_tick(200); + pb.set_style(style.template("{spinner} {prefix} {msg}").unwrap()); + pb.enable_steady_tick(std::time::Duration::from_millis(200)); pb.set_message("Downloading..."); pb }); From 75dab5fa403a14b26cf09c6ffc69efb677147e53 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 2 Aug 2022 12:20:35 -0400 Subject: [PATCH 426/775] container: Make `skip_compression` really use gzip fast, not none Not doing any compression at all is quite bad for tar streams which have a ton of duplicate data. --- lib/src/container/encapsulate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 49205f8b9..ad8018d17 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -344,7 +344,7 @@ impl ExportOpts { /// Return the gzip compression level to use, as configured by the export options. fn compression(&self) -> Compression { if self.skip_compression { - Compression::none() + Compression::fast() } else { Compression::default() } From 61ecd5a75caa3a7d1ce1d75a46abee2526c96902 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 2 Aug 2022 12:24:08 -0400 Subject: [PATCH 427/775] cli: Add `container encapsulate --compression-fast` For the use case of exporting to a temporary OCI directory, which we in turn then want to push into `containers-storage:` which is going to decompress it again. This will be used for e.g. https://github.com/coreos/rpm-ostree/issues/3900 --- lib/src/cli.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 100ec78dc..86c403b8f 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -131,6 +131,10 @@ enum ContainerOpts { /// Corresponds to the Dockerfile `CMD` instruction. #[structopt(long)] cmd: Option>, + + /// Compress at the fastest level (e.g. 
gzip level 1) + #[structopt(long)] + compression_fast: bool, }, #[structopt(alias = "commit")] @@ -513,6 +517,7 @@ async fn container_export( labels: BTreeMap, copy_meta_keys: Vec, cmd: Option>, + compression_fast: bool, ) -> Result<()> { let config = Config { labels: Some(labels), @@ -520,6 +525,7 @@ async fn container_export( }; let opts = crate::container::ExportOpts { copy_meta_keys, + skip_compression: compression_fast, // TODO rename this in the struct at the next semver break ..Default::default() }; let pushed = @@ -699,6 +705,7 @@ where labels, copy_meta_keys, cmd, + compression_fast, } => { let labels: Result> = labels .into_iter() @@ -709,7 +716,16 @@ where Ok((k.to_string(), v.to_string())) }) .collect(); - container_export(&repo, &rev, &imgref, labels?, copy_meta_keys, cmd).await + container_export( + &repo, + &rev, + &imgref, + labels?, + copy_meta_keys, + cmd, + compression_fast, + ) + .await } ContainerOpts::Image(opts) => match opts { ContainerImageOpts::List { repo } => { From 38970572aa623404073b6f653269607010e69c7b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 3 Aug 2022 13:55:05 -0400 Subject: [PATCH 428/775] Release 0.8.2 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 56c4c3baa..2b26a9238 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.1" +version = "0.8.2" [dependencies] anyhow = "1.0" From 30dee81c22ad5cb90e77198d3ddbcc25d388afb5 Mon Sep 17 00:00:00 2001 From: Jonathan Lebon Date: Wed, 17 Aug 2022 11:59:42 -0400 Subject: [PATCH 429/775] container: write `ostree/encapsulated` ref Although the OSTree commit checksum is in a label on the container image, sometimes we need to be able to inspect the commit from within the container itself. Rather than forcing tools to look through the whole repo for a commit object, write a canonical `ostree/encapsulated` ref pointing to it. Related: https://github.com/ostreedev/ostree/pull/2691 --- lib/src/tar/export.rs | 9 +++++++++ lib/src/tar/import.rs | 4 ++-- lib/tests/it/main.rs | 8 ++++++++ 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 8efe7d339..db3c50349 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -26,6 +26,8 @@ pub const FORMAT_VERSIONS: RangeInclusive = 0..=1; const SYSROOT: &str = "sysroot"; // This way the default ostree -> sysroot/ostree symlink works. const OSTREEDIR: &str = "sysroot/ostree"; +// The ref added (under ostree/) in the exported OSTree repo pointing at the commit. +const OSTREEREF: &str = "encapsulated"; /// In v0 format, we use this relative path prefix. I think I chose this by looking /// at the current Fedora base image tar stream. 
However, several others don't do @@ -320,6 +322,13 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { &commitmeta, )?; } + + // and add the canonical ref + let path: Utf8PathBuf = format!("{}/repo/refs/heads/ostree", OSTREEDIR).into(); + self.append_default_dir(&path)?; + let path: Utf8PathBuf = + format!("{}/repo/refs/heads/ostree/{}", OSTREEDIR, OSTREEREF).into(); + self.append_default_data(Utf8Path::new(&path), self.commit_checksum.as_bytes())?; Ok(()) } diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index c04b20d0c..36ec9b1b7 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -202,8 +202,8 @@ impl Importer { .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", orig_path))?; // Ignore the regular non-object file hardlinks we inject if let Ok(path) = path.strip_prefix(REPO_PREFIX) { - // Filter out the repo config file - if path.file_name() == Some("config") { + // Filter out the repo config file and refs dir + if path.file_name() == Some("config") || path.starts_with("refs") { return Ok(None); } let path = path.into(); diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 2899615a4..582af8903 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -322,6 +322,14 @@ fn validate_tar_v1_metadata(src: &mut tar::Entries) -> Resu let prelude = [ ("sysroot/ostree/repo", Directory, 0o755), ("sysroot/ostree/repo/config", Regular, 0o644), + ("sysroot/ostree/repo/refs", Directory, 0o755), + ("sysroot/ostree/repo/refs/heads", Directory, 0o755), + ("sysroot/ostree/repo/refs/heads/ostree", Directory, 0o755), + ( + "sysroot/ostree/repo/refs/heads/ostree/encapsulated", + Regular, + 0o644, + ), ] .into_iter() .map(Into::into); From b841e2adb8ce0714bf3ab439021218b1389f632a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 Aug 2022 15:53:15 -0400 Subject: [PATCH 430/775] container: Add an API to fetch both manifest and config I plan to use this as part of implementing `rpm-ostree compose container` for change detection. Basically we want to get the inputhash from the metadata/labels. --- lib/src/container/unencapsulate.rs | 16 ++++++++++++++++ lib/tests/it/main.rs | 4 ++++ 2 files changed, 20 insertions(+) diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index 7d2fb75ff..b1cd05664 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -112,6 +112,22 @@ pub async fn fetch_manifest( fetch_manifest_impl(&mut proxy, imgref).await } +/// Download the manifest for a target image and its sha256 digest, as well as the image configuration. 
+#[context("Fetching manifest and config")] +pub async fn fetch_manifest_and_config( + imgref: &OstreeImageReference, +) -> Result<( + oci_spec::image::ImageManifest, + String, + oci_spec::image::ImageConfiguration, +)> { + let proxy = ImageProxy::new().await?; + let oi = &proxy.open_image(&imgref.imgref.to_string()).await?; + let (digest, manifest) = proxy.fetch_manifest(oi).await?; + let config = proxy.fetch_config(oi).await?; + Ok((manifest, digest, config)) +} + /// The result of an import operation #[derive(Debug)] pub struct Import { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 2899615a4..f3f8cc338 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -589,6 +589,10 @@ async fn impl_test_container_import_export( let (_, pushed_digest) = ostree_ext::container::fetch_manifest(&srcoci_unverified).await?; assert_eq!(pushed_digest, digest); + let (_, pushed_digest, _config) = + ostree_ext::container::fetch_manifest_and_config(&srcoci_unverified).await?; + assert_eq!(pushed_digest, digest); + // No remote matching let srcoci_unknownremote = OstreeImageReference { sigverify: SignatureSource::OstreeRemote("unknownremote".to_string()), From f58873310ca1e55b1c0e53eae21d3e389e712654 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 22 Aug 2022 16:04:12 -0400 Subject: [PATCH 431/775] Release 0.8.3 This just includes a change to generate an `ostree/encapsulated` ref in containers, and a new API to fetch both manifest and config. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 2b26a9238..018ee492f 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.2" +version = "0.8.3" [dependencies] anyhow = "1.0" From 2cb53645d70dc1265202d5583eacaa4d23eb33b4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 23 Aug 2022 13:38:41 -0400 Subject: [PATCH 432/775] Revert writing `ostree/encapsulated` ref This partially reverts 30dee81c22ad5cb90e77198d3ddbcc25d388afb5 We *do* keep the code which ignores the presence of the file during import, as an aid to future compatibility. We can't do this by default for quite a while until support for reading/ignoring the file has been out in the wild for e.g. at least a month or two. --- lib/src/tar/export.rs | 8 +------- lib/tests/it/main.rs | 8 -------- 2 files changed, 1 insertion(+), 15 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index db3c50349..b54aac4f8 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -27,6 +27,7 @@ const SYSROOT: &str = "sysroot"; // This way the default ostree -> sysroot/ostree symlink works. const OSTREEDIR: &str = "sysroot/ostree"; // The ref added (under ostree/) in the exported OSTree repo pointing at the commit. +#[allow(dead_code)] const OSTREEREF: &str = "encapsulated"; /// In v0 format, we use this relative path prefix. 
I think I chose this by looking @@ -322,13 +323,6 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { &commitmeta, )?; } - - // and add the canonical ref - let path: Utf8PathBuf = format!("{}/repo/refs/heads/ostree", OSTREEDIR).into(); - self.append_default_dir(&path)?; - let path: Utf8PathBuf = - format!("{}/repo/refs/heads/ostree/{}", OSTREEDIR, OSTREEREF).into(); - self.append_default_data(Utf8Path::new(&path), self.commit_checksum.as_bytes())?; Ok(()) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 923ada076..f3f8cc338 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -322,14 +322,6 @@ fn validate_tar_v1_metadata(src: &mut tar::Entries) -> Resu let prelude = [ ("sysroot/ostree/repo", Directory, 0o755), ("sysroot/ostree/repo/config", Regular, 0o644), - ("sysroot/ostree/repo/refs", Directory, 0o755), - ("sysroot/ostree/repo/refs/heads", Directory, 0o755), - ("sysroot/ostree/repo/refs/heads/ostree", Directory, 0o755), - ( - "sysroot/ostree/repo/refs/heads/ostree/encapsulated", - Regular, - 0o644, - ), ] .into_iter() .map(Into::into); From f8cc5419a96bf2a328d1fb06f63533f3881226f8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 23 Aug 2022 19:16:36 -0400 Subject: [PATCH 433/775] tests: Propagate metadata from prior commit We want in particular things like `ostree.bootable`. --- lib/src/fixture.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 15aa23289..bb12ea52b 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -539,6 +539,7 @@ impl Fixture { // Load our base commit let rev = &self.srcrepo().require_rev(self.testref())?; let (commit, _) = self.srcrepo.load_commit(rev)?; + let metadata = commit.child_value(0); let root = ostree::MutableTree::from_commit(self.srcrepo(), rev)?; // Bump the commit timestamp by one day let ts = chrono::Utc.timestamp(ostree::commit_get_timestamp(&commit) as i64, 0); @@ -568,7 +569,15 @@ impl Fixture { let root = root.downcast_ref::().unwrap(); let commit = self .srcrepo - .write_commit_with_time(Some(rev), None, None, None, root, new_ts, cancellable) + .write_commit_with_time( + Some(rev), + None, + None, + Some(&metadata), + root, + new_ts, + cancellable, + ) .context("Writing commit")?; self.srcrepo .transaction_set_ref(None, self.testref(), Some(commit.as_str())); From c438e7ed19443303245acb65857ef9e79ce3678d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 23 Aug 2022 19:16:55 -0400 Subject: [PATCH 434/775] tests: Use `#[track_caller]` This way panics are properly attributed to the real source. --- lib/tests/it/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index f3f8cc338..0309be762 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -24,6 +24,7 @@ use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0, CONTENTS_V0_LE const EXAMPLE_TAR_LAYER: &[u8] = include_bytes!("fixtures/hlinks.tar.gz"); const TEST_REGISTRY_DEFAULT: &str = "localhost:5000"; +#[track_caller] fn assert_err_contains(r: Result, s: impl AsRef) { let s = s.as_ref(); let msg = format!("{:#}", r.err().expect("Expecting an error")); From 68ad11fee38c9f72383485aff68da0fb90f5db0d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 23 Aug 2022 19:17:18 -0400 Subject: [PATCH 435/775] tests/fixture: Inject `ostree.bootable` by default Since that's our intended use case for now. 
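For illustration, a minimal sketch of the fixture's commit metadata after
this change; the surrounding keys are taken from the existing fixture code,
and the exact set shown here is an assumption:

```rust
use ostree_ext::prelude::*;
use ostree_ext::{glib, ostree};

fn bootable_commit_metadata() -> glib::Variant {
    let metadata = glib::VariantDict::new(None);
    metadata.insert("version", &"42.0");
    // "ostree.bootable": a later commit in this series hard-requires the
    // corresponding label when pulling container images.
    metadata.insert(*ostree::METADATA_KEY_BOOTABLE, &true);
    metadata.to_variant()
}
```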
--- lib/src/fixture.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index bb12ea52b..c9e86dcf6 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -483,6 +483,7 @@ impl Fixture { ); metadata.insert("ostree.container-cmd", &vec!["/usr/bin/bash"]); metadata.insert("version", &"42.0"); + metadata.insert(*ostree::METADATA_KEY_BOOTABLE, &true); let metadata = metadata.to_variant(); let commit = self.srcrepo.write_commit_with_time( None, From 7db0bd2bfe454aae7963f9b0c0190b5f1901788a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 23 Aug 2022 20:23:11 -0400 Subject: [PATCH 436/775] Fix misc clippy lints I discovered `cargo clippy --fix` exists...I'd been doing it BY HAND. :facepalm: This reaches into the test code too. --- lib/src/chunking.rs | 2 +- lib/src/refescape.rs | 2 +- lib/tests/it/main.rs | 13 +++++++------ 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 4e62e7994..98df066ea 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -373,7 +373,7 @@ fn components_size(components: &[&ObjectSourceMetaSized]) -> u64 { /// Compute the total size of a packing #[cfg(test)] fn packing_size(packing: &[ChunkedComponents]) -> u64 { - packing.iter().map(|v| components_size(&v)).sum() + packing.iter().map(|v| components_size(v)).sum() } fn sort_packing(packing: &mut [ChunkedComponents]) { diff --git a/lib/src/refescape.rs b/lib/src/refescape.rs index fbd15e125..1e3011fb8 100644 --- a/lib/src/refescape.rs +++ b/lib/src/refescape.rs @@ -168,7 +168,7 @@ mod test { for &v in UNCHANGED.iter().chain(ROUNDTRIP).chain(CORNERCASES) { let escaped = &prefix_escape_for_ref(TESTPREFIX, v).unwrap(); ostree::validate_rev(escaped).unwrap(); - let unescaped = unprefix_unescape_ref(TESTPREFIX, &escaped).unwrap(); + let unescaped = unprefix_unescape_ref(TESTPREFIX, escaped).unwrap(); assert_eq!(v, unescaped); } // Explicit test diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index f3f8cc338..e22e4398b 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -216,6 +216,7 @@ struct TarExpected { mode: u32, } +#[allow(clippy::from_over_into)] impl Into for (&'static str, tar::EntryType, u32) { fn into(self) -> TarExpected { TarExpected { @@ -465,7 +466,7 @@ async fn test_tar_write_tar_layer() -> Result<()> { let uncompressed_tar = tokio::io::BufReader::new( async_compression::tokio::bufread::GzipDecoder::new(EXAMPLE_TAR_LAYER), ); - ostree_ext::tar::write_tar(&fixture.destrepo(), uncompressed_tar, "test", None).await?; + ostree_ext::tar::write_tar(fixture.destrepo(), uncompressed_tar, "test", None).await?; Ok(()) } @@ -714,7 +715,7 @@ async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> { let (imgref, expected_digest) = fixture.export_container(format).await.unwrap(); let imgref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, - imgref: imgref, + imgref, }; // Validate the structure of the image match &imgref.imgref { @@ -787,7 +788,7 @@ r usr/bin/bash bash-v0 assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1); - let n_removed = store::gc_image_layers(&fixture.destrepo())?; + let n_removed = store::gc_image_layers(fixture.destrepo())?; assert_eq!(n_removed, 2); fixture .destrepo() @@ -834,16 +835,16 @@ r usr/bin/bash bash-v0 assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 2); // Should only be new layers - let n_removed = store::gc_image_layers(&fixture.destrepo())?; + let n_removed = 
store::gc_image_layers(fixture.destrepo())?; assert_eq!(n_removed, 0); store::remove_images(fixture.destrepo(), [&imgref.imgref]).unwrap(); assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1); // Still no removed layers after removing the base image - let n_removed = store::gc_image_layers(&fixture.destrepo())?; + let n_removed = store::gc_image_layers(fixture.destrepo())?; assert_eq!(n_removed, 0); store::remove_images(fixture.destrepo(), [&derived_imgref.imgref]).unwrap(); assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 0); - let n_removed = store::gc_image_layers(&fixture.destrepo())?; + let n_removed = store::gc_image_layers(fixture.destrepo())?; assert_eq!(n_removed, (*CONTENTS_V0_LEN + 1) as u32); // Repo should be clean now From 9c4a75b3778a3f2fdece095f8f5f7a6289ab512d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 23 Aug 2022 19:05:30 -0400 Subject: [PATCH 437/775] container: Hard require `ostree.bootable` key We want to give a clear and useful error when someone tries to pull a non-ostree-based container image. --- lib/src/container/store.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index a731da35f..46ccccd82 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -309,6 +309,11 @@ pub(crate) fn parse_manifest_layout<'a>( Vec<&'a Descriptor>, )> { let config_labels = config.config().as_ref().and_then(|c| c.labels().as_ref()); + let bootable_key = *ostree::METADATA_KEY_BOOTABLE; + let bootable = config_labels.map_or(false, |l| l.contains_key(bootable_key)); + if !bootable { + anyhow::bail!("Target image does not have {bootable_key} label"); + } let first_layer = manifest .layers() From 75fe6d3eedabe9f8bc51c002ddebe12ffc48da68 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 25 Aug 2022 09:46:14 -0400 Subject: [PATCH 438/775] tests: Use API instead of forking off ostree via bash This is just cleaner. Or it WOULD be if there wasn't a bug in `OstreeRepoFile` we need to work around. --- lib/src/lib.rs | 4 ++++ lib/src/ostree_manual.rs | 35 +++++++++++++++++++++++++++++++++++ lib/tests/it/main.rs | 26 +++++++++++++++++++------- 3 files changed, 58 insertions(+), 7 deletions(-) create mode 100644 lib/src/ostree_manual.rs diff --git a/lib/src/lib.rs b/lib/src/lib.rs index afc42dc2d..fa1ee0200 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -43,6 +43,10 @@ pub mod chunking; pub(crate) mod commit; pub mod objectsource; pub(crate) mod objgv; +#[cfg(feature = "internal-testing-api")] +pub mod ostree_manual; +#[cfg(not(feature = "internal-testing-api"))] +mod ostree_manual; /// Prelude, intended for glob import. pub mod prelude { diff --git a/lib/src/ostree_manual.rs b/lib/src/ostree_manual.rs new file mode 100644 index 000000000..a70294a7b --- /dev/null +++ b/lib/src/ostree_manual.rs @@ -0,0 +1,35 @@ +//! Manual workarounds for ostree bugs + +use std::io::Read; +use std::ptr; + +use ostree; +use ostree::prelude::{Cast, InputStreamExtManual}; +use ostree::{gio, glib}; + +#[allow(unsafe_code)] + +/// Equivalent of `g_file_read()` for ostree::RepoFile to work around an ostree bug. 
+pub fn repo_file_read(f: &ostree::RepoFile) -> Result { + use glib::translate::*; + let stream = unsafe { + let f = f.upcast_ref::(); + let mut error = ptr::null_mut(); + let stream = gio::ffi::g_file_read(f.to_glib_none().0, ptr::null_mut(), &mut error); + if !error.is_null() { + return Err(from_glib_full(error)); + } + let stream = stream as *mut gio::ffi::GInputStream; + from_glib_full(stream) + }; + + Ok(stream) +} + +/// Read a repo file to a string. +pub fn repo_file_read_to_string(f: &ostree::RepoFile) -> anyhow::Result { + let mut r = String::new(); + let mut s = repo_file_read(f)?.into_read(); + s.read_to_string(&mut r)?; + Ok(r) +} diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index e22e4398b..0f6b1e3ff 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -9,7 +9,7 @@ use ostree_ext::container::{ Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport, }; use ostree_ext::ocidir; -use ostree_ext::prelude::FileExt; +use ostree_ext::prelude::{Cast, FileExt}; use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; use sh_inline::bash_in; @@ -900,6 +900,7 @@ async fn test_container_import_export_v1() { /// But layers work via the container::write module. #[tokio::test] async fn test_container_write_derive() -> Result<()> { + let cancellable = gio::NONE_CANCELLABLE; let fixture = Fixture::new_v1()?; let base_oci_path = &fixture.path.join("exampleos.oci"); let _digest = ostree_ext::container::encapsulate( @@ -925,7 +926,11 @@ async fn test_container_write_derive() -> Result<()> { oci_clone(base_oci_path, derived_path).await?; let temproot = &fixture.path.join("temproot"); std::fs::create_dir_all(&temproot.join("usr/bin"))?; - std::fs::write(temproot.join("usr/bin/newderivedfile"), "newderivedfile v0")?; + let newderivedfile_contents = "newderivedfile v0"; + std::fs::write( + temproot.join("usr/bin/newderivedfile"), + newderivedfile_contents, + )?; std::fs::write( temproot.join("usr/bin/newderivedfile3"), "newderivedfile3 v0", @@ -994,11 +999,18 @@ async fn test_container_write_derive() -> Result<()> { assert_eq!(config.os(), &oci_spec::image::Os::Linux); // Parse the commit and verify we pulled the derived content. - bash_in!( - &fixture.dir, - "ostree --repo=dest/repo ls ${r} /usr/bin/newderivedfile >/dev/null", - r = import.merge_commit.as_str() - )?; + let root = fixture + .destrepo() + .read_commit(&import.merge_commit, cancellable)? + .0; + let root = root.downcast_ref::().unwrap(); + { + let derived = root.resolve_relative_path("usr/bin/newderivedfile"); + let derived = derived.downcast_ref::().unwrap(); + let found_newderived_contents = + ostree_ext::ostree_manual::repo_file_read_to_string(derived)?; + assert_eq!(found_newderived_contents, newderivedfile_contents); + } // Import again, but there should be no changes. let mut imp = From c4b2f1bf65229c4db4d0144388846ac7aad91326 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 25 Aug 2022 09:59:16 -0400 Subject: [PATCH 439/775] container/store: Process whiteouts A long time ago, in a galaxy far far away, a few of us in the ostree space did some initial work on Docker-related things; specifically support for whiteouts landed in https://github.com/ostreedev/ostree/commit/baaf7450da8a3870e8a42f0cdd4e0ea0ed5018d6 I wish at that time we'd realized how we could more natively support fetching containers; but, it never occurred to me to fork off skopeo to do all the heavy lifting for the *fetch* side, which would have been a lot of work to reimplement particularly in C. 
Oh well, better late than never! Anyways, that whiteout processing was only designed to happen at checkout time - i.e. when materializing the final filesystem tree. I think this was actually a misdesign and we should add `ostree_mutable_tree_write_with_whiteouts` so that the whiteouts are processed in-memory. However for now, there's a relatively low cost to temporarily materializing the merged tree via hardlinks and handle whiteouts via the existing code, so let's do that. Closes: https://github.com/ostreedev/ostree-rs-ext/issues/273 --- lib/src/container/store.rs | 81 ++++++++++++++++++++++++++++---------- lib/src/ostree_manual.rs | 7 ++-- lib/tests/it/main.rs | 12 ++++++ 3 files changed, 76 insertions(+), 24 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index a731da35f..b175412fd 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -713,34 +713,75 @@ impl ImageImporter { let imgref = self.target_imgref.unwrap_or(self.imgref); let state = crate::tokio_util::spawn_blocking_cancellable_flatten( move |cancellable| -> Result> { + use cap_std_ext::rustix::fd::AsRawFd; + let cancellable = Some(cancellable); let repo = &repo; let txn = repo.auto_transaction(cancellable)?; - let (base_commit_tree, _) = repo.read_commit(&base_commit, cancellable)?; - let base_commit_tree = base_commit_tree.downcast::().unwrap(); - let base_contents_obj = base_commit_tree.tree_get_contents_checksum().unwrap(); - let base_metadata_obj = base_commit_tree.tree_get_metadata_checksum().unwrap(); - let mt = ostree::MutableTree::from_checksum( - repo, - &base_contents_obj, - &base_metadata_obj, - ); + + let devino = ostree::RepoDevInoCache::new(); + let repodir = repo.dfd_as_dir()?; + let repo_tmp = repodir.open_dir("tmp")?; + let td = cap_tempfile::TempDir::new_in(&repo_tmp)?; + + let rootpath = "root"; + let checkout_mode = if repo.mode() == ostree::RepoMode::Bare { + ostree::RepoCheckoutMode::None + } else { + ostree::RepoCheckoutMode::User + }; + let mut checkout_opts = ostree::RepoCheckoutAtOptions { + mode: checkout_mode, + overwrite_mode: ostree::RepoCheckoutOverwriteMode::UnionFiles, + devino_to_csum_cache: Some(devino.clone()), + no_copy_fallback: true, + force_copy_zerosized: true, + process_whiteouts: false, + ..Default::default() + }; + repo.checkout_at( + Some(&checkout_opts), + (*td).as_raw_fd(), + rootpath, + &base_commit, + cancellable, + ) + .context("Checking out base commit")?; + // Layer all subsequent commits + checkout_opts.process_whiteouts = true; for commit in layer_commits { - let (layer_tree, _) = repo.read_commit(&commit, cancellable)?; - repo.write_directory_to_mtree(&layer_tree, &mt, None, cancellable)?; + repo.checkout_at( + Some(&checkout_opts), + (*td).as_raw_fd(), + rootpath, + &commit, + cancellable, + ) + .with_context(|| format!("Checking out layer {commit}"))?; } - let merged_root = repo.write_mtree(&mt, cancellable)?; - let merged_root = merged_root.downcast::().unwrap(); - let merged_commit = repo.write_commit( - None, - None, - None, - Some(&metadata), - &merged_root, + let modifier = + ostree::RepoCommitModifier::new(ostree::RepoCommitModifierFlags::CONSUME, None); + modifier.set_devino_cache(&devino); + + let mt = ostree::MutableTree::new(); + repo.write_dfd_to_mtree( + (*td).as_raw_fd(), + rootpath, + &mt, + Some(&modifier), cancellable, - )?; + ) + .context("Writing merged filesystem to mtree")?; + + let merged_root = repo + .write_mtree(&mt, cancellable) + .context("Writing mtree")?; + let merged_root = 
merged_root.downcast::().unwrap(); + let merged_commit = repo + .write_commit(None, None, None, Some(&metadata), &merged_root, cancellable) + .context("Writing commit")?; repo.transaction_set_ref(None, &ostree_ref, Some(merged_commit.as_str())); txn.commit(cancellable)?; // Here we re-query state just to run through the same code path, diff --git a/lib/src/ostree_manual.rs b/lib/src/ostree_manual.rs index a70294a7b..a4b30d138 100644 --- a/lib/src/ostree_manual.rs +++ b/lib/src/ostree_manual.rs @@ -7,9 +7,8 @@ use ostree; use ostree::prelude::{Cast, InputStreamExtManual}; use ostree::{gio, glib}; +/// Equivalent of `g_file_read()` for ostree::RepoFile to work around https://github.com/ostreedev/ostree/issues/2703 #[allow(unsafe_code)] - -/// Equivalent of `g_file_read()` for ostree::RepoFile to work around an ostree bug. pub fn repo_file_read(f: &ostree::RepoFile) -> Result { use glib::translate::*; let stream = unsafe { @@ -19,8 +18,8 @@ pub fn repo_file_read(f: &ostree::RepoFile) -> Result Result<()> { temproot.join("usr/bin/newderivedfile3"), "newderivedfile3 v0", )?; + // Remove the kernel directory and make a new one + let moddir = temproot.join("usr/lib/modules"); + let oldkernel = "5.10.18-200.x86_64"; + std::fs::create_dir_all(&moddir)?; + let oldkernel_wh = &format!(".wh.{oldkernel}"); + std::fs::write(moddir.join(oldkernel_wh), "")?; + let newkdir = moddir.join("5.12.7-42.x86_64"); + std::fs::create_dir_all(&newkdir)?; + std::fs::write(newkdir.join("vmlinuz"), "a new kernel")?; ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot)?; // And v2 let derived2_path = &fixture.path.join("derived2.oci"); @@ -1010,6 +1019,9 @@ async fn test_container_write_derive() -> Result<()> { let found_newderived_contents = ostree_ext::ostree_manual::repo_file_read_to_string(derived)?; assert_eq!(found_newderived_contents, newderivedfile_contents); + + let old_kernel_dir = root.resolve_relative_path(format!("usr/lib/modules/{oldkernel}")); + assert!(!old_kernel_dir.query_exists(cancellable)); } // Import again, but there should be no changes. From 3048447ac0800fc98041875470a17da61b6f86c5 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 26 Aug 2022 07:52:34 -0400 Subject: [PATCH 440/775] lib: Release 0.8.4 This adds a notable new feature in correctly handling Docker/OCI whiteouts (file deletions) as well as accepting (but not writing) refs in the tar stream. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 018ee492f..eb9b039bf 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.3" +version = "0.8.4" [dependencies] anyhow = "1.0" From 6f84d338725d22e1f21931ecaaad5cb58988a511 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 26 Aug 2022 08:13:21 -0400 Subject: [PATCH 441/775] bootabletree: Add an API to find kernel in fs checkout too See https://github.com/coreos/rpm-ostree/pull/3966#discussion_r955281628 I want to switch over rpm-ostree to use this API instead of reimplementing it. This also adds test coverage for the existing GFile based API. 
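A minimal usage sketch of the new filesystem-based API; the caller and the
rootfs path handling are hypothetical:

```rust
use anyhow::Result;
use cap_std::fs::Dir;
use cap_std_ext::cap_std;

// Hypothetical caller: report the kernel version in a checked-out rootfs.
fn print_kernel_version(rootfs_path: &str) -> Result<()> {
    let root = Dir::open_ambient_dir(rootfs_path, cap_std::ambient_authority())?;
    // Returns e.g. Some("usr/lib/modules/5.12.8-32.aarch64") when that
    // directory contains a vmlinuz file.
    if let Some(kdir) = ostree_ext::bootabletree::find_kernel_dir_fs(&root)? {
        println!("kernel: {}", kdir.file_name().unwrap());
    }
    Ok(())
}
```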
--- lib/src/bootabletree.rs | 99 ++++++++++++++++++++++++++++++++++++++++- lib/tests/it/main.rs | 8 ++++ 2 files changed, 105 insertions(+), 2 deletions(-) diff --git a/lib/src/bootabletree.rs b/lib/src/bootabletree.rs index 6be01cb90..591cbeb4d 100644 --- a/lib/src/bootabletree.rs +++ b/lib/src/bootabletree.rs @@ -1,12 +1,20 @@ //! Helper functions for bootable OSTrees. +use std::path::Path; + use anyhow::Result; +use camino::Utf8Path; +use camino::Utf8PathBuf; +use cap_std::fs::Dir; +use cap_std_ext::cap_std; use ostree::gio; use ostree::prelude::*; -const MODULES: &str = "/usr/lib/modules"; +const MODULES: &str = "usr/lib/modules"; +const VMLINUZ: &str = "vmlinuz"; /// Find the kernel modules directory in a bootable OSTree commit. +/// The target directory will have a `vmlinuz` file representing the kernel binary. pub fn find_kernel_dir( root: &gio::File, cancellable: Option<&gio::Cancellable>, @@ -20,10 +28,97 @@ pub fn find_kernel_dir( let mut r = None; for child in e.clone() { let child = &child?; + if child.file_type() != gio::FileType::Directory { + continue; + } let childpath = e.child(child); - if child.file_type() == gio::FileType::Directory && r.replace(childpath).is_some() { + let vmlinuz = childpath.child(VMLINUZ); + if !vmlinuz.query_exists(cancellable) { + continue; + } + if r.replace(childpath).is_some() { + anyhow::bail!("Found multiple subdirectories in {}", MODULES); + } + } + Ok(r) +} + +fn read_dir_optional( + d: &Dir, + p: impl AsRef, +) -> std::io::Result> { + match d.read_dir(p.as_ref()) { + Ok(r) => Ok(Some(r)), + Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(None), + Err(e) => Err(e), + } +} + +/// Find the kernel modules directory in checked out directory tree. +/// The target directory will have a `vmlinuz` file representing the kernel binary. +pub fn find_kernel_dir_fs(root: &Dir) -> Result> { + let mut r = None; + let entries = if let Some(entries) = read_dir_optional(root, MODULES)? { + entries + } else { + return Ok(None); + }; + for child in entries { + let child = &child?; + if !child.file_type()?.is_dir() { + continue; + } + let name = child.file_name(); + let name = if let Some(n) = name.to_str() { + n + } else { + continue; + }; + let mut pbuf = Utf8Path::new(MODULES).to_owned(); + pbuf.push(name); + pbuf.push(VMLINUZ); + if !root.try_exists(&pbuf)? 
{ + continue; + } + pbuf.pop(); + if r.replace(pbuf).is_some() { anyhow::bail!("Found multiple subdirectories in {}", MODULES); } } Ok(r) } + +#[cfg(test)] +mod test { + use super::*; + use cap_tempfile::cap_std; + + #[test] + fn test_find_kernel_dir_fs() -> Result<()> { + let td = cap_tempfile::tempdir(cap_std::ambient_authority())?; + + // Verify the empty case + assert!(find_kernel_dir_fs(&td).unwrap().is_none()); + let moddir = Utf8Path::new("usr/lib/modules"); + td.create_dir_all(moddir)?; + assert!(find_kernel_dir_fs(&td).unwrap().is_none()); + + let kpath = moddir.join("5.12.8-32.aarch64"); + td.create_dir_all(&kpath)?; + td.write(kpath.join("vmlinuz"), "some kernel")?; + let kpath2 = moddir.join("5.13.7-44.aarch64"); + td.create_dir_all(&kpath2)?; + td.write(kpath2.join("foo.ko"), "some kmod")?; + + assert_eq!( + find_kernel_dir_fs(&td) + .unwrap() + .unwrap() + .file_name() + .unwrap(), + kpath.file_name().unwrap() + ); + + Ok(()) + } +} diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 129f7e70e..a573e1392 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1021,6 +1021,14 @@ async fn test_container_write_derive() -> Result<()> { ostree_ext::ostree_manual::repo_file_read_to_string(derived)?; assert_eq!(found_newderived_contents, newderivedfile_contents); + let kver = ostree_ext::bootabletree::find_kernel_dir(root.upcast_ref(), cancellable) + .unwrap() + .unwrap() + .basename() + .unwrap(); + let kver = Utf8Path::from_path(&kver).unwrap(); + assert_eq!(kver, newkdir.file_name().unwrap()); + let old_kernel_dir = root.resolve_relative_path(format!("usr/lib/modules/{oldkernel}")); assert!(!old_kernel_dir.query_exists(cancellable)); } From cead045578635abcc46145d948bbdde473ac907e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 2 Sep 2022 07:32:29 -0400 Subject: [PATCH 442/775] ci/priv-integration: Update image fixtures chunked v1 has had time to make it to FCOS; manually copy the *old* images now to my quay. --- ci/priv-integration.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 1fa75d1f9..4336aebf0 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -7,9 +7,8 @@ set -euo pipefail sysroot=/run/host # Current stable image fixture image=quay.io/coreos-assembler/fcos:testing-devel -# My hand-uploaded chunked images -chunked_image=quay.io/cgwalters/fcos-chunked:latest -chunked_image_v1=quay.io/cgwalters/fcos-chunked:v1 +# An unchunked v1 image +old_image=quay.io/cgwalters/fcos:unchunked imgref=ostree-unverified-registry:${image} stateroot=testos @@ -26,7 +25,7 @@ ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref "${imgref}" ostree admin --sysroot="${sysroot}" status ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" -for img in "${chunked_image}" "${chunked_image_v1}"; do +for img in "${image}" "${old_image}"; do ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${img}" ostree admin --sysroot="${sysroot}" status From 578a4dba853dc56b404878dc64a8b8bb8be9f6ce Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 9 Sep 2022 10:07:06 -0400 Subject: [PATCH 443/775] container/commit: Actually return an error code I ended up rewriting a lot of this so we could more properly unit test it. 
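A minimal standalone sketch of the pitfall (explained just below; names are
illustrative, and the real code handed the counter to
`task::spawn_blocking(move || ...)`):

```rust
fn main() {
    // `move` copies the i32 into the closure (i32 is Copy), so the closure
    // increments its own private counter, not the caller's.
    let mut error_count = 0i32;
    let mut scan = move || {
        error_count += 1;
    };
    scan();
    // The caller's variable was never updated, so a later
    // `if error_count != 0` check passes vacuously.
    assert_eq!(error_count, 0);
}
```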
But the problem here boils down to `move ||` having *moved* the error
counter into the closure; since `i32` is `Copy`, the closure ended up with
its own copy and the caller's count stayed zero.
---
 lib/src/commit.rs | 54 +++++++++++++++++++++++++++++++++++------------
 1 file changed, 41 insertions(+), 13 deletions(-)

diff --git a/lib/src/commit.rs b/lib/src/commit.rs
index 3f3084291..50107ffcd 100644
--- a/lib/src/commit.rs
+++ b/lib/src/commit.rs
@@ -5,23 +5,25 @@
 use crate::container_utils::require_ostree_container;
 use anyhow::Context;
 use anyhow::Result;
+use camino::Utf8Path;
+use camino::Utf8PathBuf;
 use std::fs;
-use std::path::Path;
 use tokio::task;
 
 /// Check if there are any files that are not directories and error out if
 /// we find any, /var should not contain any files to commit in a container
 /// as it is where we expect user data to reside.
-fn validate_directories_only(path: &Path, error_count: &mut i32) -> Result<()> {
+fn validate_directories_only_recurse(path: &Utf8Path, error_count: &mut i32) -> Result<()> {
     let context = || format!("Validating file: {:?}", path);
     for entry in fs::read_dir(path).with_context(context)? {
         let entry = entry?;
         let path = entry.path();
+        let path: Utf8PathBuf = path.try_into()?;
 
         let metadata = path.symlink_metadata()?;
         if metadata.is_dir() {
-            validate_directories_only(&path, error_count)?;
+            validate_directories_only_recurse(&path, error_count)?;
         } else {
             *error_count += 1;
             if *error_count < 20 {
@@ -32,22 +34,48 @@ fn validate_directories_only(path: &Path, error_count: &mut i32) -> Result<()> {
     Ok(())
 }
 
+fn validate_ostree_compatibility_in(root: &Utf8Path) -> Result<()> {
+    let var_path = root.join("var");
+    println!("Checking /var for files");
+    let mut error_count = 0;
+    validate_directories_only_recurse(&var_path, &mut error_count)?;
+    if error_count != 0 {
+        anyhow::bail!("Found content in {var_path}");
+    }
+    Ok(())
+}
+
+fn validate_ostree_compatibility() -> Result<()> {
+    validate_ostree_compatibility_in(Utf8Path::new("/"))
+}
+
 /// Entrypoint to the commit procedures, initially we just
 /// have one validation but we expect more in the future.
 pub(crate) async fn container_commit() -> Result<()> {
     require_ostree_container()?;
-    println!("Checking /var for files");
-    let var_path = Path::new("/var");
-    let mut error_count = 0;
+    task::spawn_blocking(validate_ostree_compatibility).await?
+}
 
-    task::spawn_blocking(move || -> Result<()> {
-        validate_directories_only(var_path, &mut error_count)
-    })
-    .await??;
+#[cfg(test)]
+mod tests {
+    use super::*;
 
-    if error_count != 0 {
-        anyhow::bail!("Found content in /var");
+    #[test]
+    fn commit() -> Result<()> {
+        let td = tempfile::tempdir()?;
+        let td = td.path();
+        let td = Utf8Path::from_path(td).unwrap();
+
+        let var = td.join("var");
+
+        std::fs::create_dir(&var)?;
+        validate_ostree_compatibility_in(td).unwrap();
+
+        std::fs::write(var.join("foo"), "somefile")?;
+
+        assert!(validate_ostree_compatibility_in(td).is_err());
+
+        Ok(())
     }
-    Ok(())
 }

From bec7c0a4062f6add509fa7b8d596b28feedfa037 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 1 Sep 2022 17:15:06 -0400
Subject: [PATCH 444/775] deploy: Add a `--no-imgref` option

In https://github.com/coreos/coreos-assembler/pull/2523
we taught coreos-assembler how to generate disk images
with a "pre-pulled" container image.

This means that the *first* OS update will use shared layers.

However...right now running e.g. `rpm-ostree rebase quay.io/newimage`
won't necessarily prune the previous image.
(This may be considered a bug) But in practice, particularly for RHEL CoreOS we may not want to have a default image reference - we don't (necessarily) want typing `rpm-ostree upgrade` to do something. With this, we can effectively pre-pull just the layers but not the final image. --- lib/src/cli.rs | 1 + lib/src/container/deploy.rs | 9 +++++++++ lib/src/container/store.rs | 13 ++++++++++++- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 080ef32ad..8c172e377 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -802,6 +802,7 @@ where kargs: kargs.as_deref(), target_imgref: target_imgref.as_ref(), proxy_cfg: Some(proxyopts.into()), + ..Default::default() }; let state = crate::container::deploy::deploy( sysroot, diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 855f11866..38e41df34 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -27,6 +27,12 @@ pub struct DeployOpts<'a> { /// Configuration for fetching containers. pub proxy_cfg: Option, + + /// If true, then no image reference will be written; but there will be refs + /// for the fetched layers. This ensures that if the machine is later updated + /// to a different container image, the fetch process will reuse shared layers, but + /// it will not be necessary to remove the previous image. + pub no_imgref: bool, } /// Write a container image to an OSTree deployment. @@ -48,6 +54,9 @@ pub async fn deploy( if let Some(target) = options.target_imgref { imp.set_target(target); } + if options.no_imgref { + imp.set_no_imgref(); + } let state = match imp.prepare().await? { PrepareResult::AlreadyPresent(r) => r, PrepareResult::Ready(prep) => imp.import(prep).await?, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index f6ceb69c9..61fdb344d 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -131,6 +131,7 @@ pub struct ImageImporter { pub(crate) proxy: ImageProxy, imgref: OstreeImageReference, target_imgref: Option, + no_imgref: bool, // If true, do not write final image ref pub(crate) proxy_img: OpenedImage, layer_progress: Option>, @@ -405,6 +406,7 @@ impl ImageImporter { proxy, proxy_img, target_imgref: None, + no_imgref: false, imgref: imgref.clone(), layer_progress: None, layer_byte_progress: None, @@ -416,6 +418,13 @@ impl ImageImporter { self.target_imgref = Some(target.clone()) } + /// Do not write the final image ref, but do write refs for shared layers. + /// This is useful in scenarios where you want to "pre-pull" an image, + /// but in such a way that it does not need to be manually removed later. + pub fn set_no_imgref(&mut self) { + self.no_imgref = true; + } + /// Determine if there is a new manifest, and if so return its digest. pub async fn prepare(&mut self) -> Result { self.prepare_internal(false).await @@ -787,7 +796,9 @@ impl ImageImporter { let merged_commit = repo .write_commit(None, None, None, Some(&metadata), &merged_root, cancellable) .context("Writing commit")?; - repo.transaction_set_ref(None, &ostree_ref, Some(merged_commit.as_str())); + if !self.no_imgref { + repo.transaction_set_ref(None, &ostree_ref, Some(merged_commit.as_str())); + } txn.commit(cancellable)?; // Here we re-query state just to run through the same code path, // though it'd be cheaper to synthesize it from the data we already have. 
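For illustration, a caller opting into the behavior added above would
construct the deploy options like this (a minimal sketch; the sysroot and
image-reference setup are elided, and the helper name is hypothetical):

```rust
fn prepull_options<'a>() -> ostree_ext::container::deploy::DeployOpts<'a> {
    // Pre-pull the image's layers without recording a final image ref, so a
    // later switch to a different image reuses layers with nothing to remove.
    ostree_ext::container::deploy::DeployOpts {
        no_imgref: true,
        ..Default::default()
    }
}
```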
From 0079d52f93c177dc6a21ef4281df3b0f87b04230 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 13 Sep 2022 14:55:52 -0400 Subject: [PATCH 445/775] container: Add an API to warn about non-ostree compatible content And use it consistently in both pull and deploy paths. I also plan to make this very visible in rpm-ostree. Motivated by a private chat discussion with someone not realizing that files put in `/var` in a container build would just be discarded. --- lib/src/cli.rs | 23 +++++++------- lib/src/container/store.rs | 33 ++++++++++++++++++++ lib/tests/it/main.rs | 64 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 108 insertions(+), 12 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 8c172e377..3e66fda3e 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -573,18 +573,10 @@ async fn container_store( let _ = printer.await; } let import = import?; - let commit = &repo.load_commit(&import.merge_commit)?.0; - let commit_meta = &glib::VariantDict::new(Some(&commit.child_value(0))); - let filtered = commit_meta.lookup::( - ostree_container::store::META_FILTERED, - )?; - if let Some(filtered) = filtered { - for (layerid, filtered) in filtered { - eprintln!("Unsupported paths filtered from {}:", layerid); - for (prefix, count) in filtered { - eprintln!(" {}: {}", prefix, count); - } - } + if let Some(msg) = + ostree_container::store::image_filtered_content_warning(repo, &imgref.imgref)? + { + eprintln!("{msg}") } println!("Wrote: {} => {}", imgref, import.merge_commit); Ok(()) @@ -793,6 +785,7 @@ where } => { let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); sysroot.load(gio::NONE_CANCELLABLE)?; + let repo = &sysroot.repo().unwrap(); let kargs = karg.as_deref(); let kargs = kargs.map(|v| { let r: Vec<_> = v.iter().map(|s| s.as_str()).collect(); @@ -811,6 +804,12 @@ where Some(options), ) .await?; + if let Some(msg) = ostree_container::store::image_filtered_content_warning( + repo, + &imgref.imgref, + )? { + eprintln!("{msg}") + } if let Some(p) = write_commitid_to { std::fs::write(&p, state.merge_commit.as_bytes()) .with_context(|| format!("Failed to write commitid to {}", p))?; diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 61fdb344d..0cbfd871b 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -969,6 +969,39 @@ fn prune_image(repo: &ostree::Repo, image: &ImageReference) -> Result<()> { Ok(()) } +/// Given an image, if it has any non-ostree compatible content, return a suitable +/// warning message. +pub fn image_filtered_content_warning( + repo: &ostree::Repo, + image: &ImageReference, +) -> Result> { + use std::fmt::Write; + + let ostree_ref = ref_for_image(image)?; + let rev = repo.require_rev(&ostree_ref)?; + let commit_obj = repo.load_commit(rev.as_str())?.0; + let commit_meta = &glib::VariantDict::new(Some(&commit_obj.child_value(0))); + + let r = commit_meta + .lookup::(META_FILTERED)? + .filter(|v| !v.is_empty()) + .map(|v| { + let mut filtered = HashMap::<&String, u32>::new(); + for paths in v.values() { + for (k, v) in paths { + let e = filtered.entry(k).or_default(); + *e += v; + } + } + let mut buf = "Image contains non-ostree compatible file paths:".to_string(); + for (k, v) in filtered { + write!(buf, " {k}: {v}").unwrap(); + } + buf + }); + Ok(r) +} + /// Remove the specified image references. /// /// This function assumes no transaction is active on the repository. 
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index a573e1392..a18e7728a 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -746,6 +746,12 @@ async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> { assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1); + assert!( + store::image_filtered_content_warning(fixture.destrepo(), &imgref.imgref) + .unwrap() + .is_none() + ); + const ADDITIONS: &str = indoc::indoc! { " r usr/bin/bash bash-v0 "}; @@ -835,6 +841,12 @@ r usr/bin/bash bash-v0 let _import = imp.import(prep).await.unwrap(); assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 2); + assert!( + store::image_filtered_content_warning(fixture.destrepo(), &derived_imgref.imgref) + .unwrap() + .is_none() + ); + // Should only be new layers let n_removed = store::gc_image_layers(fixture.destrepo())?; assert_eq!(n_removed, 0); @@ -861,6 +873,58 @@ r usr/bin/bash bash-v0 Ok(()) } +#[tokio::test] +async fn test_container_var_content() -> Result<()> { + let fixture = Fixture::new_v1()?; + + let imgref = fixture.export_container(ExportLayout::V1).await.unwrap().0; + let imgref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref, + }; + + // Build a derived image + let derived_path = &fixture.path.join("derived.oci"); + let srcpath = imgref.imgref.name.as_str(); + oci_clone(srcpath, derived_path).await.unwrap(); + let temproot = &fixture.path.join("temproot"); + || -> Result<_> { + std::fs::create_dir(temproot)?; + let temprootd = Dir::open_ambient_dir(temproot, cap_std::ambient_authority())?; + let mut db = DirBuilder::new(); + db.mode(0o755); + db.recursive(true); + temprootd.create_dir_with("var/lib", &db)?; + temprootd.write("var/lib/foo", "junk var data")?; + Ok(()) + }() + .context("generating temp content")?; + ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot)?; + + let derived_imgref = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: ImageReference { + transport: Transport::OciDir, + name: derived_path.to_string(), + }, + }; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &derived_imgref, Default::default()).await?; + let prep = match imp.prepare().await.unwrap() { + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, + }; + let _import = imp.import(prep).await.unwrap(); + + assert!( + store::image_filtered_content_warning(fixture.destrepo(), &derived_imgref.imgref) + .unwrap() + .is_some() + ); + + Ok(()) +} + /// Copy an OCI directory. async fn oci_clone(src: impl AsRef, dest: impl AsRef) -> Result<()> { let src = src.as_ref(); From a913ab0d5a451dd21f099c4f22de8ee366269879 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 13 Sep 2022 16:05:53 -0400 Subject: [PATCH 446/775] container/commit: Auto-clean `/var/{tmp,cache}`, `/tmp`, `/run` The original command here was just scoped to `/var`, but we also don't want content in `/run`. Extend the tooling to handle that and the other two temporary directories. Also, let's be a bit nicer here and auto-clean empty directories in `/var`. 
I was testing out the https://github.com/coreos/coreos-layering-examples/blob/main/tailscale/Dockerfile example and today we have this: ``` drwxr-xr-x root/root 0 2022-09-13 18:53 run/ drwxr-xr-x root/root 0 2022-09-13 18:51 run/rpm-ostree/ drwxr-xr-x root/root 0 2022-09-13 18:53 run/rpm-ostree/lock/ drwxr-xr-x root/root 0 2022-09-13 18:51 run/systemd/ drwxr-xr-x root/root 0 2022-09-13 18:51 run/systemd/resolve/ -rwx------ root/root 0 2022-09-13 18:51 run/systemd/resolve/stub-resolv.conf ... drwxr-xr-x root/root 0 2022-09-13 18:53 var/ drwxr-xr-x root/root 0 2022-09-13 18:53 var/cache/ drwx------ root/root 0 2022-09-13 18:53 var/cache/ldconfig/ -rw------- root/root 22000 2022-09-13 18:53 var/cache/ldconfig/aux-cache drwxr-xr-x root/root 0 2022-09-08 23:10 var/cache/tailscale/ drwxr-xr-x root/root 0 2022-09-13 18:53 var/tmp/ ``` In this set, we can auto-clean the leftover locking directories rpm-ostree (erroneously) leaves in `/run`, as well as `/var/cache/ldconfig`. --- lib/src/commit.rs | 118 +++++++++++++++++++++++++++++++++------------- 1 file changed, 84 insertions(+), 34 deletions(-) diff --git a/lib/src/commit.rs b/lib/src/commit.rs index 50107ffcd..855baa892 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -6,55 +6,75 @@ use crate::container_utils::require_ostree_container; use anyhow::Context; use anyhow::Result; use camino::Utf8Path; -use camino::Utf8PathBuf; -use std::fs; +use cap_std::fs::Dir; +use cap_std_ext::cap_std; +use cap_std_ext::dirext::CapStdExtDirExt; +use std::convert::TryInto; +use std::path::Path; use tokio::task; -/// Check if there are any files that are not directories and error out if -/// we find any, /var should not contain any files to commit in a container -/// as it is where we expect user data to reside. -fn validate_directories_only_recurse(path: &Utf8Path, error_count: &mut i32) -> Result<()> { - let context = || format!("Validating file: {:?}", path); - for entry in fs::read_dir(path).with_context(context)? { +/// Directories for which we will always remove all content. +const FORCE_CLEAN_PATHS: &[&str] = &["run", "tmp", "var/tmp", "var/cache"]; + +/// Gather count of non-empty directories. Empty directories are removed. +fn process_dir_recurse(root: &Dir, path: &Utf8Path, error_count: &mut i32) -> Result { + let context = || format!("Validating: {path}"); + let mut validated = true; + for entry in root.read_dir(path).with_context(context)? { let entry = entry?; - let path = entry.path(); - let path: Utf8PathBuf = path.try_into()?; + let name = entry.file_name(); + let name = Path::new(&name); + let name: &Utf8Path = name.try_into()?; + let path = &path.join(name); - let metadata = path.symlink_metadata()?; + let metadata = root.symlink_metadata(path)?; if metadata.is_dir() { - validate_directories_only_recurse(&path, error_count)?; + if !process_dir_recurse(root, path, error_count)? { + validated = false; + } } else { + validated = false; *error_count += 1; if *error_count < 20 { eprintln!("Found file: {:?}", path) } } } - Ok(()) + if validated { + root.remove_dir(path).with_context(context)?; + } + Ok(validated) } -fn validate_ostree_compatibility_in(root: &Utf8Path) -> Result<()> { - let var_path = root.join("var"); - println!("Checking /var for files"); +/// Given a root filesystem, clean out empty directories and warn about +/// files in /var. /run, /tmp, and /var/tmp have their contents recursively cleaned. 
+fn prepare_ostree_commit_in(root: &Dir) -> Result<()> { let mut error_count = 0; - validate_directories_only_recurse(&var_path, &mut error_count)?; - if error_count != 0 { - anyhow::bail!("Found content in {var_path}"); + for path in FORCE_CLEAN_PATHS { + if let Some(subdir) = root.open_dir_optional(path)? { + for entry in subdir.entries()? { + let entry = entry?; + subdir.remove_all_optional(entry.file_name())?; + } + } + } + let var = Utf8Path::new("var"); + if root.try_exists(var)? && !process_dir_recurse(root, var, &mut error_count)? { + anyhow::bail!("Found content in {var}"); } Ok(()) } -fn validate_ostree_compatibility() -> Result<()> { - validate_ostree_compatibility_in(Utf8Path::new("/")) -} - /// Entrypoint to the commit procedures, initially we just /// have one validation but we expect more in the future. pub(crate) async fn container_commit() -> Result<()> { - require_ostree_container()?; - - task::spawn_blocking(validate_ostree_compatibility).await? + task::spawn_blocking(move || { + require_ostree_container()?; + let rootdir = Dir::open_ambient_dir("/", cap_std::ambient_authority())?; + prepare_ostree_commit_in(&rootdir) + }) + .await? } #[cfg(test)] @@ -63,18 +83,48 @@ mod tests { #[test] fn commit() -> Result<()> { - let td = tempfile::tempdir()?; - let td = td.path(); - let td = Utf8Path::from_path(td).unwrap(); + let td = &cap_tempfile::tempdir(cap_std::ambient_authority())?; + + // Handle the empty case + prepare_ostree_commit_in(td).unwrap(); + + let var = Utf8Path::new("var"); + let run = Utf8Path::new("run"); + let tmp = Utf8Path::new("tmp"); + let vartmp_foobar = &var.join("tmp/foo/bar"); + let runsystemd = &run.join("systemd"); + let resolvstub = &runsystemd.join("resolv.conf"); + + for p in [var, run, tmp] { + td.create_dir(p)?; + } - let var = td.join("var"); + td.create_dir_all(vartmp_foobar)?; + td.write(vartmp_foobar.join("a"), "somefile")?; + td.write(vartmp_foobar.join("b"), "somefile2")?; + td.create_dir_all(runsystemd)?; + td.write(resolvstub, "stub resolv")?; + prepare_ostree_commit_in(td).unwrap(); + assert!(!td.try_exists(var)?); + assert!(td.try_exists(run)?); + assert!(!td.try_exists(runsystemd)?); - std::fs::create_dir(&var)?; - validate_ostree_compatibility_in(td).unwrap(); + let systemd = run.join("systemd"); + td.create_dir_all(&systemd)?; + prepare_ostree_commit_in(td).unwrap(); + assert!(!td.try_exists(var)?); - std::fs::write(var.join("foo"), "somefile")?; + td.create_dir(&var)?; + td.write(var.join("foo"), "somefile")?; + assert!(prepare_ostree_commit_in(td).is_err()); + assert!(td.try_exists(var)?); - assert!(validate_ostree_compatibility_in(td).is_err()); + let nested = Utf8Path::new("var/lib/nested"); + td.create_dir_all(&nested)?; + td.write(nested.join("foo"), "test1")?; + td.write(nested.join("foo2"), "test2")?; + assert!(prepare_ostree_commit_in(td).is_err()); + assert!(td.try_exists(var)?); Ok(()) } From 14402640007b70a026aa75c5e3623a8154b9c0c9 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 15 Sep 2022 10:12:47 -0400 Subject: [PATCH 447/775] Make `commit` module public I think it makes sense for us to use this in rpm-ostree directly too at build time for example. 
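A sketch of what that downstream call could look like (hypothetical helper name; assumes the caller already depends on the cap-std crates used here):

```rust
use anyhow::Result;
use cap_std::fs::Dir;
use cap_std_ext::cap_std;

/// Hypothetical build-time helper: run the same cleaning/validation that
/// `ostree container commit` performs, against a rootfs assembled at `path`.
fn finalize_rootfs(path: &str) -> Result<()> {
    let rootfs = Dir::open_ambient_dir(path, cap_std::ambient_authority())?;
    ostree_ext::commit::prepare_ostree_commit_in(&rootfs)
}
```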
--- lib/src/commit.rs | 2 +- lib/src/lib.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/commit.rs b/lib/src/commit.rs index 855baa892..7f639f424 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -49,7 +49,7 @@ fn process_dir_recurse(root: &Dir, path: &Utf8Path, error_count: &mut i32) -> Re /// Given a root filesystem, clean out empty directories and warn about /// files in /var. /run, /tmp, and /var/tmp have their contents recursively cleaned. -fn prepare_ostree_commit_in(root: &Dir) -> Result<()> { +pub fn prepare_ostree_commit_in(root: &Dir) -> Result<()> { let mut error_count = 0; for path in FORCE_CLEAN_PATHS { if let Some(subdir) = root.open_dir_optional(path)? { diff --git a/lib/src/lib.rs b/lib/src/lib.rs index fa1ee0200..759d373f9 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -40,7 +40,7 @@ pub mod tar; pub mod tokio_util; pub mod chunking; -pub(crate) mod commit; +pub mod commit; pub mod objectsource; pub(crate) mod objgv; #[cfg(feature = "internal-testing-api")] From 972a1349d7643de7dd61d953d4b924b8e069bebd Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 15 Sep 2022 10:05:25 -0400 Subject: [PATCH 448/775] ci: Add a flow that tests `ostree container commit` To verify our changes there too. --- .github/workflows/rust.yml | 18 ++++++++++++++++++ ci/container-build-integration.sh | 20 ++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100755 ci/container-build-integration.sh diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 700740857..69afc1324 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -160,3 +160,21 @@ jobs: run: install ostree-ext-cli /usr/bin && rm -v ostree-ext-cli - name: Integration tests run: ./ci/priv-integration.sh + container-build: + name: "Container build" + needs: build + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v2 + - name: Checkout coreos-layering-examples + uses: actions/checkout@v3 + with: + repository: coreos/coreos-layering-examples + path: coreos-layering-examples + - name: Download + uses: actions/download-artifact@v2 + with: + name: ostree-ext-cli + - name: Integration tests + run: ./ci/container-build-integration.sh diff --git a/ci/container-build-integration.sh b/ci/container-build-integration.sh new file mode 100755 index 000000000..08ef8b5ca --- /dev/null +++ b/ci/container-build-integration.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Verify `ostree container commit` +set -euo pipefail + +image=quay.io/coreos-assembler/fcos:stable +example=coreos-layering-examples/tailscale +set -x + +mv ostree-ext-cli ${example} +cd ${example} +chmod a+x ostree-ext-cli +sed -ie 's,ostree container commit,ostree-ext-cli container commit,' Dockerfile +sed -ie 's,^\(FROM .*\),\1\nADD ostree-ext-cli /usr/bin,' Dockerfile +git diff + +docker build -t localhost/fcos-tailscale . 
+ +docker run --rm localhost/fcos-tailscale rpm -q tailscale + +echo ok container image integration From 77f7144b132763baf5016f48c1159c963b1035b2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 15 Sep 2022 15:39:05 -0400 Subject: [PATCH 449/775] Release 0.8.5 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index eb9b039bf..0e2c8fd43 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.4" +version = "0.8.5" [dependencies] anyhow = "1.0" From c5ac974f493f92f666f9df8bfcb42d02232eb01e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 16 Sep 2022 08:08:16 -0400 Subject: [PATCH 450/775] commit: Add an API to warn but continue on files in `/var` xref https://github.com/coreos/rpm-ostree/issues/4017 I want to have rpm-ostree use this API, but in order to do so in a backwards compatible way we need a mode that just warns but continues. --- lib/src/commit.rs | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/lib/src/commit.rs b/lib/src/commit.rs index 7f639f424..4dc3d8bbb 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -47,10 +47,7 @@ fn process_dir_recurse(root: &Dir, path: &Utf8Path, error_count: &mut i32) -> Re Ok(validated) } -/// Given a root filesystem, clean out empty directories and warn about -/// files in /var. /run, /tmp, and /var/tmp have their contents recursively cleaned. -pub fn prepare_ostree_commit_in(root: &Dir) -> Result<()> { - let mut error_count = 0; +fn clean_paths_in(root: &Dir) -> Result<()> { for path in FORCE_CLEAN_PATHS { if let Some(subdir) = root.open_dir_optional(path)? { for entry in subdir.entries()? { @@ -59,13 +56,35 @@ pub fn prepare_ostree_commit_in(root: &Dir) -> Result<()> { } } } + Ok(()) +} + +#[allow(clippy::collapsible_if)] +fn process_var(root: &Dir, strict: bool) -> Result<()> { let var = Utf8Path::new("var"); - if root.try_exists(var)? && !process_dir_recurse(root, var, &mut error_count)? { - anyhow::bail!("Found content in {var}"); + let mut error_count = 0; + if root.try_exists(var)? { + if !process_dir_recurse(root, var, &mut error_count)? && strict { + anyhow::bail!("Found content in {var}"); + } } Ok(()) } +/// Given a root filesystem, clean out empty directories and warn about +/// files in /var. /run, /tmp, and /var/tmp have their contents recursively cleaned. +pub fn prepare_ostree_commit_in(root: &Dir) -> Result<()> { + clean_paths_in(root)?; + process_var(root, true) +} + +/// Like [`prepare_ostree_commit_in`] but only emits warnings about unsupported +/// files in `/var` and will not error. +pub fn prepare_ostree_commit_in_nonstrict(root: &Dir) -> Result<()> { + clean_paths_in(root)?; + process_var(root, false) +} + /// Entrypoint to the commit procedures, initially we just /// have one validation but we expect more in the future. 
pub(crate) async fn container_commit() -> Result<()> { @@ -87,6 +106,7 @@ mod tests { // Handle the empty case prepare_ostree_commit_in(td).unwrap(); + prepare_ostree_commit_in_nonstrict(td).unwrap(); let var = Utf8Path::new("var"); let run = Utf8Path::new("run"); let tmp = Utf8Path::new("tmp"); @@ -119,6 +139,10 @@ mod tests { assert!(prepare_ostree_commit_in(td).is_err()); assert!(td.try_exists(var)?); + td.write(var.join("foo"), "somefile")?; + prepare_ostree_commit_in_nonstrict(td).unwrap(); + assert!(td.try_exists(var)?); + let nested = Utf8Path::new("var/lib/nested"); td.create_dir_all(&nested)?; td.write(nested.join("foo"), "test1")?; From 3089166a4456cdcfa0568aedcda31eac65ac00ee Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 18 Sep 2022 11:11:38 -0400 Subject: [PATCH 451/775] deploy: Properly use merge deployment and staging flow when booted This won't affect the offline deploy case, but we want to be able to execute this code from a privileged container too for the online case. Previously, we were passing `None` for the merge deployment, which is fine and correct in the offline case for the *first* deployment. But the sysroot already has a handy API to do "find merge if available" which we should always use. Then, the next problem is that the Rust binding API for `simple_write_deployment` is buggy because the C API distinguishes between "NULL pointer" and "zero length array" - it's only when passing `NULL` that one gets the behavior of "inherit kernel arguments". A zero length array is treated as overriding with the empty set. The new staging API fixes this - it takes an `Option<&[&str]>`, and we can then pass `None` and propagate the merge kernel arguments. --- ci/priv-integration.sh | 1 + lib/src/container/deploy.rs | 45 ++++++++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 4336aebf0..189888547 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -25,6 +25,7 @@ ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref "${imgref}" ostree admin --sysroot="${sysroot}" status ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" +ostree admin --sysroot="${sysroot}" undeploy 0 for img in "${image}" "${old_image}"; do ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${img}" diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 38e41df34..02d436368 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -48,6 +48,7 @@ pub async fn deploy( let cancellable = ostree::gio::NONE_CANCELLABLE; let options = options.unwrap_or_default(); let repo = &sysroot.repo().unwrap(); + let merge_deployment = sysroot.merge_deployment(Some(stateroot)); let mut imp = super::store::ImageImporter::new(repo, imgref, options.proxy_cfg.unwrap_or_default()) .await?; @@ -65,17 +66,39 @@ pub async fn deploy( let origin = glib::KeyFile::new(); let target_imgref = options.target_imgref.unwrap_or(imgref); origin.set_string("origin", ORIGIN_CONTAINER, &target_imgref.to_string()); - let deployment = &sysroot.deploy_tree( - Some(stateroot), - commit, - Some(&origin), - None, - options.kargs.unwrap_or_default(), - cancellable, - )?; - let flags = ostree::SysrootSimpleWriteDeploymentFlags::NONE; - sysroot.simple_write_deployment(Some(stateroot), deployment, None, flags, cancellable)?; - sysroot.cleanup(cancellable)?; + if
sysroot.booted_deployment().is_some() { + let opts = ostree::SysrootDeployTreeOpts { + override_kernel_argv: options.kargs, + ..Default::default() + }; + sysroot.stage_tree_with_options( + Some(stateroot), + commit, + Some(&origin), + merge_deployment.as_ref(), + &opts, + cancellable, + )?; + } else { + let deployment = &sysroot.deploy_tree( + Some(stateroot), + commit, + Some(&origin), + merge_deployment.as_ref(), + options.kargs.unwrap_or_default(), + cancellable, + )?; + let flags = ostree::SysrootSimpleWriteDeploymentFlags::NONE; + sysroot.simple_write_deployment( + Some(stateroot), + deployment, + merge_deployment.as_ref(), + flags, + cancellable, + )?; + sysroot.cleanup(cancellable)?; + } Ok(state) } From 8a524b53a3e35897c364eb89eea833b39ed36281 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 19 Sep 2022 13:41:52 -0400 Subject: [PATCH 452/775] Release 0.8.6 Mainly to get the deploy changes out so we can get a firstboot path in OCP that doesn't involve double reboot. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 0e2c8fd43..5c7fd62f8 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.5" +version = "0.8.6" [dependencies] anyhow = "1.0" From f7fbf50a97473784517a2e3ecd0f1eaaa7952218 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 21 Sep 2022 07:52:53 -0400 Subject: [PATCH 453/775] Fix two minor clippy lints Caught by newer clippy than is in our MSRV. --- lib/src/objectsource.rs | 2 +- lib/src/tar/import.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/objectsource.rs b/lib/src/objectsource.rs index 9faa26b92..96d87e501 100644 --- a/lib/src/objectsource.rs +++ b/lib/src/objectsource.rs @@ -18,7 +18,7 @@ mod rcstr_serialize { where S: Serializer, { - serializer.serialize_str(&*v) + serializer.serialize_str(v) } pub(crate) fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 36ec9b1b7..057d19ce8 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -382,7 +382,7 @@ impl Importer { } } tar::EntryType::Symlink => self.import_symlink_object(entry, checksum, xattrs), - o => return Err(anyhow!("Invalid tar entry of type {:?}", o)), + o => Err(anyhow!("Invalid tar entry of type {:?}", o)), } } From c855d63710804f884901cb5068ec6f342e405ecc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 20 Sep 2022 17:50:08 -0400 Subject: [PATCH 454/775] container: Support writing tags to `oci:` directories I want to teach coreos-assembler to maintain an oci directory with version tags for builds instead of using an ostree repository. It now works to do e.g.: ``` $ ostree-ext-cli container encapsulate --repo=tmp/repo 36.20220920.dev.0 oci:tmp/builds:36.20220920.dev.0 $ ostree-ext-cli container encapsulate --repo=tmp/repo 36.20220920.dev.1 oci:tmp/builds:36.20220920.dev.1 ``` But in an OCI/container native build flow, we'd write the OCI builds not as `.ociarchive` but in an oci directory `builds-oci` or so. 
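Since the tag rides along in the path argument, here is a quick sketch of how the new (crate-internal) helper added below splits it, written as test-style assertions:

```rust
// Illustrative assertions matching parse_oci_path_and_tag() below;
// e.g. `tmp/builds:36.20220920.dev.0` selects the `36.20220920.dev.0` tag.
assert_eq!(parse_oci_path_and_tag("tmp/builds"), ("tmp/builds", None));
assert_eq!(
    parse_oci_path_and_tag("tmp/builds:36.20220920.dev.0"),
    ("tmp/builds", Some("36.20220920.dev.0"))
);
```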
Closes: https://github.com/ostreedev/ostree-rs-ext/issues/154 --- lib/src/container/encapsulate.rs | 28 +++++++-- lib/src/container/ocidir.rs | 80 +++++++++++++++++++++++- lib/src/container/update_detachedmeta.rs | 2 +- lib/src/integrationtest.rs | 12 +++- lib/tests/it/main.rs | 13 ++-- 5 files changed, 119 insertions(+), 16 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index ad8018d17..00cfb3216 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -179,12 +179,14 @@ fn build_oci( repo: &ostree::Repo, rev: &str, ocidir_path: &Path, + tag: Option<&str>, config: &Config, opts: ExportOpts, contentmeta: Option, ) -> Result { - // Explicitly error if the target exists - std::fs::create_dir(ocidir_path).context("Creating OCI dir")?; + if !ocidir_path.exists() { + std::fs::create_dir(ocidir_path).context("Creating OCI dir")?; + } let ocidir = Dir::open_ambient_dir(ocidir_path, cap_std::ambient_authority())?; let mut writer = ocidir::OciDir::create(&ocidir)?; @@ -264,7 +266,12 @@ fn build_oci( imgcfg.set_config(Some(ctrcfg)); let ctrcfg = writer.write_config(imgcfg)?; manifest.set_config(ctrcfg); - writer.write_manifest(manifest, oci_image::Platform::default())?; + let platform = oci_image::Platform::default(); + if let Some(tag) = tag { + writer.insert_manifest(manifest, Some(tag), platform)?; + } else { + writer.replace_with_single_manifest(manifest, platform)?; + } Ok(ImageReference { transport: Transport::OciDir, @@ -272,6 +279,16 @@ fn build_oci( }) } +/// Interpret a filesystem path as optionally including a tag. Paths +/// such as `/foo/bar` will return `("/foo/bar", None)`, whereas +/// e.g. `/foo/bar:latest` will return `("/foo/bar", Some("latest"))`. +pub(crate) fn parse_oci_path_and_tag(path: &str) -> (&str, Option<&str>) { + match path.rsplit_once(':') { + Some((path, tag)) => (path, Some(tag)), + None => (path, None), + } +} + /// Helper for `build()` that avoids generics #[instrument(skip(repo, contentmeta))] async fn build_impl( @@ -287,10 +304,12 @@ async fn build_impl( opts.skip_compression = true; } let digest = if dest.transport == Transport::OciDir { + let (path, tag) = parse_oci_path_and_tag(dest.name.as_str()); let _copied: ImageReference = build_oci( repo, ostree_ref, - Path::new(dest.name.as_str()), + Path::new(path), + tag, config, opts, contentmeta, @@ -305,6 +324,7 @@ async fn build_impl( repo, ostree_ref, Path::new(tempdest), + None, config, opts, contentmeta, diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index f450ce70b..2dd5587cc 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -24,6 +24,8 @@ use std::path::{Path, PathBuf}; /// Path inside an OCI directory to the blobs const BLOBDIR: &str = "blobs/sha256"; +const OCI_TAG_ANNOTATION: &str = "org.opencontainers.image.ref.name"; + /// Completed blob metadata #[derive(Debug)] pub struct Blob { @@ -275,7 +277,48 @@ impl OciDir { } /// Write a manifest as a blob, and replace the index with a reference to it. - pub fn write_manifest( + pub fn insert_manifest( + &self, + manifest: oci_image::ImageManifest, + tag: Option<&str>, + platform: oci_image::Platform, + ) -> Result { + let mut manifest = write_json_blob(&self.dir, &manifest, MediaType::ImageManifest)?
+ .platform(platform) + .build() + .unwrap(); + if let Some(tag) = tag { + let annotations: HashMap<_, _> = [(OCI_TAG_ANNOTATION.to_string(), tag.to_string())] + .into_iter() + .collect(); + manifest.set_annotations(Some(annotations)); + } + + let index = self.dir.open_optional("index.json")?.map(BufReader::new); + let index = + if let Some(mut index) = index.map(oci_image::ImageIndex::from_reader).transpose()? { + let mut manifests = index.manifests().clone(); + manifests.push(manifest.clone()); + index.set_manifests(manifests); + index + } else { + oci_image::ImageIndexBuilder::default() + .schema_version(oci_image::SCHEMA_VERSION) + .manifests(vec![manifest.clone()]) + .build() + .unwrap() + }; + + self.dir + .atomic_replace_with("index.json", |w| -> Result<()> { + cjson::to_writer(w, &index).map_err(|e| anyhow::anyhow!("{:?}", e))?; + Ok(()) + })?; + Ok(manifest) + } + + /// Write a manifest as a blob, and replace the index with a reference to it. + pub fn replace_with_single_manifest( &self, manifest: oci_image::ImageManifest, platform: oci_image::Platform, @@ -303,6 +346,27 @@ impl OciDir { self.read_manifest_and_descriptor().map(|r| r.0) } + /// Find the manifest with the provided tag + pub fn find_manifest_with_tag(&self, tag: &str) -> Result> { + let f = self + .dir + .open("index.json") + .context("Failed to open index.json")?; + let idx: oci_image::ImageIndex = serde_json::from_reader(BufReader::new(f))?; + for img in idx.manifests() { + if img + .annotations() + .as_ref() + .and_then(|annos| annos.get(OCI_TAG_ANNOTATION)) + .filter(|tagval| tagval.as_str() == tag) + .is_some() + { + return self.read_json_blob(img).map(Some); + } + } + Ok(None) + } + /// If this OCI directory has a single manifest, return it. Otherwise, an error is returned. pub fn read_manifest_and_descriptor(&self) -> Result<(oci_image::ImageManifest, Descriptor)> { let f = self @@ -460,7 +524,19 @@ mod tests { w.push_layer(&mut manifest, &mut config, root_layer, "root"); let config = w.write_config(config)?; manifest.set_config(config); - w.write_manifest(manifest, oci_image::Platform::default())?; + w.replace_with_single_manifest(manifest.clone(), oci_image::Platform::default())?; + + let read_manifest = w.read_manifest().unwrap(); + assert_eq!(&read_manifest, &manifest); + + let _: Descriptor = + w.insert_manifest(manifest, Some("latest"), oci_image::Platform::default())?; + // There's more than one now + assert!(w.read_manifest().is_err()); + assert!(w.find_manifest_with_tag("noent").unwrap().is_none()); + let found_via_tag = w.find_manifest_with_tag("latest").unwrap().unwrap(); + assert_eq!(found_via_tag, read_manifest); + Ok(()) } } diff --git a/lib/src/container/update_detachedmeta.rs b/lib/src/container/update_detachedmeta.rs index 3ba61dd8c..4476d3ccf 100644 --- a/lib/src/container/update_detachedmeta.rs +++ b/lib/src/container/update_detachedmeta.rs @@ -122,7 +122,7 @@ pub async fn update_detached_metadata( manifest.set_config(new_config_descriptor); // This entirely replaces the single entry in the OCI directory, which skopeo will find by default. tempsrc - .write_manifest(manifest, platform) + .replace_with_single_manifest(manifest, platform) .context("Writing manifest")?; Ok(()) }) diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 869548c58..f71dae015 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -28,7 +28,11 @@ pub(crate) fn detectenv() -> &'static str { /// Using `src` as a base, take append `dir` into OCI image. 
/// Should only be enabled for testing. #[context("Generating derived oci")] -pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef) -> Result<()> { +pub fn generate_derived_oci( + src: impl AsRef, + dir: impl AsRef, + tag: Option<&str>, +) -> Result<()> { let src = src.as_ref(); let src = Dir::open_ambient_dir(src, cap_std::ambient_authority())?; let src = ocidir::OciDir::open(&src)?; @@ -63,7 +67,11 @@ pub fn generate_derived_oci(src: impl AsRef, dir: impl AsRef let new_config_desc = src.write_config(config)?; manifest.set_config(new_config_desc); - src.write_manifest(manifest, oci_image::Platform::default())?; + if let Some(tag) = tag { + src.insert_manifest(manifest, Some(tag), oci_image::Platform::default())?; + } else { + src.replace_with_single_manifest(manifest, oci_image::Platform::default())?; + } Ok(()) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index a18e7728a..dfcca5711 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -802,9 +802,7 @@ r usr/bin/bash bash-v0 .prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::NONE_CANCELLABLE)?; // Build a derived image - let derived_path = &fixture.path.join("derived.oci"); let srcpath = imgref.imgref.name.as_str(); - oci_clone(srcpath, derived_path).await.unwrap(); let temproot = &fixture.path.join("temproot"); || -> Result<_> { std::fs::create_dir(temproot)?; @@ -818,13 +816,14 @@ r usr/bin/bash bash-v0 Ok(()) }() .context("generating temp content")?; - ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot)?; + let derived_tag = "derived"; + ostree_ext::integrationtest::generate_derived_oci(srcpath, temproot, Some(derived_tag))?; let derived_imgref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref: ImageReference { transport: Transport::OciDir, - name: derived_path.to_string(), + name: format!("{srcpath}:{derived_tag}"), }, }; let mut imp = @@ -899,7 +898,7 @@ async fn test_container_var_content() -> Result<()> { Ok(()) }() .context("generating temp content")?; - ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot)?; + ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot, None)?; let derived_imgref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, @@ -1009,7 +1008,7 @@ async fn test_container_write_derive() -> Result<()> { let newkdir = moddir.join("5.12.7-42.x86_64"); std::fs::create_dir_all(&newkdir)?; std::fs::write(newkdir.join("vmlinuz"), "a new kernel")?; - ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot)?; + ostree_ext::integrationtest::generate_derived_oci(derived_path, temproot, None)?; // And v2 let derived2_path = &fixture.path.join("derived2.oci"); oci_clone(base_oci_path, derived2_path).await?; @@ -1020,7 +1019,7 @@ async fn test_container_write_derive() -> Result<()> { temproot.join("usr/bin/newderivedfile2"), "newderivedfile2 v0", )?; - ostree_ext::integrationtest::generate_derived_oci(derived2_path, temproot)?; + ostree_ext::integrationtest::generate_derived_oci(derived2_path, temproot, None)?; let derived_ref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, From b21d1b4834c5da862e3e9c952efae7131f6a9557 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 21 Sep 2022 10:50:27 -0400 Subject: [PATCH 455/775] ci: Also check via podman It turns out that podman adds default mounts in `/run` which means there's a behavior difference here. But we want to cross check with both on general principle. 
I was just using `docker` because I thought podman might not be installed on the stock GH action runner, but it apparently is. --- ci/container-build-integration.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ci/container-build-integration.sh b/ci/container-build-integration.sh index 08ef8b5ca..3db7b0334 100755 --- a/ci/container-build-integration.sh +++ b/ci/container-build-integration.sh @@ -13,8 +13,9 @@ sed -ie 's,ostree container commit,ostree-ext-cli container commit,' Dockerfile sed -ie 's,^\(FROM .*\),\1\nADD ostree-ext-cli /usr/bin,' Dockerfile git diff -docker build -t localhost/fcos-tailscale . - -docker run --rm localhost/fcos-tailscale rpm -q tailscale +for runtime in podman docker; do + $runtime build -t localhost/fcos-tailscale . + $runtime run --rm localhost/fcos-tailscale rpm -q tailscale +done echo ok container image integration From 53a776ef1d6e7b180633e41b2d3c00b77b4752cd Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 21 Sep 2022 11:09:30 -0400 Subject: [PATCH 456/775] commit: Ignore files and directories not on the root mount podman injects e.g. `/run/.containerenv` as a tmpfs mount. We should not attempt to remove this - it will be gone when podman goes to write the tar stream itself. Also in the general case, traversing and recursively removing is potentially really dangerous if e.g. someone mounted an external drive during a container build process at say `/var/srv`. Hopefully most of those cases would be read-only, but still. Ultimately the point is that we only want to remove files that will become part of the serialized tar stream. Pass the device ID of `/` down into our traversal and ignore files/directories which don't match. --- lib/src/commit.rs | 89 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 72 insertions(+), 17 deletions(-) diff --git a/lib/src/commit.rs b/lib/src/commit.rs index 4dc3d8bbb..008317f27 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -9,28 +9,37 @@ use camino::Utf8Path; use cap_std::fs::Dir; use cap_std_ext::cap_std; use cap_std_ext::dirext::CapStdExtDirExt; +use cap_std_ext::rustix::fs::MetadataExt; use std::convert::TryInto; use std::path::Path; +use std::path::PathBuf; use tokio::task; /// Directories for which we will always remove all content. const FORCE_CLEAN_PATHS: &[&str] = &["run", "tmp", "var/tmp", "var/cache"]; /// Gather count of non-empty directories. Empty directories are removed. -fn process_dir_recurse(root: &Dir, path: &Utf8Path, error_count: &mut i32) -> Result { +fn process_dir_recurse( + root: &Dir, + rootdev: u64, + path: &Utf8Path, + error_count: &mut i32, +) -> Result { let context = || format!("Validating: {path}"); let mut validated = true; for entry in root.read_dir(path).with_context(context)? { let entry = entry?; + let metadata = entry.metadata()?; + if metadata.dev() != rootdev { + continue; + } let name = entry.file_name(); let name = Path::new(&name); let name: &Utf8Path = name.try_into()?; let path = &path.join(name); - let metadata = root.symlink_metadata(path)?; - if metadata.is_dir() { - if !process_dir_recurse(root, path, error_count)? { + if !process_dir_recurse(root, rootdev, path, error_count)? { validated = false; } } else { @@ -47,24 +56,68 @@ fn process_dir_recurse(root: &Dir, path: &Utf8Path, error_count: &mut i32) -> Re Ok(validated) } -fn clean_paths_in(root: &Dir) -> Result<()> { - for path in FORCE_CLEAN_PATHS { - if let Some(subdir) = root.open_dir_optional(path)? { - for entry in subdir.entries()? 
{ - let entry = entry?; - subdir.remove_all_optional(entry.file_name())?; - } +/// Recursively remove the target directory, but avoid traversing across mount points. +fn remove_all_on_mount_recurse(root: &Dir, rootdev: u64, path: &Path) -> Result { + let mut skipped = false; + for entry in root.read_dir(path)? { + let entry = entry?; + let metadata = entry.metadata()?; + if metadata.dev() != rootdev { + skipped = true; + continue; + } + let name = entry.file_name(); + let path = &path.join(name); + + if metadata.is_dir() { + skipped |= remove_all_on_mount_recurse(root, rootdev, path.as_path())?; + } else { + root.remove_file(path)?; + } + } + if !skipped { + root.remove_dir(&path)?; + } + Ok(skipped) +} + +fn clean_subdir(root: &Dir, rootdev: u64) -> Result<()> { + for entry in root.entries()? { + let entry = entry?; + let metadata = entry.metadata()?; + let dev = metadata.dev(); + // Ignore other filesystem mounts, e.g. podman injects /run/.containerenv + if dev != rootdev { + continue; + } + let path = PathBuf::from(entry.file_name()); + if metadata.is_dir() { + remove_all_on_mount_recurse(root, rootdev, &path)?; + } else { + root.remove_file(&path)?; } } Ok(()) } +fn clean_paths_in(root: &Dir, rootdev: u64) -> Result<()> { + for path in FORCE_CLEAN_PATHS { + let subdir = if let Some(subdir) = root.open_dir_optional(path)? { + subdir + } else { + continue; + }; + clean_subdir(&subdir, rootdev).with_context(|| format!("Cleaning {path}"))?; + } + Ok(()) +} + #[allow(clippy::collapsible_if)] -fn process_var(root: &Dir, strict: bool) -> Result<()> { +fn process_var(root: &Dir, rootdev: u64, strict: bool) -> Result<()> { let var = Utf8Path::new("var"); let mut error_count = 0; if root.try_exists(var)? { - if !process_dir_recurse(root, var, &mut error_count)? && strict { + if !process_dir_recurse(root, rootdev, var, &mut error_count)? && strict { anyhow::bail!("Found content in {var}"); } } @@ -74,15 +127,17 @@ fn process_var(root: &Dir, strict: bool) -> Result<()> { /// Given a root filesystem, clean out empty directories and warn about /// files in /var. /run, /tmp, and /var/tmp have their contents recursively cleaned. pub fn prepare_ostree_commit_in(root: &Dir) -> Result<()> { - clean_paths_in(root)?; - process_var(root, true) + let rootdev = root.dir_metadata()?.dev(); + clean_paths_in(root, rootdev)?; + process_var(root, rootdev, true) } /// Like [`prepare_ostree_commit_in`] but only emits warnings about unsupported /// files in `/var` and will not error. pub fn prepare_ostree_commit_in_nonstrict(root: &Dir) -> Result<()> { - clean_paths_in(root)?; - process_var(root, false) + let rootdev = root.dir_metadata()?.dev(); + clean_paths_in(root, rootdev)?; + process_var(root, rootdev, false) } /// Entrypoint to the commit procedures, initially we just From 444b32e436e3140c823638238c94313b9308e9cf Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 23 Sep 2022 17:27:27 -0400 Subject: [PATCH 457/775] Release 0.8.7 We need to get the `ostree container commit` fix out. 
--- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 5c7fd62f8..3629bec8a 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.6" +version = "0.8.7" [dependencies] anyhow = "1.0" From b4f8d2e20819ce77566692c6cec23c3d092bc3dc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 29 Sep 2022 08:15:00 +0000 Subject: [PATCH 458/775] lib: port CLI handling to clap v3 This reworks CLI handling logic, porting to latest clap v3. --- cli/Cargo.toml | 3 +- lib/Cargo.toml | 3 +- lib/src/cli.rs | 145 +++++++++++++++++++++++++------------------------ 3 files changed, 76 insertions(+), 75 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index ec9d0abc5..e5e96e2dd 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -10,8 +10,7 @@ readme = "README.md" [dependencies] anyhow = "1.0" ostree-ext = { path = "../lib" } -clap = "2.33.3" -structopt = "0.3.21" +clap = "3.2" libc = "0.2.92" tokio = { version = "1", features = ["macros"] } log = "0.4.0" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 3629bec8a..506747d87 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -11,12 +11,12 @@ version = "0.8.7" [dependencies] anyhow = "1.0" containers-image-proxy = "0.5.1" - async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" chrono = "0.4.19" cjson = "0.1.1" +clap = { version= "3.2", features = ["derive"] } cap-std-ext = "0.26" cap-tempfile = "0.25" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } @@ -34,7 +34,6 @@ pin-project = "1.0" regex = "1.5.4" serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" -structopt = "0.3.21" tar = "0.4.38" tempfile = "3.2.0" term_size = "0.3.2" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 3e66fda3e..287031aef 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -7,11 +7,11 @@ use anyhow::{Context, Result}; use camino::Utf8PathBuf; +use clap::{Parser, Subcommand}; use ostree::{cap_std, gio, glib}; use std::collections::BTreeMap; use std::ffi::OsString; use std::path::PathBuf; -use structopt::StructOpt; use tokio::sync::mpsc::Receiver; use crate::commit::container_commit; @@ -37,11 +37,11 @@ pub fn parse_repo(s: &str) -> Result { } /// Options for importing a tar archive. -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] struct ImportOpts { /// Path to the repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] repo: ostree::Repo, /// Path to a tar archive; if unspecified, will be stdin. Currently the tar archive must not be compressed. @@ -49,15 +49,15 @@ struct ImportOpts { } /// Options for exporting a tar archive. -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] struct ExportOpts { /// Path to the repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] repo: ostree::Repo, /// The format version. Must be 0 or 1. - #[structopt(long)] + #[clap(long)] format_version: u32, /// The ostree ref or commit to export @@ -65,7 +65,7 @@ struct ExportOpts { } /// Options for import/export to tar archives. 
-#[derive(Debug, StructOpt)] +#[derive(Debug, Subcommand)] enum TarOpts { /// Import a tar archive (currently, must not be compressed) Import(ImportOpts), @@ -75,165 +75,166 @@ enum TarOpts { } /// Options for container import/export. -#[derive(Debug, StructOpt)] +#[derive(Debug, Subcommand)] enum ContainerOpts { - #[structopt(alias = "import")] + #[clap(alias = "import")] /// Import an ostree commit embedded in a remote container image Unencapsulate { /// Path to the repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] repo: ostree::Repo, /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - #[structopt(parse(try_from_str = parse_imgref))] + #[clap(parse(try_from_str = parse_imgref))] imgref: OstreeImageReference, /// Create an ostree ref pointing to the imported commit - #[structopt(long)] + #[clap(long)] write_ref: Option, /// Don't display progress - #[structopt(long)] + #[clap(long)] quiet: bool, }, /// Print information about an exported ostree-container image. Info { /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - #[structopt(parse(try_from_str = parse_imgref))] + #[clap(parse(try_from_str = parse_imgref))] imgref: OstreeImageReference, }, /// Wrap an ostree commit into a container - #[structopt(alias = "export")] + #[clap(alias = "export")] Encapsulate { /// Path to the repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] repo: ostree::Repo, /// The ostree ref or commit to export rev: String, /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - #[structopt(parse(try_from_str = parse_base_imgref))] + #[clap(parse(try_from_str = parse_base_imgref))] imgref: ImageReference, /// Additional labels for the container - #[structopt(name = "label", long, short)] + #[clap(name = "label", long, short)] labels: Vec, /// Propagate an OSTree commit metadata key to container label - #[structopt(name = "copymeta", long)] + #[clap(name = "copymeta", long)] copy_meta_keys: Vec, /// Corresponds to the Dockerfile `CMD` instruction. - #[structopt(long)] + #[clap(long)] cmd: Option>, /// Compress at the fastest level (e.g. gzip level 1) - #[structopt(long)] + #[clap(long)] compression_fast: bool, }, - #[structopt(alias = "commit")] + #[clap(alias = "commit")] /// Perform build-time checking and canonicalization. /// This is presently an optional command, but may become required in the future. Commit, /// Commands for working with (possibly layered, non-encapsulated) container images. + #[clap(subcommand)] Image(ContainerImageOpts), } /// Options for container image fetching. -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] struct ContainerProxyOpts { - #[structopt(long)] + #[clap(long)] /// Do not use default authentication files. auth_anonymous: bool, - #[structopt(long)] + #[clap(long)] /// Path to Docker-formatted authentication file. authfile: Option, - #[structopt(long)] + #[clap(long)] /// Directory with certificates (*.crt, *.cert, *.key) used to connect to registry /// Equivalent to `skopeo --cert-dir` cert_dir: Option, - #[structopt(long)] + #[clap(long)] /// Skip TLS verification. insecure_skip_tls_verification: bool, } /// Options for import/export to tar archives. 
-#[derive(Debug, StructOpt)] +#[derive(Debug, Subcommand)] enum ContainerImageOpts { /// List container images List { /// Path to the repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] repo: ostree::Repo, }, /// Pull (or update) a container image. Pull { /// Path to the repository - #[structopt(parse(try_from_str = parse_repo))] + #[clap(parse(try_from_str = parse_repo))] repo: ostree::Repo, /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest - #[structopt(parse(try_from_str = parse_imgref))] + #[clap(parse(try_from_str = parse_imgref))] imgref: OstreeImageReference, - #[structopt(flatten)] + #[clap(flatten)] proxyopts: ContainerProxyOpts, /// Don't display progress - #[structopt(long)] + #[clap(long)] quiet: bool, }, /// Output metadata about an already stored container image. History { /// Path to the repository - #[structopt(long, parse(try_from_str = parse_repo))] + #[clap(long, parse(try_from_str = parse_repo))] repo: ostree::Repo, /// Container image reference, e.g. registry:quay.io/exampleos/exampleos:latest - #[structopt(parse(try_from_str = parse_base_imgref))] + #[clap(parse(try_from_str = parse_base_imgref))] imgref: ImageReference, }, /// Copy a pulled container image from one repo to another. Copy { /// Path to the source repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] src_repo: ostree::Repo, /// Path to the destination repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] dest_repo: ostree::Repo, /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest - #[structopt(parse(try_from_str = parse_imgref))] + #[clap(parse(try_from_str = parse_imgref))] imgref: OstreeImageReference, }, /// Replace the detached metadata (e.g. to add a signature) ReplaceDetachedMetadata { /// Path to the source repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_base_imgref))] + #[clap(long)] + #[clap(parse(try_from_str = parse_base_imgref))] src: ImageReference, /// Target image - #[structopt(long)] - #[structopt(parse(try_from_str = parse_base_imgref))] + #[clap(long)] + #[clap(parse(try_from_str = parse_base_imgref))] dest: ImageReference, /// Path to file containing new detached metadata; if not provided, @@ -244,61 +245,61 @@ enum ContainerImageOpts { /// Unreference one or more pulled container images and perform a garbage collection. Remove { /// Path to the repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] repo: ostree::Repo, /// Image reference, e.g. quay.io/exampleos/exampleos:latest - #[structopt(parse(try_from_str = parse_base_imgref))] + #[clap(parse(try_from_str = parse_base_imgref))] imgrefs: Vec, /// Do not garbage collect unused layers - #[structopt(long)] + #[clap(long)] skip_gc: bool, }, /// Perform initial deployment for a container image Deploy { /// Path to the system root - #[structopt(long)] + #[clap(long)] sysroot: String, /// Name for the state directory, also known as "osname". - #[structopt(long)] + #[clap(long)] stateroot: String, /// Source image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos@sha256:abcd... 
- #[structopt(long)] - #[structopt(parse(try_from_str = parse_imgref))] + #[clap(long)] + #[clap(parse(try_from_str = parse_imgref))] imgref: OstreeImageReference, - #[structopt(flatten)] + #[clap(flatten)] proxyopts: ContainerProxyOpts, /// Target image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest /// /// If specified, `--imgref` will be used as a source, but this reference will be emitted into the origin /// so that later OS updates pull from it. - #[structopt(long)] - #[structopt(parse(try_from_str = parse_imgref))] + #[clap(long)] + #[clap(parse(try_from_str = parse_imgref))] target_imgref: Option, - #[structopt(long)] + #[clap(long)] /// Add a kernel argument karg: Option>, /// Write the deployed checksum to this file - #[structopt(long)] + #[clap(long)] write_commitid_to: Option, }, } /// Options for the Integrity Measurement Architecture (IMA). -#[derive(Debug, StructOpt)] +#[derive(Debug, Parser)] struct ImaSignOpts { /// Path to the repository - #[structopt(long)] - #[structopt(parse(try_from_str = parse_repo))] + #[clap(long)] + #[clap(parse(try_from_str = parse_repo))] repo: ostree::Repo, /// The ostree ref or commit to use as a base src_rev: String, @@ -310,13 +311,13 @@ struct ImaSignOpts { /// Path to IMA key key: Utf8PathBuf, - #[structopt(long)] + #[clap(long)] /// Overwrite any existing signatures overwrite: bool, } /// Options for internal testing -#[derive(Debug, StructOpt)] +#[derive(Debug, Subcommand)] enum TestingOpts { /// Detect the current environment DetectEnv, @@ -330,19 +331,21 @@ enum TestingOpts { } /// Toplevel options for extended ostree functionality. -#[derive(Debug, StructOpt)] -#[structopt(name = "ostree-ext")] -#[structopt(rename_all = "kebab-case")] +#[derive(Debug, Parser)] +#[clap(name = "ostree-ext")] +#[clap(rename_all = "kebab-case")] #[allow(clippy::large_enum_variant)] enum Opt { /// Import and export to tar + #[clap(subcommand)] Tar(TarOpts), /// Import and export to a container image + #[clap(subcommand)] Container(ContainerOpts), /// IMA signatures ImaSign(ImaSignOpts), /// Internal integration testing helpers. - #[structopt(setting(structopt::clap::AppSettings::Hidden))] + #[clap(hide(true), subcommand)] #[cfg(feature = "internal-testing-api")] InternalOnlyForTesting(TestingOpts), } From 2b7d0dcb26725152ab709193c3c95e77543cc273 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Thu, 29 Sep 2022 08:15:01 +0000 Subject: [PATCH 459/775] lib: fix all clap-v3 deprecations --- lib/src/cli.rs | 128 ++++++++++++++++++++++--------------------- lib/tests/it/main.rs | 2 +- 2 files changed, 68 insertions(+), 62 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 287031aef..4641505ec 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -6,7 +6,7 @@ //! such as `rpm-ostree` can directly reuse it. use anyhow::{Context, Result}; -use camino::Utf8PathBuf; +use camino::{Utf8Path, Utf8PathBuf}; use clap::{Parser, Subcommand}; use ostree::{cap_std, gio, glib}; use std::collections::BTreeMap; @@ -31,18 +31,19 @@ pub fn parse_base_imgref(s: &str) -> Result { } /// Parse an [`ostree::Repo`] from a CLI arguemnt. -pub fn parse_repo(s: &str) -> Result { - let repofd = cap_std::fs::Dir::open_ambient_dir(s, cap_std::ambient_authority())?; - Ok(ostree::Repo::open_at_dir(&repofd, ".")?) 
+pub fn parse_repo(s: &Utf8Path) -> Result { + let repofd = cap_std::fs::Dir::open_ambient_dir(s, cap_std::ambient_authority()) + .with_context(|| format!("Opening directory at '{s}'"))?; + ostree::Repo::open_at_dir(&repofd, ".") + .with_context(|| format!("Opening ostree repository at '{s}'")) } /// Options for importing a tar archive. #[derive(Debug, Parser)] struct ImportOpts { /// Path to the repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(long, value_parser)] + repo: Utf8PathBuf, /// Path to a tar archive; if unspecified, will be stdin. Currently the tar archive must not be compressed. path: Option, @@ -52,9 +53,8 @@ struct ImportOpts { #[derive(Debug, Parser)] struct ExportOpts { /// Path to the repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(long, value_parser)] + repo: Utf8PathBuf, /// The format version. Must be 0 or 1. #[clap(long)] @@ -81,12 +81,11 @@ enum ContainerOpts { /// Import an ostree commit embedded in a remote container image Unencapsulate { /// Path to the repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(long, value_parser)] + repo: Utf8PathBuf, /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - #[clap(parse(try_from_str = parse_imgref))] + #[clap(value_parser = parse_imgref)] imgref: OstreeImageReference, /// Create an ostree ref pointing to the imported commit @@ -101,7 +100,7 @@ enum ContainerOpts { /// Print information about an exported ostree-container image. Info { /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - #[clap(parse(try_from_str = parse_imgref))] + #[clap(value_parser = parse_imgref)] imgref: OstreeImageReference, }, @@ -109,15 +108,14 @@ enum ContainerOpts { #[clap(alias = "export")] Encapsulate { /// Path to the repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(long, value_parser)] + repo: Utf8PathBuf, /// The ostree ref or commit to export rev: String, /// Image reference, e.g. registry:quay.io/exampleos/exampleos:latest - #[clap(parse(try_from_str = parse_base_imgref))] + #[clap(value_parser = parse_base_imgref)] imgref: ImageReference, /// Additional labels for the container @@ -174,19 +172,18 @@ enum ContainerImageOpts { /// List container images List { /// Path to the repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(long, value_parser)] + repo: Utf8PathBuf, }, /// Pull (or update) a container image. Pull { /// Path to the repository - #[clap(parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(value_parser)] + repo: Utf8PathBuf, /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest - #[clap(parse(try_from_str = parse_imgref))] + #[clap(value_parser = parse_imgref)] imgref: OstreeImageReference, #[clap(flatten)] @@ -200,28 +197,26 @@ enum ContainerImageOpts { /// Output metadata about an already stored container image. History { /// Path to the repository - #[clap(long, parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(long, value_parser)] + repo: Utf8PathBuf, /// Container image reference, e.g. registry:quay.io/exampleos/exampleos:latest - #[clap(parse(try_from_str = parse_base_imgref))] + #[clap(value_parser = parse_base_imgref)] imgref: ImageReference, }, /// Copy a pulled container image from one repo to another. 
Copy { /// Path to the source repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - src_repo: ostree::Repo, + #[clap(long, value_parser)] + src_repo: Utf8PathBuf, /// Path to the destination repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - dest_repo: ostree::Repo, + #[clap(long, value_parser)] + dest_repo: Utf8PathBuf, /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest - #[clap(parse(try_from_str = parse_imgref))] + #[clap(value_parser = parse_imgref)] imgref: OstreeImageReference, }, @@ -229,12 +224,12 @@ enum ContainerImageOpts { ReplaceDetachedMetadata { /// Path to the source repository #[clap(long)] - #[clap(parse(try_from_str = parse_base_imgref))] + #[clap(value_parser = parse_base_imgref)] src: ImageReference, /// Target image #[clap(long)] - #[clap(parse(try_from_str = parse_base_imgref))] + #[clap(value_parser = parse_base_imgref)] dest: ImageReference, /// Path to file containing new detached metadata; if not provided, @@ -245,12 +240,11 @@ enum ContainerImageOpts { /// Unreference one or more pulled container images and perform a garbage collection. Remove { /// Path to the repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(long, value_parser)] + repo: Utf8PathBuf, /// Image reference, e.g. quay.io/exampleos/exampleos:latest - #[clap(parse(try_from_str = parse_base_imgref))] + #[clap(value_parser = parse_base_imgref)] imgrefs: Vec, /// Do not garbage collect unused layers @@ -270,7 +264,7 @@ enum ContainerImageOpts { /// Source image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos@sha256:abcd... #[clap(long)] - #[clap(parse(try_from_str = parse_imgref))] + #[clap(value_parser = parse_imgref)] imgref: OstreeImageReference, #[clap(flatten)] @@ -281,7 +275,7 @@ enum ContainerImageOpts { /// If specified, `--imgref` will be used as a source, but this reference will be emitted into the origin /// so that later OS updates pull from it. #[clap(long)] - #[clap(parse(try_from_str = parse_imgref))] + #[clap(value_parser = parse_imgref)] target_imgref: Option, #[clap(long)] @@ -298,9 +292,9 @@ enum ContainerImageOpts { #[derive(Debug, Parser)] struct ImaSignOpts { /// Path to the repository - #[clap(long)] - #[clap(parse(try_from_str = parse_repo))] - repo: ostree::Repo, + #[clap(long, value_parser)] + repo: Utf8PathBuf, + /// The ostree ref or commit to use as a base src_rev: String, /// The ostree ref to use for writing the signed commit @@ -365,12 +359,13 @@ impl Into for ContainerProxyOpts { /// Import a tar archive containing an ostree commit. async fn tar_import(opts: &ImportOpts) -> Result<()> { + let repo = parse_repo(&opts.repo)?; let imported = if let Some(path) = opts.path.as_ref() { let instream = tokio::fs::File::open(path).await?; - crate::tar::import_tar(&opts.repo, instream, None).await? + crate::tar::import_tar(&repo, instream, None).await? } else { let stdin = tokio::io::stdin(); - crate::tar::import_tar(&opts.repo, stdin, None).await? + crate::tar::import_tar(&repo, stdin, None).await? 
    };
    println!("Imported: {}", imported);
    Ok(())
}
@@ -381,17 +376,13 @@ fn tar_export(opts: &ExportOpts) -> Result<()> {
     if !crate::tar::FORMAT_VERSIONS.contains(&opts.format_version) {
         anyhow::bail!("Invalid format version: {}", opts.format_version);
     }
+    let repo = parse_repo(&opts.repo)?;
     #[allow(clippy::needless_update)]
     let subopts = crate::tar::ExportOptions {
         format_version: opts.format_version,
         ..Default::default()
     };
-    crate::tar::export_commit(
-        &opts.repo,
-        opts.rev.as_str(),
-        std::io::stdout(),
-        Some(subopts),
-    )?;
+    crate::tar::export_commit(&repo, opts.rev.as_str(), std::io::stdout(), Some(subopts))?;
     Ok(())
 }
 
@@ -648,9 +639,10 @@ fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> {
         key: cmdopts.key.clone(),
         overwrite: cmdopts.overwrite,
     };
-    let tx = cmdopts.repo.auto_transaction(cancellable)?;
-    let signed_commit = crate::ima::ima_sign(&cmdopts.repo, cmdopts.src_rev.as_str(), &signopts)?;
-    cmdopts.repo.transaction_set_ref(
+    let repo = parse_repo(&cmdopts.repo)?;
+    let tx = repo.auto_transaction(cancellable)?;
+    let signed_commit = crate::ima::ima_sign(&repo, cmdopts.src_rev.as_str(), &signopts)?;
+    repo.transaction_set_ref(
         None,
         cmdopts.target_ref.as_str(),
         Some(signed_commit.as_str()),
@@ -684,7 +676,7 @@ where
     I: IntoIterator,
     I::Item: Into<OsString> + Clone,
 {
-    let opt = Opt::from_iter(args);
+    let opt = Opt::parse_from(args);
     match opt {
         Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await,
         Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt),
@@ -696,7 +688,10 @@ where
             imgref,
             write_ref,
             quiet,
-        } => container_import(&repo, &imgref, write_ref.as_deref(), quiet).await,
+        } => {
+            let repo = parse_repo(&repo)?;
+            container_import(&repo, &imgref, write_ref.as_deref(), quiet).await
+        }
         ContainerOpts::Encapsulate {
             repo,
             rev,
@@ -715,6 +710,7 @@ where
                     Ok((k.to_string(), v.to_string()))
                 })
                 .collect();
+            let repo = parse_repo(&repo)?;
            container_export(
                &repo,
                &rev,
@@ -728,6 +724,7 @@ where
        }
        ContainerOpts::Image(opts) => match opts {
            ContainerImageOpts::List { repo } => {
+                let repo = parse_repo(&repo)?;
                for image in crate::container::store::list_images(&repo)?
{
                    println!("{}", image);
                }
                Ok(())
            }
            ContainerImageOpts::Pull {
                repo,
                imgref,
                proxyopts,
                quiet,
-            } => container_store(&repo, &imgref, proxyopts, quiet).await,
+            } => {
+                let repo = parse_repo(&repo)?;
+                container_store(&repo, &imgref, proxyopts, quiet).await
+            }
            ContainerImageOpts::History { repo, imgref } => {
+                let repo = parse_repo(&repo)?;
                container_history(&repo, &imgref).await
            }
            ContainerImageOpts::Remove {
                repo,
                imgrefs,
                skip_gc,
            } => {
                let nimgs = imgrefs.len();
+                let repo = parse_repo(&repo)?;
                crate::container::store::remove_images(&repo, imgrefs.iter())?;
                if !skip_gc {
                    let nlayers = crate::container::store::gc_image_layers(&repo)?;
@@ -761,7 +763,11 @@ where
                src_repo,
                dest_repo,
                imgref,
-            } => crate::container::store::copy(&src_repo, &dest_repo, &imgref).await,
+            } => {
+                let src_repo = parse_repo(&src_repo)?;
+                let dest_repo = parse_repo(&dest_repo)?;
+                crate::container::store::copy(&src_repo, &dest_repo, &imgref).await
+            }
            ContainerImageOpts::ReplaceDetachedMetadata {
                src,
                dest,
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index dfcca5711..17da67d60 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -43,7 +43,7 @@ static TEST_REGISTRY: Lazy<String> = Lazy::new(|| match std::env::var_os("TEST_R
 fn test_cli_fns() -> Result<()> {
     let fixture = Fixture::new_v1()?;
     let srcpath = fixture.path.join("src/repo");
-    let srcrepo_parsed = ostree_ext::cli::parse_repo(srcpath.as_str()).unwrap();
+    let srcrepo_parsed = ostree_ext::cli::parse_repo(&srcpath).unwrap();
     assert_eq!(srcrepo_parsed.mode(), fixture.srcrepo().mode());
 
     let ir =

From 0f2e3ba5a765e905031e794053f7005ef91e2437 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 28 Sep 2022 20:31:23 -0400
Subject: [PATCH 460/775] container: Loosen container detection to work with
 `docker buildx`

Apparently, `docker buildx build` creates containers without
`/.dockerenv`.

I debated forcing on a `container=oci` environment variable for our
containers, but upon some reflection I think it's about equally as
clean to loosen our container detection logic.

The presence of a bare-split-xattrs ostree repo is a powerful signal.
Let's use that as the signal, unless overridden by strong contrary
evidence:

- We're running under systemd
- We are apparently on a booted ostree system

---
 ci/container-build-integration.sh | 17 ++++++++++++++---
 lib/src/cli.rs                    |  3 +--
 lib/src/container_utils.rs        | 11 ++++++++++-
 lib/src/integrationtest.rs        | 27 +++++++++++++++------------
 4 files changed, 40 insertions(+), 18 deletions(-)

diff --git a/ci/container-build-integration.sh b/ci/container-build-integration.sh
index 3db7b0334..a3741b36d 100755
--- a/ci/container-build-integration.sh
+++ b/ci/container-build-integration.sh
@@ -6,11 +6,12 @@ image=quay.io/coreos-assembler/fcos:stable
 example=coreos-layering-examples/tailscale
 set -x
 
-mv ostree-ext-cli ${example}
-cd ${example}
 chmod a+x ostree-ext-cli
+workdir=${PWD}
+cd ${example}
+cp ${workdir}/ostree-ext-cli .
 sed -ie 's,ostree container commit,ostree-ext-cli container commit,' Dockerfile
-sed -ie 's,^\(FROM .*\),\1\nADD ostree-ext-cli /usr/bin,' Dockerfile
+sed -ie 's,^\(FROM .*\),\1\nADD ostree-ext-cli /usr/bin/,' Dockerfile
 git diff
 
 for runtime in podman docker; do
@@ -18,4 +19,14 @@ for runtime in podman docker; do
   $runtime run --rm localhost/fcos-tailscale rpm -q tailscale
 done
 
+cd $(mktemp -d)
+cp ${workdir}/ostree-ext-cli .
+cat > Dockerfile << EOF
+FROM $image
+ADD ostree-ext-cli /usr/bin/
+RUN set -x; test \$(ostree-ext-cli internal-only-for-testing detect-env) = ostree-container
+EOF
+# Also verify docker buildx, which apparently doesn't have /.dockerenv
+docker buildx build -t localhost/fcos-tailscale .
+
 echo ok container image integration
diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 3e66fda3e..9a4e40629 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -661,8 +661,7 @@ fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> {
 async fn testing(opts: &TestingOpts) -> Result<()> {
     match opts {
         TestingOpts::DetectEnv => {
-            let s = crate::integrationtest::detectenv();
-            println!("{}", s);
+            println!("{}", crate::integrationtest::detectenv()?);
             Ok(())
         }
         TestingOpts::CreateFixture => crate::integrationtest::create_fixture().await,
diff --git a/lib/src/container_utils.rs b/lib/src/container_utils.rs
index 388fe50a1..f4c7ed934 100644
--- a/lib/src/container_utils.rs
+++ b/lib/src/container_utils.rs
@@ -15,6 +15,8 @@ const V1_REPO_CONFIG: &str = "/sysroot/ostree/repo/config";
 /// Attempts to detect if the current process is running inside a container.
 /// This looks for the `container` environment variable or the presence
 /// of Docker or podman's more generic `/run/.containerenv`.
+/// This is a best-effort function, as there is not a 100% reliable way
+/// to determine this.
 pub fn running_in_container() -> bool {
     if std::env::var_os("container").is_some() {
         return true;
@@ -66,7 +68,14 @@ pub fn is_bare_split_xattrs() -> Result<bool> {
 ///
 /// This just invokes [`is_bare_split_xattrs`] and [`running_in_container`].
 pub fn is_ostree_container() -> Result<bool> {
-    Ok(running_in_container() && is_bare_split_xattrs()?)
+    let is_container_ostree = is_bare_split_xattrs()?;
+    let running_in_systemd = std::env::var_os("INVOCATION_ID").is_some();
+    // If we have a container-ostree repo format, then we'll assume we're
+    // running in a container unless there's strong evidence not (we detect
+    // we're part of a systemd unit or are in a booted ostree system).
+    let maybe_container = running_in_container()
+        || (!running_in_systemd && !Path::new("/run/ostree-booted").exists());
+    Ok(is_container_ostree && maybe_container)
 }
 
 /// Returns an error unless the current filesystem is an ostree-based container
diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs
index f71dae015..0c40d87ee 100644
--- a/lib/src/integrationtest.rs
+++ b/lib/src/integrationtest.rs
@@ -2,7 +2,10 @@
 
 use std::path::Path;
 
-use crate::container::{ocidir, ExportLayout};
+use crate::{
+    container::{ocidir, ExportLayout},
+    container_utils::is_ostree_container,
+};
 use anyhow::Result;
 use camino::Utf8Path;
 use cap_std::fs::Dir;
@@ -12,17 +15,17 @@ use gio::prelude::*;
 use oci_spec::image as oci_image;
 use ostree::gio;
 
-fn has_ostree() -> bool {
-    std::path::Path::new("/sysroot/ostree/repo").exists()
-}
-
-pub(crate) fn detectenv() -> &'static str {
-    match (crate::container_utils::running_in_container(), has_ostree()) {
-        (true, true) => "ostree-container",
-        (true, false) => "container",
-        (false, true) => "ostree",
-        (false, false) => "none",
-    }
+pub(crate) fn detectenv() -> Result<&'static str> {
+    let r = if is_ostree_container()? {
+        "ostree-container"
+    } else if Path::new("/run/ostree-booted").exists() {
+        "ostree"
+    } else if crate::container_utils::running_in_container() {
+        "container"
+    } else {
+        "none"
+    };
+    Ok(r)
 }
 
 /// Using `src` as a base, take append `dir` into OCI image.
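
To make the loosened heuristic concrete, here is a minimal, illustrative sketch (not part of the patch; the pure-function restatement and its argument names are invented) of the decision over the signals the new `is_ostree_container` consults:

```
// Illustrative only: the detection decision as a pure function.
// bare_split_xattrs: the repo at /sysroot/ostree/repo uses bare-split-xattrs
// container_env: `container` is set, or /run/.containerenv (or /.dockerenv) exists
// systemd_unit: INVOCATION_ID is set (we're part of a systemd unit)
// ostree_booted: /run/ostree-booted exists (booted ostree system)
fn is_ostree_container(
    bare_split_xattrs: bool,
    container_env: bool,
    systemd_unit: bool,
    ostree_booted: bool,
) -> bool {
    let maybe_container = container_env || (!systemd_unit && !ostree_booted);
    bare_split_xattrs && maybe_container
}

fn main() {
    // docker buildx: no /.dockerenv, but the repo format alone wins.
    assert!(is_ostree_container(true, false, false, false));
    // A booted ostree host with the same repo format is not a container.
    assert!(!is_ostree_container(true, false, false, true));
}
```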
From 6f8ba3d9e67846df02e08c3fad990a2216e6569d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 6 Oct 2022 18:34:03 -0400
Subject: [PATCH 461/775] lib/container: Add context to prepare/import

I was debugging an error and was trying to figure out which of these
it was coming from (the answer was neither though).
---
 lib/src/container/store.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 0cbfd871b..ca9806a6b 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -426,6 +426,7 @@ impl ImageImporter {
     }
 
     /// Determine if there is a new manifest, and if so return its digest.
+    #[context("Preparing import")]
     pub async fn prepare(&mut self) -> Result<PrepareResult> {
         self.prepare_internal(false).await
     }
@@ -649,6 +650,7 @@ impl ImageImporter {
     }
 
     /// Import a layered container image
+    #[context("Importing")]
     pub async fn import(
         mut self,
         mut import: Box<PreparedImport>,

From 4d83512b10c916754dd762aac336354ef67cea82 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 6 Oct 2022 18:36:18 -0400
Subject: [PATCH 462/775] cli: Fix error when using deploy --target-imgref

I was trying to use https://github.com/coreos/coreos-assembler/pull/2523
to test something and discovered that this had regressed.  We need to
use the target imgref if provided.
---
 lib/src/cli.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index ef35f8548..28cd61c30 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -812,9 +812,10 @@ where
                     Some(options),
                 )
                 .await?;
+                let wrote_imgref = target_imgref.as_ref().unwrap_or(&imgref);
                 if let Some(msg) = ostree_container::store::image_filtered_content_warning(
                     repo,
-                    &imgref.imgref,
+                    &wrote_imgref.imgref,
                 )? {
                     eprintln!("{msg}")
                 }

From 396dc6c5c574ed300cb6a3d0835c917edfd0d927 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 7 Oct 2022 13:32:41 -0400
Subject: [PATCH 463/775] Add warning infrastructure when we find v0 images

Let's make this much more visible by exposing the ability to do
warn-and-sleep.
---
 lib/src/cli.rs                     | 11 +++++++++++
 lib/src/container/deploy.rs        |  8 +++++++-
 lib/src/container/store.rs         | 10 ++++++++++
 lib/src/container/unencapsulate.rs |  3 +++
 lib/tests/it/main.rs               |  4 ++++
 5 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 28cd61c30..0a8ac809d 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -464,6 +464,11 @@ fn print_layer_status(prep: &PreparedImport) {
     }
 }
 
+pub(crate) fn print_deprecated_warning(msg: &str) {
+    eprintln!("warning: {msg}");
+    std::thread::sleep(std::time::Duration::from_secs(3));
+}
+
 /// Import a container image with an encapsulated ostree commit.
 async fn container_import(
     repo: &ostree::Repo,
@@ -488,6 +493,9 @@ async fn container_import(
         pb.finish();
     }
     let import = import?;
+    if let Some(warning) = import.deprecated_warning.as_deref() {
+        print_deprecated_warning(warning);
+    }
     if let Some(write_ref) = write_ref {
         repo.set_ref_immediate(
             None,
@@ -554,6 +562,9 @@ async fn container_store(
         }
         PrepareResult::Ready(r) => r,
     };
+    if let Some(warning) = prep.deprecated_warning() {
+        print_deprecated_warning(warning);
+    }
     print_layer_status(&prep);
     let printer = (!quiet).then(|| {
         let layer_progress = imp.request_progress();
diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs
index 02d436368..78bdf1515 100644
--- a/lib/src/container/deploy.rs
+++ b/lib/src/container/deploy.rs
@@ -60,7 +60,13 @@ pub async fn deploy(
     }
     let state = match imp.prepare().await? {
         PrepareResult::AlreadyPresent(r) => r,
-        PrepareResult::Ready(prep) => imp.import(prep).await?,
+        PrepareResult::Ready(prep) => {
+            if let Some(warning) = prep.deprecated_warning() {
+                crate::cli::print_deprecated_warning(warning);
+            }
+
+            imp.import(prep).await?
+        }
     };
     let commit = state.get_commit();
     let origin = glib::KeyFile::new();
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index ca9806a6b..e2ca49913 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -201,6 +201,14 @@ impl PreparedImport {
             .chain(self.layers.iter())
     }
 
+    /// If this image is using any deprecated features, return a message saying so.
+    pub fn deprecated_warning(&self) -> Option<&'static str> {
+        match self.export_layout {
+            ExportLayout::V0 => Some("Image is using v0 export layout, this is deprecated and support will be dropped in the future"),
+            ExportLayout::V1 => None,
+        }
+    }
+
     /// Iterate over all layers paired with their history entry.
     /// An error will be returned if the history does not cover all entries.
     pub fn layers_with_history(
diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs
index b1cd05664..a36b796ca 100644
--- a/lib/src/container/unencapsulate.rs
+++ b/lib/src/container/unencapsulate.rs
@@ -135,6 +135,9 @@ pub struct Import {
     pub ostree_commit: String,
     /// The image digest retrieved
     pub image_digest: String,
+
+    /// Any deprecation warning
+    pub deprecated_warning: Option<String>,
 }
 
 /// Use this to process potential errors from a worker and a driver.
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 17da67d60..f159450eb 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -733,6 +733,10 @@ async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> { store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), store::PrepareResult::Ready(r) => r, }; + assert_eq!( + format == ExportLayout::V0, + prep.deprecated_warning().is_some() + ); assert_eq!(prep.export_layout, format); let digest = prep.manifest_digest.clone(); assert!(prep.ostree_commit_layer.commit.is_none()); From db1a2d66691c3f71da238e3c4481dc0cd8e2a734 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 14 Oct 2022 10:51:06 -0400 Subject: [PATCH 464/775] Release 0.8.8 Nothing major/critical but I am specifically wanting to get the image deploy fix out. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 506747d87..e71b85690 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.7" +version = "0.8.8" [dependencies] anyhow = "1.0" From 280b736881164f1bc050796bbe3036c0ce215a4d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 18 Oct 2022 14:19:47 -0400 Subject: [PATCH 465/775] Automatically log some container metadata to systemd journal (if avail) I was logged into one of my OpenShift machines live and watching `journalctl -b -u rpm-ostreed -f` during an upgrade, and was surprised not to see any useful output. This is because of course rpm-ostreed logs everything to the client, in this case the MCD. I think we should also duplicate these messages directly in the journal. libostree does something similar when fetching via HTTP. 
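
For reference, emitting one of these messages with the `libsystemd` crate this patch adds looks roughly like the following hedged sketch (the message text and the `OSTREE_IMAGE` field are made up for illustration):

```
use libsystemd::logging::{journal_send, Priority};

fn main() {
    // Nothing to do when we're not running under systemd at all.
    if !libsystemd::daemon::booted() {
        return;
    }
    // Structured fields end up as journal variables alongside MESSAGE.
    let vars = [("OSTREE_IMAGE", "quay.io/exampleos/exampleos:latest")];
    if let Err(e) = journal_send(Priority::Info, "Fetching image", vars.into_iter()) {
        eprintln!("failed to write to journal: {e}");
    }
}
```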
--- .github/workflows/rust.yml | 2 +- ci/priv-integration.sh | 4 ++++ lib/Cargo.toml | 1 + lib/src/cli.rs | 14 ++------------ lib/src/container/store.rs | 28 ++++++++++++++++++++++++++++ lib/src/lib.rs | 1 + lib/src/logging.rs | 37 +++++++++++++++++++++++++++++++++++++ 7 files changed, 74 insertions(+), 13 deletions(-) create mode 100644 lib/src/logging.rs diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 69afc1324..9d957937b 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -148,7 +148,7 @@ jobs: runs-on: ubuntu-latest container: image: quay.io/coreos-assembler/fcos:testing-devel - options: "--privileged -v /:/run/host" + options: "--privileged --pid=host -v /run/systemd/journal:/run/systemd/journal -v /:/run/host" steps: - name: Checkout repository uses: actions/checkout@v2 diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 189888547..6a268f2bb 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -40,4 +40,8 @@ for img in "${image}" "${old_image}"; do fi done +# Verify we have systemd journal messages +nsenter -m -t 1 journalctl _COMM=ostree-ext-cli > logs.txt +grep 'layers stored: ' logs.txt + echo ok privileged integration diff --git a/lib/Cargo.toml b/lib/Cargo.toml index e71b85690..2fd55f3f7 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -27,6 +27,7 @@ hex = "0.4.3" indicatif = "0.17.0" once_cell = "1.9" libc = "0.2.92" +libsystemd = "0.5.0" oci-spec = "0.5.4" openssl = "0.10.33" ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.15.0" } diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 28cd61c30..7b0fc884a 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -449,18 +449,8 @@ async fn handle_layer_progress_print( } fn print_layer_status(prep: &PreparedImport) { - let (stored, to_fetch, to_fetch_size) = - prep.all_layers() - .fold((0u32, 0u32, 0u64), |(stored, to_fetch, sz), v| { - if v.commit.is_some() { - (stored + 1, to_fetch, sz) - } else { - (stored, to_fetch + 1, sz + v.size()) - } - }); - if to_fetch > 0 { - let size = crate::glib::format_size(to_fetch_size); - println!("layers stored: {stored} needed: {to_fetch} ({size})"); + if let Some(status) = prep.format_layer_status() { + println!("{status}"); } } diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index ca9806a6b..86988c222 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -6,6 +6,7 @@ //! base. See [`encapsulate`][`super::encapsulate()`] for more information on encaspulation of images. 
 use super::*;
+use crate::logging::system_repo_journal_print;
 use crate::refescape;
 use anyhow::{anyhow, Context};
 use containers_image_proxy::{ImageProxy, OpenedImage};
@@ -226,6 +227,23 @@ impl PreparedImport {
             .transpose()
         })
     }
+
+    /// Common helper to format a string for the status
+    pub(crate) fn format_layer_status(&self) -> Option<String> {
+        let (stored, to_fetch, to_fetch_size) =
+            self.all_layers()
+                .fold((0u32, 0u32, 0u64), |(stored, to_fetch, sz), v| {
+                    if v.commit.is_some() {
+                        (stored + 1, to_fetch, sz)
+                    } else {
+                        (stored, to_fetch + 1, sz + v.size())
+                    }
+                });
+        (to_fetch > 0).then(|| {
+            let size = crate::glib::format_size(to_fetch_size);
+            format!("layers stored: {stored} needed: {to_fetch} ({size})")
+        })
+    }
 }
 
 // Given a manifest, compute its ostree ref name and cached ostree commit
@@ -399,6 +417,13 @@ impl ImageImporter {
         // Apply our defaults to the proxy config
         merge_default_container_proxy_opts(&mut config)?;
         let proxy = ImageProxy::new_with_config(config).await?;
+
+        system_repo_journal_print(
+            repo,
+            libsystemd::logging::Priority::Info,
+            &format!("Fetching {}", imgref),
+        );
+
         let proxy_img = proxy.open_image(&imgref.imgref.to_string()).await?;
         let repo = repo.clone();
         Ok(ImageImporter {
@@ -655,6 +680,9 @@ impl ImageImporter {
         mut self,
         mut import: Box<PreparedImport>,
     ) -> Result<Box<LayeredImageState>> {
+        if let Some(status) = import.format_layer_status() {
+            system_repo_journal_print(&self.repo, libsystemd::logging::Priority::Info, &status);
+        }
         // First download all layers for the base image (if necessary) - we need the SELinux policy
         // there to label all following layers.
         self.unencapsulate_base(&mut import, true).await?;
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index 759d373f9..a19940feb 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -35,6 +35,7 @@ pub mod container_utils;
 pub mod diff;
 pub mod ima;
 pub mod keyfileext;
+pub(crate) mod logging;
 pub mod refescape;
 pub mod tar;
 pub mod tokio_util;
diff --git a/lib/src/logging.rs b/lib/src/logging.rs
new file mode 100644
index 000000000..32fe6f751
--- /dev/null
+++ b/lib/src/logging.rs
@@ -0,0 +1,37 @@
+use std::collections::HashMap;
+use std::sync::atomic::{AtomicBool, Ordering};
+
+/// Set to true if we failed to write to the journal once
+static EMITTED_JOURNAL_ERROR: AtomicBool = AtomicBool::new(false);
+
+/// Wrapper for systemd structured logging which only emits a message
+/// if we're targeting the system repository, and it's booted.
+pub(crate) fn system_repo_journal_send<K, V>(
+    repo: &ostree::Repo,
+    priority: libsystemd::logging::Priority,
+    msg: &str,
+    vars: impl Iterator<Item = (K, V)>,
+) where
+    K: AsRef<str>,
+    V: AsRef<str>,
+{
+    if !repo.is_system() {
+        return;
+    }
+    if let Err(e) = libsystemd::logging::journal_send(priority, msg, vars) {
+        if !EMITTED_JOURNAL_ERROR.swap(true, Ordering::SeqCst) {
+            eprintln!("failed to write to journal: {e}");
+        }
+    }
+}
+
+/// Wrapper for systemd structured logging which only emits a message
+/// if we're targeting the system repository, and it's booted.
+pub(crate) fn system_repo_journal_print( + repo: &ostree::Repo, + priority: libsystemd::logging::Priority, + msg: &str, +) { + let vars: HashMap<&str, &str> = HashMap::new(); + system_repo_journal_send(repo, priority, msg, vars.into_iter()) +} From 40b843788e43c89c4838cef1fd435f0e5f4b195a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 26 Oct 2022 12:58:37 -0400 Subject: [PATCH 466/775] container: Force on anonymous fetches if no config file We've seen a weird error out of the container stack when we're not authorized to fetch an image, *and* no pull secret is set up. e.g. https://github.com/coreos/fedora-coreos-tracker/issues/1328#issuecomment-1292067775 ``` error: remote error: getting username and password: 1 error occurred: * reading JSON file "/run/containers/62011/auth.json": open /run/containers/62011/auth.json: permission denied ``` We don't want the containers/image stack trying to read the "standard" config paths at the moment for a few reasons; one is that the standard paths conflate "root" and "the system". We want to support separate pull secrets. But, it should also work to symlink the authfile. --- lib/src/container/mod.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index e598892c6..faa8dd15e 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -219,6 +219,12 @@ pub fn merge_default_container_proxy_opts( ) -> Result<()> { if !config.auth_anonymous && config.authfile.is_none() { config.authfile = crate::globals::get_global_authfile_path()?; + // If there's no authfile, then force on anonymous pulls to ensure + // that the container stack doesn't try to find it in the standard + // container paths. + if config.authfile.is_none() { + config.auth_anonymous = true; + } } Ok(()) } From c1e2fcd87849825af6bda33682bc068dfe5eebc0 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 27 Oct 2022 11:52:06 -0400 Subject: [PATCH 467/775] lib: release 0.8.9 Just two changes, one feature for better systemd logging, and one for unauthenticated fetches. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 2fd55f3f7..130e88700 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.8.8" +version = "0.8.9" [dependencies] anyhow = "1.0" From b5df5bc482ce7cbb452dc0b260f5e828e2203fe5 Mon Sep 17 00:00:00 2001 From: Joseph Marrero Date: Thu, 27 Oct 2022 15:37:21 -0400 Subject: [PATCH 468/775] ci: use Containerfile instead of Dockerfile --- ci/container-build-integration.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ci/container-build-integration.sh b/ci/container-build-integration.sh index a3741b36d..7877a863c 100755 --- a/ci/container-build-integration.sh +++ b/ci/container-build-integration.sh @@ -10,23 +10,23 @@ chmod a+x ostree-ext-cli workdir=${PWD} cd ${example} cp ${workdir}/ostree-ext-cli . -sed -ie 's,ostree container commit,ostree-ext-cli container commit,' Dockerfile -sed -ie 's,^\(FROM .*\),\1\nADD ostree-ext-cli /usr/bin/,' Dockerfile +sed -ie 's,ostree container commit,ostree-ext-cli container commit,' Containerfile +sed -ie 's,^\(FROM .*\),\1\nADD ostree-ext-cli /usr/bin/,' Containerfile git diff for runtime in podman docker; do - $runtime build -t localhost/fcos-tailscale . + $runtime build -t localhost/fcos-tailscale -f Containerfile . 
  $runtime run --rm localhost/fcos-tailscale rpm -q tailscale
 done
 
 cd $(mktemp -d)
 cp ${workdir}/ostree-ext-cli .
-cat > Dockerfile << EOF
+cat > Containerfile << EOF
 FROM $image
 ADD ostree-ext-cli /usr/bin/
 RUN set -x; test \$(ostree-ext-cli internal-only-for-testing detect-env) = ostree-container
 EOF
 # Also verify docker buildx, which apparently doesn't have /.dockerenv
-docker buildx build -t localhost/fcos-tailscale .
+docker buildx build -t localhost/fcos-tailscale -f Containerfile .
 
 echo ok container image integration

From d64929596a5034191643007175284c8383f06030 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 3 Nov 2022 12:07:27 -0400
Subject: [PATCH 469/775] ci: Adapt to new FCOS image location

We deleted the old image.
---
 .github/workflows/rust.yml        | 6 +++---
 ci/container-build-integration.sh | 2 +-
 ci/priv-integration.sh            | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 9d957937b..b152fdb67 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -114,7 +114,7 @@ jobs:
     name: "Integration"
     needs: build
     runs-on: ubuntu-latest
-    container: quay.io/coreos-assembler/fcos:testing-devel
+    container: quay.io/fedora/fedora-coreos:testing-devel
     steps:
       - name: Checkout repository
         uses: actions/checkout@v2
@@ -130,7 +130,7 @@ jobs:
     name: "Integration (IMA)"
     needs: build
     runs-on: ubuntu-latest
-    container: quay.io/coreos-assembler/fcos:testing-devel
+    container: quay.io/fedora/fedora-coreos:testing-devel
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
@@ -147,7 +147,7 @@ jobs:
     needs: build
     runs-on: ubuntu-latest
     container:
-      image: quay.io/coreos-assembler/fcos:testing-devel
+      image: quay.io/fedora/fedora-coreos:testing-devel
       options: "--privileged --pid=host -v /run/systemd/journal:/run/systemd/journal -v /:/run/host"
     steps:
       - name: Checkout repository
diff --git a/ci/container-build-integration.sh b/ci/container-build-integration.sh
index 7877a863c..4f10dac9e 100755
--- a/ci/container-build-integration.sh
+++ b/ci/container-build-integration.sh
@@ -2,7 +2,7 @@
 # Verify `ostree container commit`
 set -euo pipefail
 
-image=quay.io/coreos-assembler/fcos:stable
+image=quay.io/fedora/fedora-coreos:stable
 example=coreos-layering-examples/tailscale
 set -x
 
diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh
index 6a268f2bb..08025e561 100755
--- a/ci/priv-integration.sh
+++ b/ci/priv-integration.sh
@@ -6,7 +6,7 @@ set -euo pipefail
 sysroot=/run/host
 # Current stable image fixture
-image=quay.io/coreos-assembler/fcos:testing-devel
+image=quay.io/fedora/fedora-coreos:testing-devel
 # An unchunked v1 image
 old_image=quay.io/cgwalters/fcos:unchunked
 imgref=ostree-unverified-registry:${image}

From 1ddaa5b771d90b8dbd90c21786d55531aa4be33f Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 3 Nov 2022 08:44:25 -0400
Subject: [PATCH 470/775] cli: Add `--no-imgref` to deploy option

Previously I added it to the API, intending to use it for FCOS, but
hadn't actually tested that at the time.  I'm still not entirely sure
we will, but let's add it to the CLI anyways.
---
 lib/src/cli.rs | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 09bf9eba8..3b1bb6b96 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -278,6 +278,13 @@ enum ContainerImageOpts {
         #[clap(value_parser = parse_imgref)]
         target_imgref: Option<OstreeImageReference>,
 
+        /// If set, only write the layer refs, but not the final container image reference. 
This
+        /// allows generating a disk image that when booted uses "native ostree", but has layer
+        /// references "pre-cached" such that a container image fetch will avoid redownloading
+        /// everything.
+        #[clap(long)]
+        no_imgref: bool,
+
         #[clap(long)]
         /// Add a kernel argument
         karg: Option<Vec<String>>,
@@ -788,6 +795,7 @@ where
                 stateroot,
                 imgref,
                 target_imgref,
+                no_imgref,
                 karg,
                 proxyopts,
                 write_commitid_to,
@@ -800,10 +808,12 @@ where
                     let r: Vec<_> = v.iter().map(|s| s.as_str()).collect();
                     r
                 });
+                #[allow(clippy::needless_update)]
                 let options = crate::container::deploy::DeployOpts {
                     kargs: kargs.as_deref(),
                     target_imgref: target_imgref.as_ref(),
                     proxy_cfg: Some(proxyopts.into()),
+                    no_imgref,
                     ..Default::default()
                 };
                 let state = crate::container::deploy::deploy(

From 68ec25d155c44efb740622d05475009d58cb5ed8 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 3 Nov 2022 08:56:44 -0400
Subject: [PATCH 471/775] logging: Don't write to journal if not running
 systemd

I noticed an error message when we're doing the deployment in supermin
in coreos-assembler.
---
 .github/workflows/rust.yml | 2 +-
 lib/src/logging.rs         | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index b152fdb67..3a8d0f6ba 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -148,7 +148,7 @@ jobs:
     runs-on: ubuntu-latest
     container:
       image: quay.io/fedora/fedora-coreos:testing-devel
-      options: "--privileged --pid=host -v /run/systemd/journal:/run/systemd/journal -v /:/run/host"
+      options: "--privileged --pid=host -v /run/systemd:/run/systemd -v /:/run/host"
     steps:
       - name: Checkout repository
         uses: actions/checkout@v2
diff --git a/lib/src/logging.rs b/lib/src/logging.rs
index 32fe6f751..b80f30ebd 100644
--- a/lib/src/logging.rs
+++ b/lib/src/logging.rs
@@ -15,6 +15,9 @@ pub(crate) fn system_repo_journal_send<K, V>(
     K: AsRef<str>,
     V: AsRef<str>,
 {
+    if !libsystemd::daemon::booted() {
+        return;
+    }
     if !repo.is_system() {
         return;
     }

From 2bfac370aa52ba8a0e5089aee74924861671e401 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 21 Sep 2022 13:29:08 -0400
Subject: [PATCH 472/775] Add a `compat` feature

This is the first step towards disabling our support for old tar
formats and the legacy container formats.

When the `compat` feature is off, everything defaults to v1 format, and
we reject parsing the old v0 container images in particular.
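
The gating pattern used throughout the diff below is `cfg!(feature = ...)` on defaults plus hard errors on the legacy paths. As a minimal sketch (the function name here is illustrative, not from the patch):

```
// Illustrative only: defaults flip based on the off-by-default feature.
fn default_tar_format_version() -> u32 {
    if cfg!(feature = "compat") {
        0 // legacy default, kept only for compatibility builds
    } else {
        1 // the only supported version going forward
    }
}

fn main() {
    println!("default tar format: v{}", default_tar_format_version());
}
```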
--- .github/workflows/rust.yml | 15 ++++++++ ci/priv-integration.sh | 11 +++++- lib/Cargo.toml | 1 + lib/src/container/encapsulate.rs | 11 +++++- lib/src/container/store.rs | 7 ++++ lib/src/fixture.rs | 2 +- lib/src/tar/export.rs | 14 ++++++- lib/tests/it/main.rs | 65 ++++++++++++++++++-------------- 8 files changed, 91 insertions(+), 35 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index b152fdb67..092a0c5cd 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -41,6 +41,21 @@ jobs: run: (cd cli && cargo check) && (cd lib && cargo check) - name: Run tests run: cargo test -- --nocapture --quiet + test-compat: + runs-on: ubuntu-latest + container: quay.io/coreos-assembler/fcos-buildroot:testing-devel + steps: + - uses: actions/checkout@v2 + - name: Install deps + run: ./ci/installdeps.sh + - name: Cache Dependencies + uses: Swatinem/rust-cache@v1 + with: + key: "test-compat" + - name: Build + run: cargo test --no-run --features=compat + - name: Run tests + run: cargo test --features=compat -- --nocapture --quiet build: runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 08025e561..41b95520a 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -7,7 +7,6 @@ set -euo pipefail sysroot=/run/host # Current stable image fixture image=quay.io/fedora/fedora-coreos:testing-devel -# An unchunked v1 image old_image=quay.io/cgwalters/fcos:unchunked imgref=ostree-unverified-registry:${image} stateroot=testos @@ -26,7 +25,7 @@ ostree-ext-cli container image deploy --sysroot "${sysroot}" \ ostree admin --sysroot="${sysroot}" status ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" ostree admin --sysroot="${sysroot}" undeploy 0 -for img in "${image}" "${old_image}"; do +for img in "${image}"; do ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${img}" ostree admin --sysroot="${sysroot}" status @@ -40,6 +39,14 @@ for img in "${image}" "${old_image}"; do fi done +if ostree-ext-cli container image deploy --sysroot "${sysroot}" \ + --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${old_image}" 2>err.txt; then + echo "deployed old image" + exit 1 +fi +grep 'legacy format.*no longer supported' err.txt +echo "ok old image failed to parse" + # Verify we have systemd journal messages nsenter -m -t 1 journalctl _COMM=ostree-ext-cli > logs.txt grep 'layers stored: ' logs.txt diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 130e88700..29b703ec5 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -57,4 +57,5 @@ features = ["dox"] [features] dox = ["ostree/dox"] +compat = [] internal-testing-api = ["sh-inline", "indoc"] diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 00cfb3216..56c422d0d 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -31,8 +31,11 @@ pub enum ExportLayout { impl Default for ExportLayout { fn default() -> Self { - // For now - Self::V0 + if cfg!(feature = "compat") { + Self::V0 + } else { + Self::V1 + } } } @@ -124,6 +127,10 @@ fn export_chunked( match opts.format { ExportLayout::V0 => { + if cfg!(not(feature = "compat")) { + let label = opts.format.label(); + anyhow::bail!("This legacy format using the {label} label is no longer supported"); + } // In V0, the component/content chunks come first. 
            for (layer, name) in layers {
                ociw.push_layer(manifest, imgcfg, layer, name.as_str());
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 01d1e14cf..24d5624e5 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -371,6 +371,13 @@ pub(crate) fn parse_manifest_layout<'a>(
     // Now, we need to handle the split differently in chunked v1 vs v0
     match layout {
         ExportLayout::V0 => {
+            if cfg!(not(feature = "compat")) {
+                let label = layout.label();
+                anyhow::bail!(
+                    "This legacy format using the {label} label is no longer supported"
+                );
+            }
+
             for layer in manifest.layers() {
                 if layer == target_layer {
                     if after_target {
diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index c9e86dcf6..85ee995f4 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -395,7 +395,7 @@ impl Fixture {
             path,
             srcrepo,
             destrepo,
-            format_version: 0,
+            format_version: if cfg!(feature = "compat") { 0 } else { 1 },
             selinux: true,
         })
     }
diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index b54aac4f8..41637de40 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -20,7 +20,11 @@ use std::ops::RangeInclusive;
 pub const BARE_SPLIT_XATTRS_MODE: &str = "bare-split-xattrs";
 
 /// The set of allowed format versions; ranges from zero to 1, inclusive.
+#[cfg(feature = "compat")]
 pub const FORMAT_VERSIONS: RangeInclusive<u32> = 0..=1;
+#[cfg(not(feature = "compat"))]
+/// The set of allowed format versions.
+pub const FORMAT_VERSIONS: RangeInclusive<u32> = 1..=1;
 
 // This is both special in the tar stream *and* it's in the ostree commit.
 const SYSROOT: &str = "sysroot";
@@ -567,12 +571,20 @@ fn impl_export(
 }
 
 /// Configuration for tar export.
-#[derive(Debug, Default, PartialEq, Eq)]
+#[derive(Debug, PartialEq, Eq)]
 pub struct ExportOptions {
     /// Format version; must be in [`FORMAT_VERSIONS`].
     pub format_version: u32,
 }
 
+impl Default for ExportOptions {
+    fn default() -> Self {
+        Self {
+            format_version: if cfg!(feature = "compat") { 0 } else { 1 },
+        }
+    }
+}
+
 /// Export an ostree commit to an (uncompressed) tar archive stream.
 #[context("Exporting commit")]
 pub fn export_commit(
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index f159450eb..79c3d79d2 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -340,38 +340,40 @@ fn test_tar_export_structure() -> Result<()> {
 
     let mut fixture = Fixture::new_v1()?;
 
-    let src_tar = fixture.export_tar()?;
-    let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?);
-    let mut src_tar = tar::Archive::new(src_tar);
-    let mut entries = src_tar.entries()?;
-    // The first entry should be the root directory.
-    let first = entries.next().unwrap()?;
-    let firstpath = first.path()?;
-    assert_eq!(firstpath.to_str().unwrap(), "./");
-    assert_eq!(first.header().mode()?, libc::S_IFDIR | 0o755);
-    let next = entries.next().unwrap().unwrap();
-    assert_eq!(next.path().unwrap().as_os_str(), "sysroot");
-
-    let v0_prelude = [
-        ("sysroot/config", Regular, 0o644),
-        ("sysroot/ostree/repo", Directory, 0o755),
-        ("sysroot/ostree/repo/extensions", Directory, 0o755),
-    ]
-    .into_iter()
-    .map(Into::into);
-
-    // Validate format version 0
-    let expected = v0_prelude.chain(common_tar_structure())
+    if cfg!(feature = "compat") {
+        let src_tar = fixture.export_tar()?;
+        let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?);
+        let mut src_tar = tar::Archive::new(src_tar);
+        let mut entries = src_tar.entries()?;
+        // The first entry should be the root directory. 
+        let first = entries.next().unwrap()?;
+        let firstpath = first.path()?;
+        assert_eq!(firstpath.to_str().unwrap(), "./");
+        assert_eq!(first.header().mode()?, libc::S_IFDIR | 0o755);
+        let next = entries.next().unwrap().unwrap();
+        assert_eq!(next.path().unwrap().as_os_str(), "sysroot");
+
+        let v0_prelude = [
+            ("sysroot/config", Regular, 0o644),
+            ("sysroot/ostree/repo", Directory, 0o755),
+            ("sysroot/ostree/repo/extensions", Directory, 0o755),
+        ]
+        .into_iter()
+        .map(Into::into);
+
+        // Validate format version 0
+        let expected = v0_prelude.chain(common_tar_structure())
         .chain([
             ("sysroot/ostree/repo/xattrs", Directory, 0o755),
             ("sysroot/ostree/repo/xattrs/d67db507c5a6e7bfd078f0f3ded0a5669479a902e812931fc65c6f5e01831ef5", Regular, 0o644),
             ("usr", Directory, 0o755),
         ].into_iter().map(Into::into));
-    validate_tar_expected(
-        fixture.format_version,
-        &mut entries,
-        expected.chain(common_tar_contents_all()),
-    )?;
+        validate_tar_expected(
+            fixture.format_version,
+            &mut entries,
+            expected.chain(common_tar_contents_all()),
+        )?;
+    }
 
     // Validate format version 1
     fixture.format_version = 1;
@@ -700,6 +702,7 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu
 }
 
 #[tokio::test]
+#[cfg(feature = "compat")]
 async fn test_container_chunked_v0() -> Result<()> {
     impl_test_container_chunked(ExportLayout::V0).await
 }
@@ -946,6 +949,7 @@ async fn oci_clone(src: impl AsRef<Utf8Path>, dest: impl AsRef<Utf8Path>) -> Res
 }
 
 #[tokio::test]
+#[cfg(feature = "compat")]
 async fn test_container_import_export_v0() {
     impl_test_container_import_export(ExportLayout::V0, false)
         .await
@@ -1186,8 +1190,11 @@ async fn test_old_code_parses_new_export() -> Result<()> {
         return Ok(());
     }
     let fixture = Fixture::new_v1()?;
-    // We're testing the v0 version that was already shipped
-    let layout = ExportLayout::V0;
+    let layout = if cfg!(feature = "compat") {
+        ExportLayout::V0
+    } else {
+        ExportLayout::V1
+    };
     let imgref = fixture.export_container(layout).await?.0;
     let imgref = OstreeImageReference {
         sigverify: SignatureSource::ContainerPolicyAllowInsecure,

From 07d0ed821ce3d3f9293dfe90a6b8605520518f78 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 21 Sep 2022 15:56:58 -0400
Subject: [PATCH 473/775] Bump semver to 0.9

Because we have an off-by-default `compat` feature.
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 29b703ec5..dc7cb78cd 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0"
 name = "ostree-ext"
 readme = "README.md"
 repository = "https://github.com/ostreedev/ostree-rs-ext"
-version = "0.8.9"
+version = "0.9.0"
 
 [dependencies]
 anyhow = "1.0"

From bfe14ff62ddb33a5193bc09de40c235e9d277569 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 4 Nov 2022 14:28:19 -0400
Subject: [PATCH 474/775] cli: Don't publish

We aren't productizing this CLI entrypoint.
---
 cli/Cargo.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index e5e96e2dd..d604767cf 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2021"
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/ostreedev/ostree-rs-ext"
 readme = "README.md"
+publish = false
 
 [dependencies]
 anyhow = "1.0"

From bac2d7aa9300421f68387a732ff0b27703847e29 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 4 Nov 2022 14:30:08 -0400
Subject: [PATCH 475/775] Don't expose `ostree_manual` API at all in non-test
 mode

This avoids dead code warnings for external crate users.
---
 lib/src/lib.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index a19940feb..3b34573d0 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -46,8 +46,6 @@ pub(crate) mod objgv;
 #[cfg(feature = "internal-testing-api")]
 pub mod ostree_manual;
-#[cfg(not(feature = "internal-testing-api"))]
-mod ostree_manual;
 
 /// Prelude, intended for glob import.
 pub mod prelude {

From ed6075f86effce601f169c5385c1bff9d153f3a2 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 4 Nov 2022 17:11:12 -0400
Subject: [PATCH 476/775] Initial support for generating manpages

It's about time we had online (and shipped in packages) docs for our
tools.

Some code inspired/taken from
https://github.com/coreos/coreos-installer/blob/main/src/cmdline/doc.rs
---
 .github/workflows/rust.yml |  2 ++
 lib/Cargo.toml             |  2 ++
 lib/src/cli.rs             | 31 ++++++++++++++++++--------
 lib/src/docgen.rs          | 42 ++++++++++++++++++++++++++++++++++++++
 lib/src/lib.rs             |  3 +++
 5 files changed, 71 insertions(+), 9 deletions(-)
 create mode 100644 lib/src/docgen.rs

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 060d49da3..1f585ef8e 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -41,6 +41,8 @@ jobs:
       run: (cd cli && cargo check) && (cd lib && cargo check)
     - name: Run tests
       run: cargo test -- --nocapture --quiet
+    - name: Manpage generation
+      run: mkdir -p target/man && cargo run --features=docgen -- man --directory target/man
   test-compat:
     runs-on: ubuntu-latest
     container: quay.io/coreos-assembler/fcos-buildroot:testing-devel
diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index dc7cb78cd..327e4853c 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -17,6 +17,7 @@ camino = "1.0.4"
 chrono = "0.4.19"
 cjson = "0.1.1"
 clap = { version= "3.2", features = ["derive"] }
+clap_mangen = { version = "0.1", optional = true }
 cap-std-ext = "0.26"
 cap-tempfile = "0.25"
 flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" }
@@ -56,6 +57,7 @@ ostree-ext = { path = ".", features = ["internal-testing-api"] }
 features = ["dox"]
 
 [features]
+docgen = ["clap_mangen"]
 dox = ["ostree/dox"]
 compat = []
 internal-testing-api = ["sh-inline", "indoc"]
diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 3b1bb6b96..17de33c5c 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -40,7 +40,7 @@ pub fn parse_repo(s: &Utf8Path) -> Result<ostree::Repo> {
 
 /// Options for importing a tar archive.
 #[derive(Debug, Parser)]
-struct ImportOpts {
+pub(crate) struct ImportOpts {
     /// Path to the repository
     #[clap(long, value_parser)]
     repo: Utf8PathBuf,
@@ -51,7 +51,7 @@ struct ImportOpts {
 
 /// Options for exporting a tar archive.
 #[derive(Debug, Parser)]
-struct ExportOpts {
+pub(crate) struct ExportOpts {
     /// Path to the repository
     #[clap(long, value_parser)]
     repo: Utf8PathBuf,
@@ -66,7 +66,7 @@ struct ExportOpts {
 
 /// Options for import/export to tar archives.
 #[derive(Debug, Subcommand)]
-enum TarOpts {
+pub(crate) enum TarOpts {
     /// Import a tar archive (currently, must not be compressed)
     Import(ImportOpts),
 
@@ -76,7 +76,7 @@ enum TarOpts {
 
 /// Options for container import/export.
 #[derive(Debug, Subcommand)]
-enum ContainerOpts {
+pub(crate) enum ContainerOpts {
     #[clap(alias = "import")]
     /// Import an ostree commit embedded in a remote container image
     Unencapsulate {
@@ -147,7 +147,7 @@ enum ContainerOpts {
 
 /// Options for container image fetching.
#[derive(Debug, Parser)] -struct ContainerProxyOpts { +pub(crate) struct ContainerProxyOpts { #[clap(long)] /// Do not use default authentication files. auth_anonymous: bool, @@ -168,7 +168,7 @@ struct ContainerProxyOpts { /// Options for import/export to tar archives. #[derive(Debug, Subcommand)] -enum ContainerImageOpts { +pub(crate) enum ContainerImageOpts { /// List container images List { /// Path to the repository @@ -297,7 +297,7 @@ enum ContainerImageOpts { /// Options for the Integrity Measurement Architecture (IMA). #[derive(Debug, Parser)] -struct ImaSignOpts { +pub(crate) struct ImaSignOpts { /// Path to the repository #[clap(long, value_parser)] repo: Utf8PathBuf, @@ -319,7 +319,7 @@ struct ImaSignOpts { /// Options for internal testing #[derive(Debug, Subcommand)] -enum TestingOpts { +pub(crate) enum TestingOpts { /// Detect the current environment DetectEnv, /// Generate a test fixture @@ -331,12 +331,20 @@ enum TestingOpts { FilterTar, } +/// Options for man page generation +#[derive(Debug, Parser)] +pub(crate) struct ManOpts { + #[clap(long)] + /// Output to this directory + directory: Utf8PathBuf, +} + /// Toplevel options for extended ostree functionality. #[derive(Debug, Parser)] #[clap(name = "ostree-ext")] #[clap(rename_all = "kebab-case")] #[allow(clippy::large_enum_variant)] -enum Opt { +pub(crate) enum Opt { /// Import and export to tar #[clap(subcommand)] Tar(TarOpts), @@ -349,6 +357,9 @@ enum Opt { #[clap(hide(true), subcommand)] #[cfg(feature = "internal-testing-api")] InternalOnlyForTesting(TestingOpts), + #[clap(hide(true))] + #[cfg(feature = "docgen")] + Man(ManOpts), } #[allow(clippy::from_over_into)] @@ -841,5 +852,7 @@ where Opt::ImaSign(ref opts) => ima_sign(opts), #[cfg(feature = "internal-testing-api")] Opt::InternalOnlyForTesting(ref opts) => testing(opts).await, + #[cfg(feature = "docgen")] + Opt::Man(manopts) => crate::docgen::generate_manpages(&manopts.directory), } } diff --git a/lib/src/docgen.rs b/lib/src/docgen.rs new file mode 100644 index 000000000..6bda7f4dd --- /dev/null +++ b/lib/src/docgen.rs @@ -0,0 +1,42 @@ +// Copyright 2022 Red Hat, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 OR MIT + +use anyhow::{Context, Result}; +use camino::Utf8Path; +use clap::{Command, CommandFactory}; +use std::fs::OpenOptions; +use std::io::Write; + +pub fn generate_manpages(directory: &Utf8Path) -> Result<()> { + generate_one(directory, crate::cli::Opt::command()) +} + +fn generate_one(directory: &Utf8Path, cmd: Command) -> Result<()> { + let version = env!("CARGO_PKG_VERSION"); + let name = cmd.get_name(); + let path = directory.join(format!("{name}.8")); + println!("Generating {path}..."); + + let mut out = OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(&path) + .with_context(|| format!("opening {path}")) + .map(std::io::BufWriter::new)?; + clap_mangen::Man::new(cmd.clone()) + .title("ostree-ext") + .section("8") + .source(format!("ostree-ext {version}")) + .render(&mut out) + .with_context(|| format!("rendering {name}.8"))?; + out.flush().context("flushing man page")?; + drop(out); + + for subcmd in cmd.get_subcommands().filter(|c| !c.is_hide_set()) { + let subname = format!("{}-{}", name, subcmd.get_name()); + generate_one(directory, subcmd.clone().name(subname).version(version))?; + } + Ok(()) +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 3b34573d0..2b4c80f32 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -47,6 +47,9 @@ pub(crate) mod objgv; #[cfg(feature = "internal-testing-api")] pub mod ostree_manual; +#[cfg(feature = "docgen")] +mod docgen; + /// Prelude, intended for glob import. pub mod prelude { #[doc(hidden)] From 883a79e4d32b1ca6673a1589a3337ee7ea7663ac Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 8 Nov 2022 18:10:06 -0500 Subject: [PATCH 477/775] tar/export: Don't create hardlinks to zero sized files This is an echo of https://github.com/ostreedev/ostree/pull/2198/commits/558720e7aa1870cbbdb4a0dc22a3968d116daec3 If we hardlink zero sized files, then any modification to them will result in possibly *many* hardlinks needing to be reproduced when serializing to tar, which is definitely suboptimal. But further, it provokes a bug in our import path; when processing derived layers, we need to handle *both* cases of having the file in `/sysroot` be the source as well as the destination of the hardlink in the tar stream. 
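
Distilled from the change below, the new rule for serializing a would-be hardlink looks like this (a standalone sketch using the `tar` crate; the `src`/`dest` naming and helper name are illustrative, not the patch's code):

```
use std::io::Write;

// Zero-sized regular files are emitted as regular entries; everything
// else becomes a link entry, which always has size zero in the stream.
fn append_link_or_empty<W: Write>(
    out: &mut tar::Builder<W>,
    mut h: tar::Header,
    src: &str,
    dest: &str,
) -> std::io::Result<()> {
    let size = h.size()?;
    h.set_size(0);
    if h.entry_type() == tar::EntryType::Regular && size == 0 {
        out.append_data(&mut h, dest, &mut std::io::empty())
    } else {
        h.set_entry_type(tar::EntryType::Link);
        h.set_link_name(src)?;
        out.append_data(&mut h, dest, &mut std::io::empty())
    }
}
```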
---
 lib/src/fixture.rs    |  8 ++++++--
 lib/src/tar/export.rs | 26 +++++++++++++++++++-------
 lib/tests/it/main.rs  |  4 +++-
 3 files changed, 28 insertions(+), 10 deletions(-)

diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index 85ee995f4..305dea24e 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -128,6 +128,7 @@ static OWNERS: Lazy<Vec<(Regex, &str)>> = Lazy::new(|| {
         ("usr/lib/modules/.*/initramfs", "initramfs"),
         ("usr/lib/modules", "kernel"),
         ("usr/bin/(ba)?sh", "bash"),
+        ("usr/lib.*/emptyfile.*", "bash"),
         ("usr/bin/hardlink.*", "testlink"),
         ("usr/etc/someconfig.conf", "someconfig"),
         ("usr/etc/polkit.conf", "a-polkit-config"),
@@ -146,6 +147,9 @@ m 0 0 755
 r usr/bin/bash the-bash-shell
 l usr/bin/sh bash
 m 0 0 644
+# Some empty files
+r usr/lib/emptyfile
+r usr/lib64/emptyfile2
 # Should be the same object
 r usr/bin/hardlink-a testlink
 r usr/bin/hardlink-b testlink
@@ -163,8 +167,8 @@ m 0 0 1755
 d tmp
 "## };
 pub const CONTENTS_CHECKSUM_V0: &str =
-    "3af747e156c34d08a3a2fb85b94de6999205a1d1c1c7b1993d6ce534a8918cd9";
-pub static CONTENTS_V0_LEN: Lazy<usize> = Lazy::new(|| OWNERS.len());
+    "5e41de82f9f861fa51e53ce6dd640a260e4fb29b7657f5a3f14157e93d2c0659";
+pub static CONTENTS_V0_LEN: Lazy<usize> = Lazy::new(|| OWNERS.len().checked_sub(1).unwrap());
 
 #[derive(Debug, PartialEq, Eq)]
 enum SeLabel {
diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 41637de40..07bcf5d39 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -419,9 +419,9 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
         h.set_gid(meta.attribute_uint32("unix::gid") as u64);
         let mode = meta.attribute_uint32("unix::mode");
         h.set_mode(self.filter_mode(mode));
-        let mut target_header = h.clone();
-        target_header.set_size(0);
-
+        if instream.is_some() {
+            h.set_size(meta.size() as u64);
+        }
         if !self.wrote_content.contains(checksum) {
             let inserted = self.wrote_content.insert(checksum.to_string());
             debug_assert!(inserted);
@@ -464,7 +464,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
             }
         }
 
-        Ok((path, target_header))
+        Ok((path, h))
     }
 
     /// Write a directory using the provided metadata.
@@ -488,9 +488,21 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
         mut h: tar::Header,
         dest: &Utf8Path,
     ) -> Result<()> {
-        h.set_entry_type(tar::EntryType::Link);
-        h.set_link_name(srcpath)?;
-        self.out.append_data(&mut h, dest, &mut std::io::empty())?;
+        // Query the original size first
+        let size = h.size().context("Querying size for hardlink append")?;
+        // Don't create hardlinks to zero-sized files, it's much more likely
+        // to result in generated tar streams from container builds resulting
+        // in a modified linked-to file in /sysroot, which we don't currently handle.
+        // And in the case where the input is *not* zero sized, we still output
+        // a hardlink of size zero, as this is what is normal. 
+        h.set_size(0);
+        if h.entry_type() == tar::EntryType::Regular && size == 0 {
+            self.out.append_data(&mut h, dest, &mut std::io::empty())?;
+        } else {
+            h.set_entry_type(tar::EntryType::Link);
+            h.set_link_name(srcpath)?;
+            self.out.append_data(&mut h, dest, &mut std::io::empty())?;
+        }
         Ok(())
     }
 
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 79c3d79d2..08d4d55ce 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -306,10 +306,12 @@ fn common_tar_structure() -> impl Iterator<Item = TarExpected> {
 
 // Find various expected files
 fn common_tar_contents_all() -> impl Iterator<Item = TarExpected> {
-    use tar::EntryType::{Directory, Link};
+    use tar::EntryType::{Directory, Link, Regular};
     [
         ("boot", Directory, 0o755),
         ("usr", Directory, 0o755),
+        ("usr/lib/emptyfile", Regular, 0o644),
+        ("usr/lib64/emptyfile2", Regular, 0o644),
         ("usr/bin/bash", Link, 0o755),
         ("usr/bin/hardlink-a", Link, 0o644),
         ("usr/bin/hardlink-b", Link, 0o644),

From 97d8d3ded5ff2c02b49d6e3085d9b0fef5feae9d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 9 Nov 2022 16:13:22 -0500
Subject: [PATCH 478/775] container: Prune image layers by default

A while ago we added APIs to do this, but we should really do it by
default when fetching upgraded images.  Otherwise we effectively leak
space.
---
 lib/src/container/store.rs | 42 +++++++++++++++++++++++++++++++++++---
 lib/tests/it/main.rs       |  9 ++++++--
 2 files changed, 46 insertions(+), 5 deletions(-)

diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 24d5624e5..c365d8f35 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -132,7 +132,8 @@ pub struct ImageImporter {
     pub(crate) proxy: ImageProxy,
     imgref: OstreeImageReference,
     target_imgref: Option<OstreeImageReference>,
-    no_imgref: bool, // If true, do not write final image ref
+    no_imgref: bool,  // If true, do not write final image ref
+    disable_gc: bool, // If true, don't prune unused image layers
 
     pub(crate) proxy_img: OpenedImage,
     layer_progress: Option<Sender<ImportProgress>>,
@@ -447,6 +448,7 @@ impl ImageImporter {
             proxy_img,
             target_imgref: None,
             no_imgref: false,
+            disable_gc: false,
             imgref: imgref.clone(),
             layer_progress: None,
             layer_byte_progress: None,
@@ -465,6 +467,11 @@ impl ImageImporter {
         self.no_imgref = true;
     }
 
+    /// Do not prune image layers.
+    pub fn disable_gc(&mut self) {
+        self.disable_gc = true;
+    }
+
     /// Determine if there is a new manifest, and if so return its digest.
     #[context("Preparing import")]
     pub async fn prepare(&mut self) -> Result<PrepareResult> {
         self.prepare_internal(false).await
@@ -691,7 +698,9 @@ impl ImageImporter {
         })
     }
 
-    /// Import a layered container image
+    /// Import a layered container image.
+    ///
+    /// If enabled, this will also prune unused container image layers.
     #[context("Importing")]
     pub async fn import(
         mut self,
         mut import: Box<PreparedImport>,
@@ -847,6 +856,12 @@ impl ImageImporter {
                 repo.transaction_set_ref(None, &ostree_ref, Some(merged_commit.as_str()));
             }
             txn.commit(cancellable)?;
+
+            if !self.disable_gc {
+                let n: u32 = gc_image_layers_impl(repo, cancellable)?;
+                tracing::debug!("pruned {n} layers");
+            }
+
             // Here we re-query state just to run through the same code path,
             // though it'd be cheaper to synthesize it from the data we already have.
             let state = query_image(repo, &imgref)?.unwrap();
@@ -970,7 +985,14 @@ pub async fn copy(
 /// The underlying objects are *not* pruned; that requires a separate invocation
 /// of [`ostree::Repo::prune`]. 
 pub fn gc_image_layers(repo: &ostree::Repo) -> Result<u32> {
-    let cancellable = gio::NONE_CANCELLABLE;
+    gc_image_layers_impl(repo, gio::NONE_CANCELLABLE)
+}
+
+#[context("Pruning image layers")]
+fn gc_image_layers_impl(
+    repo: &ostree::Repo,
+    cancellable: Option<&gio::Cancellable>,
+) -> Result<u32> {
     let all_images = list_images(repo)?;
     let all_manifests = all_images
         .into_iter()
@@ -1005,6 +1027,20 @@ pub fn gc_image_layers(repo: &ostree::Repo) -> Result<u32> {
     Ok(pruned)
 }
 
+#[cfg(feature = "internal-testing-api")]
+/// Return how many container blobs (layers) are stored
+pub fn count_layer_references(repo: &ostree::Repo) -> Result<u32> {
+    let cancellable = gio::NONE_CANCELLABLE;
+    let n = repo
+        .list_refs_ext(
+            Some(LAYER_PREFIX),
+            ostree::RepoListRefsExtFlags::empty(),
+            cancellable,
+        )?
+        .len();
+    Ok(n as u32)
+}
+
 #[context("Pruning {}", image)]
 fn prune_image(repo: &ostree::Repo, image: &ImageReference) -> Result<()> {
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 08d4d55ce..8260d2188 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -800,12 +800,14 @@ r usr/bin/bash bash-v0
         }
     }
 
+    assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1);
+    let n = store::count_layer_references(fixture.destrepo())? as i64;
     let _import = imp.import(prep).await.unwrap();
 
     assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1);
 
-    let n_removed = store::gc_image_layers(fixture.destrepo())?;
-    assert_eq!(n_removed, 2);
+    let n2 = store::count_layer_references(fixture.destrepo())? as i64;
+    assert_eq!(n, n2);
     fixture
         .destrepo()
         .prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::NONE_CANCELLABLE)?;
@@ -846,6 +848,8 @@ r usr/bin/bash bash-v0
     assert!(prep.ostree_commit_layer.commit.is_some());
     assert_eq!(prep.ostree_layers.len(), nlayers as usize);
 
+    // We want to test explicit layer pruning
+    imp.disable_gc();
     let _import = imp.import(prep).await.unwrap();
     assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 2);
 
@@ -869,6 +873,7 @@ r usr/bin/bash bash-v0
     assert_eq!(n_removed, (*CONTENTS_V0_LEN + 1) as u32);
 
     // Repo should be clean now
+    assert_eq!(store::count_layer_references(fixture.destrepo())?, 0);
     assert_eq!(
         fixture
             .destrepo()

From bdd4f209f03300281df9415e2e55e5884a685530 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 9 Nov 2022 16:41:47 -0500
Subject: [PATCH 479/775] tar: Fix one clippy lint

This shows up with newer clippy.
---
 lib/src/tar/write.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs
index 8fde2b755..9c2de031e 100644
--- a/lib/src/tar/write.rs
+++ b/lib/src/tar/write.rs
@@ -33,7 +33,7 @@ pub(crate) fn copy_entry(
     let path = if let Some(path) = path {
         path.to_owned()
     } else {
-        (&*entry.path()?).to_owned()
+        (*entry.path()?).to_owned()
     };
 
     let mut header = entry.header().clone();

From ee502e54549797161afc57f885c7c4acbc67b07e Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 9 Nov 2022 19:12:16 -0500
Subject: [PATCH 480/775] container: Add an API to query information from a
 commit object

Prep for better support for pruning.  An ostree deployment will retain
a strong reference solely to a commit object; we can't rely on anything
else.

I plan to use this in rpm-ostree to display metadata information about
the container used for a deployment, even if the image reference has
been pruned.
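
A hedged usage sketch of the new API (the helper function here is invented; it assumes `LayeredImageState` exposes the `manifest_digest` field referenced in the diff below):

```
use anyhow::Result;

fn print_deployed_image_info(repo: &ostree::Repo, deployed_commit: &str) -> Result<()> {
    // Only the merge commit is needed, so this works even after the
    // image ref itself has been pruned.
    let state = ostree_ext::container::store::query_image_commit(repo, deployed_commit)?;
    println!("manifest digest: {}", state.manifest_digest);
    Ok(())
}
```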
--- lib/src/container/store.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 24d5624e5..a4bc01dd1 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -771,7 +771,6 @@ impl ImageImporter { // Destructure to transfer ownership to thread let repo = self.repo; - let imgref = self.target_imgref.unwrap_or(self.imgref); let state = crate::tokio_util::spawn_blocking_cancellable_flatten( move |cancellable| -> Result> { use cap_std_ext::rustix::fd::AsRawFd; @@ -849,7 +848,7 @@ impl ImageImporter { txn.commit(cancellable)?; // Here we re-query state just to run through the same code path, // though it'd be cheaper to synthesize it from the data we already have. - let state = query_image(repo, &imgref)?.unwrap(); + let state = query_image_commit(repo, &merged_commit)?; Ok(state) }, ) @@ -878,11 +877,16 @@ pub fn query_image_ref( ) -> Result>> { let ostree_ref = &ref_for_image(imgref)?; let merge_rev = repo.resolve_rev(ostree_ref, true)?; - let (merge_commit, merge_commit_obj) = if let Some(r) = merge_rev { - (r.to_string(), repo.load_commit(r.as_str())?.0) - } else { - return Ok(None); - }; + merge_rev + .map(|r| query_image_commit(repo, r.as_str())) + .transpose() +} + +/// Query metadata for a pulled image via an OSTree commit digest. +/// The digest must refer to a pulled container image's merge commit. +pub fn query_image_commit(repo: &ostree::Repo, commit: &str) -> Result> { + let merge_commit = commit.to_string(); + let merge_commit_obj = repo.load_commit(commit)?.0; let commit_meta = &merge_commit_obj.child_value(0); let commit_meta = &ostree::glib::VariantDict::new(Some(commit_meta)); let (manifest, manifest_digest) = manifest_data_from_commitmeta(commit_meta)?; @@ -905,7 +909,7 @@ pub fn query_image_ref( configuration, }); tracing::debug!(state = ?state); - Ok(Some(state)) + Ok(state) } /// Query metadata for a pulled image. From aec69c8171d797176c795dbd9c2a434b2d5c785e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 10 Nov 2022 14:37:22 -0500 Subject: [PATCH 481/775] container: Add deployed commits into set of GC roots Prep for handling image pruning better. The way things are kind of expected to work today is that for a deployed ostree commit, we have *two* refs which point to it - one like e.g. `fedora:fedora/x86_64/coreos/stable`, as well as the "deployment ref" like "ostree/0/1/1" which is a synthetic ref generated by the sysroot core. We want to be able to remove the container image refs - but doing so today subjects the *layer* branches to garbage collection. Fix this by looking at the deployment refs as well as the set of images when computing the set of references for container images. 
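To make the deployment-ref model concrete, a minimal sketch of enumerating
those synthetic refs (illustrative only; the actual helper added below also
extracts the container manifest from each commit):

```rust
use anyhow::Result;
use ostree::gio;

// Hedged sketch: collect the commit checksums referenced by deployment refs
// such as "ostree/0/1/1"; these act as GC roots alongside the image refs.
fn deployment_commits(
    repo: &ostree::Repo,
    cancellable: Option<&gio::Cancellable>,
) -> Result<Vec<String>> {
    let mut commits = Vec::new();
    // The sysroot core generates refs under "ostree/0/..." and "ostree/1/...".
    for prefix in ["ostree/0", "ostree/1"] {
        let refs = repo.list_refs_ext(
            Some(prefix),
            ostree::RepoListRefsExtFlags::empty(),
            cancellable,
        )?;
        // list_refs_ext returns a refname -> checksum map; keep the checksums.
        commits.extend(refs.into_values().map(|v| v.to_string()));
    }
    Ok(commits)
}
```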
--- ci/priv-integration.sh | 6 ++++++ lib/src/cli.rs | 13 +++++++++++++ lib/src/container/store.rs | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 41b95520a..8a1b6e42b 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -29,8 +29,14 @@ for img in "${image}"; do ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${img}" ostree admin --sysroot="${sysroot}" status + initial_refs=$(ostree --repo="${sysroot}/ostree/repo" refs | wc -l) ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${img}" + pruned_refs=$(ostree --repo="${sysroot}/ostree/repo" refs | wc -l) + # Removing the image should only drop the image reference, not its layers + test "$(($initial_refs - 1))" = "$pruned_refs" ostree admin --sysroot="${sysroot}" undeploy 0 + # TODO: when we fold together ostree and ostree-ext, automatically prune layers + ostree-ext-cli container image prune-layers --repo="${sysroot}/ostree/repo" ostree --repo="${sysroot}/ostree/repo" refs > refs.txt if test "$(wc -l < refs.txt)" -ne 0; then echo "found refs" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 17de33c5c..e76a9e38a 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -252,6 +252,13 @@ pub(crate) enum ContainerImageOpts { skip_gc: bool, }, + /// Garbage collect unreferenced image layer references. + PruneLayers { + /// Path to the repository + #[clap(long, value_parser)] + repo: Utf8PathBuf, + }, + /// Perform initial deployment for a container image Deploy { /// Path to the system root @@ -777,6 +784,12 @@ where } Ok(()) } + ContainerImageOpts::PruneLayers { repo } => { + let repo = parse_repo(&repo)?; + let nlayers = crate::container::store::gc_image_layers(&repo)?; + println!("Removed layers: {nlayers}"); + Ok(()) + } ContainerImageOpts::Copy { src_repo, dest_repo, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index d6315c546..e58a9b77b 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -983,6 +983,40 @@ pub async fn copy( Ok(()) } +/// Iterate over deployment commits, returning the manifests from +/// commits which point to a container image. +fn list_container_deployment_manifests( + repo: &ostree::Repo, + cancellable: Option<&gio::Cancellable>, +) -> Result> { + let commits = repo + .list_refs_ext( + Some("ostree/0"), + ostree::RepoListRefsExtFlags::empty(), + cancellable, + )? + .into_iter() + .chain(repo.list_refs_ext( + Some("ostree/1"), + ostree::RepoListRefsExtFlags::empty(), + cancellable, + )?) + .map(|v| v.1); + let mut r = Vec::new(); + for commit in commits { + let commit_obj = repo.load_commit(&commit)?.0; + let commit_meta = &glib::VariantDict::new(Some(&commit_obj.child_value(0))); + if commit_meta + .lookup::(META_MANIFEST_DIGEST)? + .is_some() + { + let manifest = manifest_data_from_commitmeta(commit_meta)?.0; + r.push(manifest); + } + } + Ok(r) +} + /// Garbage collect unused image layer references. /// /// This function assumes no transaction is active on the repository. 
@@ -998,11 +1032,13 @@ fn gc_image_layers_impl(
     cancellable: Option<&gio::Cancellable>,
 ) -> Result<u32> {
     let all_images = list_images(repo)?;
+    let deployment_commits = list_container_deployment_manifests(repo, cancellable)?;
     let all_manifests = all_images
         .into_iter()
         .map(|img| {
             ImageReference::try_from(img.as_str()).and_then(|ir| manifest_for_image(repo, &ir))
         })
+        .chain(deployment_commits.into_iter().map(Ok))
         .collect::<Result<Vec<_>>>()?;
     let mut referenced_layers = BTreeSet::new();
     for m in all_manifests.iter() {

From 258cc970570cf133ca1da69112e17bd354750b62 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Thu, 10 Nov 2022 20:55:43 -0500
Subject: [PATCH 482/775] tests: Add a case to reproduce hardlink to /sysroot
 failure

xref https://github.com/ostreedev/ostree-rs-ext/issues/405

This test case reproduces the failure when we're trying to import a tar
archive with hardlinks into `/sysroot`.
---
 lib/src/integrationtest.rs | 33 ++++++++++++---
 lib/tests/it/main.rs       | 83 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 111 insertions(+), 5 deletions(-)

diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs
index 0c40d87ee..b3234126a 100644
--- a/lib/src/integrationtest.rs
+++ b/lib/src/integrationtest.rs
@@ -5,6 +5,7 @@ use std::path::Path;
 use crate::{
     container::{ocidir, ExportLayout},
     container_utils::is_ostree_container,
+    ocidir::RawLayerWriter,
 };
 use anyhow::Result;
 use camino::Utf8Path;
@@ -36,17 +37,39 @@ pub fn generate_derived_oci(
     dir: impl AsRef<Utf8Path>,
     tag: Option<&str>,
 ) -> Result<()> {
+    generate_derived_oci_from_tar(
+        src,
+        move |w| {
+            let dir = dir.as_ref();
+            let mut layer_tar = tar::Builder::new(w);
+            layer_tar.append_dir_all("./", dir.as_std_path())?;
+            layer_tar.finish()?;
+            Ok(())
+        },
+        tag,
+    )
+}
+
+/// Using `src` as a base, append `dir` into an OCI image.
+/// Should only be enabled for testing.
+#[context("Generating derived oci")] +pub fn generate_derived_oci_from_tar( + src: impl AsRef, + f: F, + tag: Option<&str>, +) -> Result<()> +where + F: FnOnce(&mut RawLayerWriter) -> Result<()>, +{ let src = src.as_ref(); let src = Dir::open_ambient_dir(src, cap_std::ambient_authority())?; let src = ocidir::OciDir::open(&src)?; - let dir = dir.as_ref(); + let mut manifest = src.read_manifest()?; let mut config: oci_spec::image::ImageConfiguration = src.read_json_blob(manifest.config())?; - let bw = src.create_raw_layer(None)?; - let mut layer_tar = tar::Builder::new(bw); - layer_tar.append_dir_all("./", dir.as_std_path())?; - let bw = layer_tar.into_inner()?; + let mut bw = src.create_raw_layer(None)?; + f(&mut bw)?; let new_layer = bw.complete()?; manifest.layers_mut().push( diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 8260d2188..bd089cc7d 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -18,6 +18,7 @@ use std::collections::{HashMap, HashSet}; use std::io::{BufReader, BufWriter}; use std::os::unix::fs::DirBuilderExt; use std::process::Command; +use std::time::SystemTime; use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0, CONTENTS_V0_LEN}; @@ -1187,6 +1188,88 @@ async fn test_container_write_derive() -> Result<()> { Ok(()) } +/// Test for https://github.com/ostreedev/ostree-rs-ext/issues/405 +/// We need to handle the case of modified hardlinks into /sysroot +#[tokio::test] +async fn test_container_write_derive_sysroot_hardlink() -> Result<()> { + let fixture = Fixture::new_v1()?; + let baseimg = &fixture.export_container(ExportLayout::V1).await?.0; + let basepath = &match baseimg.transport { + Transport::OciDir => fixture.path.join(baseimg.name.as_str()), + _ => unreachable!(), + }; + + // Build a derived image + let derived_path = &fixture.path.join("derived.oci"); + oci_clone(basepath, derived_path).await?; + ostree_ext::integrationtest::generate_derived_oci_from_tar( + derived_path, + |w| { + let mut tar = tar::Builder::new(w); + let objpath = Utf8Path::new("sysroot/ostree/repo/objects/60/feb13e826d2f9b62490ab24cea0f4a2d09615fb57027e55f713c18c59f4796.file"); + let d = objpath.parent().unwrap(); + fn mkparents( + t: &mut tar::Builder, + path: &Utf8Path, + ) -> std::io::Result<()> { + if let Some(parent) = path.parent().filter(|p| !p.as_str().is_empty()) { + mkparents(t, parent)?; + } + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Directory); + h.set_uid(0); + h.set_gid(0); + h.set_mode(0o755); + h.set_size(0); + t.append_data(&mut h, path, std::io::empty()) + } + mkparents(&mut tar, d).context("Appending parent")?; + + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? 
+ .as_secs(); + let mut h = tar::Header::new_gnu(); + h.set_entry_type(tar::EntryType::Regular); + h.set_uid(0); + h.set_gid(0); + h.set_mode(0o644); + h.set_mtime(now); + let data = b"hello"; + h.set_size(data.len() as u64); + tar.append_data(&mut h, objpath, std::io::Cursor::new(data)) + .context("appending object")?; + let targetpath = Utf8Path::new("usr/bin/bash"); + h.set_size(0); + h.set_mtime(now); + h.set_entry_type(tar::EntryType::Link); + tar.append_link(&mut h, targetpath, objpath) + .context("appending target")?; + Ok::<_, anyhow::Error>(()) + }, + None, + )?; + let derived_ref = &OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: ImageReference { + transport: Transport::OciDir, + name: derived_path.to_string(), + }, + }; + let mut imp = + store::ImageImporter::new(fixture.destrepo(), &derived_ref, Default::default()).await?; + let prep = match imp.prepare().await.context("Init prep derived")? { + store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), + store::PrepareResult::Ready(r) => r, + }; + // Should fail for now + assert_err_contains( + imp.import(prep).await, + "Failed to find object: No such file or directory: sysroot", + ); + + Ok(()) +} + #[tokio::test] // Today rpm-ostree vendors a stable ostree-rs-ext; this test // verifies that the old ostree-rs-ext code can parse the containers From 43e1648e97ef82f2cb86fd0813a5f338585eea97 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 11 Nov 2022 11:51:00 -0500 Subject: [PATCH 483/775] tar/import: Handle hardlinked changes into /sysroot Some container build processes run without overlayfs inode indexing on - in this common scenario, overlayfs is not quite POSIX compliant because it will break the hardlink instead of modifying all versions. We need to handle this case of having *all* names for a hardlinked file being modified though too. If the serialized tar stream has the file in `/sysroot` be the canonical version, then because we drop out that file here, we'll fail to import. Fix this by significantly beefing up the tar filtering/reprocessing logic: - When we see a *modified* file in `/sysroot` with a nonzero timestamp, cache its data into a lookaside temporary directory - If we then see a hardlink to that file path, make *that* file be the canonical version in e.g. `/usr`. - Any further hardlinks to `/sysroot` instead become hardlinks to the new canonical one. (Arguably perhaps...we should actually not have used hardlinks in ostree containers at all, but injected this metadata in some other way. But, the ship has sailed on that) Closes: https://github.com/ostreedev/ostree-rs-ext/issues/405 --- lib/src/tar/import.rs | 2 +- lib/src/tar/write.rs | 73 +++++++++++++++++++++++++++++++++++++++++-- lib/tests/it/main.rs | 16 +++++++--- 3 files changed, 82 insertions(+), 9 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 057d19ce8..b83cf5ea8 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -27,7 +27,7 @@ const MAX_METADATA_SIZE: u32 = 10 * 1024 * 1024; pub(crate) const SMALL_REGFILE_SIZE: usize = 127 * 1024; // The prefix for filenames that contain content we actually look at. -const REPO_PREFIX: &str = "sysroot/ostree/repo/"; +pub(crate) const REPO_PREFIX: &str = "sysroot/ostree/repo/"; /// Statistics from import. 
#[derive(Debug, Default)] struct ImportStats { diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index 9c2de031e..f92dbd9d6 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -10,15 +10,19 @@ use crate::Result; use anyhow::{anyhow, Context}; use camino::{Utf8Component, Utf8Path, Utf8PathBuf}; + +use cap_std_ext::cap_std; use cap_std_ext::cmdext::CapStdExtCommandExt; use cap_std_ext::rustix; +use once_cell::unsync::OnceCell; use ostree::gio; use ostree::prelude::FileExt; use rustix::fd::FromFd; -use std::collections::BTreeMap; -use std::io::{BufWriter, Write}; +use std::collections::{BTreeMap, HashMap}; +use std::io::{BufWriter, Seek, Write}; use std::path::Path; use std::process::Stdio; + use std::sync::Arc; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite}; use tracing::instrument; @@ -164,11 +168,74 @@ pub(crate) fn filter_tar( let mut filtered = BTreeMap::new(); let ents = src.entries()?; + + // Lookaside data for dealing with hardlinked files into /sysroot; see below. + let mut changed_sysroot_objects = HashMap::new(); + let mut new_sysroot_link_targets = HashMap::::new(); + // A temporary directory if needed + let tmpdir = OnceCell::new(); + for entry in ents { - let entry = entry?; + let mut entry = entry?; + let header = entry.header(); let path = entry.path()?; let path: &Utf8Path = (&*path).try_into()?; + let is_modified = header.mtime().unwrap_or_default() > 0; + let is_regular = header.entry_type() == tar::EntryType::Regular; + if path.strip_prefix(crate::tar::REPO_PREFIX).is_ok() { + // If it's a modified file in /sysroot, it may be a target for future hardlinks. + // In that case, we copy the data off to a temporary file. Then the first hardlink + // to it becomes instead the real file, and any *further* hardlinks refer to that + // file instead. + if is_modified && is_regular { + tracing::debug!("Processing modified sysroot file {path}"); + // Lazily allocate a temporary directory + let tmpdir = tmpdir.get_or_try_init(|| { + let vartmp = &cap_std::fs::Dir::open_ambient_dir( + "/var/tmp", + cap_std::ambient_authority(), + )?; + cap_tempfile::tempdir_in(vartmp) + })?; + // Create an O_TMPFILE (anonymous file) to use as a temporary store for the file data + let mut tmpf = cap_tempfile::TempFile::new_anonymous(tmpdir).map(BufWriter::new)?; + let path = path.to_owned(); + let header = header.clone(); + std::io::copy(&mut entry, &mut tmpf)?; + let mut tmpf = tmpf.into_inner()?; + tmpf.seek(std::io::SeekFrom::Start(0))?; + // Cache this data, indexed by the file path + changed_sysroot_objects.insert(path, (header, tmpf)); + continue; + } + } else if header.entry_type() == tar::EntryType::Link && is_modified { + let target = header + .link_name()? + .ok_or_else(|| anyhow!("Invalid empty hardlink"))?; + let target: &Utf8Path = (&*target).try_into()?; + // If this is a hardlink into /sysroot... + if target.strip_prefix(crate::tar::REPO_PREFIX).is_ok() { + // And we found a previously processed modified file there + if let Some((mut header, data)) = changed_sysroot_objects.remove(target) { + tracing::debug!("Making {path} canonical for sysroot link {target}"); + // Make *this* entry the canonical one, consuming the temporary file data + dest.append_data(&mut header, path, data)?; + // And cache this file path as the new link target + new_sysroot_link_targets.insert(target.to_owned(), path.to_owned()); + } else if let Some(target) = new_sysroot_link_targets.get(path) { + tracing::debug!("Relinking {path} to {target}"); + // We found a 2nd (or 3rd, etc.) 
link into /sysroot; rewrite the link + // target to be the first file outside of /sysroot we found. + let mut header = header.clone(); + dest.append_link(&mut header, path, target)?; + } else { + tracing::debug!("Found unhandled modified link from {path} to {target}"); + } + continue; + } + } + let normalized = match normalize_validate_path(path)? { NormalizedPathResult::Filtered(path) => { if let Some(v) = filtered.get_mut(path) { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index bd089cc7d..69022ebb8 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1261,11 +1261,17 @@ async fn test_container_write_derive_sysroot_hardlink() -> Result<()> { store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), store::PrepareResult::Ready(r) => r, }; - // Should fail for now - assert_err_contains( - imp.import(prep).await, - "Failed to find object: No such file or directory: sysroot", - ); + let import = imp.import(prep).await.unwrap(); + + // Verify we have the new file + bash_in!( + &fixture.dir, + r#"set -x; + ostree --repo=dest/repo ls ${r} /usr/bin/bash >/dev/null + test "$(ostree --repo=dest/repo cat ${r} /usr/bin/bash)" = "hello" + "#, + r = import.merge_commit.as_str() + )?; Ok(()) } From 34f1ce6ee4c22484aac4fa3b6377c93fe5100fbc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 11 Nov 2022 14:51:50 -0500 Subject: [PATCH 484/775] Release 0.9.1 This has two major fixes: - Better support for image pruning - Handling of hardlinked files into `/sysroot` which can occur with some container build systems --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 327e4853c..2c572bbb4 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.9.0" +version = "0.9.1" [dependencies] anyhow = "1.0" From f1af4f34147e43edbcf831198fb09fba0c3c6141 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 14 Nov 2022 09:03:01 -0500 Subject: [PATCH 485/775] container: Make single image removal idempotent Due to bugs or logic errors, it's possible that we might attempt to prune an image that's already been pruned. In order to make things more robust, let's avoid making this a hard error. Keep the semantics for the existing "prune multiple images" API, but add a new one that prunes a singular image, and does not error in the not-found case. This can be used by higher level software. --- lib/src/container/store.rs | 45 +++++++++++++++++++++++++++----------- lib/tests/it/main.rs | 4 +++- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index e58a9b77b..9d035aa96 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -1081,17 +1081,6 @@ pub fn count_layer_references(repo: &ostree::Repo) -> Result { Ok(n as u32) } -#[context("Pruning {}", image)] -fn prune_image(repo: &ostree::Repo, image: &ImageReference) -> Result<()> { - let ostree_ref = &ref_for_image(image)?; - - if repo.resolve_rev(ostree_ref, true)?.is_none() { - anyhow::bail!("No such image"); - } - repo.set_ref_immediate(None, ostree_ref, None, gio::NONE_CANCELLABLE)?; - Ok(()) -} - /// Given an image, if it has any non-ostree compatible content, return a suitable /// warning message. 
 pub fn image_filtered_content_warning(
@@ -1125,7 +1114,26 @@ pub fn image_filtered_content_warning(
     Ok(r)
 }
 
-/// Remove the specified image references.
+/// Remove the specified image reference. If the image is already
+/// not present, this function will successfully perform no operation.
+///
+/// This function assumes no transaction is active on the repository.
+/// The underlying layers are *not* pruned; that requires a separate invocation
+/// of [`gc_image_layers`].
+#[context("Pruning {img}")]
+pub fn remove_image(repo: &ostree::Repo, img: &ImageReference) -> Result<bool> {
+    let ostree_ref = &ref_for_image(img)?;
+    let found = repo.resolve_rev(ostree_ref, true)?.is_some();
+    // Note this API is already idempotent, but we might as well avoid another
+    // trip into ostree.
+    if found {
+        repo.set_ref_immediate(None, ostree_ref, None, gio::NONE_CANCELLABLE)?;
+    }
+    Ok(found)
+}
+
+/// Remove the specified image references. If an image is not found, further
+/// images will be removed, but an error will be returned.
 ///
 /// This function assumes no transaction is active on the repository.
 /// The underlying layers are *not* pruned; that requires a separate invocation
@@ -1134,8 +1142,19 @@ pub fn remove_images<'a>(
     repo: &ostree::Repo,
     imgs: impl IntoIterator<Item = &'a ImageReference>,
 ) -> Result<()> {
+    let mut missing = Vec::new();
     for img in imgs.into_iter() {
-        prune_image(repo, img)?;
+        let found = remove_image(repo, img)?;
+        if !found {
+            missing.push(img);
+        }
+    }
+    if !missing.is_empty() {
+        let missing = missing.into_iter().fold("".to_string(), |mut a, v| {
+            a.push_str(&v.to_string());
+            a
+        });
+        return Err(anyhow::anyhow!("Missing images: {missing}"));
     }
     Ok(())
 }
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 69022ebb8..04f519912 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -863,7 +863,9 @@ r usr/bin/bash bash-v0
     // Should only be new layers
     let n_removed = store::gc_image_layers(fixture.destrepo())?;
     assert_eq!(n_removed, 0);
-    store::remove_images(fixture.destrepo(), [&imgref.imgref]).unwrap();
+    // Also test idempotence
+    store::remove_image(fixture.destrepo(), &imgref.imgref).unwrap();
+    store::remove_image(fixture.destrepo(), &imgref.imgref).unwrap();
     assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1);
     // Still no removed layers after removing the base image
     let n_removed = store::gc_image_layers(fixture.destrepo())?;

From 7875bb33ab37807c1cf3f0ca96297d28c1ea0feb Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Mon, 14 Nov 2022 09:27:58 -0500
Subject: [PATCH 486/775] tests: Update to non-deprecated chrono API

The deprecation was introduced in a new chrono release and is
triggering our clippy lints.
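For reference, a standalone sketch of the migration (assuming chrono 0.4): the
deprecated `timestamp()` panics on out-of-range input, while `timestamp_opt()`
returns a `LocalResult` that must be handled explicitly:

```rust
use chrono::{LocalResult, TimeZone, Utc};

// Hedged sketch of the non-deprecated API: timestamp() -> timestamp_opt().
fn commit_datetime(unix_secs: i64) -> chrono::DateTime<Utc> {
    match Utc.timestamp_opt(unix_secs, 0) {
        LocalResult::Single(t) => t,
        // A UTC timestamp is never ambiguous; the `None` case only occurs
        // for out-of-range seconds values.
        _ => panic!("out-of-range timestamp: {unix_secs}"),
    }
}
```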
--- lib/src/fixture.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 305dea24e..eee819446 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -547,7 +547,10 @@ impl Fixture { let metadata = commit.child_value(0); let root = ostree::MutableTree::from_commit(self.srcrepo(), rev)?; // Bump the commit timestamp by one day - let ts = chrono::Utc.timestamp(ostree::commit_get_timestamp(&commit) as i64, 0); + let ts = chrono::Utc + .timestamp_opt(ostree::commit_get_timestamp(&commit) as i64, 0) + .single() + .unwrap(); let new_ts = ts.add(chrono::Duration::days(1)).timestamp() as u64; // Prepare a transaction From f3a4c9b56984e8493265af85da1c0b942fb2201b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 16 Nov 2022 08:37:08 -0500 Subject: [PATCH 487/775] container: Implement FromStr for OstreeContainerReference It's preferred to implement `FromStr` where possible in addition to `TryFrom` because it's usable in more contexts. Specifically in this case, with `clap` and `value_parser`. --- lib/src/container/mod.rs | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index faa8dd15e..849aabeae 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -28,6 +28,7 @@ use anyhow::anyhow; use std::borrow::Cow; use std::ops::Deref; +use std::str::FromStr; /// The label injected into a container image that contains the ostree commit SHA-256. pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit"; @@ -118,6 +119,14 @@ impl TryFrom<&str> for ImageReference { } } +impl FromStr for ImageReference { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Self::try_from(s) + } +} + impl TryFrom<&str> for SignatureSource { type Error = anyhow::Error; @@ -133,6 +142,14 @@ impl TryFrom<&str> for SignatureSource { } } +impl FromStr for SignatureSource { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Self::try_from(s) + } +} + impl TryFrom<&str> for OstreeImageReference { type Error = anyhow::Error; @@ -179,6 +196,14 @@ impl TryFrom<&str> for OstreeImageReference { } } +impl FromStr for OstreeImageReference { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Self::try_from(s) + } +} + impl std::fmt::Display for Transport { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let s = match self { @@ -303,7 +328,10 @@ mod tests { ); } + // Also verify our FromStr impls + let ir: OstreeImageReference = ir_s.try_into().unwrap(); + assert_eq!(ir, OstreeImageReference::from_str(ir_s).unwrap()); // test our Eq implementation assert_eq!(&ir, &OstreeImageReference::try_from(ir_registry).unwrap()); From 04ede4579b3cf68edfdf08dab2503be4a4940f6e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 21 Nov 2022 14:42:12 -0500 Subject: [PATCH 488/775] container: Add support for copying optionally-present keys This is to aid https://github.com/coreos/coreos-assembler/pull/3214 which is trying to inject the metadata key `fedora-coreos.stream` into the container image. However, this value will only be present in Fedora derivatives, and not RHEL/CentOS. Add support for copying a key only if present, instead of erroring if it's missing. 
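A minimal sketch of the resulting options (the key names are just the examples
discussed above: `buildsys.checksum` appears in our test suite, and
`fedora-coreos.stream` comes from the motivating use case):

```rust
use ostree_ext::container::ExportOpts;

// Hedged sketch: required keys fail the export when absent from the commit
// metadata; optional keys are copied into labels only when present.
fn example_export_opts() -> ExportOpts {
    ExportOpts {
        copy_meta_keys: vec!["buildsys.checksum".to_string()],
        copy_meta_opt_keys: vec!["fedora-coreos.stream".to_string()],
        ..Default::default()
    }
}
```

On the command line this corresponds to the new `--copymeta-opt` flag
alongside the existing `--copymeta`.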
---
 lib/src/cli.rs                   |  9 +++++++++
 lib/src/container/encapsulate.rs | 12 ++++++++++++
 lib/tests/it/main.rs             |  1 +
 3 files changed, 22 insertions(+)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index e76a9e38a..026887392 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -126,6 +126,10 @@ pub(crate) enum ContainerOpts {
         #[clap(name = "copymeta", long)]
         copy_meta_keys: Vec<String>,
 
+        /// Propagate an optionally-present OSTree commit metadata key to container label
+        #[clap(name = "copymeta-opt", long)]
+        copy_meta_opt_keys: Vec<String>,
+
         /// Corresponds to the Dockerfile `CMD` instruction.
         #[clap(long)]
         cmd: Option<Vec<String>>,
@@ -531,12 +535,14 @@ async fn container_import(
 }
 
 /// Export a container image with an encapsulated ostree commit.
+#[allow(clippy::too_many_arguments)]
 async fn container_export(
     repo: &ostree::Repo,
     rev: &str,
     imgref: &ImageReference,
     labels: BTreeMap<String, String>,
     copy_meta_keys: Vec<String>,
+    copy_meta_opt_keys: Vec<String>,
     cmd: Option<Vec<String>>,
     compression_fast: bool,
 ) -> Result<()> {
@@ -546,6 +552,7 @@ async fn container_export(
     };
     let opts = crate::container::ExportOpts {
         copy_meta_keys,
+        copy_meta_opt_keys,
         skip_compression: compression_fast, // TODO rename this in the struct at the next semver break
         ..Default::default()
     };
@@ -723,6 +730,7 @@ where
             imgref,
             labels,
             copy_meta_keys,
+            copy_meta_opt_keys,
             cmd,
             compression_fast,
         } => {
@@ -742,6 +750,7 @@ where
                 &imgref,
                 labels?,
                 copy_meta_keys,
+                copy_meta_opt_keys,
                 cmd,
                 compression_fast,
             )
diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index 56c422d0d..dd7a08845 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -65,6 +65,7 @@ pub struct Config {
 fn commit_meta_to_labels<'a>(
     meta: &glib::VariantDict,
     keys: impl IntoIterator<Item = &'a str>,
+    opt_keys: impl IntoIterator<Item = &'a str>,
     labels: &mut HashMap<String, String>,
 ) -> Result<()> {
     for k in keys {
@@ -74,6 +75,14 @@ fn commit_meta_to_labels<'a>(
             .ok_or_else(|| anyhow!("Could not find commit metadata key: {}", k))?;
         labels.insert(k.to_string(), v);
     }
+    for k in opt_keys {
+        let v = meta
+            .lookup::<String>(k)
+            .context("Expected string for commit metadata value")?;
+        if let Some(v) = v {
+            labels.insert(k.to_string(), v);
+        }
+    }
     // Copy standard metadata keys `ostree.bootable` and `ostree.linux`.
     // Bootable is an odd one out in being a boolean.
     if let Some(v) = meta.lookup::<bool>(*ostree::METADATA_KEY_BOOTABLE)? {
@@ -217,6 +226,7 @@ fn build_oci(
     commit_meta_to_labels(
         &commit_meta,
         opts.copy_meta_keys.iter().map(|k| k.as_str()),
+        opts.copy_meta_opt_keys.iter().map(|k| k.as_str()),
         labels,
     )?;
 
@@ -361,6 +371,8 @@ pub struct ExportOpts {
     pub skip_compression: bool,
     /// A set of commit metadata keys to copy as image labels.
     pub copy_meta_keys: Vec<String>,
+    /// A set of optionally-present commit metadata keys to copy as image labels.
+    pub copy_meta_opt_keys: Vec<String>,
     /// Maximum number of layers to use
     pub max_layers: Option<NonZeroU32>,
     /// The container image layout
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 04f519912..3cc698a09 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -525,6 +525,7 @@ async fn impl_test_container_import_export(
         .transpose()?;
     let opts = ExportOpts {
         copy_meta_keys: vec!["buildsys.checksum".to_string()],
+        copy_meta_opt_keys: vec!["nosuchvalue".to_string()],
        format: export_format,
         ..Default::default()
     };

From bcf5823b535f7c924921b085eb22d53edbf44627 Mon Sep 17 00:00:00 2001
From: Colin Walters <walters@verbum.org>
Date: Tue, 22 Nov 2022 08:34:00 -0500
Subject: [PATCH 489/775] ci/integration: `mkdir /var/tmp` for now

We should obviously fix this in a nicer way.
xref https://github.com/ostreedev/ostree-rs-ext/issues/417 --- ci/ima.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/ima.sh b/ci/ima.sh index be7480173..6be4dc611 100755 --- a/ci/ima.sh +++ b/ci/ima.sh @@ -4,6 +4,9 @@ # Runs IMA tests. set -xeuo pipefail +# https://github.com/ostreedev/ostree-rs-ext/issues/417 +mkdir -p /var/tmp + if test '!' -x /usr/bin/evmctl; then rpm-ostree install ima-evm-utils fi From 42cc2a1aad95f29218df236f1532fd56f3b9441f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 22 Nov 2022 08:40:02 -0500 Subject: [PATCH 490/775] ci: Switch to checkout@v3 > Node.js 12 actions are deprecated. For more information see: > https://github.blog/changelog/2022-09-22-github-actions-all-actions-will-begin-running-on-node16-instead-of-node12/ --- .github/workflows/rust.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 1f585ef8e..6ac218794 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -101,7 +101,7 @@ jobs: cargo-deny: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: EmbarkStudios/cargo-deny-action@v1 with: log-level: warn @@ -112,7 +112,7 @@ jobs: container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install deps run: ./ci/installdeps.sh - name: Remove system Rust toolchain @@ -134,7 +134,7 @@ jobs: container: quay.io/fedora/fedora-coreos:testing-devel steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Download ostree-ext-cli uses: actions/download-artifact@v2 with: @@ -150,7 +150,7 @@ jobs: container: quay.io/fedora/fedora-coreos:testing-devel steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Download ostree-ext-cli uses: actions/download-artifact@v2 with: @@ -168,7 +168,7 @@ jobs: options: "--privileged --pid=host -v /run/systemd:/run/systemd -v /:/run/host" steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Download uses: actions/download-artifact@v2 with: @@ -183,7 +183,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Checkout coreos-layering-examples uses: actions/checkout@v3 with: From f5481bd0f6508d961c0908714d0cdf0a0bfa983d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 21 Nov 2022 20:56:25 -0500 Subject: [PATCH 491/775] Add a `sysroot` module with helper for `SysrootLock` A common use case is wanting to acquire a locked system root. Add a helper function which handles this and returns a guard object that can be dereferenced to a sysroot. --- lib/src/lib.rs | 1 + lib/src/sysroot.rs | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 lib/src/sysroot.rs diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 2b4c80f32..b08ae4fe0 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -37,6 +37,7 @@ pub mod ima; pub mod keyfileext; pub(crate) mod logging; pub mod refescape; +pub mod sysroot; pub mod tar; pub mod tokio_util; diff --git a/lib/src/sysroot.rs b/lib/src/sysroot.rs new file mode 100644 index 000000000..d343d754b --- /dev/null +++ b/lib/src/sysroot.rs @@ -0,0 +1,45 @@ +//! Helpers for interacting with sysroots. + +use std::ops::Deref; + +use anyhow::Result; + +/// A locked system root. 
+#[derive(Debug)] +pub struct SysrootLock { + sysroot: ostree::Sysroot, +} + +impl Drop for SysrootLock { + fn drop(&mut self) { + self.sysroot.unlock(); + } +} + +impl Deref for SysrootLock { + type Target = ostree::Sysroot; + + fn deref(&self) -> &Self::Target { + &self.sysroot + } +} + +impl SysrootLock { + /// Asynchronously acquire a sysroot lock. If the lock cannot be acquired + /// immediately, a status message will be printed to standard output. + pub async fn new_from_sysroot(sysroot: &ostree::Sysroot) -> Result { + let mut printed = false; + loop { + if sysroot.try_lock()? { + return Ok(Self { + sysroot: sysroot.clone(), + }); + } + if !printed { + println!("Waiting for sysroot lock..."); + printed = true; + } + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + } + } +} From 254648733160cba7dab6e641ab7b866ef2eca572 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 23 Nov 2022 10:53:54 -0500 Subject: [PATCH 492/775] Update MSRV, revamp CI Prep for bumping ostree, which also bumps MSRV. xref https://github.com/containers/containers-image-proxy-rs/pull/40/commits/12cdbf5af5ef40b312df14d7968e30bfbbb02067 - Use `checkout@v3` to avoid deprecation warnings - switch to https://github.com/dtolnay/rust-toolchain after seeing https://www.reddit.com/r/rust/comments/z1mlls/actionsrs_github_actions_need_more_maintainers_or/ - bump to rust-cache@v2 --- .github/workflows/rust.yml | 35 ++++++++++++++++++----------------- cli/Cargo.toml | 1 + lib/Cargo.toml | 1 + 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 6ac218794..43eef38ca 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -15,24 +15,22 @@ on: env: CARGO_TERM_COLOR: always - # Minimum supported Rust version (MSRV) - ACTION_MSRV_TOOLCHAIN: 1.58.1 # Pinned toolchain for linting - ACTION_LINTS_TOOLCHAIN: 1.58.1 + ACTION_LINTS_TOOLCHAIN: 1.63.0 jobs: tests: runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Code lints run: ./ci/lints.sh - name: Install deps run: ./ci/installdeps.sh # xref containers/containers-image-proxy-rs - name: Cache Dependencies - uses: Swatinem/rust-cache@v1 + uses: Swatinem/rust-cache@v2 with: key: "tests" - name: Build @@ -47,11 +45,11 @@ jobs: runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install deps run: ./ci/installdeps.sh - name: Cache Dependencies - uses: Swatinem/rust-cache@v1 + uses: Swatinem/rust-cache@v2 with: key: "test-compat" - name: Build @@ -62,11 +60,11 @@ jobs: runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install deps run: ./ci/installdeps.sh - name: Cache Dependencies - uses: Swatinem/rust-cache@v1 + uses: Swatinem/rust-cache@v2 with: key: "build" - name: Build @@ -82,18 +80,23 @@ jobs: container: quay.io/coreos-assembler/fcos-buildroot:testing-devel steps: - name: Checkout repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install deps run: ./ci/installdeps.sh + - name: Detect crate MSRV + shell: bash + run: | + msrv=$(cargo metadata --format-version 1 --no-deps | \ + jq -r '.packages[1].rust_version') + echo "Crate MSRV: $msrv" + echo "ACTION_MSRV_TOOLCHAIN=$msrv" >> $GITHUB_ENV - name: Remove system Rust toolchain 
run: dnf remove -y rust cargo - - name: Install toolchain - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env['ACTION_MSRV_TOOLCHAIN'] }} - default: true - name: Cache Dependencies - uses: Swatinem/rust-cache@v1 + uses: Swatinem/rust-cache@v2 with: key: "min" - name: cargo check @@ -117,11 +120,9 @@ jobs: run: ./ci/installdeps.sh - name: Remove system Rust toolchain run: dnf remove -y rust cargo - - name: Install toolchain - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@master with: toolchain: ${{ env['ACTION_LINTS_TOOLCHAIN'] }} - default: true components: rustfmt, clippy - name: cargo fmt (check) run: cargo fmt -- --check -l diff --git a/cli/Cargo.toml b/cli/Cargo.toml index d604767cf..8769d05db 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -7,6 +7,7 @@ license = "MIT OR Apache-2.0" repository = "https://github.com/ostreedev/ostree-rs-ext" readme = "README.md" publish = false +rust-version = "1.63.0" [dependencies] anyhow = "1.0" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 2c572bbb4..ef889d78f 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -7,6 +7,7 @@ name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" version = "0.9.1" +rust-version = "1.63.0" [dependencies] anyhow = "1.0" From c3a9f8272071e20cbd58d63683e0c61447426f2e Mon Sep 17 00:00:00 2001 From: RishabhSaini Date: Fri, 18 Nov 2022 10:35:22 -0500 Subject: [PATCH 493/775] Ensure integration testing fails for ostree-ext-cli container image pull $osrepo ostree-unverified-image:containers-storage:$image --- ci/priv-integration.sh | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 8a1b6e42b..a991b3a39 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -4,6 +4,9 @@ # whatever we want, however we can't actually *reboot* the host. set -euo pipefail +# https://github.com/ostreedev/ostree-rs-ext/issues/417 +mkdir -p /var/tmp + sysroot=/run/host # Current stable image fixture image=quay.io/fedora/fedora-coreos:testing-devel @@ -57,4 +60,13 @@ echo "ok old image failed to parse" nsenter -m -t 1 journalctl _COMM=ostree-ext-cli > logs.txt grep 'layers stored: ' logs.txt +podman pull ${image} +ostree --repo="${sysroot}/ostree/repo" init --mode=bare-user +if ostree-ext-cli container image pull ${sysroot}/ostree/repo ostree-unverified-image:containers-storage:${image} 2>err.txt; then + echo "unexpectedly pulled from containers storage?" + exit 1 +fi +grep "file does not exist" err.txt +echo "ok pulled from containers storage" + echo ok privileged integration From b40bdca50ed02f5949ae8e33b6d8ed60ea72d196 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 29 Nov 2022 08:09:26 -0500 Subject: [PATCH 494/775] tests: Always use v1 for compat testing With a new enough rpm-ostree, this test will always fail if we're built in compat mode. I'm hitting this locally, but it looks like the latest rpm-ostree only made it to testing-devel 2 days ago. 
--- lib/tests/it/main.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 3cc698a09..721a2735e 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1289,12 +1289,7 @@ async fn test_old_code_parses_new_export() -> Result<()> { return Ok(()); } let fixture = Fixture::new_v1()?; - let layout = if cfg!(feature = "compat") { - ExportLayout::V0 - } else { - ExportLayout::V1 - }; - let imgref = fixture.export_container(layout).await?.0; + let imgref = fixture.export_container(ExportLayout::V1).await?.0; let imgref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref, From 79cc0fffc4b8016984334d72c80c6a975520c90b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 28 Nov 2022 17:57:37 -0500 Subject: [PATCH 495/775] tar/export: Always generate `/var/tmp` This is a bit of a hack, but basically the ostree model of empty `/var` defers creation of basic directory structure to e.g. `systemd-tmpfiles`, but that isn't run in containers. For now, let's keep this as the only special case. Perhaps down the line we may actually need to effectively parse/execute systemd-tmpfiles inside the stream in the future, but hopefully we can avoid that. Many things are going to use `/var/tmp` during image builds, but that's much less likely with e.g. `/var/log`. Closes: https://github.com/ostreedev/ostree-rs-ext/issues/417 --- lib/src/tar/export.rs | 36 ++++++++++++++++++++++++++++++++++++ lib/tests/it/main.rs | 1 + 2 files changed, 37 insertions(+) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 07bcf5d39..f15e78fa3 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -79,6 +79,7 @@ struct OstreeTarWriter<'a, W: std::io::Write> { wrote_initdirs: bool, /// True if we're only writing directories structure_only: bool, + wrote_vartmp: bool, // Set if the ostree commit contains /var/tmp wrote_dirtree: HashSet, wrote_dirmeta: HashSet, wrote_content: HashSet, @@ -163,6 +164,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { options, wrote_initdirs: false, structure_only: false, + wrote_vartmp: false, wrote_dirmeta: HashSet::new(), wrote_dirtree: HashSet::new(), wrote_content: HashSet::new(), @@ -308,6 +310,9 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { true, cancellable, )?; + + self.append_standard_var(cancellable)?; + Ok(()) } @@ -540,6 +545,12 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { } } + // Record if the ostree commit includes /var/tmp; if so we don't need to synthesize + // it in `append_standard_var()`. + if dirpath == "var/tmp" { + self.wrote_vartmp = true; + } + for item in dirs { let (name, contents_csum, meta_csum) = item.to_tuple(); let name = name.to_str(); @@ -565,6 +576,31 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { Ok(()) } + + /// Generate e.g. `/var/tmp`. + /// + /// In the OSTree model we expect `/var` to start out empty, and be populated via + /// e.g. `systemd-tmpfiles`. But, systemd doesn't run in Docker-style containers by default. + /// + /// So, this function creates a few critical directories in `/var` by default. + fn append_standard_var(&mut self, cancellable: Option<&gio::Cancellable>) -> Result<()> { + // If the commit included /var/tmp, then it's already in the tar stream. 
+ if self.wrote_vartmp { + return Ok(()); + } + if let Some(c) = cancellable { + c.set_error_if_cancelled()?; + } + let mut header = tar::Header::new_gnu(); + header.set_entry_type(tar::EntryType::Directory); + header.set_size(0); + header.set_uid(0); + header.set_gid(0); + header.set_mode(self.filter_mode(libc::S_IFDIR | 0o1777)); + self.out + .append_data(&mut header, "var/tmp", std::io::empty())?; + Ok(()) + } } /// Recursively walk an OSTree commit and generate data into a `[tar::Builder]` diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 721a2735e..827b13363 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -316,6 +316,7 @@ fn common_tar_contents_all() -> impl Iterator { ("usr/bin/bash", Link, 0o755), ("usr/bin/hardlink-a", Link, 0o644), ("usr/bin/hardlink-b", Link, 0o644), + ("var/tmp", Directory, 0o1777), ] .into_iter() .map(Into::into) From 018e8a27560873d2ff1005327831f898ca59ac4e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 4 Nov 2022 08:42:24 -0400 Subject: [PATCH 496/775] Bump to ostree 0.17, cap-std 1.0 We need to do both bumps in one go because ostree exposes cap-std APIs. --- lib/Cargo.toml | 8 ++++---- lib/src/chunking.rs | 4 ++-- lib/src/cli.rs | 6 +++--- lib/src/container/deploy.rs | 2 +- lib/src/container/store.rs | 8 ++++---- lib/src/diff.rs | 8 ++++---- lib/src/fixture.rs | 29 +++++++++++++++++------------ lib/src/ima.rs | 23 ++++++++++------------- lib/src/integrationtest.rs | 2 +- lib/src/tar/export.rs | 6 +++--- lib/src/tar/import.rs | 12 ++++++------ lib/src/tar/write.rs | 7 +++---- lib/src/tokio_util.rs | 2 +- lib/tests/it/main.rs | 31 ++++++++++++++++++------------- 14 files changed, 77 insertions(+), 71 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index ef889d78f..98791f768 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -19,8 +19,8 @@ chrono = "0.4.19" cjson = "0.1.1" clap = { version= "3.2", features = ["derive"] } clap_mangen = { version = "0.1", optional = true } -cap-std-ext = "0.26" -cap-tempfile = "0.25" +cap-std-ext = "1.0" +cap-tempfile = "1.0" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" futures-util = "0.3.13" @@ -32,7 +32,7 @@ libc = "0.2.92" libsystemd = "0.5.0" oci-spec = "0.5.4" openssl = "0.10.33" -ostree = { features = ["v2021_5", "cap-std-apis"], version = "0.15.0" } +ostree = { features = ["v2022_5", "cap-std-apis"], version = "0.17.0" } pin-project = "1.0" regex = "1.5.4" serde = { features = ["derive"], version = "1.0.125" } @@ -46,7 +46,7 @@ tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" indoc = { version = "1.0.3", optional = true } -sh-inline = { version = "0.3", features = ["cap-std-ext"], optional = true } +sh-inline = { version = "0.4", features = ["cap-std-ext"], optional = true } [dev-dependencies] quickcheck = "1" diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 98df066ea..3d41fdf15 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -54,7 +54,7 @@ pub struct ObjectMetaSized { impl ObjectMetaSized { /// Given object metadata and a repo, compute the size of each content source. 
pub fn compute_sizes(repo: &ostree::Repo, meta: ObjectMeta) -> Result { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; // Destructure into component parts; we'll create the version with sizes let map = meta.map; let mut set = meta.set; @@ -148,7 +148,7 @@ fn generate_chunking_recurse( let fpath = gen.path.join(name.to_str()); hex::encode_to_slice(csum, &mut hexbuf)?; let checksum = std::str::from_utf8(&hexbuf)?; - let meta = repo.query_file(checksum, gio::NONE_CANCELLABLE)?.0; + let meta = repo.query_file(checksum, gio::Cancellable::NONE)?.0; let size = meta.size() as u64; let entry = chunk.content.entry(RcStr::from(checksum)).or_default(); entry.0 = size; diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 026887392..9e36f7987 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -520,7 +520,7 @@ async fn container_import( None, write_ref, Some(import.ostree_commit.as_str()), - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?; println!( "Imported: {} => {}", @@ -666,7 +666,7 @@ async fn container_history(repo: &ostree::Repo, imgref: &ImageReference) -> Resu /// Add IMA signatures to an ostree commit, generating a new commit. fn ima_sign(cmdopts: &ImaSignOpts) -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let signopts = crate::ima::ImaOpts { algorithm: cmdopts.algorithm.clone(), key: cmdopts.key.clone(), @@ -834,7 +834,7 @@ where write_commitid_to, } => { let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); - sysroot.load(gio::NONE_CANCELLABLE)?; + sysroot.load(gio::Cancellable::NONE)?; let repo = &sysroot.repo().unwrap(); let kargs = karg.as_deref(); let kargs = kargs.map(|v| { diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 78bdf1515..83dcc0148 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -45,7 +45,7 @@ pub async fn deploy( imgref: &OstreeImageReference, options: Option>, ) -> Result> { - let cancellable = ostree::gio::NONE_CANCELLABLE; + let cancellable = ostree::gio::Cancellable::NONE; let options = options.unwrap_or_default(); let repo = &sysroot.repo().unwrap(); let merge_deployment = sysroot.merge_deployment(Some(stateroot)); diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 9d035aa96..48530a08a 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -874,7 +874,7 @@ impl ImageImporter { /// List all images stored pub fn list_images(repo: &ostree::Repo) -> Result> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let refs = repo.list_refs_ext( Some(IMAGE_PREFIX), ostree::RepoListRefsExtFlags::empty(), @@ -1023,7 +1023,7 @@ fn list_container_deployment_manifests( /// The underlying objects are *not* pruned; that requires a separate invocation /// of [`ostree::Repo::prune`]. 
pub fn gc_image_layers(repo: &ostree::Repo) -> Result { - gc_image_layers_impl(repo, gio::NONE_CANCELLABLE) + gc_image_layers_impl(repo, gio::Cancellable::NONE) } #[context("Pruning image layers")] @@ -1070,7 +1070,7 @@ fn gc_image_layers_impl( #[cfg(feature = "internal-testing-api")] /// Return how many container blobs (layers) are stored pub fn count_layer_references(repo: &ostree::Repo) -> Result { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let n = repo .list_refs_ext( Some(LAYER_PREFIX), @@ -1127,7 +1127,7 @@ pub fn remove_image(repo: &ostree::Repo, img: &ImageReference) -> Result { // Note this API is already idempotent, but we might as well avoid another // trip into ostree. if found { - repo.set_ref_immediate(None, ostree_ref, None, gio::NONE_CANCELLABLE)?; + repo.set_ref_immediate(None, ostree_ref, None, gio::Cancellable::NONE)?; } Ok(found) } diff --git a/lib/src/diff.rs b/lib/src/diff.rs index 6f7c5a62b..ddbaf7a8f 100644 --- a/lib/src/diff.rs +++ b/lib/src/diff.rs @@ -19,7 +19,7 @@ fn query_info_optional( queryattrs: &str, queryflags: gio::FileQueryInfoFlags, ) -> Result> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; match f.query_info(queryattrs, queryflags, cancellable) { Ok(i) => Ok(Some(i)), Err(e) => { @@ -78,7 +78,7 @@ fn diff_recurse( from: &ostree::RepoFile, to: &ostree::RepoFile, ) -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let queryattrs = "standard::name,standard::type"; let queryflags = gio::FileQueryInfoFlags::NOFOLLOW_SYMLINKS; let from_iter = from.enumerate_children(queryattrs, queryflags, cancellable)?; @@ -159,8 +159,8 @@ pub fn diff>( ) -> Result { let subdir = subdir.as_ref(); let subdir = subdir.map(|s| s.as_ref()); - let (fromroot, _) = repo.read_commit(from, gio::NONE_CANCELLABLE)?; - let (toroot, _) = repo.read_commit(to, gio::NONE_CANCELLABLE)?; + let (fromroot, _) = repo.read_commit(from, gio::Cancellable::NONE)?; + let (toroot, _) = repo.read_commit(to, gio::Cancellable::NONE)?; let (fromroot, toroot) = if let Some(subdir) = subdir { ( fromroot.resolve_relative_path(subdir), diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index eee819446..424ad72c4 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -247,7 +247,12 @@ pub fn create_dirmeta(path: &Utf8Path, selinux: bool) -> glib::Variant { /// Wraps [`create_dirmeta`] and commits it. 
pub fn require_dirmeta(repo: &ostree::Repo, path: &Utf8Path, selinux: bool) -> Result { let v = create_dirmeta(path, selinux); - let r = repo.write_metadata(ostree::ObjectType::DirMeta, None, &v, gio::NONE_CANCELLABLE)?; + let r = repo.write_metadata( + ostree::ObjectType::DirMeta, + None, + &v, + gio::Cancellable::NONE, + )?; Ok(r.to_hex()) } @@ -280,7 +285,7 @@ fn build_mapping_recurse( ret: &mut ObjectMeta, ) -> Result<()> { use std::collections::btree_map::Entry; - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let e = dir.enumerate_children( "standard::name,standard::type", gio::FileQueryInfoFlags::NOFOLLOW_SYMLINKS, @@ -415,9 +420,9 @@ impl Fixture { // Delete all objects in the destrepo pub fn clear_destrepo(&self) -> Result<()> { self.destrepo() - .set_ref_immediate(None, self.testref(), None, gio::NONE_CANCELLABLE)?; + .set_ref_immediate(None, self.testref(), None, gio::Cancellable::NONE)?; self.destrepo() - .prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::NONE_CANCELLABLE)?; + .prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::Cancellable::NONE)?; Ok(()) } @@ -446,7 +451,7 @@ impl Fixture { libc::S_IFREG | def.mode, xattrs, contents.as_bytes(), - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?, FileDefType::Symlink(target) => self.srcrepo.write_symlink( None, @@ -454,7 +459,7 @@ impl Fixture { def.gid, xattrs, target.as_str(), - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?, FileDefType::Directory => { let d = parent.ensure_dir(name)?; @@ -469,7 +474,7 @@ impl Fixture { pub fn commit_filedefs(&self, defs: impl IntoIterator>) -> Result<()> { let root = ostree::MutableTree::new(); - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let tx = self.srcrepo.auto_transaction(cancellable)?; for def in defs { let def = def?; @@ -509,7 +514,7 @@ impl Fixture { self.srcrepo.write_commit_detached_metadata( commit.as_str(), Some(&detached), - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?; let gpghome = self.path.join("src/gpghome"); @@ -517,7 +522,7 @@ impl Fixture { &commit, TEST_GPG_KEYID_1, Some(gpghome.as_str()), - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?; Ok(()) @@ -539,7 +544,7 @@ impl Fixture { additions: impl Iterator>, removals: impl Iterator>, ) -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; // Load our base commit let rev = &self.srcrepo().require_rev(self.testref())?; @@ -595,7 +600,7 @@ impl Fixture { /// Gather object metadata for the current commit. 
pub fn get_object_meta(&self) -> Result { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; // Load our base commit let root = self.srcrepo.read_commit(self.testref(), cancellable)?.0; @@ -613,7 +618,7 @@ impl Fixture { #[context("Exporting tar")] pub fn export_tar(&self) -> Result<&'static Utf8Path> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let (_, rev) = self.srcrepo.read_commit(self.testref(), cancellable)?; let path = "exampleos-export.tar"; let mut outf = std::io::BufWriter::new(self.dir.create(path)?); diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 5c027a4cb..f867e967d 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -54,15 +54,12 @@ fn xattrs_to_map(v: &glib::Variant) -> BTreeMap, Vec> { pub(crate) fn new_variant_a_ayay<'a, T: 'a + AsRef<[u8]>>( items: impl IntoIterator, ) -> glib::Variant { - let children: Vec<_> = items - .into_iter() - .map(|(a, b)| { - let a = a.as_ref(); - let b = b.as_ref(); - Variant::from_tuple(&[a.to_variant(), b.to_variant()]) - }) - .collect(); - Variant::from_array::<(&[u8], &[u8])>(&children) + let children = items.into_iter().map(|(a, b)| { + let a = a.as_ref(); + let b = b.as_ref(); + Variant::tuple_from_iter([a.to_variant(), b.to_variant()]) + }); + Variant::array_from_iter::<(&[u8], &[u8])>(children) } struct CommitRewriter<'a> { @@ -155,7 +152,7 @@ impl<'a> CommitRewriter<'a> { #[context("Content object {}", checksum)] fn map_file(&mut self, checksum: &str) -> Result> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let (instream, meta, xattrs) = self.repo.load_file(checksum, cancellable)?; let instream = if let Some(i) = instream { i @@ -236,7 +233,7 @@ impl<'a> CommitRewriter<'a> { ostree::ObjectType::DirTree, None, &new_dirtree, - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )? .to_hex(); @@ -247,7 +244,7 @@ impl<'a> CommitRewriter<'a> { #[context("Mapping {}", rev)] fn map_commit(&mut self, rev: &str) -> Result { let checksum = self.repo.require_rev(rev)?; - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let (commit_v, _) = self.repo.load_commit(&checksum)?; let commit_v = &commit_v; @@ -266,7 +263,7 @@ impl<'a> CommitRewriter<'a> { } let new_dt = hex::decode(new_dt)?; parts[6] = new_dt.to_variant(); - let new_commit = Variant::from_tuple(&parts); + let new_commit = Variant::tuple_from_iter(&parts); let new_commit_checksum = self .repo diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index b3234126a..8affb3d3f 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -143,7 +143,7 @@ pub(crate) fn test_ima() -> Result<()> { use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let fixture = crate::fixture::Fixture::new_v1()?; let config = indoc::indoc! { r#" diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 07bcf5d39..7456be698 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -274,7 +274,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { /// Recursively serialize a commit object to the target tar stream. 
fn write_commit(&mut self) -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let commit_bytes = self.commit_object.data_as_bytes(); let commit_bytes = commit_bytes.try_as_aligned()?; @@ -319,7 +319,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { )?; if let Some(commitmeta) = self .repo - .read_commit_detached_metadata(self.commit_checksum, gio::NONE_CANCELLABLE)? + .read_commit_detached_metadata(self.commit_checksum, gio::Cancellable::NONE)? { self.append( ostree::ObjectType::CommitMeta, @@ -412,7 +412,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn append_content(&mut self, checksum: &str) -> Result<(Utf8PathBuf, tar::Header)> { let path = object_path(ostree::ObjectType::File, checksum); - let (instream, meta, xattrs) = self.repo.load_file(checksum, gio::NONE_CANCELLABLE)?; + let (instream, meta, xattrs) = self.repo.load_file(checksum, gio::Cancellable::NONE)?; let mut h = tar::Header::new_gnu(); h.set_uid(meta.attribute_uint32("unix::uid") as u64); diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index b83cf5ea8..7d7eb4f4f 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -243,7 +243,7 @@ impl Importer { // https://github.com/ostreedev/ostree-rs-ext/issues/1 let actual = self.repo - .write_metadata(objtype, Some(checksum), &v, gio::NONE_CANCELLABLE)?; + .write_metadata(objtype, Some(checksum), &v, gio::Cancellable::NONE)?; assert_eq!(actual.to_hex(), checksum); Ok(()) } @@ -333,7 +333,7 @@ impl Importer { gid, Some(&xattrs), target, - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?; debug_assert_eq!(c.as_str(), checksum); self.stats.symlinks += 1; @@ -710,7 +710,7 @@ impl Importer { self.repo.write_commit_detached_metadata( &checksum, Some(&commitmeta), - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?; } _ => { @@ -761,10 +761,10 @@ impl Importer { ostree::ObjectType::DirMeta, None, &Self::default_dirmeta(), - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?; mtree.set_metadata_checksum(&dirmeta.to_hex()); - let tree = self.repo.write_mtree(&mtree, gio::NONE_CANCELLABLE)?; + let tree = self.repo.write_mtree(&mtree, gio::Cancellable::NONE)?; let commit = self.repo.write_commit_with_time( None, None, @@ -772,7 +772,7 @@ impl Importer { None, tree.downcast_ref().unwrap(), 0, - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?; Ok(commit.to_string()) } diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index f92dbd9d6..bce64e4ba 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -11,13 +11,12 @@ use crate::Result; use anyhow::{anyhow, Context}; use camino::{Utf8Component, Utf8Path, Utf8PathBuf}; +use cap_std::io_lifetimes; use cap_std_ext::cap_std; use cap_std_ext::cmdext::CapStdExtCommandExt; -use cap_std_ext::rustix; use once_cell::unsync::OnceCell; use ostree::gio; use ostree::prelude::FileExt; -use rustix::fd::FromFd; use std::collections::{BTreeMap, HashMap}; use std::io::{BufWriter, Seek, Write}; use std::path::Path; @@ -79,7 +78,7 @@ pub struct WriteTarResult { // Copy of logic from https://github.com/ostreedev/ostree/pull/2447 // to avoid waiting for backport + releases fn sepolicy_from_base(repo: &ostree::Repo, base: &str) -> Result { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let policypath = "usr/etc/selinux"; let tempdir = tempfile::tempdir()?; let (root, _) = repo.read_commit(base, cancellable)?; @@ -294,7 +293,7 @@ pub async fn write_tar( }; let mut c = std::process::Command::new("ostree"); let repofd 
= repo.dfd_as_file()?; - let repofd = Arc::new(rustix::io::OwnedFd::from_into_fd(repofd)); + let repofd: Arc = Arc::new(repofd.into()); { let c = c .stdin(Stdio::piped()) diff --git a/lib/src/tokio_util.rs b/lib/src/tokio_util.rs index b11a158d5..d376dee23 100644 --- a/lib/src/tokio_util.rs +++ b/lib/src/tokio_util.rs @@ -4,7 +4,7 @@ use anyhow::Result; use core::fmt::{Debug, Display}; use futures_util::{Future, FutureExt}; use ostree::gio; -use ostree::prelude::CancellableExt; +use ostree::prelude::{CancellableExt, CancellableExtManual}; /// Call a faillible future, while monitoring `cancellable` and return an error if cancelled. pub async fn run_with_cancellable(f: F, cancellable: &gio::Cancellable) -> Result diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 3cc698a09..cc86cdbbb 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -70,7 +70,7 @@ async fn test_tar_export_reproducible() -> Result<()> { let fixture = Fixture::new_v1()?; let (_, rev) = fixture .srcrepo() - .read_commit(fixture.testref(), gio::NONE_CANCELLABLE)?; + .read_commit(fixture.testref(), gio::Cancellable::NONE)?; let export1 = { let mut h = openssl::hash::Hasher::new(openssl::hash::MessageDigest::sha256())?; ostree_ext::tar::export_commit(fixture.srcrepo(), rev.as_str(), &mut h, None)?; @@ -119,7 +119,7 @@ async fn test_tar_import_signed() -> Result<()> { opts.insert("custom-backend", &"ostree-rs-ext"); fixture .destrepo() - .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; + .remote_add("myremote", None, Some(&opts.end()), gio::Cancellable::NONE)?; let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); let r = ostree_ext::tar::import_tar( fixture.destrepo(), @@ -161,7 +161,7 @@ async fn test_tar_import_signed() -> Result<()> { tokio::task::spawn_blocking(move || -> Result<_> { let src = BufReader::new(srcf); let f = BufWriter::new(destf); - ostree_ext::tar::update_detached_metadata(src, f, None, gio::NONE_CANCELLABLE).unwrap(); + ostree_ext::tar::update_detached_metadata(src, f, None, gio::Cancellable::NONE).unwrap(); Ok(()) }) .await??; @@ -180,7 +180,7 @@ async fn test_tar_import_signed() -> Result<()> { let rev = fixture.srcrepo().require_rev(fixture.testref())?; let commitmeta = fixture .srcrepo() - .read_commit_detached_metadata(&rev, gio::NONE_CANCELLABLE)? + .read_commit_detached_metadata(&rev, gio::Cancellable::NONE)? 
.unwrap(); let mut commitmeta = Vec::from(&*commitmeta.data_as_bytes()); let len = commitmeta.len() / 2; @@ -192,8 +192,13 @@ async fn test_tar_import_signed() -> Result<()> { tokio::task::spawn_blocking(move || -> Result<_> { let src = BufReader::new(srcf); let f = BufWriter::new(destf); - ostree_ext::tar::update_detached_metadata(src, f, Some(&commitmeta), gio::NONE_CANCELLABLE) - .unwrap(); + ostree_ext::tar::update_detached_metadata( + src, + f, + Some(&commitmeta), + gio::Cancellable::NONE, + ) + .unwrap(); Ok(()) }) .await??; @@ -424,8 +429,8 @@ async fn test_tar_import_export() -> Result<()> { let (root, _) = fixture .destrepo() - .read_commit(&imported_commit, gio::NONE_CANCELLABLE)?; - let kdir = ostree_ext::bootabletree::find_kernel_dir(&root, gio::NONE_CANCELLABLE)?; + .read_commit(&imported_commit, gio::Cancellable::NONE)?; + let kdir = ostree_ext::bootabletree::find_kernel_dir(&root, gio::Cancellable::NONE)?; let kdir = kdir.unwrap(); assert_eq!( kdir.basename().unwrap().to_str().unwrap(), @@ -617,7 +622,7 @@ async fn impl_test_container_import_export( opts.insert("custom-backend", &"ostree-rs-ext"); fixture .destrepo() - .remote_add("myremote", None, Some(&opts.end()), gio::NONE_CANCELLABLE)?; + .remote_add("myremote", None, Some(&opts.end()), gio::Cancellable::NONE)?; bash_in!( &fixture.dir, "ostree --repo=dest/repo remote gpg-import --stdin myremote < src/gpghome/key1.asc", @@ -812,7 +817,7 @@ r usr/bin/bash bash-v0 assert_eq!(n, n2); fixture .destrepo() - .prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::NONE_CANCELLABLE)?; + .prune(ostree::RepoPruneFlags::REFS_ONLY, 0, gio::Cancellable::NONE)?; // Build a derived image let srcpath = imgref.imgref.name.as_str(); @@ -881,7 +886,7 @@ r usr/bin/bash bash-v0 assert_eq!( fixture .destrepo() - .list_refs(None, gio::NONE_CANCELLABLE) + .list_refs(None, gio::Cancellable::NONE) .unwrap() .len(), 0 @@ -983,7 +988,7 @@ async fn test_container_import_export_v1() { /// But layers work via the container::write module. #[tokio::test] async fn test_container_write_derive() -> Result<()> { - let cancellable = gio::NONE_CANCELLABLE; + let cancellable = gio::Cancellable::NONE; let fixture = Fixture::new_v1()?; let base_oci_path = &fixture.path.join("exampleos.oci"); let _digest = ostree_ext::container::encapsulate( @@ -1180,7 +1185,7 @@ async fn test_container_write_derive() -> Result<()> { fixture.path.join("destrepo2").as_str(), ostree::RepoMode::Archive, None, - gio::NONE_CANCELLABLE, + gio::Cancellable::NONE, )?; store::copy(fixture.destrepo(), &destrepo2, &derived_ref).await?; From dd23363d25dd8774db6152a3dae0a629d2464f85 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 29 Nov 2022 09:25:38 -0500 Subject: [PATCH 497/775] Bump to 0.10 Since we bumped ostree and glib and cap-std. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 98791f768..d987aade0 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.9.1" +version = "0.10.0" rust-version = "1.63.0" [dependencies] From a63171ccd63de0086552cfffc8764d0066023a7f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 29 Nov 2022 11:04:36 -0500 Subject: [PATCH 498/775] Drop `compat` feature Most recent rpm-ostree doesn't enable the compat feature and no one yelled, so let's pull off the band-aid here and drop the old format logic. 
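In practice, the user-visible effect of the hunks below is that `ExportLayout::V1` becomes the unconditional default and requesting the legacy V0 layout turns into a hard runtime error. A minimal self-contained sketch of the new behavior (illustrative only; the real definitions live in `lib/src/container/encapsulate.rs`):

```rust
// Sketch mirroring the post-patch behavior: V1 is the only supported layout.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum ExportLayout {
    V0,
    V1,
}

impl Default for ExportLayout {
    fn default() -> Self {
        // Previously selected by `cfg!(feature = "compat")`; now fixed to V1.
        Self::V1
    }
}

fn export(layout: ExportLayout) -> anyhow::Result<()> {
    match layout {
        ExportLayout::V0 => anyhow::bail!("This legacy format is no longer supported"),
        ExportLayout::V1 => Ok(()),
    }
}

fn main() {
    assert_eq!(ExportLayout::default(), ExportLayout::V1);
    assert!(export(ExportLayout::V0).is_err());
}
```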
--- .github/workflows/rust.yml | 15 ------ lib/Cargo.toml | 1 - lib/src/container/encapsulate.rs | 27 ++-------- lib/src/container/store.rs | 21 +------- lib/src/fixture.rs | 3 -- lib/src/tar/export.rs | 8 +-- lib/tests/it/main.rs | 85 +++----------------------------- 7 files changed, 12 insertions(+), 148 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 43eef38ca..27d2db68d 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -41,21 +41,6 @@ jobs: run: cargo test -- --nocapture --quiet - name: Manpage generation run: mkdir -p target/man && cargo run --features=docgen -- man --directory target/man - test-compat: - runs-on: ubuntu-latest - container: quay.io/coreos-assembler/fcos-buildroot:testing-devel - steps: - - uses: actions/checkout@v3 - - name: Install deps - run: ./ci/installdeps.sh - - name: Cache Dependencies - uses: Swatinem/rust-cache@v2 - with: - key: "test-compat" - - name: Build - run: cargo test --no-run --features=compat - - name: Run tests - run: cargo test --features=compat -- --nocapture --quiet build: runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel diff --git a/lib/Cargo.toml b/lib/Cargo.toml index d987aade0..5e1552c7f 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -60,5 +60,4 @@ features = ["dox"] [features] docgen = ["clap_mangen"] dox = ["ostree/dox"] -compat = [] internal-testing-api = ["sh-inline", "indoc"] diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index dd7a08845..1719278ed 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -31,11 +31,7 @@ pub enum ExportLayout { impl Default for ExportLayout { fn default() -> Self { - if cfg!(feature = "compat") { - Self::V0 - } else { - Self::V1 - } + Self::V1 } } @@ -136,25 +132,8 @@ fn export_chunked( match opts.format { ExportLayout::V0 => { - if cfg!(not(feature = "compat")) { - let label = opts.format.label(); - anyhow::bail!("This legacy format using the {label} label is no longer supported"); - } - // In V0, the component/content chunks come first. 
- for (layer, name) in layers { - ociw.push_layer(manifest, imgcfg, layer, name.as_str()); - } - // Then, export the final layer - let mut w = ociw.create_layer(compression)?; - ostree_tar::export_final_chunk(repo, commit, chunking.remainder, &mut w)?; - let w = w.into_inner()?; - let final_layer = w.complete()?; - labels.insert( - opts.format.label().into(), - format!("sha256:{}", final_layer.uncompressed_sha256), - ); - ociw.push_layer(manifest, imgcfg, final_layer, description); - Ok(()) + let label = opts.format.label(); + anyhow::bail!("This legacy format using the {label} label is no longer supported"); } ExportLayout::V1 => { // In V1, the ostree layer comes first diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 48530a08a..bc2ec9891 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -372,25 +372,8 @@ pub(crate) fn parse_manifest_layout<'a>( // Now, we need to handle the split differently in chunked v1 vs v0 match layout { ExportLayout::V0 => { - if cfg!(not(feature = "compat")) { - let label = layout.label(); - anyhow::bail!( - "This legacy format using the {label} label is no longer supported" - ); - } - - for layer in manifest.layers() { - if layer == target_layer { - if after_target { - anyhow::bail!("Multiple entries for {}", layer.digest()); - } - after_target = true; - } else if !after_target { - chunk_layers.push(layer); - } else { - derived_layers.push(layer); - } - } + let label = layout.label(); + anyhow::bail!("This legacy format using the {label} label is no longer supported"); } ExportLayout::V1 => { for layer in manifest.layers() { diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 424ad72c4..013920a69 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -357,7 +357,6 @@ pub struct Fixture { srcrepo: ostree::Repo, destrepo: ostree::Repo, - pub format_version: u32, pub selinux: bool, } @@ -404,7 +403,6 @@ impl Fixture { path, srcrepo, destrepo, - format_version: if cfg!(feature = "compat") { 0 } else { 1 }, selinux: true, }) } @@ -624,7 +622,6 @@ impl Fixture { let mut outf = std::io::BufWriter::new(self.dir.create(path)?); #[allow(clippy::needless_update)] let options = crate::tar::ExportOptions { - format_version: self.format_version, ..Default::default() }; crate::tar::export_commit(&self.srcrepo, rev.as_str(), &mut outf, Some(options))?; diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 21ba25368..c7c0d8680 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -19,10 +19,6 @@ use std::ops::RangeInclusive; /// The repository mode generated by a tar export stream. pub const BARE_SPLIT_XATTRS_MODE: &str = "bare-split-xattrs"; -/// The set of allowed format versions; ranges from zero to 1, inclusive. -#[cfg(feature = "compat")] -pub const FORMAT_VERSIONS: RangeInclusive = 0..=1; -#[cfg(not(feature = "compat"))] /// The set of allowed format versions. 
pub const FORMAT_VERSIONS: RangeInclusive = 1..=1; @@ -627,9 +623,7 @@ pub struct ExportOptions { impl Default for ExportOptions { fn default() -> Self { - Self { - format_version: if cfg!(feature = "compat") { 0 } else { 1 }, - } + Self { format_version: 1 } } } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 67e63cc8e..ea1491aef 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -235,7 +235,6 @@ impl Into for (&'static str, tar::EntryType, u32) { } fn validate_tar_expected( - format_version: u32, t: &mut tar::Entries, expected: impl IntoIterator, ) -> Result<()> { @@ -257,24 +256,12 @@ fn validate_tar_expected( seen_paths.insert(entry_path.clone()); if let Some(exp) = expected.remove(entry_path.as_str()) { assert_eq!(header.entry_type(), exp.etype, "{}", entry_path); - let is_old_object = format_version == 0; - let mut expected_mode = exp.mode; + let expected_mode = exp.mode; let header_mode = header.mode().unwrap(); - if is_old_object && !entry_path.starts_with("sysroot/") { - let fmtbits = match header.entry_type() { - // For now assume only hardlinks to regular files - tar::EntryType::Regular | tar::EntryType::Link => libc::S_IFREG, - tar::EntryType::Directory => libc::S_IFDIR, - tar::EntryType::Symlink => 0, - o => panic!("Unexpected entry type {:?}", o), - }; - expected_mode |= fmtbits; - } assert_eq!( header_mode, expected_mode, - "h={header_mode:o} e={expected_mode:o} fmtver: {} type: {:?} path: {}", - format_version, + "h={header_mode:o} e={expected_mode:o} type: {:?} path: {}", header.entry_type(), entry_path ); @@ -337,7 +324,7 @@ fn validate_tar_v1_metadata(src: &mut tar::Entries) -> Resu .into_iter() .map(Into::into); - validate_tar_expected(1, src, prelude)?; + validate_tar_expected(src, common_tar_structure().chain(prelude))?; Ok(()) } @@ -345,47 +332,8 @@ fn validate_tar_v1_metadata(src: &mut tar::Entries) -> Resu /// Validate basic structure of the tar export. #[test] fn test_tar_export_structure() -> Result<()> { - use tar::EntryType::{Directory, Regular}; - - let mut fixture = Fixture::new_v1()?; - - if cfg!(feature = "compat") { - let src_tar = fixture.export_tar()?; - let src_tar = std::io::BufReader::new(fixture.dir.open(src_tar)?); - let mut src_tar = tar::Archive::new(src_tar); - let mut entries = src_tar.entries()?; - // The first entry should be the root directory. 
- let first = entries.next().unwrap()?; - let firstpath = first.path()?; - assert_eq!(firstpath.to_str().unwrap(), "./"); - assert_eq!(first.header().mode()?, libc::S_IFDIR | 0o755); - let next = entries.next().unwrap().unwrap(); - assert_eq!(next.path().unwrap().as_os_str(), "sysroot"); - - let v0_prelude = [ - ("sysroot/config", Regular, 0o644), - ("sysroot/ostree/repo", Directory, 0o755), - ("sysroot/ostree/repo/extensions", Directory, 0o755), - ] - .into_iter() - .map(Into::into); - - // Validate format version 0 - let expected = v0_prelude.chain(common_tar_structure()) - .chain([ - ("sysroot/ostree/repo/xattrs", Directory, 0o755), - ("sysroot/ostree/repo/xattrs/d67db507c5a6e7bfd078f0f3ded0a5669479a902e812931fc65c6f5e01831ef5", Regular, 0o644), - ("usr", Directory, 0o755), - ].into_iter().map(Into::into)); - validate_tar_expected( - fixture.format_version, - &mut entries, - expected.chain(common_tar_contents_all()), - )?; - } + let fixture = Fixture::new_v1()?; - // Validate format version 1 - fixture.format_version = 1; let src_tar = fixture.export_tar()?; let mut src_tar = fixture .dir @@ -394,11 +342,7 @@ fn test_tar_export_structure() -> Result<()> { .map(tar::Archive::new)?; let mut src_tar = src_tar.entries()?; validate_tar_v1_metadata(&mut src_tar).unwrap(); - validate_tar_expected( - fixture.format_version, - &mut src_tar, - common_tar_contents_all(), - )?; + validate_tar_expected(&mut src_tar, common_tar_contents_all())?; Ok(()) } @@ -706,17 +650,11 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu .into_iter() .map(Into::into); - validate_tar_expected(1, &mut pkgdb_blob.entries()?, pkgdb)?; + validate_tar_expected(&mut pkgdb_blob.entries()?, pkgdb)?; Ok(()) } -#[tokio::test] -#[cfg(feature = "compat")] -async fn test_container_chunked_v0() -> Result<()> { - impl_test_container_chunked(ExportLayout::V0).await -} - #[tokio::test] async fn test_container_chunked_v1() -> Result<()> { impl_test_container_chunked(ExportLayout::V1).await @@ -965,17 +903,6 @@ async fn oci_clone(src: impl AsRef, dest: impl AsRef) -> Res Ok(()) } -#[tokio::test] -#[cfg(feature = "compat")] -async fn test_container_import_export_v0() { - impl_test_container_import_export(ExportLayout::V0, false) - .await - .unwrap(); - impl_test_container_import_export(ExportLayout::V0, true) - .await - .unwrap(); -} - #[tokio::test] async fn test_container_import_export_v1() { impl_test_container_import_export(ExportLayout::V1, false) From 48dfc0b74dc12bd11b0d697ba5302a6188ad795e Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 17 Nov 2022 14:26:26 -0500 Subject: [PATCH 499/775] Add an isolation module, use it for pulls by default This effectively lowers into this project code from https://github.com/coreos/rpm-ostree/pull/3937/commits/d661e8f974f8d2550a865c2866476160e333ec72 We want to do this by default; we use the `nobody` user here. --- lib/src/container/mod.rs | 34 +++++++++++++++++++++++++++++ lib/src/integrationtest.rs | 6 +++++- lib/src/isolation.rs | 44 ++++++++++++++++++++++++++++++++++++++ lib/src/lib.rs | 2 ++ 4 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 lib/src/isolation.rs diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 849aabeae..f9ae004d9 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -26,6 +26,7 @@ //! for this is [planned but not implemented](https://github.com/ostreedev/ostree-rs-ext/issues/12). 
use anyhow::anyhow; + use std::borrow::Cow; use std::ops::Deref; use std::str::FromStr; @@ -239,8 +240,23 @@ impl std::fmt::Display for OstreeImageReference { /// Apply default configuration for container image pulls to an existing configuration. /// For example, if `authfile` is not set, and `auth_anonymous` is `false`, and a global configuration file exists, it will be used. +/// +/// If there is no configured explicit subprocess for skopeo, and the process is running +/// as root, then a default isolation of running the process via `nobody` will be applied. pub fn merge_default_container_proxy_opts( config: &mut containers_image_proxy::ImageProxyConfig, +) -> Result<()> { + let user = cap_std_ext::rustix::process::getuid() + .is_root() + .then(|| isolation::DEFAULT_UNPRIVILEGED_USER); + merge_default_container_proxy_opts_with_isolation(config, user) +} + +/// Apply default configuration for container image pulls, with optional support +/// for isolation as an unprivileged user. +pub fn merge_default_container_proxy_opts_with_isolation( + config: &mut containers_image_proxy::ImageProxyConfig, + isolation_user: Option<&str>, ) -> Result<()> { if !config.auth_anonymous && config.authfile.is_none() { config.authfile = crate::globals::get_global_authfile_path()?; @@ -251,6 +267,22 @@ pub fn merge_default_container_proxy_opts( config.auth_anonymous = true; } } + // By default, drop privileges, unless the higher level code + // has configured the skopeo command explicitly. + let isolation_user = config + .skopeo_cmd + .is_none() + .then(|| isolation_user.as_ref()) + .flatten(); + if let Some(user) = isolation_user { + // Read the default authfile if it exists and pass it via file descriptor + // which will ensure it's readable when we drop privileges. + if let Some(authfile) = config.authfile.take() { + config.auth_data = Some(std::fs::File::open(&authfile)?); + } + let cmd = crate::isolation::unprivileged_subprocess("skopeo", user); + config.skopeo_cmd = Some(cmd); + } Ok(()) } @@ -277,6 +309,8 @@ pub mod store; mod update_detachedmeta; pub use update_detachedmeta::*; +use crate::isolation; + #[cfg(test)] mod tests { use super::*; diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 8affb3d3f..bd98b4cf9 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -112,7 +112,11 @@ fn test_proxy_auth() -> Result<()> { std::fs::write(authpath, "{}")?; let mut c = ImageProxyConfig::default(); merge(&mut c)?; - assert_eq!(c.authfile.unwrap().as_path(), authpath,); + if cap_std_ext::rustix::process::getuid().is_root() { + assert!(c.auth_data.is_some()); + } else { + assert_eq!(c.authfile.unwrap().as_path(), authpath,); + } let c = ImageProxyConfig { auth_anonymous: true, ..Default::default() diff --git a/lib/src/isolation.rs b/lib/src/isolation.rs new file mode 100644 index 000000000..0d267a519 --- /dev/null +++ b/lib/src/isolation.rs @@ -0,0 +1,44 @@ +use std::process::Command; + +use once_cell::sync::Lazy; + +pub(crate) const DEFAULT_UNPRIVILEGED_USER: &str = "nobody"; + +/// Checks if the current process is (apparently at least) +/// running under systemd. We use this in various places +/// to e.g. log to the journal instead of printing to stdout. 
+pub(crate) fn running_in_systemd() -> bool { + static RUNNING_IN_SYSTEMD: Lazy = Lazy::new(|| { + // See https://www.freedesktop.org/software/systemd/man/systemd.exec.html#%24INVOCATION_ID + std::env::var_os("INVOCATION_ID") + .filter(|s| !s.is_empty()) + .is_some() + }); + + *RUNNING_IN_SYSTEMD +} + +/// Return a prepared subprocess configuration that will run as an unprivileged user if possible. +/// +/// This currently only drops privileges when run under systemd with DynamicUser. +pub(crate) fn unprivileged_subprocess(binary: &str, user: &str) -> Command { + // TODO: if we detect we're running in a container as uid 0, perhaps at least switch to the + // "bin" user if we can? + if !running_in_systemd() { + return Command::new(binary); + } + let mut cmd = Command::new("setpriv"); + cmd.args(&[ + "--no-new-privs", + "--init-groups", + "--reuid", + user, + "--bounding-set", + "-all", + "--pdeathsig", + "SIGTERM", + "--", + binary, + ]); + cmd +} diff --git a/lib/src/lib.rs b/lib/src/lib.rs index b08ae4fe0..83f381579 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -28,6 +28,8 @@ type Result = anyhow::Result; // Import global functions. mod globals; +mod isolation; + pub mod bootabletree; pub mod cli; pub mod container; From 34fbaee5a94d259304ff52010484eae2929231bf Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 29 Nov 2022 14:15:31 -0500 Subject: [PATCH 500/775] sysroot: Make underlying value `pub` Since we expose it via `Deref` anyways. Prep for usage in bootc. --- lib/src/sysroot.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/src/sysroot.rs b/lib/src/sysroot.rs index d343d754b..5516bac82 100644 --- a/lib/src/sysroot.rs +++ b/lib/src/sysroot.rs @@ -7,7 +7,8 @@ use anyhow::Result; /// A locked system root. #[derive(Debug)] pub struct SysrootLock { - sysroot: ostree::Sysroot, + /// The underlying sysroot value. + pub sysroot: ostree::Sysroot, } impl Drop for SysrootLock { From 8935b5fa89f278e3a9e6b72b97ddfeb2f2fc7070 Mon Sep 17 00:00:00 2001 From: RishabhSaini Date: Tue, 25 Oct 2022 15:34:33 -0400 Subject: [PATCH 501/775] Add support for pulling image from containers-storage Use get_layer_info API from containers/image-proxy, to allow pulling uncompressed blob via diffID --- ci/priv-integration.sh | 4 ++-- lib/Cargo.toml | 2 +- lib/src/container/store.rs | 9 +++++++- lib/src/container/unencapsulate.rs | 35 +++++++++++++++++++++++++----- 4 files changed, 40 insertions(+), 10 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index a991b3a39..f2a5cb61b 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -66,7 +66,7 @@ if ostree-ext-cli container image pull ${sysroot}/ostree/repo ostree-unverified- echo "unexpectedly pulled from containers storage?" 
exit 1 fi -grep "file does not exist" err.txt -echo "ok pulled from containers storage" +grep "skopeo too old to pull from containers-storage" err.txt +echo "ok pulled from containers storage failed" echo ok privileged integration diff --git a/lib/Cargo.toml b/lib/Cargo.toml index ef889d78f..66a088dd8 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -11,7 +11,7 @@ rust-version = "1.63.0" [dependencies] anyhow = "1.0" -containers-image-proxy = "0.5.1" +containers-image-proxy = "0.5.2" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 9d035aa96..2a96e8b1a 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -583,7 +583,7 @@ impl ImageImporter { None } }; - + let des_layers = self.proxy.get_layer_info(&self.proxy_img).await?; for layer in import.ostree_layers.iter_mut() { if layer.commit.is_some() { continue; @@ -598,6 +598,8 @@ impl ImageImporter { &import.manifest, &layer.layer, self.layer_byte_progress.as_ref(), + des_layers.as_ref(), + self.imgref.imgref.transport, ) .await?; let repo = self.repo.clone(); @@ -641,6 +643,8 @@ impl ImageImporter { &import.manifest, &import.ostree_commit_layer.layer, self.layer_byte_progress.as_ref(), + des_layers.as_ref(), + self.imgref.imgref.transport, ) .await?; let repo = self.repo.clone(); @@ -712,6 +716,7 @@ impl ImageImporter { // First download all layers for the base image (if necessary) - we need the SELinux policy // there to label all following layers. self.unencapsulate_base(&mut import, true).await?; + let des_layers = self.proxy.get_layer_info(&self.proxy_img).await?; let mut proxy = self.proxy; let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref); let base_commit = import.ostree_commit_layer.commit.clone().unwrap(); @@ -735,6 +740,8 @@ impl ImageImporter { &import.manifest, &layer.layer, self.layer_byte_progress.as_ref(), + des_layers.as_ref(), + self.imgref.imgref.transport, ) .await?; // An important aspect of this is that we SELinux label the derived layers using diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs index a36b796ca..e4f37334a 100644 --- a/lib/src/container/unencapsulate.rs +++ b/lib/src/container/unencapsulate.rs @@ -205,6 +205,8 @@ pub(crate) async fn fetch_layer_decompress<'a>( manifest: &oci_image::ImageManifest, layer: &'a oci_image::Descriptor, progress: Option<&'a Sender>>, + layer_info: Option<&Vec>, + transport_src: Transport, ) -> Result<( Box, impl Future> + 'a, @@ -212,10 +214,31 @@ pub(crate) async fn fetch_layer_decompress<'a>( use futures_util::future::Either; tracing::debug!("fetching {}", layer.digest()); let layer_index = manifest.layers().iter().position(|x| x == layer).unwrap(); + let (blob, driver, size); + let media_type: &oci_image::MediaType; + match transport_src { + Transport::ContainerStorage => { + let layer_info = layer_info + .ok_or_else(|| anyhow!("skopeo too old to pull from containers-storage"))?; + let n_layers = layer_info.len(); + let layer_blob = layer_info.get(layer_index).ok_or_else(|| { + anyhow!("blobid position {layer_index} exceeds diffid count {n_layers}") + })?; + size = layer_blob.size; + media_type = &layer_blob.media_type; + (blob, driver) = proxy + .get_blob(img, layer_blob.digest.as_str(), size as u64) + .await?; + } + _ => { + size = layer.size(); + media_type = layer.media_type(); + (blob, driver) = proxy + .get_blob(img, layer.digest().as_str(), size as u64) + .await?; + 
} + }; - let (blob, driver) = proxy - .get_blob(img, layer.digest().as_str(), layer.size() as u64) - .await?; if let Some(progress) = progress { let (readprogress, mut readwatch) = ProgressReader::new(blob); let readprogress = tokio::io::BufReader::new(readprogress); @@ -225,16 +248,16 @@ pub(crate) async fn fetch_layer_decompress<'a>( let status = LayerProgress { layer_index, fetched: *fetched, - total: layer.size() as u64, + total: size as u64, }; progress.send_replace(Some(status)); } }; - let reader = new_async_decompressor(layer.media_type(), readprogress)?; + let reader = new_async_decompressor(media_type, readprogress)?; let driver = futures_util::future::join(readproxy, driver).map(|r| r.1); Ok((reader, Either::Left(driver))) } else { - let blob = new_async_decompressor(layer.media_type(), blob)?; + let blob = new_async_decompressor(media_type, blob)?; Ok((blob, Either::Right(driver))) } } From da7ed375037859dd1ff4c5aa034dcfd30fe4faf8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 30 Nov 2022 16:48:55 -0500 Subject: [PATCH 502/775] impl Display for SignatureSource This is the cleaner way to do things, and I want this in bootc. --- lib/src/container/mod.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index f9ae004d9..ba3fbc53c 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -224,20 +224,24 @@ impl std::fmt::Display for ImageReference { } } -impl std::fmt::Display for OstreeImageReference { +impl std::fmt::Display for SignatureSource { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match &self.sigverify { - SignatureSource::OstreeRemote(r) => { - write!(f, "ostree-remote-image:{}:{}", r, self.imgref) - } - SignatureSource::ContainerPolicy => write!(f, "ostree-image-signed:{}", self.imgref), + match self { + SignatureSource::OstreeRemote(r) => write!(f, "ostree-remote-image:{r}"), + SignatureSource::ContainerPolicy => write!(f, "ostree-image-signed"), SignatureSource::ContainerPolicyAllowInsecure => { - write!(f, "ostree-unverified-image:{}", self.imgref) + write!(f, "ostree-unverified-image") } } } } +impl std::fmt::Display for OstreeImageReference { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}:{}", self.sigverify, self.imgref) + } +} + /// Apply default configuration for container image pulls to an existing configuration. /// For example, if `authfile` is not set, and `auth_anonymous` is `false`, and a global configuration file exists, it will be used. /// From 3e7f8c8a69846e2d363b13afc39c28e9a04cdb81 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 1 Dec 2022 09:15:33 -0500 Subject: [PATCH 503/775] store: Tweak status message for layers The formatting was confusing before because one could associate the number both before and after "layers". 
--- ci/priv-integration.sh | 2 +- lib/src/container/store.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index a991b3a39..07a0fc91d 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -58,7 +58,7 @@ echo "ok old image failed to parse" # Verify we have systemd journal messages nsenter -m -t 1 journalctl _COMM=ostree-ext-cli > logs.txt -grep 'layers stored: ' logs.txt +grep 'layers already present: ' logs.txt podman pull ${image} ostree --repo="${sysroot}/ostree/repo" init --mode=bare-user diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index bc2ec9891..e2d76aef0 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -250,7 +250,7 @@ impl PreparedImport { }); (to_fetch > 0).then(|| { let size = crate::glib::format_size(to_fetch_size); - format!("layers stored: {stored} needed: {to_fetch} ({size})") + format!("layers already present: {stored}; layers needed: {to_fetch} ({size})") }) } } From 0b98cf6d66db0d422b234f342ea43f64e07c3b2f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 1 Dec 2022 09:28:26 -0500 Subject: [PATCH 504/775] README.md: Switch to mermaid for arch visualization It looks nicer and is more maintainable. --- README.md | 28 ++++------------------------ 1 file changed, 4 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index d0ade6b12..4a3f6c9be 100644 --- a/README.md +++ b/README.md @@ -23,30 +23,10 @@ High level features (more on this below): - Generalized tar import/export - APIs to diff ostree commits -``` -┌─────────────────┐ -│ │ -│ ostree-rs-ext ├────────────┐ -│ │ │ -└────────┬────────┘ │ - │ │ -┌────────▼────────┐ ┌────────▼─────────┐ -│ │ │ │ -│ ostree-rs │ │ imageproxy-rs │ -│ │ │ │ -└────────┬────────┘ └────────┬─────────┘ - │ │ -┌────────▼────────┐ ┌────────▼─────────┐ -│ │ │ │ -│ ostree │ │ skopeo │ -│ │ │ │ -└─────────────────┘ └────────┬─────────┘ - │ - ┌────────▼─────────┐ - │ │ - │ containers/image │ - │ │ - └──────────────────┘ +```mermaid +flowchart TD + ostree-rs-ext --- ostree-rs --- ostree + ostree-rs-ext --- containers-image-proxy-rs --- skopeo --- containers/image ``` For more information on the container stack, see below. From 90056193b1f78f55d12e8d21f948e5dbb0298d23 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 1 Dec 2022 12:49:45 -0500 Subject: [PATCH 505/775] README.md: Describe chunking briefly We need a ton more docs here, but this is a start. --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 4a3f6c9be..10525d14c 100644 --- a/README.md +++ b/README.md @@ -109,6 +109,13 @@ $ podman run --rm -ti --entrypoint bash quay.io/exampleos/exampleos:stable Running the container directly for e.g. CI testing is one use case. But more importantly, this container image can be pushed to any registry, and used as part of ostree-based operating system release engineering. +However, this is a very simplistic model - it currently generates a container image with a single layer, which means +every change requires redownloading that entire layer. As of recently, the underlying APIs for generating +container images support "chunked" images. But this requires coding for a specific package/build system. + +A good reference code base for generating "chunked" images is [rpm-ostree compose container-encapsulate](https://coreos.github.io/rpm-ostree/container/#converting-ostree-commits-to-new-base-images). 
This is used to generate the current [Fedora CoreOS](https://quay.io/repository/fedora/fedora-coreos?tab=tags&tag=latest) +images. + ### Unencapsulate an ostree-container directly A primary goal of this effort is to make it fully native to an ostree-based operating system to pull a container image directly too. From 00d70deca5c65e2e04bdb7bca689b7f8e48b2145 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 1 Dec 2022 14:09:19 -0500 Subject: [PATCH 506/775] README.md: Link to bootc It's real now! --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 4a3f6c9be..9221db1f2 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,8 @@ to let us know. At the moment, the following projects are known to use this crate: -- https://github.com/coreos/rpm-ostree/ +- https://github.com/containers/bootc +- https://github.com/coreos/rpm-ostree The intention of this crate is to be where new high level ostree-related features land. However, at this time it is kept separate from the core C library, which From 4ccb3fbd93b4e920f30e4f8a0b9982a8b0177008 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 5 Dec 2022 11:11:24 -0500 Subject: [PATCH 507/775] store: Print which ref is missing To aid debugging https://github.com/coreos/rpm-ostree/issues/4185 --- lib/src/container/store.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 202f18181..ce7a2d04f 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -900,9 +900,10 @@ pub fn query_image_commit(repo: &ostree::Repo, commit: &str) -> Result 0; let state = Box::new(LayeredImageState { From 8d61df981cf9dfdd36340ac58537b78781c4ff42 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 5 Dec 2022 11:25:41 -0500 Subject: [PATCH 508/775] store: Add some `tracing::` to GC paths To aid future debugging. We should probably add some of this into the systemd journal always too, but that can be done in the future. --- lib/src/container/store.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 202f18181..0894f5937 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -1000,6 +1000,7 @@ fn list_container_deployment_manifests( .lookup::(META_MANIFEST_DIGEST)? .is_some() { + tracing::trace!("Commit {commit} is a container image"); let manifest = manifest_data_from_commitmeta(commit_meta)?.0; r.push(manifest); } @@ -1030,12 +1031,14 @@ fn gc_image_layers_impl( }) .chain(deployment_commits.into_iter().map(Ok)) .collect::>>()?; + tracing::debug!("Images found: {}", all_manifests.len()); let mut referenced_layers = BTreeSet::new(); for m in all_manifests.iter() { for layer in m.layers() { referenced_layers.insert(layer.digest().as_str()); } } + tracing::debug!("Referenced layers: {}", referenced_layers.len()); let found_layers = repo .list_refs_ext( Some(LAYER_PREFIX), @@ -1044,6 +1047,7 @@ fn gc_image_layers_impl( )? 
        .into_iter()
        .map(|v| v.0);
+    tracing::debug!("Found layers: {}", found_layers.len());
     let mut pruned = 0u32;
     for layer_ref in found_layers {
         let layer_digest = refescape::unprefix_unescape_ref(LAYER_PREFIX, &layer_ref)?;
@@ -1051,6 +1055,7 @@ fn gc_image_layers_impl(
             continue;
         }
         pruned += 1;
+        tracing::debug!("Pruning: {}", layer_ref.as_str());
         repo.set_ref_immediate(None, layer_ref.as_str(), None, cancellable)?;
     }

From c4eb7c2840043247ba55edfb55acd64d542c4d72 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 5 Dec 2022 13:13:12 -0500
Subject: [PATCH 509/775] store: Add rpm-ostree base refs into GC set

Closes: https://github.com/coreos/rpm-ostree/issues/4185

Basically, with package layering or client side commits, rpm-ostree
generates synthetic refs under `rpmostree/base` which point to the base
commits.  But this interacts badly with our image pruning logic.

I'm not happy about the dependency inversion here, but this is just a
band-aid until we have time to think about a more proper fix.  Basically
there are two dynamic "layers" on top of core ostree going on here -
rpm-ostree and container images - and we need to figure out a general
fix.

---
 lib/src/container/store.rs | 38 +++++++++++++++++++++++++-------------
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 9ba02d310..3d7719543 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -41,6 +41,11 @@ pub const META_FILTERED: &str = "ostree.tar-filtered";
 /// The type used to store content filtering information with `META_FILTERED`.
 pub type MetaFilteredData = HashMap<String, HashMap<String, String>>;
 
+/// The ref prefixes which point to ostree deployments. (TODO: Add an official API for this)
+const OSTREE_BASE_DEPLOYMENT_REFS: &[&str] = &["ostree/0", "ostree/1"];
+/// A layering violation we'll carry for a bit to band-aid over https://github.com/coreos/rpm-ostree/issues/4185
+const RPMOSTREE_BASE_REFS: &[&str] = &["rpmostree/base"];
+
 /// Convert e.g. sha256:12345... into `/ostree/container/blob/sha256_2B12345...`.
 fn ref_for_blob_digest(d: &str) -> Result<String> {
     refescape::prefix_escape_for_ref(LAYER_PREFIX, d)
@@ -980,19 +985,26 @@ fn list_container_deployment_manifests(
     repo: &ostree::Repo,
     cancellable: Option<&gio::Cancellable>,
 ) -> Result<Vec<ImageManifest>> {
-    let commits = repo
-        .list_refs_ext(
-            Some("ostree/0"),
-            ostree::RepoListRefsExtFlags::empty(),
-            cancellable,
-        )?
-        .into_iter()
-        .chain(repo.list_refs_ext(
-            Some("ostree/1"),
-            ostree::RepoListRefsExtFlags::empty(),
-            cancellable,
-        )?)
-        .map(|v| v.1);
+    // Gather all refs which start with ostree/0/ or ostree/1/ or rpmostree/base/
+    // and create a set of the commits which they reference.
+    let commits = OSTREE_BASE_DEPLOYMENT_REFS
+        .iter()
+        .chain(RPMOSTREE_BASE_REFS)
+        .try_fold(
+            std::collections::HashSet::new(),
+            |mut acc, &p| -> Result<_> {
+                let refs = repo.list_refs_ext(
+                    Some(p),
+                    ostree::RepoListRefsExtFlags::empty(),
+                    cancellable,
+                )?;
+                for (_, v) in refs {
+                    acc.insert(v);
+                }
+                Ok(acc)
+            },
+        )?;
+    // Loop over the commits - if they refer to a container image, add that to our return value.
     let mut r = Vec::new();
     for commit in commits {
         let commit_obj = repo.load_commit(&commit)?.0;

From 6a6e0dc6e8d56605f793228094021a4976085ea0 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 6 Dec 2022 10:04:56 -0500
Subject: [PATCH 510/775] cli: Make more output/progress functions `pub`

For reuse in bootc.

(Until it becomes a daemon, then this all needs to be redone again)
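A hedged sketch of the downstream usage this enables: only `handle_layer_progress_print` is taken from the hunks below, while the `request_progress`/`request_layer_progress` accessor names on `ImageImporter` are assumptions here.

```rust
// Sketch: a bootc-like consumer reusing the now-public progress plumbing.
async fn pull_with_progress(
    mut imp: ostree_ext::container::store::ImageImporter,
) -> anyhow::Result<()> {
    let layer_progress = imp.request_progress(); // assumed accessor
    let layer_byte_progress = imp.request_layer_progress(); // assumed accessor
    let printer = tokio::task::spawn(ostree_ext::cli::handle_layer_progress_print(
        layer_progress,
        layer_byte_progress,
    ));
    // ... drive prepare()/import() on `imp` here; the printer task ends
    // once the progress channels are dropped ...
    printer.await?;
    Ok(())
}
```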
(Until it becomes a daemon, then this all needs to be redone again) --- lib/src/cli.rs | 15 +++++++++------ lib/src/container/deploy.rs | 2 +- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 9e36f7987..91df29b32 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -433,7 +433,8 @@ pub fn layer_progress_format(p: &ImportProgress) -> String { } } -async fn handle_layer_progress_print( +/// Write container fetch progress to standard output. +pub async fn handle_layer_progress_print( mut layers: Receiver, mut layer_bytes: tokio::sync::watch::Receiver>, ) { @@ -477,15 +478,17 @@ async fn handle_layer_progress_print( } } -fn print_layer_status(prep: &PreparedImport) { +/// Write the status of layers to download. +pub fn print_layer_status(prep: &PreparedImport) { if let Some(status) = prep.format_layer_status() { println!("{status}"); } } -pub(crate) fn print_deprecated_warning(msg: &str) { +/// Write a deprecation notice, and sleep for 3 seconds. +pub async fn print_deprecated_warning(msg: &str) { eprintln!("warning: {msg}"); - std::thread::sleep(std::time::Duration::from_secs(3)); + tokio::time::sleep(std::time::Duration::from_secs(3)).await } /// Import a container image with an encapsulated ostree commit. @@ -513,7 +516,7 @@ async fn container_import( } let import = import?; if let Some(warning) = import.deprecated_warning.as_deref() { - print_deprecated_warning(warning); + print_deprecated_warning(warning).await; } if let Some(write_ref) = write_ref { repo.set_ref_immediate( @@ -585,7 +588,7 @@ async fn container_store( PrepareResult::Ready(r) => r, }; if let Some(warning) = prep.deprecated_warning() { - print_deprecated_warning(warning); + print_deprecated_warning(warning).await; } print_layer_status(&prep); let printer = (!quiet).then(|| { diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 83dcc0148..65d7c57d1 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -62,7 +62,7 @@ pub async fn deploy( PrepareResult::AlreadyPresent(r) => r, PrepareResult::Ready(prep) => { if let Some(warning) = prep.deprecated_warning() { - crate::cli::print_deprecated_warning(warning); + crate::cli::print_deprecated_warning(warning).await; } imp.import(prep).await? From a04576a36498da0bed93ac127a2e587baa065677 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 7 Dec 2022 14:40:27 -0500 Subject: [PATCH 511/775] Add a SELinux module with helper to verify install_t This is a hack to help detect the situation in https://github.com/containers/bootc/issues/24 Ultimately, this whole issue makes it extremely hard to expose a *library* interface to our users because the requirement is infectious - they also need to be `install_t`. Anyways for now, this new module will help at least detect the situation. 
--- lib/Cargo.toml | 1 + lib/src/lib.rs | 2 ++ lib/src/selinux.rs | 40 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 43 insertions(+) create mode 100644 lib/src/selinux.rs diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 1257a5cf9..63bf15148 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -26,6 +26,7 @@ fn-error-context = "0.2.0" futures-util = "0.3.13" gvariant = "0.4.0" hex = "0.4.3" +io-lifetimes = "1.0" indicatif = "0.17.0" once_cell = "1.9" libc = "0.2.92" diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 83f381579..f205525e7 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -43,6 +43,8 @@ pub mod sysroot; pub mod tar; pub mod tokio_util; +pub mod selinux; + pub mod chunking; pub mod commit; pub mod objectsource; diff --git a/lib/src/selinux.rs b/lib/src/selinux.rs new file mode 100644 index 000000000..9467651eb --- /dev/null +++ b/lib/src/selinux.rs @@ -0,0 +1,40 @@ +//! SELinux-related helper APIs. + +use anyhow::Result; +use cap_std_ext::rustix; +use fn_error_context::context; +use std::path::Path; + +/// The well-known selinuxfs mount point +const SELINUX_MNT: &str = "/sys/fs/selinux"; +/// Hardcoded value for SELinux domain capable of setting unknown contexts. +const INSTALL_T: &str = "install_t"; + +/// Query for whether or not SELinux is enabled. +pub fn is_selinux_enabled() -> bool { + Path::new(SELINUX_MNT).join("access").exists() +} + +/// Return an error If the current process is not running in the `install_t` domain. +#[context("Verifying self is install_t SELinux domain")] +pub fn verify_install_domain() -> Result<()> { + // If it doesn't look like SELinux is enabled, then nothing to do. + if !is_selinux_enabled() { + return Ok(()); + } + + // If we're not root, there's no need to try to warn because we can only + // do read-only operations anyways. + if !rustix::process::getuid().is_root() { + return Ok(()); + } + + let self_domain = std::fs::read_to_string("/proc/self/attr/current")?; + let is_install_t = self_domain.split(':').any(|x| x == INSTALL_T); + if !is_install_t { + anyhow::bail!( + "Detected SELinux enabled system, but the executing binary is not labeled install_exec_t" + ); + } + Ok(()) +} From cf0056c8e338448680124fd2bd522291d1c0efbe Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 7 Dec 2022 16:25:16 -0500 Subject: [PATCH 512/775] Release 0.10.1 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 63bf15148..dc5074727 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.10.0" +version = "0.10.1" rust-version = "1.63.0" [dependencies] From 5c6b928dc3f3244ba4d67e1e2e8b91f0aaea5ea3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 11 Dec 2022 14:08:37 -0500 Subject: [PATCH 513/775] store: Make failure to query base image non-fatal This is another necessary fix for https://github.com/coreos/rpm-ostree/pull/4204 --- lib/src/container/store.rs | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 3d7719543..02c806aa4 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -505,7 +505,7 @@ impl ImageImporter { // Query for previous stored state let (previous_manifest_digest, previous_imageid) = - if let Some(previous_state) = query_image(&self.repo, &self.imgref)? 
{ + if let Some(previous_state) = try_query_image_ref(&self.repo, &self.imgref.imgref)? { // If the manifest digests match, we're done. if previous_state.manifest_digest == manifest_digest { return Ok(PrepareResult::AlreadyPresent(previous_state)); @@ -880,6 +880,26 @@ pub fn list_images(repo: &ostree::Repo) -> Result> { .collect() } +/// Attempt to query metadata for a pulled image; if it is corrupted, +/// the error is printed to stderr and None is returned. +fn try_query_image_ref( + repo: &ostree::Repo, + imgref: &ImageReference, +) -> Result>> { + let ostree_ref = &ref_for_image(imgref)?; + if let Some(merge_rev) = repo.resolve_rev(ostree_ref, true)? { + match query_image_commit(repo, merge_rev.as_str()) { + Ok(r) => Ok(Some(r)), + Err(e) => { + eprintln!("error: failed to query image commit: {e}"); + Ok(None) + } + } + } else { + Ok(None) + } +} + /// Query metadata for a pulled image. pub fn query_image_ref( repo: &ostree::Repo, From 36426459a22d3d25f478e77547b8c9181d822b1f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 11 Dec 2022 14:16:05 -0500 Subject: [PATCH 514/775] Release 0.10.2 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index dc5074727..0dd519f55 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.10.1" +version = "0.10.2" rust-version = "1.63.0" [dependencies] From d655a166e4be37ff60b949bd24616f173df0cd67 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 11 Dec 2022 16:04:09 -0500 Subject: [PATCH 515/775] lib: Switch to olpc-cjson This is maintained as part of https://github.com/awslabs/tough and is used by several crates, including the pure Rust OCI crate https://lib.rs/crates/oci-distribution The immediate motivation is dropping our duplicate `itoa` dep. 
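The mechanical shape of the replacement, as a minimal standalone sketch (the helper name is ours; the serializer calls mirror the `ocidir.rs` hunks below):

```rust
use olpc_cjson::CanonicalFormatter;
use serde::Serialize;

// Serialize any value to canonical JSON, as ocidir.rs now does for
// JSON blobs and the OCI index.
fn to_canonical_json<T: Serialize>(v: &T) -> anyhow::Result<Vec<u8>> {
    let mut buf = Vec::new();
    let mut ser = serde_json::Serializer::with_formatter(&mut buf, CanonicalFormatter::new());
    v.serialize(&mut ser)?;
    Ok(buf)
}
```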
--- lib/Cargo.toml | 2 +- lib/src/container/ocidir.rs | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 0dd519f55..96e3cbd41 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -16,7 +16,7 @@ async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" chrono = "0.4.19" -cjson = "0.1.1" +olpc-cjson = "0.1.1" clap = { version= "3.2", features = ["derive"] } clap_mangen = { version = "0.1", optional = true } cap-std-ext = "1.0" diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 2dd5587cc..7ba759d66 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -13,7 +13,9 @@ use flate2::write::GzEncoder; use fn_error_context::context; use oci_image::MediaType; use oci_spec::image::{self as oci_image, Descriptor}; +use olpc_cjson::CanonicalFormatter; use openssl::hash::{Hasher, MessageDigest}; +use serde::Serialize; use std::collections::HashMap; use std::fmt::Debug; use std::fs::File; @@ -114,7 +116,8 @@ pub fn write_json_blob( media_type: oci_image::MediaType, ) -> Result { let mut w = BlobWriter::new(ocidir)?; - cjson::to_writer(&mut w, v).map_err(|e| anyhow!("{:?}", e))?; + let mut ser = serde_json::Serializer::with_formatter(&mut w, CanonicalFormatter::new()); + v.serialize(&mut ser).context("Failed to serialize")?; let blob = w.complete()?; Ok(blob.descriptor().media_type(media_type)) } @@ -310,8 +313,10 @@ impl OciDir { }; self.dir - .atomic_replace_with("index.json", |w| -> Result<()> { - cjson::to_writer(w, &index).map_err(|e| anyhow::anyhow!("{:?}", e))?; + .atomic_replace_with("index.json", |mut w| -> Result<()> { + let mut ser = + serde_json::Serializer::with_formatter(&mut w, CanonicalFormatter::new()); + index.serialize(&mut ser).context("Failed to serialize")?; Ok(()) })?; Ok(manifest) @@ -334,8 +339,12 @@ impl OciDir { .build() .unwrap(); self.dir - .atomic_replace_with("index.json", |w| -> Result<()> { - cjson::to_writer(w, &index_data).map_err(|e| anyhow::anyhow!("{:?}", e))?; + .atomic_replace_with("index.json", |mut w| -> Result<()> { + let mut ser = + serde_json::Serializer::with_formatter(&mut w, CanonicalFormatter::new()); + index_data + .serialize(&mut ser) + .context("Failed to serialize")?; Ok(()) })?; Ok(()) From 9165901d1e5cb598d45febbb8a28fb9d6d2e9318 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 13 Dec 2022 09:00:24 -0500 Subject: [PATCH 516/775] container/store: Close image in happy path Closes: https://github.com/coreos/rpm-ostree/issues/4213 This fixes leaking the temporary directory but only in the "happy path" i.e. non-error paths for now. Handling this better would require some API changes to ensure we can `impl Drop` on the image. 
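One possible shape for the `impl Drop` direction mentioned above, as a purely illustrative sketch: the core constraint is that `close_image` is async while `Drop` is synchronous, so a guard type would have to hand the close back to the runtime. None of these types exist in this form today.

```rust
use std::sync::Arc;

// Illustrative guard (does not exist today): defer the async close to the
// runtime so that error paths also stop leaking the proxy's tempdir.
struct ImageGuard {
    proxy: Arc<containers_image_proxy::ImageProxy>,
    img: Option<containers_image_proxy::OpenedImage>,
}

impl Drop for ImageGuard {
    fn drop(&mut self) {
        if let Some(img) = self.img.take() {
            let proxy = Arc::clone(&self.proxy);
            // Best effort: Drop cannot await, so hand the close to tokio.
            tokio::task::spawn(async move {
                let _ = proxy.close_image(&img).await;
            });
        }
    }
}
```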
--- lib/src/container/store.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 02c806aa4..02c180e17 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -681,6 +681,9 @@ impl ImageImporter { } let deprecated_warning = prep.deprecated_warning().map(ToOwned::to_owned); self.unencapsulate_base(&mut prep, false).await?; + // TODO change the imageproxy API to ensure this happens automatically when + // the image reference is dropped + self.proxy.close_image(&self.proxy_img).await?; let ostree_commit = prep.ostree_commit_layer.commit.unwrap(); let image_digest = prep.manifest_digest; Ok(Import { @@ -706,6 +709,7 @@ impl ImageImporter { self.unencapsulate_base(&mut import, true).await?; let des_layers = self.proxy.get_layer_info(&self.proxy_img).await?; let mut proxy = self.proxy; + let proxy_img = self.proxy_img; let target_imgref = self.target_imgref.as_ref().unwrap_or(&self.imgref); let base_commit = import.ostree_commit_layer.commit.clone().unwrap(); @@ -724,7 +728,7 @@ impl ImageImporter { } let (blob, driver) = super::unencapsulate::fetch_layer_decompress( &mut proxy, - &self.proxy_img, + &proxy_img, &import.manifest, &layer.layer, self.layer_byte_progress.as_ref(), @@ -755,6 +759,10 @@ impl ImageImporter { } } + // TODO change the imageproxy API to ensure this happens automatically when + // the image reference is dropped + proxy.close_image(&proxy_img).await?; + // We're done with the proxy, make sure it didn't have any errors. proxy.finalize().await?; tracing::debug!("finalized proxy"); From df760d4759955926c48f391fdbf9fd8741ae8b50 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 19 Dec 2022 16:31:04 -0500 Subject: [PATCH 517/775] container: Fix idempotency of config defaults merging In rpm-ostree we provide a custom config to override the isolation user, calling the merge function. But the logic for idempotency of auth handling was buggy - it needed to consider having `auth_data` already specified. 
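The invariant being fixed, restated as a small sketch (it mirrors the new `test_merge_authopts` unit test below):

```rust
// Caller-provided auth_data now counts as explicit auth configuration, so
// merging defaults (once or repeatedly) must not force anonymous pulls.
fn check_idempotent_merge() -> anyhow::Result<()> {
    let mut config = containers_image_proxy::ImageProxyConfig::default();
    config.auth_data = Some(std::fs::File::open("/dev/null")?);
    for _ in 0..2 {
        ostree_ext::container::merge_default_container_proxy_opts_with_isolation(
            &mut config,
            None,
        )?;
        assert!(!config.auth_anonymous);
        assert!(config.auth_data.is_some());
    }
    Ok(())
}
```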
---
 lib/src/container/mod.rs | 32 +++++++++++++++++++++++++++++++-
 1 file changed, 31 insertions(+), 1 deletion(-)

diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index ba3fbc53c..e0cc68304 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -262,7 +262,9 @@ pub fn merge_default_container_proxy_opts_with_isolation(
     config: &mut containers_image_proxy::ImageProxyConfig,
     isolation_user: Option<&str>,
 ) -> Result<()> {
-    if !config.auth_anonymous && config.authfile.is_none() {
+    let auth_specified =
+        config.auth_anonymous || config.authfile.is_some() || config.auth_data.is_some();
+    if !auth_specified {
         config.authfile = crate::globals::get_global_authfile_path()?;
         // If there's no authfile, then force on anonymous pulls to ensure
         // that the container stack doesn't try to find it in the standard
@@ -317,6 +319,10 @@ use crate::isolation;
 
 #[cfg(test)]
 mod tests {
+    use std::process::Command;
+
+    use containers_image_proxy::ImageProxyConfig;
+
     use super::*;
 
     const INVALID_IRS: &[&str] = &["", "foo://", "docker:blah", "registry:", "foo:bar"];
@@ -397,4 +403,28 @@ mod tests {
             .unwrap();
         assert_eq!(&ir_shorthand, &ir);
     }
+
+    #[test]
+    fn test_merge_authopts() {
+        // Verify idempotence of authentication processing
+        let mut c = ImageProxyConfig::default();
+        let authf = std::fs::File::open("/dev/null").unwrap();
+        c.auth_data = Some(authf);
+        super::merge_default_container_proxy_opts_with_isolation(&mut c, None).unwrap();
+        assert!(!c.auth_anonymous);
+        assert!(c.authfile.is_none());
+        assert!(c.auth_data.is_some());
+        assert!(c.skopeo_cmd.is_none());
+        super::merge_default_container_proxy_opts_with_isolation(&mut c, None).unwrap();
+        assert!(!c.auth_anonymous);
+        assert!(c.authfile.is_none());
+        assert!(c.auth_data.is_some());
+        assert!(c.skopeo_cmd.is_none());
+
+        // Verify interaction with explicit isolation
+        let mut c = ImageProxyConfig::default();
+        c.skopeo_cmd = Some(Command::new("skopeo"));
+        super::merge_default_container_proxy_opts_with_isolation(&mut c, Some("foo")).unwrap();
+        assert_eq!(c.skopeo_cmd.unwrap().get_program(), "skopeo");
+    }
 }

From 177a9138e02637ca68800e47992ec07568b03cde Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 19 Dec 2022 17:00:00 -0500
Subject: [PATCH 518/775] Release 0.10.3

---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 96e3cbd41..28f516137 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0"
 name = "ostree-ext"
 readme = "README.md"
 repository = "https://github.com/ostreedev/ostree-rs-ext"
-version = "0.10.2"
+version = "0.10.3"
 rust-version = "1.63.0"
 
 [dependencies]

From a506e9bc52e101f8306e4e0cebd37d644e9f2ef6 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 3 Jan 2023 17:59:34 -0500
Subject: [PATCH 519/775] isolation: Avoid bug with `--pdeathsig SIGTERM` in Turkish locales

Only in locales that use the dotless-I case conversion do we hit the
fact that util-linux uses `strcasecmp`, which is locale-sensitive.
(It shouldn't be for this, because we're not accepting translated
signal names; it should be using an ASCII-only string comparison.)

Anyways, we can work around this by omitting the leading `SIG`
ourselves.
--- lib/src/isolation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/isolation.rs b/lib/src/isolation.rs index 0d267a519..495af4078 100644 --- a/lib/src/isolation.rs +++ b/lib/src/isolation.rs @@ -36,7 +36,7 @@ pub(crate) fn unprivileged_subprocess(binary: &str, user: &str) -> Command { "--bounding-set", "-all", "--pdeathsig", - "SIGTERM", + "TERM", "--", binary, ]); From cffc82cb7e41729b99cbca1a555c702e51a3f272 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 11 Jan 2023 13:55:12 -0500 Subject: [PATCH 520/775] tar: Add error prefixing around commit verification In an error message we got a bare `error: No such remote ...`. Let's clarify where this is coming from. --- lib/src/tar/import.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 7d7eb4f4f..9727179f8 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -670,12 +670,14 @@ impl Importer { // Now that we have both the commit and detached metadata in memory, verify that // the signatures in the detached metadata correctly sign the commit. - self.repo.signature_verify_commit_data( - remote, - &commit.data_as_bytes(), - &commitmeta.data_as_bytes(), - ostree::RepoVerifyFlags::empty(), - )?; + self.repo + .signature_verify_commit_data( + remote, + &commit.data_as_bytes(), + &commitmeta.data_as_bytes(), + ostree::RepoVerifyFlags::empty(), + ) + .context("Verifying ostree commit in tar stream")?; self.repo.mark_commit_partial(&checksum, true)?; From fcb0d48045b03e1c354198a633d89d059aa4ea1c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 11 Jan 2023 13:55:39 -0500 Subject: [PATCH 521/775] store: Add error prefixing on base import On general principle; we got a bare error in the case of a missing ostree remote. --- lib/src/container/store.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 02c180e17..433dc0e0e 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -554,6 +554,7 @@ impl ImageImporter { } /// Extract the base ostree commit. + #[context("Unencapsulating base")] pub(crate) async fn unencapsulate_base( &mut self, import: &mut store::PreparedImport, @@ -643,7 +644,9 @@ impl ImageImporter { let mut importer = crate::tar::Importer::new_for_commit(&repo, remote); let blob = tokio_util::io::SyncIoBridge::new(blob); let mut archive = tar::Archive::new(blob); - importer.import_commit(&mut archive, Some(cancellable))?; + importer + .import_commit(&mut archive, Some(cancellable)) + .context("Importing commit layer")?; let commit = importer.finish_import_commit(); if write_refs { repo.transaction_set_ref(None, &target_ref, Some(commit.as_str())); From c2a9b4f0f5010e15fb8f9d688f8ed7e7fb11cc63 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 11 Jan 2023 15:38:03 -0500 Subject: [PATCH 522/775] Add even more error prefixing I looked through more of the container and tar paths, and decided to add even more error prefixing to aid future debugging.
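The diff below uses two prefixing idioms: anyhow's `.context(...)` on a single fallible call, and the `#[context(...)]` attribute from the fn-error-context crate on a whole function. A minimal sketch of both (the function names here are placeholders):

```rust
use anyhow::{Context, Result};
use fn_error_context::context;

// Every error escaping this function gains the prefix "Importing commit: ".
#[context("Importing commit")]
fn import_commit_sketch() -> Result<()> {
    // A single call site can also add its own, more specific prefix.
    fallible_step().context("Reading tar header")?;
    Ok(())
}

fn fallible_step() -> Result<()> {
    Ok(())
}
```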
--- lib/src/container/store.rs | 10 +++++++--- lib/src/tar/import.rs | 6 ++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 433dc0e0e..643100e97 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -274,6 +274,7 @@ pub(crate) fn query_layer( }) } +#[context("Reading manifest data from commit")] fn manifest_data_from_commitmeta( commit_meta: &glib::VariantDict, ) -> Result<(oci_image::ImageManifest, String)> { @@ -332,6 +333,7 @@ fn layer_from_diffid<'a>( }) } +#[context("Parsing manifest layout")] pub(crate) fn parse_manifest_layout<'a>( manifest: &'a ImageManifest, config: &ImageConfiguration, @@ -413,6 +415,7 @@ pub(crate) fn parse_manifest_layout<'a>( impl ImageImporter { /// Create a new importer. + #[context("Creating importer")] pub async fn new( repo: &ostree::Repo, imgref: &OstreeImageReference, @@ -644,9 +647,7 @@ impl ImageImporter { let mut importer = crate::tar::Importer::new_for_commit(&repo, remote); let blob = tokio_util::io::SyncIoBridge::new(blob); let mut archive = tar::Archive::new(blob); - importer - .import_commit(&mut archive, Some(cancellable)) - .context("Importing commit layer")?; + importer.import_commit(&mut archive, Some(cancellable))?; let commit = importer.finish_import_commit(); if write_refs { repo.transaction_set_ref(None, &target_ref, Some(commit.as_str())); @@ -912,6 +913,7 @@ fn try_query_image_ref( } /// Query metadata for a pulled image. +#[context("Querying image {imgref}")] pub fn query_image_ref( repo: &ostree::Repo, imgref: &ImageReference, @@ -974,6 +976,7 @@ fn manifest_for_image(repo: &ostree::Repo, imgref: &ImageReference) -> Result<Im /// Copy a downloaded image from one repository to another. +#[context("Copying image")] pub async fn copy( src_repo: &ostree::Repo, dest_repo: &ostree::Repo, imgref: &OstreeImageReference, diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 9727179f8..db23609b0 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -222,6 +222,7 @@ impl Importer { } /// Import a metadata object. + #[context("Importing metadata object")] fn import_metadata( &mut self, entry: tar::Entry<impl Read + Send + Unpin>, @@ -249,6 +250,7 @@ impl Importer { } /// Import a content object, large regular file flavour. + #[context("Importing regfile")] fn import_large_regfile_object( &mut self, mut entry: tar::Entry<impl Read + Send + Unpin>, @@ -286,6 +288,7 @@ impl Importer { } /// Import a content object, small regular file flavour. + #[context("Importing regfile small")] fn import_small_regfile_object( &mut self, mut entry: tar::Entry<impl Read + Send + Unpin>, @@ -313,6 +316,7 @@ impl Importer { } /// Import a content object, symlink flavour.
+ #[context("Importing symlink")] fn import_symlink_object( &mut self, entry: tar::Entry, @@ -602,6 +606,7 @@ impl Importer { Ok(()) } + #[context("Importing objects")] pub(crate) fn import_objects( &mut self, archive: &mut tar::Archive, @@ -614,6 +619,7 @@ impl Importer { self.import_objects_impl(ents, cancellable) } + #[context("Importing commit")] pub(crate) fn import_commit( &mut self, archive: &mut tar::Archive, From 44845039946c187c215a0b3465ef7c01462b13c3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 12 Jan 2023 08:51:14 -0500 Subject: [PATCH 523/775] Release 0.10.4 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 28f516137..f9054c917 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.10.3" +version = "0.10.4" rust-version = "1.63.0" [dependencies] From 1aefaa72d914b28758ce0df17a39e5871dc41369 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 12 Jan 2023 12:01:19 -0500 Subject: [PATCH 524/775] tests: Trim some v0 export format code We're not supporting this anymore; continue to trim it from our tests. (There's even more to do here, but this compiles) --- lib/src/fixture.rs | 17 ++++------------- lib/src/integrationtest.rs | 12 +++--------- lib/tests/it/main.rs | 33 +++++++++------------------------ 3 files changed, 16 insertions(+), 46 deletions(-) diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 013920a69..53df2b9e5 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -3,7 +3,7 @@ #![allow(missing_docs)] use crate::chunking::ObjectMetaSized; -use crate::container::{Config, ExportLayout, ExportOpts, ImageReference, Transport}; +use crate::container::{Config, ExportOpts, ImageReference, Transport}; use crate::objectsource::{ObjectMeta, ObjectSourceMeta}; use crate::prelude::*; use crate::{gio, glib}; @@ -632,14 +632,8 @@ impl Fixture { /// Export the current ref as a container image. /// This defaults to using chunking. 
#[context("Exporting container")] - pub async fn export_container( - &self, - export_format: ExportLayout, - ) -> Result<(ImageReference, String)> { - let name = match export_format { - ExportLayout::V0 => "oci-v0", - ExportLayout::V1 => "oci-v1", - }; + pub async fn export_container(&self) -> Result<(ImageReference, String)> { + let name = "oci-v1"; let container_path = &self.path.join(name); if container_path.exists() { std::fs::remove_dir_all(container_path)?; @@ -660,10 +654,7 @@ impl Fixture { let contentmeta = self.get_object_meta().context("Computing object meta")?; let contentmeta = ObjectMetaSized::compute_sizes(self.srcrepo(), contentmeta) .context("Computing sizes")?; - let opts = ExportOpts { - format: export_format, - ..Default::default() - }; + let opts = ExportOpts::default(); let digest = crate::container::encapsulate( self.srcrepo(), self.testref(), diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index bd98b4cf9..d56aa95ea 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -2,11 +2,7 @@ use std::path::Path; -use crate::{ - container::{ocidir, ExportLayout}, - container_utils::is_ostree_container, - ocidir::RawLayerWriter, -}; +use crate::{container::ocidir, container_utils::is_ostree_container, ocidir::RawLayerWriter}; use anyhow::Result; use camino::Utf8Path; use cap_std::fs::Dir; @@ -134,10 +130,8 @@ fn test_proxy_auth() -> Result<()> { /// Useful for debugging things interactively. pub(crate) async fn create_fixture() -> Result<()> { let fixture = crate::fixture::Fixture::new_v1()?; - for format in [ExportLayout::V0, ExportLayout::V1] { - let imgref = fixture.export_container(format).await?.0; - println!("Wrote: {:?}", imgref); - } + let imgref = fixture.export_container().await?.0; + println!("Wrote: {:?}", imgref); let path = fixture.into_tempdir().into_path(); println!("Wrote: {:?}", path); Ok(()) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index ea1491aef..4d303adf3 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -656,15 +656,12 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu } #[tokio::test] -async fn test_container_chunked_v1() -> Result<()> { - impl_test_container_chunked(ExportLayout::V1).await -} - -async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> { +async fn test_container_chunked() -> Result<()> { + let format = ExportLayout::V1; let nlayers = *CONTENTS_V0_LEN - 1; let mut fixture = Fixture::new_v1()?; - let (imgref, expected_digest) = fixture.export_container(format).await.unwrap(); + let (imgref, expected_digest) = fixture.export_container().await.unwrap(); let imgref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref, @@ -714,7 +711,7 @@ r usr/bin/bash bash-v0 .update(FileDef::iter_from(ADDITIONS), std::iter::empty()) .context("Failed to update")?; - let expected_digest = fixture.export_container(format).await.unwrap().1; + let expected_digest = fixture.export_container().await.unwrap().1; assert_ne!(digest, expected_digest); let mut imp = @@ -731,20 +728,8 @@ r usr/bin/bash bash-v0 let (first, second) = (to_fetch[0], to_fetch[1]); assert!(first.0.commit.is_none()); assert!(second.0.commit.is_none()); - match format { - ExportLayout::V0 => { - assert_eq!(first.1, "bash"); - assert!( - second.1.starts_with("ostree export of commit"), - "{}", - second.1 - ); - } - ExportLayout::V1 => { - assert_eq!(first.1, "testlink"); - assert_eq!(second.1, "bash"); - } - } + assert_eq!(first.1, 
"testlink"); + assert_eq!(second.1, "bash"); assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1); let n = store::count_layer_references(fixture.destrepo())? as i64; @@ -838,7 +823,7 @@ r usr/bin/bash bash-v0 async fn test_container_var_content() -> Result<()> { let fixture = Fixture::new_v1()?; - let imgref = fixture.export_container(ExportLayout::V1).await.unwrap().0; + let imgref = fixture.export_container().await.unwrap().0; let imgref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref, @@ -1129,7 +1114,7 @@ async fn test_container_write_derive() -> Result<()> { #[tokio::test] async fn test_container_write_derive_sysroot_hardlink() -> Result<()> { let fixture = Fixture::new_v1()?; - let baseimg = &fixture.export_container(ExportLayout::V1).await?.0; + let baseimg = &fixture.export_container().await?.0; let basepath = &match baseimg.transport { Transport::OciDir => fixture.path.join(baseimg.name.as_str()), _ => unreachable!(), @@ -1222,7 +1207,7 @@ async fn test_old_code_parses_new_export() -> Result<()> { return Ok(()); } let fixture = Fixture::new_v1()?; - let imgref = fixture.export_container(ExportLayout::V1).await?.0; + let imgref = fixture.export_container().await?.0; let imgref = OstreeImageReference { sigverify: SignatureSource::ContainerPolicyAllowInsecure, imgref, From cf231fd0eea71ac2e40abfc60a2292ea6e837f63 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 12 Jan 2023 11:48:56 -0500 Subject: [PATCH 525/775] container: Add an API to retrieve the version number This makes it more convenient for higher level tools to display the associated image version number. --- lib/src/container/mod.rs | 23 +++++++++++++++++++++++ lib/src/container/store.rs | 7 ++++++- lib/tests/it/main.rs | 1 + 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index e0cc68304..ff666bd13 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -28,6 +28,7 @@ use anyhow::anyhow; use std::borrow::Cow; +use std::collections::HashMap; use std::ops::Deref; use std::str::FromStr; @@ -73,6 +74,9 @@ pub enum SignatureSource { ContainerPolicyAllowInsecure, } +/// A commonly used pre-OCI label for versions. +pub const LABEL_VERSION: &str = "version"; + /// Combination of a signature verification mechanism, and a standard container image reference. /// #[derive(Debug, Clone, PartialEq, Eq)] @@ -292,6 +296,25 @@ pub fn merge_default_container_proxy_opts_with_isolation( Ok(()) } +/// Convenience helper to return the labels, if present. +pub(crate) fn labels_of( + config: &oci_spec::image::ImageConfiguration, +) -> Option<&HashMap> { + config.config().as_ref().and_then(|c| c.labels().as_ref()) +} + +/// Retrieve the version number from an image configuration. +pub fn version_for_config(config: &oci_spec::image::ImageConfiguration) -> Option<&str> { + if let Some(labels) = labels_of(config) { + for k in [oci_spec::image::ANNOTATION_VERSION, LABEL_VERSION] { + if let Some(v) = labels.get(k) { + return Some(v.as_str()); + } + } + } + None +} + pub mod deploy; mod encapsulate; pub use encapsulate::*; diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 643100e97..d4fc4786b 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -208,6 +208,11 @@ impl PreparedImport { .chain(self.layers.iter()) } + /// Retrieve the container image version. 
+ pub fn version(&self) -> Option<&str> { + super::version_for_config(&self.config) + } + /// If this image is using any deprecated features, return a message saying so. pub fn deprecated_warning(&self) -> Option<&'static str> { match self.export_layout { @@ -343,7 +348,7 @@ pub(crate) fn parse_manifest_layout<'a>( Vec<&'a Descriptor>, Vec<&'a Descriptor>, )> { - let config_labels = config.config().as_ref().and_then(|c| c.labels().as_ref()); + let config_labels = super::labels_of(config); let bootable_key = *ostree::METADATA_KEY_BOOTABLE; let bootable = config_labels.map_or(false, |l| l.contains_key(bootable_key)); if !bootable { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index ea1491aef..b922906b0 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -689,6 +689,7 @@ async fn impl_test_container_chunked(format: ExportLayout) -> Result<()> { prep.deprecated_warning().is_some() ); assert_eq!(prep.export_layout, format); + assert_eq!(prep.version(), Some("42.0")); let digest = prep.manifest_digest.clone(); assert!(prep.ostree_commit_layer.commit.is_none()); assert_eq!(prep.ostree_layers.len(), nlayers as usize); From df8776609a24809736c08f41852bc49a60fbb32c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 12 Jan 2023 13:52:45 -0500 Subject: [PATCH 526/775] tests: Drop even more export layout bits This was all hardcoded to V1, so just drop the conditionals. --- lib/tests/it/main.rs | 38 +++++++++----------------------------- 1 file changed, 9 insertions(+), 29 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 698307c8f..c0244cd3d 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -4,7 +4,7 @@ use cap_std::fs::{Dir, DirBuilder}; use once_cell::sync::Lazy; use ostree::cap_std; use ostree_ext::chunking::ObjectMetaSized; -use ostree_ext::container::{store, ExportLayout}; +use ostree_ext::container::store; use ostree_ext::container::{ Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport, }; @@ -442,10 +442,7 @@ fn skopeo_inspect_config(imgref: &str) -> Result<oci_spec::image::ImageConfigur -async fn impl_test_container_import_export( - export_format: ExportLayout, - chunked: bool, -) -> Result<()> { +async fn impl_test_container_import_export(chunked: bool) -> Result<()> { let fixture = Fixture::new_v1()?; let testrev = fixture .srcrepo() @@ -476,7 +473,6 @@ async fn impl_test_container_import_export( let opts = ExportOpts { copy_meta_keys: vec!["buildsys.checksum".to_string()], copy_meta_opt_keys: vec!["nosuchvalue".to_string()], - format: export_format, ..Default::default() }; let digest = ostree_ext::container::encapsulate( @@ -611,18 +607,14 @@ async fn impl_test_container_import_export( } /// Parse a chunked container image and validate its structure; particularly -fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Result<()> { +fn validate_chunked_structure(oci_path: &Utf8Path) -> Result<()> { use tar::EntryType::Link; let d = Dir::open_ambient_dir(oci_path, cap_std::ambient_authority())?; let d = ocidir::OciDir::open(&d)?; let manifest = d.read_manifest()?; assert_eq!(manifest.layers().len(), *CONTENTS_V0_LEN); - let ostree_layer = match format { - ExportLayout::V0 => manifest.layers().last(), - ExportLayout::V1 => manifest.layers().first(), - } - .unwrap(); + let ostree_layer = manifest.layers().first().unwrap(); let mut ostree_layer_blob = d .read_blob(ostree_layer) .map(BufReader::new) @@ -632,10 +624,7 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu validate_tar_v1_metadata(&mut ostree_layer_blob)?; // This layer happens to be first - let
pkgdb_layer_offset = match format { - ExportLayout::V0 => 0, - ExportLayout::V1 => 1, - }; + let pkgdb_layer_offset = 1; let pkgdb_layer = &manifest.layers()[pkgdb_layer_offset]; let mut pkgdb_blob = d .read_blob(pkgdb_layer) @@ -657,7 +646,6 @@ fn validate_chunked_structure(oci_path: &Utf8Path, format: ExportLayout) -> Resu #[tokio::test] async fn test_container_chunked() -> Result<()> { - let format = ExportLayout::V1; let nlayers = *CONTENTS_V0_LEN - 1; let mut fixture = Fixture::new_v1()?; @@ -671,7 +659,7 @@ async fn test_container_chunked() -> Result<()> { ImageReference { transport: Transport::OciDir, name, } => validate_chunked_structure(Utf8Path::new(name)).unwrap(), _ => unreachable!(), }; @@ -681,11 +669,7 @@ async fn test_container_chunked() -> Result<()> { store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), store::PrepareResult::Ready(r) => r, }; - assert_eq!( - format == ExportLayout::V0, - prep.deprecated_warning().is_some() - ); - assert_eq!(prep.export_layout, format); + assert!(prep.deprecated_warning().is_none()); assert_eq!(prep.version(), Some("42.0")); let digest = prep.manifest_digest.clone(); assert!(prep.ostree_commit_layer.commit.is_none()); @@ -891,12 +875,8 @@ async fn oci_clone(src: impl AsRef<Utf8Path>, dest: impl AsRef<Utf8Path>) -> Res #[tokio::test] async fn test_container_import_export_v1() { - impl_test_container_import_export(ExportLayout::V1, false) - .await - .unwrap(); - impl_test_container_import_export(ExportLayout::V1, true) - .await - .unwrap(); + impl_test_container_import_export(false).await.unwrap(); + impl_test_container_import_export(true).await.unwrap(); } /// But layers work via the container::write module. From bb7174d63d68ead6ca46fa35151d0699f96e67d8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 17 Jan 2023 11:25:19 -0500 Subject: [PATCH 527/775] Release 0.10.5 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index f9054c917..0584513e0 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.10.4" +version = "0.10.5" rust-version = "1.63.0" [dependencies] From 1959553cdfc1e05d13695aab266ce552a99a2a05 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 8 Feb 2023 17:30:17 -0500 Subject: [PATCH 528/775] container: Add a `copy_as` function, deprecate `copy` For use cases like installers where we want to pull from a source like `containers-storage:` or `oci:`, but have the destination ostree repo be configured to fetch from `docker://` AKA `registry:`. The deploy/import code already supports this via the `target_imgref` flow. But I have a new code path in bootc where I want to copy from one ostree repo to another, while also performing this conversion. While we're here, also ensure this API operates in terms of `ImageReference`, because that's all the ostree storage uses. And finally, deprecate `copy` because of the API wart above; `copy_as` is more general and not much more typing in the simple case of wanting to retain the same image reference.
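A sketch of the intended call pattern (the repositories and names here are placeholders, not from the patch): pull from a local `oci:` source, but record the image in the destination repo under the registry name it will later be fetched from.

```rust
// Hypothetical usage of the new API added below.
let src = ImageReference {
    transport: Transport::OciDir,
    name: "/path/to/build/oci".to_string(),
};
let dest = ImageReference {
    transport: Transport::Registry,
    name: "quay.io/exampleos/exampleos:latest".to_string(),
};
store::copy_as(&src_repo, &src, &dest_repo, &dest).await?;
```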
--- lib/src/cli.rs | 3 ++- lib/src/container/store.rs | 32 +++++++++++++++++++++++++++++--- lib/tests/it/main.rs | 25 ++++++++++++++++++++++++- 3 files changed, 55 insertions(+), 5 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 91df29b32..772c12de5 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -809,7 +809,8 @@ where } => { let src_repo = parse_repo(&src_repo)?; let dest_repo = parse_repo(&dest_repo)?; - crate::container::store::copy(&src_repo, &dest_repo, &imgref).await + let imgref = &imgref.imgref; + crate::container::store::copy_as(&src_repo, imgref, &dest_repo, imgref).await } ContainerImageOpts::ReplaceDetachedMetadata { src, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index d4fc4786b..2bbb0a6d1 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -982,19 +982,36 @@ fn manifest_for_image(repo: &ostree::Repo, imgref: &ImageReference) -> Result<Im /// Copy a downloaded image from one repository to another. #[context("Copying image") ] pub async fn copy( src_repo: &ostree::Repo, dest_repo: &ostree::Repo, imgref: &OstreeImageReference, ) -> Result<()> { - let ostree_ref = ref_for_image(&imgref.imgref)?; - let manifest = manifest_for_image(src_repo, &imgref.imgref)?; + // For historical reasons, this function takes an ostree reference + // as input, but the storage only operates on image references. + let imgref = &imgref.imgref; + copy_as(src_repo, imgref, dest_repo, imgref).await +} + +/// Copy a downloaded image from one repository to another, while also +/// optionally changing the image reference type. +#[context("Copying image")] +pub async fn copy_as( + src_repo: &ostree::Repo, + src_imgref: &ImageReference, + dest_repo: &ostree::Repo, + dest_imgref: &ImageReference, +) -> Result<()> { + let src_ostree_ref = ref_for_image(src_imgref)?; + let src_commit = src_repo.require_rev(&src_ostree_ref)?; + let manifest = manifest_for_image(src_repo, src_imgref)?; // Create a task to copy each layer, plus the final ref let layer_refs = manifest .layers() .iter() .map(ref_for_layer) - .chain(std::iter::once(Ok(ostree_ref))); + .chain(std::iter::once(Ok(src_commit.to_string()))); for ostree_ref in layer_refs { let ostree_ref = ostree_ref?; let src_repo = src_repo.clone(); @@ -1015,6 +1032,15 @@ pub async fn copy( }) .await?; } + + let dest_ostree_ref = ref_for_image(dest_imgref)?; + dest_repo.set_ref_immediate( + None, + &dest_ostree_ref, + Some(&src_commit), + gio::Cancellable::NONE, + )?; + Ok(()) } diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index c0244cd3d..5d05fcd6a 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1081,12 +1081,35 @@ async fn test_container_write_derive() -> Result<()> { None, gio::Cancellable::NONE, )?; - store::copy(fixture.destrepo(), &destrepo2, &derived_ref).await?; + #[allow(deprecated)] + store::copy(fixture.destrepo(), &destrepo2, &derived_ref) + .await + .context("Copying")?; let images = store::list_images(&destrepo2)?; assert_eq!(images.len(), 1); assert_eq!(images[0], derived_ref.imgref.to_string()); + // And test copy_as + let target_name = "quay.io/exampleos/centos:stream9"; + let registry_ref = ImageReference { + transport: Transport::Registry, + name: target_name.to_string(), + }; + store::copy_as( + fixture.destrepo(), + &derived_ref.imgref, + &destrepo2, + &registry_ref, + ) + .await + .context("Copying")?; + + let mut images = store::list_images(&destrepo2)?; + images.sort_unstable(); + assert_eq!(images[0], registry_ref.to_string()); + assert_eq!(images[1], derived_ref.imgref.to_string()); + Ok(()) } From bdadb3b99e84e9bc4c9755f2132f71441949a5ad Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 13 Jan 2023 10:32:36 -0500 Subject: [PATCH 529/775]
container: Add standard OCI version key by default And add an API to allow opting-out of the legacy label. --- lib/src/container/encapsulate.rs | 11 ++++++++++- lib/tests/it/main.rs | 3 +++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 1719278ed..b1cdd319a 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -20,6 +20,9 @@ use std::num::NonZeroU32; use std::path::Path; use tracing::instrument; +/// The label which may be used in addition to the standard OCI label. +pub const LEGACY_VERSION_LABEL: &str = "version"; + /// Type of container image generated #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum ExportLayout { @@ -220,7 +223,10 @@ fn build_oci( .unwrap_or_else(|| crate::chunking::Chunking::new(repo, commit))?; if let Some(version) = commit_meta.lookup::<String>("version")? { - labels.insert("version".into(), version); + if !opts.no_legacy_version_label { + labels.insert(LEGACY_VERSION_LABEL.into(), version.clone()); + } + labels.insert(oci_image::ANNOTATION_VERSION.into(), version); } labels.insert(OSTREE_COMMIT_LABEL.into(), commit.into()); @@ -356,6 +362,9 @@ pub struct ExportOpts { pub max_layers: Option<NonZeroU32>, /// The container image layout pub format: ExportLayout, + // TODO semver-break: remove this + /// Use only the standard OCI version label + pub no_legacy_version_label: bool, } impl ExportOpts { diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index c0244cd3d..9c7a44ed4 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -488,7 +488,10 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { assert!(srcoci_path.exists()); let inspect = skopeo_inspect(&srcoci_imgref.to_string())?; + // Legacy path includes this assert!(inspect.contains(r#""version": "42.0""#)); + // Also include the new standard version + assert!(inspect.contains(r#""org.opencontainers.image.version": "42.0""#)); assert!(inspect.contains(r#""foo": "bar""#)); assert!(inspect.contains(r#""test": "value""#)); assert!(inspect.contains( From 677ac5d7f52ad7c4b70da434d5fca0ad1bad3f68 Mon Sep 17 00:00:00 2001 From: RishabhSaini Date: Thu, 9 Feb 2023 18:27:45 -0500 Subject: [PATCH 530/775] Add ostree-ext-cli container compare image1 image2 Allows comparing the content between two OCI compliant images --- lib/src/cli.rs | 21 ++++++++++++++ lib/src/container/mod.rs | 59 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 772c12de5..fd3cdae84 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -147,6 +147,17 @@ pub(crate) enum ContainerOpts { /// Commands for working with (possibly layered, non-encapsulated) container images. #[clap(subcommand)] Image(ContainerImageOpts), + + /// Compare the contents of two OCI compliant images. + Compare { + /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest + #[clap(value_parser = parse_imgref)] + imgref_old: OstreeImageReference, + + /// Image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos:latest + #[clap(value_parser = parse_imgref)] + imgref_new: OstreeImageReference, + }, } /// Options for container image fetching.
@@ -874,6 +885,16 @@ where Ok(()) } }, + ContainerOpts::Compare { + imgref_old, + imgref_new, + } => { + let (manifest_old, _) = crate::container::fetch_manifest(&imgref_old).await?; + let (manifest_new, _) = crate::container::fetch_manifest(&imgref_new).await?; + let manifest_diff = crate::container::manifest_diff(&manifest_old, &manifest_new); + manifest_diff.print(); + Ok(()) + } }, Opt::ImaSign(ref opts) => ima_sign(opts), #[cfg(feature = "internal-testing-api")] diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index ff666bd13..54dccb276 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -27,6 +27,7 @@ use anyhow::anyhow; +use ostree::glib; use std::borrow::Cow; use std::collections::HashMap; use std::ops::Deref; use std::str::FromStr; @@ -246,6 +247,64 @@ impl std::fmt::Display for OstreeImageReference { } } +/// Represent the difference in content between two OCI compliant Images +#[derive(Debug, Default)] +pub struct ManifestDiff { + all_layers_in_new: Vec<oci_spec::image::Descriptor>, + removed: Vec<oci_spec::image::Descriptor>, + added: Vec<oci_spec::image::Descriptor>, +} + +/// Computes the difference between two OCI compliant images +pub fn manifest_diff( + src: &oci_spec::image::ImageManifest, + dest: &oci_spec::image::ImageManifest, +) -> ManifestDiff { + let src_layers = src + .layers() + .iter() + .map(|l| (l.digest(), l)) + .collect::<HashMap<_, _>>(); + let dest_layers = dest + .layers() + .iter() + .map(|l| (l.digest(), l)) + .collect::<HashMap<_, _>>(); + let mut diff = ManifestDiff::default(); + for (blobid, &descriptor) in src_layers.iter() { + if !dest_layers.contains_key(blobid) { + diff.removed.push(descriptor.clone()); + } + } + for (blobid, &descriptor) in dest_layers.iter() { + diff.all_layers_in_new.push(descriptor.clone()); + if !src_layers.contains_key(blobid) { + diff.added.push(descriptor.clone()); + } + } + diff +} + +impl ManifestDiff { + /// Prints the total, removed and added content between two OCI images + pub fn print(&self) { + let layersum = |layers: &Vec<oci_spec::image::Descriptor>| -> u64 { + layers.iter().map(|layer| layer.size() as u64).sum() + }; + let new_total = &self.all_layers_in_new.len(); + let new_total_size = glib::format_size(layersum(&self.all_layers_in_new)); + let n_removed = &self.removed.len(); + let n_added = &self.added.len(); + let removed_size = layersum(&self.removed); + let removed_size_str = glib::format_size(removed_size); + let added_size = layersum(&self.added); + let added_size_str = glib::format_size(added_size); + println!("Total new layers: {new_total} Size: {new_total_size}"); + println!("Removed layers: {n_removed} Size: {removed_size_str}"); + println!("Added layers: {n_added} Size: {added_size_str}"); + } +} + /// Apply default configuration for container image pulls to an existing configuration. /// For example, if `authfile` is not set, and `auth_anonymous` is `false`, and a global configuration file exists, it will be used.
/// From e657e4f1f826eba8c9597c85546d8985e26d80ed Mon Sep 17 00:00:00 2001 From: RishabhSaini Date: Fri, 10 Feb 2023 12:10:19 -0500 Subject: [PATCH 531/775] Add basic check to --compare-with-build Compares an image against itself to result in a 0 delta --- ci/priv-integration.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 003c37aac..ea5347246 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -69,4 +69,8 @@ fi grep "skopeo too old to pull from containers-storage" err.txt echo "ok pulled from containers storage failed" +ostree-ext-cli container compare ${imgref} ${imgref} > compare.txt +grep "Removed layers: 0 Size: 0 bytes" compare.txt +grep "Added layers: 0 Size: 0 bytes" compare.txt + echo ok privileged integration From eafd2c929b1e29cd772737e2b928746329d33b50 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 10 Feb 2023 14:05:28 -0500 Subject: [PATCH 532/775] Release 0.10.6 Two new APIs, and we now also inject the standard OCI version annotation. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 0584513e0..5ce4cda24 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.10.5" +version = "0.10.6" rust-version = "1.63.0" [dependencies] From 56af8e74ee5d3b042e6581ac2660d397f41f5e93 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 10 Feb 2023 13:43:13 -0500 Subject: [PATCH 533/775] tar: Drop dead code for format version 0 This is unused now. --- lib/src/tar/export.rs | 57 ++----------------------------------------- 1 file changed, 2 insertions(+), 55 deletions(-) diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index c7c0d8680..e35ab11f7 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -95,15 +95,6 @@ pub(crate) fn object_path(objtype: ostree::ObjectType, checksum: &str) -> Utf8Pa format!("{}/repo/objects/{}/{}.{}", OSTREEDIR, first, rest, suffix).into() } -fn v0_xattrs_path(checksum: &str) -> Utf8PathBuf { - format!("{}/repo/xattrs/{}", OSTREEDIR, checksum).into() -} - -fn v0_xattrs_object_path(checksum: &str) -> Utf8PathBuf { - let (first, rest) = checksum.split_at(2); - format!("{}/repo/objects/{}/{}.file.xattrs", OSTREEDIR, first, rest).into() -} - fn v1_xattrs_object_path(checksum: &str) -> Utf8PathBuf { let (first, rest) = checksum.split_at(2); format!("{}/repo/objects/{}/{}.file-xattrs", OSTREEDIR, first, rest).into() @@ -173,11 +164,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { /// The ostree mode bits include the format, tar does not. /// Historically in format version 0 we injected them, so we need to keep doing so. fn filter_mode(&self, mode: u32) -> u32 { - if self.options.format_version == 0 { - mode - } else { - mode & !libc::S_IFMT - } + mode & !libc::S_IFMT } /// Add a directory entry with default permissions (root/root 0755) @@ -250,16 +237,9 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { self.append_default_dir(&path)?; } - // The special `repo/xattrs` directory used in v0 format. - if self.options.format_version == 0 { - let path: Utf8PathBuf = format!("{}/repo/xattrs", OSTREEDIR).into(); - self.append_default_dir(&path)?; - } - // Repository configuration file. 
{ let path = match self.options.format_version { - 0 => format!("{}/config", SYSROOT), 1 => format!("{}/repo/config", OSTREEDIR), n => anyhow::bail!("Unsupported ostree tar format version {}", n), }; @@ -363,30 +343,13 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { fn append_xattrs(&mut self, checksum: &str, xattrs: &glib::Variant) -> Result<bool> { let xattrs_data = xattrs.data_as_bytes(); let xattrs_data = xattrs_data.as_ref(); - if xattrs_data.is_empty() && self.options.format_version == 0 { - return Ok(false); - } let xattrs_checksum = { let digest = openssl::hash::hash(openssl::hash::MessageDigest::sha256(), xattrs_data)?; hex::encode(digest) }; - if self.options.format_version == 0 { - let path = v0_xattrs_path(&xattrs_checksum); - - // Write xattrs content into a separate directory. - if !self.wrote_xattrs.contains(&xattrs_checksum) { - let inserted = self.wrote_xattrs.insert(xattrs_checksum); - debug_assert!(inserted); - self.append_default_data(&path, xattrs_data)?; - } - // Hardlink the object in the repo. - { - let objpath = v0_xattrs_object_path(checksum); - self.append_default_hardlink(&objpath, &path)?; - } - } else if self.options.format_version == 1 { + if self.options.format_version == 1 { let path = v1_xattrs_object_path(&xattrs_checksum); // Write xattrs content into a separate `.file-xattrs` object. @@ -818,22 +781,6 @@ mod tests { } } - #[test] - fn test_v0_xattrs_path() { - let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; - let expected = "sysroot/ostree/repo/xattrs/b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; - let output = v0_xattrs_path(checksum); - assert_eq!(&output, expected); - } - - #[test] - fn test_v0_xattrs_object_path() { - let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; - let expected = "sysroot/ostree/repo/objects/b8/627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7.file.xattrs"; - let output = v0_xattrs_object_path(checksum); - assert_eq!(&output, expected); - } - #[test] fn test_v1_xattrs_object_path() { let checksum = "b8627e3ef0f255a322d2bd9610cfaaacc8f122b7f8d17c0e7e3caafa160f9fc7"; From 333328ebdef08f169755b3fc064fd8303a527bf7 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 2 Mar 2023 17:03:39 -0500 Subject: [PATCH 534/775] ci: We can now pull from containers-storage A new enough skopeo got shipped, so let's update our CI! --- ci/priv-integration.sh | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index ea5347246..f9fbfbd8e 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -62,12 +62,8 @@ grep 'layers already present: ' logs.txt podman pull ${image} ostree --repo="${sysroot}/ostree/repo" init --mode=bare-user -if ostree-ext-cli container image pull ${sysroot}/ostree/repo ostree-unverified-image:containers-storage:${image} 2>err.txt; then - echo "unexpectedly pulled from containers storage?"
- exit 1 -fi -grep "skopeo too old to pull from containers-storage" err.txt -echo "ok pulled from containers storage failed" +ostree-ext-cli container image pull ${sysroot}/ostree/repo ostree-unverified-image:containers-storage:${image} +echo "ok pulled from containers storage" ostree-ext-cli container compare ${imgref} ${imgref} > compare.txt grep "Removed layers: 0 Size: 0 bytes" compare.txt From 65071c91ebe3f61e80bd4d0ef3a8e824cd4b8676 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 1 Mar 2023 13:39:57 -0500 Subject: [PATCH 535/775] deploy: Make `--stateroot` default to `default` See https://github.com/ostreedev/ostree/issues/2794 Having this e.g. be `rhcos` or `fcos` etc. doesn't add any value. --- lib/src/cli.rs | 2 +- lib/src/container/deploy.rs | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index fd3cdae84..339029142 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -281,7 +281,7 @@ pub(crate) enum ContainerImageOpts { sysroot: String, /// Name for the state directory, also known as "osname". - #[clap(long)] + #[clap(long, default_value = ostree_container::deploy::STATEROOT_DEFAULT)] stateroot: String, /// Source image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos@sha256:abcd... diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 65d7c57d1..55bdac382 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -10,6 +10,10 @@ use ostree::glib; /// The key in the OSTree origin which holds a serialized [`super::OstreeImageReference`]. pub const ORIGIN_CONTAINER: &str = "container-image-reference"; +/// The name of the default stateroot. +// xref https://github.com/ostreedev/ostree/issues/2794 +pub const STATEROOT_DEFAULT: &str = "default"; + /// Options configuring deployment. #[derive(Debug, Default)] pub struct DeployOpts<'a> { From dce5e0c7c6ea0090a0712cca4f9a5864ad9c408a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 3 Mar 2023 17:05:57 -0500 Subject: [PATCH 536/775] deploy: Add optional `--image` syntax This clones the nicer bootc syntax which does away with forcing users to understand the image reference strings. --- ci/priv-integration.sh | 8 +++++++ lib/src/cli.rs | 48 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index f9fbfbd8e..26cf495a3 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -23,11 +23,19 @@ fi if test '!' -d "${sysroot}/ostree/deploy/${stateroot}"; then ostree admin os-init "${stateroot}" --sysroot "${sysroot}" fi +# Test the syntax which uses full imgrefs. ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref "${imgref}" ostree admin --sysroot="${sysroot}" status ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" ostree admin --sysroot="${sysroot}" undeploy 0 +# Now test the new syntax which has a nicer --image that defaults to registry. 
+ostree-ext-cli container image deploy --transport registry --sysroot "${sysroot}" \ + --stateroot "${stateroot}" --image "${image}" --no-signature-verification +ostree admin --sysroot="${sysroot}" status +ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" +ostree admin --sysroot="${sysroot}" undeploy 0 + for img in "${image}"; do ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${img}" diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 339029142..1f6798317 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -285,9 +285,26 @@ pub(crate) enum ContainerImageOpts { stateroot: String, /// Source image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos@sha256:abcd... + /// This conflicts with `--image`. + #[clap(long, required_unless_present = "image")] + imgref: Option<String>, + + /// Name of the container image; for the `registry` transport this would be e.g. `quay.io/exampleos/foo:latest`. + /// This conflicts with `--imgref`. + #[clap(long, required_unless_present = "imgref")] + image: Option<String>, + + /// The transport; e.g. registry, oci, oci-archive. The default is `registry`. #[clap(long)] - #[clap(value_parser = parse_imgref)] - imgref: OstreeImageReference, + transport: Option<String>, + + /// Explicitly opt-out of requiring any form of signature verification. + #[clap(long)] + no_signature_verification: bool, + + /// Enable verification via an ostree remote + #[clap(long)] + ostree_remote: Option<String>, #[clap(flatten)] proxyopts: ContainerProxyOpts, @@ -842,6 +859,10 @@ where sysroot, stateroot, imgref, + image, + transport, + no_signature_verification, + ostree_remote, target_imgref, no_imgref, karg, @@ -856,6 +877,29 @@ where let r: Vec<_> = v.iter().map(|s| s.as_str()).collect(); r }); + + let imgref = if let Some(image) = image { + let transport = transport.as_deref().unwrap_or("registry"); + let transport = ostree_container::Transport::try_from(transport)?; + let imgref = ostree_container::ImageReference { + transport, + name: image, + }; + let sigverify = if no_signature_verification { + ostree_container::SignatureSource::ContainerPolicyAllowInsecure + } else if let Some(remote) = ostree_remote.as_ref() { + ostree_container::SignatureSource::OstreeRemote(remote.to_string()) + } else { + ostree_container::SignatureSource::ContainerPolicy + }; + ostree_container::OstreeImageReference { sigverify, imgref } + } else { + // SAFETY: We use the clap required_unless_present flag, so this must be set + // because --image is not. + let imgref = imgref.expect("imgref option should be set"); + imgref.as_str().try_into()? + }; + #[allow(clippy::needless_update)] let options = crate::container::deploy::DeployOpts { kargs: kargs.as_deref(), From 053b7d6cc0c5374a531329c5365ddf52244fff25 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sun, 5 Mar 2023 18:02:21 -0500 Subject: [PATCH 537/775] container/store: Add a well-known ref for holding base images This will allow us to not hardcode rpm-ostree in the future, and will be more extensible in general. This is targeted for use in bootc.
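For example (a hypothetical ref name, following the naming guidance in the comment added below), a tool building derived commits could claim its base image like this:

```rust
// "bootc" is the project-name segment recommended by the new comment; the
// checksum is whatever base commit the tool is building from (placeholder).
let base_ref = "ostree/container/baseimage/bootc/myimage";
repo.transaction_set_ref(None, base_ref, Some(base_commit_checksum));
```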
--- lib/src/container/store.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 2bbb0a6d1..c56bc9863 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -29,6 +29,11 @@ pub use containers_image_proxy::ImageProxyConfig; const LAYER_PREFIX: &str = "ostree/container/blob"; /// The ostree ref prefix for image references. const IMAGE_PREFIX: &str = "ostree/container/image"; +/// The ostree ref prefix for "base" image references that are used by derived images. +/// If you maintain tooling which is locally building derived commits, write a ref +/// with this prefix that is owned by your code. It's a best practice to prefix the +/// ref with the project name, so the final ref may be of the form e.g. `ostree/container/baseimage/bootc/foo`. +const BASE_IMAGE_PREFIX: &str = "ostree/container/baseimage"; /// The key injected into the merge commit for the manifest digest. const META_MANIFEST_DIGEST: &str = "ostree.manifest-digest"; @@ -1056,6 +1061,7 @@ fn list_container_deployment_manifests( let commits = OSTREE_BASE_DEPLOYMENT_REFS .iter() .chain(RPMOSTREE_BASE_REFS) + .chain(std::iter::once(&BASE_IMAGE_PREFIX)) .try_fold( std::collections::HashSet::new(), |mut acc, &p| -> Result<_> { From 80c004d38f31ce16b135b510b984a1781c878ea4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 6 Mar 2023 08:16:17 -0500 Subject: [PATCH 538/775] Release 0.10.7 Colin Walters (5): ci: We can now pull from containers-storage deploy: Make `--stateroot` default to `default` deploy: Add optional `--image` syntax container/store: Add a well-known ref for holding base images Release 0.10.7 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 5ce4cda24..acb9362a6 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.10.6" +version = "0.10.7" rust-version = "1.63.0" [dependencies] From 1c9826630d767aefa6fd373cbcdbf0cf14e0a7df Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 25 Mar 2023 11:09:53 -0400 Subject: [PATCH 539/775] commit: Don't prune `/var` We've designed a conflicting system; the `ostree container commit` command today will nuke `/var`, but in most implementations this will create a whiteout, which the deploy command warns about. Also, too many commands expect `/var/tmp` to exist; so creating *further* derived images after `ostree container commit` is problematic. Instead, let's cave here and prune everything except `/var/tmp`, and also change the deploy code to not warn about empty directories there. Closes: https://github.com/ostreedev/ostree-rs-ext/issues/468 --- lib/src/commit.rs | 46 ++++++++++++++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 12 deletions(-) diff --git a/lib/src/commit.rs b/lib/src/commit.rs index 008317f27..5cb1cb3ec 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -10,6 +10,7 @@ use cap_std::fs::Dir; use cap_std_ext::cap_std; use cap_std_ext::dirext::CapStdExtDirExt; use cap_std_ext::rustix::fs::MetadataExt; +use std::borrow::Cow; use std::convert::TryInto; use std::path::Path; use std::path::PathBuf; @@ -18,16 +19,26 @@ use tokio::task; /// Directories for which we will always remove all content. const FORCE_CLEAN_PATHS: &[&str] = &["run", "tmp", "var/tmp", "var/cache"]; -/// Gather count of non-empty directories. 
Empty directories are removed. -fn process_dir_recurse( +/// Gather count of non-empty directories. Empty directories are removed, +/// except for var/tmp. +fn process_vardir_recurse( root: &Dir, rootdev: u64, path: &Utf8Path, error_count: &mut i32, ) -> Result { - let context = || format!("Validating: {path}"); + let prefix = "var"; + let tmp_name = "tmp"; + let empty_path = path.as_str().is_empty(); + let context = || format!("Validating: {prefix}/{path}"); let mut validated = true; - for entry in root.read_dir(path).with_context(context)? { + let entries = if empty_path { + root.entries() + } else { + root.read_dir(path) + }; + + for entry in entries.with_context(context)? { let entry = entry?; let metadata = entry.metadata()?; if metadata.dev() != rootdev { @@ -36,21 +47,25 @@ fn process_dir_recurse( let name = entry.file_name(); let name = Path::new(&name); let name: &Utf8Path = name.try_into()?; - let path = &path.join(name); + let path = &*if empty_path { + Cow::Borrowed(name) + } else { + Cow::Owned(path.join(name)) + }; if metadata.is_dir() { - if !process_dir_recurse(root, rootdev, path, error_count)? { + if !process_vardir_recurse(root, rootdev, path, error_count)? { validated = false; } } else { validated = false; *error_count += 1; if *error_count < 20 { - eprintln!("Found file: {:?}", path) + eprintln!("Found file: {prefix}/{path}") } } } - if validated { + if validated && !empty_path && path != tmp_name { root.remove_dir(path).with_context(context)?; } Ok(validated) @@ -116,8 +131,8 @@ fn clean_paths_in(root: &Dir, rootdev: u64) -> Result<()> { fn process_var(root: &Dir, rootdev: u64, strict: bool) -> Result<()> { let var = Utf8Path::new("var"); let mut error_count = 0; - if root.try_exists(var)? { - if !process_dir_recurse(root, rootdev, var, &mut error_count)? && strict { + if let Some(vardir) = root.open_dir_optional(var)? { + if !process_vardir_recurse(&vardir, rootdev, "".into(), &mut error_count)? && strict { anyhow::bail!("Found content in {var}"); } } @@ -180,18 +195,25 @@ mod tests { td.create_dir_all(runsystemd)?; td.write(resolvstub, "stub resolv")?; prepare_ostree_commit_in(td).unwrap(); - assert!(!td.try_exists(var)?); + assert!(td.try_exists(var)?); + assert!(td.try_exists(var.join("tmp"))?); + assert!(!td.try_exists(vartmp_foobar)?); assert!(td.try_exists(run)?); assert!(!td.try_exists(runsystemd)?); let systemd = run.join("systemd"); td.create_dir_all(&systemd)?; prepare_ostree_commit_in(td).unwrap(); - assert!(!td.try_exists(var)?); + assert!(td.try_exists(var)?); + assert!(!td.try_exists(&systemd)?); + td.remove_dir_all(&var)?; td.create_dir(&var)?; td.write(var.join("foo"), "somefile")?; assert!(prepare_ostree_commit_in(td).is_err()); + // Right now we don't auto-create var/tmp if it didn't exist, but maybe + // we will in the future. + assert!(!td.try_exists(var.join("tmp"))?); assert!(td.try_exists(var)?); td.write(var.join("foo"), "somefile")?; From 9f905f3f91f622096a67903e0d2bb9f82c0c0382 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 17 Mar 2023 10:35:14 -0400 Subject: [PATCH 540/775] Update to ostree 0.18 This adapts to the changes from https://github.com/ostreedev/ostree/pull/2791 in particular. 
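The main mechanical change, visible throughout the diff below: several accessors that previously returned nullable values now return them directly, so the `.unwrap()`/`.expect()` calls go away. Schematically:

```rust
// Before (ostree 0.17 bindings):
//   let repo = sysroot.repo().unwrap();
//   let checksum = child.checksum().expect("checksum");
// After (ostree 0.18 bindings):
let repo = sysroot.repo();
let checksum = child.checksum();
```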
--- lib/Cargo.toml | 2 +- lib/src/cli.rs | 2 +- lib/src/container/deploy.rs | 2 +- lib/src/diff.rs | 13 ++++++------- lib/src/fixture.rs | 4 ++-- lib/src/tar/import.rs | 2 +- 6 files changed, 12 insertions(+), 13 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index acb9362a6..d0eb29c5f 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -33,7 +33,7 @@ libc = "0.2.92" libsystemd = "0.5.0" oci-spec = "0.5.4" openssl = "0.10.33" -ostree = { features = ["v2022_5", "cap-std-apis"], version = "0.17.0" } +ostree = { features = ["v2022_5", "cap-std-apis"], version = "0.18.0" } pin-project = "1.0" regex = "1.5.4" serde = { features = ["derive"], version = "1.0.125" } diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 1f6798317..f10ebeac2 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -871,7 +871,7 @@ where } => { let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); sysroot.load(gio::Cancellable::NONE)?; - let repo = &sysroot.repo().unwrap(); + let repo = &sysroot.repo(); let kargs = karg.as_deref(); let kargs = kargs.map(|v| { let r: Vec<_> = v.iter().map(|s| s.as_str()).collect(); diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 55bdac382..98080c58e 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -51,7 +51,7 @@ pub async fn deploy( ) -> Result> { let cancellable = ostree::gio::Cancellable::NONE; let options = options.unwrap_or_default(); - let repo = &sysroot.repo().unwrap(); + let repo = &sysroot.repo(); let merge_deployment = sysroot.merge_deployment(Some(stateroot)); let mut imp = super::store::ImageImporter::new(repo, imgref, options.proxy_cfg.unwrap_or_default()) diff --git a/lib/src/diff.rs b/lib/src/diff.rs index ddbaf7a8f..620973eff 100644 --- a/lib/src/diff.rs +++ b/lib/src/diff.rs @@ -101,21 +101,20 @@ fn diff_recurse( from_child.ensure_resolved()?; if is_dir { - let from_contents_checksum = - from_child.tree_get_contents_checksum().expect("checksum"); - let to_contents_checksum = to_child.tree_get_contents_checksum().expect("checksum"); + let from_contents_checksum = from_child.tree_get_contents_checksum(); + let to_contents_checksum = to_child.tree_get_contents_checksum(); if from_contents_checksum != to_contents_checksum { let subpath = format!("{}/", path); diff_recurse(&subpath, diff, &from_child, &to_child)?; } - let from_meta_checksum = from_child.tree_get_metadata_checksum().expect("checksum"); - let to_meta_checksum = to_child.tree_get_metadata_checksum().expect("checksum"); + let from_meta_checksum = from_child.tree_get_metadata_checksum(); + let to_meta_checksum = to_child.tree_get_metadata_checksum(); if from_meta_checksum != to_meta_checksum { diff.changed_dirs.insert(path); } } else { - let from_checksum = from_child.checksum().expect("checksum"); - let to_checksum = to_child.checksum().expect("checksum"); + let from_checksum = from_child.checksum(); + let to_checksum = to_child.checksum(); if from_checksum != to_checksum { diff.changed_files.insert(path); } diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 53df2b9e5..2bb41fc2d 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -241,7 +241,7 @@ pub fn create_dirmeta(path: &Utf8Path, selinux: bool) -> glib::Variant { None }; let xattrs = label.map(|v| v.new_xattrs()); - ostree::create_directory_metadata(&finfo, xattrs.as_ref()).unwrap() + ostree::create_directory_metadata(&finfo, xattrs.as_ref()) } /// Wraps [`create_dirmeta`] and commits it. 
@@ -320,7 +320,7 @@ fn build_mapping_recurse( }); } - let checksum = child.checksum().unwrap().to_string(); + let checksum = child.checksum().to_string(); match ret.map.entry(checksum) { Entry::Vacant(v) => { v.insert(owner); diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index db23609b0..105dae498 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -752,7 +752,7 @@ impl Importer { finfo.set_attribute_uint32("unix::gid", 0); finfo.set_attribute_uint32("unix::mode", libc::S_IFDIR | 0o755); // SAFETY: TODO: This is not a nullable return, fix it in ostree - ostree::create_directory_metadata(&finfo, None).unwrap() + ostree::create_directory_metadata(&finfo, None) } pub(crate) fn finish_import_object_set(self) -> Result { From 1e456897700016803ddd8d16c4f5add9efe68983 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 1 May 2023 13:55:23 -0400 Subject: [PATCH 541/775] Bump MSRV to 1.64 Since it's what ostree requires now, due to one of its dependencies. --- .github/workflows/rust.yml | 2 +- cli/Cargo.toml | 2 +- lib/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 27d2db68d..4d34d7312 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -16,7 +16,7 @@ on: env: CARGO_TERM_COLOR: always # Pinned toolchain for linting - ACTION_LINTS_TOOLCHAIN: 1.63.0 + ACTION_LINTS_TOOLCHAIN: 1.64.0 jobs: tests: diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 8769d05db..e9683ca20 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT OR Apache-2.0" repository = "https://github.com/ostreedev/ostree-rs-ext" readme = "README.md" publish = false -rust-version = "1.63.0" +rust-version = "1.64.0" [dependencies] anyhow = "1.0" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index d0eb29c5f..8745b1585 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -7,7 +7,7 @@ name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" version = "0.10.7" -rust-version = "1.63.0" +rust-version = "1.64.0" [dependencies] anyhow = "1.0" From a880fd0d7414524fa4e9ef3d43eeb582bb6ea3f3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 1 May 2023 14:48:45 -0400 Subject: [PATCH 542/775] lib: Adapt to clippy lint re `then_some` --- lib/src/container/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 54dccb276..b303a1a8b 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -315,7 +315,7 @@ pub fn merge_default_container_proxy_opts( ) -> Result<()> { let user = cap_std_ext::rustix::process::getuid() .is_root() - .then(|| isolation::DEFAULT_UNPRIVILEGED_USER); + .then_some(isolation::DEFAULT_UNPRIVILEGED_USER); merge_default_container_proxy_opts_with_isolation(config, user) } @@ -341,7 +341,7 @@ pub fn merge_default_container_proxy_opts_with_isolation( let isolation_user = config .skopeo_cmd .is_none() - .then(|| isolation_user.as_ref()) + .then_some(isolation_user.as_ref()) .flatten(); if let Some(user) = isolation_user { // Read the default authfile if it exists and pass it via file descriptor From 67b9efffab4202b0b5b608fe2661762e690642ad Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 1 May 2023 14:33:43 -0400 Subject: [PATCH 543/775] Use `oci_spec` re-exported from `containers_image_proxy` This needs to stay in sync, because it's a public API dependency of that project. Motivated by a recent semver bump there. 
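The practical consequence for consumers, sketched below: name oci-spec types through the re-export, so the version in use always matches the one containers-image-proxy (and thus ostree-ext) was built against.

```rust
// Hypothetical downstream use: no direct oci-spec dependency is needed,
// which avoids any risk of a mismatched semver between the two crates.
use ostree_ext::oci_spec::image::ImageManifest;
```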
--- lib/Cargo.toml | 3 +-- lib/src/container/encapsulate.rs | 1 + lib/src/container/mod.rs | 3 ++- lib/src/container/ocidir.rs | 1 + lib/src/container/update_detachedmeta.rs | 1 + lib/src/integrationtest.rs | 1 + lib/src/lib.rs | 2 +- lib/tests/it/main.rs | 1 + 8 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 8745b1585..bfeefd32a 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -11,7 +11,7 @@ rust-version = "1.64.0" [dependencies] anyhow = "1.0" -containers-image-proxy = "0.5.2" +containers-image-proxy = "0.5.3" async-compression = { version = "0.3", features = ["gzip", "tokio"] } bitflags = "1" camino = "1.0.4" @@ -31,7 +31,6 @@ indicatif = "0.17.0" once_cell = "1.9" libc = "0.2.92" libsystemd = "0.5.0" -oci-spec = "0.5.4" openssl = "0.10.33" ostree = { features = ["v2022_5", "cap-std-apis"], version = "0.18.0" } pin-project = "1.0" diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index b1cdd319a..824cc0270 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -9,6 +9,7 @@ use crate::tar as ostree_tar; use anyhow::{anyhow, Context, Result}; use cap_std::fs::Dir; use cap_std_ext::cap_std; +use containers_image_proxy::oci_spec; use flate2::Compression; use fn_error_context::context; use gio::glib; diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index b303a1a8b..f1a733a2b 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -26,8 +26,9 @@ //! for this is [planned but not implemented](https://github.com/ostreedev/ostree-rs-ext/issues/12). use anyhow::anyhow; - +use containers_image_proxy::oci_spec; use ostree::glib; + use std::borrow::Cow; use std::collections::HashMap; use std::ops::Deref; diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 7ba759d66..831069024 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -9,6 +9,7 @@ use camino::Utf8Path; use cap_std::fs::Dir; use cap_std_ext::cap_std; use cap_std_ext::dirext::CapStdExtDirExt; +use containers_image_proxy::oci_spec; use flate2::write::GzEncoder; use fn_error_context::context; use oci_image::MediaType; diff --git a/lib/src/container/update_detachedmeta.rs b/lib/src/container/update_detachedmeta.rs index 4476d3ccf..6d66ea624 100644 --- a/lib/src/container/update_detachedmeta.rs +++ b/lib/src/container/update_detachedmeta.rs @@ -5,6 +5,7 @@ use anyhow::{anyhow, Context, Result}; use camino::Utf8Path; use cap_std::fs::Dir; use cap_std_ext::cap_std; +use containers_image_proxy::oci_spec; use std::io::{BufReader, BufWriter}; /// Given an OSTree container image reference, update the detached metadata (e.g. GPG signature) diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index d56aa95ea..757687b51 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -7,6 +7,7 @@ use anyhow::Result; use camino::Utf8Path; use cap_std::fs::Dir; use cap_std_ext::cap_std; +use containers_image_proxy::oci_spec; use fn_error_context::context; use gio::prelude::*; use oci_spec::image as oci_image; diff --git a/lib/src/lib.rs b/lib/src/lib.rs index f205525e7..3ceaba60e 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -16,7 +16,7 @@ // Re-export our dependencies. See https://gtk-rs.org/blog/2021/06/22/new-release.html // "Dependencies are re-exported". Users will need e.g. `gio::File`, so this avoids // them needing to update matching versions. 
-pub use oci_spec; +pub use containers_image_proxy::oci_spec; pub use ostree; pub use ostree::gio; pub use ostree::gio::glib; diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index a67903d27..45928e22e 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -1,6 +1,7 @@ use anyhow::{Context, Result}; use camino::Utf8Path; use cap_std::fs::{Dir, DirBuilder}; +use containers_image_proxy::oci_spec; use once_cell::sync::Lazy; use ostree::cap_std; use ostree_ext::chunking::ObjectMetaSized; From a74d45fb3cd836ef510786f79c5515e0ae7379e6 Mon Sep 17 00:00:00 2001 From: RishabhSaini Date: Tue, 2 May 2023 14:15:10 -0400 Subject: [PATCH 544/775] chunking: Deduplicate the config history for each layer Fix the repeated name of the first package in the history of the OCI config for each OCI layer --- lib/src/chunking.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 3d41fdf15..605b7d981 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -300,7 +300,7 @@ impl Chunking { 0 => unreachable!(), 1 => Cow::Borrowed(first_name), 2..=5 => { - let r = bin.iter().map(|v| &*v.meta.name).fold( + let r = bin.iter().map(|v| &*v.meta.name).skip(1).fold( String::from(first_name), |mut acc, v| { write!(acc, " and {}", v).unwrap(); From eadde66a45c2ab057291828b202547da8a3d0718 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 8 May 2023 11:45:54 -0400 Subject: [PATCH 545/775] tests: Port to xshell This is better than my own `sh-inline` crate: - It properly supports variable capture from the Rust context which is way more ergonomic - There's higher level helper functions too that allow reading/writing files relative to the context directory - It doesn't depend on cap-std-ext, and we need to bump the semver there - It's maintained by someone else - It has more users --- lib/Cargo.toml | 4 +- lib/src/fixture.rs | 6 +++ lib/src/integrationtest.rs | 16 +++--- lib/tests/it/main.rs | 108 ++++++++++++++++++++++--------------- 4 files changed, 84 insertions(+), 50 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index bfeefd32a..521df3a19 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -46,7 +46,7 @@ tokio-stream = { features = ["sync"], version = "0.1.8" } tracing = "0.1" indoc = { version = "1.0.3", optional = true } -sh-inline = { version = "0.4", features = ["cap-std-ext"], optional = true } +xshell = { version = "0.2", optional = true } [dev-dependencies] quickcheck = "1" @@ -60,4 +60,4 @@ features = ["dox"] [features] docgen = ["clap_mangen"] dox = ["ostree/dox"] -internal-testing-api = ["sh-inline", "indoc"] +internal-testing-api = ["xshell", "indoc"] diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 2bb41fc2d..3b88a2a56 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -415,6 +415,12 @@ impl Fixture { &self.destrepo } + pub fn new_shell(&self) -> Result { + let sh = xshell::Shell::new()?; + sh.change_dir(&self.path); + Ok(sh) + } + // Delete all objects in the destrepo pub fn clear_destrepo(&self) -> Result<()> { self.destrepo() diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 757687b51..8ce6bf445 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -12,6 +12,7 @@ use fn_error_context::context; use gio::prelude::*; use oci_spec::image as oci_image; use ostree::gio; +use xshell::cmd; pub(crate) fn detectenv() -> Result<&'static str> { let r = if is_ostree_container()?
{ @@ -163,12 +164,15 @@ pub(crate) fn test_ima() -> Result<()> { authorityKeyIdentifier=keyid "#}; std::fs::write(fixture.path.join("genkey.config"), config)?; - sh_inline::bash_in!( - &fixture.dir, - "openssl req -new -nodes -utf8 -sha256 -days 36500 -batch \ - -x509 -config genkey.config \ - -outform DER -out ima.der -keyout privkey_ima.pem &>/dev/null" - )?; + let sh = xshell::Shell::new()?; + sh.change_dir(&fixture.path); + cmd!( + sh, + "openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 -config genkey.config -outform DER -out ima.der -keyout privkey_ima.pem" + ) + .ignore_stderr() + .ignore_stdout() + .run()?; let imaopts = crate::ima::ImaOpts { algorithm: "sha256".into(), diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 45928e22e..add8abfec 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -13,13 +13,13 @@ use ostree_ext::ocidir; use ostree_ext::prelude::{Cast, FileExt}; use ostree_ext::tar::TarImportOptions; use ostree_ext::{gio, glib}; -use sh_inline::bash_in; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::io::{BufReader, BufWriter}; use std::os::unix::fs::DirBuilderExt; use std::process::Command; use std::time::SystemTime; +use xshell::cmd; use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0, CONTENTS_V0_LEN}; @@ -91,6 +91,7 @@ async fn test_tar_export_reproducible() -> Result<()> { #[tokio::test] async fn test_tar_import_signed() -> Result<()> { let fixture = Fixture::new_v1()?; + let sh = fixture.new_shell()?; let test_tar = fixture.export_tar()?; let rev = fixture.srcrepo().require_rev(fixture.testref())?; @@ -133,9 +134,13 @@ async fn test_tar_import_signed() -> Result<()> { assert_err_contains(r, r#"Can't check signature: public key not found"#); // And signed correctly - bash_in!(&fixture.dir, - "ostree --repo=dest/repo remote gpg-import --stdin myremote < src/gpghome/key1.asc >/dev/null", - )?; + cmd!( + sh, + "ostree --repo=dest/repo remote gpg-import --stdin myremote" + ) + .stdin(sh.read_file("src/gpghome/key1.asc")?) 
+ .ignore_stdout() + .run()?; let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std()); let imported = ostree_ext::tar::import_tar( fixture.destrepo(), @@ -351,6 +356,7 @@ fn test_tar_export_structure() -> Result<()> { #[tokio::test] async fn test_tar_import_export() -> Result<()> { let fixture = Fixture::new_v1()?; + let sh = fixture.new_shell()?; let p = fixture.export_tar()?; let src_tar = tokio::fs::File::from_std(fixture.dir.open(p)?.into_std()); @@ -363,15 +369,11 @@ async fn test_tar_import_export() -> Result<()> { .unwrap() .as_str() ); - bash_in!( - &fixture.dir, - r#" - ostree --repo=dest/repo ls -R ${imported_commit} >/dev/null - val=$(ostree --repo=dest/repo show --print-detached-metadata-key=my-detached-key ${imported_commit}) - test "${val}" = "'my-detached-value'" - "#, - imported_commit = imported_commit.as_str() - )?; + cmd!(sh, "ostree --repo=dest/repo ls -R {imported_commit}") + .ignore_stdout() + .run()?; + let val = cmd!(sh, "ostree --repo=dest/repo show --print-detached-metadata-key=my-detached-key {imported_commit}").read()?; + assert_eq!(val.as_str(), "'my-detached-value'"); let (root, _) = fixture .destrepo() @@ -389,6 +391,7 @@ async fn test_tar_import_export() -> Result<()> { #[tokio::test] async fn test_tar_write() -> Result<()> { let fixture = Fixture::new_v1()?; + let sh = fixture.new_shell()?; // Test translating /etc to /usr/etc fixture.dir.create_dir_all("tmproot/etc")?; let tmproot = &fixture.dir.open_dir("tmproot")?; @@ -400,16 +403,18 @@ async fn test_tar_write() -> Result<()> { tmpvarlog.write("bar.log", "barlog")?; tmproot.create_dir("boot")?; let tmptar = "testlayer.tar"; - bash_in!(fixture.dir, "tar cf ${tmptar} -C tmproot .", tmptar)?; + cmd!(sh, "tar cf {tmptar} -C tmproot .").run()?; let src = fixture.dir.open(tmptar)?; fixture.dir.remove_file(tmptar)?; let src = tokio::fs::File::from_std(src.into_std()); let r = ostree_ext::tar::write_tar(fixture.destrepo(), src, "layer", None).await?; - bash_in!( - &fixture.dir, - "ostree --repo=dest/repo ls ${layer_commit} /usr/etc/someconfig.conf >/dev/null", - layer_commit = r.commit.as_str() - )?; + let layer_commit = r.commit.as_str(); + cmd!( + sh, + "ostree --repo=dest/repo ls {layer_commit} /usr/etc/someconfig.conf" + ) + .ignore_stdout() + .run()?; assert_eq!(r.filtered.len(), 2); assert_eq!(*r.filtered.get("var").unwrap(), 4); assert_eq!(*r.filtered.get("boot").unwrap(), 1); @@ -445,6 +450,7 @@ fn skopeo_inspect_config(imgref: &str) -> Result Result<()> { let fixture = Fixture::new_v1()?; + let sh = fixture.new_shell()?; let testrev = fixture .srcrepo() .require_rev(fixture.testref()) @@ -568,10 +574,12 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { fixture .destrepo() .remote_add("myremote", None, Some(&opts.end()), gio::Cancellable::NONE)?; - bash_in!( - &fixture.dir, - "ostree --repo=dest/repo remote gpg-import --stdin myremote < src/gpghome/key1.asc", - )?; + cmd!( + sh, + "ostree --repo=dest/repo remote gpg-import --stdin myremote" + ) + .stdin(sh.read_file("src/gpghome/key1.asc")?) 
+ .run()?; let srcoci_verified = OstreeImageReference { sigverify: SignatureSource::OstreeRemote("myremote".to_string()), imgref: srcoci_imgref.clone(), @@ -888,6 +896,7 @@ async fn test_container_import_export_v1() { async fn test_container_write_derive() -> Result<()> { let cancellable = gio::Cancellable::NONE; let fixture = Fixture::new_v1()?; + let sh = fixture.new_shell()?; let base_oci_path = &fixture.path.join("exampleos.oci"); let _digest = ostree_ext::container::encapsulate( fixture.srcrepo(), @@ -1054,17 +1063,26 @@ async fn test_container_write_derive() -> Result<()> { assert_eq!(images.len(), 1); // Verify we have the new file and *not* the old one - bash_in!( - &fixture.dir, - r#"set -x; - ostree --repo=dest/repo ls ${r} /usr/bin/newderivedfile2 >/dev/null - test "$(ostree --repo=dest/repo cat ${r} /usr/bin/newderivedfile)" = "newderivedfile v1" - if ostree --repo=dest/repo ls ${r} /usr/bin/newderivedfile3 2>/dev/null; then - echo oops; exit 1 - fi - "#, - r = import.merge_commit.as_str() - )?; + let merge_commit = import.merge_commit.as_str(); + cmd!( + sh, + "ostree --repo=dest/repo ls {merge_commit} /usr/bin/newderivedfile2" + ) + .ignore_stdout() + .run()?; + let c = cmd!( + sh, + "ostree --repo=dest/repo cat {merge_commit} /usr/bin/newderivedfile" + ) + .read()?; + assert_eq!(c.as_str(), "newderivedfile v1"); + assert!(cmd!( + sh, + "ostree --repo=dest/repo ls {merge_commit} /usr/bin/newderivedfile3" + ) + .ignore_stderr() + .run() + .is_err()); // And there should be no changes on upgrade again. let mut imp = @@ -1122,6 +1140,7 @@ async fn test_container_write_derive() -> Result<()> { #[tokio::test] async fn test_container_write_derive_sysroot_hardlink() -> Result<()> { let fixture = Fixture::new_v1()?; + let sh = fixture.new_shell()?; let baseimg = &fixture.export_container().await?.0; let basepath = &match baseimg.transport { Transport::OciDir => fixture.path.join(baseimg.name.as_str()), @@ -1193,14 +1212,19 @@ async fn test_container_write_derive_sysroot_hardlink() -> Result<()> { let import = imp.import(prep).await.unwrap(); // Verify we have the new file - bash_in!( - &fixture.dir, - r#"set -x; - ostree --repo=dest/repo ls ${r} /usr/bin/bash >/dev/null - test "$(ostree --repo=dest/repo cat ${r} /usr/bin/bash)" = "hello" - "#, - r = import.merge_commit.as_str() - )?; + let merge_commit = import.merge_commit.as_str(); + cmd!( + sh, + "ostree --repo=dest/repo ls {merge_commit} /usr/bin/bash" + ) + .ignore_stdout() + .run()?; + let r = cmd!( + sh, + "ostree --repo=dest/repo cat {merge_commit} /usr/bin/bash" + ) + .read()?; + assert_eq!(r.as_str(), "hello"); Ok(()) } From fc90859665601277096af95258a8e49496dd6541 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 8 May 2023 11:15:10 -0400 Subject: [PATCH 546/775] Bump to cap-std-ext 2.0, use rustix directly In cap-std-ext we made the mistake of making rustix a public API; the 2.0 version fixes that. Add rustix directly here and use it. 
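A hedged sketch of the resulting call pattern (`running_as_root` is a hypothetical function; `getuid` and `is_root` are the same rustix APIs used in the diff below):

    // Depend on rustix directly rather than reaching through the
    // formerly re-exported cap_std_ext::rustix path.
    fn running_as_root() -> bool {
        rustix::process::getuid().is_root()
    }
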
--- lib/Cargo.toml | 3 ++- lib/src/commit.rs | 2 +- lib/src/container/mod.rs | 2 +- lib/src/container/store.rs | 2 +- lib/src/globals.rs | 1 - lib/src/ima.rs | 2 +- lib/src/integrationtest.rs | 2 +- lib/src/selinux.rs | 1 - 8 files changed, 7 insertions(+), 8 deletions(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 521df3a19..29906ae62 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -19,7 +19,7 @@ chrono = "0.4.19" olpc-cjson = "0.1.1" clap = { version= "3.2", features = ["derive"] } clap_mangen = { version = "0.1", optional = true } -cap-std-ext = "1.0" +cap-std-ext = "2.0" cap-tempfile = "1.0" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } fn-error-context = "0.2.0" @@ -35,6 +35,7 @@ openssl = "0.10.33" ostree = { features = ["v2022_5", "cap-std-apis"], version = "0.18.0" } pin-project = "1.0" regex = "1.5.4" +rustix = { version = "0.37.19", features = ["fs", "process"] } serde = { features = ["derive"], version = "1.0.125" } serde_json = "1.0.64" tar = "0.4.38" diff --git a/lib/src/commit.rs b/lib/src/commit.rs index 5cb1cb3ec..b59646fbe 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -9,7 +9,7 @@ use camino::Utf8Path; use cap_std::fs::Dir; use cap_std_ext::cap_std; use cap_std_ext::dirext::CapStdExtDirExt; -use cap_std_ext::rustix::fs::MetadataExt; +use rustix::fs::MetadataExt; use std::borrow::Cow; use std::convert::TryInto; use std::path::Path; diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index f1a733a2b..4a9c21d7f 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -314,7 +314,7 @@ impl ManifestDiff { pub fn merge_default_container_proxy_opts( config: &mut containers_image_proxy::ImageProxyConfig, ) -> Result<()> { - let user = cap_std_ext::rustix::process::getuid() + let user = rustix::process::getuid() .is_root() .then_some(isolation::DEFAULT_UNPRIVILEGED_USER); merge_default_container_proxy_opts_with_isolation(config, user) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index c56bc9863..b8304a80a 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -799,7 +799,7 @@ impl ImageImporter { let repo = self.repo; let state = crate::tokio_util::spawn_blocking_cancellable_flatten( move |cancellable| -> Result> { - use cap_std_ext::rustix::fd::AsRawFd; + use rustix::fd::AsRawFd; let cancellable = Some(cancellable); let repo = &repo; diff --git a/lib/src/globals.rs b/lib/src/globals.rs index e45df1bd7..228e2a680 100644 --- a/lib/src/globals.rs +++ b/lib/src/globals.rs @@ -1,7 +1,6 @@ //! Global functions. 
use super::Result; -use cap_std_ext::rustix; use once_cell::sync::OnceCell; use ostree::glib; use std::fs::File; diff --git a/lib/src/ima.rs b/lib/src/ima.rs index f867e967d..2d286f998 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -5,7 +5,6 @@ use crate::objgv::*; use anyhow::{Context, Result}; use camino::Utf8PathBuf; -use cap_std_ext::rustix::fd::BorrowedFd; use fn_error_context::context; use gio::glib; use gio::prelude::*; @@ -14,6 +13,7 @@ use glib::Variant; use gvariant::aligned_bytes::TryAsAligned; use gvariant::{gv, Marker, Structure}; use ostree::gio; +use rustix::fd::BorrowedFd; use std::collections::{BTreeMap, HashMap}; use std::ffi::CString; use std::fs::File; diff --git a/lib/src/integrationtest.rs b/lib/src/integrationtest.rs index 8ce6bf445..38244f29f 100644 --- a/lib/src/integrationtest.rs +++ b/lib/src/integrationtest.rs @@ -110,7 +110,7 @@ fn test_proxy_auth() -> Result<()> { std::fs::write(authpath, "{}")?; let mut c = ImageProxyConfig::default(); merge(&mut c)?; - if cap_std_ext::rustix::process::getuid().is_root() { + if rustix::process::getuid().is_root() { assert!(c.auth_data.is_some()); } else { assert_eq!(c.authfile.unwrap().as_path(), authpath,); diff --git a/lib/src/selinux.rs b/lib/src/selinux.rs index 9467651eb..35acb7504 100644 --- a/lib/src/selinux.rs +++ b/lib/src/selinux.rs @@ -1,7 +1,6 @@ //! SELinux-related helper APIs. use anyhow::Result; -use cap_std_ext::rustix; use fn_error_context::context; use std::path::Path; From 7d2244cc61d56b57018b090a74ed31ac1f140b56 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 May 2023 08:05:34 -0400 Subject: [PATCH 547/775] Drop unused format-version API Since we're breaking semver we can just drop this. --- lib/src/cli.rs | 8 ++---- lib/src/tar/export.rs | 67 ++++++++++++------------------------------- 2 files changed, 21 insertions(+), 54 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index f10ebeac2..fdbeed526 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -56,8 +56,8 @@ pub(crate) struct ExportOpts { #[clap(long, value_parser)] repo: Utf8PathBuf, - /// The format version. Must be 0 or 1. - #[clap(long)] + /// The format version. Must be 1. + #[clap(long, hidden(true))] format_version: u32, /// The ostree ref or commit to export @@ -430,13 +430,9 @@ async fn tar_import(opts: &ImportOpts) -> Result<()> { /// Export a tar archive containing an ostree commit. fn tar_export(opts: &ExportOpts) -> Result<()> { - if !crate::tar::FORMAT_VERSIONS.contains(&opts.format_version) { - anyhow::bail!("Invalid format version: {}", opts.format_version); - } let repo = parse_repo(&opts.repo)?; #[allow(clippy::needless_update)] let subopts = crate::tar::ExportOptions { - format_version: opts.format_version, ..Default::default() }; crate::tar::export_commit(&repo, opts.rev.as_str(), std::io::stdout(), Some(subopts))?; diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index e35ab11f7..7a914b571 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -2,7 +2,7 @@ use crate::chunking; use crate::objgv::*; -use anyhow::{anyhow, bail, ensure, Context, Result}; +use anyhow::{anyhow, ensure, Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use fn_error_context::context; use gio::glib; @@ -14,14 +14,10 @@ use std::borrow::Borrow; use std::borrow::Cow; use std::collections::HashSet; use std::io::BufReader; -use std::ops::RangeInclusive; /// The repository mode generated by a tar export stream. 
pub const BARE_SPLIT_XATTRS_MODE: &str = "bare-split-xattrs"; -/// The set of allowed format versions. -pub const FORMAT_VERSIONS: RangeInclusive = 1..=1; - // This is both special in the tar stream *and* it's in the ostree commit. const SYSROOT: &str = "sysroot"; // This way the default ostree -> sysroot/ostree symlink works. @@ -71,6 +67,7 @@ struct OstreeTarWriter<'a, W: std::io::Write> { commit_checksum: &'a str, commit_object: glib::Variant, out: &'a mut tar::Builder, + #[allow(dead_code)] options: ExportOptions, wrote_initdirs: bool, /// True if we're only writing directories @@ -141,7 +138,6 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { out: &'a mut tar::Builder, options: ExportOptions, ) -> Result { - anyhow::ensure!(FORMAT_VERSIONS.contains(&options.format_version)); let commit_object = repo.load_commit(commit_checksum)?.0; let r = Self { repo, @@ -239,10 +235,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { // Repository configuration file. { - let path = match self.options.format_version { - 1 => format!("{}/repo/config", OSTREEDIR), - n => anyhow::bail!("Unsupported ostree tar format version {}", n), - }; + let path = format!("{}/repo/config", OSTREEDIR); self.append_default_data(Utf8Path::new(&path), REPO_CONFIG.as_bytes())?; } @@ -349,23 +342,18 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { hex::encode(digest) }; - if self.options.format_version == 1 { - let path = v1_xattrs_object_path(&xattrs_checksum); - - // Write xattrs content into a separate `.file-xattrs` object. - if !self.wrote_xattrs.contains(&xattrs_checksum) { - let inserted = self.wrote_xattrs.insert(xattrs_checksum); - debug_assert!(inserted); - self.append_default_data(&path, xattrs_data)?; - } - // Write a `.file-xattrs-link` which links the file object to - // the corresponding detached xattrs. - { - let link_obj_path = v1_xattrs_link_object_path(checksum); - self.append_default_hardlink(&link_obj_path, &path)?; - } - } else { - bail!("Unknown format version '{}'", self.options.format_version); + let path = v1_xattrs_object_path(&xattrs_checksum); + // Write xattrs content into a separate `.file-xattrs` object. + if !self.wrote_xattrs.contains(&xattrs_checksum) { + let inserted = self.wrote_xattrs.insert(xattrs_checksum); + debug_assert!(inserted); + self.append_default_data(&path, xattrs_data)?; + } + // Write a `.file-xattrs-link` which links the file object to + // the corresponding detached xattrs. + { + let link_obj_path = v1_xattrs_link_object_path(checksum); + self.append_default_hardlink(&link_obj_path, &path)?; } Ok(true) @@ -578,17 +566,8 @@ fn impl_export( } /// Configuration for tar export. -#[derive(Debug, PartialEq, Eq)] -pub struct ExportOptions { - /// Format version; must be in [`FORMAT_VERSIONS`]. - pub format_version: u32, -} - -impl Default for ExportOptions { - fn default() -> Self { - Self { format_version: 1 } - } -} +#[derive(Debug, PartialEq, Eq, Default)] +pub struct ExportOptions; /// Export an ostree commit to an (uncompressed) tar archive stream. 
#[context("Exporting commit")] @@ -638,10 +617,7 @@ pub(crate) fn export_chunk( ) -> Result<()> { // For chunking, we default to format version 1 #[allow(clippy::needless_update)] - let opts = ExportOptions { - format_version: 1, - ..Default::default() - }; + let opts = ExportOptions::default(); let writer = &mut OstreeTarWriter::new(repo, commit, out, opts)?; writer.write_repo_structure()?; write_chunk(writer, chunk) @@ -655,12 +631,7 @@ pub(crate) fn export_final_chunk( remainder: chunking::Chunk, out: &mut tar::Builder, ) -> Result<()> { - // For chunking, we default to format version 1 - #[allow(clippy::needless_update)] - let options = ExportOptions { - format_version: 1, - ..Default::default() - }; + let options = ExportOptions::default(); let writer = &mut OstreeTarWriter::new(repo, commit_checksum, out, options)?; // For the final chunk, output the commit object, plus all ostree metadata objects along with // the containing directories. From b0864b83367e325f17a4f97b358cd2ab7ff7e4b2 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 May 2023 08:22:49 -0400 Subject: [PATCH 548/775] objectsource: Some docs additions Better describe `ContentID`. --- lib/src/objectsource.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/src/objectsource.rs b/lib/src/objectsource.rs index 96d87e501..3e1bccc7a 100644 --- a/lib/src/objectsource.rs +++ b/lib/src/objectsource.rs @@ -31,6 +31,8 @@ mod rcstr_serialize { } /// Identifier for content (e.g. package/layer). Not necessarily human readable. +/// For example in RPMs, this may be a full "NEVRA" i.e. name-epoch:version-release.architecture e.g. kernel-6.2-2.fc38.aarch64 +/// But that's not strictly required as this string should only live in memory and not be persisted. pub type ContentID = Rc; /// Metadata about a component/package. @@ -40,7 +42,8 @@ pub struct ObjectSourceMeta { #[serde(with = "rcstr_serialize")] pub identifier: ContentID, /// Identifier for this source (e.g. package name-version, git repo). - /// Unlike the [`ContentID`], this should be human readable. + /// Unlike the [`ContentID`], this should be human readable. It likely comes from an external source, + /// and may be re-serialized. #[serde(with = "rcstr_serialize")] pub name: Rc, /// Identifier for the *source* of this content; for example, if multiple binary From dc3ac8d310f22e105473aa1d26639f69b6c18f7d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 May 2023 08:45:42 -0400 Subject: [PATCH 549/775] container: Move ManifestDiff to use a constructor In Rust the norm is to use a `fn new` for these types of things. Just going over our public API and looking for improvements. 
--- lib/src/cli.rs | 3 ++- lib/src/container/mod.rs | 52 +++++++++++++++++++++------------------- 2 files changed, 29 insertions(+), 26 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index f10ebeac2..4d8fb30e3 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -935,7 +935,8 @@ where } => { let (manifest_old, _) = crate::container::fetch_manifest(&imgref_old).await?; let (manifest_new, _) = crate::container::fetch_manifest(&imgref_new).await?; - let manifest_diff = crate::container::manifest_diff(&manifest_old, &manifest_new); + let manifest_diff = + crate::container::ManifestDiff::new(&manifest_old, &manifest_new); manifest_diff.print(); Ok(()) } diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 4a9c21d7f..424d304c1 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -256,34 +256,36 @@ pub struct ManifestDiff { added: Vec, } -/// Computes the difference between two OCI compliant images -pub fn manifest_diff( - src: &oci_spec::image::ImageManifest, - dest: &oci_spec::image::ImageManifest, -) -> ManifestDiff { - let src_layers = src - .layers() - .iter() - .map(|l| (l.digest(), l)) - .collect::>(); - let dest_layers = dest - .layers() - .iter() - .map(|l| (l.digest(), l)) - .collect::>(); - let mut diff = ManifestDiff::default(); - for (blobid, &descriptor) in src_layers.iter() { - if !dest_layers.contains_key(blobid) { - diff.removed.push(descriptor.clone()); +impl ManifestDiff { + /// Compute the layer difference between two OCI image manifests. + pub fn new( + src: &oci_spec::image::ImageManifest, + dest: &oci_spec::image::ImageManifest, + ) -> Self { + let src_layers = src + .layers() + .iter() + .map(|l| (l.digest(), l)) + .collect::>(); + let dest_layers = dest + .layers() + .iter() + .map(|l| (l.digest(), l)) + .collect::>(); + let mut diff = ManifestDiff::default(); + for (blobid, &descriptor) in src_layers.iter() { + if !dest_layers.contains_key(blobid) { + diff.removed.push(descriptor.clone()); + } } - } - for (blobid, &descriptor) in dest_layers.iter() { - diff.all_layers_in_new.push(descriptor.clone()); - if !src_layers.contains_key(blobid) { - diff.added.push(descriptor.clone()); + for (blobid, &descriptor) in dest_layers.iter() { + diff.all_layers_in_new.push(descriptor.clone()); + if !src_layers.contains_key(blobid) { + diff.added.push(descriptor.clone()); + } } + diff } - diff } impl ManifestDiff { From 0a71e7c1a9ff3d9d08da44b8d7fe562030298300 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 May 2023 08:47:49 -0400 Subject: [PATCH 550/775] container: Document manifest diff fields On general principle. --- lib/src/container/mod.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 424d304c1..249490b13 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -251,8 +251,11 @@ impl std::fmt::Display for OstreeImageReference { /// Represent the difference in content between two OCI compliant Images #[derive(Debug, Default)] pub struct ManifestDiff { + /// All layers present in the new image. all_layers_in_new: Vec, + /// Layers which are present in the old image but not the new image. removed: Vec, + /// Layers which are present in the new image but not the old image. 
added: Vec, } From a903b6fcbd6df402f4c57564d3a1c0f9b234b6df Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 May 2023 08:35:39 -0400 Subject: [PATCH 551/775] Drop container ExportLayout from public interface Since we're making a semver-incompatible API change, and we don't support V0 anymore, drop the public API touchpoints here. --- lib/src/container/encapsulate.rs | 66 +++++++---------- lib/src/container/store.rs | 94 ++++++++++-------------- lib/src/container/update_detachedmeta.rs | 18 ++--- 3 files changed, 73 insertions(+), 105 deletions(-) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 824cc0270..78802b6ae 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -26,7 +26,7 @@ pub const LEGACY_VERSION_LABEL: &str = "version"; /// Type of container image generated #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum ExportLayout { +pub(crate) enum ExportLayout { /// Actually the second layout now, but the true first one can be parsed as either V0, /// The hopefully final (optionally chunked) container image layout @@ -134,42 +134,34 @@ fn export_chunked( let layers = export_chunks(repo, commit, ociw, chunking.take_chunks(), opts)?; let compression = Some(opts.compression()); - match opts.format { - ExportLayout::V0 => { - let label = opts.format.label(); - anyhow::bail!("This legacy format using the {label} label is no longer supported"); - } - ExportLayout::V1 => { - // In V1, the ostree layer comes first - let mut w = ociw.create_layer(compression)?; - ostree_tar::export_final_chunk(repo, commit, chunking.remainder, &mut w)?; - let w = w.into_inner()?; - let ostree_layer = w.complete()?; - - // Then, we have a label that points to the last chunk. - // Note in the pathological case of a single layer chunked v1 image, this could be the ostree layer. - let last_digest = layers - .last() - .map(|v| &v.0) - .unwrap_or(&ostree_layer) - .uncompressed_sha256 - .clone(); - - // Add the ostree layer - ociw.push_layer(manifest, imgcfg, ostree_layer, description); - // Add the component/content layers - for (layer, name) in layers { - ociw.push_layer(manifest, imgcfg, layer, name.as_str()); - } - // This label (mentioned above) points to the last layer that is part of - // the ostree commit. - labels.insert( - opts.format.label().into(), - format!("sha256:{}", last_digest), - ); - Ok(()) - } + // In V1, the ostree layer comes first + let mut w = ociw.create_layer(compression)?; + ostree_tar::export_final_chunk(repo, commit, chunking.remainder, &mut w)?; + let w = w.into_inner()?; + let ostree_layer = w.complete()?; + + // Then, we have a label that points to the last chunk. + // Note in the pathological case of a single layer chunked v1 image, this could be the ostree layer. + let last_digest = layers + .last() + .map(|v| &v.0) + .unwrap_or(&ostree_layer) + .uncompressed_sha256 + .clone(); + + // Add the ostree layer + ociw.push_layer(manifest, imgcfg, ostree_layer, description); + // Add the component/content layers + for (layer, name) in layers { + ociw.push_layer(manifest, imgcfg, layer, name.as_str()); } + // This label (mentioned above) points to the last layer that is part of + // the ostree commit. 
+ labels.insert( + ExportLayout::V1.label().into(), + format!("sha256:{}", last_digest), + ); + Ok(()) } /// Generate an OCI image from a given ostree root @@ -361,8 +353,6 @@ pub struct ExportOpts { pub copy_meta_opt_keys: Vec, /// Maximum number of layers to use pub max_layers: Option, - /// The container image layout - pub format: ExportLayout, // TODO semver-break: remove this /// Use only the standard OCI version label pub no_legacy_version_label: bool, diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index b8304a80a..f148c78c7 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -184,8 +184,6 @@ impl ManifestLayerState { /// Information about which layers need to be downloaded. #[derive(Debug)] pub struct PreparedImport { - /// The format we found from metadata - pub export_layout: ExportLayout, /// The manifest digest that was found pub manifest_digest: String, /// The deserialized manifest. @@ -220,10 +218,7 @@ impl PreparedImport { /// If this image is using any deprecated features, return a message saying so. pub fn deprecated_warning(&self) -> Option<&'static str> { - match self.export_layout { - ExportLayout::V0 => Some("Image is using v0 export layout, this is deprecated and support will be dropped in the future"), - ExportLayout::V1 => None, - } + None } /// Iterate over all layers paired with their history entry. @@ -347,12 +342,7 @@ fn layer_from_diffid<'a>( pub(crate) fn parse_manifest_layout<'a>( manifest: &'a ImageManifest, config: &ImageConfiguration, -) -> Result<( - ExportLayout, - &'a Descriptor, - Vec<&'a Descriptor>, - Vec<&'a Descriptor>, -)> { +) -> Result<(&'a Descriptor, Vec<&'a Descriptor>, Vec<&'a Descriptor>)> { let config_labels = super::labels_of(config); let bootable_key = *ostree::METADATA_KEY_BOOTABLE; let bootable = config_labels.map_or(false, |l| l.contains_key(bootable_key)); @@ -375,52 +365,49 @@ pub(crate) fn parse_manifest_layout<'a>( }) }); - // Look for the format v1 label - if let Some((layout, target_diffid)) = info { - let target_layer = layer_from_diffid(layout, manifest, config, target_diffid.as_str())?; - let mut chunk_layers = Vec::new(); - let mut derived_layers = Vec::new(); - let mut after_target = false; - // Gather the ostree layer - let ostree_layer = match layout { - ExportLayout::V0 => target_layer, - ExportLayout::V1 => first_layer, - }; - // Now, we need to handle the split differently in chunked v1 vs v0 - match layout { - ExportLayout::V0 => { - let label = layout.label(); - anyhow::bail!("This legacy format using the {label} label is no longer supported"); - } - ExportLayout::V1 => { - for layer in manifest.layers() { - if layer == target_layer { - if after_target { - anyhow::bail!("Multiple entries for {}", layer.digest()); - } - after_target = true; - if layer != ostree_layer { - chunk_layers.push(layer); - } - } else if !after_target { - if layer != ostree_layer { - chunk_layers.push(layer); - } - } else { - derived_layers.push(layer); + let (layout, target_diffid) = info.ok_or_else(|| { + anyhow!( + "No {} label found, not an ostree-bootable container", + ExportLayout::V1.label() + ) + })?; + let target_layer = layer_from_diffid(layout, manifest, config, target_diffid.as_str())?; + let mut chunk_layers = Vec::new(); + let mut derived_layers = Vec::new(); + let mut after_target = false; + // Gather the ostree layer + let ostree_layer = match layout { + ExportLayout::V0 => target_layer, + ExportLayout::V1 => first_layer, + }; + // Now, we need to handle the split differently in 
chunked v1 vs v0 + match layout { + ExportLayout::V0 => { + let label = layout.label(); + anyhow::bail!("This legacy format using the {label} label is no longer supported"); + } + ExportLayout::V1 => { + for layer in manifest.layers() { + if layer == target_layer { + if after_target { + anyhow::bail!("Multiple entries for {}", layer.digest()); + } + after_target = true; + if layer != ostree_layer { + chunk_layers.push(layer); + } + } else if !after_target { + if layer != ostree_layer { + chunk_layers.push(layer); } + } else { + derived_layers.push(layer); } } } - - let r = (layout, ostree_layer, chunk_layers, derived_layers); - return Ok(r); } - // For backwards compatibility, if there's only 1 layer, don't require labels. - // This can be dropped when we drop format version 0 support. - let rest = manifest.layers().iter().skip(1).collect(); - Ok((ExportLayout::V0, first_layer, Vec::new(), rest)) + Ok((ostree_layer, chunk_layers, derived_layers)) } impl ImageImporter { @@ -538,7 +525,7 @@ impl ImageImporter { let config = self.proxy.fetch_config(&self.proxy_img).await?; - let (export_layout, commit_layer, component_layers, remaining_layers) = + let (commit_layer, component_layers, remaining_layers) = parse_manifest_layout(&manifest, &config)?; let query = |l: &Descriptor| query_layer(&self.repo, l.clone()); @@ -553,7 +540,6 @@ impl ImageImporter { .collect::>>()?; let imp = PreparedImport { - export_layout, manifest, manifest_digest, config, diff --git a/lib/src/container/update_detachedmeta.rs b/lib/src/container/update_detachedmeta.rs index 6d66ea624..0e7eba80b 100644 --- a/lib/src/container/update_detachedmeta.rs +++ b/lib/src/container/update_detachedmeta.rs @@ -62,8 +62,7 @@ pub async fn update_detached_metadata( .ok_or_else(|| anyhow!("Image is missing container configuration"))?; // Find the OSTree commit layer we want to replace - let (export_layout, commit_layer, _, _) = - container_store::parse_manifest_layout(&manifest, &config)?; + let (commit_layer, _, _) = container_store::parse_manifest_layout(&manifest, &config)?; let commit_layer_idx = manifest .layers() .iter() @@ -104,17 +103,10 @@ pub async fn update_detached_metadata( config.rootfs_mut().diff_ids_mut()[commit_layer_idx] = out_layer_diffid.clone(); let labels = ctrcfg.labels_mut().get_or_insert_with(Default::default); - match export_layout { - ExportLayout::V0 => { - labels.insert(export_layout.label().into(), out_layer_diffid); - } - ExportLayout::V1 => { - // Nothing to do except in the special case where there's somehow only one - // chunked layer. - if manifest.layers().len() == 1 { - labels.insert(export_layout.label().into(), out_layer_diffid); - } - } + // Nothing to do except in the special case where there's somehow only one + // chunked layer. + if manifest.layers().len() == 1 { + labels.insert(ExportLayout::V1.label().into(), out_layer_diffid); } config.set_config(Some(ctrcfg)); From 06fdbb312b1d7a984c9ca11d8b818ae1b2e75ee3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 May 2023 15:01:11 -0400 Subject: [PATCH 552/775] tests: One run of `cargo clippy --fix` Nothing important, just some unnecessary `&` churn. 
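For reference, the class of change involved, as a hedged standalone sketch (`skopeo_inspect_cmd` is hypothetical): arrays implement `IntoIterator`, so the borrow that clippy flags can simply be dropped:

    use std::process::Command;

    fn skopeo_inspect_cmd(imgref: &str) -> Command {
        let mut c = Command::new("skopeo");
        // Before: c.args(&["inspect", imgref]);
        c.args(["inspect", imgref]);
        c
    }
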
--- lib/tests/it/main.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index add8abfec..799a25f39 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -434,7 +434,7 @@ async fn test_tar_write_tar_layer() -> Result<()> { fn skopeo_inspect(imgref: &str) -> Result { let out = Command::new("skopeo") - .args(&["inspect", imgref]) + .args(["inspect", imgref]) .stdout(std::process::Stdio::piped()) .output()?; Ok(String::from_utf8(out.stdout)?) @@ -442,7 +442,7 @@ fn skopeo_inspect(imgref: &str) -> Result { fn skopeo_inspect_config(imgref: &str) -> Result { let out = Command::new("skopeo") - .args(&["inspect", "--config", imgref]) + .args(["inspect", "--config", imgref]) .stdout(std::process::Stdio::piped()) .output()?; Ok(serde_json::from_slice(&out.stdout)?) @@ -685,7 +685,7 @@ async fn test_container_chunked() -> Result<()> { assert_eq!(prep.version(), Some("42.0")); let digest = prep.manifest_digest.clone(); assert!(prep.ostree_commit_layer.commit.is_none()); - assert_eq!(prep.ostree_layers.len(), nlayers as usize); + assert_eq!(prep.ostree_layers.len(), nlayers); assert_eq!(prep.layers.len(), 0); for layer in prep.layers.iter() { assert!(layer.commit.is_none()); @@ -721,7 +721,7 @@ r usr/bin/bash bash-v0 assert_eq!(to_fetch.len(), 2); assert_eq!(expected_digest, prep.manifest_digest.as_str()); assert!(prep.ostree_commit_layer.commit.is_none()); - assert_eq!(prep.ostree_layers.len(), nlayers as usize); + assert_eq!(prep.ostree_layers.len(), nlayers); let (first, second) = (to_fetch[0], to_fetch[1]); assert!(first.0.commit.is_none()); assert!(second.0.commit.is_none()); @@ -774,7 +774,7 @@ r usr/bin/bash bash-v0 let to_fetch = prep.layers_to_fetch().collect::>>()?; assert_eq!(to_fetch.len(), 1); assert!(prep.ostree_commit_layer.commit.is_some()); - assert_eq!(prep.ostree_layers.len(), nlayers as usize); + assert_eq!(prep.ostree_layers.len(), nlayers); // We want to test explicit layer pruning imp.disable_gc(); @@ -875,8 +875,8 @@ async fn oci_clone(src: impl AsRef, dest: impl AsRef) -> Res // For now we just fork off `cp` and rely on reflinks, but we could and should // explicitly hardlink blobs/sha256 e.g. 
let cmd = tokio::process::Command::new("cp") - .args(&["-a", "--reflink=auto"]) - .args(&[src, dest]) + .args(["-a", "--reflink=auto"]) + .args([src, dest]) .status() .await?; if !cmd.success() { @@ -920,7 +920,7 @@ async fn test_container_write_derive() -> Result<()> { let derived_path = &fixture.path.join("derived.oci"); oci_clone(base_oci_path, derived_path).await?; let temproot = &fixture.path.join("temproot"); - std::fs::create_dir_all(&temproot.join("usr/bin"))?; + std::fs::create_dir_all(temproot.join("usr/bin"))?; let newderivedfile_contents = "newderivedfile v0"; std::fs::write( temproot.join("usr/bin/newderivedfile"), @@ -944,7 +944,7 @@ async fn test_container_write_derive() -> Result<()> { let derived2_path = &fixture.path.join("derived2.oci"); oci_clone(base_oci_path, derived2_path).await?; std::fs::remove_dir_all(temproot)?; - std::fs::create_dir_all(&temproot.join("usr/bin"))?; + std::fs::create_dir_all(temproot.join("usr/bin"))?; std::fs::write(temproot.join("usr/bin/newderivedfile"), "newderivedfile v1")?; std::fs::write( temproot.join("usr/bin/newderivedfile2"), @@ -1204,7 +1204,7 @@ async fn test_container_write_derive_sysroot_hardlink() -> Result<()> { }, }; let mut imp = - store::ImageImporter::new(fixture.destrepo(), &derived_ref, Default::default()).await?; + store::ImageImporter::new(fixture.destrepo(), derived_ref, Default::default()).await?; let prep = match imp.prepare().await.context("Init prep derived")? { store::PrepareResult::AlreadyPresent(_) => panic!("should not be already imported"), store::PrepareResult::Ready(r) => r, @@ -1247,7 +1247,7 @@ async fn test_old_code_parses_new_export() -> Result<()> { fixture.clear_destrepo()?; let destrepo_path = fixture.path.join("dest/repo"); let s = Command::new("ostree") - .args(&[ + .args([ "container", "unencapsulate", "--repo", From 74c56efe0072d359437bdacec87e77489a95ea6f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 May 2023 15:03:21 -0400 Subject: [PATCH 553/775] lib: Ignore incorrect clippy lint about deref+`Lazy` `cargo clippy --fix` fails to compile if we remove the `*` here. --- lib/src/container/encapsulate.rs | 1 + lib/src/fixture.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 824cc0270..31d094069 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -85,6 +85,7 @@ fn commit_meta_to_labels<'a>( } // Copy standard metadata keys `ostree.bootable` and `ostree.linux`. // Bootable is an odd one out in being a boolean. + #[allow(clippy::explicit_auto_deref)] if let Some(v) = meta.lookup::(*ostree::METADATA_KEY_BOOTABLE)? { labels.insert(ostree::METADATA_KEY_BOOTABLE.to_string(), v.to_string()); } diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 3b88a2a56..c26cbd16e 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -496,6 +496,7 @@ impl Fixture { ); metadata.insert("ostree.container-cmd", &vec!["/usr/bin/bash"]); metadata.insert("version", &"42.0"); + #[allow(clippy::explicit_auto_deref)] metadata.insert(*ostree::METADATA_KEY_BOOTABLE, &true); let metadata = metadata.to_variant(); let commit = self.srcrepo.write_commit_with_time( From 700a29f160f4e7b1e30cd43b660c66a0c052a480 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 10 May 2023 15:06:33 -0400 Subject: [PATCH 554/775] tree-wide: Run `cargo clippy --fix` Nothing important here, just the usual spam of unnecessary `&` basically. 
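For context on the one lint that was not auto-fixable (the `Lazy` deref case in the previous patch), a hedged, self-contained sketch; the static here is a stand-in for `ostree::METADATA_KEY_BOOTABLE`, not the real definition:

    use once_cell::sync::Lazy;

    static METADATA_KEY_BOOTABLE: Lazy<&'static str> = Lazy::new(|| "ostree.bootable");

    fn lookup(key: &str) -> bool {
        key == "ostree.bootable"
    }

    fn check() -> bool {
        // clippy's explicit_auto_deref suggests dropping this `*`, but
        // the deref is what converts Lazy<&str> into &str; removing it
        // fails to compile, hence the #[allow] rather than a "fix".
        lookup(*METADATA_KEY_BOOTABLE)
    }
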
--- lib/src/chunking.rs | 2 +- lib/src/cli.rs | 2 +- lib/src/commit.rs | 8 ++++---- lib/src/container/mod.rs | 8 +++++--- lib/src/container/ocidir.rs | 2 +- lib/src/diff.rs | 2 +- lib/src/fixture.rs | 2 +- lib/src/ima.rs | 4 ++-- lib/src/isolation.rs | 2 +- lib/src/objectsource.rs | 2 +- lib/src/tar/export.rs | 12 ++++++------ lib/src/tar/import.rs | 6 +++--- lib/src/tar/write.rs | 4 ++-- 13 files changed, 29 insertions(+), 27 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 605b7d981..873fbb7a2 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -311,7 +311,7 @@ impl Chunking { } n => Cow::Owned(format!("{n} components")), }; - let mut chunk = Chunk::new(&*name); + let mut chunk = Chunk::new(&name); for szmeta in bin { for &obj in rmap.get(&szmeta.meta.identifier).unwrap() { self.remainder.move_obj(&mut chunk, obj.as_str()); diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 34faceb6c..ba3c6f19e 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -659,7 +659,7 @@ async fn container_history(repo: &ostree::Repo, imgref: &ImageReference) -> Resu { let mut remaining = width; for (name, width) in columns.iter() { - print_column(name, *width as usize, &mut remaining); + print_column(name, *width, &mut remaining); } println!(); } diff --git a/lib/src/commit.rs b/lib/src/commit.rs index b59646fbe..de4fd2bfe 100644 --- a/lib/src/commit.rs +++ b/lib/src/commit.rs @@ -91,7 +91,7 @@ fn remove_all_on_mount_recurse(root: &Dir, rootdev: u64, path: &Path) -> Result< } } if !skipped { - root.remove_dir(&path)?; + root.remove_dir(path)?; } Ok(skipped) } @@ -207,8 +207,8 @@ mod tests { assert!(td.try_exists(var)?); assert!(!td.try_exists(&systemd)?); - td.remove_dir_all(&var)?; - td.create_dir(&var)?; + td.remove_dir_all(var)?; + td.create_dir(var)?; td.write(var.join("foo"), "somefile")?; assert!(prepare_ostree_commit_in(td).is_err()); // Right now we don't auto-create var/tmp if it didn't exist, but maybe @@ -221,7 +221,7 @@ mod tests { assert!(td.try_exists(var)?); let nested = Utf8Path::new("var/lib/nested"); - td.create_dir_all(&nested)?; + td.create_dir_all(nested)?; td.write(nested.join("foo"), "test1")?; td.write(nested.join("foo2"), "test2")?; assert!(prepare_ostree_commit_in(td).is_err()); diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 249490b13..e2bb7970d 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -353,7 +353,7 @@ pub fn merge_default_container_proxy_opts_with_isolation( // Read the default authfile if it exists and pass it via file descriptor // which will ensure it's readable when we drop privileges. 
if let Some(authfile) = config.authfile.take() { - config.auth_data = Some(std::fs::File::open(&authfile)?); + config.auth_data = Some(std::fs::File::open(authfile)?); } let cmd = crate::isolation::unprivileged_subprocess("skopeo", user); config.skopeo_cmd = Some(cmd); @@ -510,8 +510,10 @@ mod tests { assert!(c.skopeo_cmd.is_none()); // Verify interaction with explicit isolation - let mut c = ImageProxyConfig::default(); - c.skopeo_cmd = Some(Command::new("skopeo")); + let mut c = ImageProxyConfig { + skopeo_cmd: Some(Command::new("skopeo")), + ..Default::default() + }; super::merge_default_container_proxy_opts_with_isolation(&mut c, Some("foo")).unwrap(); assert_eq!(c.skopeo_cmd.unwrap().get_program(), "skopeo"); } diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 831069024..424ede357 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -256,7 +256,7 @@ impl OciDir { pub fn read_blob(&self, desc: &oci_spec::image::Descriptor) -> Result { let path = Self::parse_descriptor_to_path(desc)?; self.dir - .open(&path) + .open(path) .map_err(Into::into) .map(|f| f.into_std()) } diff --git a/lib/src/diff.rs b/lib/src/diff.rs index 620973eff..a66c17a53 100644 --- a/lib/src/diff.rs +++ b/lib/src/diff.rs @@ -90,7 +90,7 @@ fn diff_recurse( let name = from_info.name(); let name = name.to_str().expect("UTF-8 ostree name"); let path = format!("{prefix}{name}"); - let to_child = to.child(&name); + let to_child = to.child(name); let to_info = query_info_optional(&to_child, queryattrs, queryflags) .context("querying optional to")?; let is_dir = matches!(from_info.file_type(), gio::FileType::Directory); diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index c26cbd16e..5218bc277 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -386,7 +386,7 @@ impl Fixture { let st = std::process::Command::new("tar") .cwd_dir(gpghome) .stdin(Stdio::from(gpgtar)) - .args(&["-azxf", "-"]) + .args(["-azxf", "-"]) .status()?; assert!(st.success()); diff --git a/lib/src/ima.rs b/lib/src/ima.rs index 2d286f998..ca6d8ccdb 100644 --- a/lib/src/ima.rs +++ b/lib/src/ima.rs @@ -132,8 +132,8 @@ impl<'a> CommitRewriter<'a> { proc.current_dir(self.tempdir.path()) .stdout(Stdio::null()) .stderr(Stdio::piped()) - .args(&["ima_sign", "--xattr-user", "--key", self.ima.key.as_str()]) - .args(&["--hashalgo", self.ima.algorithm.as_str()]) + .args(["ima_sign", "--xattr-user", "--key", self.ima.key.as_str()]) + .args(["--hashalgo", self.ima.algorithm.as_str()]) .arg(tempf.path().file_name().unwrap()); let status = proc.output().context("Spawning evmctl")?; if !status.status.success() { diff --git a/lib/src/isolation.rs b/lib/src/isolation.rs index 495af4078..48c6bca73 100644 --- a/lib/src/isolation.rs +++ b/lib/src/isolation.rs @@ -28,7 +28,7 @@ pub(crate) fn unprivileged_subprocess(binary: &str, user: &str) -> Command { return Command::new(binary); } let mut cmd = Command::new("setpriv"); - cmd.args(&[ + cmd.args([ "--no-new-privs", "--init-groups", "--reuid", diff --git a/lib/src/objectsource.rs b/lib/src/objectsource.rs index 3e1bccc7a..d8258c164 100644 --- a/lib/src/objectsource.rs +++ b/lib/src/objectsource.rs @@ -70,7 +70,7 @@ impl Hash for ObjectSourceMeta { impl Borrow for ObjectSourceMeta { fn borrow(&self) -> &str { - &*self.identifier + &self.identifier } } diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs index 7a914b571..839169455 100644 --- a/lib/src/tar/export.rs +++ b/lib/src/tar/export.rs @@ -171,7 +171,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> 
{ h.set_gid(0); h.set_mode(0o755); h.set_size(0); - self.out.append_data(&mut h, &path, &mut std::io::empty())?; + self.out.append_data(&mut h, path, &mut std::io::empty())?; Ok(()) } @@ -188,7 +188,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { h.set_gid(0); h.set_mode(0o644); h.set_size(0); - self.out.append_link(&mut h, &path, &link_target)?; + self.out.append_link(&mut h, path, link_target)?; Ok(()) } @@ -488,7 +488,7 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let (objpath, h) = self.append_content(checksum)?; let subpath = &dirpath.join(name); let subpath = map_path(subpath); - self.append_content_hardlink(&objpath, h, &*subpath)?; + self.append_content_hardlink(&objpath, h, &subpath)?; } } @@ -517,8 +517,8 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> { let dirtree_csum = hex::encode(contents_csum); let subpath = &dirpath.join(name); let subpath = map_path(subpath); - self.append_dir(&*subpath, &metadata)?; - self.append_dirtree(&*subpath, dirtree_csum, false, cancellable)?; + self.append_dir(&subpath, &metadata)?; + self.append_dirtree(&subpath, dirtree_csum, false, cancellable)?; } Ok(()) @@ -671,7 +671,7 @@ pub(crate) fn reinject_detached_metadata>( } let commit_ent = commit_ent.ok_or_else(|| anyhow!("Missing commit object"))?; let commit_path = commit_ent.path()?; - let commit_path = Utf8Path::from_path(&*commit_path) + let commit_path = Utf8Path::from_path(&commit_path) .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", commit_path))?; let (checksum, objtype) = crate::tar::import::Importer::parse_metadata_entry(commit_path)?; assert_eq!(objtype, ostree::ObjectType::Commit); // Should have been verified above diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs index 105dae498..38ce2823e 100644 --- a/lib/src/tar/import.rs +++ b/lib/src/tar/import.rs @@ -198,7 +198,7 @@ impl Importer { return Ok(None); } let orig_path = e.path()?; - let path = Utf8Path::from_path(&*orig_path) + let path = Utf8Path::from_path(&orig_path) .ok_or_else(|| anyhow!("Invalid non-utf8 path {:?}", orig_path))?; // Ignore the regular non-object file hardlinks we inject if let Ok(path) = path.strip_prefix(REPO_PREFIX) { @@ -475,7 +475,7 @@ impl Importer { let link_target = entry .link_name()? .ok_or_else(|| anyhow!("No xattrs link content for {}", checksum))?; - let xattr_target = Utf8Path::from_path(&*link_target) + let xattr_target = Utf8Path::from_path(&link_target) .ok_or_else(|| anyhow!("Invalid non-UTF8 xattrs link {}", checksum))?; parse_xattrs_link_target(xattr_target)? } @@ -515,7 +515,7 @@ impl Importer { let xattr_target = entry .link_name()? 
.ok_or_else(|| anyhow!("No xattrs link content for {}", target))?; - let xattr_target = Utf8Path::from_path(&*xattr_target) + let xattr_target = Utf8Path::from_path(&xattr_target) .ok_or_else(|| anyhow!("Invalid non-UTF8 xattrs link {}", target))?; let xattr_target = xattr_target .file_name() diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs index bce64e4ba..29e0d82c7 100644 --- a/lib/src/tar/write.rs +++ b/lib/src/tar/write.rs @@ -299,7 +299,7 @@ pub async fn write_tar( .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) - .args(&["commit"]); + .args(["commit"]); c.take_fd_n(repofd.clone(), 3); c.arg("--repo=/proc/self/fd/3"); if let Some(sepolicy) = sepolicy.as_ref() { @@ -310,7 +310,7 @@ pub async fn write_tar( "--add-metadata-string=ostree.importer.version={}", env!("CARGO_PKG_VERSION") )); - c.args(&[ + c.args([ "--no-bindings", "--tar-autocreate-parents", "--tree=tar=/proc/self/fd/0", From f450812b9f9b5c00e92544c425c986714f43641c Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 9 Jan 2023 08:53:11 -0500 Subject: [PATCH 555/775] container/store: Write final commit with timestamp I was doing some bootc tests and noticed that the deployment commit was different each time I ran an install. Write the commit with the timestamp of the config (or manifest) to increase reproducibility. --- .github/workflows/rust.yml | 2 +- ci/priv-integration.sh | 30 ++++++++++++++++++++++++++++++ lib/src/container/store.rs | 36 +++++++++++++++++++++++++++++++++++- lib/src/lib.rs | 2 ++ lib/src/utils.rs | 34 ++++++++++++++++++++++++++++++++++ 5 files changed, 102 insertions(+), 2 deletions(-) create mode 100644 lib/src/utils.rs diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 4d34d7312..747111410 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -151,7 +151,7 @@ jobs: runs-on: ubuntu-latest container: image: quay.io/fedora/fedora-coreos:testing-devel - options: "--privileged --pid=host -v /run/systemd:/run/systemd -v /:/run/host" + options: "--privileged --pid=host -v /var/tmp:/var/tmp -v /run/dbus:/run/dbus -v /run/systemd:/run/systemd -v /:/run/host" steps: - name: Checkout repository uses: actions/checkout@v3 diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 26cf495a3..b78a7e9f1 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -14,6 +14,8 @@ old_image=quay.io/cgwalters/fcos:unchunked imgref=ostree-unverified-registry:${image} stateroot=testos +cd $(mktemp -d -p /var/tmp) + set -x if test '!' -e "${sysroot}/ostree"; then @@ -77,4 +79,32 @@ ostree-ext-cli container compare ${imgref} ${imgref} > compare.txt grep "Removed layers: 0 Size: 0 bytes" compare.txt grep "Added layers: 0 Size: 0 bytes" compare.txt +mkdir build +cd build +cat >Dockerfile << EOF +FROM ${image} +RUN touch /usr/share/somefile +EOF +systemd-run -dP --wait podman build -t localhost/fcos-derived . 
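A condensed, hedged sketch of the approach (`commit_timestamp` is illustrative; the real logic lands in `timestamp_of_manifest_or_config` in the diff below and also consults the manifest's `org.opencontainers.image.created` annotation):

    // Prefer a timestamp carried by the image metadata; only fall back
    // to "now" when none parses, so re-importing the same image yields
    // the same commit.
    fn commit_timestamp(created: Option<&str>) -> u64 {
        created
            .and_then(|t| chrono::DateTime::parse_from_rfc3339(t).ok())
            .map(|t| t.timestamp() as u64)
            .unwrap_or_else(|| chrono::offset::Utc::now().timestamp() as u64)
    }
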
+derived_img=oci:/var/tmp/derived.oci
+systemd-run -dP --wait skopeo copy containers-storage:localhost/fcos-derived "${derived_img}"
+
+# Prune to reset state
+ostree refs ostree/container/image --delete
+
+repo="${sysroot}/ostree/repo"
+images=$(ostree container image list --repo "${repo}" | wc -l)
+test "${images}" -eq 1
+ostree-ext-cli container image deploy --sysroot "${sysroot}" \
+  --stateroot "${stateroot}" --imgref ostree-unverified-image:"${derived_img}"
+imgref=$(ostree refs --repo=${repo} ostree/container/image | head -1)
+img_commit=$(ostree --repo=${repo} rev-parse ostree/container/image/${imgref})
+ostree-ext-cli container image remove --repo "${repo}" "${derived_img}"
+
+ostree-ext-cli container image deploy --sysroot "${sysroot}" \
+  --stateroot "${stateroot}" --imgref ostree-unverified-image:"${derived_img}"
+img_commit2=$(ostree --repo=${repo} rev-parse ostree/container/image/${imgref})
+test "${img_commit}" = "${img_commit2}"
+echo "ok deploy derived container identical revs"
+
 echo ok privileged integration
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index b8304a80a..f8d15c704 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -8,6 +8,7 @@ use super::*;
 
 use crate::logging::system_repo_journal_print;
 use crate::refescape;
+use crate::utils::ResultExt;
 use anyhow::{anyhow, Context};
 use containers_image_proxy::{ImageProxy, OpenedImage};
 use fn_error_context::context;
@@ -423,6 +424,29 @@ pub(crate) fn parse_manifest_layout<'a>(
     Ok((ExportLayout::V0, first_layer, Vec::new(), rest))
 }
 
+/// Find the timestamp of the manifest (or config), ignoring errors.
+fn timestamp_of_manifest_or_config(
+    manifest: &ImageManifest,
+    config: &ImageConfiguration,
+) -> Option<u64> {
+    // The manifest timestamp seems to not be widely used, but let's
+    // try it in preference to the config one.
+    let timestamp = manifest
+        .annotations()
+        .as_ref()
+        .and_then(|a| a.get(oci_image::ANNOTATION_CREATED))
+        .or_else(|| config.created().as_ref());
+    // Try to parse the timestamp
+    timestamp
+        .map(|t| {
+            chrono::DateTime::parse_from_rfc3339(t)
+                .context("Failed to parse manifest timestamp")
+                .map(|t| t.timestamp() as u64)
+        })
+        .transpose()
+        .log_err_default()
+}
+
 impl ImageImporter {
     /// Create a new importer.
    #[context("Creating importer")]
@@ -795,6 +819,8 @@ impl ImageImporter {
         metadata.insert(META_FILTERED, filtered);
         let metadata = metadata.to_variant();
 
+        let timestamp = timestamp_of_manifest_or_config(&import.manifest, &import.config)
+            .unwrap_or_else(|| chrono::offset::Utc::now().timestamp() as u64);
         // Destructure to transfer ownership to thread
         let repo = self.repo;
         let state = crate::tokio_util::spawn_blocking_cancellable_flatten(
@@ -866,7 +892,15 @@ impl ImageImporter {
                     .context("Writing mtree")?;
                 let merged_root = merged_root.downcast::<ostree::RepoFile>().unwrap();
                 let merged_commit = repo
-                    .write_commit(None, None, None, Some(&metadata), &merged_root, cancellable)
+                    .write_commit_with_time(
+                        None,
+                        None,
+                        None,
+                        Some(&metadata),
+                        &merged_root,
+                        timestamp as u64,
+                        cancellable,
+                    )
                     .context("Writing commit")?;
                 if !self.no_imgref {
                     repo.transaction_set_ref(None, &ostree_ref, Some(merged_commit.as_str()));
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index 3ceaba60e..54730c773 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -52,6 +52,8 @@ pub(crate) mod objgv;
 #[cfg(feature = "internal-testing-api")]
 pub mod ostree_manual;
 
+mod utils;
+
 #[cfg(feature = "docgen")]
 mod docgen;
 
diff --git a/lib/src/utils.rs b/lib/src/utils.rs
new file mode 100644
index 000000000..c7821df26
--- /dev/null
+++ b/lib/src/utils.rs
@@ -0,0 +1,34 @@
+pub(crate) trait ResultExt<T, E: std::fmt::Display> {
+    /// Return the Ok value unchanged. In the err case, log it, and call the closure to compute the default
+    fn log_err_or_else<F>(self, default: F) -> T
+    where
+        F: FnOnce() -> T;
+    /// Return the Ok value unchanged. In the err case, log it, and return the default value
+    fn log_err_default(self) -> T
+    where
+        T: Default;
+}
+
+impl<T, E: std::fmt::Display> ResultExt<T, E> for Result<T, E> {
+    #[track_caller]
+    fn log_err_or_else<F>(self, default: F) -> T
+    where
+        F: FnOnce() -> T,
+    {
+        match self {
+            Ok(r) => r,
+            Err(e) => {
+                tracing::debug!("{e}");
+                default()
+            }
+        }
+    }
+
+    #[track_caller]
+    fn log_err_default(self) -> T
+    where
+        T: Default,
+    {
+        self.log_err_or_else(|| Default::default())
+    }
+}

From 9e933f4f2e9c6d13910922a79a6f8b0cadb34d1d Mon Sep 17 00:00:00 2001
From: RishabhSaini
Date: Sat, 13 May 2023 17:22:11 -0400
Subject: [PATCH 556/775] store: Change order of layers to ostree_commit,
 packaged layers
 main: Correctly identify the updated layers

---
 lib/src/container/store.rs | 7 +++----
 lib/tests/it/main.rs       | 5 ++++-
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 545ddbc6c..58cee1750 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -204,11 +204,10 @@ pub struct PreparedImport {
 }
 
 impl PreparedImport {
-    /// Iterate over all layers; the ostree split object layers, the commit layer, and any non-ostree layers.
+    /// Iterate over all layers; the commit layer, the ostree split object layers, and any non-ostree layers.
     pub fn all_layers(&self) -> impl Iterator<Item = &ManifestLayerState> {
-        self.ostree_layers
-            .iter()
-            .chain(std::iter::once(&self.ostree_commit_layer))
+        std::iter::once(&self.ostree_commit_layer)
+            .chain(self.ostree_layers.iter())
             .chain(self.layers.iter())
     }
 
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 799a25f39..63eda8724 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -725,7 +725,10 @@ r usr/bin/bash bash-v0
     let (first, second) = (to_fetch[0], to_fetch[1]);
     assert!(first.0.commit.is_none());
     assert!(second.0.commit.is_none());
-    assert_eq!(first.1, "testlink");
+    assert_eq!(
+        first.1,
+        "ostree export of commit 38ab1f9da373a0184b0b48db6e280076ab4b5d4691773475ae24825aae2272d4"
+    );
     assert_eq!(second.1, "bash");
 
     assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1);

From 5fe187c31c429b42006cb7256bc73909df91d6fd Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 15 May 2023 09:37:35 -0400
Subject: [PATCH 557/775] tests: Also verify manifest digest

Just a drive-by cleanup; unused variables are usually a red flag.
Let's just add an assertion here.
---
 lib/tests/it/main.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 63eda8724..2627678df 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -691,7 +691,8 @@ async fn test_container_chunked() -> Result<()> {
         assert!(layer.commit.is_none());
     }
     assert_eq!(digest, expected_digest);
-    let _import = imp.import(prep).await.context("Init pull derived").unwrap();
+    let import = imp.import(prep).await.context("Init pull derived").unwrap();
+    assert_eq!(import.manifest_digest.as_str(), digest);
 
     assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1);
 
From bb3043c2a355851784dcf1e0450d7282d7c064e5 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 15 May 2023 09:47:41 -0400
Subject: [PATCH 558/775] tests: Add a test for layers_with_history()

Motivated by https://github.com/ostreedev/ostree-rs-ext/issues/480
---
 lib/tests/it/main.rs | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 2627678df..86b41b2d7 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -691,6 +691,25 @@ async fn test_container_chunked() -> Result<()> {
         assert!(layer.commit.is_none());
     }
     assert_eq!(digest, expected_digest);
+    {
+        let mut layer_history = prep.layers_with_history();
+        assert!(layer_history
+            .next()
+            .unwrap()?
+            .1
+            .created_by()
+            .as_ref()
+            .unwrap()
+            .starts_with("ostree export"));
+        assert!(layer_history
+            .nth(6)
+            .unwrap()?
+            .1
+            .created_by()
+            .as_ref()
+            .unwrap()
+            .starts_with("testlink"));
+    }
     let import = imp.import(prep).await.context("Init pull derived").unwrap();
     assert_eq!(import.manifest_digest.as_str(), digest);
 
From ee7127f2492596ab4f81cc2bcae262314a77cffb Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 15 May 2023 13:13:54 -0400
Subject: [PATCH 559/775] container/store: Fix one clippy lint

---
 lib/src/container/store.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 58cee1750..e66c33ee8 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -883,7 +883,7 @@ impl ImageImporter {
                         None,
                         Some(&metadata),
                         &merged_root,
-                        timestamp as u64,
+                        timestamp,
                         cancellable,
                     )
                     .context("Writing commit")?;

From 5159164f091ca48d2e8ba2da31b86659b732dc79 Mon Sep 17 00:00:00 2001
From: RishabhSaini
Date: Fri, 9 Dec 2022 17:08:18 -0500
Subject: [PATCH 560/775] chunking: Bin packing algorithm that minimizes layer
 deltas using historical builds

Revamp basic_packing to follow the prior packing structure if the
--prior-build flag exists. This simply modifies existing layers with
upgrades/downgrades/removal of packages. The last layer contains any
newly added packages.

In the case where the --prior-build flag does not exist, the frequency of
updates of the packages (frequencyinfo) and their size are utilized to
segment packages into different partitions (all combinations of low,
medium, high frequency and low, medium, high size). The partition that
each package falls into is decided by its deviation from the mean. Then
the packages are allotted to different layers to ensure that:
1) low frequency packages don't mix with high frequency packages
2) high sized packages are allotted separate bins
3) low sized packages can be put together in the same bin

This is also known as the multi-objective bin packing problem with
constraints, a.k.a. the multiple knapsack problem. The objectives are
conflicting given our constraints, and hence a compromise is made to
minimize layer deltas while respecting the hard limit on the number of
layers that kernel overlayfs can handle.
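For orientation, the following is a minimal, hedged sketch of the
median-absolute-deviation classification described above. It is not the code
in this patch (the names median and partition_by_mad are simplified
assumptions, and it assumes a non-empty input); the real logic lands in
lib/src/chunking.rs and lib/src/statistics.rs below.

    // Sketch: split sizes into (low, medium, high) around the median,
    // using the median absolute deviation (MAD) as the spread measure.
    fn median(sorted: &[f64]) -> f64 {
        let n = sorted.len();
        if n % 2 == 1 {
            sorted[n / 2]
        } else {
            0.5 * (sorted[n / 2 - 1] + sorted[n / 2])
        }
    }

    // Anything more than `threshold` MADs from the median is an outlier.
    fn partition_by_mad(sizes: &[u64], threshold: f64) -> (Vec<u64>, Vec<u64>, Vec<u64>) {
        let mut sorted: Vec<f64> = sizes.iter().map(|&s| s as f64).collect();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
        let med = median(&sorted);
        let mut devs: Vec<f64> = sorted.iter().map(|s| (s - med).abs()).collect();
        devs.sort_by(|a, b| a.partial_cmp(b).unwrap());
        let mad = median(&devs);
        // Keep the lower limit non-negative, as the patch does.
        let low_limit = 0.5 * (med - threshold * mad).abs();
        let high_limit = med + threshold * mad;
        let (mut low, mut mid, mut high) = (Vec::new(), Vec::new(), Vec::new());
        for &s in sizes {
            let sf = s as f64;
            if sf >= high_limit {
                high.push(s);
            } else if sf <= low_limit {
                low.push(s);
            } else {
                mid.push(s);
            }
        }
        (low, mid, high)
    }

MAD is used for the first pass because package sizes are heavy-tailed: a few
very large packages would skew a plain mean, while median-based limits stay
stable. The medium partition is then re-split using mean and standard
deviation, which the new get_partitions_with_threshold() implements.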
---
 lib/src/chunking.rs                           | 630 ++++++++++++++++--
 lib/src/cli.rs                                |   2 +-
 lib/src/container/encapsulate.rs              |  48 +-
 lib/src/container/mod.rs                      |   4 +
 lib/src/container/ocidir.rs                   |   5 +-
 lib/src/fixture.rs                            |  11 +-
 .../fedora-coreos-contentmeta.json.gz         | Bin 10233 -> 11361 bytes
 lib/src/lib.rs                                |   1 +
 lib/src/objectsource.rs                       |   6 +-
 lib/src/statistics.rs                         | 109 +++
 lib/tests/it/main.rs                          |  17 +-
 11 files changed, 745 insertions(+), 88 deletions(-)
 create mode 100644 lib/src/statistics.rs

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 873fbb7a2..c8fdc333e 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -3,15 +3,20 @@
 // SPDX-License-Identifier: Apache-2.0 OR MIT
 
 use std::borrow::{Borrow, Cow};
-use std::collections::{BTreeMap, BTreeSet, HashMap};
+use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet};
 use std::fmt::Write;
+use std::hash::{Hash, Hasher};
 use std::num::NonZeroU32;
 use std::rc::Rc;
+use std::time::Instant;
 
+use crate::container::CONTENT_ANNOTATION;
 use crate::objectsource::{ContentID, ObjectMeta, ObjectMetaMap, ObjectSourceMeta};
 use crate::objgv::*;
+use crate::statistics;
 use anyhow::{anyhow, Result};
 use camino::Utf8PathBuf;
+use containers_image_proxy::oci_spec;
 use gvariant::aligned_bytes::TryAsAligned;
 use gvariant::{Marker, Structure};
 use ostree::{gio, glib};
@@ -24,12 +29,17 @@ pub(crate) const MAX_CHUNKS: u32 = 64;
 
 type RcStr = Rc<str>;
 pub(crate) type ChunkMapping = BTreeMap<RcStr, (u64, Vec<Utf8PathBuf>)>;
+// TODO type PackageSet = HashSet<RcStr>;
+
+const LOW_PARTITION: &str = "2ls";
+const HIGH_PARTITION: &str = "1hs";
 
 #[derive(Debug, Default)]
 pub(crate) struct Chunk {
     pub(crate) name: String,
     pub(crate) content: ChunkMapping,
     pub(crate) size: u64,
+    pub(crate) packages: Vec<String>,
 }
 
 #[derive(Debug, Deserialize, Serialize)]
@@ -42,6 +52,20 @@ pub struct ObjectSourceMetaSized {
     size: u64,
 }
 
+impl Hash for ObjectSourceMetaSized {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.meta.identifier.hash(state);
+    }
+}
+
+impl Eq for ObjectSourceMetaSized {}
+
+impl PartialEq for ObjectSourceMetaSized {
+    fn eq(&self, other: &Self) -> bool {
+        self.meta.identifier == other.meta.identifier
+    }
+}
+
 /// Extend content source metadata with sizes.
 #[derive(Debug)]
 pub struct ObjectMetaSized {
@@ -243,10 +267,11 @@ impl Chunking {
         repo: &ostree::Repo,
         rev: &str,
         meta: ObjectMetaSized,
-        max_layers: Option<NonZeroU32>,
+        max_layers: &Option<NonZeroU32>,
+        prior_build_metadata: Option<&oci_spec::image::ImageManifest>,
     ) -> Result<Self> {
         let mut r = Self::new(repo, rev)?;
-        r.process_mapping(meta, max_layers)?;
+        r.process_mapping(meta, max_layers, prior_build_metadata)?;
         Ok(r)
     }
 
@@ -260,7 +285,8 @@ impl Chunking {
     pub fn process_mapping(
         &mut self,
         meta: ObjectMetaSized,
-        max_layers: Option<NonZeroU32>,
+        max_layers: &Option<NonZeroU32>,
+        prior_build_metadata: Option<&oci_spec::image::ImageManifest>,
     ) -> Result<()> {
         self.max = max_layers
             .unwrap_or(NonZeroU32::new(MAX_CHUNKS).unwrap())
@@ -291,16 +317,27 @@ impl Chunking {
             .unwrap();
 
         // TODO: Compute bin packing in a better way
-        let packing = basic_packing(sizes, NonZeroU32::new(self.max).unwrap());
+        let start = Instant::now();
+        let packing = basic_packing(
+            sizes,
+            NonZeroU32::new(self.max).unwrap(),
+            prior_build_metadata,
+        )?;
+        let duration = start.elapsed();
+        tracing::debug!("Time elapsed in packing: {:#?}", duration);
 
         for bin in packing.into_iter() {
-            let first = bin[0];
-            let first_name = &*first.meta.name;
             let name = match bin.len() {
-                0 => unreachable!(),
-                1 => Cow::Borrowed(first_name),
+                0 => Cow::Borrowed("Reserved for new packages"),
+                1 => {
+                    let first = bin[0];
+                    let first_name = &*first.meta.identifier;
+                    Cow::Borrowed(first_name)
+                }
                 2..=5 => {
-                    let r = bin.iter().map(|v| &*v.meta.name).skip(1).fold(
+                    let first = bin[0];
+                    let first_name = &*first.meta.identifier;
+                    let r = bin.iter().map(|v| &*v.meta.identifier).skip(1).fold(
                         String::from(first_name),
                         |mut acc, v| {
                             write!(acc, " and {}", v).unwrap();
@@ -312,14 +349,13 @@ impl Chunking {
                 n => Cow::Owned(format!("{n} components")),
             };
             let mut chunk = Chunk::new(&name);
+            chunk.packages = bin.iter().map(|v| String::from(&*v.meta.name)).collect();
             for szmeta in bin {
                 for &obj in rmap.get(&szmeta.meta.identifier).unwrap() {
                     self.remainder.move_obj(&mut chunk, obj.as_str());
                 }
             }
-            if !chunk.content.is_empty() {
-                self.chunks.push(chunk);
-            }
+            self.chunks.push(chunk);
         }
 
         assert_eq!(self.remainder.content.len(), 0);
@@ -364,79 +400,366 @@ impl Chunking {
     }
 }
 
-type ChunkedComponents<'a> = Vec<&'a ObjectSourceMetaSized>;
-
+#[cfg(test)]
 fn components_size(components: &[&ObjectSourceMetaSized]) -> u64 {
     components.iter().map(|k| k.size).sum()
 }
 
 /// Compute the total size of a packing
 #[cfg(test)]
-fn packing_size(packing: &[ChunkedComponents]) -> u64 {
+fn packing_size(packing: &[Vec<&ObjectSourceMetaSized>]) -> u64 {
     packing.iter().map(|v| components_size(v)).sum()
 }
 
-fn sort_packing(packing: &mut [ChunkedComponents]) {
-    packing.sort_by(|a, b| {
-        let a: u64 = components_size(a);
-        let b: u64 = components_size(b);
-        b.cmp(&a)
+///Given a certain threshold, divide a list of packages into all combinations
+///of (high, medium, low) size and (high, medium, low) frequency, using the
+///following outlier detection methods:
+///- Median and Median Absolute Deviation Method
+///  Aggressively detects outliers in size and classifies them by
+///  high, medium, low. The high size and low size are separate partitions
+///  and deserve bins of their own
+///- Mean and Standard Deviation Method
+///  The medium partition from the previous step is less aggressively
+///  classified by using mean for both size and frequency
+
+//Assumes components is sorted by descending size
+fn get_partitions_with_threshold(
+    components: Vec<&ObjectSourceMetaSized>,
+    limit_hs_bins: usize,
+    threshold: f64,
+) -> Option<BTreeMap<String, Vec<&ObjectSourceMetaSized>>> {
+    let mut partitions: BTreeMap<String, Vec<&ObjectSourceMetaSized>> = BTreeMap::new();
+    let mut med_size: Vec<&ObjectSourceMetaSized> = Vec::new();
+    let mut high_size: Vec<&ObjectSourceMetaSized> = Vec::new();
+
+    let mut sizes: Vec<u64> = components.iter().map(|a| a.size).collect();
+    let (median_size, mad_size) = statistics::median_absolute_deviation(&mut sizes)?;
+
+    //Avoids lower limit being negative
+    let size_low_limit = 0.5 * f64::abs(median_size - threshold * mad_size);
+    let size_high_limit = median_size + threshold * mad_size;
+
+    for pkg in components {
+        let size = pkg.size as f64;
+
+        //high size (hs)
+        if size >= size_high_limit {
+            high_size.push(pkg);
+        }
+        //low size (ls)
+        else if size <= size_low_limit {
+            partitions
+                .entry(LOW_PARTITION.to_string())
+                .and_modify(|bin| bin.push(pkg))
+                .or_insert_with(|| vec![pkg]);
+        }
+        //medium size (ms)
+        else {
+            med_size.push(pkg);
+        }
+    }
+
+    //Extra hs packages
+    let mut remaining_pkgs: Vec<_> = high_size.drain(limit_hs_bins..).collect();
+    assert_eq!(high_size.len(), limit_hs_bins);
+
+    //Concatenate extra hs packages + med_sizes to keep it descending sorted
+    remaining_pkgs.append(&mut med_size);
+    partitions.insert(HIGH_PARTITION.to_string(), high_size);
+
+    //Ascending sorted by frequency, so each partition within ms is freq sorted
+    remaining_pkgs.sort_by(|a, b| {
+        a.meta
+            .change_frequency
+            .partial_cmp(&b.meta.change_frequency)
+            .unwrap()
     });
+    let med_sizes: Vec<u64> = remaining_pkgs.iter().map(|a| a.size).collect();
+    let med_frequencies: Vec<u64> = remaining_pkgs
+        .iter()
+        .map(|a| a.meta.change_frequency.into())
+        .collect();
+
+    let med_mean_freq = statistics::mean(&med_frequencies)?;
+    let med_stddev_freq = statistics::std_deviation(&med_frequencies)?;
+    let med_mean_size = statistics::mean(&med_sizes)?;
+    let med_stddev_size = statistics::std_deviation(&med_sizes)?;
+
+    //Avoids lower limit being negative
+    let med_freq_low_limit = 0.5f64 * f64::abs(med_mean_freq - threshold * med_stddev_freq);
+    let med_freq_high_limit = med_mean_freq + threshold * med_stddev_freq;
+    let med_size_low_limit = 0.5f64 * f64::abs(med_mean_size - threshold * med_stddev_size);
+    let med_size_high_limit = med_mean_size + threshold * med_stddev_size;
+
+    for pkg in remaining_pkgs {
+        let size = pkg.size as f64;
+        let freq = pkg.meta.change_frequency as f64;
+
+        let size_name;
+        if size >= med_size_high_limit {
+            size_name = "hs";
+        } else if size <= med_size_low_limit {
+            size_name = "ls";
+        } else {
+            size_name = "ms";
+        }
+
+        //Numbered to maintain order of partitions in a BTreeMap of hf, mf, lf
+        let freq_name;
+        if freq >= med_freq_high_limit {
+            freq_name = "3hf";
+        } else if freq <= med_freq_low_limit {
+            freq_name = "5lf";
+        } else {
+            freq_name = "4mf";
+        }
+
+        let bucket = format!("{freq_name}_{size_name}");
+        partitions
+            .entry(bucket.to_string())
+            .and_modify(|bin| bin.push(pkg))
+            .or_insert_with(|| vec![pkg]);
+    }
+
+    for (name, pkgs) in &partitions {
+        tracing::debug!("{:#?}: {:#?}", name, pkgs.len());
+    }
+
+    Some(partitions)
+}
 
 /// Given a set of components with size metadata (e.g. boxes of a certain size)
 /// and a number of bins (possible container layers) to use, determine which components
 /// go in which bin. This algorithm is pretty simple:
-///
-/// - order by size
-/// - If we have fewer components than bins, we're done
-/// - Take the "tail" (all components past maximum), and group by source package
-/// - If we have fewer components than bins, we're done
-/// - Take the whole tail and group them toether (this is the overly simplistic part)
-fn basic_packing(components: &[ObjectSourceMetaSized], bins: NonZeroU32) -> Vec<ChunkedComponents> {
-    // let total_size: u64 = components.iter().map(|v| v.size).sum();
-    // let avg_size: u64 = total_size / components.len() as u64;
+
+// Total available bins = n
+//
+// 1 bin for all the u32_max frequency pkgs
+// 1 bin for all newly added pkgs
+// 1 bin for all low size pkgs
+//
+// 60% of n-3 bins for high size pkgs
+// 40% of n-3 bins for medium size pkgs
+//
+// If HS bins > limit, spill the excess over into MS
+// If MS bins > limit, fold by merging 2 bins from the end
+//
+fn basic_packing<'a>(
+    components: &'a [ObjectSourceMetaSized],
+    bin_size: NonZeroU32,
+    prior_build_metadata: Option<&oci_spec::image::ImageManifest>,
+) -> Result<Vec<Vec<&'a ObjectSourceMetaSized>>> {
     let mut r = Vec::new();
-    // And handle the easy case of enough bins for all components
-    // TODO: Possibly try to split off large files?
-    if components.len() <= bins.get() as usize {
-        r.extend(components.iter().map(|v| vec![v]));
-        return r;
-    }
-    // Create a mutable copy
     let mut components: Vec<_> = components.iter().collect();
-    // Iterate over the component tail, folding by source id
-    let mut by_src = HashMap::<_, Vec<&ObjectSourceMetaSized>>::new();
-    // Take the tail off components, then build up mapping from srcid -> Vec<component>
-    for component in components.split_off(bins.get() as usize) {
-        by_src
-            .entry(&component.meta.srcid)
-            .or_default()
-            .push(component);
+    let before_processing_pkgs_len = components.len();
+
+    //If the current rpm-ostree commit to be encapsulated is not the one in which packing structure changes, then
+    //  Flatten out prior_build_metadata to view all the packages in prior build as a single vec
+    //  Compare the flattened vector to components to see if pkgs added, updated,
+    //  removed or kept same
+    //  if pkgs added, then add them to the last bin of prior
+    //  if pkgs removed, then remove them from the prior[i]
+    //  iterate through prior[i] and make bins according to the name in nevra of pkgs to update
+    //  required packages
+    //else if pkg structure to be changed || prior build not specified
+    //  Recompute optimal packaging structure (compute partitions, place packages and optimize build)
+
+    if let Some(prior_build) = prior_build_metadata {
+        tracing::debug!("Keeping old package structure");
+
+        //1st layer is skipped as packing doesn't manage the ostree_commit layer
+        let curr_build: Result<Vec<Vec<String>>> = prior_build
+            .layers()
+            .iter()
+            .skip(1)
+            .map(|layer| -> Result<_> {
+                let annotation_layer = layer
+                    .annotations()
+                    .as_ref()
+                    .and_then(|annos| annos.get(CONTENT_ANNOTATION))
+                    .ok_or_else(|| anyhow!("Missing {CONTENT_ANNOTATION} on prior build"))?;
+                Ok(annotation_layer.split(',').map(ToOwned::to_owned).collect())
+            })
+            .collect();
+        let mut curr_build = curr_build?;
+
+        // View the packages as unordered sets for lookups and differencing
+        let prev_pkgs_set: HashSet<String> = curr_build
+            .iter()
+            .flat_map(|v| v.iter().cloned())
+            .filter(|name| !name.is_empty())
+            .collect();
+        let curr_pkgs_set: HashSet<String> = components
+            .iter()
+            .map(|pkg| pkg.meta.name.to_string())
+            .collect();
+
+        //Handle added packages
+        if let Some(last_bin) = curr_build.last_mut() {
+            let added = curr_pkgs_set.difference(&prev_pkgs_set);
+            last_bin.retain(|name| !name.is_empty());
+            last_bin.extend(added.into_iter().cloned());
+        } else {
+            panic!("No empty last bin for added packages");
+        }
+
+        //Handle removed packages
+        let removed: HashSet<&String> = prev_pkgs_set.difference(&curr_pkgs_set).collect();
+        for bin in curr_build.iter_mut() {
+            bin.retain(|pkg| !removed.contains(pkg));
+        }
+
+        //Handle updated packages
+        let mut name_to_component: HashMap<String, &ObjectSourceMetaSized> = HashMap::new();
+        for component in &components {
+            name_to_component
+                .entry(component.meta.name.to_string())
+                .or_insert(component);
+        }
+        let mut modified_build: Vec<Vec<&ObjectSourceMetaSized>> = Vec::new();
+        for bin in curr_build {
+            let mut mod_bin = Vec::new();
+            for pkg in bin {
+                mod_bin.push(name_to_component[&pkg]);
+            }
+            modified_build.push(mod_bin);
+        }
+
+        //Verify all packages are included
+        let after_processing_pkgs_len: usize = modified_build.iter().map(|b| b.len()).sum();
+        assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
+        assert!(modified_build.len() <= bin_size.get() as usize);
+        return Ok(modified_build);
     }
-    // Take all the non-tail (largest) components, and append them first
-    r.extend(components.into_iter().map(|v| vec![v]));
-    // Add the tail
-    r.extend(by_src.into_values());
-    // And order the new list
-    sort_packing(&mut r);
-    // It's possible that merging components gave us enough space; if so
-    // we're done!
-    if r.len() <= bins.get() as usize {
-        return r;
+
+    tracing::debug!("Creating new packing structure");
+
+    //Handle trivial case of no pkgs < bins
+    if before_processing_pkgs_len < bin_size.get() as usize {
+        components.into_iter().for_each(|pkg| r.push(vec![pkg]));
+        if before_processing_pkgs_len > 0 {
+            let new_pkgs_bin: Vec<&ObjectSourceMetaSized> = Vec::new();
+            r.push(new_pkgs_bin);
+        }
+        return Ok(r);
     }
-    let last = (bins.get().checked_sub(1).unwrap()) as usize;
-    // The "tail" is components past our maximum. For now, we simply group all of that together as a single unit.
-    if let Some(tail) = r.drain(last..).reduce(|mut a, b| {
-        a.extend(b.into_iter());
-        a
-    }) {
-        r.push(tail);
+    let mut max_freq_components: Vec<&ObjectSourceMetaSized> = Vec::new();
+    components.retain(|pkg| {
+        let retain: bool = pkg.meta.change_frequency != u32::MAX;
+        if !retain {
+            max_freq_components.push(pkg);
+        }
+        retain
+    });
+    let components_len_after_max_freq = components.len();
+    match components_len_after_max_freq {
+        0 => (),
+        _ => {
+            //Defining limits of each bin
+            let limit_ls_bins = 1usize;
+            let limit_new_bins = 1usize;
+            let _limit_new_pkgs = 0usize;
+            let limit_max_frequency_pkgs = max_freq_components.len();
+            let limit_max_frequency_bins = limit_max_frequency_pkgs.min(1);
+            let limit_hs_bins = (0.6
+                * (bin_size.get()
+                    - (limit_ls_bins + limit_new_bins + limit_max_frequency_bins) as u32)
+                    as f32)
+                .floor() as usize;
+            let limit_ms_bins = (bin_size.get()
+                - (limit_hs_bins + limit_ls_bins + limit_new_bins + limit_max_frequency_bins)
+                    as u32) as usize;
+            let partitions = get_partitions_with_threshold(components, limit_hs_bins, 2f64)
+                .expect("Partitioning components into sets");
+
+            let limit_ls_pkgs = match partitions.get(LOW_PARTITION) {
+                Some(n) => n.len(),
+                None => 0usize,
+            };
+
+            let pkg_per_bin_ms: usize =
+                (components_len_after_max_freq - limit_hs_bins - limit_ls_pkgs)
+                    .checked_div(limit_ms_bins)
+                    .expect("number of bins should be >= 4");
+
+            //Bins assignment
+            for (partition, pkgs) in partitions.iter() {
+                if partition == HIGH_PARTITION {
+                    for pkg in pkgs {
+                        r.push(vec![*pkg]);
+                    }
+                } else if partition == LOW_PARTITION {
+                    let mut bin: Vec<&ObjectSourceMetaSized> = Vec::new();
+                    for pkg in pkgs {
+                        bin.push(*pkg);
+                    }
+                    r.push(bin);
+                } else {
+                    let mut bin: Vec<&ObjectSourceMetaSized> = Vec::new();
+                    for (i, pkg) in pkgs.iter().enumerate() {
+                        if bin.len() < pkg_per_bin_ms {
+                            bin.push(*pkg);
+                        } else {
+                            r.push(bin.clone());
+                            bin.clear();
+                            bin.push(*pkg);
+                        }
+                        if i == pkgs.len() - 1 && !bin.is_empty() {
+                            r.push(bin.clone());
+                            bin.clear();
+                        }
+                    }
+                }
+            }
+            tracing::debug!("Bins before optimization: {}", r.len());
+
+            //Despite allocating a certain number of pkgs per bin in the MS partitions, the
+            //hard limit on the number of MS bins can be exceeded. This is because pkg_per_bin_ms
+            //is only an upper limit and there is no lower limit. Thus, if a partition in MS has
+            //only 1 pkg but pkg_per_bin_ms > 1, then the entire bin will have 1 pkg. This
+            //prevents partition mixing.
+            //
+            //Address the MS bins limit breach by merging internal MS partitions.
+            //The partitions in MS are merged beginning from the end, so as to not mix hf bins
+            //with lf bins. The bins are kept in this order: hf, mf, lf by design.
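+            //Worked example: with bin_size = 6 and one max-frequency bin,
+            //the loop below merges until r.len() <= 6 - 1 - 1 = 4, leaving
+            //room for the max-frequency bin and the new-packages bin that
+            //are pushed after this match.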
+            while r.len() > (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins) {
+                for i in (limit_ls_bins + limit_hs_bins..r.len() - 1)
+                    .step_by(2)
+                    .rev()
+                {
+                    if r.len()
+                        <= (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins)
+                    {
+                        break;
+                    }
+                    let prev = &r[i - 1];
+                    let curr = &r[i];
+                    let mut merge: Vec<&ObjectSourceMetaSized> = Vec::new();
+                    merge.extend(prev.iter());
+                    merge.extend(curr.iter());
+                    r.remove(i);
+                    r.remove(i - 1);
+                    r.insert(i, merge);
+                }
+            }
+            tracing::debug!("Bins after optimization: {}", r.len());
+        }
     }
-    assert!(r.len() <= bins.get() as usize);
-    r
+    if !max_freq_components.is_empty() {
+        r.push(max_freq_components);
+    }
+
+    let new_pkgs_bin: Vec<&ObjectSourceMetaSized> = Vec::new();
+    r.push(new_pkgs_bin);
+    let mut after_processing_pkgs_len = 0;
+    r.iter().for_each(|bin| {
+        after_processing_pkgs_len += bin.len();
+    });
+    assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
+    assert!(r.len() <= bin_size.get() as usize);
+    Ok(r)
 }
 
 #[cfg(test)]
@@ -449,7 +772,7 @@ mod test {
     fn test_packing_basics() -> Result<()> {
         // null cases
         for v in [1u32, 7].map(|v| NonZeroU32::new(v).unwrap()) {
-            assert_eq!(basic_packing(&[], v).len(), 0);
+            assert_eq!(basic_packing(&[], v, None).unwrap().len(), 0);
        }
         Ok(())
     }
@@ -460,7 +783,8 @@ mod test {
             serde_json::from_reader(flate2::read::GzDecoder::new(FCOS_CONTENTMETA))?;
         let total_size = contentmeta.iter().map(|v| v.size).sum::<u64>();
 
-        let packing = basic_packing(&contentmeta, NonZeroU32::new(MAX_CHUNKS).unwrap());
+        let packing =
+            basic_packing(&contentmeta, NonZeroU32::new(MAX_CHUNKS).unwrap(), None).unwrap();
         assert!(!contentmeta.is_empty());
         // We should fit into the assigned chunk size
         assert_eq!(packing.len() as u32, MAX_CHUNKS);
@@ -469,4 +793,178 @@ mod test {
         assert_eq!(total_size, packed_total_size);
         Ok(())
     }
+
+    fn create_manifest(prev_expected_structure: Vec<Vec<&str>>) -> oci_spec::image::ImageManifest {
+        let mut p = prev_expected_structure
+            .iter()
+            .map(|b| {
+                b.iter()
+                    .map(|p| p.split(".").collect::<Vec<&str>>()[0].to_string())
+                    .collect()
+            })
+            .collect();
+        let mut metadata_with_ostree_commit = vec![vec![String::from("ostree_commit")]];
+        metadata_with_ostree_commit.append(&mut p);
+
+        let config = oci_spec::image::DescriptorBuilder::default()
+            .media_type(oci_spec::image::MediaType::ImageConfig)
+            .size(7023)
+            .digest("sha256:imageconfig")
+            .build()
+            .expect("build config descriptor");
+
+        let layers: Vec<oci_spec::image::Descriptor> = metadata_with_ostree_commit
+            .iter()
+            .map(|l| {
+                oci_spec::image::DescriptorBuilder::default()
+                    .media_type(oci_spec::image::MediaType::ImageLayerGzip)
+                    .size(100)
+                    .digest(format!("sha256:{}", l.len()))
+                    .annotations(HashMap::from([(
+                        CONTENT_ANNOTATION.to_string(),
+                        l.join(","),
+                    )]))
+                    .build()
+                    .expect("build layer")
+            })
+            .collect();
+
+        let image_manifest = oci_spec::image::ImageManifestBuilder::default()
+            .schema_version(oci_spec::image::SCHEMA_VERSION)
+            .config(config)
+            .layers(layers)
+            .build()
+            .expect("build image manifest");
+        image_manifest
+    }
+
+    #[test]
+    fn test_advanced_packing() -> Result<()> {
+        //Step 1: Initial build (packing structure computed)
+        let contentmeta_v0: Vec<ObjectSourceMetaSized> = vec![
+            vec![1, u32::MAX, 100000],
+            vec![2, u32::MAX, 99999],
+            vec![3, 30, 99998],
+            vec![4, 100, 99997],
+            vec![10, 51, 1000],
+            vec![8, 50, 500],
+            vec![9, 1, 200],
+            vec![11, 100000, 199],
+            vec![6, 30, 2],
+            vec![7, 30, 1],
+        ]
+        .iter()
+        .map(|data| ObjectSourceMetaSized {
+            meta: ObjectSourceMeta {
+                identifier: RcStr::from(format!("pkg{}.0", data[0])),
+                name: RcStr::from(format!("pkg{}", data[0])),
+                srcid: RcStr::from(format!("srcpkg{}", data[0])),
+                change_time_offset: 0,
+                change_frequency: data[1],
+            },
+            size: data[2] as u64,
+        })
+        .collect();
+
+        let packing = basic_packing(
+            &contentmeta_v0.as_slice(),
+            NonZeroU32::new(6).unwrap(),
+            None,
+        )
+        .unwrap();
+        let structure: Vec<Vec<&str>> = packing
+            .iter()
+            .map(|bin| bin.iter().map(|pkg| &*pkg.meta.identifier).collect())
+            .collect();
+        let v0_expected_structure = vec![
+            vec!["pkg3.0"],
+            vec!["pkg4.0"],
+            vec!["pkg6.0", "pkg7.0", "pkg11.0"],
+            vec!["pkg9.0", "pkg8.0", "pkg10.0"],
+            vec!["pkg1.0", "pkg2.0"],
+            vec![],
+        ];
+        assert_eq!(structure, v0_expected_structure);
+
+        //Step 2: Derive packing structure from the last build
+
+        let mut contentmeta_v1: Vec<ObjectSourceMetaSized> = contentmeta_v0;
+        //Upgrade pkg1.0 to 1.1
+        contentmeta_v1[0].meta.identifier = RcStr::from("pkg1.1");
+        //Remove pkg7
+        contentmeta_v1.remove(contentmeta_v1.len() - 1);
+        //Add pkg5
+        contentmeta_v1.push(ObjectSourceMetaSized {
+            meta: ObjectSourceMeta {
+                identifier: RcStr::from("pkg5.0"),
+                name: RcStr::from("pkg5"),
+                srcid: RcStr::from("srcpkg5"),
+                change_time_offset: 0,
+                change_frequency: 42,
+            },
+            size: 100000,
+        });
+
+        let image_manifest_v0 = create_manifest(v0_expected_structure);
+        let packing_derived = basic_packing(
+            &contentmeta_v1.as_slice(),
+            NonZeroU32::new(6).unwrap(),
+            Some(&image_manifest_v0),
+        )
+        .unwrap();
+        let structure_derived: Vec<Vec<&str>> = packing_derived
+            .iter()
+            .map(|bin| bin.iter().map(|pkg| &*pkg.meta.identifier).collect())
+            .collect();
+        let v1_expected_structure = vec![
+            vec!["pkg3.0"],
+            vec!["pkg4.0"],
+            vec!["pkg6.0", "pkg11.0"],
+            vec!["pkg9.0", "pkg8.0", "pkg10.0"],
+            vec!["pkg1.1", "pkg2.0"],
+            vec!["pkg5.0"],
+        ];
+
+        assert_eq!(structure_derived, v1_expected_structure);
+
+        //Step 3: Another update on the derived build, where the pkg in the last bin updates
+
+        let mut contentmeta_v2: Vec<ObjectSourceMetaSized> = contentmeta_v1;
+        //Upgrade pkg5.0 to 5.1
+        contentmeta_v2[9].meta.identifier = RcStr::from("pkg5.1");
+        //Add pkg12
+        contentmeta_v2.push(ObjectSourceMetaSized {
+            meta: ObjectSourceMeta {
+                identifier: RcStr::from("pkg12.0"),
+                name: RcStr::from("pkg12"),
+                srcid: RcStr::from("srcpkg12"),
+                change_time_offset: 0,
+                change_frequency: 42,
+            },
+            size: 100000,
+        });
+
+        let image_manifest_v1 = create_manifest(v1_expected_structure);
+        let packing_derived = basic_packing(
+            &contentmeta_v2.as_slice(),
+            NonZeroU32::new(6).unwrap(),
+            Some(&image_manifest_v1),
+        )
+        .unwrap();
+        let structure_derived: Vec<Vec<&str>> = packing_derived
+            .iter()
+            .map(|bin| bin.iter().map(|pkg| &*pkg.meta.identifier).collect())
+            .collect();
+        let v2_expected_structure = vec![
+            vec!["pkg3.0"],
+            vec!["pkg4.0"],
+            vec!["pkg6.0", "pkg11.0"],
+            vec!["pkg9.0", "pkg8.0", "pkg10.0"],
+            vec!["pkg1.1", "pkg2.0"],
+            vec!["pkg5.1", "pkg12.0"],
+        ];
+
+        assert_eq!(structure_derived, v2_expected_structure);
+        Ok(())
+    }
 }
diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index ba3c6f19e..f5a3ef687 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -584,7 +584,7 @@ async fn container_export(
         ..Default::default()
     };
     let pushed =
-        crate::container::encapsulate(repo, rev, &config, Some(opts), None, imgref).await?;
+        crate::container::encapsulate(repo, rev, &config, None, Some(opts), None, imgref).await?;
     println!("{}", pushed);
     Ok(())
 }
diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index 2e37c97bc..23e164c02 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -1,7 +1,7 @@
 //! APIs for creating container images from OSTree commits
 
 use super::ocidir::{Layer, OciDir};
-use super::{ocidir, OstreeImageReference, Transport};
+use super::{ocidir, OstreeImageReference, Transport, CONTENT_ANNOTATION};
 use super::{ImageReference, SignatureSource, OSTREE_COMMIT_LABEL};
 use crate::chunking::{Chunk, Chunking, ObjectMetaSized};
 use crate::container::skopeo;
@@ -104,7 +104,7 @@ fn export_chunks(
     ociw: &mut OciDir,
     chunks: Vec<Chunk>,
     opts: &ExportOpts,
-) -> Result<Vec<(Layer, String)>> {
+) -> Result<Vec<(Layer, String, Vec<String>)>> {
     chunks
         .into_iter()
         .enumerate()
@@ -113,7 +113,7 @@ fn export_chunks(
             ostree_tar::export_chunk(repo, commit, chunk.content, &mut w)
                 .with_context(|| format!("Exporting chunk {i}"))?;
             let w = w.into_inner()?;
-            Ok((w.complete()?, chunk.name))
+            Ok((w.complete()?, chunk.name, chunk.packages))
         })
         .collect()
 }
@@ -151,11 +151,20 @@ fn export_chunked(
         .clone();
 
     // Add the ostree layer
-    ociw.push_layer(manifest, imgcfg, ostree_layer, description);
+    ociw.push_layer(manifest, imgcfg, ostree_layer, description, None);
     // Add the component/content layers
-    for (layer, name) in layers {
-        ociw.push_layer(manifest, imgcfg, layer, name.as_str());
+    for (layer, name, packages) in layers {
+        let mut annotation_component_layer = HashMap::new();
+        annotation_component_layer.insert(CONTENT_ANNOTATION.to_string(), packages.join(","));
+        ociw.push_layer(
+            manifest,
+            imgcfg,
+            layer,
+            name.as_str(),
+            Some(annotation_component_layer),
+        );
     }
+
     // This label (mentioned above) points to the last layer that is part of
     // the ostree commit.
     labels.insert(
@@ -167,6 +176,7 @@ fn export_chunked(
 
 /// Generate an OCI image from a given ostree root
 #[context("Building oci")]
+#[allow(clippy::too_many_arguments)]
 fn build_oci(
     repo: &ostree::Repo,
     rev: &str,
@@ -174,6 +184,7 @@ fn build_oci(
     tag: Option<&str>,
     config: &Config,
     opts: ExportOpts,
+    prior_build: Option<&oci_image::ImageManifest>,
     contentmeta: Option<ObjectMetaSized>,
 ) -> Result<ImageReference> {
     if !ocidir_path.exists() {
@@ -209,7 +220,15 @@ fn build_oci(
     let mut manifest = ocidir::new_empty_manifest().build().unwrap();
 
     let chunking = contentmeta
-        .map(|meta| crate::chunking::Chunking::from_mapping(repo, commit, meta, opts.max_layers))
+        .map(|meta| {
+            crate::chunking::Chunking::from_mapping(
+                repo,
+                commit,
+                meta,
+                &opts.max_layers,
+                prior_build,
+            )
+        })
         .transpose()?;
     // If no chunking was provided, create a logical single chunk.
     let chunking = chunking
@@ -291,6 +310,7 @@ async fn build_impl(
     repo: &ostree::Repo,
     ostree_ref: &str,
     config: &Config,
+    prior_build: Option<&oci_image::ImageManifest>,
     opts: Option<ExportOpts>,
     contentmeta: Option<ObjectMetaSized>,
     dest: &ImageReference,
@@ -308,6 +328,7 @@ async fn build_impl(
             tag,
             config,
             opts,
+            prior_build,
             contentmeta,
         )?;
         None
@@ -323,6 +344,7 @@ async fn build_impl(
             None,
             config,
             opts,
+            prior_build,
             contentmeta,
         )?;
 
@@ -377,9 +399,19 @@ pub async fn encapsulate<S: AsRef<str>>(
     repo: &ostree::Repo,
     ostree_ref: S,
     config: &Config,
+    prior_build: Option<&oci_image::ImageManifest>,
     opts: Option<ExportOpts>,
     contentmeta: Option<ObjectMetaSized>,
     dest: &ImageReference,
 ) -> Result<String> {
-    build_impl(repo, ostree_ref.as_ref(), config, opts, contentmeta, dest).await
+    build_impl(
+        repo,
+        ostree_ref.as_ref(),
+        config,
+        prior_build,
+        opts,
+        contentmeta,
+        dest,
+    )
+    .await
 }
diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index e2bb7970d..115912ca8 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -37,6 +37,10 @@ use std::str::FromStr;
 
 /// The label injected into a container image that contains the ostree commit SHA-256.
pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit"; +/// The name of an annotation attached to a layer which names the packages/components +/// which are part of it. +pub(crate) const CONTENT_ANNOTATION: &str = "ostree.components"; + /// Our generic catchall fatal error, expected to be converted /// to a string to output to a terminal or logs. type Result = anyhow::Result; diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs index 424ede357..8ed72bf1c 100644 --- a/lib/src/container/ocidir.rs +++ b/lib/src/container/ocidir.rs @@ -203,8 +203,8 @@ impl OciDir { config: &mut oci_image::ImageConfiguration, layer: Layer, description: &str, + annotations: Option>, ) { - let annotations: Option> = None; self.push_layer_annotated(manifest, config, layer, annotations, description); } @@ -531,7 +531,8 @@ mod tests { let mut config = oci_image::ImageConfigurationBuilder::default() .build() .unwrap(); - w.push_layer(&mut manifest, &mut config, root_layer, "root"); + let annotations: Option> = None; + w.push_layer(&mut manifest, &mut config, root_layer, "root", annotations); let config = w.write_config(config)?; manifest.set_config(config); w.replace_with_single_manifest(manifest.clone(), oci_image::Platform::default())?; diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs index 5218bc277..fbf649e1d 100644 --- a/lib/src/fixture.rs +++ b/lib/src/fixture.rs @@ -168,7 +168,9 @@ d tmp "## }; pub const CONTENTS_CHECKSUM_V0: &str = "5e41de82f9f861fa51e53ce6dd640a260e4fb29b7657f5a3f14157e93d2c0659"; -pub static CONTENTS_V0_LEN: Lazy = Lazy::new(|| OWNERS.len().checked_sub(1).unwrap()); +// 1 for ostree commit, 2 for max frequency packages, 3 as empty layer +pub const LAYERS_V0_LEN: usize = 3usize; +pub const PKGS_V0_LEN: usize = 7usize; #[derive(Debug, PartialEq, Eq)] enum SeLabel { @@ -317,6 +319,7 @@ fn build_mapping_recurse( name: Rc::clone(&owner), srcid: Rc::clone(&owner), change_time_offset: u32::MAX, + change_frequency: u32::MAX, }); } @@ -661,11 +664,15 @@ impl Fixture { let contentmeta = self.get_object_meta().context("Computing object meta")?; let contentmeta = ObjectMetaSized::compute_sizes(self.srcrepo(), contentmeta) .context("Computing sizes")?; - let opts = ExportOpts::default(); + let opts = ExportOpts { + max_layers: std::num::NonZeroU32::new(PKGS_V0_LEN as u32), + ..Default::default() + }; let digest = crate::container::encapsulate( self.srcrepo(), self.testref(), &config, + None, Some(opts), Some(contentmeta), &imgref, diff --git a/lib/src/fixtures/fedora-coreos-contentmeta.json.gz b/lib/src/fixtures/fedora-coreos-contentmeta.json.gz index a1276a3f34046a9ecd83fbc96c1c3f7fee818f6e..285d587a7c52bb3725c14a104e522af0a6d9e8be 100644 GIT binary patch literal 11361 zcmV-nES}RJiwFp?!98RE17>ApZ*pNRV{dY0Z*whUZ*FvDZgg#BbYU)Pb8l_{%stDp z%s3QVUH)J_FDc6+kVRws7 z0R8U9fBnmsFaPcSeEITQv#Hym**10e+t)9@?VI-equ4gx;d|ND1tW}6LRmotTWu?@ zSM9Ovs&Bvjb3$&*Lmi{dPy4QFHd$01;jpnCay z+tq)b>$dvoo^mq(X!Faz5;amuuB~=|`#=A@$8L3Ohq7twZd1^eTA4yj@AJc`-<1ft z{D6))yhkX9F)ED$7*hdbf+#92XCPyT$Sr?sKmwSux_?3}EgCBjOICmbG+} z0URF$jZ8Jee8#>u8VVfDyz40S8ocx{TxupYYyy{{T$%Gon^(1mWsnm zJ*`G@IBw4Sx-XKe60*L;iI}}S4oGF~WupziXRw#0Wkf1sa`>x`PBuQmoJaUP&ZJO` zQkf@T+j{tZ?B1GoSDf}|CrElHKiofs$r89R`h5wE{Z!7U;GEw764C@;f(ywBvc{+5 z=1{hU_$pR>MG6M7J;6pC&LnOL@+)Pcr?e>y$uy^3xPK13>=@RG?k~N zU1V5WE43yBF+YF((Yu8#+@FK{ z^q>7uA2tP()LB4^f?Mwb980@2kvxG%0cXdC&JJ~6DmSpre;%5BF*(sWBJTq{;_EJ@ z9L|(_#EYX;Yp1ZX*6O^e-yd4A5#tkAjktZw5MA7z+sdh5Z+r@P!8~Mqusr$#IZx~{ za-R1aK~_{@A}~Zsu-8wD86cg-Fo&$dD0)Jq;nWC} 
zVSiA1_kM?iQ>oUylES#X#YlK3Gr$e49QL$=xC?bN5!x~&aPps%fAk+^>pc9<8++|x zw15S{6m<$UVl@C-(#^8-ouLot1h(YaYNrzz1zFxU`+Bm5D`_HmP78G0Sg;{IH^Ly+e0x6ayj{ds z1~KJF$k?t*5Xcji(Ai>v^G$& zp1j_Jm!~^Bt93XYo$R7t-EL8ZBH${0AjC^GK3fpUlkrgeIJAHb7pM^QueUz_aQcR< ztX=U!Lo8jOBX;gl$#S|h;1=^hPirZ(-DT$mT_kxudQVQN{ zBV5vVTw-VXzH7Y4$ZjZnS_Pg1vYK@r3vMQ0Tr+`EM$svV-&1` zw%^YupSLqB1#>h4#&$uBo_BblEhYj@>cljY40aQfMkZpgev+jst<$9xW+yZx|ALA7 z*~Y0CPI<}H{h}V>z^%XOu><4gcS)>W&nSByiITohjh~-4Vp^VA? zyd1W`noU=>++uu=IV+T=0?Ck<+oA5(=dN`&9LMrlSKzc}zC$Z(7b-xIu#hjQG&iuh zQL}A7Z5=166=n)vp(Dob(aB6d2{?;OxDntM|B1`k|VxBNeTH6 zdP{0~2BQMJEgoz|&Z)JCZ(o+(W()&u#v%zH3IeUhTlk2(`@}Mr57J=a>T!3>1RRX{ zF&}y9qbP_>{Bcim+@X@glCyCi;D~9V2&|KDLotV@Z4TxB;gHOX_&K_a`)ECdYclum zhw(kQmv0uVEpd<$A1U;_BsieEXkgbnoj?szA~Su^Q+2Idd%)iK!1|*);INSQ95&TI{!us;%Q7m|Cg^yBR#iYDubf*$Y4UJ;Dy0=tAT)}!shBHQET+Su zFOiK*32p_f$==r9*X?~o)9KY&sir033>$IdA1v^*R>x4t&Isfn6GB^s)SK-1?~_UA zeG53bc!iB^!z7hFqaLt(!x} z?NwP+b?5)TDhH=_$EqZ^Pp%R=z0Cr&I4+8j%`yoIwE0BHtkcA(Ep#*YcR$rCQST0`v^R7%VA(M{{ot3E=eadk^Dd!j8SbYVgnB+C#Z+s8%GRt zU1r$WGD#B2Dp~+FSSatQ&x^wW;kRxrFFCyxwJTQ4rCIb|hr^5zOMU+&qLQb@pvv9w z@KHjnfe)WAeH4>h2$y$#yx5{_a*3O%y@0H-47C?gQd{`&Oc~N|m(uUvnQi7o;S6Pf zawqBfZAmKY91#M9+QN7*BZ4Ud>*lKac^Vw?^J%u9B(_)qN~{Myo-e57iGQFtg9er% zwaS@NNdMwbC%KBgobum3GqPd^>8V+u#O!@MUQ)~2o&?OAheedu`9Ta0mbzEmtj9pK zdlNW?G=jz^Z;C*kw18L%3zIEX${-Dkr=eW?IKV{?8cbO&5F*B|PZyM)F82>DE^4fn z49=>!_dEBaF`#%su|@Krsbz{u53BxLKkq=yWR8Su=i0EN>&^_Odd9MjuJHT7?KZUNvP6B5Lm&UC@tQa z!8<+TBa(7wzfG|5o=ehrdf!D#R!PE;PMLo1$c&}H9NujsU@Qh=^t{q1z^)7Xvy}5g zkW$VlvUSHvTF0L33xZ-)3Qm#QtzZ4HxyT`q1rHbT5F_Ws!JxAq9(75bEWwQS^22;O1OXTnt_#OYd4_R zkfaiuJrDlY^9E74H3c;WX=mCr+wJsX9J9c$pm7EyMda_!bx>}!k?`{Nf9{*%(%j;t zmPISkKHy{NH$7XD$?|KV7;dy_xh4_`>CNWYb*RH*Cf*h86YV@MiIAw@v>U?-)9?Ta z=MJvWDSIMG=V>=LZ0DhzXu{y7+cxdylF0Zf#!Mi%AFtpMS098HWbzIf2V?&^P=jERJ2<%usfjjjV=#=R2 zj}}Do#0C(D4l}}(DkeBA3x{80?SRj!ZRby$PSj11ake8tAWN)kD1a16KquD)QqW(y z_ZW`t=n#1=L&4gtD|8~`n5L7(6_wBeg#j1J1VhSJFEbvdc#F?{BE2#<@NtIVW?saD zJu}ddu_$&)3AI>F-Pl-lZNJFs&bnI;{(>egN~G3E1T%H0A<6JbQh`WikS>HdmDJar zlgW=HrzVWg%1u$i4Wr0`y7VSK(&c!Epa1g)M|Y5vGulCZ+Unf(b9zIKrOp0;6A90< zyG4r&N-hsmZ4^Nu@KV-?48tnNG5Y1K4RNCXo1XNsrMI8 z)~$g1Z?9~OhI5DkO2ppv`GQtfB#J`9FtYv}dlb=IyYl;+&%%b8# z5etgRyop9Ww*QbHoJtV9%mcQ{GGt6xb!vR48^yAQ9xgVIBIKvzYbxYGX{3Xq-OgFq zug)h}30R8H(6OAmlDQ?4C*h#*mf9#V)otC45j;PC2#F{yF=DT2?Dhtz zK3(G&HkMgGR2CVc@_wjC?`f=YKq?(Cu<@cLiR9HuKoi}Fjuh%wA*xv5;1r4d_gTw zm_xzKprX%s{@`HxI0eNycc*VtXT>N04Z*f7Jm1-eleSOpYA}*q(7ijXERCY z`4Y2m`$~h&5pV0^BkDSqQaP8&Ic}D+T{D0>ZpMt2Tn5djqq^a!Ey+0IJfFH_b?S?D z=d)ESs^?ZycCU0{hL4LE?o!H(vjLV#;EfqSCmby=so#AkmA5G&kv_< z=Vh?p8el%l1I8|9jQzKVZ?$0BVB>%4lh0}(kw6C(yOaTHUK|h#)rRN7nFbn}9OD$n zTm7@xG-Kl()}na>o{q}grI(qu0xX}vG0}`0>{5rr$;q^_Rt!ET4lrIFhrB?|TOO$B zG2C5@T86s%)6fPVUCp@4;RZe>zwXd_UZsX>LnR@w`q^}4bsh>LHR2qOaIqU0p=YTT zC7=nZ;rdWwkSPy;)Q57vKUSByR-jcd!$!QMi9Br+gric8B#4>glv`8%{Fv6r#|lfR zdV7fwJ3{vfW;u9}<_uR$X<{_I=T22u`-?xEIWmi-d(EOFXv|NF!1K)z;e1i)90yqI z+-A%AN7HMGS!ORvt|_Srk_{b)R9|+>0|T$q`|l+auay)UG1(cl<1E_? zCUGh!BakrHv2Awe?tCcSNw(vZoqr#b=$6`DGpvcATeSiG*qK1=$>x*JbJitfu!aMd zxzjbDlAh!!zSJJm`Dyk)3_D>I7~q!aV!rQo7^#a zo_qr#m}s_$ALIdJQ#(e_I^XOOUrtfykiuK49=`dwbiux&<7qFT zu_nEccyuf+Gf><Ze&q-r|BAKKtY#qy&D}`H=ag5Q-2ir#=^!bhS@b z){kAlb;bI;dKGD;G-z~wzgdsU07*|zkSVt=B9OxgSfd<9bEI~X3<*n}%jle)mmckNb{IPH5+}FFZ`k6Z@ZGw(? 
zdO=sHzYZ8l2K~_=+rk$~k9VXic$Kceu?wCi^L$)B9Fr~_Luh!nI(PePtGiwqnNf3n zixaUoK3o#Y6Lu16!i(-Kc6tduS=Rw0?RI!*T#18Eq*7iotwb_wk;^`OSim{W;@XpR zH)UKUnzj75%5WL!c&; z*ll}Fe~Bky%f+Zf#tDA(AbPi z5qR1a4X-XY+#<78n$u;RJ7JVE?B3f8bV8h^sr>CaNH`sg2!l=AAIknuA8kVX(hFo9 zpHEQ8t4W~H<%U54lhA~4IHv8JFE{OUl^>Gax`3uE?3hHRvyd;voUOwVJyU(_j_pr> zj{!1oZiI`ifCxRyWP>Xr!_DQ)YovU29_=!W3Bw15#7r2m{(Q}-7$K>o@Lucca(tky#?Q7_rI+2tKX(!y7UM)fBJ!?GNkb=u24zcID?4IHh4yWS*`r z3eP>E*tPKOb~f2BbD4d;x*A(ix(bf72bgih{R$9K%-Wn5ybLhBHOcqD!P<1QKKI4C zJHFLj!I6fY8`#*?Nz%#6Z$rl|3|#s8bL6lOT`gP~oC_6|;hbLFQf}C!+liJ=C0Z!HK=P7xgD` z%c({BLHvQZ+_p=7p%G$tU*Xe7Ye^!{7=q@(hg}d(Bub>Px!yMKWp|l~W#FjD4Qw0- zo}PV>$l*qUUG;&L%C$AfG^IcO9PI*l4(K1TOBdih@0XJB)wMzHbze6BT%z-Yt*Dmp z{UtGCEF4~P$&=Ptci-u__udR;NOSoF7u($tdRA!xbdysUmUl*!Ktdw!3vb7yZq8Jz z893tQb>pnKq6X=Q9m{XZpaKUd;VBb&#}`!axTV1+;Z?_WoJ{j|nxP1{$&awHcX1(c zOW=7oRfYErGAodQntsA}e=6PC?y9p7Gy4ikP(Cj(;w=Zz%X2XDXHs(}2{LKZ+hA3B zazgiO<~%M`8v)xvR0#}Jd91WnR<5uPOIFQP!3N5%KAt9 zPZNS1sOh4LwKJ`qqm2*F^{^>+5;U@olZ1{_bX%g*So`Ntmj~ZU%PHI`HNvGB@edaG zPY3-&A*Yx`M(}tY!e=2z{nSTP8S(qthzCTTR!7kdstRrQUdDU=LX7Bh!b40^ja(48 zulv4`7IAVtRLnwvniYIB5A+*5f7Uro4=l``@jVZX35tWppEDtd2}zOgrJtcPSpm)m zpA?KAH(9;+h!pf#Dd7Q9%PdZNz$a#cbL48Udsi9W%UsXUiTw48?nGqV>0vaQx!%7u zn_NF}1pZ7vkqgaHHN0&$M;}0;6gzn{aB2&ts63rhi|&K<_bI?uVEw}#@-cmm*ytl* zoSBKy^UBqr5jv4Q;SMNv({xu>Z~O7yO}o+shv&RsfD?x&NhQzW0DE5phNx5qjbOIh zq20T$%CI2B44c|#2_nyDp+osb(Lwy~P0sJrpd55G<_MeO#Zy?|KW+0xCK7d~1iaHG z&u$9%oXRTS8N8x=jzdTp!`1AM>Fyfgt7$TvjJtQ>B7MRSzqWw{EXPuY?&KY|w(3RF zptdc`(od4zMSnnokozeXFp{6vm8aID-~&cl+xl2!kc=F2_GvNAwvQ%li|PVyieDJg z_v}gx4Yyqm$v(I`(*aN2XINYMyr?ze8QE%*492=lR@(Q|d;iF5H$tMs>dJW2bq;u* zNfBD_976!{;JYvn<~Wfo3lKZ48sQcBMtO_K{G^EzsG|PsoSlYEqvLQ{mPCkqrXn3x zcPdn<<0bH%Zp}n%4VOS66m62&vAN*n?Svy?ANy27{a?>j`!%BwFXF)8;rTNWGaexi z!LdP8Gjsx^_IaA<;iLWm{hnCHQlwql@O;}<-SIu|O7Hz3MGZw^%x7voGIwQut=yJf zzMz7_NoFW?4TZF5?q_Ik&tIzC${*UlVut9KtzgS8gBZ8HXADs0M@Xv&?W(?Zo9H^H ziRS*!c%I__vNR(2R?SXS+oM8!P~qD%CYTp_ByyXjVS>~_)uP?0366L>JV^+5A>>{i z$ThF^GzgH&f!AZZl@D|Gj#EV7`wdRox4Yb3umEb0xs7`(c48{Q;O;fJ(n6zd6+?3t z?^z5Mk;*uxtSAXEF=u%aR)QrdHtZ~a%MaC5(4BhBS)Xzxh915CX)c21L%JS~Ab8^k1#jWBMvSq4V720483P}I z@Soa~>XO5dytpE>B#R7QTGY)oyaQOZAP(^19Uu9zR<*6tC3HqoM^f9`KoMUJwPVs2 zBF&rXPd#ANM>++3TC!aR=W2ssdiK)OLBI*8YX9k}Ub9Y9ps3(RGFVAp@0vGn+GU>k zh&kJDH^*yPG7k_3Y@&w#i~40QU^T$eyd`;xwVYBQBeB(O)zIlF1Fh{uSVXLTp_oTP zfltxXeHJ>uB@ZWp(Hv>5pb9AC>pd_Y`beJswfvY07h=qZnP_2%&nkqb4!!(=ah8QB zdS@^HuyU7#ggmMuwJi-4K}%u8F08g0^XDu?41f-t4k%|r+|_>gzdiWM8D_`ctFGLi z^KLsf!O?r(mwXvRig^uApd3XqEfC{6Q|7|4j3Fs-W1Lt~x96=lU_<|CWZ6sr_!~TU zbeU8=KvGq5A68AJ9Lim_*Qsx55in=`$^oP9RkNQ$!YZ2+-32mnI27g^oW(IR#zY6v z^R6$3VW1>%?S%m-_BenoWAZ+C@7c|Hv#HDXEE z94I|H&K*shh@mT}3tr{|XyHWH1RbzGJ_dj$g-M`E1|HuFgMpI?mu$ zV@VGbx-E`$O|!_c$pTY5KjTOj#m`!|;pQsN>2#kyZO~^6H_2)Tt|UCjRrzR~w{G`c z+vk0$D+#dxU&TG2+hG6WwVoa+Ta|iFf_#HBRn)Mr`Z6rHY3#eu*M1}DfaJw(ts)#}EkD{%+BY|wNMnv56pFfhtAw(~%v~g<>EI#(c2aOBa|L&dhfo^$ zNaMGU{8z1eA4+TGOjjsEpn`>aHM-5*PCSa;f&Gox_No)D&M!C@VxY8IIeEycw{j_D zwV5;D+9?YXq@k@E-c_jkgEIfiGJhNLlL8;9t0vNT&!v8dbWPsXd4I}ZJy8l#$qzb| z_0YRSPil8N3|3a?Jd9*Zk+h7ytNbD~RxHUfBn$rg_aPUD3oaSEnr7rcsqwdkl&=Ph zCK=%YdlL23d>b>*lGJvdpoG;?1;Z{RX^bS6HC88V%Dx#MeZCuRF(U^W-4wMxs%XZ0 zlO#ccC7aLc#4jDAU$EQfN)F@k`lqFk)e$T+(JVb%La+bVy-M-uryBm29jReB-X>)= zJj29|C$XQPspj{)OEBJ_TY-^87>3S#qK`Z9-go+q0eg>mtS1qck{*(7d4HTbTI#W& z$c~(|Sj^${^HSJqht;^3laK&`crEf%rGtw&3R2VBth+@B=^1fP_x~gekcmQ;cA>X} z5ccrH_cj|%0<12h72c_9H`+H!k$mxm7N}yELW(+Y$JqheFr;DciK>zeE9>vB^)0)Mgf-h_5aZ zN?3xJxYK!4SA{t5cGYibj^$Eq^PR}`C`vzK7x1z@u5yDLIvRk^luz?I=*B%xQ--dg 
z5aMT1cKuP;YEecD%{ZXn@qiNWIOa$@8k>*maCD#JQ!QpW?jvp{VlkS$g`W0#T95IXW6wdyYoI?3tnyUnHFspt5BD>cS(b0@lj1ZTh@wq=!9K+v|x#8M_{Z8;Kf| z5oQ6P=%rwiNl#qEKocW?5&J%J>8gl$)ecR`8CGjO5L=__78$EuLcA zU_pMGO>`v`&C8c3cpnti-ACS-D)o$a4;hH95kQNW^(YGSD5*~EZYw|4oQ^3fa`ozs zLdl1euttnUC{8i&_HDyG?z7BKe7uZxpa&Y=5JPfjIlVDQQyEUmXJCQT?mv!l0ODEU-MxXU zEY+~_VVvR^3-w4pmVNGJc!Xl2g&PhES#2VKGFTO){J`C=QY{?iTNUdXAt`Og1_)Sx z@p7OCj^t70_Fmm@YdwJm>8~=Ux0(X=<3i4QLtrjW6bA^*_oF+@mr*WiZ!d&GJ}AU1 zPu*}bzy%HXm0~H7_Iz}LDKb;Z2&o~?&;3D^Dx5OnI-CaYN)26$8-i$&7nfffnmS=~ zMyjSdcoE{6Dm_LcTfx8*+}^q+_1Bk^dVy zdDC;#7oMkNr$d$^v1R=gs;4+vu_eco)QS-3Y_9sgHF;mXY48n4lGSr^Xyy*Dmwq4o zzC<;P=>0xJMrfKRs=X-pZ_3`{9_mPJJ61M1tFuyuLO~iv$k?kfVWi7^KD=JO>dL;{ zdh%ngeO2wxiyN$PJ1*%Kl1bN88ap;-W;hr@5@7A}<&%~+%x9p4*VA{;^9d5h@lG*+ zU<#jSFQiKN(HbzJhhtt8d0pItFugF@PTC`&ra|K45i&8%Z-0q!y8&%r$#+7&o%W^- z+U9eFEREQ?JU&Q(R({_$FONFOCT$nT&d@+?TN#^})xJe!>Vr)$Qs{tPQ})tK4ed!W zD9D__iZ!?)?W^lof+#^QVfn9lt?0Y595Wrh{1%4nKZ6NqIQ!j@N3t8KE1(&f=L*f_ zs0{R(K8#k;0*xNmDk})10Pn>jY8`VVcbTGik#a0BxuQlN=d25qA`ZWQy_6Y3y0MSJ{N zb{+NIGm<~;@xw!G;(2rG4=Rc8c3l(|P{L$gjSn&xB*el7L#N*Xh_LV4cZE+=7xi@p z(2Y#8g%1YlW<(2_t4n1}J$ZyQRJauViLz5Zq*486R}wJ6kp5$82{~s8Df}bO+xCv= zk0O7L(etrWrD6eNZn z)DMOPT3Ezcj%%izS?VK^{av|jyWH!_y3FNLjrg7*QI4lwB`XJfsV=Dofiwtj>gt+f z&Rw-iN7x;QUnbFpHE zPiX1N-Os%1@((@mI>PPYh2d%DQ0j#suH{E;a#0oS=2uztVwmNPt2XC(qIOsfS0jX% z*J_5BY5=G<5%ainL(Vy%1MCp@)y}Zokl6U95m|Q(m*Bb^W&Iawpw?p`dW^@2SGe7r zuQ<*hx~*C0zQkr6a2KlImO_Y-P}h7H1Avnyn2~oRk11*CCugIRByii0u^w>S<;A%t zmlh7>mKWH!7Z=7HX;dsvf@tf~t6Oy`ja~bWr4adE9{Ng%bGa{@pkRd8XS8`8$G=2VCja>^bZVJ0)iDw+p z7V94kIkdaQHBbd)C6R^}<9QbUob{tNLaOf<q^LZp(=h;iK)VHnDU{?WB$;dR%n(w5fH;h2>F{I!A-$Wf zY{fnAUW4^{4`3wST-LiGytFNM`MK`BkNlW>qG(U0dWNLQgax;2?O=!D_lb5Pmt=*Q zE36TiLw8c7)wKIEJGTAyYn+Qw(!cC7Wg!wpm|i0C|Gf)fmRfo=<9gB)CqoeJ?!W%2 zYc+~u{pydB{$;C=P1|Ymro9~Ia$EIn=f&6y(82_t?TRM8?->{~2M_`zJdkZI^lVE7 zl)jdW-Fp1lhFIG8Nt-uuAl1Ogb)-IPXu&%^&&`g$H?*0tK&2ki(iiEQuR}T7|55El jkij%pVtA+8yqlYc7R{y)c%#!Ii_^A ztbT{&1@dFcfT2ANhOtfWODcKxNCry=Eoq5qcB& z`fqRF4sCb&QS93O_`U9%f)Qr^A-bSUuXYlgf}%0Eise?uNT%b%nX*-N3E>?PMFrKJdDKc3bBL~?=Z8oqUIlF zz6umQi=8D;!HD2Q5-&JoOzEl$1aG@`?Caxh_{YC}G=0|`{vlV?{8O24BTJe7-T#26 z#>_wDsX@^15ljnRDyHT9t5ijWDk~XZNnJC3KlLANw=d3zi5e4o!_21v$iNVzn;TFVwhX1Fo!&KR&pmO)4J3w zG=f8YT-U|s(3<~Nm|Zgz>`mAwCX2|LePV>0>yzG*fl?b)hz8$aHh^@>A z(%4GCuvtpW-6NQLR5I_{395w(?eX5skaf2diQ9lA&1%=Ki#$;Sy%qzdP-zjPb( z0O^Xxw|tUu#?(T02qGQT-bZ7?Xj2ZX&Y#2B9Jd7%dZl8zJ`K4w2vNvRFTL8NjB3VJ zsEy3<;^sxz*dVTuaB$(5z$CUF{hpyzNWqB;#J!B|p}590X05CEqQt>j+$f1H&xMmt zDn=l2?Y=z?g>fav`fzA^qdklQjBfQ@+%MYUsv8o6uwUV=0VR|v0)$96?`?L9^ISrV zevNBrWo8sG40cxRlO4?b9FNBIR4dL3o|yOy+J_7?e#c=RSn1a{G0IE=@z1=?FWcrb zPi-W~7wHZyjI!h5viQ$yy!0P+XCjZ@+S2YgIV_ynP&Xp?lcVO}@G$HFM}7?Rmd-d4 zCYlbzoqcyX@9pRDgrQ3@&d>o6vYsV(u6ZS8sc2~DcI~0LdMk1zNTFc?5o#83!j=TK zaD8N32`RM-*TTNP81p`!PG(QgH?|^WK_d=W4(cXzV(%DCHz@b0D=tB+Y`=Pa`eF1W zS!uoEv6YH)vy?Fq5!JY~PE>04m<&bTeZQP-;!7FFGqNaCh{O?!DKu-h#|}1;DN#7EA1aIRh25kWJbhD$4&ol zw)rLD6{F!C4#TjW-43=YjrxeC1W?c!d`sRT$aHeA;UzQMMTM%U-i=MazVw}0Lq;LT z@~{|ow_hSK_Uwtv3rJd)oP~b(@Lu=Lwm6z`#xdTVY~mr7#K>aZ%qC!DDQN1{y(CH# zAlhD!{cb4E{b@fX)5>@>mO;9GA4QRQ)|7dHiBU{I3Zogv=ErCh9;0SozzQJ6I1Kzt`f`uZI0f~*P z=X2Zb4{p+9RWOTviAyDnbIO2xVZ(OAb%oKDNT@KBo286_X;B$beqy;)N&peoWoz@L zCK5VFU6mx`Q~>}X;w@&9YvcB-P;;HmO*agOs~1nFq!m5H)&L9`Afkzi0R^?dCCM~N{ge&&?h|U{VXv?w( z8T@P305ww%sD*9w*>0NRSf9@(dNcnU+QF8li)}sD#?WiCSt3j0?+l%P%M)YKh2-|A ziOUE9eD|^Lr^8FcVWmD)qy--Td>P1*I$N;YG9r%DMU6Y#tD-^6i5L zCMrr8V17@h(WZBd>%1zXb~g##1!W+Lc?mv)$%LpR&|46cDeU){5UQ`AEsCH6e@}UY z2_`ERE|QP6jRyytzYs>ZRIpA)DAypd-rf^P@`$LVe4MIqw+(y#$JHoVS32tQrYUFS 
zKrBDmR47ppR83VD%hj?Sf1TjWAG*)M$Bget2a~kjIKqzOlKnz_7UggqK5EA-2AN^Y1 z;+YlIar1?8Zbw=0oJ7AL7Iuz zcm-s{Y*AjUsETLq62!8ye~)0Pxy+Q4Z^xj%N6t z!$NfG9=`3Es8pEj*z`ZoqoFV7**6jKj7J{qlFXjI1F?Q#-`htuF-}+5=dt-|6iqSI z*KC1JSFKolqBChHw#Oe(WVTD?EBTdK9W2yZ=KHs6quKe2RfUWMzG=ugi4cW&HhfXE zue8)0prXLR8olb%Jl z_#>1SS&@Y?HEsz3x}kS%x4i`vZ^9VrsC$BgzP)88@3{OX7b@)el+YAy+uiQ^!F&~+ z)imU?R1k$2ODeop6do~VS*UUU z{wFwX{PahlE1xZXIM*C-Z~FFPLylWiHUdAKB4APm6D|%1m=QAm%6$8r5!@-Q3TIyJ|_K{?qd;pZ6 zyfEUK@Bk3+W-Q%FvOF`4;jgmAHbD<)GxI$6Q@k-1F{2eb3|m+>h{FN^>76ic_7YGL z{b+tPn_E$f*~Y4(G z@V*Vzx&$`u zXV&j2QX1zgRIzhb%!J40+2-ZjlI)ApZ6?w!je?CfMom+e(;XEDU(+H*3F89jhOqtj z2X1qd=EuvpkAiXwJ7B>(2=(vo&Y)C)n9bqlE}M@x|KApIs%Wt*4|c6aGku34dRB?W z5fq|1;EQ*gOFzuHO51g3f+wTNi6oIJ&L7~49=8z?oL>MjHcUZ^tp5Hn^$SNsvIOB4 zZGfOlb|IHg6sR5p3oNo)f9FconlqppxH-2|Qz*A}UN{bk6D|x15KJzgrPO690T+5> zj9t^)f8Eq$GpTV~hn8%*;{fzgo*CjK$HySbmHQ}Hmz1cuyKS?QS&nube36T-Wu<|1 z%&*>j@xC!0{uBpKeRnaVmBrrG3{xX81LoZrBRY-N3McnCaY!AZ?rVjQ7?OBjoCeiG zLhInTt+%!w#fp9tSpg|zmJVQ$7=RP61*q|~LeaY2Tzh)6>ptqNqMXz84qRf0oXW>4 z+QwF(E*(yXPhV91&3P!gy-kR$sEW-m%DH@Z1VnAUOT$(|3gFmj*Ni?Gw)H3D zWD;5uP1(L3BWw4WF>W1a49fRhD#4-lPGfqEO6;b>0JWvxh#^KWZ__{yF%Oa~#AKR- zo6oMs_OqFMkZ7v|$}Kh^2x2J>AAZ?uAOKhJzNhkdIgIVO9^dDnrCOCmB*y(4BjH9) zKlE-C))nJxd=)Qs84oP8BFf(yN>)@>KpT>cVLomDs!gBMXvqx27Hb6JP%XU>QxKrC zHMQTMkv(Tz&KuNbUiU-+uN( zRjdNKwQNJsw3YGJq7?Ru{vxF$v@RbG5s#Pl>66OoF|+KiI<2hiiAFF_vp?^Prtc?T zUql;@aB!Ki2T4R&zSv?V;S%hK`8AB&B6-7A8k&U+5oAelwm7+NEpHe%oOiulX7Thv zf^bnofS_&*@p6gCtPRcibg<4{!WW1$OGo?>!n_rKa85BTfW(HiY#+_fV%ttF)KkU@ zj=E9sE{u)hXkE^EEsAO>!>wM`W4U zoK5BpTL}H4WRN0UK@FIf^W3-kW(&~#bY}7X%PiOhD3%T|WIZzJZ&0YX1e}mQIp)VcRE;(g@|2`OX zEml?3*@*nvt;hSn=lA+p{V0KSLD%jt{pDC2ZKiFWbN-<;jLkAo7oNpg)U9-mTS=J! z4l$mG>$x*~A6djerVzInLnc4%D798Vm7y8mZ8ET6Z}wu6sE$liPCeeRC4||)#g#0& zg3e6sx9#vT1^3abmr{sxfI=p}4k|UTNVqd>NPpPY*OMYLn!LxbWrhIY5MwB-#WQ9C zC^7WI&u(Kr&G;v=O`JkptPnETX7;*8+DuUBk{O$O*siA^oN^=L%pxcVAk58Se&Q;@ z0lU{V<9KMMv)9pZFv%=(?g5maV>dQg12=zlpT}nE2B9lTZ_BF}DN7)K=C8TAooLIuJSPP^b@=5Qbfi#>hGFWT z!>KE5Lt0^9$Pv}r7&glp;jq5Zg?co>7ZClsC_h<@JDH)(+iJ z+=l`r+RB~>JCQwo2cnsDzq#z|Q-ISv#y>U6SVZrpi~~y-_l`g&QZC;LQI){8e|AEK z_vvg+bZsV?Wftub!a_se#cDh;sE=(pZECy5)Ji1o{!c>AwS_3e>Ifenh>8n9Bkj-o zqir^}FKCE`{Yl2TJOG3xmP_iT0}nWX-KCLk)!*BwD$0rhx#ev)9P8mPv&E`tx-7}K zya@nd!Jk}6S85hUFY|f1amu`A9>Pn*ZXT zyqXb`aNzRmb3F{-?IkG{iFW!W88>kN^u(+&S?X}se*Btl)ReJ2a`f-BRF44WYcs!d z8R5XoF8Ze49_*{Is(24vl5v3m0HKk9*LpU?4ZOgtzjW45O02Rt;Z|9IV7V{vz8b|0 zT|m^HZ#R$G)?US;(xIGpUtkcMLEkWFB&E=etAAbU;e9vUx_e9rX@fQUEt>LuJ2pgA zvWlJE`dfpYA$(>uV!^3@fsY^r49j-2Gj+SI#b)T`loaVVk%j%#7~k?Y4+>b!4e^N= zu2luxEogn48Sc1YBzRiM+#5E##a<;|(xiew$tUS>N;^R|Spu*cj## zbyseIpzVWxE~y}ZVlvmsHr=nB>OXx+LV8t{DGRZN{(omaBMoZLh?}uWlg6^J7DY_t3&S77oLHz}=3cTj= z_M+J9^$dq6UM-t5!mZe^y;@cgC@z^)^l@rLN7a4;yVLj~m)|ADO08mzXSWyeX1w?) z1-`(Vljo5x`XnwBh0?Z0&HLSaXhTevf;^6R{~nd7g9(u zg!(RBR|#70Mi3-La`@mx0KHzz_^Ni=X? z^Uw@KA$8ohkNw?A^#EX-*`H5g(o7+Es;b>~>~^zv=%a`zFypoZ(kJ+9`U<61V%ZKIdx4opF?yS^Z3P#CI{pv#qB$! 
zfh+FpVY$OcyTu*PB=$!X6Yy;>Kdv_z3R}dD-tdZYORow7(YC?m`v)LNZ~6DR_bVf#pG04k z);H>^)ae(Tp6gAHuaChm@%IU@pe^6Jx@&1~V2Yj4LNK@GVGdoa-uWkl1KP9R93}@T znkJ6}E(`HUpm}G1O`p~pXkv`Lz97_>2B*mErzb3nBUmXG@BqP7W?xo+QZ+`#mN7!xN~W86h{K^FQNj0c=c`Wd*9PWhm5gJ7X}aDs&+-}e_u%$ zAt0p`ZEE&36Hy;-8B3{r=d>JfBtl($iXSHmL@#4g`m`NVZw;c{gFhq@K@_mZz)%aB zTAZ0iMB9f;27PzhF$rR@5ev6@Vss@nITjhpR207Uj6)dpRNL5Scv z`PFoVM~w9m7Ghl8;_n-*8k#`SPuBOX{GP@|SqOD3RtTz2N7}l;47V>j=7VH!MkftI zsh{+V&WR1m7NrJlpPP55vg(WWXWrCxFFe2A&;Y=qE;v7<`@OM;+`Zt0a?2K$;8oe) z`1Nm@uY^$g`B`Hd))=Nxw|Z$iEfcCh1Iq~Qk(fn9syO%tAM9Gmnc7CbfF07_-F>&$y#O}&x`H$Vl!Wu~ zaGi4a&Ba(Kr#6}Zbpp9VeClgUq=1?)0<7IpmU&x;BUm79pRK74%4-ls0bqv|k1;_n zHs2`-rK}PV+i>Rv5jCVT)TNN7r}8X)y2PXAcZsvgwIqfVd4GfN5D>E(;f-nPDJ6V1j z07aA1w=`tS+Wk`|=5;5PpQKQ$y!+u$6`w7;3ICEg0*7+r0!<+D3KL(O;*}7fM||$F zR<@)eiKT+w3aN-IL?H{LnYIJ@1_TpS#N-Fj-PbHRO~l;R(UA>S1J|7Zbwqq*R|WFo zHg+$!L;C7g)RqN?F;vmfoZF^na;7RtEyfqdsbw0#4VMpJDO|tTA7x~UOD?mCYi9Il z`V!i-Is$Y)QEi*5;>_}g=hzU+POE71t39EJPag}Z$1 ztr7w>XOT5l%}3Ez>~RubKQZr6Cw5$}4MtQseYRDc0mZx3mG7>n7M{KF`@(djFive) z0nQos{hGxI&|OBH0oK)`R)C1QOS8wois?rR;M}EC7C{Zm`Nw{+ma8o9GH&Gsw#09Y z)Ydwpfn)af?0pjGHyt#lRZ~NS4rc`M7h~{2SE4ahBOO-={{fi<<2+K(+4b?k(rk2 zKq@VGKOh)g34G#*!92d`fN4wjvUB>Ghd}qElqOLB8~(Yh>+YQiO9h@gL&(wOqMh@B*3qV!oZ)~#LRx`KdvrI!RfCL z_N#i9)SDU{*pP8)0E3efxFTpl4&C2Lo@5OOxTqy!Zz*faa$)!kY z`cch*+HFh1-D#1E3Ce5Nkm$0S3sGQ{fHp+Ah}Mj|n<(L0#M*8+Rjj|UcdskDPE26D z6xC2yM*>U1;HHPvE%SV8j$!Tvm!ugy6X({+;E|qDU*}}V(0Kx-U8KP>Mb=z@=#9sTqHPSPl;swxhS8Ln0+o$`RAX@{^*~M`ROqHQ>($xfAHvZPdYz~$k}M>2Af0_ zFp`#5`g1pIG3;%R>}K63GzO=A2~RKUuVXh8nI2)38!qs4GDmt2B9};*Ou~agHNHSg z6;R{<&dw}J=)<`)`Jrj$=wX{5&jRTwdn?La9Zd+-@1*IxTiYDu6_q5Wz$`+ID}*sL zUZ*WF0Fvm7Q+a5b{(9yg8yj;n5o%l_jG=;88~X)i65j3yE>*hw=~Vva2che|BRh3$ zkTDTaqcxWDZ1ZzUFs~k5K7zh=^_Ai@C7{>zJ!iM_89Fu_Q#MN5x&VgmU6pGUM=3D! z2Dm}t+xQzV;+S$7x>jmg-E~9fFU!kAD5cthn7eixhkmqnk(KS=2UamzD)WO#kzzRf z4y3w~CUgdFSZt65hSvGr&%9{+tIS%quoQcTe1ixupnKIUGe6u5sd#h0vp6 zbHv>;i2VexT&~Ug;5zFxWJLy5QwG8lmZ=PP-)WBY_IO`4(7j!+QA3FR|nVE zu(FY!U_yvTEaT>FnC?CSICoVk;^s7Xvh${@io6$w{Zk7wXJ6s$e%V)18o)Vs_rAE^ zda00!tWk4$9|G$6K_Kbw)f3gaV9ThURaKMA!f)G@VJkL^E3LULtBioI_fV53$j~q{ zx~1{w-J+r-%+^XrSF_vgA-(^`nQK-?U5WsN8U5P4I{xC;i+jJZw*PJKW@Ba4VcEBN zjTnCgxG0*sXJyU*fw87Xwt7vHNg}UY6B`~;f^_}N{0T%E;oD&P9zNg!tnXKr_tj{d z^UK(nAn1aQ>mJ!ThF2UfJ(!Ss#zXR5R<~afH-ltpP=rn@u%RQRo9{qn(i9A~l3q<{;W3 zCDR}Zy{pP>f8ba79?Q{W{yR}8R^niN|ol4ml zGB6j-84+!9O9pv)VW!XI0<;0?GZHNkh6W0Fkvqoj;f$gL3a$%Djc}Ffv zpcYcMw&|P0{p95`pVllsF-}QP1L*E#rhW#Pd`uLu)~qi-hU9Z&)*BA0bA5SNWm&{A z^^IFt+MqLNPODHX%5ztoKcuJF;<2}4+QET1cV}6c20-b}Xz0i=?8EE10#WqE(c>RY zlqI~ht+qX+j??_98H}b@OmjjKABbT;oy-mrz=7Q3{D(1A7^>nf7jCfStjZmuIS9U2 zSSO5Ac=&mTO`NteD%SV%py@CsK$0o{$B(SIO2-~Ie*?2%wPM;^ggEyLG&GIes84m# z%YE< zigMy~VWv*z>Vrns{qE8k+eVEG-DRX%Xc!2|V=zLtslYZ zj2}hY4}l${+YO;KF$Q4n@qr+t;aY-yUdsI9$%bF1j@&k6TpYll+Mzba3Mw-bT0Yt_ z#||l5;Dx3+K}uubLUr&`e7B22)V{!Tt7PwXUr01_`j!RK-J~nZ{mKkdsNc2}VAavd z47Ff;$ipCo+WYZuUEbKw%K>Yzjf1-^GKsUb#(Ck2fmnu^&K#PeQ+8-v97v(RvNne! 
z1&rnT)?b7(*yOM^{#^xZ7Jz%GCI}75+FTv^S+p&#rz7Bg%6bq&BhrQj4TC@Z+%}&g z#wdioYN52!(3~Hb@*$=tbkIGEEC?c?h*jnHc10_cgJq)ca^=pvO4oWdftdflVK_GH zFF(gO64=|+;Ou^h5~9H~xZvZ3b(d$(jb*J?Y|mv9xWG*)x842NIBCHaa2HwX6JU;w zEatBG?yEa41cM)$s7wo#Ik4QbuDD9p6#N20;y963EuKQv>>uUTJ2I{}1V2)~0V$mZ zVl0>If{sfS>=LpsHSx8|doWGrR2-X@d2LY@Vw&sICJZAlj8|)QVwJW&hX)Fa-)ikY z5(?Ctm34XXsIhm7>CZ|$&HwIxi;@It#y&Nib?i&~jSXodsL!TwQQWHgxvW(_2o>E< zjCBL<59nTyIf5)YnquI5(Lf<{B}U$yezLY@582f^vdK>r-A{({<-(fFfpulx9DWsf zFDK8~ZzmUe&qYK1e(J9CSS6Ziin^ttI|?xv=1#2A3oWp_jz}UO=qx#gw6Am?)n3&= zVBAhIauURVu5vuwC)e5!S$`R*R*}o4y1{BU2zmh_IeyXP8_!a*Uac*pfdozdiC9Sj zbqwLZS=UsBS2aRTJ?mkwMc1)&A*QHw_u^dCL$#)As{y!8U-%Hsn`-%#`NN1(5m2R6 z#E=^F%#nVMNkLPt&D>;U{CU!KNWc_F=XbH~*Z9Bu2-F7xHMhJi0>*t{w{R6>nIDkjSBq;Y0I&8q$N+}cKl&0hJPkk9zu-zfo-P-<2;>Xe9S z&rQp(Atm9L-_8|RHjLmkV1sWPh*xyak&Yykvr-Ha7@;Pu8p6;}-ej@>NwY)mm2qUw zqEpLioldY??w_=E(E?~-6nVJxMFJ)@g)cBnk0=A2dtBN7?|=RW4+K{M=>h=&shb8I diff --git a/lib/src/lib.rs b/lib/src/lib.rs index 54730c773..c9a424b34 100644 --- a/lib/src/lib.rs +++ b/lib/src/lib.rs @@ -51,6 +51,7 @@ pub mod objectsource; pub(crate) mod objgv; #[cfg(feature = "internal-testing-api")] pub mod ostree_manual; +pub(crate) mod statistics; mod utils; diff --git a/lib/src/objectsource.rs b/lib/src/objectsource.rs index d8258c164..64a3eb137 100644 --- a/lib/src/objectsource.rs +++ b/lib/src/objectsource.rs @@ -41,9 +41,7 @@ pub struct ObjectSourceMeta { /// Unique identifier, does not need to be human readable, but can be. #[serde(with = "rcstr_serialize")] pub identifier: ContentID, - /// Identifier for this source (e.g. package name-version, git repo). - /// Unlike the [`ContentID`], this should be human readable. It likely comes from an external source, - /// and may be re-serialized. + /// Just the name of the package (no version), needs to be human readable. #[serde(with = "rcstr_serialize")] pub name: Rc, /// Identifier for the *source* of this content; for example, if multiple binary @@ -54,6 +52,8 @@ pub struct ObjectSourceMeta { /// One suggested way to generate this number is to have it be in units of hours or days /// since the earliest changed item. pub change_time_offset: u32, + /// Change frequency + pub change_frequency: u32, } impl PartialEq for ObjectSourceMeta { diff --git a/lib/src/statistics.rs b/lib/src/statistics.rs new file mode 100644 index 000000000..7b0102fb6 --- /dev/null +++ b/lib/src/statistics.rs @@ -0,0 +1,109 @@ +//! This module holds implementations of some basic statistical properties, such as mean and standard deviation. 
+
+pub(crate) fn mean(data: &[u64]) -> Option<f64> {
+    if data.is_empty() {
+        None
+    } else {
+        Some(data.iter().sum::<u64>() as f64 / data.len() as f64)
+    }
+}
+
+pub(crate) fn std_deviation(data: &[u64]) -> Option<f64> {
+    match (mean(data), data.len()) {
+        (Some(data_mean), count) if count > 0 => {
+            let variance = data
+                .iter()
+                .map(|value| {
+                    let diff = data_mean - (*value as f64);
+                    diff * diff
+                })
+                .sum::<f64>()
+                / count as f64;
+            Some(variance.sqrt())
+        }
+        _ => None,
+    }
+}
+
+// Assumed sorted
+pub(crate) fn median_absolute_deviation(data: &mut [u64]) -> Option<(f64, f64)> {
+    if data.is_empty() {
+        None
+    } else {
+        // Sort data
+        // data.sort_by(|a, b| a.partial_cmp(b).unwrap());
+
+        // Find median of data
+        let median_data: f64 = match data.len() % 2 {
+            1 => data[data.len() / 2] as f64,
+            _ => 0.5 * (data[data.len() / 2 - 1] + data[data.len() / 2]) as f64,
+        };
+
+        // Absolute deviations
+        let mut absolute_deviations = Vec::new();
+        for size in data {
+            absolute_deviations.push(f64::abs(*size as f64 - median_data))
+        }
+
+        absolute_deviations.sort_by(|a, b| a.partial_cmp(b).unwrap());
+        let l = absolute_deviations.len();
+        let mad: f64 = match l % 2 {
+            1 => absolute_deviations[l / 2],
+            _ => 0.5 * (absolute_deviations[l / 2 - 1] + absolute_deviations[l / 2]),
+        };
+
+        Some((median_data, mad))
+    }
+}
+
+#[test]
+fn test_mean() {
+    assert_eq!(mean(&[]), None);
+    for v in [0u64, 1, 5, 100] {
+        assert_eq!(mean(&[v]), Some(v as f64));
+    }
+    assert_eq!(mean(&[0, 1]), Some(0.5));
+    assert_eq!(mean(&[0, 5, 100]), Some(35.0));
+    assert_eq!(mean(&[7, 4, 30, 14]), Some(13.75));
+}
+
+#[test]
+fn test_std_deviation() {
+    assert_eq!(std_deviation(&[]), None);
+    for v in [0u64, 1, 5, 100] {
+        assert_eq!(std_deviation(&[v]), Some(0 as f64));
+    }
+    assert_eq!(std_deviation(&[1, 4]), Some(1.5));
+    assert_eq!(std_deviation(&[2, 2, 2, 2]), Some(0.0));
+    assert_eq!(
+        std_deviation(&[1, 20, 300, 4000, 50000, 600000, 7000000, 80000000]),
+        Some(26193874.56387471)
+    );
+}
+
+#[test]
+fn test_median_absolute_deviation() {
+    // Assumes sorted
+    assert_eq!(median_absolute_deviation(&mut []), None);
+    for v in [0u64, 1, 5, 100] {
+        assert_eq!(median_absolute_deviation(&mut [v]), Some((v as f64, 0.0)));
+    }
+    assert_eq!(median_absolute_deviation(&mut [1, 4]), Some((2.5, 1.5)));
+    assert_eq!(
+        median_absolute_deviation(&mut [2, 2, 2, 2]),
+        Some((2.0, 0.0))
+    );
+    assert_eq!(
+        median_absolute_deviation(&mut [
+            1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 9, 12, 52, 90
+        ]),
+        Some((6.0, 2.0))
+    );
+
+    // If more than half of the data has the same value, MAD = 0, thus any
+    // value different from the residual median is classified as an outlier
+    assert_eq!(
+        median_absolute_deviation(&mut [0, 1, 1, 1, 1, 1, 1, 1, 0]),
+        Some((1.0, 0.0))
+    );
+}
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 63eda8724..a9e5b69ca 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -21,7 +21,7 @@ use std::process::Command;
 use std::time::SystemTime;
 use xshell::cmd;
 
-use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0, CONTENTS_V0_LEN};
+use ostree_ext::fixture::{FileDef, Fixture, CONTENTS_CHECKSUM_V0, LAYERS_V0_LEN, PKGS_V0_LEN};
 
 const EXAMPLE_TAR_LAYER: &[u8] = include_bytes!("fixtures/hlinks.tar.gz");
 const TEST_REGISTRY_DEFAULT: &str = "localhost:5000";
@@ -480,12 +480,14 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
     let opts = ExportOpts {
         copy_meta_keys: vec!["buildsys.checksum".to_string()],
         copy_meta_opt_keys: vec!["nosuchvalue".to_string()],
+        max_layers: std::num::NonZeroU32::new(PKGS_V0_LEN as u32),
         ..Default::default()
     };
     let digest = ostree_ext::container::encapsulate(
         fixture.srcrepo(),
         fixture.testref(),
         &config,
+        None,
         Some(opts),
         contentmeta,
         &srcoci_imgref,
@@ -520,7 +522,7 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
         "/usr/bin/bash"
     );
 
-    let n_chunks = if chunked { *CONTENTS_V0_LEN } else { 1 };
+    let n_chunks = if chunked { LAYERS_V0_LEN } else { 1 };
     assert_eq!(cfg.rootfs().diff_ids().len(), n_chunks);
     assert_eq!(cfg.history().len(), n_chunks);
 
@@ -537,6 +539,7 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
         &config,
         None,
         None,
+        None,
         &ociarchive_dest,
     )
     .await
@@ -625,7 +628,7 @@ fn validate_chunked_structure(oci_path: &Utf8Path) -> Result<()> {
     let d = Dir::open_ambient_dir(oci_path, cap_std::ambient_authority())?;
     let d = ocidir::OciDir::open(&d)?;
     let manifest = d.read_manifest()?;
-    assert_eq!(manifest.layers().len(), *CONTENTS_V0_LEN);
+    assert_eq!(manifest.layers().len(), LAYERS_V0_LEN);
     let ostree_layer = manifest.layers().first().unwrap();
     let mut ostree_layer_blob = d
         .read_blob(ostree_layer)
@@ -658,7 +661,7 @@ fn validate_chunked_structure(oci_path: &Utf8Path) -> Result<()> {
 
 #[tokio::test]
 async fn test_container_chunked() -> Result<()> {
-    let nlayers = *CONTENTS_V0_LEN - 1;
+    let nlayers = LAYERS_V0_LEN - 1;
     let mut fixture = Fixture::new_v1()?;
 
     let (imgref, expected_digest) = fixture.export_container().await.unwrap();
@@ -729,7 +732,7 @@ r usr/bin/bash bash-v0
         first.1,
         "ostree export of commit 38ab1f9da373a0184b0b48db6e280076ab4b5d4691773475ae24825aae2272d4"
     );
-    assert_eq!(second.1, "bash");
+    assert_eq!(second.1, "7 components");
 
     assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 1);
     let n = store::count_layer_references(fixture.destrepo())? as i64;
@@ -803,7 +806,7 @@ r usr/bin/bash bash-v0
     store::remove_images(fixture.destrepo(), [&derived_imgref.imgref]).unwrap();
     assert_eq!(store::list_images(fixture.destrepo()).unwrap().len(), 0);
     let n_removed = store::gc_image_layers(fixture.destrepo())?;
-    assert_eq!(n_removed, (*CONTENTS_V0_LEN + 1) as u32);
+    assert_eq!(n_removed, (LAYERS_V0_LEN + 1) as u32);
 
     // Repo should be clean now
     assert_eq!(store::count_layer_references(fixture.destrepo())?, 0);
@@ -910,6 +913,7 @@ async fn test_container_write_derive() -> Result<()> {
         },
         None,
         None,
+        None,
         &ImageReference {
             transport: Transport::OciDir,
             name: base_oci_path.to_string(),
@@ -1298,6 +1302,7 @@ async fn test_container_import_export_registry() -> Result<()> {
         &config,
         None,
         None,
+        None,
         &src_imgref,
     )
    .await

From 723b6f15fd76fb55da16a18fe1e8507ca8a9906c Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 15 May 2023 17:37:01 -0400
Subject: [PATCH 561/775] chunking: Minor comment cleanup/clarifications

The default Rust style includes a space between the comment `//` and
the text, so do that consistently.

Ensure that "documentation comments" are consistently associated with
their function.

Expand acronyms like "hs" and "ms" etc. in many places because I find
this easier to read.
(Particularly I have a strong association with "ms" as "millisecond")
---
 lib/src/chunking.rs | 121 ++++++++++++++++++++++----------------------
 1 file changed, 61 insertions(+), 60 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index c8fdc333e..79ca9322a 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -411,18 +411,17 @@ fn packing_size(packing: &[Vec<&ObjectSourceMetaSized>]) -> u64 {
     packing.iter().map(|v| components_size(v)).sum()
 }
 
-///Given a certain threshold, divide a list of packages into all combinations
-///of (high, medium, low) size and (high,medium,low) using the following
-///outlier detection methods:
-///- Median and Median Absolute Deviation Method
-///  Aggressively detects outliers in size and classifies them by
-///  high, medium, low. The high size and low size are separate partitions
-///  and deserve bins of their own
-///- Mean and Standard Deviation Method
-///  The medium partition from the previous step is less aggressively
-///  classified by using mean for both size and frequency
-
-//Assumes components is sorted by descending size
+/// Given a certain threshold, divide a list of packages into all combinations
+/// of (high, medium, low) size and (high, medium, low) frequency using the following
+/// outlier detection methods:
+/// - Median and Median Absolute Deviation Method
+///   Aggressively detects outliers in size and classifies them by
+///   high, medium, low. The high size and low size are separate partitions
+///   and deserve bins of their own
+/// - Mean and Standard Deviation Method
+///   The medium partition from the previous step is less aggressively
+///   classified by using mean for both size and frequency
+/// Note: Assumes components is sorted by descending size
 fn get_partitions_with_threshold(
     components: Vec<&ObjectSourceMetaSized>,
     limit_hs_bins: usize,
@@ -435,39 +434,39 @@ fn get_partitions_with_threshold(
     let mut sizes: Vec<u64> = components.iter().map(|a| a.size).collect();
     let (median_size, mad_size) = statistics::median_absolute_deviation(&mut sizes)?;
 
-    //Avoids lower limit being negative
+    // We use abs here to ensure the lower limit stays positive
    let size_low_limit = 0.5 * f64::abs(median_size - threshold * mad_size);
     let size_high_limit = median_size + threshold * mad_size;
 
     for pkg in components {
         let size = pkg.size as f64;
 
-        //high size (hs)
+        // high size (hs)
         if size >= size_high_limit {
             high_size.push(pkg);
         }
-        //low size (ls)
+        // low size (ls)
         else if size <= size_low_limit {
             partitions
                 .entry(LOW_PARTITION.to_string())
                 .and_modify(|bin| bin.push(pkg))
                 .or_insert_with(|| vec![pkg]);
         }
-        //medium size (ms)
+        // medium size (ms)
         else {
             med_size.push(pkg);
         }
     }
 
-    //Extra hs packages
+    // Extra high-size packages
     let mut remaining_pkgs: Vec<_> = high_size.drain(limit_hs_bins..).collect();
     assert_eq!(high_size.len(), limit_hs_bins);
 
-    //Concatenate extra hs packages + med_sizes to keep it descending sorted
+    // Concatenate extra high-size packages + med_sizes to keep it descending sorted
     remaining_pkgs.append(&mut med_size);
     partitions.insert(HIGH_PARTITION.to_string(), high_size);
 
-    //Ascending sorted by frequency, so each partition within ms is freq sorted
+    // Ascending sorted by frequency, so each partition within medium-size is freq sorted
     remaining_pkgs.sort_by(|a, b| {
         a.meta
             .change_frequency
@@ -485,7 +484,7 @@ fn get_partitions_with_threshold(
     let med_mean_size = statistics::mean(&med_sizes)?;
     let med_stddev_size = statistics::std_deviation(&med_sizes)?;
 
-    //Avoids lower limit being negative
+    // We use abs to avoid the lower limit being negative
     let med_freq_low_limit = 0.5f64 * f64::abs(med_mean_freq - threshold * med_stddev_freq);
     let med_freq_high_limit = med_mean_freq + threshold * med_stddev_freq;
     let med_size_low_limit = 0.5f64 * f64::abs(med_mean_size - threshold * med_stddev_size);
@@ -504,7 +503,7 @@ fn get_partitions_with_threshold(
             size_name = "ms";
         }
 
-        //Numbered to maintain order of partitions in a BTreeMap of hf, mf, lf
+        // Numbered to maintain order of partitions in a BTreeMap of hf, mf, lf
         let freq_name;
         if freq >= med_freq_high_limit {
             freq_name = "3hf";
@@ -531,19 +530,18 @@ fn get_partitions_with_threshold(
 /// Given a set of components with size metadata (e.g. boxes of a certain size)
 /// and a number of bins (possible container layers) to use, determine which components
 /// go in which bin. This algorithm is pretty simple:
-
-// Total available bins = n
-//
-// 1 bin for all the u32_max frequency pkgs
-// 1 bin for all newly added pkgs
-// 1 bin for all low size pkgs
-//
-// 60% of n-3 bins for high size pkgs
-// 40% of n-3 bins for medium size pkgs
-//
-// If HS bins > limit, spillover to MS to package
-// If MS bins > limit, fold by merging 2 bins from the end
-//
+/// Total available bins = n
+///
+/// 1 bin for all the u32_max frequency pkgs
+/// 1 bin for all newly added pkgs
+/// 1 bin for all low size pkgs
+///
+/// 60% of n-3 bins for high size pkgs
+/// 40% of n-3 bins for medium size pkgs
+///
+/// If HS bins > limit, they spill over to the MS bins
+/// If MS bins > limit, fold by merging 2 bins from the end
+///
 fn basic_packing<'a>(
     components: &'a [ObjectSourceMetaSized],
     bin_size: NonZeroU32,
@@ -553,7 +551,7 @@ fn basic_packing<'a>(
     let mut components: Vec<_> = components.iter().collect();
     let before_processing_pkgs_len = components.len();
 
-    //If the current rpm-ostree commit to be encapsulated is not the one in which packing structure changes, then
+    // If the current rpm-ostree commit to be encapsulated is not the one in which packing structure changes, then
     // Flatten out prior_build_metadata to view all the packages in prior build as a single vec
     // Compare the flattened vector to components to see if pkgs added, updated,
     // removed or kept same
     // if pkgs added, then add them to the last bin of prior
     // if pkgs removed, then remove them from the prior[i]
     // iterate through prior[i] and make bins according to the name in nevra of pkgs to update
     // required packages
-    //else if pkg structure to be changed || prior build not specified
+    // else if pkg structure to be changed || prior build not specified
     // Recompute optimal packaging structure (Compute partitions, place packages and optimize build)
 
     if let Some(prior_build) = prior_build_metadata {
         tracing::debug!("Keeping old package structure");
 
-        //1st layer is skipped as packing doesn't manage ostree_commit layer
+        // The first layer is the ostree commit, which will always be different for different builds,
+        // so we ignore it. For the remaining layers, extract the components/packages in each one.
         let curr_build: Result<Vec<Vec<String>>> = prior_build
             .layers()
             .iter()
@@ -594,7 +593,7 @@ fn basic_packing<'a>(
             .map(|pkg| pkg.meta.name.to_string())
             .collect();
 
-        //Handle added packages
+        // Added packages are included in the last bin which was reserved space.
        if let Some(last_bin) = curr_build.last_mut() {
             let added = curr_pkgs_set.difference(&prev_pkgs_set);
             last_bin.retain(|name| !name.is_empty());
             last_bin.extend(added.into_iter().cloned());
         } else {
             panic!("No empty last bin for added packages");
         }
 
-        //Handle removed packages
+        // Handle removed packages
         let removed: HashSet<&String> = prev_pkgs_set.difference(&curr_pkgs_set).collect();
         for bin in curr_build.iter_mut() {
             bin.retain(|pkg| !removed.contains(pkg));
         }
 
-        //Handle updated packages
+        // Handle updated packages
         let mut name_to_component: HashMap<String, &ObjectSourceMetaSized> = HashMap::new();
         for component in &components {
             name_to_component
@@ -625,7 +624,7 @@ fn basic_packing<'a>(
             modified_build.push(mod_bin);
         }
 
-        //Verify all packages are included
+        // Verify all packages are included
         let after_processing_pkgs_len: usize = modified_build.iter().map(|b| b.len()).sum();
         assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
         assert!(modified_build.len() <= bin_size.get() as usize);
@@ -634,7 +633,8 @@ fn basic_packing<'a>(
 
     tracing::debug!("Creating new packing structure");
 
-    //Handle trivial case of no pkgs < bins
+    // If there are fewer packages/components than there are bins, then we don't need to do
+    // any "bin packing" at all; just assign a single component to each and we're done.
     if before_processing_pkgs_len < bin_size.get() as usize {
         components.into_iter().for_each(|pkg| r.push(vec![pkg]));
         if before_processing_pkgs_len > 0 {
@@ -656,7 +656,8 @@ fn basic_packing<'a>(
     match components_len_after_max_freq {
         0 => (),
         _ => {
-            //Defining Limits of each bins
+            // Given a total number of bins (layers), compute how many should be assigned to our
+            // partitioning based on size and frequency.
             let limit_ls_bins = 1usize;
             let limit_new_bins = 1usize;
             let _limit_new_pkgs = 0usize;
@@ -683,7 +684,7 @@ fn basic_packing<'a>(
                 .checked_div(limit_ms_bins)
                 .expect("number of bins should be >= 4");
 
-            //Bins assignment
+            // Bins assignment
             for (partition, pkgs) in partitions.iter() {
                 if partition == HIGH_PARTITION {
                     for pkg in pkgs {
@@ -714,15 +715,15 @@ fn basic_packing<'a>(
         }
     }
     tracing::debug!("Bins before unoptimized build: {}", r.len());
-    //Despite allocation certain number of pkgs per bin in MS partitions, the
-    //hard limit of number of MS bins can be exceeded. This is because the pkg_per_bin_ms
-    //is only upper limit and there is no lower limit. Thus, if a partition in MS has only 1 pkg
-    //but pkg_per_bin_ms > 1, then the entire bin will have 1 pkg. This prevents partition
-    //mixing.
+    // Despite allocating a certain number of pkgs per bin in medium-size partitions, the
+    // hard limit on the number of medium-size bins can be exceeded. This is because pkg_per_bin_ms
+    // is only an upper limit and there is no lower limit. Thus, if a partition in medium-size has only 1 pkg
+    // but pkg_per_bin_ms > 1, then the entire bin will have 1 pkg. This prevents partition
+    // mixing.
     //
-    //Addressing MS bins limit breach by mergin internal MS partitions
-    //The partitions in MS are merged beginnign from the end so to not mix hf bins with lf bins. The
-    //bins are kept in this order: hf, mf, lf by design.
+    // Address a medium-size bins limit breach by merging internal MS partitions.
+    // The partitions in medium-size are merged beginning from the end so as not to mix high-frequency bins with low-frequency bins. The
+    // bins are kept in this order: high-frequency, medium-frequency, low-frequency.
    while r.len() > (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins) {
         for i in (limit_ls_bins + limit_hs_bins..r.len() - 1)
             .step_by(2)
@@ -840,7 +841,7 @@ mod test {
 
     #[test]
     fn test_advanced_packing() -> Result<()> {
-        //Step1 : Initial build (Packing sructure computed)
+        // Step 1: Initial build (packing structure computed)
         let contentmeta_v0: Vec<ObjectSourceMetaSized> = vec![
             vec![1, u32::MAX, 100000],
             vec![2, u32::MAX, 99999],
@@ -886,14 +887,14 @@ fn test_advanced_packing() -> Result<()> {
         ];
         assert_eq!(structure, v0_expected_structure);
 
-        //Step 2: Derive packing structure from last build
+        // Step 2: Derive packing structure from last build
         let mut contentmeta_v1: Vec<ObjectSourceMetaSized> = contentmeta_v0;
-        //Upgrade pkg1.0 to 1.1
+        // Upgrade pkg1.0 to 1.1
         contentmeta_v1[0].meta.identifier = RcStr::from("pkg1.1");
-        //Remove pkg7
+        // Remove pkg7
         contentmeta_v1.remove(contentmeta_v1.len() - 1);
-        //Add pkg5
+        // Add pkg5
         contentmeta_v1.push(ObjectSourceMetaSized {
             meta: ObjectSourceMeta {
                 identifier: RcStr::from("pkg5.0"),
@@ -927,12 +928,12 @@ fn test_advanced_packing() -> Result<()> {
 
         assert_eq!(structure_derived, v1_expected_structure);
 
-        //Step 3: Another update on derived where the pkg in the last bin updates
+        // Step 3: Another update on derived where the pkg in the last bin updates
         let mut contentmeta_v2: Vec<ObjectSourceMetaSized> = contentmeta_v1;
-        //Upgrade pkg5.0 to 5.1
+        // Upgrade pkg5.0 to 5.1
         contentmeta_v2[9].meta.identifier = RcStr::from("pkg5.1");
-        //Add pkg12
+        // Add pkg12
         contentmeta_v2.push(ObjectSourceMetaSized {
             meta: ObjectSourceMeta {
                 identifier: RcStr::from("pkg12.0"),

From 967bef2df9485eafabeebd8d97ef7051ed572641 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 15 May 2023 18:07:03 -0400
Subject: [PATCH 562/775] tests: Fix semantic merge conflict

The chunking logic now puts all of these into one layer.
---
 lib/tests/it/main.rs | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index dc7382374..88dcd67de 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -704,14 +704,16 @@ async fn test_container_chunked() -> Result<()> {
             .as_ref()
             .unwrap()
             .starts_with("ostree export"));
-        assert!(layer_history
-            .nth(6)
-            .unwrap()?
-            .1
-            .created_by()
-            .as_ref()
-            .unwrap()
-            .starts_with("testlink"));
+        assert_eq!(
+            layer_history
+                .next()
+                .unwrap()?
+                .1
+                .created_by()
+                .as_ref()
+                .unwrap(),
+            "7 components"
+        );
     }
     let import = imp.import(prep).await.context("Init pull derived").unwrap();
     assert_eq!(import.manifest_digest.as_str(), digest);

From 4a9017085895454048f8ce9c5dc7d8369c26cc94 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 08:55:54 -0400
Subject: [PATCH 563/775] tests: Drop legacy image test

I cleaned up my quay.io namespace and obviously didn't realize/remember
that we were using an image from my namespace in our tests. While we
could recreate it, it doesn't seem worth it. Just drop the test.
--- ci/priv-integration.sh | 9 --------- 1 file changed, 9 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index b78a7e9f1..b01cffbb9 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -10,7 +10,6 @@ mkdir -p /var/tmp sysroot=/run/host # Current stable image fixture image=quay.io/fedora/fedora-coreos:testing-devel -old_image=quay.io/cgwalters/fcos:unchunked imgref=ostree-unverified-registry:${image} stateroot=testos @@ -58,14 +57,6 @@ for img in "${image}"; do fi done -if ostree-ext-cli container image deploy --sysroot "${sysroot}" \ - --stateroot "${stateroot}" --imgref ostree-unverified-registry:"${old_image}" 2>err.txt; then - echo "deployed old image" - exit 1 -fi -grep 'legacy format.*no longer supported' err.txt -echo "ok old image failed to parse" - # Verify we have systemd journal messages nsenter -m -t 1 journalctl _COMM=ostree-ext-cli > logs.txt grep 'layers already present: ' logs.txt From 6c6f9a0dcec3f7ccec988b9cbc60e3e3bb47a1fc Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 May 2023 09:43:22 -0400 Subject: [PATCH 564/775] container: Set creation date to match ostree commit timestamp This is just friendlier because we want the value to be potentially useful. It doesn't harm reproducibility because if the embedded commit timestamp is not reproducible, then the image will already not be reproducible. Closes: https://github.com/ostreedev/ostree-rs-ext/issues/420 --- lib/src/container/encapsulate.rs | 9 +++++++++ lib/tests/it/main.rs | 3 +++ 2 files changed, 12 insertions(+) diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 23e164c02..396ea8a5a 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -9,6 +9,7 @@ use crate::tar as ostree_tar; use anyhow::{anyhow, Context, Result}; use cap_std::fs::Dir; use cap_std_ext::cap_std; +use chrono::NaiveDateTime; use containers_image_proxy::oci_spec; use flate2::Compression; use fn_error_context::context; @@ -196,6 +197,11 @@ fn build_oci( let commit = repo.require_rev(rev)?; let commit = commit.as_str(); let (commit_v, _) = repo.load_commit(commit)?; + let commit_timestamp = NaiveDateTime::from_timestamp_opt( + ostree::commit_get_timestamp(&commit_v).try_into().unwrap(), + 0, + ) + .unwrap(); let commit_subject = commit_v.child_value(3); let commit_subject = commit_subject.str().ok_or_else(|| { anyhow::anyhow!( @@ -208,6 +214,9 @@ fn build_oci( let mut ctrcfg = oci_image::Config::default(); let mut imgcfg = oci_image::ImageConfiguration::default(); + imgcfg.set_created(Some( + commit_timestamp.format("%Y-%m-%dT%H:%M:%SZ").to_string(), + )); let labels = ctrcfg.labels_mut().get_or_insert_with(Default::default); commit_meta_to_labels( diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 88dcd67de..3fb2e8161 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -507,6 +507,9 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { r#""buildsys.checksum": "41af286dc0b172ed2f1ca934fd2278de4a1192302ffa07087cea2682e7d372e3""# )); let cfg = skopeo_inspect_config(&srcoci_imgref.to_string())?; + let creation_time = + chrono::NaiveDateTime::parse_from_str(cfg.created().as_deref().unwrap(), "%+").unwrap(); + assert_eq!(creation_time.timestamp(), 872879442); // unwrap. Unwrap. UnWrap. UNWRAP!!!!!!! 
assert_eq!( cfg.config() From 651f259a873da5d0d9b306c6e74c4b34be4eb605 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 16 May 2023 20:48:23 -0400 Subject: [PATCH 565/775] container: Correctly print size of added layers I noticed the sizes were always exactly the same... --- lib/src/container/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 115912ca8..c97089408 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -307,7 +307,7 @@ impl ManifestDiff { let n_added = &self.added.len(); let removed_size = layersum(&self.removed); let removed_size_str = glib::format_size(removed_size); - let added_size = layersum(&self.removed); + let added_size = layersum(&self.added); let added_size_str = glib::format_size(added_size); println!("Total new layers: {new_total} Size: {new_total_size}"); println!("Removed layers: {n_removed} Size: {removed_size_str}"); From 3f75b9dffb37ddf4a0a4fde5d3fa32d12b50fd79 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 16 May 2023 21:03:22 -0400 Subject: [PATCH 566/775] container: Have ManifestDiff borrow, make fields pub While efficiency here doesn't matter a whole lot, I think it is more elegant to borrow from the input arguments. While we're here, make those fields `pub`. This makes it easier for callers to perform their own computations on the values instead of just being able to invoke `print`. Motivated by potentially using this in rpm-ostree for chunking size analysis. Finally, also pad the output in `print` to line up the fields as it looks prettier. --- ci/priv-integration.sh | 4 +-- lib/src/container/mod.rs | 63 ++++++++++++++++++++++------------------ 2 files changed, 37 insertions(+), 30 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index b01cffbb9..6b5c22aa9 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -67,8 +67,8 @@ ostree-ext-cli container image pull ${sysroot}/ostree/repo ostree-unverified-ima echo "ok pulled from containers storage" ostree-ext-cli container compare ${imgref} ${imgref} > compare.txt -grep "Removed layers: 0 Size: 0 bytes" compare.txt -grep "Added layers: 0 Size: 0 bytes" compare.txt +grep "Removed layers: *0 *Size: 0 bytes" compare.txt +grep "Added layers: *0 *Size: 0 bytes" compare.txt mkdir build cd build diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index c97089408..f4c4a190f 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -252,22 +252,24 @@ impl std::fmt::Display for OstreeImageReference { } } -/// Represent the difference in content between two OCI compliant Images -#[derive(Debug, Default)] -pub struct ManifestDiff { - /// All layers present in the new image. - all_layers_in_new: Vec, +/// Represents the difference in layer/blob content between two OCI image manifests. +#[derive(Debug)] +pub struct ManifestDiff<'from, 'to> { + /// The source container image manifest. + pub from: &'from oci_spec::image::ImageManifest, + /// The target container image manifest. + pub to: &'to oci_spec::image::ImageManifest, /// Layers which are present in the old image but not the new image. - removed: Vec, + pub removed: Vec<&'from oci_spec::image::Descriptor>, /// Layers which are present in the new image but not the old image. - added: Vec, + pub added: Vec<&'to oci_spec::image::Descriptor>, } -impl ManifestDiff { +impl<'from, 'to> ManifestDiff<'from, 'to> { /// Compute the layer difference between two OCI image manifests. 
    pub fn new(
-        src: &oci_spec::image::ImageManifest,
-        dest: &oci_spec::image::ImageManifest,
+        src: &'from oci_spec::image::ImageManifest,
+        dest: &'to oci_spec::image::ImageManifest,
     ) -> Self {
         let src_layers = src
             .layers()
             .iter()
             .map(|l| (l.digest(), l))
             .collect::<HashMap<_, _>>();
@@ -279,39 +281,44 @@ impl ManifestDiff {
             .iter()
             .map(|l| (l.digest(), l))
             .collect::<HashMap<_, _>>();
-        let mut diff = ManifestDiff::default();
+        let mut removed = Vec::new();
+        let mut added = Vec::new();
         for (blobid, &descriptor) in src_layers.iter() {
             if !dest_layers.contains_key(blobid) {
-                diff.removed.push(descriptor.clone());
+                removed.push(descriptor);
             }
         }
         for (blobid, &descriptor) in dest_layers.iter() {
-            diff.all_layers_in_new.push(descriptor.clone());
             if !src_layers.contains_key(blobid) {
-                diff.added.push(descriptor.clone());
+                added.push(descriptor);
             }
         }
-        diff
+        ManifestDiff {
+            from: src,
+            to: dest,
+            removed,
+            added,
+        }
     }
 }
 
-impl ManifestDiff {
+impl<'from, 'to> ManifestDiff<'from, 'to> {
     /// Prints the total, removed and added content between two OCI images
     pub fn print(&self) {
-        let layersum = |layers: &Vec<oci_spec::image::Descriptor>| -> u64 {
-            layers.iter().map(|layer| layer.size() as u64).sum()
-        };
-        let new_total = &self.all_layers_in_new.len();
-        let new_total_size = glib::format_size(layersum(&self.all_layers_in_new));
-        let n_removed = &self.removed.len();
-        let n_added = &self.added.len();
-        let removed_size = layersum(&self.removed);
+        fn layersum<'a, I: Iterator<Item = &'a oci_spec::image::Descriptor>>(layers: I) -> u64 {
+            layers.map(|layer| layer.size() as u64).sum()
+        }
+        let new_total = self.to.layers().len();
+        let new_total_size = glib::format_size(layersum(self.to.layers().iter()));
+        let n_removed = self.removed.len();
+        let n_added = self.added.len();
+        let removed_size = layersum(self.removed.iter().copied());
         let removed_size_str = glib::format_size(removed_size);
-        let added_size = layersum(&self.added);
+        let added_size = layersum(self.added.iter().copied());
         let added_size_str = glib::format_size(added_size);
-        println!("Total new layers: {new_total}  Size: {new_total_size}");
-        println!("Removed layers: {n_removed}  Size: {removed_size_str}");
-        println!("Added layers: {n_added}  Size: {added_size_str}");
+        println!("Total new layers: {new_total:<4}  Size: {new_total_size}");
+        println!("Removed layers:   {n_removed:<4}  Size: {removed_size_str}");
+        println!("Added layers:     {n_added:<4}  Size: {added_size_str}");
     }
 }

From 7b0bfb7551df807d0d7e85d134ffa5a5633bdc46 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 08:42:37 -0400
Subject: [PATCH 567/775] Ensure ManifestDiff fields are sorted, add test case

I went to add a test case for ManifestDiff but doing so encountered
the fact that the order of elements in `added` and `removed` is
random, because we use `HashMap` to process them.

Sort them both by digest, and add a test case.
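
For illustration, a minimal usage sketch of the reworked API (mirroring the new
test case added below; the fixture file names are the ones this patch introduces):

    use containers_image_proxy::oci_spec::image::ImageManifest;
    use ostree_ext::container::ManifestDiff;

    let a: ImageManifest = serde_json::from_str(include_str!("fixtures/manifest1.json")).unwrap();
    let b: ImageManifest = serde_json::from_str(include_str!("fixtures/manifest2.json")).unwrap();
    let diff = ManifestDiff::new(&a, &b);
    // `added` and `removed` are now public and sorted by digest, so callers
    // can run their own analyses instead of only being able to call print().
    assert_eq!(diff.from, &a);
    diff.print();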
--- lib/src/container/mod.rs | 2 ++ lib/tests/it/fixtures/manifest1.json | 1 + lib/tests/it/fixtures/manifest2.json | 1 + lib/tests/it/main.rs | 31 +++++++++++++++++++++++++++- 4 files changed, 34 insertions(+), 1 deletion(-) create mode 100644 lib/tests/it/fixtures/manifest1.json create mode 100644 lib/tests/it/fixtures/manifest2.json diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index f4c4a190f..4d701b95c 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -288,11 +288,13 @@ impl<'from, 'to> ManifestDiff<'from, 'to> { removed.push(descriptor); } } + removed.sort_by(|a, b| a.digest().cmp(b.digest())); for (blobid, &descriptor) in dest_layers.iter() { if !src_layers.contains_key(blobid) { added.push(descriptor); } } + added.sort_by(|a, b| a.digest().cmp(b.digest())); ManifestDiff { from: src, to: dest, diff --git a/lib/tests/it/fixtures/manifest1.json b/lib/tests/it/fixtures/manifest1.json new file mode 100644 index 000000000..52f09f286 --- /dev/null +++ b/lib/tests/it/fixtures/manifest1.json @@ -0,0 +1 @@ +{"schemaVersion":2,"config":{"mediaType":"application/vnd.oci.image.config.v1+json","digest":"sha256:f3b50d0849a19894aa27ca2346a78efdacf2c56bdc2a3493672d2a819990fedf","size":9301},"layers":[{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:75f4abe8518ec55cb8bf0d358a737084f38e2c030a28651d698c0b7569d680a6","size":1387849},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:777cb841d2803f775a36fba62bcbfe84b2a1e0abc27cf995961b63c3d218a410","size":48676116},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:1179dc1e2994ec0466787ec43967db9016b4b93c602bb9675d7fe4c0993366ba","size":124705297},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:74555b3730c4c0f77529ead433db58e038070666b93a5cc0da262d7b8debff0e","size":38743650},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:0ff8b1fdd38e5cfb6390024de23ba4b947cd872055f62e70f2c21dad5c928925","size":77161948},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:76b83eea62b7b93200a056b5e0201ef486c67f1eeebcf2c7678ced4d614cece2","size":21970157},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:d85c742f69904cb8dbf98abca4724d364d91792fcf8b5f5634ab36dda162bfc4","size":59797135},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:167e5df36d0fcbed876ca90c1ed1e6c79b5e2bdaba5eae74ab86444654b19eff","size":49410348},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:b34384ba76fa1e335cc8d75522508d977854f2b423f8aceb50ca6dfc2f609a99","size":21714783},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:7bf2d65ebf222ee10115284abf6909b1a3da0f3bd6d8d849e30723636b7145cb","size":15264848},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:a75bbf55d8de4dbd54e429e16fbd46688717faf4ea823c94676529cc2525fd5f","size":14373701},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:cf728677fa8c84bfcfd71e17953062421538d492d7fbfdd0dbce8eb1e5f6eec3","size":8400473},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:caff60c1ef085fb500c94230ccab9338e531578635070230b1413b439fd53f8f","size":6914489},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:65ca8f9bddaa720b74c5a7401bf273e93eba6b3b855a62422a8258373e0b1ae0","size":8294965},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:387b
ab4fcb713e9691617a645b6af2b7ad29fe5e009b0b0d3215645ef315481c","size":6600369},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:f63dcde5a664dad3eb3321bbcf2913d9644d16561a67c86ab61d814c1462583d","size":16869027},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8bcd90242651342fbd2ed5ca3e60d03de90fdd28c3a9f634329f6e1c21c79718","size":5735283},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:cb65c21a0659b5b826881280556995a7ca4818c2b9b7a89e31d816a996fa8640","size":4528663},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:5187f51b62f4a2e82198a75afcc623a0323d4804fa7848e2e0acb30d77b8d9ab","size":5266030},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:bfef79d6d35378fba9093083ff6bd7b5ed9f443f87517785e6ff134dc8d08c6a","size":4316135},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:1cf332fd50b382af7941d6416994f270c894e9d60fb5c6cecf25de887673bbcb","size":3914655},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:e0d80be6e71bfae398f06f7a7e3b224290f3dde7544c8413f922934abeb1f599","size":2441858},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:48ff87e7a7af41d7139c5230e2e939aa97cafb1f62a114825bda5f5904e04a0e","size":3818782},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8bcc652ccaa27638bd5bd2d7188053f1736586afbae87b3952e9211c773e3563","size":3885971},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:d83d9388b8c8c1e7c97b6b18f5107b74354700ebce9da161ccb73156a2c54a2e","size":3442642},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:efc465ae44a18ee395e542eb97c8d1fc21bf9d5fb49244ba4738e9bf48bfd3dc","size":3066348},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:c5c471cce08aa9cc7d96884a9e1981b7bb67ee43524af47533f50a8ddde7a83d","size":909923},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8956cd951abc481ba364cf8ef5deca7cc9185b59ed95ae40b52e42afdc271d8e","size":3553645},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:5b0963a6c89d595b5c4786e2f3ce0bc168a262efab74dfce3d7c8d1063482c60","size":1495301},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:bf2df295da2716291f9dd4707158bca218b4a7920965955a4808b824c1bee2b6","size":3063142},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:19b2ea8d63794b8249960d581216ae1ccb80f8cfe518ff8dd1f12d65d19527a5","size":8109718},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:420636df561ccc835ef9665f41d4bc91c5f00614a61dca266af2bcd7bee2cc25","size":3003935},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:5ae67caf0978d82848d47ff932eee83a1e5d2581382c9c47335f69c9d7acc180","size":2468557},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:4f4b8bb8463dc74bb7f32eee78d02b71f61a322967b6d6cbb29829d262376f74","size":2427605},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:69373f86b83e6e5a962de07f40ff780a031b42d2568ffbb8b3c36de42cc90dec","size":2991782},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:2d05c2f993f9761946701da37f45fc573a2db8467f92b3f0d356f5f7adaf229e","size":3085765},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:41925843e5c965165bedc9c8124b96038f08a89c95ba94603a5f782dc813f0a8","size":2724309},{
"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:a8c39f2998073e0e8b55fb88ccd68d2621a0fb6e31a528fd4790a1c90f8508a9","size":2512079},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:b905f801d092faba0c155597dd1303fa8c0540116af59c111ed7744e486ed63b","size":2341122},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:4f46b58b37828fa71fa5d7417a8ca7a62761cc6a72eb1592943572fc2446b054","size":2759344},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:3fbae92ecc64cf253b643a0e75b56514dc694451f163b47fb4e15af373238e10","size":2539288},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:744dd4a3ec521668942661cf1f184eb8f07f44025ce1aa35d5072ad9d72946fe","size":2415870},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:6c74c0a05a36bddabef1fdfae365ff87a9c5dd1ec7345d9e20f7f8ab04b39fc6","size":2145078},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:910ff6f93303ebedde3459f599b06d7b70d8f0674e3fe1d6623e3af809245cc4","size":5098511},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:2752e2f62f38fea3a390f111d673d2529dbf929f6c67ec7ef4359731d1a7edd8","size":1051999},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:5065c3aac5fcc3c1bde50a19d776974353301f269a936dd2933a67711af3b703","size":2713694},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8bf6993eea50bbd8b448e6fd719f83c82d1d40b623f2c415f7727e766587ea83","size":1686714},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:630221744f0f9632f4f34f74241e65f79e78f938100266a119113af1ce10a1c5","size":2061581},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:e7e2eae322bca0ffa01bb2cae72288507bef1a11ad51f99d0a4faba1b1e000b9","size":2079706},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:bb6374635385b0c2539c284b137d831bd45fbe64b5e49aee8ad92d14c156a41b","size":3142398},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:40493ecd0f9ab499a2bec715415c3a98774ea6d1c9c01eb30a6b56793204a02d","size":69953187}]} \ No newline at end of file diff --git a/lib/tests/it/fixtures/manifest2.json b/lib/tests/it/fixtures/manifest2.json new file mode 100644 index 000000000..102c40170 --- /dev/null +++ b/lib/tests/it/fixtures/manifest2.json @@ -0,0 +1 @@ 
+{"schemaVersion":2,"config":{"mediaType":"application/vnd.oci.image.config.v1+json","digest":"sha256:ca0f7e342503b45a1110aba49177e386242e9192ab1742a95998b6b99c2a0150","size":9301},"layers":[{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:bca674ffe2ebe92b9e952bc807b9f1cd0d559c057e95ac81f3bae12a9b96b53e","size":1387854},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:777cb841d2803f775a36fba62bcbfe84b2a1e0abc27cf995961b63c3d218a410","size":48676116},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:1179dc1e2994ec0466787ec43967db9016b4b93c602bb9675d7fe4c0993366ba","size":124705297},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:74555b3730c4c0f77529ead433db58e038070666b93a5cc0da262d7b8debff0e","size":38743650},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:0b5d930ffc92d444b0a7b39beed322945a3038603fbe2a56415a6d02d598df1f","size":77162517},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8d12d20c2d1c8f05c533a2a1b27a457f25add8ad38382523660c4093f180887b","size":21970100},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:d85c742f69904cb8dbf98abca4724d364d91792fcf8b5f5634ab36dda162bfc4","size":59797135},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:167e5df36d0fcbed876ca90c1ed1e6c79b5e2bdaba5eae74ab86444654b19eff","size":49410348},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:b34384ba76fa1e335cc8d75522508d977854f2b423f8aceb50ca6dfc2f609a99","size":21714783},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:7bf2d65ebf222ee10115284abf6909b1a3da0f3bd6d8d849e30723636b7145cb","size":15264848},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:a75bbf55d8de4dbd54e429e16fbd46688717faf4ea823c94676529cc2525fd5f","size":14373701},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:cf728677fa8c84bfcfd71e17953062421538d492d7fbfdd0dbce8eb1e5f6eec3","size":8400473},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:caff60c1ef085fb500c94230ccab9338e531578635070230b1413b439fd53f8f","size":6914489},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:65ca8f9bddaa720b74c5a7401bf273e93eba6b3b855a62422a8258373e0b1ae0","size":8294965},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:387bab4fcb713e9691617a645b6af2b7ad29fe5e009b0b0d3215645ef315481c","size":6600369},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:f63dcde5a664dad3eb3321bbcf2913d9644d16561a67c86ab61d814c1462583d","size":16869027},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8bcd90242651342fbd2ed5ca3e60d03de90fdd28c3a9f634329f6e1c21c79718","size":5735283},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:cb65c21a0659b5b826881280556995a7ca4818c2b9b7a89e31d816a996fa8640","size":4528663},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:5187f51b62f4a2e82198a75afcc623a0323d4804fa7848e2e0acb30d77b8d9ab","size":5266030},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:bfef79d6d35378fba9093083ff6bd7b5ed9f443f87517785e6ff134dc8d08c6a","size":4316135},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:1cf332fd50b382af7941d6416994f270c894e9d60fb5c6cecf25de887673bbcb","size":3914655},{"mediaType":"application/vnd.oci.
image.layer.v1.tar+gzip","digest":"sha256:e0d80be6e71bfae398f06f7a7e3b224290f3dde7544c8413f922934abeb1f599","size":2441858},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:48ff87e7a7af41d7139c5230e2e939aa97cafb1f62a114825bda5f5904e04a0e","size":3818782},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8bcc652ccaa27638bd5bd2d7188053f1736586afbae87b3952e9211c773e3563","size":3885971},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:d83d9388b8c8c1e7c97b6b18f5107b74354700ebce9da161ccb73156a2c54a2e","size":3442642},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:efc465ae44a18ee395e542eb97c8d1fc21bf9d5fb49244ba4738e9bf48bfd3dc","size":3066348},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:c5c471cce08aa9cc7d96884a9e1981b7bb67ee43524af47533f50a8ddde7a83d","size":909923},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8956cd951abc481ba364cf8ef5deca7cc9185b59ed95ae40b52e42afdc271d8e","size":3553645},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:5b0963a6c89d595b5c4786e2f3ce0bc168a262efab74dfce3d7c8d1063482c60","size":1495301},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:bf2df295da2716291f9dd4707158bca218b4a7920965955a4808b824c1bee2b6","size":3063142},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:19b2ea8d63794b8249960d581216ae1ccb80f8cfe518ff8dd1f12d65d19527a5","size":8109718},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:420636df561ccc835ef9665f41d4bc91c5f00614a61dca266af2bcd7bee2cc25","size":3003935},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:5ae67caf0978d82848d47ff932eee83a1e5d2581382c9c47335f69c9d7acc180","size":2468557},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:4f4b8bb8463dc74bb7f32eee78d02b71f61a322967b6d6cbb29829d262376f74","size":2427605},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:69373f86b83e6e5a962de07f40ff780a031b42d2568ffbb8b3c36de42cc90dec","size":2991782},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:2d05c2f993f9761946701da37f45fc573a2db8467f92b3f0d356f5f7adaf229e","size":3085765},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:41925843e5c965165bedc9c8124b96038f08a89c95ba94603a5f782dc813f0a8","size":2724309},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:a8c39f2998073e0e8b55fb88ccd68d2621a0fb6e31a528fd4790a1c90f8508a9","size":2512079},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:b905f801d092faba0c155597dd1303fa8c0540116af59c111ed7744e486ed63b","size":2341122},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:4f46b58b37828fa71fa5d7417a8ca7a62761cc6a72eb1592943572fc2446b054","size":2759344},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:3fbae92ecc64cf253b643a0e75b56514dc694451f163b47fb4e15af373238e10","size":2539288},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:744dd4a3ec521668942661cf1f184eb8f07f44025ce1aa35d5072ad9d72946fe","size":2415870},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:6c74c0a05a36bddabef1fdfae365ff87a9c5dd1ec7345d9e20f7f8ab04b39fc6","size":2145078},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:910ff6f93303ebedde3459f599b06d7b70d8f0
674e3fe1d6623e3af809245cc4","size":5098511},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:2752e2f62f38fea3a390f111d673d2529dbf929f6c67ec7ef4359731d1a7edd8","size":1051999},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:5065c3aac5fcc3c1bde50a19d776974353301f269a936dd2933a67711af3b703","size":2713694},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:8bf6993eea50bbd8b448e6fd719f83c82d1d40b623f2c415f7727e766587ea83","size":1686714},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:630221744f0f9632f4f34f74241e65f79e78f938100266a119113af1ce10a1c5","size":2061581},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:e7e2eae322bca0ffa01bb2cae72288507bef1a11ad51f99d0a4faba1b1e000b9","size":2079706},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:bb6374635385b0c2539c284b137d831bd45fbe64b5e49aee8ad92d14c156a41b","size":3142398},{"mediaType":"application/vnd.oci.image.layer.v1.tar+gzip","digest":"sha256:cb9b8a4ac4a8df62df79e6f0348a14b3ec239816d42985631c88e76d4e3ff815","size":69952385}]} \ No newline at end of file diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 88dcd67de..01f6f98ee 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -2,10 +2,11 @@ use anyhow::{Context, Result}; use camino::Utf8Path; use cap_std::fs::{Dir, DirBuilder}; use containers_image_proxy::oci_spec; +use containers_image_proxy::oci_spec::image::ImageManifest; use once_cell::sync::Lazy; use ostree::cap_std; use ostree_ext::chunking::ObjectMetaSized; -use ostree_ext::container::store; +use ostree_ext::container::{store, ManifestDiff}; use ostree_ext::container::{ Config, ExportOpts, ImageReference, OstreeImageReference, SignatureSource, Transport, }; @@ -1377,3 +1378,31 @@ d /usr/share assert_eq!(diff.removed_files.iter().next().unwrap(), "/bin/bash"); Ok(()) } + +#[test] +fn test_manifest_diff() { + let a: ImageManifest = serde_json::from_str(include_str!("fixtures/manifest1.json")).unwrap(); + let b: ImageManifest = serde_json::from_str(include_str!("fixtures/manifest2.json")).unwrap(); + + let d = ManifestDiff::new(&a, &b); + assert_eq!(d.from, &a); + assert_eq!(d.to, &b); + assert_eq!(d.added.len(), 4); + assert_eq!( + d.added[0].digest(), + "sha256:0b5d930ffc92d444b0a7b39beed322945a3038603fbe2a56415a6d02d598df1f" + ); + assert_eq!( + d.added[3].digest(), + "sha256:cb9b8a4ac4a8df62df79e6f0348a14b3ec239816d42985631c88e76d4e3ff815" + ); + assert_eq!(d.removed.len(), 4); + assert_eq!( + d.removed[0].digest(), + "sha256:0ff8b1fdd38e5cfb6390024de23ba4b947cd872055f62e70f2c21dad5c928925" + ); + assert_eq!( + d.removed[3].digest(), + "sha256:76b83eea62b7b93200a056b5e0201ef486c67f1eeebcf2c7678ced4d614cece2" + ); +} From 697afe1fbb7de95f34c6efbacd1291eb7013aeb8 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 May 2023 10:38:24 -0400 Subject: [PATCH 568/775] chunking: Better separate prior build from new path Move the declarations of these variables down to the part of code where they're used. 
---
 lib/src/chunking.rs | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 79ca9322a..c8de4694e 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -547,8 +547,6 @@ fn basic_packing<'a>(
     bin_size: NonZeroU32,
     prior_build_metadata: Option<&oci_spec::image::ImageManifest>,
 ) -> Result<Vec<Vec<&'a ObjectSourceMetaSized>>> {
-    let mut r = Vec::new();
-    let mut components: Vec<_> = components.iter().collect();
     let before_processing_pkgs_len = components.len();
 
     // If the current rpm-ostree commit to be encapsulated is not the one in which packing structure changes, then
@@ -610,7 +608,7 @@ fn basic_packing<'a>(
 
         // Handle updated packages
         let mut name_to_component: HashMap<String, &ObjectSourceMetaSized> = HashMap::new();
-        for component in &components {
+        for component in components.iter() {
             name_to_component
                 .entry(component.meta.name.to_string())
                 .or_insert(component);
@@ -633,6 +631,8 @@ fn basic_packing<'a>(
 
     tracing::debug!("Creating new packing structure");
 
+    let mut r = Vec::new();
+
     // If there are fewer packages/components than there are bins, then we don't need to do
     // any "bin packing" at all; just assign a single component to each and we're done.
     if before_processing_pkgs_len < bin_size.get() as usize {
@@ -644,6 +644,7 @@ fn basic_packing<'a>(
         return Ok(r);
     }
 
+    let mut components: Vec<_> = components.iter().collect();
     let mut max_freq_components: Vec<&ObjectSourceMetaSized> = Vec::new();
     components.retain(|pkg| {
         let retain: bool = pkg.meta.change_frequency != u32::MAX;

From 229ac7a3ca8e7a59bcfde6b3475ed4c9a8c12b84 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 10:42:30 -0400
Subject: [PATCH 569/775] chunking: Also more cleanly separate "enough bins" path

Use `map()` for the first case to keep things more self-contained.
---
 lib/src/chunking.rs | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index c8de4694e..7b2268408 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -631,12 +631,10 @@ fn basic_packing<'a>(
 
     tracing::debug!("Creating new packing structure");
 
-    let mut r = Vec::new();
-
     // If there are fewer packages/components than there are bins, then we don't need to do
     // any "bin packing" at all; just assign a single component to each and we're done.
     if before_processing_pkgs_len < bin_size.get() as usize {
-        components.into_iter().for_each(|pkg| r.push(vec![pkg]));
+        let mut r = components.iter().map(|pkg| vec![pkg]).collect::<Vec<_>>();
         if before_processing_pkgs_len > 0 {
             let new_pkgs_bin: Vec<&ObjectSourceMetaSized> = Vec::new();
             r.push(new_pkgs_bin);
@@ -644,6 +643,7 @@ fn basic_packing<'a>(
         return Ok(r);
     }
 
     let mut components: Vec<_> = components.iter().collect();
+    let mut r = Vec::new();
     let mut max_freq_components: Vec<&ObjectSourceMetaSized> = Vec::new();
     components.retain(|pkg| {
         let retain: bool = pkg.meta.change_frequency != u32::MAX;

From 67fce07032654765fa227397b41732ab6ce7a854 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 10:45:26 -0400
Subject: [PATCH 570/775] chunking: Move "prior build" bits into function

The logic here is really quite different; I think it's clearer to have
two separate functions. This allows de-indenting the logic for the
prior build path.
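
(For reference, the heart of the moved function is plain set differencing over
package names; a simplified sketch of the idea, not the exact code:

    use std::collections::HashSet;

    let prev: HashSet<&str> = HashSet::from(["bash", "kernel", "glibc"]);
    let curr: HashSet<&str> = HashSet::from(["bash", "kernel", "systemd"]);
    let added: Vec<_> = curr.difference(&prev).collect();   // systemd
    let removed: Vec<_> = prev.difference(&curr).collect(); // glibc

The real implementation then maps the surviving names back to their
ObjectSourceMetaSized components.)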
---
 lib/src/chunking.rs | 164 +++++++++++++++++++++++---------------------
 1 file changed, 87 insertions(+), 77 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 7b2268408..490df07d3 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -527,6 +527,91 @@ fn get_partitions_with_threshold(
     Some(partitions)
 }
 
+/// If the current rpm-ostree commit to be encapsulated is not the one in which packing structure changes, then
+/// Flatten out prior_build_metadata to view all the packages in prior build as a single vec
+/// Compare the flattened vector to components to see if pkgs added, updated,
+/// removed or kept same
+/// if pkgs added, then add them to the last bin of prior
+/// if pkgs removed, then remove them from the prior[i]
+/// iterate through prior[i] and make bins according to the name in nevra of pkgs to update
+/// required packages
+/// else if pkg structure to be changed || prior build not specified
+/// Recompute optimal packaging structure (Compute partitions, place packages and optimize build)
+fn basic_packing_with_prior_build<'a>(
+    components: &'a [ObjectSourceMetaSized],
+    bin_size: NonZeroU32,
+    prior_build: &oci_spec::image::ImageManifest,
+) -> Result<Vec<Vec<&'a ObjectSourceMetaSized>>> {
+    let before_processing_pkgs_len = components.len();
+
+    tracing::debug!("Keeping old package structure");
+
+    // The first layer is the ostree commit, which will always be different for different builds,
+    // so we ignore it. For the remaining layers, extract the components/packages in each one.
+    let curr_build: Result<Vec<Vec<String>>> = prior_build
+        .layers()
+        .iter()
+        .skip(1)
+        .map(|layer| -> Result<_> {
+            let annotation_layer = layer
+                .annotations()
+                .as_ref()
+                .and_then(|annos| annos.get(CONTENT_ANNOTATION))
+                .ok_or_else(|| anyhow!("Missing {CONTENT_ANNOTATION} on prior build"))?;
+            Ok(annotation_layer.split(',').map(ToOwned::to_owned).collect())
+        })
+        .collect();
+    let mut curr_build = curr_build?;
+
+    // View the packages as unordered sets for lookups and differencing
+    let prev_pkgs_set: HashSet<String> = curr_build
+        .iter()
+        .flat_map(|v| v.iter().cloned())
+        .filter(|name| !name.is_empty())
+        .collect();
+    let curr_pkgs_set: HashSet<String> = components
+        .iter()
+        .map(|pkg| pkg.meta.name.to_string())
+        .collect();
+
+    // Added packages are included in the last bin which was reserved space.
+    if let Some(last_bin) = curr_build.last_mut() {
+        let added = curr_pkgs_set.difference(&prev_pkgs_set);
+        last_bin.retain(|name| !name.is_empty());
+        last_bin.extend(added.into_iter().cloned());
+    } else {
+        panic!("No empty last bin for added packages");
+    }
+
+    // Handle removed packages
+    let removed: HashSet<&String> = prev_pkgs_set.difference(&curr_pkgs_set).collect();
+    for bin in curr_build.iter_mut() {
+        bin.retain(|pkg| !removed.contains(pkg));
+    }
+
+    // Handle updated packages
+    let mut name_to_component: HashMap<String, &ObjectSourceMetaSized> = HashMap::new();
+    for component in components.iter() {
+        name_to_component
+            .entry(component.meta.name.to_string())
+            .or_insert(component);
+    }
+    let mut modified_build: Vec<Vec<&ObjectSourceMetaSized>> = Vec::new();
+    for bin in curr_build {
+        let mut mod_bin = Vec::new();
+        for pkg in bin {
+            mod_bin.push(name_to_component[&pkg]);
+        }
+        modified_build.push(mod_bin);
+    }
+
+    // Verify all packages are included
+    let after_processing_pkgs_len: usize = modified_build.iter().map(|b| b.len()).sum();
+    assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
+    assert!(modified_build.len() <= bin_size.get() as usize);
+    Ok(modified_build)
+}
+
 /// Given a set of components with size metadata (e.g. boxes of a certain size)
 /// and a number of bins (possible container layers) to use, determine which components
@@ -549,84 +634,9 @@ fn basic_packing<'a>(
 ) -> Result<Vec<Vec<&'a ObjectSourceMetaSized>>> {
     let before_processing_pkgs_len = components.len();
 
-    // If the current rpm-ostree commit to be encapsulated is not the one in which packing structure changes, then
-    // Flatten out prior_build_metadata to view all the packages in prior build as a single vec
-    // Compare the flattened vector to components to see if pkgs added, updated,
-    // removed or kept same
-    // if pkgs added, then add them to the last bin of prior
-    // if pkgs removed, then remove them from the prior[i]
-    // iterate through prior[i] and make bins according to the name in nevra of pkgs to update
-    // required packages
-    // else if pkg structure to be changed || prior build not specified
-    // Recompute optimal packaging structure (Compute partitions, place packages and optimize build)
-
+    // If we have a prior build, then use that
     if let Some(prior_build) = prior_build_metadata {
-        tracing::debug!("Keeping old package structure");
-
-        // The first layer is the ostree commit, which will always be different for different builds,
-        // so we ignore it. For the remaining layers, extract the components/packages in each one.
-        let curr_build: Result<Vec<Vec<String>>> = prior_build
-            .layers()
-            .iter()
-            .skip(1)
-            .map(|layer| -> Result<_> {
-                let annotation_layer = layer
-                    .annotations()
-                    .as_ref()
-                    .and_then(|annos| annos.get(CONTENT_ANNOTATION))
-                    .ok_or_else(|| anyhow!("Missing {CONTENT_ANNOTATION} on prior build"))?;
-                Ok(annotation_layer.split(',').map(ToOwned::to_owned).collect())
-            })
-            .collect();
-        let mut curr_build = curr_build?;
-
-        // View the packages as unordered sets for lookups and differencing
-        let prev_pkgs_set: HashSet<String> = curr_build
-            .iter()
-            .flat_map(|v| v.iter().cloned())
-            .filter(|name| !name.is_empty())
-            .collect();
-        let curr_pkgs_set: HashSet<String> = components
-            .iter()
-            .map(|pkg| pkg.meta.name.to_string())
-            .collect();
-
-        // Added packages are included in the last bin which was reserved space.
-        if let Some(last_bin) = curr_build.last_mut() {
-            let added = curr_pkgs_set.difference(&prev_pkgs_set);
-            last_bin.retain(|name| !name.is_empty());
-            last_bin.extend(added.into_iter().cloned());
-        } else {
-            panic!("No empty last bin for added packages");
-        }
-
-        // Handle removed packages
-        let removed: HashSet<&String> = prev_pkgs_set.difference(&curr_pkgs_set).collect();
-        for bin in curr_build.iter_mut() {
-            bin.retain(|pkg| !removed.contains(pkg));
-        }
-
-        // Handle updated packages
-        let mut name_to_component: HashMap<String, &ObjectSourceMetaSized> = HashMap::new();
-        for component in components.iter() {
-            name_to_component
-                .entry(component.meta.name.to_string())
-                .or_insert(component);
-        }
-        let mut modified_build: Vec<Vec<&ObjectSourceMetaSized>> = Vec::new();
-        for bin in curr_build {
-            let mut mod_bin = Vec::new();
-            for pkg in bin {
-                mod_bin.push(name_to_component[&pkg]);
-            }
-            modified_build.push(mod_bin);
-        }
-
-        // Verify all packages are included
-        let after_processing_pkgs_len: usize = modified_build.iter().map(|b| b.len()).sum();
-        assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
-        assert!(modified_build.len() <= bin_size.get() as usize);
-        return Ok(modified_build);
+        return basic_packing_with_prior_build(components, bin_size, prior_build);
     }
 
     tracing::debug!("Creating new packing structure");

From 544e98e6e2858d875f977948d02a91657f3b5673 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 10:46:57 -0400
Subject: [PATCH 571/775] chunking: Use a combinator to compute sum

---
 lib/src/chunking.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 490df07d3..618ccaa9c 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -764,10 +764,7 @@ fn basic_packing<'a>(
     let new_pkgs_bin: Vec<&ObjectSourceMetaSized> = Vec::new();
     r.push(new_pkgs_bin);
 
-    let mut after_processing_pkgs_len = 0;
-    r.iter().for_each(|bin| {
-        after_processing_pkgs_len += bin.len();
-    });
+    let after_processing_pkgs_len = r.iter().map(|b| b.len()).sum::<usize>();
     assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
     assert!(r.len() <= bin_size.get() as usize);
     Ok(r)

From c45591d99bf5a7ef493eef46a7b33d2586aee6c0 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 10:55:27 -0400
Subject: [PATCH 572/775] chunking: Use `.partition()`

It's made for this situation.
-        if let Some(last_bin) = curr_build.last_mut() {
-            let added = curr_pkgs_set.difference(&prev_pkgs_set);
-            last_bin.retain(|name| !name.is_empty());
-            last_bin.extend(added.into_iter().cloned());
-        } else {
-            panic!("No empty last bin for added packages");
-        }
-
-        // Handle removed packages
-        let removed: HashSet<&String> = prev_pkgs_set.difference(&curr_pkgs_set).collect();
-        for bin in curr_build.iter_mut() {
-            bin.retain(|pkg| !removed.contains(pkg));
-        }
-
-        // Handle updated packages
-        let mut name_to_component: HashMap<String, &ObjectSourceMetaSized> = HashMap::new();
-        for component in components.iter() {
-            name_to_component
-                .entry(component.meta.name.to_string())
-                .or_insert(component);
-        }
-        let mut modified_build: Vec<Vec<&ObjectSourceMetaSized>> = Vec::new();
-        for bin in curr_build {
-            let mut mod_bin = Vec::new();
-            for pkg in bin {
-                mod_bin.push(name_to_component[&pkg]);
-            }
-            modified_build.push(mod_bin);
-        }
-
-        // Verify all packages are included
-        let after_processing_pkgs_len: usize = modified_build.iter().map(|b| b.len()).sum();
-        assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
-        assert!(modified_build.len() <= bin_size.get() as usize);
-        return Ok(modified_build);
+        return basic_packing_with_prior_build(components, bin_size, prior_build);
     }
 
     tracing::debug!("Creating new packing structure");

From 544e98e6e2858d875f977948d02a91657f3b5673 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 10:46:57 -0400
Subject: [PATCH 571/775] chunking: Use a combinator to compute sum

---
 lib/src/chunking.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 490df07d3..618ccaa9c 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -764,10 +764,7 @@ fn basic_packing<'a>(
 
     let new_pkgs_bin: Vec<&ObjectSourceMetaSized> = Vec::new();
     r.push(new_pkgs_bin);
-    let mut after_processing_pkgs_len = 0;
-    r.iter().for_each(|bin| {
-        after_processing_pkgs_len += bin.len();
-    });
+    let after_processing_pkgs_len = r.iter().map(|b| b.len()).sum::<usize>();
     assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len);
     assert!(r.len() <= bin_size.get() as usize);
     Ok(r)

From c45591d99bf5a7ef493eef46a7b33d2586aee6c0 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 10:55:27 -0400
Subject: [PATCH 572/775] chunking: Use `.partition()`

It's made for this situation.

---
 lib/src/chunking.rs | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 618ccaa9c..7e2bf46e0 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -652,16 +652,11 @@ fn basic_packing<'a>(
         return Ok(r);
     }
 
-    let mut components: Vec<_> = components.iter().collect();
     let mut r = Vec::new();
-    let mut max_freq_components: Vec<&ObjectSourceMetaSized> = Vec::new();
-    components.retain(|pkg| {
-        let retain: bool = pkg.meta.change_frequency != u32::MAX;
-        if !retain {
-            max_freq_components.push(pkg);
-        }
-        retain
-    });
+    // Split off the components which are "max frequency".
+    let (components, max_freq_components) = components
+        .iter()
+        .partition::<Vec<_>, _>(|pkg| pkg.meta.change_frequency != u32::MAX);
     let components_len_after_max_freq = components.len();
     match components_len_after_max_freq {
         0 => (),

From ab2026863a06986325c65fbb446853d80797f664 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 10:58:22 -0400
Subject: [PATCH 573/775] chunking: Have partitioning borrow component vec

Prep for further changes.
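`Iterator::partition` consumes an iterator and splits it into two collections by predicate, which is exactly the retain-and-push dance deleted in PATCH 572 above. A minimal self-contained illustration; the `Component` struct here is an invented stand-in for the crate's `ObjectSourceMetaSized`.

```rust
#[derive(Debug)]
struct Component {
    name: &'static str,
    change_frequency: u32,
}

fn main() {
    let components = [
        Component { name: "kernel", change_frequency: 3 },
        Component { name: "generated-config", change_frequency: u32::MAX },
        Component { name: "bash", change_frequency: 7 },
    ];
    // One pass, no mutation: items the predicate accepts land in the first
    // Vec, everything else in the second.
    let (normal, max_freq): (Vec<&Component>, Vec<&Component>) = components
        .iter()
        .partition(|c| c.change_frequency != u32::MAX);
    assert_eq!(normal.len(), 2);
    assert_eq!(max_freq[0].name, "generated-config");
}
```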
---
 lib/src/chunking.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 7e2bf46e0..b5183f4ad 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -422,11 +422,11 @@ fn packing_size(packing: &[Vec<&ObjectSourceMetaSized>]) -> u64 {
 /// The medium partition from the previous step is less aggressively
 /// classified by using mean for both size and frequency
 /// Note: Assumes components is sorted by descending size
-fn get_partitions_with_threshold(
-    components: Vec<&ObjectSourceMetaSized>,
+fn get_partitions_with_threshold<'a>(
+    components: &[&'a ObjectSourceMetaSized],
     limit_hs_bins: usize,
     threshold: f64,
-) -> Option<BTreeMap<String, Vec<&ObjectSourceMetaSized>>> {
+) -> Option<BTreeMap<String, Vec<&'a ObjectSourceMetaSized>>> {
     let mut partitions: BTreeMap<String, Vec<&ObjectSourceMetaSized>> = BTreeMap::new();
     let mut med_size: Vec<&ObjectSourceMetaSized> = Vec::new();
     let mut high_size: Vec<&ObjectSourceMetaSized> = Vec::new();
@@ -676,7 +676,7 @@ fn basic_packing<'a>(
             let limit_ms_bins = (bin_size.get()
                 - (limit_hs_bins + limit_ls_bins + limit_new_bins + limit_max_frequency_bins)
                     as u32) as usize;
-            let partitions = get_partitions_with_threshold(components, limit_hs_bins, 2f64)
+            let partitions = get_partitions_with_threshold(&components, limit_hs_bins, 2f64)
                 .expect("Partitioning components into sets");
 
             let limit_ls_pkgs = match partitions.get(LOW_PARTITION) {

From 20ef41af4d2ea13b684f65300510077d986c9bf9 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 17 May 2023 10:59:26 -0400
Subject: [PATCH 574/775] chunking: use `is_empty()` instead of matching on
 length

Main motivation is that this allows dropping a level of indentation,
but it's also more idiomatic I think.

---
 lib/src/chunking.rs | 159 +++++++++++++++++++++++----------------------
 1 file changed, 76 insertions(+), 83 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index b5183f4ad..c8d7c4fa6 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -657,100 +657,93 @@ fn basic_packing<'a>(
     let (components, max_freq_components) = components
         .iter()
         .partition::<Vec<_>, _>(|pkg| pkg.meta.change_frequency != u32::MAX);
-    let components_len_after_max_freq = components.len();
-    match components_len_after_max_freq {
-        0 => (),
-        _ => {
-            // Given a total number of bins (layers), compute how many should be assigned to our
-            // partitioning based on size and frequency.
+    if !components.is_empty() {
+        // Given a total number of bins (layers), compute how many should be assigned to our
+        // partitioning based on size and frequency.
+ let limit_ls_bins = 1usize; + let limit_new_bins = 1usize; + let _limit_new_pkgs = 0usize; + let limit_max_frequency_pkgs = max_freq_components.len(); + let limit_max_frequency_bins = limit_max_frequency_pkgs.min(1); + let limit_hs_bins = (0.6 + * (bin_size.get() - (limit_ls_bins + limit_new_bins + limit_max_frequency_bins) as u32) + as f32) + .floor() as usize; + let limit_ms_bins = (bin_size.get() + - (limit_hs_bins + limit_ls_bins + limit_new_bins + limit_max_frequency_bins) as u32) + as usize; + let partitions = get_partitions_with_threshold(&components, limit_hs_bins, 2f64) + .expect("Partitioning components into sets"); + + let limit_ls_pkgs = match partitions.get(LOW_PARTITION) { + Some(n) => n.len(), + None => 0usize, + }; - let pkg_per_bin_ms: usize = - (components_len_after_max_freq - limit_hs_bins - limit_ls_pkgs) - .checked_div(limit_ms_bins) - .expect("number of bins should be >= 4"); + let pkg_per_bin_ms: usize = (components.len() - limit_hs_bins - limit_ls_pkgs) + .checked_div(limit_ms_bins) + .expect("number of bins should be >= 4"); - // Bins assignment - for (partition, pkgs) in partitions.iter() { - if partition == HIGH_PARTITION { - for pkg in pkgs { - r.push(vec![*pkg]); - } - } else if partition == LOW_PARTITION { - let mut bin: Vec<&ObjectSourceMetaSized> = Vec::new(); - for pkg in pkgs { + // Bins assignment + for (partition, pkgs) in partitions.iter() { + if partition == HIGH_PARTITION { + for pkg in pkgs { + r.push(vec![*pkg]); + } + } else if partition == LOW_PARTITION { + let mut bin: Vec<&ObjectSourceMetaSized> = Vec::new(); + for pkg in pkgs { + bin.push(*pkg); + } + r.push(bin); + } else { + let mut bin: Vec<&ObjectSourceMetaSized> = Vec::new(); + for (i, pkg) in pkgs.iter().enumerate() { + if bin.len() < pkg_per_bin_ms { + bin.push(*pkg); + } else { + r.push(bin.clone()); + bin.clear(); bin.push(*pkg); } - r.push(bin); - } else { - let mut bin: Vec<&ObjectSourceMetaSized> = Vec::new(); - for (i, pkg) in pkgs.iter().enumerate() { - if bin.len() < pkg_per_bin_ms { - bin.push(*pkg); - } else { - r.push(bin.clone()); - bin.clear(); - bin.push(*pkg); - } - if i == pkgs.len() - 1 && !bin.is_empty() { - r.push(bin.clone()); - bin.clear(); - } + if i == pkgs.len() - 1 && !bin.is_empty() { + r.push(bin.clone()); + bin.clear(); } } } - tracing::debug!("Bins before unoptimized build: {}", r.len()); - - // Despite allocation certain number of pkgs per bin in medium-size partitions, the - // hard limit of number of medium-size bins can be exceeded. This is because the pkg_per_bin_ms - // is only upper limit and there is no lower limit. Thus, if a partition in medium-size has only 1 pkg - // but pkg_per_bin_ms > 1, then the entire bin will have 1 pkg. This prevents partition - // mixing. - // - // Addressing medium-size bins limit breach by mergin internal MS partitions - // The partitions in medium-size are merged beginning from the end so to not mix high-frequency bins with low-frequency bins. The - // bins are kept in this order: high-frequency, medium-frequency, low-frequency. - while r.len() > (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins) { - for i in (limit_ls_bins + limit_hs_bins..r.len() - 1) - .step_by(2) - .rev() + } + tracing::debug!("Bins before unoptimized build: {}", r.len()); + + // Despite allocation certain number of pkgs per bin in medium-size partitions, the + // hard limit of number of medium-size bins can be exceeded. This is because the pkg_per_bin_ms + // is only upper limit and there is no lower limit. 
Thus, if a partition in medium-size has only 1 pkg + // but pkg_per_bin_ms > 1, then the entire bin will have 1 pkg. This prevents partition + // mixing. + // + // Addressing medium-size bins limit breach by mergin internal MS partitions + // The partitions in medium-size are merged beginning from the end so to not mix high-frequency bins with low-frequency bins. The + // bins are kept in this order: high-frequency, medium-frequency, low-frequency. + while r.len() > (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins) { + for i in (limit_ls_bins + limit_hs_bins..r.len() - 1) + .step_by(2) + .rev() + { + if r.len() <= (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins) { - if r.len() - <= (bin_size.get() as usize - limit_new_bins - limit_max_frequency_bins) - { - break; - } - let prev = &r[i - 1]; - let curr = &r[i]; - let mut merge: Vec<&ObjectSourceMetaSized> = Vec::new(); - merge.extend(prev.iter()); - merge.extend(curr.iter()); - r.remove(i); - r.remove(i - 1); - r.insert(i, merge); + break; } + let prev = &r[i - 1]; + let curr = &r[i]; + let mut merge: Vec<&ObjectSourceMetaSized> = Vec::new(); + merge.extend(prev.iter()); + merge.extend(curr.iter()); + r.remove(i); + r.remove(i - 1); + r.insert(i, merge); } - tracing::debug!("Bins after optimization: {}", r.len()); } + tracing::debug!("Bins after optimization: {}", r.len()); } if !max_freq_components.is_empty() { From 5ce2f3510c2ad3d9b109c11d9ed886f0cadf6833 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 May 2023 11:01:39 -0400 Subject: [PATCH 575/775] chunking: Extract a `const` for high size This is a bit more self-documenting. --- lib/src/chunking.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index c8d7c4fa6..cceb58d7e 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -632,6 +632,7 @@ fn basic_packing<'a>( bin_size: NonZeroU32, prior_build_metadata: Option<&oci_spec::image::ImageManifest>, ) -> Result>> { + const HIGH_SIZE_CUTOFF: f32 = 0.6; let before_processing_pkgs_len = components.len(); // If we have a prior build, then use that @@ -665,7 +666,7 @@ fn basic_packing<'a>( let _limit_new_pkgs = 0usize; let limit_max_frequency_pkgs = max_freq_components.len(); let limit_max_frequency_bins = limit_max_frequency_pkgs.min(1); - let limit_hs_bins = (0.6 + let limit_hs_bins = (HIGH_SIZE_CUTOFF * (bin_size.get() - (limit_ls_bins + limit_new_bins + limit_max_frequency_bins) as u32) as f32) .floor() as usize; From 727236af829a3175347439288d48ba16df0a71f4 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 May 2023 11:26:02 -0400 Subject: [PATCH 576/775] chunking: Factor out a common variable for bin computation To reduce duplication. 
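The duplication being factored out in the next diff is the repeated sum of the low-size, new-package, and max-frequency bin reservations, combined with the `HIGH_SIZE_CUTOFF` constant from the previous patch. A runnable sketch of that budget arithmetic; `bin_budget` and its inputs are invented for illustration, not functions in the crate.

```rust
const HIGH_SIZE_CUTOFF: f32 = 0.6;

// Split a total bin budget into high-size and medium-size allocations after
// reserving bins for low-size, newly added, and max-frequency components.
fn bin_budget(total_bins: u32, max_freq_bins: usize) -> (usize, usize) {
    let limit_ls_bins = 1usize; // low-size
    let limit_new_bins = 1usize; // reserved for added packages
    let low_and_other_bin_limit = limit_ls_bins + limit_new_bins + max_freq_bins;
    let limit_hs_bins = (HIGH_SIZE_CUTOFF
        * (total_bins - low_and_other_bin_limit as u32) as f32)
        .floor() as usize;
    let limit_ms_bins = (total_bins - (limit_hs_bins + low_and_other_bin_limit) as u32) as usize;
    (limit_hs_bins, limit_ms_bins)
}

fn main() {
    let (hs, ms) = bin_budget(64, 1);
    assert_eq!(hs + ms + 3, 64); // high + medium + the three reserved bins
    println!("high-size bins: {hs}, medium-size bins: {ms}");
}
```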
--- lib/src/chunking.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index cceb58d7e..b6506cb11 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -666,13 +666,12 @@ fn basic_packing<'a>( let _limit_new_pkgs = 0usize; let limit_max_frequency_pkgs = max_freq_components.len(); let limit_max_frequency_bins = limit_max_frequency_pkgs.min(1); + let low_and_other_bin_limit = limit_ls_bins + limit_new_bins + limit_max_frequency_bins; let limit_hs_bins = (HIGH_SIZE_CUTOFF - * (bin_size.get() - (limit_ls_bins + limit_new_bins + limit_max_frequency_bins) as u32) - as f32) + * (bin_size.get() - low_and_other_bin_limit as u32) as f32) .floor() as usize; - let limit_ms_bins = (bin_size.get() - - (limit_hs_bins + limit_ls_bins + limit_new_bins + limit_max_frequency_bins) as u32) - as usize; + let limit_ms_bins = + (bin_size.get() - (limit_hs_bins + low_and_other_bin_limit) as u32) as usize; let partitions = get_partitions_with_threshold(&components, limit_hs_bins, 2f64) .expect("Partitioning components into sets"); From 5fbb61f6ebdb940dcbffa7b5b6de78d6126b6317 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 May 2023 11:29:36 -0400 Subject: [PATCH 577/775] chunking: Use combinators to compute low size In this case, the combinator is perhaps mildly easier to read. Also add comments. --- lib/src/chunking.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index b6506cb11..cdb42a419 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -675,12 +675,14 @@ fn basic_packing<'a>( let partitions = get_partitions_with_threshold(&components, limit_hs_bins, 2f64) .expect("Partitioning components into sets"); - let limit_ls_pkgs = match partitions.get(LOW_PARTITION) { - Some(n) => n.len(), - None => 0usize, - }; - - let pkg_per_bin_ms: usize = (components.len() - limit_hs_bins - limit_ls_pkgs) + // Compute how many low-sized package/components we have. + let low_sized_component_count = partitions + .get(LOW_PARTITION) + .map(|p| p.len()) + .unwrap_or_default(); + + // Approximate number of components we should have per medium-size bin. + let pkg_per_bin_ms: usize = (components.len() - limit_hs_bins - low_sized_component_count) .checked_div(limit_ms_bins) .expect("number of bins should be >= 4"); From 0d4da6271a183f62153c59cc055173f98d2c3f19 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 17 May 2023 11:31:31 -0400 Subject: [PATCH 578/775] chunking: Inline empty bin allocation And add a comment. --- lib/src/chunking.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index cdb42a419..139cafb64 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -752,8 +752,8 @@ fn basic_packing<'a>( r.push(max_freq_components); } - let new_pkgs_bin: Vec<&ObjectSourceMetaSized> = Vec::new(); - r.push(new_pkgs_bin); + // Allocate an empty bin for new packages + r.push(Vec::new()); let after_processing_pkgs_len = r.iter().map(|b| b.len()).sum::(); assert_eq!(after_processing_pkgs_len, before_processing_pkgs_len); assert!(r.len() <= bin_size.get() as usize); From 58655167357bc32581c03b6e621e2c9a907aad4b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 May 2023 10:28:38 -0400 Subject: [PATCH 579/775] Add a `const` for the component separator `,` This allows us to better find all the places that are reading/writing this. 
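One wrinkle with a `char` constant is that `join()` wants a `&str`, hence the `encode_utf8` scratch buffer seen in the diff below. A round-trip sketch of the annotation format, with invented package names:

```rust
const COMPONENT_SEPARATOR: char = ',';

fn main() {
    let packages = ["bash-5.2", "kernel-6.3", "systemd-253"];
    // encode_utf8 writes the char into the buffer and returns the &str view.
    let mut buf = [0u8; 4];
    let sep = COMPONENT_SEPARATOR.encode_utf8(&mut buf);
    let annotation = packages.join(sep);
    assert_eq!(annotation, "bash-5.2,kernel-6.3,systemd-253");
    // split() accepts the char directly, so parsing needs no buffer.
    let parsed: Vec<&str> = annotation.split(COMPONENT_SEPARATOR).collect();
    assert_eq!(parsed, packages);
}
```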
--- lib/src/chunking.rs | 13 +++++++++---- lib/src/container/encapsulate.rs | 6 ++++-- lib/src/container/mod.rs | 2 ++ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs index 139cafb64..094b50dc3 100644 --- a/lib/src/chunking.rs +++ b/lib/src/chunking.rs @@ -10,7 +10,7 @@ use std::num::NonZeroU32; use std::rc::Rc; use std::time::Instant; -use crate::container::CONTENT_ANNOTATION; +use crate::container::{COMPONENT_SEPARATOR, CONTENT_ANNOTATION}; use crate::objectsource::{ContentID, ObjectMeta, ObjectMetaMap, ObjectSourceMeta}; use crate::objgv::*; use crate::statistics; @@ -558,7 +558,10 @@ fn basic_packing_with_prior_build<'a>( .as_ref() .and_then(|annos| annos.get(CONTENT_ANNOTATION)) .ok_or_else(|| anyhow!("Missing {CONTENT_ANNOTATION} on prior build"))?; - Ok(annotation_layer.split(',').map(ToOwned::to_owned).collect()) + Ok(annotation_layer + .split(COMPONENT_SEPARATOR) + .map(ToOwned::to_owned) + .collect()) }) .collect(); let mut curr_build = curr_build?; @@ -797,7 +800,7 @@ mod test { .iter() .map(|b| { b.iter() - .map(|p| p.split(".").collect::>()[0].to_string()) + .map(|p| p.split('.').collect::>()[0].to_string()) .collect() }) .collect(); @@ -814,13 +817,15 @@ mod test { let layers: Vec = metadata_with_ostree_commit .iter() .map(|l| { + let mut buf = [0; 8]; + let sep = COMPONENT_SEPARATOR.encode_utf8(&mut buf); oci_spec::image::DescriptorBuilder::default() .media_type(oci_spec::image::MediaType::ImageLayerGzip) .size(100) .digest(format!("sha256:{}", l.len())) .annotations(HashMap::from([( CONTENT_ANNOTATION.to_string(), - l.join(","), + l.join(sep), )])) .build() .expect("build layer") diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs index 396ea8a5a..6b6347c18 100644 --- a/lib/src/container/encapsulate.rs +++ b/lib/src/container/encapsulate.rs @@ -1,7 +1,7 @@ //! APIs for creating container images from OSTree commits use super::ocidir::{Layer, OciDir}; -use super::{ocidir, OstreeImageReference, Transport, CONTENT_ANNOTATION}; +use super::{ocidir, OstreeImageReference, Transport, COMPONENT_SEPARATOR, CONTENT_ANNOTATION}; use super::{ImageReference, SignatureSource, OSTREE_COMMIT_LABEL}; use crate::chunking::{Chunk, Chunking, ObjectMetaSized}; use crate::container::skopeo; @@ -154,9 +154,11 @@ fn export_chunked( // Add the ostree layer ociw.push_layer(manifest, imgcfg, ostree_layer, description, None); // Add the component/content layers + let mut buf = [0; 8]; + let sep = COMPONENT_SEPARATOR.encode_utf8(&mut buf); for (layer, name, packages) in layers { let mut annotation_component_layer = HashMap::new(); - annotation_component_layer.insert(CONTENT_ANNOTATION.to_string(), packages.join(",")); + annotation_component_layer.insert(CONTENT_ANNOTATION.to_string(), packages.join(sep)); ociw.push_layer( manifest, imgcfg, diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 4d701b95c..a92910b6d 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -40,6 +40,8 @@ pub const OSTREE_COMMIT_LABEL: &str = "ostree.commit"; /// The name of an annotation attached to a layer which names the packages/components /// which are part of it. pub(crate) const CONTENT_ANNOTATION: &str = "ostree.components"; +/// The character we use to separate values in [`CONTENT_ANNOTATION`]. +pub(crate) const COMPONENT_SEPARATOR: char = ','; /// Our generic catchall fatal error, expected to be converted /// to a string to output to a terminal or logs. 
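The next commit unifies `ManifestDiff`'s two lifetime parameters; with a single `'a`, borrows from both manifests simply shrink to the shorter of the two, which is all the diff result needs. A toy model of the same layout, using simplified stand-in types rather than the oci-spec API:

```rust
use std::collections::BTreeSet;

struct Diff<'a> {
    removed: Vec<&'a str>,
    added: Vec<&'a str>,
}

// One lifetime for both inputs: the result may borrow from either.
fn diff<'a>(src: &'a [String], dest: &'a [String]) -> Diff<'a> {
    let src_set: BTreeSet<&str> = src.iter().map(|s| s.as_str()).collect();
    let dest_set: BTreeSet<&str> = dest.iter().map(|s| s.as_str()).collect();
    Diff {
        removed: src_set.difference(&dest_set).copied().collect(),
        added: dest_set.difference(&src_set).copied().collect(),
    }
}

fn main() {
    let old = vec!["layer-a".to_string(), "layer-b".to_string()];
    let new = vec!["layer-b".to_string(), "layer-c".to_string()];
    let d = diff(&old, &new);
    assert_eq!(d.removed, ["layer-a"]);
    assert_eq!(d.added, ["layer-c"]);
}
```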
From 29ca3e05cc4cffa096891b804cf448bb3bcbba1d Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 May 2023 16:54:38 -0400 Subject: [PATCH 580/775] container: Make ManifestDiff have only one lifetime This is prep for further changes where I want to compute more derived data. There's no compelling reason to support disjoint lifetimes here; in the end we need to be OK with the shorter of the two. --- lib/src/container/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 4d701b95c..35b1e7098 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -254,22 +254,22 @@ impl std::fmt::Display for OstreeImageReference { /// Represents the difference in layer/blob content between two OCI image manifests. #[derive(Debug)] -pub struct ManifestDiff<'from, 'to> { +pub struct ManifestDiff<'a> { /// The source container image manifest. - pub from: &'from oci_spec::image::ImageManifest, + pub from: &'a oci_spec::image::ImageManifest, /// The target container image manifest. - pub to: &'to oci_spec::image::ImageManifest, + pub to: &'a oci_spec::image::ImageManifest, /// Layers which are present in the old image but not the new image. - pub removed: Vec<&'from oci_spec::image::Descriptor>, + pub removed: Vec<&'a oci_spec::image::Descriptor>, /// Layers which are present in the new image but not the old image. - pub added: Vec<&'to oci_spec::image::Descriptor>, + pub added: Vec<&'a oci_spec::image::Descriptor>, } -impl<'from, 'to> ManifestDiff<'from, 'to> { +impl<'a> ManifestDiff<'a> { /// Compute the layer difference between two OCI image manifests. pub fn new( - src: &'from oci_spec::image::ImageManifest, - dest: &'to oci_spec::image::ImageManifest, + src: &'a oci_spec::image::ImageManifest, + dest: &'a oci_spec::image::ImageManifest, ) -> Self { let src_layers = src .layers() @@ -304,7 +304,7 @@ impl<'from, 'to> ManifestDiff<'from, 'to> { } } -impl<'from, 'to> ManifestDiff<'from, 'to> { +impl<'a> ManifestDiff<'a> { /// Prints the total, removed and added content between two OCI images pub fn print(&self) { fn layersum<'a, I: Iterator>(layers: I) -> u64 { From a53e4848390c4425b1c60dfe95aaf5bf2d7976a7 Mon Sep 17 00:00:00 2001 From: Luca BRUNO Date: Thu, 29 Sep 2022 13:25:18 +0000 Subject: [PATCH 581/775] cargo: bump to clap v4 Co-authored-by: Colin Walters --- cli/Cargo.toml | 2 +- lib/Cargo.toml | 4 ++-- lib/src/cli.rs | 2 +- lib/src/docgen.rs | 6 +++++- 4 files changed, 9 insertions(+), 5 deletions(-) diff --git a/cli/Cargo.toml b/cli/Cargo.toml index e9683ca20..66a012408 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -12,7 +12,7 @@ rust-version = "1.64.0" [dependencies] anyhow = "1.0" ostree-ext = { path = "../lib" } -clap = "3.2" +clap = "4.2" libc = "0.2.92" tokio = { version = "1", features = ["macros"] } log = "0.4.0" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 29906ae62..542f0fdd1 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -17,8 +17,8 @@ bitflags = "1" camino = "1.0.4" chrono = "0.4.19" olpc-cjson = "0.1.1" -clap = { version= "3.2", features = ["derive"] } -clap_mangen = { version = "0.1", optional = true } +clap = { version= "4.2", features = ["derive"] } +clap_mangen = { version = "0.2", optional = true } cap-std-ext = "2.0" cap-tempfile = "1.0" flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" } diff --git a/lib/src/cli.rs b/lib/src/cli.rs index f5a3ef687..ecc2ee705 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -57,7 +57,7 @@ 
pub(crate) struct ExportOpts { repo: Utf8PathBuf, /// The format version. Must be 1. - #[clap(long, hidden(true))] + #[clap(long, hide(true))] format_version: u32, /// The ostree ref or commit to export diff --git a/lib/src/docgen.rs b/lib/src/docgen.rs index 6bda7f4dd..0e2d12df0 100644 --- a/lib/src/docgen.rs +++ b/lib/src/docgen.rs @@ -36,7 +36,11 @@ fn generate_one(directory: &Utf8Path, cmd: Command) -> Result<()> { for subcmd in cmd.get_subcommands().filter(|c| !c.is_hide_set()) { let subname = format!("{}-{}", name, subcmd.get_name()); - generate_one(directory, subcmd.clone().name(subname).version(version))?; + // SAFETY: Latest clap 4 requires names are &'static - this is + // not long-running production code, so we just leak the names here. + let subname = &*std::boxed::Box::leak(subname.into_boxed_str()); + let subcmd = subcmd.clone().name(subname).alias(subname).version(version); + generate_one(directory, subcmd)?; } Ok(()) } From 04182aef2407977d684089d89c421f2184e42cbf Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 18 May 2023 17:49:11 -0400 Subject: [PATCH 582/775] lib: Drop circular `commit` alias clap correctly errors about this in debug mode. --- lib/src/cli.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index ecc2ee705..acbc973a4 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -139,7 +139,6 @@ pub(crate) enum ContainerOpts { compression_fast: bool, }, - #[clap(alias = "commit")] /// Perform build-time checking and canonicalization. /// This is presently an optional command, but may become required in the future. Commit, From 7b65cedea6750b77512a87696c0b77d73d450b8b Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Fri, 19 May 2023 11:39:05 -0400 Subject: [PATCH 583/775] Release 0.11.0 (We should have bumped the semver in git before now) But I think we're done with API changes for now, so let's go ahead and do the first 0.11 release. 
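The clap v4 port above leans on `Box::leak` to satisfy APIs that now require `&'static str` command names. A minimal sketch of that pattern: it trades a one-time allocation leak for a `'static` borrow, which is acceptable in short-lived tooling like man-page generation. The helper name below is hypothetical.

```rust
// Build a subcommand name at runtime, then leak it to obtain the &'static str
// that clap v4's name()/alias() setters require.
fn make_static_name(parent: &str, sub: &str) -> &'static str {
    let name = format!("{parent}-{sub}");
    Box::leak(name.into_boxed_str())
}

fn main() {
    let name: &'static str = make_static_name("ostree-ext-cli", "container");
    assert_eq!(name, "ostree-ext-cli-container");
}
```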
--- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 542f0fdd1..3a3dac7c5 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.10.7" +version = "0.11.0" rust-version = "1.64.0" [dependencies] From 8303b3d23d6b5fbf42f051a38049ab64088311d6 Mon Sep 17 00:00:00 2001 From: RishabhSaini Date: Mon, 5 Jun 2023 17:31:43 -0400 Subject: [PATCH 584/775] store: Pulling from container-storage needs root privileges --- lib/src/container/store.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index e66c33ee8..28e4b4a96 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -441,8 +441,13 @@ impl ImageImporter { imgref: &OstreeImageReference, mut config: ImageProxyConfig, ) -> Result { - // Apply our defaults to the proxy config - merge_default_container_proxy_opts(&mut config)?; + if imgref.imgref.transport == Transport::ContainerStorage { + // Fetching from containers-storage, may require privileges to read files + merge_default_container_proxy_opts_with_isolation(&mut config, None)?; + } else { + // Apply our defaults to the proxy config + merge_default_container_proxy_opts(&mut config)?; + } let proxy = ImageProxy::new_with_config(config).await?; system_repo_journal_print( From afc1837ff383681b947de30c0cefc70080a4f87a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Sat, 10 Jun 2023 10:40:57 -0400 Subject: [PATCH 585/775] container/store: Make baseimage ref prefix `pub` Since it's intended for use by external consumers. --- lib/src/container/store.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 28e4b4a96..3b72c29a0 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -34,7 +34,7 @@ const IMAGE_PREFIX: &str = "ostree/container/image"; /// If you maintain tooling which is locally building derived commits, write a ref /// with this prefix that is owned by your code. It's a best practice to prefix the /// ref with the project name, so the final ref may be of the form e.g. `ostree/container/baseimage/bootc/foo`. -const BASE_IMAGE_PREFIX: &str = "ostree/container/baseimage"; +pub const BASE_IMAGE_PREFIX: &str = "ostree/container/baseimage"; /// The key injected into the merge commit for the manifest digest. 
const META_MANIFEST_DIGEST: &str = "ostree.manifest-digest"; From e19eca5a7c3e6f0cdf064b61ba89f64c3f301235 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Tue, 13 Jun 2023 17:10:02 -0400 Subject: [PATCH 586/775] Release 0.11.1 --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 3a3dac7c5..fc11aff01 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.11.0" +version = "0.11.1" rust-version = "1.64.0" [dependencies] From 7e21bc648346d494785e35f26d9416698a566125 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 6 Jul 2023 05:15:25 -0400 Subject: [PATCH 587/775] container: Add an API to serialize `Transport` This will fix the need demonstrated by https://github.com/containers/bootc/pull/106/files#diff-c163debeb7fdf9b238e5c589e0000c80684ca9a09abd408242df4bad489626a6R38 We made a historical mistake here in having our `Display` include the `:`...but it's hard to deal with because inherently the format is messy because of the `://` in `docker://`. --- lib/src/container/mod.rs | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 239d75299..a0d4491e3 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -100,15 +100,32 @@ impl TryFrom<&str> for Transport { fn try_from(value: &str) -> Result { Ok(match value { - "registry" | "docker" => Self::Registry, - "oci" => Self::OciDir, - "oci-archive" => Self::OciArchive, - "containers-storage" => Self::ContainerStorage, + Self::REGISTRY_STR | "docker" => Self::Registry, + Self::OCI_STR => Self::OciDir, + Self::OCI_ARCHIVE_STR => Self::OciArchive, + Self::CONTAINERS_STORAGE_STR => Self::ContainerStorage, o => return Err(anyhow!("Unknown transport '{}'", o)), }) } } +impl Transport { + const OCI_STR: &str = "oci"; + const OCI_ARCHIVE_STR: &str = "oci-archive"; + const CONTAINERS_STORAGE_STR: &str = "containers-storage"; + const REGISTRY_STR: &str = "registry"; + + /// Retrieve an identifier that can then be re-parsed from [`Transport::try_from::<&str>`]. + pub fn serializable_name(&self) -> &'static str { + match self { + Transport::Registry => Self::REGISTRY_STR, + Transport::OciDir => Self::OCI_STR, + Transport::OciArchive => Self::OCI_ARCHIVE_STR, + Transport::ContainerStorage => Self::CONTAINERS_STORAGE_STR, + } + } +} + impl TryFrom<&str> for ImageReference { type Error = anyhow::Error; @@ -428,6 +445,18 @@ mod tests { use super::*; + #[test] + fn test_serializable_transport() { + for v in [ + Transport::Registry, + Transport::ContainerStorage, + Transport::OciArchive, + Transport::OciDir, + ] { + assert_eq!(Transport::try_from(v.serializable_name()).unwrap(), v); + } + } + const INVALID_IRS: &[&str] = &["", "foo://", "docker:blah", "registry:", "foo:bar"]; const VALID_IRS: &[&str] = &[ "containers-storage:localhost/someimage", From 979b93a24765ab129c6f1414388a9c7565fe3d80 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 6 Jul 2023 07:00:49 -0400 Subject: [PATCH 588/775] deploy: Add an API to prune undeployed images This is part of fixing https://github.com/coreos/rpm-ostree/issues/4391 but is also in the general theme of making things less "stateful". A huge huge mess today is `rpm-ostree rebase` and `bootc switch` both have `--retain` options which keep the previous image. 
But really what we want is to use the deployments as source-of-truth; that way if e.g. an admin pins a deployment, it automatically pins the image too. And this will help strongly align with the bootc direction in reconciling to desired state. --- ci/priv-integration.sh | 7 ++++- lib/src/cli.rs | 37 ++++++++++++++++++++++++++ lib/src/container/deploy.rs | 52 ++++++++++++++++++++++++++++++++++++- lib/src/container/mod.rs | 8 +++--- 4 files changed, 98 insertions(+), 6 deletions(-) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 6b5c22aa9..431e727d6 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -24,6 +24,8 @@ fi if test '!' -d "${sysroot}/ostree/deploy/${stateroot}"; then ostree admin os-init "${stateroot}" --sysroot "${sysroot}" fi +# Should be no images pruned +ostree-ext-cli container image prune-images --sysroot "${sysroot}" # Test the syntax which uses full imgrefs. ostree-ext-cli container image deploy --sysroot "${sysroot}" \ --stateroot "${stateroot}" --imgref "${imgref}" @@ -34,8 +36,11 @@ ostree admin --sysroot="${sysroot}" undeploy 0 ostree-ext-cli container image deploy --transport registry --sysroot "${sysroot}" \ --stateroot "${stateroot}" --image "${image}" --no-signature-verification ostree admin --sysroot="${sysroot}" status -ostree-ext-cli container image remove --repo "${sysroot}/ostree/repo" registry:"${image}" ostree admin --sysroot="${sysroot}" undeploy 0 +# Now we should prune it +ostree-ext-cli container image prune-images --sysroot "${sysroot}" +ostree-ext-cli container image list --repo "${sysroot}/ostree/repo" > out.txt +test $(stat -c '%s' out.txt) = 0 for img in "${image}"; do ostree-ext-cli container image deploy --sysroot "${sysroot}" \ diff --git a/lib/src/cli.rs b/lib/src/cli.rs index acbc973a4..62a56cad6 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -18,6 +18,7 @@ use crate::commit::container_commit; use crate::container::store::{ImportProgress, LayerProgress, PreparedImport}; use crate::container::{self as ostree_container}; use crate::container::{Config, ImageReference, OstreeImageReference}; +use crate::sysroot::SysrootLock; use ostree_container::store::{ImageImporter, PrepareResult}; /// Parse an [`OstreeImageReference`] from a CLI arguemnt. @@ -273,6 +274,17 @@ pub(crate) enum ContainerImageOpts { repo: Utf8PathBuf, }, + /// Garbage collect unreferenced image layer references. 
+ PruneImages { + /// Path to the system root + #[clap(long)] + sysroot: Utf8PathBuf, + + #[clap(long)] + /// Also prune layers + and_layers: bool, + }, + /// Perform initial deployment for a container image Deploy { /// Path to the system root @@ -825,6 +837,31 @@ where println!("Removed layers: {nlayers}"); Ok(()) } + ContainerImageOpts::PruneImages { + sysroot, + and_layers, + } => { + let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); + sysroot.load(gio::Cancellable::NONE)?; + let sysroot = &SysrootLock::new_from_sysroot(sysroot).await?; + let removed = crate::container::deploy::remove_undeployed_images(sysroot)?; + match removed.as_slice() { + [] => { + println!("No unreferenced images."); + return Ok(()); + } + o => { + for imgref in o { + println!("Removed: {imgref}"); + } + } + } + if and_layers { + let nlayers = crate::container::store::gc_image_layers(&sysroot.repo())?; + println!("Removed layers: {nlayers}"); + } + Ok(()) + } ContainerImageOpts::Copy { src_repo, dest_repo, diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 98080c58e..431527e3a 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -1,8 +1,12 @@ //! Perform initial setup for a container image based system root +use std::collections::HashSet; + use super::store::LayeredImageState; -use super::OstreeImageReference; +use super::{ImageReference, OstreeImageReference}; use crate::container::store::PrepareResult; +use crate::keyfileext::KeyFileExt; +use crate::sysroot::SysrootLock; use anyhow::Result; use fn_error_context::context; use ostree::glib; @@ -112,3 +116,49 @@ pub async fn deploy( Ok(state) } + +/// Query the container image reference for a deployment +fn deployment_origin_container( + deploy: &ostree::Deployment, +) -> Result> { + let origin = deploy + .origin() + .map(|o| o.optional_string("origin", ORIGIN_CONTAINER)) + .transpose()? + .flatten(); + let r = origin + .map(|v| OstreeImageReference::try_from(v.as_str())) + .transpose()?; + Ok(r) +} + +/// Remove all container images which are not the target of a deployment. +/// This acts equivalently to [`super::store::remove_images()`] - the underlying layers +/// are not pruned. +/// +/// The set of removed images is returned. +pub fn remove_undeployed_images(sysroot: &SysrootLock) -> Result> { + let repo = &sysroot.repo(); + let deployment_origins: Result> = sysroot + .deployments() + .into_iter() + .filter_map(|deploy| { + deployment_origin_container(&deploy) + .map(|v| v.map(|v| v.imgref)) + .transpose() + }) + .collect(); + let deployment_origins = deployment_origins?; + // TODO add an API that returns ImageReference instead + let all_images = super::store::list_images(&sysroot.repo())? + .into_iter() + .filter_map(|img| ImageReference::try_from(img.as_str()).ok()); + let mut removed = Vec::new(); + for image in all_images { + if !deployment_origins.contains(&image) { + super::store::remove_image(repo, &image)?; + removed.push(image); + } + } + Ok(removed) +} diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs index 239d75299..cfe0bce9b 100644 --- a/lib/src/container/mod.rs +++ b/lib/src/container/mod.rs @@ -48,7 +48,7 @@ pub(crate) const COMPONENT_SEPARATOR: char = ','; type Result = anyhow::Result; /// A backend/transport for OCI/Docker images. 
-#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Hash, Debug, PartialEq, Eq)] pub enum Transport { /// A remote Docker/OCI registry (`registry:` or `docker://`) Registry, @@ -63,7 +63,7 @@ pub enum Transport { /// Combination of a remote image reference and transport. /// /// For example, -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Hash, PartialEq, Eq)] pub struct ImageReference { /// The storage and transport for the image pub transport: Transport, @@ -72,7 +72,7 @@ pub struct ImageReference { } /// Policy for signature verification. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum SignatureSource { /// Fetches will use the named ostree remote for signature verification of the ostree commit. OstreeRemote(String), @@ -87,7 +87,7 @@ pub const LABEL_VERSION: &str = "version"; /// Combination of a signature verification mechanism, and a standard container image reference. /// -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct OstreeImageReference { /// The signature verification mechanism. pub sigverify: SignatureSource, From afd05a75c027eaac28b5f1a5879c98a25743b52f Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 17 Jul 2023 16:22:47 -0400 Subject: [PATCH 589/775] Release 0.11.2 Just two minor changes, but I'd like to have bootc use them. --- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index fc11aff01..fdf93f3d5 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.11.1" +version = "0.11.2" rust-version = "1.64.0" [dependencies] From cdfd9c332ba0b684b5b3cea9a059ac987c3851cd Mon Sep 17 00:00:00 2001 From: RishabhSaini Date: Mon, 17 Jul 2023 15:13:13 -0400 Subject: [PATCH 590/775] priv-integration: Add test to ensure policy verification occurs when pulling an image --- ci/priv-integration.sh | 66 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/ci/priv-integration.sh b/ci/priv-integration.sh index 431e727d6..1de1060ca 100755 --- a/ci/priv-integration.sh +++ b/ci/priv-integration.sh @@ -13,6 +13,9 @@ image=quay.io/fedora/fedora-coreos:testing-devel imgref=ostree-unverified-registry:${image} stateroot=testos +# This image was generated manually; TODO auto-generate in quay.io/coreos-assembler or better start sigstore signing our production images +FIXTURE_SIGSTORE_SIGNED_FCOS_IMAGE=quay.io/rh_ee_rsaini/coreos + cd $(mktemp -d -p /var/tmp) set -x @@ -103,4 +106,67 @@ img_commit2=$(ostree --repo=${repo} rev-parse ostree/container/image/${imgref}) test "${img_commit}" = "${img_commit2}" echo "ok deploy derived container identical revs" +# Verify policy + +mkdir -p /etc/pki/containers +#Ensure Wrong Public Key fails +cat > /etc/pki/containers/fcos.pub << EOF +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEPw/TzXY5FQ00LT2orloOuAbqoOKv +relAN0my/O8tziGvc16PtEhF6A7Eun0/9//AMRZ8BwLn2cORZiQsGd5adA== +-----END PUBLIC KEY----- +EOF + +cat > /etc/containers/registries.d/default.yaml << EOF +docker: + ${FIXTURE_SIGSTORE_SIGNED_FCOS_IMAGE}: + use-sigstore-attachments: true +EOF + +cat > /etc/containers/policy.json << EOF +{ + "default": [ + { + "type": "reject" + } + ], + "transports": { + "docker": { + "quay.io/fedora/fedora-coreos": [ + { + "type": "insecureAcceptAnything" + } + ], + 
"${FIXTURE_SIGSTORE_SIGNED_FCOS_IMAGE}": [ + { + "type": "sigstoreSigned", + "keyPath": "/etc/pki/containers/fcos.pub", + "signedIdentity": { + "type": "matchRepository" + } + } + ] + + } + } +} +EOF + +if ostree container image pull ${repo} ostree-image-signed:docker://${FIXTURE_SIGSTORE_SIGNED_FCOS_IMAGE} 2> error; then + echo "unexpectedly pulled image" 1>&2 + exit 1 +else + grep -q "invalid signature" error +fi + +#Ensure Correct Public Key succeeds +cat > /etc/pki/containers/fcos.pub << EOF +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEREpVb8t/Rp/78fawILAodC6EXGCG +rWNjJoPo7J99cBu5Ui4oCKD+hAHagop7GTi/G3UBP/dtduy2BVdICuBETQ== +-----END PUBLIC KEY----- +EOF +ostree container image pull ${repo} ostree-image-signed:docker://${FIXTURE_SIGSTORE_SIGNED_FCOS_IMAGE} +ostree container image history --repo ${repo} docker://${FIXTURE_SIGSTORE_SIGNED_FCOS_IMAGE} + echo ok privileged integration From 77f04e787a60354a54397670ea301eaa59447bcf Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 19 Jul 2023 13:44:34 -0400 Subject: [PATCH 591/775] deploy: Don't overwrite kargs by default This is the same bug as https://github.com/ostreedev/ostree-rs-ext/commit/3089166a4456cdcfa0568aedcda31eac65ac00ee but for the not-booted case. Basically in the C API bridged to Rust we can't distinguish between "NULL array" and "zero length array". But the _with_options path supports distinguishing them, and we want the "no kargs provided" case to not override anything. Closes: https://github.com/ostreedev/ostree-rs-ext/issues/502 --- lib/src/container/deploy.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs index 431527e3a..24651cf02 100644 --- a/lib/src/container/deploy.rs +++ b/lib/src/container/deploy.rs @@ -81,11 +81,12 @@ pub async fn deploy( let target_imgref = options.target_imgref.unwrap_or(imgref); origin.set_string("origin", ORIGIN_CONTAINER, &target_imgref.to_string()); + let opts = ostree::SysrootDeployTreeOpts { + override_kernel_argv: options.kargs, + ..Default::default() + }; + if sysroot.booted_deployment().is_some() { - let opts = ostree::SysrootDeployTreeOpts { - override_kernel_argv: options.kargs, - ..Default::default() - }; sysroot.stage_tree_with_options( Some(stateroot), commit, @@ -95,12 +96,12 @@ pub async fn deploy( cancellable, )?; } else { - let deployment = &sysroot.deploy_tree( + let deployment = &sysroot.deploy_tree_with_options( Some(stateroot), commit, Some(&origin), merge_deployment.as_ref(), - options.kargs.unwrap_or_default(), + Some(&opts), cancellable, )?; let flags = ostree::SysrootSimpleWriteDeploymentFlags::NONE; From abc8ed660843b3ad85e72c0002996dabd61778dd Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 19 Jul 2023 17:34:48 -0400 Subject: [PATCH 592/775] repair: New functionality to detect (future: fix) inodes Initial code to detect the situation resulting from https://github.com/ostreedev/ostree/pull/2874/commits/de6fddc6adee09a93901243dc7074090828a1912 --- lib/src/cli.rs | 73 ++++++++++++ lib/src/container/store.rs | 226 +++++++++++++++++++++++++++++++++++- lib/src/diff.rs | 2 +- lib/src/lib.rs | 2 + lib/src/repair.rs | 227 +++++++++++++++++++++++++++++++++++++ 5 files changed, 527 insertions(+), 3 deletions(-) create mode 100644 lib/src/repair.rs diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 62a56cad6..be17ac4ae 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -8,10 +8,12 @@ use anyhow::{Context, Result}; use camino::{Utf8Path, Utf8PathBuf}; use 
clap::{Parser, Subcommand}; +use fn_error_context::context; use ostree::{cap_std, gio, glib}; use std::collections::BTreeMap; use std::ffi::OsString; use std::path::PathBuf; +use std::process::Command; use tokio::sync::mpsc::Receiver; use crate::commit::container_commit; @@ -345,6 +347,34 @@ pub(crate) enum ContainerImageOpts { }, } +/// Options for deployment repair. +#[derive(Debug, Subcommand)] +pub(crate) enum ProvisionalRepairOpts { + AnalyzeInodes { + /// Path to the repository + #[clap(long, value_parser)] + repo: Utf8PathBuf, + + /// Print additional information + #[clap(long)] + verbose: bool, + }, + + Repair { + /// Path to the sysroot + #[clap(long, value_parser)] + sysroot: Utf8PathBuf, + + /// Do not mutate any system state + #[clap(long)] + dry_run: bool, + + /// Print additional information + #[clap(long)] + verbose: bool, + }, +} + /// Options for the Integrity Measurement Architecture (IMA). #[derive(Debug, Parser)] pub(crate) struct ImaSignOpts { @@ -410,6 +440,8 @@ pub(crate) enum Opt { #[clap(hide(true))] #[cfg(feature = "docgen")] Man(ManOpts), + #[clap(hide = true, subcommand)] + ProvisionalRepair(ProvisionalRepairOpts), } #[allow(clippy::from_over_into)] @@ -739,6 +771,22 @@ async fn testing(opts: &TestingOpts) -> Result<()> { } } +// Quick hack; TODO dedup this with the code in bootc or lower here +#[context("Remounting sysroot writable")] +fn container_remount_sysroot(sysroot: &Utf8Path) -> Result<()> { + if !Utf8Path::new("/run/.containerenv").exists() { + return Ok(()); + } + println!("Running in container, assuming we can remount {sysroot} writable"); + let st = Command::new("mount") + .args(["-o", "remount,rw", sysroot.as_str()]) + .status()?; + if !st.success() { + anyhow::bail!("Failed to remount {sysroot}: {st:?}"); + } + Ok(()) +} + /// Parse the provided arguments and execute. /// Calls [`structopt::clap::Error::exit`] on failure, printing the error message and aborting the program. pub async fn run_from_iter(args: I) -> Result<()> @@ -978,5 +1026,30 @@ where Opt::InternalOnlyForTesting(ref opts) => testing(opts).await, #[cfg(feature = "docgen")] Opt::Man(manopts) => crate::docgen::generate_manpages(&manopts.directory), + Opt::ProvisionalRepair(opts) => match opts { + ProvisionalRepairOpts::AnalyzeInodes { repo, verbose } => { + let repo = parse_repo(&repo)?; + match crate::repair::check_inode_collision(&repo, verbose)? 
{ + crate::repair::InodeCheckResult::Okay => { + println!("OK: No colliding objects found."); + } + crate::repair::InodeCheckResult::PotentialCorruption(n) => { + eprintln!("warning: {} potentially colliding inodes found", n.len()); + } + } + Ok(()) + } + ProvisionalRepairOpts::Repair { + sysroot, + verbose, + dry_run, + } => { + container_remount_sysroot(&sysroot)?; + let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot))); + sysroot.load(gio::Cancellable::NONE)?; + let sysroot = &SysrootLock::new_from_sysroot(sysroot).await?; + crate::repair::auto_repair_inode_collision(sysroot, dry_run, verbose) + } + }, } } diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 3b72c29a0..913b01671 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -8,14 +8,17 @@ use super::*; use crate::logging::system_repo_journal_print; use crate::refescape; +use crate::sysroot::SysrootLock; use crate::utils::ResultExt; use anyhow::{anyhow, Context}; +use camino::{Utf8Path, Utf8PathBuf}; use containers_image_proxy::{ImageProxy, OpenedImage}; use fn_error_context::context; use futures_util::TryFutureExt; use oci_spec::image::{self as oci_image, Descriptor, History, ImageConfiguration, ImageManifest}; -use ostree::prelude::{Cast, ToVariant}; +use ostree::prelude::{Cast, FileEnumeratorExt, FileExt, ToVariant}; use ostree::{gio, glib}; +use rustix::fs::MetadataExt; use std::collections::{BTreeSet, HashMap}; use std::iter::FromIterator; use tokio::sync::mpsc::{Receiver, Sender}; @@ -37,7 +40,7 @@ const IMAGE_PREFIX: &str = "ostree/container/image"; pub const BASE_IMAGE_PREFIX: &str = "ostree/container/baseimage"; /// The key injected into the merge commit for the manifest digest. -const META_MANIFEST_DIGEST: &str = "ostree.manifest-digest"; +pub(crate) const META_MANIFEST_DIGEST: &str = "ostree.manifest-digest"; /// The key injected into the merge commit with the manifest serialized as JSON. const META_MANIFEST: &str = "ostree.manifest"; /// The key injected into the merge commit with the image configuration serialized as JSON. 
@@ -1262,3 +1265,222 @@ pub fn remove_images<'a>(
     }
     Ok(())
 }
+
+#[derive(Debug, Default)]
+struct CompareState {
+    verified: BTreeSet<Utf8PathBuf>,
+    inode_corrupted: BTreeSet<Utf8PathBuf>,
+    unknown_corrupted: BTreeSet<Utf8PathBuf>,
+}
+
+impl CompareState {
+    fn is_ok(&self) -> bool {
+        self.inode_corrupted.is_empty() && self.unknown_corrupted.is_empty()
+    }
+}
+
+fn compare_file_info(src: &gio::FileInfo, target: &gio::FileInfo) -> bool {
+    if src.file_type() != target.file_type() {
+        return false;
+    }
+    if src.size() != target.size() {
+        return false;
+    }
+    for attr in ["unix::uid", "unix::gid", "unix::mode"] {
+        if src.attribute_uint32(attr) != target.attribute_uint32(attr) {
+            return false;
+        }
+    }
+    true
+}
+
+#[context("Querying object inode")]
+fn inode_of_object(repo: &ostree::Repo, checksum: &str) -> Result<u64> {
+    let repodir = repo.dfd_as_dir()?;
+    let (prefix, suffix) = checksum.split_at(2);
+    let objpath = format!("objects/{}/{}.file", prefix, suffix);
+    let metadata = repodir.symlink_metadata(objpath)?;
+    Ok(metadata.ino())
+}
+
+fn compare_commit_trees(
+    repo: &ostree::Repo,
+    root: &Utf8Path,
+    target: &ostree::RepoFile,
+    expected: &ostree::RepoFile,
+    exact: bool,
+    colliding_inodes: &BTreeSet<u64>,
+    state: &mut CompareState,
+) -> Result<()> {
+    let cancellable = gio::Cancellable::NONE;
+    let queryattrs = "standard::name,standard::type";
+    let queryflags = gio::FileQueryInfoFlags::NOFOLLOW_SYMLINKS;
+    let expected_iter = expected.enumerate_children(queryattrs, queryflags, cancellable)?;
+
+    while let Some(expected_info) = expected_iter.next_file(cancellable)? {
+        let expected_child = expected_iter.child(&expected_info);
+        let name = expected_info.name();
+        let name = name.to_str().expect("UTF-8 ostree name");
+        let path = Utf8PathBuf::from(format!("{root}{name}"));
+        let target_child = target.child(name);
+        let target_info = crate::diff::query_info_optional(&target_child, queryattrs, queryflags)
+            .context("querying optional to")?;
+        let is_dir = matches!(expected_info.file_type(), gio::FileType::Directory);
+        if let Some(target_info) = target_info {
+            let to_child = target_child
+                .downcast::<ostree::RepoFile>()
+                .expect("downcast");
+            to_child.ensure_resolved()?;
+            let from_child = expected_child
+                .downcast::<ostree::RepoFile>()
+                .expect("downcast");
+            from_child.ensure_resolved()?;
+
+            if is_dir {
+                let from_contents_checksum = from_child.tree_get_contents_checksum();
+                let to_contents_checksum = to_child.tree_get_contents_checksum();
+                if from_contents_checksum != to_contents_checksum {
+                    let subpath = Utf8PathBuf::from(format!("{}/", path));
+                    compare_commit_trees(
+                        repo,
+                        &subpath,
+                        &from_child,
+                        &to_child,
+                        exact,
+                        colliding_inodes,
+                        state,
+                    )?;
+                }
+            } else {
+                let from_checksum = from_child.checksum();
+                let to_checksum = to_child.checksum();
+                let matches = if exact {
+                    from_checksum == to_checksum
+                } else {
+                    compare_file_info(&target_info, &expected_info)
+                };
+                if !matches {
+                    let from_inode = inode_of_object(repo, &from_checksum)?;
+                    let to_inode = inode_of_object(repo, &to_checksum)?;
+                    if colliding_inodes.contains(&from_inode)
+                        || colliding_inodes.contains(&to_inode)
+                    {
+                        state.inode_corrupted.insert(path);
+                    } else {
+                        state.unknown_corrupted.insert(path);
+                    }
+                } else {
+                    state.verified.insert(path);
+                }
+            }
+        } else {
+            eprintln!("Missing {path}");
+            state.unknown_corrupted.insert(path);
+        }
+    }
+    Ok(())
+}
+
+#[context("Verifying container image state")]
+pub(crate) fn verify_container_image(
+    sysroot: &SysrootLock,
+    imgref: &ImageReference,
+    colliding_inodes: &BTreeSet<u64>,
+    verbose: bool,
+) -> Result<bool> {
+    let cancellable = gio::Cancellable::NONE;
+    let repo = &sysroot.repo();
+    let state =
+        query_image_ref(repo, imgref)?.ok_or_else(|| anyhow!("Expected present image {imgref}"))?;
+    let merge_commit = state.merge_commit.as_str();
+    let merge_commit_root = repo.read_commit(merge_commit, gio::Cancellable::NONE)?.0;
+    let merge_commit_root = merge_commit_root
+        .downcast::<ostree::RepoFile>()
+        .expect("downcast");
+    merge_commit_root.ensure_resolved()?;
+
+    // This shouldn't happen anymore
+    let config = state
+        .configuration
+        .ok_or_else(|| anyhow!("Missing configuration for image {imgref}"))?;
+    let (commit_layer, _component_layers, remaining_layers) =
+        parse_manifest_layout(&state.manifest, &config)?;
+
+    let mut comparison_state = CompareState::default();
+
+    let query = |l: &Descriptor| query_layer(repo, l.clone());
+
+    let base_tree = repo
+        .read_commit(&state.base_commit, cancellable)?
+        .0
+        .downcast::<ostree::RepoFile>()
+        .expect("downcast");
+    println!(
+        "Verifying with base ostree layer {}",
+        ref_for_layer(commit_layer)?
+    );
+    compare_commit_trees(
+        repo,
+        "/".into(),
+        &merge_commit_root,
+        &base_tree,
+        true,
+        colliding_inodes,
+        &mut comparison_state,
+    )?;
+
+    let remaining_layers = remaining_layers
+        .into_iter()
+        .map(query)
+        .collect::<Result<Vec<_>>>()?;
+
+    println!("Image has {} derived layers", remaining_layers.len());
+
+    for layer in remaining_layers.iter().rev() {
+        let layer_ref = layer.ostree_ref.as_str();
+        let layer_commit = layer
+            .commit
+            .as_deref()
+            .ok_or_else(|| anyhow!("Missing layer {layer_ref}"))?;
+        let layer_tree = repo
+            .read_commit(layer_commit, cancellable)?
+            .0
+            .downcast::<ostree::RepoFile>()
+            .expect("downcast");
+        compare_commit_trees(
+            repo,
+            "/".into(),
+            &merge_commit_root,
+            &layer_tree,
+            false,
+            colliding_inodes,
+            &mut comparison_state,
+        )?;
+    }
+
+    let n_verified = comparison_state.verified.len();
+    if comparison_state.is_ok() {
+        println!("OK image {imgref} (verified={n_verified})");
+        println!();
+    } else {
+        let n_inode = comparison_state.inode_corrupted.len();
+        let n_other = comparison_state.unknown_corrupted.len();
+        eprintln!("warning: Found corrupted merge commit");
+        eprintln!("  inode clashes: {n_inode}");
+        eprintln!("  unknown: {n_other}");
+        eprintln!("  ok: {n_verified}");
+        if verbose {
+            eprintln!("Mismatches:");
+            for path in comparison_state.inode_corrupted {
+                eprintln!("  inode: {path}");
+            }
+            for path in comparison_state.unknown_corrupted {
+                eprintln!("  other: {path}");
+            }
+        }
+        eprintln!();
+        return Ok(false);
+    }
+
+    Ok(true)
+}
diff --git a/lib/src/diff.rs b/lib/src/diff.rs
index a66c17a53..655adc382 100644
--- a/lib/src/diff.rs
+++ b/lib/src/diff.rs
@@ -14,7 +14,7 @@ use std::collections::BTreeSet;
 use std::fmt;
 
 /// Like `g_file_query_info()`, but return None if the target doesn't exist.
-fn query_info_optional(
+pub(crate) fn query_info_optional(
     f: &gio::File,
     queryattrs: &str,
     queryflags: gio::FileQueryInfoFlags,
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index c9a424b34..719564004 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -39,6 +39,8 @@ pub mod ima;
 pub mod keyfileext;
 pub(crate) mod logging;
 pub mod refescape;
+#[doc(hidden)]
+pub mod repair;
 pub mod sysroot;
 pub mod tar;
 pub mod tokio_util;
diff --git a/lib/src/repair.rs b/lib/src/repair.rs
new file mode 100644
index 000000000..5adddf454
--- /dev/null
+++ b/lib/src/repair.rs
@@ -0,0 +1,227 @@
+//! System repair functionality
+
+use std::{
+    collections::{BTreeMap, BTreeSet},
+    process::Command,
+};
+
+use anyhow::{anyhow, Context, Result};
+use cap_std::fs::Dir;
+use cap_std_ext::prelude::CapStdExtCommandExt;
+use cap_tempfile::cap_std;
+use fn_error_context::context;
+use ostree::{gio, glib};
+use std::os::unix::fs::MetadataExt;
+
+use crate::sysroot::SysrootLock;
+
+// Find the inode numbers for objects
+fn gather_inodes(
+    prefix: &str,
+    dir: &Dir,
+    little_inodes: &mut BTreeMap<u32, String>,
+    big_inodes: &mut BTreeMap<u64, String>,
+) -> Result<()> {
+    for child in dir.entries()? {
+        let child = child?;
+        let metadata = child.metadata()?;
+        if !(metadata.is_file() || metadata.is_symlink()) {
+            continue;
+        }
+        let name = child.file_name();
+        let name = name
+            .to_str()
+            .ok_or_else(|| anyhow::anyhow!("Invalid {name:?}"))?;
+        let object_rest = name
+            .split_once('.')
+            .ok_or_else(|| anyhow!("Invalid object {name}"))?
+            .0;
+        let checksum = format!("{prefix}{object_rest}");
+        let inode = metadata.ino();
+        if let Ok(little) = u32::try_from(inode) {
+            little_inodes.insert(little, checksum);
+        } else {
+            big_inodes.insert(inode, checksum);
+        }
+    }
+    Ok(())
+}
+
+#[context("Analyzing commit for derivation")]
+fn commit_is_derived(commit: &glib::Variant) -> Result<bool> {
+    let commit_meta = &glib::VariantDict::new(Some(&commit.child_value(0)));
+    if commit_meta
+        .lookup::<String>(crate::container::store::META_MANIFEST_DIGEST)?
+        .is_some()
+    {
+        return Ok(true);
+    }
+    if commit_meta
+        .lookup::<bool>("rpmostree.clientlayer")?
+        .is_some()
+    {
+        return Ok(true);
+    }
+    Ok(false)
+}
+
+/// The result of a check_repair operation
+#[derive(Debug, PartialEq, Eq)]
+pub enum InodeCheckResult {
+    /// Problems are unlikely.
+    Okay,
+    /// There is potential corruption
+    PotentialCorruption(BTreeSet<u64>),
+}
+
+#[context("Checking inodes")]
+#[doc(hidden)]
+/// Detect if any commits are potentially incorrect due to inode truncations.
+pub fn check_inode_collision(repo: &ostree::Repo, verbose: bool) -> Result<InodeCheckResult> {
+    let repo_dir = repo.dfd_as_dir()?;
+    let objects = repo_dir.open_dir("objects")?;
+
+    println!(
+        r#"Attempting analysis of ostree state for files that may be incorrectly linked.
+For more information, see https://github.com/ostreedev/ostree/pull/2874/commits/de6fddc6adee09a93901243dc7074090828a1912
+"#
+    );
+
+    println!("Gathering inodes for ostree objects...");
+    let mut little_inodes = BTreeMap::new();
+    let mut big_inodes = BTreeMap::new();
+
+    for child in objects.entries()? {
+        let child = child?;
+        if !child.file_type()?.is_dir() {
+            continue;
+        }
+        let name = child.file_name();
+        if name.len() != 2 {
+            continue;
+        }
+        let name = name
+            .to_str()
+            .ok_or_else(|| anyhow::anyhow!("Invalid {name:?}"))?;
+        let objdir = child.open_dir()?;
+        gather_inodes(name, &objdir, &mut little_inodes, &mut big_inodes)
+            .with_context(|| format!("Processing {name:?}"))?;
+    }
+
+    let mut colliding_inodes = BTreeMap::new();
+    for (big_inum, big_inum_checksum) in big_inodes.iter() {
+        let truncated = *big_inum as u32;
+        if let Some(small_inum_object) = little_inodes.get(&truncated) {
+            // Don't output each collision unless verbose mode is enabled.  It's actually
+            // quite interesting to see data, but only for development and deep introspection
+            // use cases.
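+            // (Illustration: a 64-bit inode such as 0x1_0000_0042 truncates to
+            // 0x42; if a different object genuinely has inode 0x42, any code
+            // that stored inodes as u32 can no longer tell the two apart.)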
+            if verbose {
+                eprintln!(
+                    r#"collision:
+     inode (>32 bit): {big_inum}
+              object: {big_inum_checksum}
+   inode (truncated): {truncated}
+              object: {small_inum_object}
+"#
+                );
+            }
+            colliding_inodes.insert(big_inum, big_inum_checksum);
+        }
+    }
+
+    let n_big = big_inodes.len();
+    let n_small = little_inodes.len();
+    println!("Analyzed {n_big} objects with > 32 bit inode numbers and {n_small} objects with <= 32 bit inode numbers");
+    if !colliding_inodes.is_empty() {
+        return Ok(InodeCheckResult::PotentialCorruption(
+            colliding_inodes
+                .keys()
+                .map(|&&v| v)
+                .collect::<BTreeSet<u64>>(),
+        ));
+    }
+
+    Ok(InodeCheckResult::Okay)
+}
+
+/// Attempt to automatically repair any corruption from inode collisions.
+#[doc(hidden)]
+pub fn auto_repair_inode_collision(
+    sysroot: &SysrootLock,
+    dry_run: bool,
+    verbose: bool,
+) -> Result<()> {
+    use crate::container::store as container_store;
+    let repo = &sysroot.repo();
+    let repo_dir = repo.dfd_as_dir()?;
+
+    let mut derived_commits = BTreeSet::new();
+    for (_refname, digest) in repo.list_refs(None, gio::Cancellable::NONE)? {
+        let commit = repo.load_commit(&digest)?.0;
+        if commit_is_derived(&commit)? {
+            if verbose {
+                eprintln!("Found derived commit: {commit}");
+            }
+            derived_commits.insert(digest);
+        }
+    }
+
+    // This is not an ironclad guarantee...however, I am pretty confident that there's
+    // no exposure without derivation today.
+    if derived_commits.is_empty() {
+        println!("OK no derived commits found.");
+        return Ok(());
+    }
+    let n_derived = derived_commits.len();
+    println!("Found {n_derived} derived commits");
+    println!("Backing filesystem information:");
+    {
+        let st = Command::new("stat")
+            .args(["-f", "."])
+            .cwd_dir(repo_dir.try_clone()?)
+            .status()?;
+        if !st.success() {
+            eprintln!("failed to spawn stat: {st:?}");
+        }
+    }
+
+    match check_inode_collision(repo, verbose)? {
+        InodeCheckResult::Okay => {
+            println!("OK no colliding inodes found");
+            Ok(())
+        }
+        InodeCheckResult::PotentialCorruption(colliding_inodes) => {
+            eprintln!(
+                "warning: {} potentially colliding inodes found",
+                colliding_inodes.len()
+            );
+            let all_images = container_store::list_images(repo)?;
+            let all_images = all_images
+                .into_iter()
+                .map(|img| crate::container::ImageReference::try_from(img.as_str()))
+                .collect::<Result<Vec<_>>>()?;
+            println!("Verifying {} ostree-container images", all_images.len());
+            let mut corrupted_images = Vec::new();
+            for imgref in all_images {
+                if !container_store::verify_container_image(
+                    sysroot,
+                    &imgref,
+                    &colliding_inodes,
+                    verbose,
+                )? {
+                    eprintln!("warning: Corrupted image {imgref}");
+                    corrupted_images.push(imgref);
+                }
+            }
+            if corrupted_images.is_empty() {
+                println!("OK no corrupted images found");
+                return Ok(());
+            }
+            if dry_run {
+                anyhow::bail!("Found potential corruption, dry-run mode enabled");
+            }
+            container_store::remove_images(repo, corrupted_images.iter())?;
+            Ok(())
+        }
+    }
+}

From 389a6184883136752a7ff08013b70c929b4df9db Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 21 Jul 2023 09:56:57 -0400
Subject: [PATCH 593/775] repair: Rework to be more introspectable

I plan to "productize" this repair code a bit more in OpenShift at
least, and I think other admins may want to do similar outside of that
too.  In order to make that more reliable:

- Better split the "fsck/--dry-run" path from "repair", i.e. mutation
- Introduce a `--write-result-to` argument that outputs JSON.
  This allows us to better distinguish the three states: "OK" versus
  "corruption detected" versus "tool failed for some other reason"

Further:

- Drop the check for derived commits; let's just *always* check the
  inodes, because it's not very expensive in the end and it's just
  really useful to do.
- Add checks for whether the booted deployment is potentially affected,
  as this is important information; we'll need to redeploy and reboot
  if so
---
 lib/src/cli.rs             |  53 ++++++--
 lib/src/container/store.rs |   8 +-
 lib/src/repair.rs          | 247 +++++++++++++++++++++----------------
 3 files changed, 188 insertions(+), 120 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index be17ac4ae..2425e6656 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -12,6 +12,7 @@ use fn_error_context::context;
 use ostree::{cap_std, gio, glib};
 use std::collections::BTreeMap;
 use std::ffi::OsString;
+use std::io::BufWriter;
 use std::path::PathBuf;
 use std::process::Command;
 use tokio::sync::mpsc::Receiver;
@@ -348,7 +349,7 @@ pub(crate) enum ContainerImageOpts {
 }
 
 /// Options for deployment repair.
-#[derive(Debug, Subcommand)]
+#[derive(Debug, Parser)]
 pub(crate) enum ProvisionalRepairOpts {
     AnalyzeInodes {
         /// Path to the repository
         #[clap(long, value_parser)]
         repo: Utf8PathBuf,
 
         /// Print additional information
         #[clap(long)]
         verbose: bool,
+
+        /// Serialize the repair result to this file as JSON
+        #[clap(long)]
+        write_result_to: Option<Utf8PathBuf>,
     },
 
     Repair {
         /// Path to the sysroot
         #[clap(long, value_parser)]
         sysroot: Utf8PathBuf,
 
         /// Do not mutate any system state
         #[clap(long)]
         dry_run: bool,
 
+        /// Serialize the repair result to this file as JSON
+        #[clap(long)]
+        write_result_to: Option<Utf8PathBuf>,
+
         /// Print additional information
         #[clap(long)]
         verbose: bool,
     },
@@ -787,6 +796,17 @@ fn container_remount_sysroot(sysroot: &Utf8Path) -> Result<()> {
     Ok(())
 }
 
+#[context("Serializing to output file")]
+fn handle_serialize_to_file<T: serde::Serialize>(path: Option<&Utf8Path>, obj: T) -> Result<()> {
+    if let Some(path) = path {
+        let mut out = std::fs::File::create(path)
+            .map(BufWriter::new)
+            .with_context(|| anyhow::anyhow!("Opening {path} for writing"))?;
+        serde_json::to_writer(&mut out, &obj).context("Serializing output")?;
+    }
+    Ok(())
+}
+
 /// Parse the provided arguments and execute.
 /// Calls [`structopt::clap::Error::exit`] on failure, printing the error message and aborting the program.
 pub async fn run_from_iter<I>(args: I) -> Result<()>
@@ -1027,15 +1047,21 @@ where
         #[cfg(feature = "docgen")]
         Opt::Man(manopts) => crate::docgen::generate_manpages(&manopts.directory),
         Opt::ProvisionalRepair(opts) => match opts {
-            ProvisionalRepairOpts::AnalyzeInodes { repo, verbose } => {
+            ProvisionalRepairOpts::AnalyzeInodes {
+                repo,
+                verbose,
+                write_result_to,
+            } => {
                 let repo = parse_repo(&repo)?;
-                match crate::repair::check_inode_collision(&repo, verbose)? {
-                    crate::repair::InodeCheckResult::Okay => {
-                        println!("OK: No colliding objects found.");
-                    }
-                    crate::repair::InodeCheckResult::PotentialCorruption(n) => {
-                        eprintln!("warning: {} potentially colliding inodes found", n.len());
-                    }
+                let check_res = crate::repair::check_inode_collision(&repo, verbose)?;
+                handle_serialize_to_file(write_result_to.as_deref(), &check_res)?;
+                if check_res.collisions.is_empty() {
+                    println!("OK: No colliding objects found.");
+                } else {
+                    eprintln!(
+                        "warning: {} potentially colliding inodes found",
+                        check_res.collisions.len()
+                    );
                 }
                 Ok(())
             }
@@ -1043,12 +1069,19 @@
                 sysroot,
                 verbose,
                 dry_run,
+                write_result_to,
             } => {
                 container_remount_sysroot(&sysroot)?;
                 let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot)));
                 sysroot.load(gio::Cancellable::NONE)?;
                 let sysroot = &SysrootLock::new_from_sysroot(sysroot).await?;
-                crate::repair::auto_repair_inode_collision(sysroot, dry_run, verbose)
+                let result = crate::repair::analyze_for_repair(sysroot, verbose)?;
+                handle_serialize_to_file(write_result_to.as_deref(), &result)?;
+                if dry_run {
+                    result.check()
+                } else {
+                    result.repair(sysroot)
+                }
             }
         },
     }
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 913b01671..ba5ec3e78 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -1385,13 +1385,12 @@ fn compare_commit_trees(
 pub(crate) fn verify_container_image(
     sysroot: &SysrootLock,
     imgref: &ImageReference,
+    state: &LayeredImageState,
     colliding_inodes: &BTreeSet<u64>,
     verbose: bool,
 ) -> Result<bool> {
     let cancellable = gio::Cancellable::NONE;
     let repo = &sysroot.repo();
-    let state =
-        query_image_ref(repo, imgref)?.ok_or_else(|| anyhow!("Expected present image {imgref}"))?;
     let merge_commit = state.merge_commit.as_str();
     let merge_commit_root = repo.read_commit(merge_commit, gio::Cancellable::NONE)?.0;
     let merge_commit_root = merge_commit_root
@@ -1402,9 +1401,10 @@ pub(crate) fn verify_container_image(
     // This shouldn't happen anymore
     let config = state
         .configuration
-        .ok_or_else(|| anyhow!("Missing configuration for image {imgref}"))?;
+        .as_ref()
+        .ok_or_else(|| anyhow!("Missing configuration for image"))?;
     let (commit_layer, _component_layers, remaining_layers) =
-        parse_manifest_layout(&state.manifest, &config)?;
+        parse_manifest_layout(&state.manifest, config)?;
 
     let mut comparison_state = CompareState::default();
 
diff --git a/lib/src/repair.rs b/lib/src/repair.rs
index 5adddf454..e6f4e2759 100644
--- a/lib/src/repair.rs
+++ b/lib/src/repair.rs
@@ -1,16 +1,13 @@
 //! System repair functionality
 
-use std::{
-    collections::{BTreeMap, BTreeSet},
-    process::Command,
-};
+use std::collections::{BTreeMap, BTreeSet};
+use std::fmt::Display;
 
 use anyhow::{anyhow, Context, Result};
 use cap_std::fs::Dir;
-use cap_std_ext::prelude::CapStdExtCommandExt;
 use cap_tempfile::cap_std;
 use fn_error_context::context;
-use ostree::{gio, glib};
+use serde::{Deserialize, Serialize};
 use std::os::unix::fs::MetadataExt;
 
 use crate::sysroot::SysrootLock;
@@ -47,37 +44,52 @@ fn gather_inodes(
     Ok(())
 }
 
-#[context("Analyzing commit for derivation")]
-fn commit_is_derived(commit: &glib::Variant) -> Result<bool> {
-    let commit_meta = &glib::VariantDict::new(Some(&commit.child_value(0)));
-    if commit_meta
-        .lookup::<String>(crate::container::store::META_MANIFEST_DIGEST)?
-        .is_some()
-    {
-        return Ok(true);
-    }
-    if commit_meta
-        .lookup::<bool>("rpmostree.clientlayer")?
-        .is_some()
-    {
-        return Ok(true);
+#[derive(Default, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+pub struct RepairResult {
+    /// Result of inode checking
+    pub inodes: InodeCheck,
+    // Whether we detected a likely corrupted merge commit
+    pub likely_corrupted_container_image_merges: Vec<String>,
+    // Whether the booted deployment is likely corrupted
+    pub booted_is_likely_corrupted: bool,
+    // Whether the staged deployment is likely corrupted
+    pub staged_is_likely_corrupted: bool,
+}
+
+#[derive(Default, Debug, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+pub struct InodeCheck {
+    // Number of >32 bit inodes found
+    pub inode64: u64,
+    // Number of <= 32 bit inodes found
+    pub inode32: u64,
+    // Number of collisions found (when a 64 bit inode is truncated to 32 bit)
+    pub collisions: BTreeSet<u64>,
+}
+
+impl Display for InodeCheck {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "ostree inode check:\n  64bit inodes: {}\n  32 bit inodes: {}\n  collisions: {}\n",
+            self.inode64,
+            self.inode32,
+            self.collisions.len()
+        )
     }
-    Ok(false)
 }
 
-/// The result of a check_repair operation
-#[derive(Debug, PartialEq, Eq)]
-pub enum InodeCheckResult {
-    /// Problems are unlikely.
-    Okay,
-    /// There is potential corruption
-    PotentialCorruption(BTreeSet<u64>),
+impl InodeCheck {
+    pub fn is_ok(&self) -> bool {
+        self.collisions.is_empty()
+    }
 }
 
 #[context("Checking inodes")]
 #[doc(hidden)]
 /// Detect if any commits are potentially incorrect due to inode truncations.
-pub fn check_inode_collision(repo: &ostree::Repo, verbose: bool) -> Result<InodeCheckResult> {
+pub fn check_inode_collision(repo: &ostree::Repo, verbose: bool) -> Result<InodeCheck> {
     let repo_dir = repo.dfd_as_dir()?;
     let objects = repo_dir.open_dir("objects")?;
 
@@ -129,99 +141,122 @@ For more information, see https://github.com/ostreedev/ostree/pull/2874/commits/
         }
     }
 
-    let n_big = big_inodes.len();
-    let n_small = little_inodes.len();
-    println!("Analyzed {n_big} objects with > 32 bit inode numbers and {n_small} objects with <= 32 bit inode numbers");
-    if !colliding_inodes.is_empty() {
-        return Ok(InodeCheckResult::PotentialCorruption(
-            colliding_inodes
-                .keys()
-                .map(|&&v| v)
-                .collect::<BTreeSet<u64>>(),
-        ));
-    }
+    // From here let's just track the possibly-colliding 64 bit inode, not also
+    // the checksum.
+    let collisions = colliding_inodes
+        .keys()
+        .map(|&&v| v)
+        .collect::<BTreeSet<u64>>();
 
-    Ok(InodeCheckResult::Okay)
+    let inode32 = little_inodes.len() as u64;
+    let inode64 = big_inodes.len() as u64;
+    Ok(InodeCheck {
+        inode32,
+        inode64,
+        collisions,
+    })
 }
 
 /// Attempt to automatically repair any corruption from inode collisions.
 #[doc(hidden)]
-pub fn auto_repair_inode_collision(
-    sysroot: &SysrootLock,
-    dry_run: bool,
-    verbose: bool,
-) -> Result<()> {
+pub fn analyze_for_repair(sysroot: &SysrootLock, verbose: bool) -> Result<RepairResult> {
     use crate::container::store as container_store;
     let repo = &sysroot.repo();
-    let repo_dir = repo.dfd_as_dir()?;
 
-    let mut derived_commits = BTreeSet::new();
-    for (_refname, digest) in repo.list_refs(None, gio::Cancellable::NONE)? {
-        let commit = repo.load_commit(&digest)?.0;
-        if commit_is_derived(&commit)? {
-            if verbose {
-                eprintln!("Found derived commit: {commit}");
-            }
-            derived_commits.insert(digest);
-        }
-    }
+    // Query booted and pending state
+    let booted_deployment = sysroot.booted_deployment();
+    let booted_checksum = booted_deployment.as_ref().map(|b| b.csum());
+    let booted_checksum = booted_checksum.as_ref().map(|s| s.as_str());
+    let staged_deployment = sysroot.staged_deployment();
+    let staged_checksum = staged_deployment.as_ref().map(|b| b.csum());
+    let staged_checksum = staged_checksum.as_ref().map(|s| s.as_str());
 
-    // This is not an ironclad guarantee...however, I am pretty confident that there's
-    // no exposure without derivation today.
-    if derived_commits.is_empty() {
-        println!("OK no derived commits found.");
-        return Ok(());
+    let inodes = check_inode_collision(repo, verbose)?;
+    println!("{}", inodes);
+    if inodes.is_ok() {
+        println!("OK no colliding inodes found");
+        return Ok(RepairResult {
+            inodes,
+            ..Default::default()
+        });
     }
-    let n_derived = derived_commits.len();
-    println!("Found {n_derived} derived commits");
-    println!("Backing filesystem information:");
-    {
-        let st = Command::new("stat")
-            .args(["-f", "."])
-            .cwd_dir(repo_dir.try_clone()?)
-            .status()?;
-        if !st.success() {
-            eprintln!("failed to spawn stat: {st:?}");
+
+    let all_images = container_store::list_images(repo)?;
+    let all_images = all_images
+        .into_iter()
+        .map(|img| crate::container::ImageReference::try_from(img.as_str()))
+        .collect::<Result<Vec<_>>>()?;
+    println!("Verifying ostree-container images: {}", all_images.len());
+    let mut likely_corrupted_container_image_merges = Vec::new();
+    let mut booted_is_likely_corrupted = false;
+    let mut staged_is_likely_corrupted = false;
+    for imgref in all_images {
+        if let Some(state) = container_store::query_image_ref(repo, &imgref)? {
+            if !container_store::verify_container_image(
+                sysroot,
+                &imgref,
+                &state,
+                &inodes.collisions,
+                verbose,
+            )? {
+                eprintln!("warning: Corrupted image {imgref}");
+                likely_corrupted_container_image_merges.push(imgref.to_string());
+                let merge_commit = state.merge_commit.as_str();
+                if booted_checksum == Some(merge_commit) {
+                    booted_is_likely_corrupted = true;
+                    eprintln!("warning: booted deployment is likely corrupted");
+                } else if staged_checksum == Some(merge_commit) {
+                    staged_is_likely_corrupted = true;
+                    eprintln!("warning: staged deployment is likely corrupted");
+                }
+            }
+        } else {
+            // This really shouldn't happen
+            eprintln!("warning: Image was removed from underneath us: {imgref}");
+            std::thread::sleep(std::time::Duration::from_secs(1));
         }
     }
+    Ok(RepairResult {
+        inodes,
+        likely_corrupted_container_image_merges,
+        booted_is_likely_corrupted,
+        staged_is_likely_corrupted,
+    })
+}
 
-    match check_inode_collision(repo, verbose)? {
-        InodeCheckResult::Okay => {
-            println!("OK no colliding inodes found");
-            Ok(())
+impl RepairResult {
+    pub fn check(&self) -> anyhow::Result<()> {
+        if self.booted_is_likely_corrupted {
+            eprintln!("warning: booted deployment is likely corrupted");
         }
-        InodeCheckResult::PotentialCorruption(colliding_inodes) => {
-            eprintln!(
-                "warning: {} potentially colliding inodes found",
-                colliding_inodes.len()
-            );
-            let all_images = container_store::list_images(repo)?;
-            let all_images = all_images
-                .into_iter()
-                .map(|img| crate::container::ImageReference::try_from(img.as_str()))
-                .collect::<Result<Vec<_>>>()?;
-            println!("Verifying {} ostree-container images", all_images.len());
-            let mut corrupted_images = Vec::new();
-            for imgref in all_images {
-                if !container_store::verify_container_image(
-                    sysroot,
-                    &imgref,
-                    &colliding_inodes,
-                    verbose,
-                )? {
-                    eprintln!("warning: Corrupted image {imgref}");
-                    corrupted_images.push(imgref);
-                }
-            }
-            if corrupted_images.is_empty() {
-                println!("OK no corrupted images found");
-                return Ok(());
+        if self.staged_is_likely_corrupted {
+            eprintln!("warning: staged deployment is likely corrupted");
+        }
+        match self.likely_corrupted_container_image_merges.len() {
+            0 => {
+                println!("OK no corruption found");
+                Ok(())
             }
-            if dry_run {
-                anyhow::bail!("Found potential corruption, dry-run mode enabled");
+            n => {
+                anyhow::bail!("Found corruption in images: {n}")
             }
-            container_store::remove_images(repo, corrupted_images.iter())?;
-            Ok(())
         }
     }
+
+    #[context("Repairing")]
+    pub fn repair(self, sysroot: &SysrootLock) -> Result<()> {
+        let repo = &sysroot.repo();
+        for imgref in self.likely_corrupted_container_image_merges {
+            let imgref = crate::container::ImageReference::try_from(imgref.as_str())?;
+            eprintln!("Flushing cached state for corrupted merged image: {imgref}");
+            crate::container::store::remove_images(repo, [&imgref])?;
+        }
+        if self.booted_is_likely_corrupted {
+            anyhow::bail!("TODO redeploy and reboot for booted deployment corruption");
+        }
+        if self.staged_is_likely_corrupted {
+            anyhow::bail!("TODO undeploy for staged deployment corruption");
+        }
+        Ok(())
+    }
+}

From 11e0b9b6b7d03ab9caa68cf483d69787457f7693 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 21 Jul 2023 14:36:36 -0400
Subject: [PATCH 594/775] Release 0.11.3

To get the fsck bits out.
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index fdf93f3d5..aa05ddf02 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0"
 name = "ostree-ext"
 readme = "README.md"
 repository = "https://github.com/ostreedev/ostree-rs-ext"
-version = "0.11.2"
+version = "0.11.3"
 rust-version = "1.64.0"
 
 [dependencies]

From c7ae1a01f0263dadf5a6e008cd1f9416344e38fa Mon Sep 17 00:00:00 2001
From: Luke Yang
Date: Mon, 10 Jul 2023 13:21:33 -0400
Subject: [PATCH 595/775] Add previous container state manifest

This commit is a prerequisite to issue #4176 in the rpm-ostree repo,
implementing the `rpm-ostree upgrade --check` feature for ostree native
containers.

A previous_state `LayeredImageState` object is added to the
`PreparedImport` object, used to store the previous container state
when importing a container image.

An `export_as_string()` function is added to the `ManifestDiff` object
to allow rpm-ostree to print the manifest diff on the client side. The
code is exactly the same as the existing `print()` function for
`ManifestDiff`, except it returns a string instead of printing out the
diff in the terminal.
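As a sketch of what that could look like (illustrative only, not
necessarily the final signature), the export can mirror `print()` by
formatting the same three lines into a `String`, assuming the size
fields added to `ManifestDiff` in the diff below:

    // Hypothetical sketch; writing into a String cannot fail, so the
    // unwrap()s on writeln! are safe.
    pub fn export_as_string(&self) -> String {
        use std::fmt::Write as _;
        let mut s = String::new();
        writeln!(s, "Total new layers: {:<4}  Size: {}", self.total,
                 glib::format_size(self.total_size)).unwrap();
        writeln!(s, "Removed layers:   {:<4}  Size: {}", self.n_removed,
                 glib::format_size(self.removed_size)).unwrap();
        writeln!(s, "Added layers:     {:<4}  Size: {}", self.n_added,
                 glib::format_size(self.added_size)).unwrap();
        s
    }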
---
 lib/src/cli.rs             |  6 ++++-
 lib/src/container/mod.rs   | 51 +++++++++++++++++++++++++++-----------
 lib/src/container/store.rs | 12 +++++----
 3 files changed, 49 insertions(+), 20 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index acbc973a4..eb2f8120d 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -16,7 +16,7 @@ use tokio::sync::mpsc::Receiver;
 
 use crate::commit::container_commit;
 use crate::container::store::{ImportProgress, LayerProgress, PreparedImport};
-use crate::container::{self as ostree_container};
+use crate::container::{self as ostree_container, ManifestDiff};
 use crate::container::{Config, ImageReference, OstreeImageReference};
 use ostree_container::store::{ImageImporter, PrepareResult};
 
@@ -613,6 +613,10 @@ async fn container_store(
     if let Some(warning) = prep.deprecated_warning() {
         print_deprecated_warning(warning).await;
     }
+    if let Some(previous_state) = prep.previous_state.as_ref() {
+        let diff = ManifestDiff::new(&previous_state.manifest, &prep.manifest);
+        diff.print();
+    }
     print_layer_status(&prep);
     let printer = (!quiet).then(|| {
         let layer_progress = imp.request_progress();
diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index 239d75299..3896aceb9 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -265,6 +265,18 @@ pub struct ManifestDiff<'a> {
     pub removed: Vec<&'a oci_spec::image::Descriptor>,
     /// Layers which are present in the new image but not the old image.
     pub added: Vec<&'a oci_spec::image::Descriptor>,
+    /// Total number of packages
+    pub total: u64,
+    /// Size of total number of packages.
+    pub total_size: u64,
+    /// Number of packages removed
+    pub n_removed: u64,
+    /// Size of the number of packages removed
+    pub removed_size: u64,
+    /// Number of packages added
+    pub n_added: u64,
+    /// Size of the number of packages added
+    pub added_size: u64,
 }
 
 impl<'a> ManifestDiff<'a> {
@@ -297,11 +309,27 @@ impl<'a> ManifestDiff<'a> {
             }
         }
         added.sort_by(|a, b| a.digest().cmp(b.digest()));
+
+        fn layersum<'a, I: Iterator<Item = &'a oci_spec::image::Descriptor>>(layers: I) -> u64 {
+            layers.map(|layer| layer.size() as u64).sum()
+        }
+        let total = dest_layers.len() as u64;
+        let total_size = layersum(dest.layers().iter());
+        let n_removed = removed.len() as u64;
+        let n_added = added.len() as u64;
+        let removed_size = layersum(removed.iter().copied());
+        let added_size = layersum(added.iter().copied());
         ManifestDiff {
             from: src,
             to: dest,
             removed,
             added,
+            total,
+            total_size,
+            n_removed,
+            removed_size,
+            n_added,
+            added_size,
         }
     }
 }
@@ -309,20 +337,15 @@ impl<'a> ManifestDiff<'a> {
 impl<'a> ManifestDiff<'a> {
     /// Prints the total, removed and added content between two OCI images
     pub fn print(&self) {
-        fn layersum<'a, I: Iterator<Item = &'a oci_spec::image::Descriptor>>(layers: I) -> u64 {
-            layers.map(|layer| layer.size() as u64).sum()
-        }
-        let new_total = self.to.layers().len();
-        let new_total_size = glib::format_size(layersum(self.to.layers().iter()));
-        let n_removed = self.removed.len();
-        let n_added = self.added.len();
-        let removed_size = layersum(self.removed.iter().copied());
-        let removed_size_str = glib::format_size(removed_size);
-        let added_size = layersum(self.added.iter().copied());
-        let added_size_str = glib::format_size(added_size);
-        println!("Total new layers: {new_total:<4}  Size: {new_total_size}");
-        println!("Removed layers: {n_removed:<4} Size: {removed_size_str}");
-        println!("Added layers: {n_added:<4} Size: {added_size_str}");
+        let print_total = self.total;
+        let print_total_size = glib::format_size(self.total_size);
+        let print_n_removed = self.n_removed;
+        let print_removed_size = glib::format_size(self.removed_size);
+        let print_n_added = self.n_added;
+        let print_added_size = glib::format_size(self.added_size);
+        println!("Total new layers: {print_total:<4}  Size: {print_total_size}");
+        println!("Removed layers:   {print_n_removed:<4}  Size: {print_removed_size}");
+        println!("Added layers:     {print_n_added:<4}  Size: {print_added_size}");
     }
 }
 
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 3b72c29a0..4279417c9 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -191,6 +191,8 @@ pub struct PreparedImport {
     pub manifest: oci_image::ImageManifest,
     /// The deserialized configuration.
     pub config: oci_image::ImageConfiguration,
+    /// The previous manifest
+    pub previous_state: Option<Box<LayeredImageState>>,
     /// The previously stored manifest digest.
     pub previous_manifest_digest: Option<String>,
     /// The previously stored image ID.
@@ -532,7 +534,7 @@ impl ImageImporter {
 
         // Query for previous stored state
 
-        let (previous_manifest_digest, previous_imageid) =
+        let (previous_state, previous_imageid) =
             if let Some(previous_state) = try_query_image_ref(&self.repo, &self.imgref.imgref)? {
                 // If the manifest digests match, we're done.
                 if previous_state.manifest_digest == manifest_digest {
@@ -543,10 +545,8 @@ impl ImageImporter {
                 if previous_imageid == new_imageid {
                     return Ok(PrepareResult::AlreadyPresent(previous_state));
                 }
-                (
-                    Some(previous_state.manifest_digest),
-                    Some(previous_imageid.to_string()),
-                )
+                let previous_imageid = previous_imageid.to_string();
+                (Some(previous_state), Some(previous_imageid))
             } else {
                 (None, None)
             };
@@ -567,10 +567,12 @@ impl ImageImporter {
             .map(query)
             .collect::<Result<Vec<_>>>()?;
 
+        let previous_manifest_digest = previous_state.as_ref().map(|s| s.manifest_digest.clone());
         let imp = PreparedImport {
             manifest,
             manifest_digest,
             config,
+            previous_state,
             previous_manifest_digest,
             previous_imageid,
             ostree_layers: component_layers,

From 13498ed05e7afa84990d3c7d243dd4bc490f1d01 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 2 Aug 2023 13:51:10 -0400
Subject: [PATCH 596/775] Release 0.11.4

- One kargs fix
- Add more data about diffs to `ManifestDiff`
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index aa05ddf02..4792d3810 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0"
 name = "ostree-ext"
 readme = "README.md"
 repository = "https://github.com/ostreedev/ostree-rs-ext"
-version = "0.11.3"
+version = "0.11.4"
 rust-version = "1.64.0"
 
 [dependencies]

From 3c9712fa09ed3980c6ab6ec904cb45882e84ea9b Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 14 Aug 2023 13:00:16 -0400
Subject: [PATCH 597/775] container: Make requiring bootable field be opt-in

This fixes a regression introduced by
https://github.com/ostreedev/ostree-rs-ext/commit/9c4a75b3778a3f2fdece095f8f5f7a6289ab512d

Brought up in https://github.com/ostreedev/ostree/discussions/2978

Basically we need to make this opt-in at higher levels because
encapsulating a non-bootable commit (as well as a commit that
historically doesn't have that label) must be supported.
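For callers that still want the strict behavior, usage is roughly as
follows (an illustrative sketch; the `require_bootable()` method is the
one added in the diff below, the rest is standard importer setup):

    // Opt back into failing on images lacking the ostree.bootable label.
    let mut imp = ImageImporter::new(repo, imgref, Default::default()).await?;
    imp.require_bootable();
    let prep = match imp.prepare().await? {
        PrepareResult::AlreadyPresent(c) => return Ok(c),
        PrepareResult::Ready(r) => r,
    };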
---
 lib/src/container/deploy.rs |  1 +
 lib/src/container/store.rs  | 22 ++++++++++++----
 lib/src/fixture.rs          |  8 ++++--
 lib/tests/it/main.rs        | 51 +++++++++++++++++++++++++++++++++++++
 4 files changed, 75 insertions(+), 7 deletions(-)

diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs
index 24651cf02..49a829651 100644
--- a/lib/src/container/deploy.rs
+++ b/lib/src/container/deploy.rs
@@ -60,6 +60,7 @@ pub async fn deploy(
     let mut imp =
         super::store::ImageImporter::new(repo, imgref, options.proxy_cfg.unwrap_or_default())
             .await?;
+    imp.require_bootable();
     if let Some(target) = options.target_imgref {
         imp.set_target(target);
     }
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 9515eb625..e6cb56ab5 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -148,6 +148,8 @@ pub struct ImageImporter {
     target_imgref: Option<OstreeImageReference>,
     no_imgref: bool,  // If true, do not write final image ref
     disable_gc: bool, // If true, don't prune unused image layers
+    /// If true, require the image has the bootable flag
+    require_bootable: bool,
 
     pub(crate) proxy_img: OpenedImage,
     layer_progress: Option<Sender<ImportProgress>>,
@@ -349,11 +351,6 @@ pub(crate) fn parse_manifest_layout<'a>(
     config: &ImageConfiguration,
 ) -> Result<(&'a Descriptor, Vec<&'a Descriptor>, Vec<&'a Descriptor>)> {
     let config_labels = super::labels_of(config);
-    let bootable_key = *ostree::METADATA_KEY_BOOTABLE;
-    let bootable = config_labels.map_or(false, |l| l.contains_key(bootable_key));
-    if !bootable {
-        anyhow::bail!("Target image does not have {bootable_key} label");
-    }
 
     let first_layer = manifest
         .layers()
@@ -470,6 +467,7 @@ impl ImageImporter {
             target_imgref: None,
             no_imgref: false,
             disable_gc: false,
+            require_bootable: false,
             imgref: imgref.clone(),
             layer_progress: None,
             layer_byte_progress: None,
@@ -488,6 +486,11 @@ impl ImageImporter {
         self.no_imgref = true;
     }
 
+    /// Require that the image has the bootable metadata field
+    pub fn require_bootable(&mut self) {
+        self.require_bootable = true;
+    }
+
     /// Do not prune image layers.
     pub fn disable_gc(&mut self) {
         self.disable_gc = true;
     }
@@ -555,6 +558,15 @@ impl ImageImporter {
         };
 
         let config = self.proxy.fetch_config(&self.proxy_img).await?;
+        let config_labels = super::labels_of(&config);
+
+        if self.require_bootable {
+            let bootable_key = *ostree::METADATA_KEY_BOOTABLE;
+            let bootable = config_labels.map_or(false, |l| l.contains_key(bootable_key));
+            if !bootable {
+                anyhow::bail!("Target image does not have {bootable_key} label");
+            }
+        }
 
         let (commit_layer, component_layers, remaining_layers) =
             parse_manifest_layout(&manifest, &config)?;
diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index fbf649e1d..5d66efde0 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -140,7 +140,7 @@ static OWNERS: Lazy<Vec<(Regex, &'static str)>> = Lazy::new(|| {
         .collect()
 });
 
-static CONTENTS_V0: &str = indoc::indoc! { r##"
+pub static CONTENTS_V0: &str = indoc::indoc!
{ r##" r usr/lib/modules/5.10.18-200.x86_64/vmlinuz this-is-a-kernel r usr/lib/modules/5.10.18-200.x86_64/initramfs this-is-an-initramfs m 0 0 755 @@ -361,6 +361,7 @@ pub struct Fixture { destrepo: ostree::Repo, pub selinux: bool, + pub bootable: bool, } impl Fixture { @@ -407,6 +408,7 @@ impl Fixture { srcrepo, destrepo, selinux: true, + bootable: true, }) } @@ -500,7 +502,9 @@ impl Fixture { metadata.insert("ostree.container-cmd", &vec!["/usr/bin/bash"]); metadata.insert("version", &"42.0"); #[allow(clippy::explicit_auto_deref)] - metadata.insert(*ostree::METADATA_KEY_BOOTABLE, &true); + if self.bootable { + metadata.insert(*ostree::METADATA_KEY_BOOTABLE, &true); + } let metadata = metadata.to_variant(); let commit = self.srcrepo.write_commit_with_time( None, diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs index 5926b6781..1bbbbea13 100644 --- a/lib/tests/it/main.rs +++ b/lib/tests/it/main.rs @@ -625,6 +625,57 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> { Ok(()) } +#[tokio::test] +async fn test_unencapsulate_unbootable() -> Result<()> { + let fixture = { + let mut fixture = Fixture::new_base()?; + fixture.bootable = false; + fixture.commit_filedefs(FileDef::iter_from(ostree_ext::fixture::CONTENTS_V0))?; + fixture + }; + let testrev = fixture + .srcrepo() + .require_rev(fixture.testref()) + .context("Failed to resolve ref")?; + let srcoci_path = &fixture.path.join("oci"); + let srcoci_imgref = ImageReference { + transport: Transport::OciDir, + name: srcoci_path.as_str().to_string(), + }; + let srcoci_unverified = OstreeImageReference { + sigverify: SignatureSource::ContainerPolicyAllowInsecure, + imgref: srcoci_imgref.clone(), + }; + + let config = Config::default(); + let _digest = ostree_ext::container::encapsulate( + fixture.srcrepo(), + fixture.testref(), + &config, + None, + None, + None, + &srcoci_imgref, + ) + .await + .context("exporting")?; + assert!(srcoci_path.exists()); + + assert!(fixture + .destrepo() + .resolve_rev(fixture.testref(), true) + .unwrap() + .is_none()); + + let target = ostree_ext::container::unencapsulate(fixture.destrepo(), &srcoci_unverified) + .await + .unwrap(); + + assert_eq!(target.ostree_commit.as_str(), testrev.as_str()); + + Ok(()) +} + /// Parse a chunked container image and validate its structure; particularly fn validate_chunked_structure(oci_path: &Utf8Path) -> Result<()> { use tar::EntryType::Link; From 48bda787ab097e4e436efb0656f69836360fc881 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 14 Aug 2023 14:11:10 -0400 Subject: [PATCH 598/775] store: Tweak error message for non-ostree containers Since in theory we support non-bootable cases, and we actually have a distinct label for the bootable one. --- lib/src/container/store.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs index 9515eb625..fb82b9a43 100644 --- a/lib/src/container/store.rs +++ b/lib/src/container/store.rs @@ -372,7 +372,7 @@ pub(crate) fn parse_manifest_layout<'a>( let (layout, target_diffid) = info.ok_or_else(|| { anyhow!( - "No {} label found, not an ostree-bootable container", + "No {} label found, not an ostree encapsulated container", ExportLayout::V1.label() ) })?; From 9e37e76c3bb147de4b15ad6521ebd7a09605bd4a Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Wed, 16 Aug 2023 14:30:18 -0400 Subject: [PATCH 599/775] lib: Dispatch to monomorphic function I was looking at `cargo bloat` and this showed up near the top twice. 
It's a large function and only the option parsing needs to be generic.
---
 lib/src/cli.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index f786df070..ffdf58047 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -818,7 +818,10 @@ where
     I: IntoIterator,
     I::Item: Into<OsString> + Clone,
 {
-    let opt = Opt::parse_from(args);
+    run_from_opt(Opt::parse_from(args)).await
+}
+
+async fn run_from_opt(opt: Opt) -> Result<()> {
     match opt {
         Opt::Tar(TarOpts::Import(ref opt)) => tar_import(opt).await,
         Opt::Tar(TarOpts::Export(ref opt)) => tar_export(opt),

From 5dbf3571950eaa432e50d95e7dc0251587ab0613 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 21 Aug 2023 09:29:55 -0400
Subject: [PATCH 600/775] cli: Make `--sysroot` optional

To make it less likely people run into the footgun of specifying
`--sysroot /sysroot` which is buggy right now
https://github.com/ostreedev/ostree/issues/2992
---
 lib/src/cli.rs | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index ffdf58047..949e33106 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -292,7 +292,7 @@ pub(crate) enum ContainerImageOpts {
     Deploy {
         /// Path to the system root
         #[clap(long)]
-        sysroot: String,
+        sysroot: Option<String>,
 
         /// Name for the state directory, also known as "osname".
         #[clap(long, default_value = ostree_container::deploy::STATEROOT_DEFAULT)]
@@ -976,7 +976,11 @@ async fn run_from_opt(opt: Opt) -> Result<()> {
             proxyopts,
             write_commitid_to,
         } => {
-            let sysroot = &ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot)));
+            let sysroot = &if let Some(sysroot) = sysroot {
+                ostree::Sysroot::new(Some(&gio::File::for_path(&sysroot)))
+            } else {
+                ostree::Sysroot::new_default()
+            };
             sysroot.load(gio::Cancellable::NONE)?;
             let repo = &sysroot.repo();
             let kargs = karg.as_deref();

From 6792973b6c7b9b7de1cd7ce5734e70ef85c6e680 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 22 Aug 2023 09:24:27 -0400
Subject: [PATCH 601/775] Cargo.toml: Update `release` profile, add
 `releaselto`

- `release` should use `panic=abort` by default because we make heavy
  use of FFI and this is safest, and I don't think we need unwinding
  anyways
- The `releaselto` produces smallest binaries
---
 Cargo.toml | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/Cargo.toml b/Cargo.toml
index d49afd92f..d381af313 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -7,3 +7,12 @@ opt-level = 1 # No optimizations are too slow for us.
 
 [profile.release]
 lto = "thin"
+# We use FFI so this is safest
+panic = "abort"
+# We assume we're being delivered via e.g. RPM which supports split debuginfo
+debug = true
+
+[profile.releaselto]
+codegen-units = 1
+inherits = "release"
+lto = "yes"

From eaefabb970851067582d470262c66cf1b4fe1ee4 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Wed, 23 Aug 2023 10:59:23 -0400
Subject: [PATCH 602/775] Release 0.11.5

Just to keep the fixes flowing.
--- lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/Cargo.toml b/lib/Cargo.toml index 4792d3810..a4f5fd767 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0" name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" -version = "0.11.4" +version = "0.11.5" rust-version = "1.64.0" [dependencies] From fe8da718e38b8e550c611784e64aa6ecb6a2a8e3 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 28 Aug 2023 13:38:40 -0400 Subject: [PATCH 603/775] Bump rust-version = 1.70 clap_builder now pulls it in, and we need to follow that train... --- .github/workflows/rust.yml | 2 +- cli/Cargo.toml | 2 +- lib/Cargo.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 747111410..357ab2184 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -16,7 +16,7 @@ on: env: CARGO_TERM_COLOR: always # Pinned toolchain for linting - ACTION_LINTS_TOOLCHAIN: 1.64.0 + ACTION_LINTS_TOOLCHAIN: 1.70.0 jobs: tests: diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 66a012408..f18a0c7f1 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -7,7 +7,7 @@ license = "MIT OR Apache-2.0" repository = "https://github.com/ostreedev/ostree-rs-ext" readme = "README.md" publish = false -rust-version = "1.64.0" +rust-version = "1.70.0" [dependencies] anyhow = "1.0" diff --git a/lib/Cargo.toml b/lib/Cargo.toml index a4f5fd767..7c4cf010a 100644 --- a/lib/Cargo.toml +++ b/lib/Cargo.toml @@ -7,7 +7,7 @@ name = "ostree-ext" readme = "README.md" repository = "https://github.com/ostreedev/ostree-rs-ext" version = "0.11.5" -rust-version = "1.64.0" +rust-version = "1.70.0" [dependencies] anyhow = "1.0" From 7650946cfac1eaa829d643e3d1d80c1d19a1a1bd Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 28 Aug 2023 14:17:51 -0400 Subject: [PATCH 604/775] ci: Drop hard errors on clippy warnings by default It's just too much churn for too little value. 
--- .github/workflows/rust.yml | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 357ab2184..32f962455 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -15,8 +15,6 @@ on: env: CARGO_TERM_COLOR: always - # Pinned toolchain for linting - ACTION_LINTS_TOOLCHAIN: 1.70.0 jobs: tests: @@ -33,6 +31,8 @@ jobs: uses: Swatinem/rust-cache@v2 with: key: "tests" + - name: cargo fmt (check) + run: cargo fmt -- --check -l - name: Build run: cargo test --no-run - name: Individual checks @@ -41,6 +41,8 @@ jobs: run: cargo test -- --nocapture --quiet - name: Manpage generation run: mkdir -p target/man && cargo run --features=docgen -- man --directory target/man + # - name: cargo clippy + # run: cargo clippy build: runs-on: ubuntu-latest container: quay.io/coreos-assembler/fcos-buildroot:testing-devel @@ -94,25 +96,6 @@ jobs: with: log-level: warn command: check bans sources licenses - linting: - name: "Lints, pinned toolchain" - runs-on: ubuntu-latest - container: quay.io/coreos-assembler/fcos-buildroot:testing-devel - steps: - - name: Checkout repository - uses: actions/checkout@v3 - - name: Install deps - run: ./ci/installdeps.sh - - name: Remove system Rust toolchain - run: dnf remove -y rust cargo - - uses: dtolnay/rust-toolchain@master - with: - toolchain: ${{ env['ACTION_LINTS_TOOLCHAIN'] }} - components: rustfmt, clippy - - name: cargo fmt (check) - run: cargo fmt -- --check -l - - name: cargo clippy (warnings) - run: cargo clippy -- -D warnings integration: name: "Integration" needs: build From b78b43bf47c4afa6cf626c5ce25c626591b43e56 Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Mon, 28 Aug 2023 09:18:11 -0400 Subject: [PATCH 605/775] cli/unencapsulate: Add proxy options We need to support all the proxy options so that custom authentication files can be supported, etc. Closes: https://github.com/ostreedev/ostree/issues/3015 --- lib/src/cli.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/src/cli.rs b/lib/src/cli.rs index 949e33106..c15135027 100644 --- a/lib/src/cli.rs +++ b/lib/src/cli.rs @@ -88,6 +88,9 @@ pub(crate) enum ContainerOpts { #[clap(long, value_parser)] repo: Utf8PathBuf, + #[clap(flatten)] + proxyopts: ContainerProxyOpts, + /// Image reference, e.g. 
registry:quay.io/exampleos/exampleos:latest
     #[clap(value_parser = parse_imgref)]
     imgref: OstreeImageReference,
 
@@ -571,6 +574,7 @@ pub async fn print_deprecated_warning(msg: &str) {
 async fn container_import(
     repo: &ostree::Repo,
     imgref: &OstreeImageReference,
+    proxyopts: ContainerProxyOpts,
     write_ref: Option<&str>,
     quiet: bool,
 ) -> Result<()> {
@@ -584,7 +588,7 @@ async fn container_import(
         pb.set_message("Downloading...");
         pb
     });
-    let importer = ImageImporter::new(repo, imgref, Default::default()).await?;
+    let importer = ImageImporter::new(repo, imgref, proxyopts.into()).await?;
     let import = importer.unencapsulate().await;
     // Ensure we finish the progress bar before potentially propagating an error
     if let Some(pb) = pb.as_ref() {
@@ -831,11 +835,12 @@ async fn run_from_opt(opt: Opt) -> Result<()> {
         ContainerOpts::Unencapsulate {
             repo,
             imgref,
+            proxyopts,
             write_ref,
             quiet,
         } => {
             let repo = parse_repo(&repo)?;
-            container_import(&repo, &imgref, write_ref.as_deref(), quiet).await
+            container_import(&repo, &imgref, proxyopts, write_ref.as_deref(), quiet).await
         }

From 449b400ec4b9e607bf7d0854f3471c9b12b3aad3 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 28 Aug 2023 13:49:54 -0400
Subject: [PATCH 606/775] chunking: Add const for minimum, change to regular
 error

Avoid a panic in this case as it can be reachable via external input.

xref https://github.com/coreos/rpm-ostree/issues/4530
---
 lib/src/chunking.rs | 18 ++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 094b50dc3..657c74e60 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -26,6 +26,9 @@ use serde::{Deserialize, Serialize};
 // We take half the limit of 128.
 // https://github.com/ostreedev/ostree-rs-ext/issues/69
 pub(crate) const MAX_CHUNKS: u32 = 64;
+/// Minimum number of layers we can create in a "chunked" flow; otherwise
+/// we will just drop down to one.
+const MIN_CHUNKED_LAYERS: u32 = 4;
 
 type RcStr = Rc<str>;
 pub(crate) type ChunkMapping = BTreeMap<RcStr, (u64, Vec<Utf8PathBuf>)>;
@@ -638,6 +641,8 @@ fn basic_packing<'a>(
     const HIGH_SIZE_CUTOFF: f32 = 0.6;
     let before_processing_pkgs_len = components.len();
 
+    anyhow::ensure!(bin_size.get() >= MIN_CHUNKED_LAYERS);
+
     // If we have a prior build, then use that
     if let Some(prior_build) = prior_build_metadata {
         return basic_packing_with_prior_build(components, bin_size, prior_build);
     }
@@ -687,7 +692,7 @@ fn basic_packing<'a>(
     // Approximate number of components we should have per medium-size bin.
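+    // Note: checked_div maps a zero limit_ms_bins (possible with a small
+    // bin count) to None; together with the MIN_CHUNKED_LAYERS guard above
+    // this becomes a regular error instead of a panic.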
     let pkg_per_bin_ms: usize = (components.len() - limit_hs_bins - low_sized_component_count)
         .checked_div(limit_ms_bins)
-        .expect("number of bins should be >= 4");
+        .ok_or_else(|| anyhow::anyhow!("number of bins should be >= {}", MIN_CHUNKED_LAYERS))?;
 
     // Bins assignment
     for (partition, pkgs) in partitions.iter() {
@@ -772,7 +777,7 @@ mod test {
     #[test]
     fn test_packing_basics() -> Result<()> {
         // null cases
-        for v in [1u32, 7].map(|v| NonZeroU32::new(v).unwrap()) {
+        for v in [4, 7].map(|v| NonZeroU32::new(v).unwrap()) {
             assert_eq!(basic_packing(&[], v, None).unwrap().len(), 0);
         }
         Ok(())
@@ -795,6 +800,15 @@ mod test {
         Ok(())
     }
 
+    #[test]
+    fn test_packing_one_layer() -> Result<()> {
+        let contentmeta: Vec<ObjectSourceMetaSized> =
+            serde_json::from_reader(flate2::read::GzDecoder::new(FCOS_CONTENTMETA))?;
+        let r = basic_packing(&contentmeta, NonZeroU32::new(1).unwrap(), None);
+        assert!(r.is_err());
+        Ok(())
+    }
+
     fn create_manifest(prev_expected_structure: Vec<Vec<String>>) -> oci_spec::image::ImageManifest {
         let mut p = prev_expected_structure
             .iter()

From a3e32ab2327a9e6557c3d1c58adb8cab53e6e2d8 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 28 Aug 2023 10:07:47 -0400
Subject: [PATCH 607/775] build-sys: Set `resolver = 2`, ci: passthrough
 `internal-testing-api`

There's a warning in newer Rust 1.72 about this, we do want the v2
resolver.  And adjust our build/CI configuration to explicitly enable
`internal-testing-api` when building our binary in CI.
---
 .github/workflows/rust.yml | 2 +-
 Cargo.toml                 | 1 +
 cli/Cargo.toml             | 4 ++++
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 32f962455..041c8511e 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -55,7 +55,7 @@ jobs:
         with:
           key: "build"
       - name: Build
-        run: cargo build --release
+        run: cargo build --release --features=internal-testing-api
       - name: Upload binary
        uses: actions/upload-artifact@v2
        with:
diff --git a/Cargo.toml b/Cargo.toml
index d381af313..0441b93cc 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,5 +1,6 @@
 [workspace]
 members = ["cli", "lib"]
+resolver = "2"
 
 # These bits are copied from rpm-ostree.
 [profile.dev]
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index f18a0c7f1..4930891da 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -18,3 +18,7 @@ tokio = { version = "1", features = ["macros"] }
 log = "0.4.0"
 tracing = "0.1"
 tracing-subscriber = "0.2.17"
+
+[features]
+# A proxy for the library feature
+internal-testing-api = ["ostree-ext/internal-testing-api"]

From 8c0c1fe275f78aeb00ae547c2b0b131a7b3577f9 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 28 Aug 2023 15:44:58 -0400
Subject: [PATCH 608/775] lib: Require new containers-image-proxy

Because it has an important bugfix.
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 7c4cf010a..fd119312a 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -11,7 +11,7 @@ rust-version = "1.70.0"
 
 [dependencies]
 anyhow = "1.0"
-containers-image-proxy = "0.5.3"
+containers-image-proxy = "0.5.5"
 async-compression = { version = "0.3", features = ["gzip", "tokio"] }
 bitflags = "1"
 camino = "1.0.4"

From 1d20f34c043a2b0f5e57f1b4a202a68c44fceae3 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 28 Aug 2023 15:40:16 -0400
Subject: [PATCH 609/775] cli/encapsulate: Add `--authfile`

Ideally we'd pass through all of the proxy options here, but doing that
sanely really requires being able to do *pushes* through
containers-image-proxy-rs, which is a quite nontrivial amount of work.

For now, let's pass through `--authfile` which is the main thing people
want.  Anything else can be worked around by encapsulating to `oci` and
then doing a `skopeo copy` from there.

cc https://github.com/ostreedev/ostree/issues/3015
---
 lib/src/cli.rs                           |  8 ++++++++
 lib/src/container/encapsulate.rs         |  8 ++++++--
 lib/src/container/skopeo.rs              | 11 ++++++++++-
 lib/src/container/update_detachedmeta.rs |  4 ++--
 4 files changed, 26 insertions(+), 5 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index c15135027..1cf30d687 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -129,6 +129,10 @@ pub(crate) enum ContainerOpts {
         #[clap(name = "label", long, short)]
         labels: Vec<String>,
 
+        #[clap(long)]
+        /// Path to Docker-formatted authentication file.
+        authfile: Option<PathBuf>,
+
         /// Propagate an OSTree commit metadata key to container label
         #[clap(name = "copymeta", long)]
        copy_meta_keys: Vec<String>,
@@ -624,6 +628,7 @@ async fn container_export(
     rev: &str,
     imgref: &ImageReference,
     labels: BTreeMap<String, String>,
+    authfile: Option<PathBuf>,
     copy_meta_keys: Vec<String>,
     copy_meta_opt_keys: Vec<String>,
     cmd: Option<Vec<String>>,
@@ -636,6 +641,7 @@ async fn container_export(
     let opts = crate::container::ExportOpts {
         copy_meta_keys,
         copy_meta_opt_keys,
+        authfile,
        skip_compression: compression_fast, // TODO rename this in the struct at the next semver break
        ..Default::default()
    };
diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index 6b6347c18..ac4dd08a3 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -348,6 +348,8 @@ async fn build_impl(
         let tempdest = tempdir.path().join("d");
         let tempdest = tempdest.to_str().unwrap();
 
+        // Minor TODO: refactor to avoid clone
+        let authfile = opts.authfile.clone();
         let tempoci = build_oci(
             repo,
             ostree_ref,
@@ -359,7 +361,7 @@
             contentmeta,
         )?;
 
-        let digest = skopeo::copy(&tempoci, dest).await?;
+        let digest = skopeo::copy(&tempoci, dest, authfile.as_deref()).await?;
         Some(digest)
     };
     if let Some(digest) = digest {
@@ -377,7 +379,7 @@
 }
 
 /// Options controlling commit export into OCI
-#[derive(Debug, Default)]
+#[derive(Clone, Debug, Default)]
 pub struct ExportOpts {
     /// If true, do not perform gzip compression of the tar layers.
     pub skip_compression: bool,
@@ -387,6 +389,8 @@ pub struct ExportOpts {
     pub copy_meta_opt_keys: Vec<String>,
     /// Maximum number of layers to use
     pub max_layers: Option<NonZeroU32>,
+    /// Path to Docker-formatted authentication file.
+    pub authfile: Option<PathBuf>,
     // TODO semver-break: remove this
     /// Use only the standard OCI version label
     pub no_legacy_version_label: bool,
diff --git a/lib/src/container/skopeo.rs b/lib/src/container/skopeo.rs
index 2ae9210cd..99489c0f5 100644
--- a/lib/src/container/skopeo.rs
+++ b/lib/src/container/skopeo.rs
@@ -4,6 +4,7 @@ use super::ImageReference;
 use anyhow::{Context, Result};
 use serde::Deserialize;
 use std::io::Read;
+use std::path::Path;
 use std::process::Stdio;
 use tokio::process::Command;
 
@@ -58,12 +59,20 @@ pub(crate) fn spawn(mut cmd: Command) -> Result<Child> {
 }
 
 /// Use skopeo to copy a container image.
-pub(crate) async fn copy(src: &ImageReference, dest: &ImageReference) -> Result<String> {
+pub(crate) async fn copy(
+    src: &ImageReference,
+    dest: &ImageReference,
+    authfile: Option<&Path>,
+) -> Result<String> {
     let digestfile = tempfile::NamedTempFile::new()?;
     let mut cmd = new_cmd();
     cmd.stdout(std::process::Stdio::null()).arg("copy");
     cmd.arg("--digestfile");
     cmd.arg(digestfile.path());
+    if let Some(authfile) = authfile {
+        cmd.arg("--authfile");
+        cmd.arg(authfile);
+    }
     cmd.args(&[src.to_string(), dest.to_string()]);
     let proc = super::skopeo::spawn(cmd)?;
     let output = proc.wait_with_output().await?;
diff --git a/lib/src/container/update_detachedmeta.rs b/lib/src/container/update_detachedmeta.rs
index 0e7eba80b..0b07b8e14 100644
--- a/lib/src/container/update_detachedmeta.rs
+++ b/lib/src/container/update_detachedmeta.rs
@@ -29,7 +29,7 @@ pub async fn update_detached_metadata(
     };
 
     // Full copy of the source image
-    let pulled_digest: String = skopeo::copy(src, &tempsrc_ref)
+    let pulled_digest: String = skopeo::copy(src, &tempsrc_ref, None)
         .await
         .context("Creating temporary copy to OCI dir")?;
 
@@ -124,7 +124,7 @@ pub async fn update_detached_metadata(
     // Finally, copy the mutated image back to the target.  For chunked images,
     // because we only changed one layer, skopeo should know not to re-upload shared blobs.
-    crate::container::skopeo::copy(&tempsrc_ref, dest)
+    crate::container::skopeo::copy(&tempsrc_ref, dest, None)
         .await
         .context("Copying to destination")
 }

From 78353d2eff46d52612a46735909f07ec60356841 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 29 Aug 2023 11:08:52 -0400
Subject: [PATCH 610/775] ci: Re-enable clippy, but nonblocking

Blocking on clippy I think is just too painful for the value it
provides.
---
 .github/workflows/rust.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 041c8511e..822a64984 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -41,8 +41,8 @@ jobs:
         run: cargo test -- --nocapture --quiet
       - name: Manpage generation
         run: mkdir -p target/man && cargo run --features=docgen -- man --directory target/man
-      # - name: cargo clippy
-      #   run: cargo clippy
+      - name: cargo clippy
+        run: cargo clippy
   build:
     runs-on: ubuntu-latest
     container: quay.io/coreos-assembler/fcos-buildroot:testing-devel

From e4e8d184e74fe29ca5488e2c280252d9f2172875 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 31 Aug 2023 08:20:45 -0400
Subject: [PATCH 611/775] sysroot: Add `from_assume_locked`

There's a mess with `SysrootLock` because in practice, way, way too
many APIs just take the plain old C `Sysroot` object.  In this project,
we added a new API that requires a `SysrootLock`, which works well when
everything is using ostree-ext from the start, as it is in bootc.
In this project, we added a new API that requires a `SysrootLock`, which works well when everything is using ostree-ext from the start, as it is in bootc. However in rpm-ostree we acquire the lock from C code, but want to call `remove_undeployed_images` which wants `SysrootLock`. The only practical way out of this is to add an API which asserts that the sysroot is locked and returns this wrapper. What would actually work better here is to drive this locking logic down into the C library. --- lib/src/sysroot.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/lib/src/sysroot.rs b/lib/src/sysroot.rs index 5516bac82..a4f971101 100644 --- a/lib/src/sysroot.rs +++ b/lib/src/sysroot.rs @@ -9,10 +9,15 @@ use anyhow::Result; pub struct SysrootLock { /// The underlying sysroot value. pub sysroot: ostree::Sysroot, + /// True if we didn't actually lock + unowned: bool, } impl Drop for SysrootLock { fn drop(&mut self) { + if self.unowned { + return; + } self.sysroot.unlock(); } } @@ -28,12 +33,14 @@ impl Deref for SysrootLock { impl SysrootLock { /// Asynchronously acquire a sysroot lock. If the lock cannot be acquired /// immediately, a status message will be printed to standard output. + /// The lock will be unlocked when this object is dropped. pub async fn new_from_sysroot(sysroot: &ostree::Sysroot) -> Result { let mut printed = false; loop { if sysroot.try_lock()? { return Ok(Self { sysroot: sysroot.clone(), + unowned: false, }); } if !printed { @@ -43,4 +50,13 @@ impl SysrootLock { tokio::time::sleep(std::time::Duration::from_secs(3)).await; } } + + /// This function should only be used when you have locked the sysroot + /// externally (e.g. in C/C++ code). This also does not unlock on drop. + pub fn from_assumed_locked(sysroot: &ostree::Sysroot) -> Self { + Self { + sysroot: sysroot.clone(), + unowned: true, + } + } } From e39b72769993b1dde1548149da7814c360ba3cda Mon Sep 17 00:00:00 2001 From: Colin Walters Date: Thu, 31 Aug 2023 15:55:34 -0400 Subject: [PATCH 612/775] container: Add a trace log for when we discard "broken pipe" error I don't think we're hitting this in https://github.com/coreos/rpm-ostree/issues/4567 but it'd be useful to have a trace message in case. 
---
 lib/src/container/unencapsulate.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/src/container/unencapsulate.rs b/lib/src/container/unencapsulate.rs
index e4f37334a..1e162bfdf 100644
--- a/lib/src/container/unencapsulate.rs
+++ b/lib/src/container/unencapsulate.rs
@@ -165,6 +165,7 @@ pub(crate) async fn join_fetch(
         (Err(worker), Err(driver)) => {
             let text = driver.root_cause().to_string();
             if text.ends_with("broken pipe") {
+                tracing::trace!("Ignoring broken pipe failure from driver");
                 Err(worker)
             } else {
                 Err(worker.context(format!("proxy failure: {} and client error", text)))

From 13455cce4923ac0c3237d3983c2d4044842fb139 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 31 Aug 2023 17:18:21 -0400
Subject: [PATCH 613/775] tar: Hold open input stream as long as possible

I'm hoping this will help us debug
https://github.com/coreos/rpm-ostree/issues/4567

```
[2023-08-30T15:00:16.554Z] Aug 30 15:00:15 qemu0 kola-runext-container-image[1957]: error: Importing: Parsing layer blob sha256:00623c39da63781bdd3fb00fedb36f8b9ec95e42cdb4d389f692457f24c67144: Failed to invoke skopeo proxy method FinishPipe: remote error: write |1: broken pipe
```

I haven't been able to reproduce it outside of CI yet, but we had a
prior ugly hack for this in
https://github.com/ostreedev/ostree-rs-ext/commit/a27dac83831297a6e83bd25c5b6b1b842249ad4d

As the comments say, the goal is to hold the input stream open as long
as feasibly possible.
---
 lib/src/tar/write.rs | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs
index 29e0d82c7..df147c903 100644
--- a/lib/src/tar/write.rs
+++ b/lib/src/tar/write.rs
@@ -259,16 +259,28 @@ async fn filter_tar_async(
     mut dest: impl AsyncWrite + Send + Unpin,
 ) -> Result<BTreeMap<String, u32>> {
     let (tx_buf, mut rx_buf) = tokio::io::duplex(8192);
+    // The source must be moved to the heap so we know it is stable for passing to the worker thread
     let src = Box::pin(src);
-    let tar_transformer = tokio::task::spawn_blocking(move || -> Result<_> {
+    let tar_transformer = tokio::task::spawn_blocking(move || {
-        let src = tokio_util::io::SyncIoBridge::new(src);
+        let mut src = tokio_util::io::SyncIoBridge::new(src);
         let dest = tokio_util::io::SyncIoBridge::new(tx_buf);
-        filter_tar(src, dest)
+        let r = filter_tar(&mut src, dest);
+        // Pass ownership of the input stream back to the caller - see below.
+        (r, src)
     });
     let copier = tokio::io::copy(&mut rx_buf, &mut dest);
     let (r, v) = tokio::join!(tar_transformer, copier);
     let _v: u64 = v?;
-    r?
+    let (r, src) = r?;
+    // Note that the worker thread took temporary ownership of the input stream; we only close
+    // it at this point, after we're sure we've done all processing of the input. The reason
+    // for this is that either the skopeo process *or* we could encounter an error (see join_fetch).
+    // Holding the stream open as long as possible ensures that we see a remote error first,
+    // instead of the remote skopeo process seeing us close the pipe because we found an error.
+    drop(src);
+    // And pass back the result
+    r
 }

 /// Write the contents of a tarball as an ostree commit.

From f4d9280a3ade7b31d33279fb5dcda964a2297b2d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 31 Aug 2023 17:51:14 -0400
Subject: [PATCH 614/775] Release 0.11.6

Just a collection of relatively minor things, but let's keep the train
going.
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index fd119312a..6dca80e46 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0"
 name = "ostree-ext"
 readme = "README.md"
 repository = "https://github.com/ostreedev/ostree-rs-ext"
-version = "0.11.5"
+version = "0.11.6"
 rust-version = "1.70.0"

 [dependencies]

From f23fd84731c010b3e3665baf500249f3dd3d931f Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Thu, 31 Aug 2023 17:55:15 -0400
Subject: [PATCH 615/775] Use cap_tempfile via cap_std_ext

This way things like Dependabot understand there's only one dependency
to bump, instead of multiple that must move in lockstep.
---
 lib/Cargo.toml              | 1 -
 lib/src/bootabletree.rs     | 2 +-
 lib/src/commit.rs           | 1 +
 lib/src/container/ocidir.rs | 2 +-
 lib/src/container/store.rs  | 2 +-
 lib/src/repair.rs           | 2 +-
 lib/src/tar/write.rs        | 2 +-
 7 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index fd119312a..c5cffde88 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -20,7 +20,6 @@ olpc-cjson = "0.1.1"
 clap = { version= "4.2", features = ["derive"] }
 clap_mangen = { version = "0.2", optional = true }
 cap-std-ext = "2.0"
-cap-tempfile = "1.0"
 flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" }
 fn-error-context = "0.2.0"
 futures-util = "0.3.13"
diff --git a/lib/src/bootabletree.rs b/lib/src/bootabletree.rs
index 591cbeb4d..23ab3b723 100644
--- a/lib/src/bootabletree.rs
+++ b/lib/src/bootabletree.rs
@@ -91,7 +91,7 @@ pub fn find_kernel_dir_fs(root: &Dir) -> Result<Option<Utf8PathBuf>> {
 #[cfg(test)]
 mod test {
     use super::*;
-    use cap_tempfile::cap_std;
+    use cap_std_ext::{cap_std, cap_tempfile};

     #[test]
     fn test_find_kernel_dir_fs() -> Result<()> {
diff --git a/lib/src/commit.rs b/lib/src/commit.rs
index de4fd2bfe..a9252400d 100644
--- a/lib/src/commit.rs
+++ b/lib/src/commit.rs
@@ -169,6 +169,7 @@ pub(crate) async fn container_commit() -> Result<()> {
 #[cfg(test)]
 mod tests {
     use super::*;
+    use cap_std_ext::cap_tempfile;

     #[test]
     fn commit() -> Result<()> {
diff --git a/lib/src/container/ocidir.rs b/lib/src/container/ocidir.rs
index 8ed72bf1c..ac5d3a3af 100644
--- a/lib/src/container/ocidir.rs
+++ b/lib/src/container/ocidir.rs
@@ -7,8 +7,8 @@ use anyhow::{anyhow, Context, Result};
 use camino::Utf8Path;
 use cap_std::fs::Dir;
-use cap_std_ext::cap_std;
 use cap_std_ext::dirext::CapStdExtDirExt;
+use cap_std_ext::{cap_std, cap_tempfile};
 use containers_image_proxy::oci_spec;
 use flate2::write::GzEncoder;
 use fn_error_context::context;
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index b10d40f74..8dc0324cf 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -841,7 +841,7 @@ impl ImageImporter {
         let devino = ostree::RepoDevInoCache::new();
         let repodir = repo.dfd_as_dir()?;
         let repo_tmp = repodir.open_dir("tmp")?;
-        let td = cap_tempfile::TempDir::new_in(&repo_tmp)?;
+        let td = cap_std_ext::cap_tempfile::TempDir::new_in(&repo_tmp)?;

         let rootpath = "root";
         let checkout_mode = if repo.mode() == ostree::RepoMode::Bare {
diff --git a/lib/src/repair.rs b/lib/src/repair.rs
index e6f4e2759..235ff84a8 100644
--- a/lib/src/repair.rs
+++ b/lib/src/repair.rs
@@ -5,7 +5,7 @@ use std::fmt::Display;
 use anyhow::{anyhow, Context, Result};
 use cap_std::fs::Dir;
-use cap_tempfile::cap_std;
+use cap_std_ext::cap_std;
 use fn_error_context::context;
 use serde::{Deserialize, Serialize};
 use std::os::unix::fs::MetadataExt;
diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs
index df147c903..608ea7f4a 100644
--- a/lib/src/tar/write.rs
+++ b/lib/src/tar/write.rs
@@ -12,8 +12,8 @@ use anyhow::{anyhow, Context};
 use camino::{Utf8Component, Utf8Path, Utf8PathBuf};
 use cap_std::io_lifetimes;
-use cap_std_ext::cap_std;
 use cap_std_ext::cmdext::CapStdExtCommandExt;
+use cap_std_ext::{cap_std, cap_tempfile};
 use once_cell::unsync::OnceCell;
 use ostree::gio;
 use ostree::prelude::FileExt;

From d4c37871624307de425e4579c4a38dbc530739cf Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 5 Sep 2023 10:00:49 -0400
Subject: [PATCH 616/775] lib: Bump to libsystemd 0.6

This updates to a more modern version, which will help drop e.g.
`nix-0.23` from our dependency graph - it's long obsolete.
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 1a75d630a..d41a174f4 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -29,7 +29,7 @@ io-lifetimes = "1.0"
 indicatif = "0.17.0"
 once_cell = "1.9"
 libc = "0.2.92"
-libsystemd = "0.5.0"
+libsystemd = "0.6.0"
 openssl = "0.10.33"
 ostree = { features = ["v2022_5", "cap-std-apis"], version = "0.18.0" }
 pin-project = "1.0"

From d10142dc2681b794d5ba83c7bdeb68d62b4eb796 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 8 Sep 2023 08:45:07 -0400
Subject: [PATCH 617/775] keyfileext: Drop optional_string_list

Nothing is using this, and the underlying glib type changed, so it's
annoying to keep.
---
 lib/src/keyfileext.rs | 26 --------------------------
 1 file changed, 26 deletions(-)

diff --git a/lib/src/keyfileext.rs b/lib/src/keyfileext.rs
index 767cf7b6e..8d6e3a6ea 100644
--- a/lib/src/keyfileext.rs
+++ b/lib/src/keyfileext.rs
@@ -9,12 +9,6 @@ pub trait KeyFileExt {
     fn optional_string(&self, group: &str, key: &str) -> Result<Option<GString>, glib::Error>;
     /// Get a boolean value, but return `None` if the key does not exist.
     fn optional_bool(&self, group: &str, key: &str) -> Result<Option<bool>, glib::Error>;
-    /// Get a string list value, but return `None` if the key does not exist.
-    fn optional_string_list(
-        &self,
-        group: &str,
-        key: &str,
-    ) -> Result<Option<Vec<GString>>, glib::Error>;
 }

 /// Consume a keyfile error, mapping the case where group or key is not found to `Ok(None)`.
@@ -42,14 +36,6 @@ impl KeyFileExt for glib::KeyFile {
     fn optional_bool(&self, group: &str, key: &str) -> Result<Option<bool>, glib::Error> {
         map_keyfile_optional(self.boolean(group, key))
     }
-
-    fn optional_string_list(
-        &self,
-        group: &str,
-        key: &str,
-    ) -> Result<Option<Vec<GString>>, glib::Error> {
-        map_keyfile_optional(self.string_list(group, key))
-    }
 }

 #[cfg(test)]
@@ -71,17 +57,5 @@ mod tests {
         assert_eq!(kf.optional_bool("foo", "bar").unwrap(), None);
         kf.set_boolean("foo", "somebool", false);
         assert_eq!(kf.optional_bool("foo", "somebool").unwrap(), Some(false));
-
-        assert_eq!(kf.optional_string_list("foo", "bar").unwrap(), None);
-        kf.set_string("foo", "somelist", "one;two;three");
-        assert_eq!(
-            kf.optional_string_list("foo", "somelist").unwrap(),
-            Some(
-                vec!["one", "two", "three"]
-                    .iter()
-                    .map(|&v| GString::from(v))
-                    .collect()
-            )
-        );
     }
 }

From 5a32624d5c52deb7b89bab8ca3ba7619f956eddb Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 8 Sep 2023 08:45:49 -0400
Subject: [PATCH 618/775] lib: Bump to ostree 0.19, cap-std-ext 3.0

This is just "keeping up with churn":

- Newer versions of glib crates
- Bump to cap-std-ext 3.0, which bumps cap-std to 2.0
---
 lib/Cargo.toml             |  4 ++--
 lib/src/cli.rs             |  6 ++++--
 lib/src/container/store.rs |  5 +++--
 lib/src/fixture.rs         | 21 +++++++++++++++------
 lib/src/repair.rs          |  2 +-
 lib/src/tar/export.rs      | 10 ++++++----
 lib/tests/it/main.rs       |  2 +-
 7 files changed, 32 insertions(+), 18 deletions(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index d41a174f4..f78102c1e 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -19,7 +19,7 @@ chrono = "0.4.19"
 olpc-cjson = "0.1.1"
 clap = { version= "4.2", features = ["derive"] }
 clap_mangen = { version = "0.2", optional = true }
-cap-std-ext = "2.0"
+cap-std-ext = "3.0"
 flate2 = { features = ["zlib"], default_features = false, version = "1.0.20" }
 fn-error-context = "0.2.0"
 futures-util = "0.3.13"
@@ -31,7 +31,7 @@ once_cell = "1.9"
 libc = "0.2.92"
 libsystemd = "0.6.0"
 openssl = "0.10.33"
-ostree = { features = ["v2022_5", "cap-std-apis"], version = "0.18.0" }
+ostree = { features = ["v2022_6"], version = "0.19.0" }
 pin-project = "1.0"
 regex = "1.5.4"
 rustix = { version = "0.37.19", features = ["fs", "process"] }
diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 1cf30d687..693368596 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -7,9 +7,11 @@
 use anyhow::{Context, Result};
 use camino::{Utf8Path, Utf8PathBuf};
+use cap_std_ext::cap_std;
 use clap::{Parser, Subcommand};
 use fn_error_context::context;
-use ostree::{cap_std, gio, glib};
+use io_lifetimes::AsFd;
+use ostree::{gio, glib};
 use std::collections::BTreeMap;
 use std::ffi::OsString;
 use std::io::BufWriter;
@@ -38,7 +40,7 @@ pub fn parse_base_imgref(s: &str) -> Result<ImageReference> {
 pub fn parse_repo(s: &Utf8Path) -> Result<ostree::Repo> {
     let repofd = cap_std::fs::Dir::open_ambient_dir(s, cap_std::ambient_authority())
         .with_context(|| format!("Opening directory at '{s}'"))?;
-    ostree::Repo::open_at_dir(&repofd, ".")
+    ostree::Repo::open_at_dir(repofd.as_fd(), ".")
         .with_context(|| format!("Opening ostree repository at '{s}'"))
 }
diff --git a/lib/src/container/store.rs b/lib/src/container/store.rs
index 8dc0324cf..fd66359e9 100644
--- a/lib/src/container/store.rs
+++ b/lib/src/container/store.rs
@@ -12,6 +12,7 @@ use crate::sysroot::SysrootLock;
 use crate::utils::ResultExt;
 use anyhow::{anyhow, Context};
 use camino::{Utf8Path, Utf8PathBuf};
+use cap_std_ext::cap_std::fs::Dir;
 use containers_image_proxy::{ImageProxy, OpenedImage};
 use fn_error_context::context;
 use futures_util::TryFutureExt;
@@ -839,7 +840,7 @@ impl ImageImporter {
         let txn = repo.auto_transaction(cancellable)?;

         let devino = ostree::RepoDevInoCache::new();
-        let repodir = repo.dfd_as_dir()?;
+        let repodir = Dir::reopen_dir(&repo.dfd_borrow())?;
         let repo_tmp = repodir.open_dir("tmp")?;
         let td = cap_std_ext::cap_tempfile::TempDir::new_in(&repo_tmp)?;
@@ -1310,7 +1311,7 @@ fn compare_file_info(src: &gio::FileInfo, target: &gio::FileInfo) -> bool {
 #[context("Querying object inode")]
 fn inode_of_object(repo: &ostree::Repo, checksum: &str) -> Result<u64> {
-    let repodir = repo.dfd_as_dir()?;
+    let repodir = Dir::reopen_dir(&repo.dfd_borrow())?;
     let (prefix, suffix) = checksum.split_at(2);
     let objpath = format!("objects/{}/{}.file", prefix, suffix);
     let metadata = repodir.symlink_metadata(objpath)?;
diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index 5d66efde0..a2035b77f 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -10,11 +10,12 @@ use crate::{gio, glib};
 use anyhow::{anyhow, Context, Result};
 use camino::{Utf8Component, Utf8Path, Utf8PathBuf};
 use cap_std::fs::Dir;
+use cap_std_ext::cap_std;
 use cap_std_ext::prelude::CapStdExtCommandExt;
 use chrono::TimeZone;
 use fn_error_context::context;
+use io_lifetimes::AsFd;
 use once_cell::sync::Lazy;
-use ostree::cap_std;
 use regex::Regex;
 use std::borrow::Cow;
 use std::io::Write;
@@ -394,13 +395,21 @@ impl Fixture {
             .status()?;
         assert!(st.success());

-        let srcrepo =
-            ostree::Repo::create_at_dir(srcdir_dfd, "repo", ostree::RepoMode::Archive, None)
-                .context("Creating src/ repo")?;
+        let srcrepo = ostree::Repo::create_at_dir(
+            srcdir_dfd.as_fd(),
+            "repo",
+            ostree::RepoMode::Archive,
+            None,
+        )
+        .context("Creating src/ repo")?;

         dir.create_dir("dest")?;
-        let destrepo =
-            ostree::Repo::create_at_dir(&dir, "dest/repo", ostree::RepoMode::BareUser, None)?;
+        let destrepo = ostree::Repo::create_at_dir(
+            dir.as_fd(),
+            "dest/repo",
+            ostree::RepoMode::BareUser,
+            None,
+        )?;
         Ok(Self {
             tempdir,
             dir,
diff --git a/lib/src/repair.rs b/lib/src/repair.rs
index 235ff84a8..4deb35c40 100644
--- a/lib/src/repair.rs
+++ b/lib/src/repair.rs
@@ -90,7 +90,7 @@ impl InodeCheck {
 #[doc(hidden)]
 /// Detect if any commits are potentially incorrect due to inode truncations.
 pub fn check_inode_collision(repo: &ostree::Repo, verbose: bool) -> Result<InodeCheck> {
-    let repo_dir = repo.dfd_as_dir()?;
+    let repo_dir = Dir::reopen_dir(&repo.dfd_borrow())?;
     let objects = repo_dir.open_dir("objects")?;

     println!(
diff --git a/lib/src/tar/export.rs b/lib/src/tar/export.rs
index 839169455..43f61456e 100644
--- a/lib/src/tar/export.rs
+++ b/lib/src/tar/export.rs
@@ -398,19 +398,21 @@ impl<'a, W: std::io::Write> OstreeTarWriter<'a, W> {
             let target = meta
                 .symlink_target()
                 .ok_or_else(|| anyhow!("Missing symlink target"))?;
+            let target = target
+                .to_str()
+                .ok_or_else(|| anyhow!("Invalid UTF-8 symlink target: {target:?}"))?;
             let context = || format!("Writing content symlink: {}", checksum);
             h.set_entry_type(tar::EntryType::Symlink);
             h.set_size(0);
             // Handle //chkconfig, see above
-            if symlink_is_denormal(&target) {
-                h.set_link_name_literal(meta.symlink_target().unwrap().as_str())
-                    .with_context(context)?;
+            if symlink_is_denormal(target) {
+                h.set_link_name_literal(target).with_context(context)?;
                 self.out
                     .append_data(&mut h, &path, &mut std::io::empty())
                     .with_context(context)?;
             } else {
                 self.out
-                    .append_link(&mut h, &path, target.as_str())
+                    .append_link(&mut h, &path, target)
                     .with_context(context)?;
             }
         }
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 1bbbbea13..6d514d7df 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -1,10 +1,10 @@
 use anyhow::{Context, Result};
 use camino::Utf8Path;
 use cap_std::fs::{Dir, DirBuilder};
+use cap_std_ext::cap_std;
 use containers_image_proxy::oci_spec;
 use containers_image_proxy::oci_spec::image::ImageManifest;
 use once_cell::sync::Lazy;
-use ostree::cap_std;
 use ostree_ext::chunking::ObjectMetaSized;
 use ostree_ext::container::{store, ManifestDiff};
 use ostree_ext::container::{

From ab4045d992a779dd3c2834f6225d16017f6772a2 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 8 Sep 2023 09:24:57 -0400
Subject: [PATCH 619/775] lib: Bump to rustix 0.38

Again just keeping up.
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index f78102c1e..9e167a26a 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -34,7 +34,7 @@ openssl = "0.10.33"
 ostree = { features = ["v2022_6"], version = "0.19.0" }
 pin-project = "1.0"
 regex = "1.5.4"
-rustix = { version = "0.37.19", features = ["fs", "process"] }
+rustix = { version = "0.38", features = ["fs", "process"] }
 serde = { features = ["derive"], version = "1.0.125" }
 serde_json = "1.0.64"
 tar = "0.4.38"

From 0be18838bda8309b0f6df3e0bf542aab3d9e2b0a Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 8 Sep 2023 09:25:48 -0400
Subject: [PATCH 620/775] lib: Bump semver to 0.12

Because we bumped ostree.
---
 lib/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 9e167a26a..d8adc3dc3 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -6,7 +6,7 @@ license = "MIT OR Apache-2.0"
 name = "ostree-ext"
 readme = "README.md"
 repository = "https://github.com/ostreedev/ostree-rs-ext"
-version = "0.11.6"
+version = "0.12.0"
 rust-version = "1.70.0"

 [dependencies]

From 5730f9fa7f9a03662727eae97306d6a0a1ae580d Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Fri, 8 Sep 2023 10:56:45 -0400
Subject: [PATCH 621/775] chunking: Ignore ostree commit layer

I was hitting a panic in this section of the code; I think triggering it
involves removing packages, but I haven't narrowed it down more
precisely.
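If I had to guess at the mechanism: an empty component set (as produced
for the ostree commit layer) has no entry in `name_to_component`, and
indexing a `HashMap` with a missing key panics. A standalone
illustration of just that failure mode (not the crate's code):

```rust
use std::collections::HashMap;

fn main() {
    let name_to_component: HashMap<String, usize> =
        HashMap::from([("bash".to_string(), 0)]);
    // Lookups via get() return None for a missing key...
    assert!(name_to_component.get("").is_none());
    // ...but the index operator would panic with "key not found":
    // let _ = name_to_component[""];
}
```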
Hopefully at some point I will try to add some more unit testing for
this...
---
 lib/src/chunking.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 657c74e60..62d2a0ab7 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -606,6 +606,10 @@ fn basic_packing_with_prior_build<'a>(
     for bin in curr_build {
         let mut mod_bin = Vec::new();
         for pkg in bin {
+            // An empty component set can happen for the ostree commit layer; ignore that.
+            if pkg.is_empty() {
+                continue;
+            }
             mod_bin.push(name_to_component[&pkg]);
         }
         modified_build.push(mod_bin);

From ab1d16a574752ec4a4669352bc483ebb07c2af4b Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Sat, 9 Sep 2023 11:41:19 -0400
Subject: [PATCH 622/775] container: Canonicalize imgref to
 ostree-unverified-registry

xref https://github.com/openshift/machine-config-operator/pull/3857#issuecomment-1712026017

The MCO has code which incorrectly parses these image references;
because rpm-ostree defaults to writing `ostree-unverified-registry`,
let's canonicalize to that because it looks better too.
---
 lib/src/container/mod.rs | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/lib/src/container/mod.rs b/lib/src/container/mod.rs
index ae2b08a74..54c6f965e 100644
--- a/lib/src/container/mod.rs
+++ b/lib/src/container/mod.rs
@@ -267,7 +267,16 @@ impl std::fmt::Display for SignatureSource {
 impl std::fmt::Display for OstreeImageReference {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}:{}", self.sigverify, self.imgref)
+        match (&self.sigverify, &self.imgref) {
+            (SignatureSource::ContainerPolicyAllowInsecure, imgref)
+                if imgref.transport == Transport::Registry =>
+            {
+                write!(f, "ostree-unverified-registry:{}", self.imgref.name)
+            }
+            (sigverify, imgref) => {
+                write!(f, "{}:{}", sigverify, imgref)
+            }
+        }
     }
 }
@@ -551,7 +560,7 @@ mod tests {
         assert_eq!(ir.imgref.name, "quay.io/exampleos/blah");
         assert_eq!(
             ir.to_string(),
-            "ostree-unverified-image:docker://quay.io/exampleos/blah"
+            "ostree-unverified-registry:quay.io/exampleos/blah"
         );
         let ir_shorthand =
             OstreeImageReference::try_from("ostree-unverified-registry:quay.io/exampleos/blah")

From befe35775d42a202437c73266f250c54304e01ff Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Sun, 10 Sep 2023 14:40:46 -0400
Subject: [PATCH 623/775] tar: Make `WriteTarOptions` `#[non_exhaustive]`

To allow future extensibility.
---
 lib/src/tar/write.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/src/tar/write.rs b/lib/src/tar/write.rs
index 608ea7f4a..99da2a3d7 100644
--- a/lib/src/tar/write.rs
+++ b/lib/src/tar/write.rs
@@ -55,6 +55,7 @@ pub(crate) fn copy_entry(
 /// Configuration for tar layer commits.
 #[derive(Debug, Default)]
+#[non_exhaustive]
 pub struct WriteTarOptions {
     /// Base ostree commit hash
     pub base: Option<String>,

From 748537ea0ee07a01017b1775a558f101c5505e10 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Sun, 10 Sep 2023 14:41:48 -0400
Subject: [PATCH 624/775] tar: Make `TarImportOptions` `#[non_exhaustive]`

To allow future extensibility.
---
 lib/src/tar/import.rs |  1 +
 lib/tests/it/main.rs  | 55 ++++++++++++-------------------------------
 2 files changed, 16 insertions(+), 40 deletions(-)

diff --git a/lib/src/tar/import.rs b/lib/src/tar/import.rs
index 38ce2823e..61bae2195 100644
--- a/lib/src/tar/import.rs
+++ b/lib/src/tar/import.rs
@@ -798,6 +798,7 @@ fn validate_sha256(input: String) -> Result<String> {
 /// Configuration for tar import.
 #[derive(Debug, Default)]
+#[non_exhaustive]
 pub struct TarImportOptions {
     /// Name of the remote to use for signature verification.
     pub remote: Option<String>,
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 6d514d7df..67df2640f 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -106,14 +106,9 @@ async fn test_tar_import_signed() -> Result<()> {
     // Verify we fail with an unknown remote.
     let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std());
-    let r = ostree_ext::tar::import_tar(
-        fixture.destrepo(),
-        src_tar,
-        Some(TarImportOptions {
-            remote: Some("nosuchremote".to_string()),
-        }),
-    )
-    .await;
+    let mut taropts = TarImportOptions::default();
+    taropts.remote = Some("nosuchremote".to_string());
+    let r = ostree_ext::tar::import_tar(fixture.destrepo(), src_tar, Some(taropts)).await;
     assert_err_contains(r, r#"Remote "nosuchremote" not found"#);

     // Test a remote, but without a key
@@ -124,14 +119,9 @@ async fn test_tar_import_signed() -> Result<()> {
         .destrepo()
         .remote_add("myremote", None, Some(&opts.end()), gio::Cancellable::NONE)?;
     let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std());
-    let r = ostree_ext::tar::import_tar(
-        fixture.destrepo(),
-        src_tar,
-        Some(TarImportOptions {
-            remote: Some("myremote".to_string()),
-        }),
-    )
-    .await;
+    let mut taropts = TarImportOptions::default();
+    taropts.remote = Some("myremote".to_string());
+    let r = ostree_ext::tar::import_tar(fixture.destrepo(), src_tar, Some(taropts)).await;
     assert_err_contains(r, r#"Can't check signature: public key not found"#);

     // And signed correctly
@@ -143,14 +133,9 @@ async fn test_tar_import_signed() -> Result<()> {
         .ignore_stdout()
         .run()?;
     let src_tar = tokio::fs::File::from_std(fixture.dir.open(test_tar)?.into_std());
-    let imported = ostree_ext::tar::import_tar(
-        fixture.destrepo(),
-        src_tar,
-        Some(TarImportOptions {
-            remote: Some("myremote".to_string()),
-        }),
-    )
-    .await?;
+    let mut taropts = TarImportOptions::default();
+    taropts.remote = Some("myremote".to_string());
+    let imported = ostree_ext::tar::import_tar(fixture.destrepo(), src_tar, Some(taropts)).await?;
     let (commitdata, state) = fixture.destrepo().load_commit(&imported)?;
     assert_eq!(
         CONTENTS_CHECKSUM_V0,
@@ -173,14 +158,9 @@ async fn test_tar_import_signed() -> Result<()> {
         })
         .await??;
     let src_tar = tokio::fs::File::from_std(fixture.dir.open(nometa)?.into_std());
-    let r = ostree_ext::tar::import_tar(
-        fixture.destrepo(),
-        src_tar,
-        Some(TarImportOptions {
-            remote: Some("myremote".to_string()),
-        }),
-    )
-    .await;
+    let mut taropts = TarImportOptions::default();
+    taropts.remote = Some("myremote".to_string());
+    let r = ostree_ext::tar::import_tar(fixture.destrepo(), src_tar, Some(taropts)).await;
     assert_err_contains(r, "Expected commitmeta object");

     // Now inject garbage into the commitmeta by flipping some bits in the signature
@@ -210,14 +190,9 @@ async fn test_tar_import_signed() -> Result<()> {
         })
         .await??;
     let src_tar = tokio::fs::File::from_std(fixture.dir.open(nometa)?.into_std());
-    let r = ostree_ext::tar::import_tar(
-        fixture.destrepo(),
-        src_tar,
-        Some(TarImportOptions {
-            remote: Some("myremote".to_string()),
-        }),
-    )
-    .await;
+    let mut taropts = TarImportOptions::default();
+    taropts.remote = Some("myremote".to_string());
+    let r = ostree_ext::tar::import_tar(fixture.destrepo(), src_tar, Some(taropts)).await;
     assert_err_contains(r, "BAD signature");

     Ok(())

From d53bc0453e9537bf9a8ab931e9a407c7f07dc0c9 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Sun, 10 Sep 2023 14:41:48 -0400
Subject: [PATCH 625/775] container: Make `DeployOpts` `#[non_exhaustive]`

To allow future extensibility.
---
 lib/src/container/deploy.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lib/src/container/deploy.rs b/lib/src/container/deploy.rs
index 49a829651..c36496534 100644
--- a/lib/src/container/deploy.rs
+++ b/lib/src/container/deploy.rs
@@ -20,6 +20,7 @@ pub const STATEROOT_DEFAULT: &str = "default";
 /// Options configuring deployment.
 #[derive(Debug, Default)]
+#[non_exhaustive]
 pub struct DeployOpts<'a> {
     /// Kernel arguments to use.
     pub kargs: Option<&'a [&'a str]>,

From 258e04a4fbae9bd1947f64847e07a4770117759b Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Sun, 10 Sep 2023 14:41:48 -0400
Subject: [PATCH 626/775] container: Make `ExportOpts` `#[non_exhaustive]`

To allow future extensibility.
---
 lib/src/container/encapsulate.rs |  1 +
 lib/tests/it/main.rs             | 10 ++++------
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index ac4dd08a3..74d749b35 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -380,6 +380,7 @@ async fn build_impl(
 /// Options controlling commit export into OCI
 #[derive(Clone, Debug, Default)]
+#[non_exhaustive]
 pub struct ExportOpts {
     /// If true, do not perform gzip compression of the tar layers.
     pub skip_compression: bool,
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 67df2640f..39c38ffd8 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -453,12 +453,10 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
             ObjectMetaSized::compute_sizes(fixture.srcrepo(), meta).context("Computing sizes")
         })
         .transpose()?;
-    let opts = ExportOpts {
-        copy_meta_keys: vec!["buildsys.checksum".to_string()],
-        copy_meta_opt_keys: vec!["nosuchvalue".to_string()],
-        max_layers: std::num::NonZeroU32::new(PKGS_V0_LEN as u32),
-        ..Default::default()
-    };
+    let mut opts = ExportOpts::default();
+    opts.copy_meta_keys = vec!["buildsys.checksum".to_string()];
+    opts.copy_meta_opt_keys = vec!["nosuchvalue".to_string()];
+    opts.max_layers = std::num::NonZeroU32::new(PKGS_V0_LEN as u32);

From b6f76aca7f897c5348e8824175e577073a3e4d35 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Mon, 11 Sep 2023 15:16:02 -0400
Subject: [PATCH 627/775] deploy: Use booted stateroot if available

We really want the semantics here to more closely match `bootc switch`,
`rpm-ostree rebase`, etc. This is just the "offline capable" version,
but if run from a booted system, let's DTRT.
---
 lib/src/cli.rs | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 693368596..16c53d60a 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -12,6 +12,7 @@ use clap::{Parser, Subcommand};
 use fn_error_context::context;
 use io_lifetimes::AsFd;
 use ostree::{gio, glib};
+use std::borrow::Cow;
 use std::collections::BTreeMap;
 use std::ffi::OsString;
 use std::io::BufWriter;
@@ -304,8 +305,10 @@ pub(crate) enum ContainerImageOpts {
         sysroot: Option<String>,

         /// Name for the state directory, also known as "osname".
-        #[clap(long, default_value = ostree_container::deploy::STATEROOT_DEFAULT)]
-        stateroot: String,
+        /// If the current system is booted via ostree, then this will default to the booted stateroot.
+        /// Otherwise, the default is `default`.
+        #[clap(long)]
+        stateroot: Option<String>,

         /// Source image reference, e.g. ostree-remote-image:someremote:registry:quay.io/exampleos/exampleos@sha256:abcd...
         /// This conflicts with `--image`.
@@ -1004,6 +1007,20 @@ async fn run_from_opt(opt: Opt) -> Result<()> {
                 r
             });

+            // If the user specified a stateroot, we always use that.
+            let stateroot = if let Some(stateroot) = stateroot.as_deref() {
+                Cow::Borrowed(stateroot)
+            } else {
+                // Otherwise, if we're booted via ostree, use the booted.
+                // If that doesn't hold, then use `default`.
+                let booted_stateroot = sysroot
+                    .booted_deployment()
+                    .map(|d| Cow::Owned(d.osname().to_string()));
+                booted_stateroot.unwrap_or_else(|| {
+                    Cow::Borrowed(crate::container::deploy::STATEROOT_DEFAULT)
+                })
+            };
+
             let imgref = if let Some(image) = image {
                 let transport = transport.as_deref().unwrap_or("registry");
                 let transport = ostree_container::Transport::try_from(transport)?;

From f1713aa894cfa206dc3a306c1e0fc0b845f810fb Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Sun, 10 Sep 2023 14:29:46 -0400
Subject: [PATCH 628/775] container: Move more metadata into `ExportOpts`

This drops two extra arguments we added over time; in a few places
before, we had e.g. `None, None, None` being passed, which just looks
awkward. And we also threaded all three through in various places.

The `ExportOpts` struct just needs to grow a lifetime parameter, but
that turned out not to be too bad once I realized we could use the
elided lifetime `<'_>` in all methods that use it.
---
 lib/src/chunking.rs              |  4 +--
 lib/src/cli.rs                   |  3 +-
 lib/src/container/encapsulate.rs | 62 ++++++++++----------------------
 lib/src/fixture.rs               |  3 +-
 lib/tests/it/main.rs             | 24 +++----------
 5 files changed, 28 insertions(+), 68 deletions(-)

diff --git a/lib/src/chunking.rs b/lib/src/chunking.rs
index 62d2a0ab7..d7af2f16d 100644
--- a/lib/src/chunking.rs
+++ b/lib/src/chunking.rs
@@ -269,7 +269,7 @@ impl Chunking {
     pub fn from_mapping(
         repo: &ostree::Repo,
         rev: &str,
-        meta: ObjectMetaSized,
+        meta: &ObjectMetaSized,
         max_layers: &Option<NonZeroU32>,
         prior_build_metadata: Option<&oci_spec::image::ImageManifest>,
     ) -> Result<Self> {
@@ -287,7 +287,7 @@ impl Chunking {
     #[allow(clippy::or_fun_call)]
     pub fn process_mapping(
        &mut self,
-        meta: ObjectMetaSized,
+        meta: &ObjectMetaSized,
         max_layers: &Option<NonZeroU32>,
         prior_build_metadata: Option<&oci_spec::image::ImageManifest>,
     ) -> Result<()> {
diff --git a/lib/src/cli.rs b/lib/src/cli.rs
index 16c53d60a..57a004c8c 100644
--- a/lib/src/cli.rs
+++ b/lib/src/cli.rs
@@ -650,8 +650,7 @@ async fn container_export(
         skip_compression: compression_fast, // TODO rename this in the struct at the next semver break
         ..Default::default()
     };
-    let pushed =
-        crate::container::encapsulate(repo, rev, &config, None, Some(opts), None, imgref).await?;
+    let pushed = crate::container::encapsulate(repo, rev, &config, Some(opts), imgref).await?;
     println!("{}", pushed);
     Ok(())
 }
diff --git a/lib/src/container/encapsulate.rs b/lib/src/container/encapsulate.rs
index 74d749b35..c9de2875b 100644
--- a/lib/src/container/encapsulate.rs
+++ b/lib/src/container/encapsulate.rs
@@ -187,8 +187,6 @@ fn build_oci(
     tag: Option<&str>,
     config: &Config,
     opts: ExportOpts,
-    prior_build: Option<&oci_image::ImageManifest>,
-    contentmeta: Option<ObjectMetaSized>,
 ) -> Result<ImageReference> {
     if !ocidir_path.exists() {
         std::fs::create_dir(ocidir_path).context("Creating OCI dir")?;
@@ -230,14 +228,16 @@ fn build_oci(
     let mut manifest = ocidir::new_empty_manifest().build().unwrap();

-    let chunking = contentmeta
+    let chunking = opts
+        .contentmeta
+        .as_ref()
         .map(|meta| {
             crate::chunking::Chunking::from_mapping(
                 repo,
                 commit,
                 meta,
                 &opts.max_layers,
-                prior_build,
+                opts.prior_build,
             )
         })
         .transpose()?;
@@ -316,14 +316,12 @@ pub(crate) fn parse_oci_path_and_tag(path: &str) -> (&str, Option<&str>) {
 }

 /// Helper for `build()` that avoids generics
-#[instrument(skip(repo, contentmeta))]
+#[instrument(skip(repo, config, opts))]
 async fn build_impl(
     repo: &ostree::Repo,
     ostree_ref: &str,
     config: &Config,
-    prior_build: Option<&oci_image::ImageManifest>,
-    opts: Option<ExportOpts>,
-    contentmeta: Option<ObjectMetaSized>,
+    opts: Option<ExportOpts<'_, '_>>,
     dest: &ImageReference,
 ) -> Result<String> {
     let mut opts = opts.unwrap_or_default();
@@ -332,16 +330,8 @@ async fn build_impl(
     }
     let digest = if dest.transport == Transport::OciDir {
         let (path, tag) = parse_oci_path_and_tag(dest.name.as_str());
-        let _copied: ImageReference = build_oci(
-            repo,
-            ostree_ref,
-            Path::new(path),
-            tag,
-            config,
-            opts,
-            prior_build,
-            contentmeta,
-        )?;
+        let _copied: ImageReference =
+            build_oci(repo, ostree_ref, Path::new(path), tag, config, opts)?;
         None
     } else {
         let tempdir = tempfile::tempdir_in("/var/tmp")?;
@@ -350,16 +340,7 @@ async fn build_impl(
         // Minor TODO: refactor to avoid clone
         let authfile = opts.authfile.clone();
-        let tempoci = build_oci(
-            repo,
-            ostree_ref,
-            Path::new(tempdest),
-            None,
-            config,
-            opts,
-            prior_build,
-            contentmeta,
-        )?;
+        let tempoci = build_oci(repo, ostree_ref, Path::new(tempdest), None, config, opts)?;

         let digest = skopeo::copy(&tempoci, dest, authfile.as_deref()).await?;
         Some(digest)
@@ -381,7 +362,7 @@ async fn build_impl(
 /// Options controlling commit export into OCI
 #[derive(Clone, Debug, Default)]
 #[non_exhaustive]
-pub struct ExportOpts {
+pub struct ExportOpts<'m, 'o> {
     /// If true, do not perform gzip compression of the tar layers.
     pub skip_compression: bool,
     /// A set of commit metadata keys to copy as image labels.
@@ -395,9 +376,15 @@ pub struct ExportOpts {
     // TODO semver-break: remove this
     /// Use only the standard OCI version label
     pub no_legacy_version_label: bool,
+    /// A reference to the metadata for a previous build; used to optimize
+    /// the packing structure.
+    pub prior_build: Option<&'m oci_image::ImageManifest>,
+    /// Metadata mapping between objects and their owning component/package;
+    /// used to optimize packing.
+    pub contentmeta: Option<&'o ObjectMetaSized>,
 }

-impl ExportOpts {
+impl<'m, 'o> ExportOpts<'m, 'o> {
     /// Return the gzip compression level to use, as configured by the export options.
     fn compression(&self) -> Compression {
         if self.skip_compression {
@@ -415,19 +402,8 @@
 pub async fn encapsulate<S: AsRef<str>>(
     repo: &ostree::Repo,
     ostree_ref: S,
     config: &Config,
-    prior_build: Option<&oci_image::ImageManifest>,
-    opts: Option<ExportOpts>,
-    contentmeta: Option<ObjectMetaSized>,
+    opts: Option<ExportOpts<'_, '_>>,
     dest: &ImageReference,
 ) -> Result<String> {
-    build_impl(
-        repo,
-        ostree_ref.as_ref(),
-        config,
-        prior_build,
-        opts,
-        contentmeta,
-        dest,
-    )
-    .await
+    build_impl(repo, ostree_ref.as_ref(), config, opts, dest).await
 }
diff --git a/lib/src/fixture.rs b/lib/src/fixture.rs
index a2035b77f..3322d04b2 100644
--- a/lib/src/fixture.rs
+++ b/lib/src/fixture.rs
@@ -679,15 +679,14 @@ impl Fixture {
             .context("Computing sizes")?;
         let opts = ExportOpts {
             max_layers: std::num::NonZeroU32::new(PKGS_V0_LEN as u32),
+            contentmeta: Some(&contentmeta),
             ..Default::default()
         };
         let digest = crate::container::encapsulate(
             self.srcrepo(),
             self.testref(),
             &config,
-            None,
             Some(opts),
-            Some(contentmeta),
             &imgref,
         )
         .await
diff --git a/lib/tests/it/main.rs b/lib/tests/it/main.rs
index 39c38ffd8..275f12ded 100644
--- a/lib/tests/it/main.rs
+++ b/lib/tests/it/main.rs
@@ -457,13 +457,12 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
     opts.copy_meta_keys = vec!["buildsys.checksum".to_string()];
     opts.copy_meta_opt_keys = vec!["nosuchvalue".to_string()];
     opts.max_layers = std::num::NonZeroU32::new(PKGS_V0_LEN as u32);
+    opts.contentmeta = contentmeta.as_ref();
     let digest = ostree_ext::container::encapsulate(
         fixture.srcrepo(),
         fixture.testref(),
         &config,
-        None,
         Some(opts),
-        contentmeta,
         &srcoci_imgref,
     )
     .await
@@ -515,8 +514,6 @@ async fn impl_test_container_import_export(chunked: bool) -> Result<()> {
         fixture.testref(),
         &config,
         None,
-        None,
-        None,
         &ociarchive_dest,
     )
     .await
@@ -626,8 +623,6 @@ async fn test_unencapsulate_unbootable() -> Result<()> {
         fixture.testref(),
         &config,
         None,
-        None,
-        None,
         &srcoci_imgref,
     )
     .await
@@ -962,8 +957,6 @@ async fn test_container_write_derive() -> Result<()> {
             ..Default::default()
         },
         None,
-        None,
-        None,
         &ImageReference {
             transport: Transport::OciDir,
             name: base_oci_path.to_string(),
@@ -1346,17 +1339,10 @@ async fn test_container_import_export_registry() -> Result<()> {
         cmd: Some(vec!["/bin/bash".to_string()]),
         ..Default::default()
     };
-    let digest = ostree_ext::container::encapsulate(
-        fixture.srcrepo(),
-        testref,
-        &config,
-        None,
-        None,
-        None,
-        &src_imgref,
-    )
-    .await
-    .context("exporting to registry")?;
+    let digest =
+        ostree_ext::container::encapsulate(fixture.srcrepo(), testref, &config, None, &src_imgref)
+            .await
+            .context("exporting to registry")?;
     let mut digested_imgref = src_imgref.clone();
     digested_imgref.name = format!("{}@{}", src_imgref.name, digest);

From 662aca0b8f0f075312f4c5cbd387b1d750845cf8 Mon Sep 17 00:00:00 2001
From: Colin Walters
Date: Tue, 12 Sep 2023 20:53:38 -0400
Subject: [PATCH 629/775] container: Cache new manifest/config in prepare,
 add API to query

Closes: https://github.com/ostreedev/ostree-rs-ext/issues/496

In https://github.com/coreos/rpm-ostree/pull/4486 we were working on
fixing `rpm-ostree upgrade --check` with containers. However, what we
really want here is to *persist* the updated manifest (and config) that
we fetch. And if we do that, we might as well just make it part of the
current `prepare()` API so it happens automatically.

In this change, we do so via detached commit metadata.
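To sketch the consumer-facing flow (the names below are illustrative,
not a committed API):

```rust
// Hypothetical usage, assuming prepare() now persists the fetched
// manifest/config and the stored image state exposes it as a cached update.
async fn check_for_update(repo: &ostree::Repo, imgref: &OstreeImageReference) -> Result<()> {
    let mut imp = ImageImporter::new(repo, imgref, Default::default()).await?;
    let _prep = imp.prepare().await?; // also caches the new manifest/config
    // Later, and possibly from a different process, without re-fetching:
    if let Some(state) = query_image(repo, imgref)? {
        if let Some(update) = state.cached_update.as_ref() {
            println!("update available: {}", update.manifest_digest);
        }
    }
    Ok(())
}
```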
An important thing here is that the data is then automatically lifecycle
bound to the merge commit - and the merge commit always changes when we
fetch a new manifest. Then, we add this "cached update" metadata to the
existing structure which has image state, so it can be conveniently
queried *without* re-fetching. Hence a flow like this should work:

- OS boots
- OS updater does a background "check for updates" via calling `prepare()`
- OS updater finds an update, and renders metadata to the user or
  orchestration system -