Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Move RAFS filesystem builder into nydus-rafs crate #1141

Merged
merged 10 commits
Mar 8, 2023
16 changes: 9 additions & 7 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

12 changes: 6 additions & 6 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -32,13 +32,11 @@ path = "src/lib.rs"

[dependencies]
anyhow = "1"
base64 = "0.13.0"
clap = { version = "4.0.18", features = ["derive", "cargo"] }
fuse-backend-rs = "0.10.1"
hex = "0.4.3"
hyper = "0.14.11"
hyperlocal = "0.8.0"
indexmap = "1"
lazy_static = "1"
libc = "0.2"
log = "0.4.8"
Expand All @@ -47,11 +45,8 @@ nix = "0.24.0"
rlimit = "0.9.0"
serde = { version = "1.0.110", features = ["serde_derive", "rc"] }
serde_json = "1.0.51"
sha2 = "0.10.2"
tar = "0.4.38"
tokio = { version = "1.24", features = ["macros"] }
vmm-sys-util = "0.10.0"
xattr = "0.2.3"

# Build static linked openssl library
openssl = { version = "0.10.45", features = ["vendored"] }
Expand All @@ -61,7 +56,7 @@ openssl = { version = "0.10.45", features = ["vendored"] }
nydus-api = { version = "0.2.2", path = "api", features = ["handler"] }
nydus-app = { version = "0.3.2", path = "app" }
nydus-error = { version = "0.2.3", path = "error" }
nydus-rafs = { version = "0.2.2", path = "rafs" }
nydus-rafs = { version = "0.2.2", path = "rafs", features = ["builder"] }
nydus-service = { version = "0.2.0", path = "service" }
nydus-storage = { version = "0.6.2", path = "storage" }
nydus-utils = { version = "0.4.1", path = "utils" }
Expand All @@ -71,6 +66,10 @@ vhost-user-backend = { version = "0.7.0", optional = true }
virtio-bindings = { version = "0.1", features = ["virtio-v5_0_0"], optional = true }
virtio-queue = { version = "0.6.0", optional = true }
vm-memory = { version = "0.9.0", features = ["backend-mmap"], optional = true }
vmm-sys-util = { version = "0.10.0", optional = true }

[dev-dependencies]
xattr = "0.2.3"

[features]
default = [
Expand All @@ -88,6 +87,7 @@ virtiofs = [
"virtio-bindings",
"virtio-queue",
"vm-memory",
"vmm-sys-util",
]

backend-http-proxy = ["nydus-storage/backend-http-proxy"]
Expand Down
9 changes: 9 additions & 0 deletions rafs/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,13 @@ spmc = "0.3.0"
vm-memory = "0.9"
fuse-backend-rs = "0.10"

hex = { version = "0.4.3", optional = true }
indexmap = { version = "1", optional = true }
sha2 = { version = "0.10.2", optional = true }
tar = { version = "0.4.38", optional = true }
vmm-sys-util = { version = "0.10.0", optional = true }
xattr = { version = "0.2.3", optional = true }

nydus-api = { version = "0.2", path = "../api" }
nydus-error = { version = "0.2", path = "../error" }
nydus-storage = { version = "0.6", path = "../storage", features = ["backend-localfs"] }
Expand All @@ -40,6 +47,8 @@ fusedev = ["fuse-backend-rs/fusedev"]
virtio-fs = ["fuse-backend-rs/virtiofs", "vm-memory/backend-mmap"]
vhost-user-fs = ["fuse-backend-rs/vhost-user-fs"]

builder = ["base64", "hex", "indexmap", "sha2", "tar", "vmm-sys-util", "xattr"]

[package.metadata.docs.rs]
all-features = true
targets = ["x86_64-unknown-linux-gnu", "aarch64-unknown-linux-gnu", "aarch64-apple-darwin"]
Original file line number Diff line number Diff line change
Expand Up @@ -5,30 +5,28 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::io::Write;
use std::ops::Deref;
use std::path::PathBuf;
use std::sync::Arc;

use anyhow::Result;
use anyhow::{bail, ensure, Result};
use serde::{Deserialize, Serialize};
use sha2::Digest;

use nydus_rafs::metadata::chunk::ChunkWrapper;
use nydus_rafs::metadata::{RafsSuper, RafsVersion};
use nydus_storage::backend::BlobBackend;
use nydus_storage::utils::alloc_buf;
use nydus_utils::digest::RafsDigest;
use nydus_utils::{digest, try_round_up_4k};

use crate::core::blob::Blob;
use crate::core::bootstrap::Bootstrap;
use crate::core::chunk_dict::{ChunkDict, HashChunkDict};
use crate::core::context::{
use super::core::blob::Blob;
use super::core::bootstrap::Bootstrap;
use super::core::node::Node;
use crate::builder::{
ArtifactStorage, ArtifactWriter, BlobContext, BlobManager, BootstrapManager, BuildContext,
BuildOutput, ConversionType,
BuildOutput, ChunkDict, ConversionType, Features, HashChunkDict, Tree, WhiteoutSpec,
};
use crate::core::feature::Features;
use crate::core::node::{Node, WhiteoutSpec};
use crate::core::tree::Tree;
use crate::metadata::chunk::ChunkWrapper;
use crate::metadata::{RafsSuper, RafsVersion};

const DEFAULT_COMPACT_BLOB_SIZE: usize = 10 * 1024 * 1024;
const DEFAULT_MAX_COMPACT_SIZE: usize = 100 * 1024 * 1024;
Expand Down Expand Up @@ -79,6 +77,7 @@ impl ChunkKey {
match c {
ChunkWrapper::V5(_) => Self::Digest(*c.id()),
ChunkWrapper::V6(_) => Self::Offset(c.blob_index(), c.compressed_offset()),
ChunkWrapper::Ref(_) => unimplemented!("unsupport ChunkWrapper::Ref(c)"),
}
}
}
Expand Down Expand Up @@ -303,9 +302,13 @@ impl BlobCompactor {
if let Some(c) =
chunk_dict.get_chunk(chunk.inner.id(), chunk.inner.uncompressed_size())
{
apply_chunk_change(c, &mut chunk.inner)?;
let mut chunk_inner = chunk.inner.deref().clone();
apply_chunk_change(c, &mut chunk_inner)?;
chunk.inner = Arc::new(chunk_inner);
} else if let Some(c) = all_chunks.get_chunk(&chunk_key) {
apply_chunk_change(c, &mut chunk.inner)?;
let mut chunk_inner = chunk.inner.deref().clone();
apply_chunk_change(c, &mut chunk_inner)?;
chunk.inner = Arc::new(chunk_inner);
} else {
all_chunks.add_chunk(&chunk.inner);
// add to per blob ChunkSet
Expand Down Expand Up @@ -356,9 +359,7 @@ impl BlobCompactor {
self.nodes[*node_idx].chunks[*chunk_idx].inner.blob_index() == from,
"unexpected blob_index of chunk"
);
self.nodes[*node_idx].chunks[*chunk_idx]
.inner
.set_blob_index(to);
self.nodes[*node_idx].chunks[*chunk_idx].set_blob_index(to);
}
}
Ok(())
Expand All @@ -367,7 +368,10 @@ impl BlobCompactor {
fn apply_chunk_change(&mut self, c: &(ChunkWrapper, ChunkWrapper)) -> Result<()> {
if let Some(idx_list) = self.c2nodes.get(&ChunkKey::from(&c.0)) {
for (node_idx, chunk_idx) in idx_list.iter() {
apply_chunk_change(&c.1, &mut self.nodes[*node_idx].chunks[*chunk_idx].inner)?;
let chunk = &mut self.nodes[*node_idx].chunks[*chunk_idx];
let mut chunk_inner = chunk.inner.deref().clone();
apply_chunk_change(&c.1, &mut chunk_inner)?;
chunk.inner = Arc::new(chunk_inner);
}
}
Ok(())
Expand Down Expand Up @@ -510,7 +514,7 @@ impl BlobCompactor {
if blob_idx != idx as u32 {
self.apply_blob_move(idx as u32, blob_idx)?;
}
self.new_blob_mgr.add(ctx);
self.new_blob_mgr.add_blob(ctx);
}
State::Delete => {
info!("delete blob {}", ori_blob_ids[idx]);
Expand Down Expand Up @@ -539,7 +543,7 @@ impl BlobCompactor {
self.apply_chunk_change(change_chunk)?;
}
info!("rebuild blob {} successfully", blob_ctx.blob_id);
self.new_blob_mgr.add(blob_ctx);
self.new_blob_mgr.add_blob(blob_ctx);
}
}
}
Expand Down Expand Up @@ -589,9 +593,9 @@ impl BlobCompactor {
return Ok(None);
}
let mut _dict = HashChunkDict::new(build_ctx.digester);
let mut tree = Tree::from_bootstrap(&rs, &mut _dict)?;
let tree = Tree::from_bootstrap(&rs, &mut _dict)?;
let mut bootstrap = Bootstrap::new()?;
bootstrap.build(&mut build_ctx, &mut bootstrap_ctx, &mut tree)?;
bootstrap.build(&mut build_ctx, &mut bootstrap_ctx, tree)?;
let mut nodes = Vec::new();
// move out nodes
std::mem::swap(&mut bootstrap_ctx.nodes, &mut nodes);
Expand All @@ -604,7 +608,7 @@ impl BlobCompactor {
)?;
compactor.compact(cfg)?;
compactor.dump_new_blobs(&build_ctx, &cfg.blobs_dir, build_ctx.aligned_chunk)?;
if compactor.new_blob_mgr.len() == 0 {
if compactor.new_blob_mgr.is_empty() {
info!("blobs of source bootstrap have already been optimized");
return Ok(None);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,23 +7,25 @@ use std::io::Write;
use std::slice;

use anyhow::{Context, Result};
use nydus_rafs::metadata::RAFS_MAX_CHUNK_SIZE;
use nydus_storage::device::BlobFeatures;
use nydus_storage::meta::{toc, BlobMetaChunkArray};
use nydus_utils::compress;
use nydus_utils::digest::{self, DigestHasher, RafsDigest};
use sha2::digest::Digest;

use super::context::{ArtifactWriter, BlobContext, BlobManager, BuildContext, ConversionType};
use super::feature::Feature;
use super::layout::BlobLayout;
use super::node::Node;
use crate::builder::{
ArtifactWriter, BlobContext, BlobManager, BuildContext, ConversionType, Feature,
};
use crate::metadata::RAFS_MAX_CHUNK_SIZE;

pub struct Blob {}
/// Generator for RAFS data blob.
pub(crate) struct Blob {}

impl Blob {
/// Dump blob file and generate chunks
pub fn dump(
pub(crate) fn dump(
ctx: &BuildContext,
nodes: &mut [Node],
blob_mgr: &mut BlobManager,
Expand Down Expand Up @@ -201,7 +203,7 @@ impl Blob {
)?;
}

// Generate ToC entry for `blob.meta`.
// Generate ToC entry for `blob.meta` and write chunk digest array.
if ctx.features.is_enabled(Feature::BlobToc) {
let mut hasher = RafsDigest::hasher(digest::Algorithm::Sha256);
let ci_data = if ctx.blob_features.contains(BlobFeatures::ZRAN) {
Expand Down
Loading