use core::{mem::size_of, ops::Deref};

use bytemuck::{Pod, Zeroable};
use derivative::Derivative;
use num_enum::{FromPrimitive, IntoPrimitive, TryFromPrimitive};
use scroll::{ctx::TryFromCtx, Pread, SizeWith};
use zerocopy::{byteorder::LE, AsBytes, FromBytes, U16, U32, U64};

use crate::{Error, Result};

#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, FromPrimitive, IntoPrimitive)]
#[repr(u64)]
pub enum KnownObjectId {
    RootTree = 1,
    ExtentTree,
    ChunkTree,
    DevTree,
    FsTree,
    RootTreeDir,
    CsumTree,
    QuotaTree,
    UuidTree,
    FreeSpaceTree,
    __FirstFreeId = 256,
    __LastFreeId = u64::MAX - 256,
    DataRelocTree = u64::MAX - 9,
    TreeReloc = u64::MAX - 8,
    TreeLog = u64::MAX - 7,
    Orphan = u64::MAX - 5,
    #[num_enum(catch_all)]
    Custom(u64),
}

#[repr(C, packed)]
#[derive(Clone, Copy, PartialEq, Eq, FromBytes, AsBytes)]
pub struct ObjectId {
    inner: U64<LE>,
}

impl core::fmt::Debug for ObjectId {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("ObjectId")
            .field("inner", &self.as_id())
            .finish()
    }
}

impl ObjectId {
    pub fn from_id(id: KnownObjectId) -> Self {
        Self {
            inner: U64::<LE>::new(id.into()),
        }
    }

    pub fn as_id(self) -> KnownObjectId {
        KnownObjectId::from_primitive(self.inner.get())
    }
}

unsafe impl Pod for ObjectId {}
unsafe impl Zeroable for ObjectId {}

#[repr(u8)]
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, FromPrimitive, IntoPrimitive)]
//#[rustc_nonnull_optimization_guaranteed]
pub enum ObjectType {
    INodeItem = 0x01,
    InodeRef = 0x0C,
    InodeExtref = 0x0D,
    XattrItem = 0x18,
    OrphanInode = 0x30,
    DirItem = 0x54,
    DirIndex = 0x60,
    ExtentData = 0x6C,
    ExtentCsum = 0x80,
    RootItem = 0x84,
    TypeRootBackref = 0x90,
    RootRef = 0x9C,
    ExtentItem = 0xA8,
    MetadataItem = 0xA9,
    TreeBlockRef = 0xB0,
    ExtentDataRef = 0xB2,
    ExtentRefV0 = 0xB4,
    SharedBlockRef = 0xB6,
    SharedDataRef = 0xB8,
    BlockGroupItem = 0xC0,
    FreeSpaceInfo = 0xC6,
    FreeSpaceExtent = 0xC7,
    FreeSpaceBitmap = 0xC8,
    DevExtent = 0xCC,
    DevItem = 0xD8,
    ChunkItem = 0xE4,
    TempItem = 0xF8,
    DevStats = 0xF9,
    SubvolUuid = 0xFB,
    SubvolRecUuid = 0xFC,
    #[num_enum(catch_all)]
    Invalid(u8),
}

#[repr(C, packed)]
#[derive(Clone, Copy, PartialEq, Eq, Pread, SizeWith, FromBytes, AsBytes)]
pub struct ObjectTypeWrapper {
    inner: u8,
}

impl core::fmt::Debug for ObjectTypeWrapper {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "{:?}", self.as_type())
    }
}

impl ObjectTypeWrapper {
    pub fn from_ty(ty: ObjectType) -> Self {
        Self { inner: ty.into() }
    }

    pub fn as_type(self) -> ObjectType {
        ObjectType::from_primitive(self.inner)
    }
}

unsafe impl Pod for ObjectTypeWrapper {}
unsafe impl Zeroable for ObjectTypeWrapper {}

#[repr(transparent)]
#[derive(Clone, Copy, PartialEq, Eq, FromBytes, AsBytes)]
pub struct Uuid(uuid::Bytes);

impl core::fmt::Debug for Uuid {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        uuid::Uuid::from_bytes_ref(&self.0).fmt(f)
    }
}

impl Deref for Uuid {
    type Target = uuid::Uuid;

    fn deref(&self) -> &Self::Target {
        uuid::Uuid::from_bytes_ref(&self.0)
    }
}

unsafe impl Pod for Uuid {}
unsafe impl Zeroable for Uuid {}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Eq, FromBytes, AsBytes)]
pub struct Key {
    pub id: ObjectId,
    pub ty: ObjectTypeWrapper,
    pub offset: U64<LE>,
}

impl Key {
    pub fn new(id: KnownObjectId, ty: ObjectType, offset: u64) -> Self {
        Self {
            id: ObjectId::from_id(id),
            ty: ObjectTypeWrapper::from_ty(ty),
            offset: U64::new(offset),
        }
    }

    pub fn ty(&self) -> ObjectType {
        self.ty.as_type()
    }

    pub fn id(&self) -> KnownObjectId {
        self.id.as_id()
    }
}
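
// A minimal sanity-check sketch (not derived from any on-disk data): `Key::new`
// should round-trip the id and type it was built from, and keys compare by
// (object id, item type, offset).
#[cfg(test)]
mod key_tests {
    use super::*;

    #[test]
    fn key_roundtrip_and_ordering() {
        let key = Key::new(KnownObjectId::ChunkTree, ObjectType::ChunkItem, 0);
        assert_eq!(key.id(), KnownObjectId::ChunkTree);
        assert_eq!(key.ty(), ObjectType::ChunkItem);

        // Same id and type, so the offset decides the ordering.
        let smaller = Key::new(KnownObjectId::ChunkTree, ObjectType::ChunkItem, 1);
        let larger = Key::new(KnownObjectId::ChunkTree, ObjectType::ChunkItem, 2);
        assert!(smaller < larger);
    }
}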
impl PartialEq for Key {
    fn eq(&self, other: &Self) -> bool {
        self.id() == other.id() && self.ty() == other.ty() && self.offset == other.offset
    }
}

impl Ord for Key {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.partial_cmp(other).unwrap()
    }
}

impl PartialOrd for Key {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        match self.id().partial_cmp(&other.id()) {
            Some(core::cmp::Ordering::Equal) => {}
            ord => return ord,
        }
        match self.ty().partial_cmp(&other.ty()) {
            Some(core::cmp::Ordering::Equal) => {}
            ord => return ord,
        }
        self.offset.get().partial_cmp(&other.offset.get())
    }
}

macro_rules! impl_try_from_ctx {
    ($($ty:ty),*) => {
        $(impl<'a> TryFromCtx<'a> for $ty {
            type Error = scroll::Error;

            fn try_from_ctx(
                from: &'a [u8],
                _: (),
            ) -> core::result::Result<(Self, usize), Self::Error> {
                Self::read_from(&from[..size_of::<Self>()])
                    .map(|v| (v, size_of::<Self>()))
                    .ok_or(scroll::Error::TooBig {
                        size: size_of::<Self>(),
                        len: from.len(),
                    })
            }
        })*
    };
}

macro_rules! impl_parse_try_from_ctx {
    ($($ty:ty),*) => {
        $(impl $ty {
            pub fn parse(bytes: &[u8]) -> Result<Self> {
                Ok(bytes.pread(0)?)
            }
        })*
    };
}

impl_parse_try_from_ctx!(Chunk, Header, Key, RootItem);
impl_try_from_ctx!(Key, Chunk, Header, Superblock, RootItem);

const MAX_LABEL_SIZE: usize = 0x100;
const SYS_CHUNK_ARRAY_SIZE: usize = 0x800;
const BTRFS_NUM_BACKUP_ROOTS: usize = 4;

fn format_u8str<T: AsRef<[u8]>>(s: &T, f: &mut core::fmt::Formatter) -> core::fmt::Result {
    let bytes = s.as_ref();
    let end = bytes
        .iter()
        .position(|&b| b == 0)
        .map(|i| i + 1)
        .unwrap_or(bytes.len());
    core::ffi::CStr::from_bytes_with_nul(&bytes[..end])
        .map(|s| write!(f, "{:?}", s))
        .map_err(|_| core::fmt::Error)?
}

#[repr(C, packed(1))]
#[derive(Derivative, Clone, Copy, FromBytes, AsBytes)]
#[derivative(Debug)]
pub struct INodeItem {
    generation: U64<LE>,
    transid: U64<LE>,
    st_size: U64<LE>,
    st_blocks: U64<LE>,
    block_group: U64<LE>,
    st_nlink: U32<LE>,
    st_uid: U32<LE>,
    st_gid: U32<LE>,
    st_mode: U32<LE>,
    st_rdev: U64<LE>,
    flags: U64<LE>,
    sequence: U64<LE>,
    #[derivative(Debug = "ignore")]
    reserved: [u8; 32],
    st_atime: Timespec,
    st_ctime: Timespec,
    st_mtime: Timespec,
    otime: Timespec,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct ChunkItem {
    size: u64,
    root_id: u64,
    stripe_length: u64,
    ty: u64,
    opt_io_alignment: u32,
    opt_io_width: u32,
    sector_size: u32,
    num_stripes: u16,
    sub_stripes: u16,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct ChunkItemStripe {
    dev_id: u64,
    offset: u64,
    dev_uuid: Uuid,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct ExtentData {
    generation: u64,
    decoded_size: u64,
    compression: u8,
    encryption: u8,
    encoding: u16,
    ty: u8,
    data: [u8; 1],
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct ExtentData2 {
    address: u64,
    size: u64,
    offset: u64,
    num_bytes: u64,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct INodeRef {
    index: u64,
    n: u16,
    name: [u8; 1],
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct INodeExtRef {
    dir: u64,
    index: u64,
    n: u16,
    name: [u8; 1],
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct ExtentItem {
    ref_count: u64,
    generation: u64,
    flags: u64,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy)]
pub struct ExtentItem2 {
    first_item: Key,
    level: u8,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct ExtentItemV0 {
    ref_count: u32,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy)]
pub struct ExtentItemTree {
    extent_item: ExtentItem,
    first_item: Key,
    level: u8,
}
#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct TreeBlockRef {
    offset: u64,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct ExtentDataRef {
    root: u64,
    objid: u64,
    offset: u64,
    count: u32,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct BlockGroupItem {
    used: u64,
    chunk_tree: u64,
    flags: u64,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct ExtentRefV0 {
    root: u64,
    gen: u64,
    objid: u64,
    count: u32,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct SharedBlockRef {
    offset: u64,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct SharedDataRef {
    offset: u64,
    count: u32,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct FreeSpaceEntry {
    offset: u64,
    size: u64,
    ty: u8,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy)]
pub struct FreeSpaceItem {
    key: Key,
    generation: u64,
    num_entries: u64,
    num_bitmaps: u64,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct RootRef {
    dir: u64,
    index: u64,
    n: u16,
    name: [u8; 1],
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct DevExtent {
    chunktree: u64,
    objid: u64,
    address: u64,
    length: u64,
    chunktree_uuid: Uuid,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct BalanceArgs {}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct BalanceItem {}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct FreeSpaceInfo {
    count: u32,
    flags: u32,
}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct SendHeader {}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct SendCommand {}

#[repr(C, packed(1))]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct SendTlv {}

const CSUM_SIZE: usize = 32;
const LABEL_SIZE: usize = 256;
const SYSTEM_CHUNK_ARRAY_SIZE: usize = 2048;

pub const FS_TREE_OBJECTID: u64 = 5;
pub const INODE_REF_KEY: u8 = 12;
pub const DIR_ITEM_KEY: u8 = 84;
pub const ROOT_ITEM_KEY: u8 = 132;
pub const CHUNK_ITEM_KEY: u8 = 228;
pub const FT_REG_FILE: u8 = 1;

#[repr(C, packed)]
#[derive(Debug, Clone, Copy, FromBytes, AsBytes)]
pub struct DevItem {
    /// the internal btrfs device id
    pub devid: U64<LE>,
    /// size of the device
    pub total_bytes: U64<LE>,
    /// bytes used
    pub bytes_used: U64<LE>,
    /// optimal io alignment for this device
    pub io_align: U32<LE>,
    /// optimal io width for this device
    pub io_width: U32<LE>,
    /// minimal io size for this device
    pub sector_size: U32<LE>,
    /// type and info about this device
    pub ty: U64<LE>,
    /// expected generation for this device
    pub generation: U64<LE>,
    /// starting byte of this partition on the device, to allow for stripe alignment in the future
    pub start_offset: U64<LE>,
    /// grouping information for allocation decisions
    pub dev_group: U32<LE>,
    /// seek speed 0-100 where 100 is fastest
    pub seek_speed: u8,
    /// bandwidth 0-100 where 100 is fastest
    pub bandwidth: u8,
    /// btrfs generated uuid for this device
    pub uuid: Uuid,
    /// uuid of FS who owns this device
    pub fsid: Uuid,
}

#[repr(C, packed)]
#[derive(Debug, Clone, Copy, FromBytes, AsBytes)]
pub struct RootBackup {
    pub tree_root: U64<LE>,
    pub tree_root_gen: U64<LE>,
    pub chunk_root: U64<LE>,
    pub chunk_root_gen: U64<LE>,
    pub extent_root: U64<LE>,
    pub extent_root_gen: U64<LE>,
    pub fs_root: U64<LE>,
    pub fs_root_gen: U64<LE>,
    pub dev_root: U64<LE>,
    pub dev_root_gen: U64<LE>,
    pub csum_root: U64<LE>,
    pub csum_root_gen: U64<LE>,
    pub total_bytes: U64<LE>,
    pub bytes_used: U64<LE>,
    pub num_devices: U64<LE>,
    /// future
    pub unused_64: [u64; 4],
    pub tree_root_level: u8,
    pub chunk_root_level: u8,
    pub extent_root_level: u8,
    pub fs_root_level: u8,
    pub dev_root_level: u8,
    pub csum_root_level: u8,
    /// future and to align
    pub unused_8: [u8; 10],
}
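
// A minimal sanity-check sketch (sizes taken from the fixed btrfs on-disk
// layout): the packed structs above must not pick up any padding, otherwise
// `read_from`/`as_bytes` would operate on the wrong byte ranges.
#[cfg(test)]
mod layout_tests {
    use core::mem::size_of;

    use super::*;

    #[test]
    fn on_disk_sizes() {
        assert_eq!(size_of::<Key>(), 17);
        assert_eq!(size_of::<DevItem>(), 98);
        assert_eq!(size_of::<RootBackup>(), 168);
    }
}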
#[repr(C, packed)]
#[derive(Derivative, Clone, Copy, FromBytes, AsBytes)]
#[derivative(Debug)]
pub struct Superblock {
    pub csum: [u8; 32],
    pub fsid: Uuid,
    /// Physical address of this block
    pub bytenr: U64<LE>,
    pub flags: U64<LE>,
    pub magic: [u8; 0x8],
    pub generation: U64<LE>,
    /// Logical address of the root tree root
    pub root: U64<LE>,
    /// Logical address of the chunk tree root
    pub chunk_root: U64<LE>,
    /// Logical address of the log tree root
    pub log_root: U64<LE>,
    pub log_root_transid: U64<LE>,
    pub total_bytes: U64<LE>,
    pub bytes_used: U64<LE>,
    pub root_dir_objectid: U64<LE>,
    pub num_devices: U64<LE>,
    pub sector_size: U32<LE>,
    pub node_size: U32<LE>,
    /// Unused and must be equal to `nodesize`
    pub leafsize: U32<LE>,
    pub stripesize: U32<LE>,
    pub sys_chunk_array_size: U32<LE>,
    pub chunk_root_generation: U64<LE>,
    pub compat_flags: U64<LE>,
    pub compat_ro_flags: U64<LE>,
    pub incompat_flags: U64<LE>,
    pub csum_type: U16<LE>,
    pub root_level: u8,
    pub chunk_root_level: u8,
    pub log_root_level: u8,
    pub dev_item: DevItem,
    #[derivative(Debug(format_with = "format_u8str"))]
    pub label: [u8; 0x100],
    pub cache_generation: U64<LE>,
    pub uuid_tree_generation: U64<LE>,
    pub metadata_uuid: Uuid,
    /// Future expansion
    #[derivative(Debug = "ignore")]
    _reserved: [u64; 28],
    #[derivative(Debug = "ignore")]
    pub sys_chunk_array: [u8; 0x800],
    #[derivative(Debug = "ignore")]
    pub root_backups: [RootBackup; 4],
    #[derivative(Debug = "ignore")]
    _reserved2: [u8; 565],
}

#[repr(u16)]
#[derive(Derivative, Clone, Copy, TryFromPrimitive)]
#[derivative(Debug)]
pub enum ChecksumType {
    Crc32 = 0,
    XxHash64,
    Sha256,
    Blake2B,
}

pub fn calculate_crc32c(bytes: &[u8]) -> [u8; 32] {
    let crc = crc::Crc::<u32>::new(&crc::CRC_32_ISCSI);
    let mut csum = [0u8; 32];
    csum[..4].copy_from_slice(crc.checksum(bytes).as_bytes());
    csum
}

impl Superblock {
    pub const SUPERBLOCK_BASE_OFFSET: usize = 0x10000;
    pub const SUPERBLOCK_OFFSETS: [usize; 4] = [
        Self::SUPERBLOCK_BASE_OFFSET,
        0x4000000,
        0x4000000000,
        0x4000000000000,
    ];
    pub const MAGIC: [u8; 8] = *b"_BHRfS_M";

    pub fn parse(bytes: &[u8]) -> Result<Self> {
        let superblock = Self::read_from(bytes).ok_or(Error::ReadFailed)?;

        if !superblock.verify_magic() {
            return Err(Error::InvalidMagic);
        }

        if !superblock.verify_checksum() {
            return Err(Error::InvalidChecksum {
                expected: superblock.csum,
                actual: superblock.calculate_checksum(),
            });
        }

        Ok(superblock)
    }

    pub fn calculate_checksum(&self) -> [u8; 32] {
        match self.checksum_type().expect("csum type invalid") {
            ChecksumType::Crc32 => calculate_crc32c(&self.as_bytes()[0x20..]),
            ChecksumType::XxHash64 => todo!(),
            ChecksumType::Sha256 => todo!(),
            ChecksumType::Blake2B => todo!(),
        }
    }

    pub fn verify_checksum(&self) -> bool {
        self.calculate_checksum() == self.csum
    }

    pub fn checksum_type(&self) -> Option<ChecksumType> {
        ChecksumType::try_from_primitive(self.csum_type.get()).ok()
    }

    pub fn verify_magic(&self) -> bool {
        self.magic == Self::MAGIC
    }
}
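
// A minimal usage sketch, assuming the caller has a raw disk image in memory as
// a byte slice: probe the well-known mirror offsets and return the first
// superblock that passes the magic and checksum verification in `parse` above.
// `find_superblock` is illustrative and not part of the on-disk format.
#[allow(dead_code)]
fn find_superblock(disk: &[u8]) -> Option<Superblock> {
    Superblock::SUPERBLOCK_OFFSETS
        .iter()
        .copied()
        .filter(|&offset| offset + size_of::<Superblock>() <= disk.len())
        .find_map(|offset| Superblock::parse(&disk[offset..offset + size_of::<Superblock>()]).ok())
}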
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, FromBytes, AsBytes)]
pub struct Stripe {
    pub devid: U64<LE>,
    pub offset: U64<LE>,
    pub dev_uuid: Uuid,
}

impl Stripe {
    pub fn parse(bytes: &[u8]) -> Result<Self> {
        Self::read_from(bytes).ok_or(Error::ReadFailed)
    }
}

#[repr(C, packed)]
#[derive(Debug, Clone, Copy, FromBytes, AsBytes)]
pub struct Chunk {
    /// size of this chunk in bytes
    pub length: U64<LE>,
    /// objectid of the root referencing this chunk
    pub owner: U64<LE>,
    pub stripe_len: U64<LE>,
    pub ty: U64<LE>,
    /// optimal io alignment for this chunk
    pub io_align: U32<LE>,
    /// optimal io width for this chunk
    pub io_width: U32<LE>,
    /// minimal io size for this chunk
    pub sector_size: U32<LE>,
    /// 2^16 stripes is quite a lot, a second limit is the size of a single item in the btree
    pub num_stripes: U16<LE>,
    /// sub stripes only matter for raid10
    pub sub_stripes: U16<LE>,
    pub stripe: Stripe,
    // additional stripes go here
}

#[repr(C, packed)]
#[derive(Debug, Clone, Copy, FromBytes, AsBytes)]
pub struct Timespec {
    pub sec: U64<LE>,
    pub nsec: U32<LE>,
}

#[repr(C, packed)]
#[derive(Derivative, Clone, Copy, FromBytes, AsBytes)]
#[derivative(Debug)]
pub struct InodeItem {
    /// nfs style generation number
    pub generation: U64<LE>,
    /// transid that last touched this inode
    pub transid: U64<LE>,
    pub size: U64<LE>,
    pub nbytes: U64<LE>,
    pub block_group: U64<LE>,
    pub nlink: U32<LE>,
    pub uid: U32<LE>,
    pub gid: U32<LE>,
    pub mode: U32<LE>,
    pub rdev: U64<LE>,
    pub flags: U64<LE>,
    /// modification sequence number for NFS
    pub sequence: U64<LE>,
    #[derivative(Debug = "ignore")]
    pub reserved: [u64; 4],
    pub atime: Timespec,
    pub ctime: Timespec,
    pub mtime: Timespec,
    pub otime: Timespec,
}

#[repr(C, packed)]
#[derive(Derivative, Clone, Copy, FromBytes, AsBytes)]
#[derivative(Debug)]
pub struct RootItem {
    pub inode: InodeItem,
    pub generation: U64<LE>,
    pub root_dirid: U64<LE>,
    pub bytenr: U64<LE>,
    pub byte_limit: U64<LE>,
    pub bytes_used: U64<LE>,
    pub last_snapshot: U64<LE>,
    pub flags: U64<LE>,
    pub refs: U32<LE>,
    pub drop_progress: Key,
    pub drop_level: u8,
    pub level: u8,
    pub generation_v2: U64<LE>,
    pub uuid: Uuid,
    pub parent_uuid: Uuid,
    pub received_uuid: Uuid,
    /// updated when an inode changes
    pub ctransid: U64<LE>,
    /// trans when created
    pub otransid: U64<LE>,
    /// trans when sent. non-zero for received subvol
    pub stransid: U64<LE>,
    /// trans when received. non-zero for received subvol
    pub rtransid: U64<LE>,
    pub ctime: Timespec,
    pub otime: Timespec,
    pub stime: Timespec,
    pub rtime: Timespec,
    #[derivative(Debug = "ignore")]
    pub reserved: [u64; 8],
}

#[repr(u8)]
#[non_exhaustive]
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive)]
pub enum DirItemType {
    Unknown,
    RegFile,
    Dir,
    ChrDev,
    BlkDev,
    Fifo,
    Sock,
    Symlink,
    Xattr,
    #[num_enum(catch_all)]
    Invalid(u8),
}

#[repr(C, packed)]
#[derive(Debug, Clone, Copy, FromBytes, AsBytes)]
pub struct DirItem {
    pub location: Key,
    pub transid: U64<LE>,
    pub data_len: U16<LE>,
    pub name_len: U16<LE>,
    ty: u8,
}

impl DirItem {
    pub fn ty(&self) -> DirItemType {
        DirItemType::from_primitive(self.ty)
    }

    pub fn parse_single(bytes: &[u8]) -> Result<DirItemEntry> {
        let offset = &mut 0;
        Self::parse_single_inner(bytes, offset)
    }

    fn parse_single_inner(bytes: &[u8], offset: &mut usize) -> Result<DirItemEntry> {
        let dir_item = DirItem::read_from(&bytes[*offset..*offset + size_of::<DirItem>()])
            .ok_or(Error::ReadFailed)?;
        *offset += size_of::<DirItem>();

        let name_len = dir_item.name_len.get() as usize;
        let name = &bytes[*offset..*offset + name_len];
        *offset += name_len;

        Ok(DirItemEntry::new(dir_item, name.to_vec()))
    }

    pub fn parse(bytes: &[u8]) -> Result<Vec<DirItemEntry>> {
        let offset = &mut 0;
        let entries = core::iter::from_fn(|| {
            if *offset + size_of::<DirItem>() < bytes.len() {
                // `parse_single_inner` indexes `bytes` with the shared offset,
                // so the full slice is passed on every iteration.
                Some(Self::parse_single_inner(bytes, offset))
            } else {
                None
            }
        })
        .collect::<Result<Vec<_>>>()?;

        Ok(entries)
    }
}

#[derive(Clone)]
pub struct DirItemEntry {
    pub dir_item: DirItem,
    pub name: Vec<u8>,
}

impl DirItemEntry {
    pub fn new(dir_item: DirItem, name: Vec<u8>) -> Self {
        Self { dir_item, name }
    }

    pub fn name_as_str(&self) -> core::result::Result<&str, core::str::Utf8Error> {
        core::str::from_utf8(&self.name)
    }

    pub fn name_as_string_lossy(&self) -> alloc::borrow::Cow<'_, str> {
        alloc::string::String::from_utf8_lossy(&self.name)
    }
}
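
// A minimal usage sketch, assuming `payload` is the raw data of a DIR_ITEM leaf
// item (which may pack several entries back to back): decode every entry and
// collect its name, replacing invalid UTF-8 lossily. `dir_entry_names` is
// illustrative and not part of the on-disk format.
#[allow(dead_code)]
fn dir_entry_names(payload: &[u8]) -> Result<Vec<alloc::string::String>> {
    Ok(DirItem::parse(payload)?
        .iter()
        .map(|entry| entry.name_as_string_lossy().into_owned())
        .collect())
}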
impl core::fmt::Debug for DirItemEntry {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("DirItemEntry")
            .field("dir_item", &self.dir_item)
            .field("name", &self.name_as_string_lossy())
            .finish()
    }
}

#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
pub struct InodeRef {
    pub index: u64,
    pub name_len: u16,
}

#[repr(C, packed)]
#[derive(Debug, Clone, PartialEq, Eq, Copy, FromBytes, AsBytes)]
pub struct Header {
    pub csum: [u8; 32],
    pub fsid: Uuid,
    /// Which block this node is supposed to live in
    pub bytenr: U64<LE>,
    pub flags: U64<LE>,
    pub chunk_tree_uuid: Uuid,
    pub generation: U64<LE>,
    pub owner: U64<LE>,
    pub nritems: U32<LE>,
    pub level: u8,
}

/// A `BtrfsLeaf` is full of `BtrfsItem`s. `offset` and `size` (relative to the start of the data
/// area) tell us where to find the item in the leaf.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, FromBytes, AsBytes)]
pub struct Item {
    pub key: Key,
    pub offset: U32<LE>,
    pub size: U32<LE>,
}

/// All non-leaf blocks are nodes and they hold only keys and pointers to other blocks.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, FromBytes, AsBytes)]
pub struct KeyPtr {
    pub key: Key,
    pub blockptr: U64<LE>,
    pub generation: U64<LE>,
}

use alloc::vec::Vec;

#[derive(Debug)]
pub enum TreeItem {
    Chunk(Chunk),
    Root(RootItem),
    DirItem(Vec<DirItemEntry>),
    DirIndex(DirItemEntry),
    Unimplemented,
}

impl From<Chunk> for TreeItem {
    fn from(value: Chunk) -> Self {
        Self::Chunk(value)
    }
}

impl From<RootItem> for TreeItem {
    fn from(value: RootItem) -> Self {
        Self::Root(value)
    }
}

impl From<Vec<DirItemEntry>> for TreeItem {
    fn from(value: Vec<DirItemEntry>) -> Self {
        Self::DirItem(value)
    }
}

impl From<DirItemEntry> for TreeItem {
    fn from(value: DirItemEntry) -> Self {
        Self::DirIndex(value)
    }
}

impl TreeItem {
    pub fn parse(item: &Item, bytes: &[u8]) -> Result<Self> {
        Ok(match item.key.ty() {
            ObjectType::RootItem => RootItem::parse(bytes)?.into(),
            ObjectType::ChunkItem => Chunk::parse(bytes)?.into(),
            ObjectType::DirItem => DirItem::parse(bytes)?.into(),
            ObjectType::DirIndex => DirItem::parse_single(bytes)?.into(),
            _ => TreeItem::Unimplemented,
        })
    }
}

impl TreeItem {
    pub fn as_chunk(&self) -> Option<&Chunk> {
        if let Self::Chunk(v) = self {
            Some(v)
        } else {
            None
        }
    }

    pub fn as_root(&self) -> Option<&RootItem> {
        if let Self::Root(v) = self {
            Some(v)
        } else {
            None
        }
    }
}

#[derive(Debug, Clone)]
pub struct BTreeLeafNode {
    pub header: Header,
    /// actual leaf data
    pub items: Vec<Item>,
}

impl BTreeLeafNode {
    pub fn parse(header: Header, bytes: &[u8]) -> Result<Self> {
        log::debug!("leaf:");
        let offset = &mut 0;
        let items = core::iter::from_fn(|| {
            if *offset + size_of::<Item>() < bytes.len() {
                let item = Item::read_from(&bytes[*offset..*offset + size_of::<Item>()]);
                *offset += size_of::<Item>();

                if let Some(item) = item.as_ref() {
                    log::debug!(
                        "\titem type {:?}: ty offset: {}",
                        item.key.ty.as_type(),
                        item.key.offset.get()
                    );
                    log::debug!("\t{item:?}");
                }

                item
            } else {
                None
            }
        })
        .take(header.nritems.get() as usize)
        .collect::<Vec<_>>();

        Ok(Self { header, items })
    }
}

#[derive(Debug, Clone)]
pub struct BTreeInternalNode {
    pub header: Header,
    pub children: Vec<KeyPtr>,
}

impl BTreeInternalNode {
    pub fn parse(header: Header, bytes: &[u8]) -> Result<Self> {
        log::debug!("internal lvl: {}", header.level);
        let offset = &mut 0;
        let size = size_of::<KeyPtr>();
        let children = core::iter::from_fn(|| {
            if *offset + size < bytes.len() {
                let item = KeyPtr::read_from(&bytes[*offset..*offset + size]);
                *offset += size;

                if let Some(item) = item.as_ref() {
                    log::debug!(
                        "\tchild gen: {} offset: {}",
                        item.generation.get(),
                        item.key.offset.get()
                    );
                }

                item
            } else {
                None
            }
        })
        .take(header.nritems.get() as usize)
        .collect::<Vec<_>>();

        Ok(Self { header, children })
    }
}
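
// A minimal sanity-check sketch (sizes taken from the fixed btrfs on-disk
// layout): a tree block starts with a 101-byte header, followed by 25-byte
// items in leaves or 33-byte key pointers in internal nodes.
#[cfg(test)]
mod node_layout_tests {
    use core::mem::size_of;

    use super::*;

    #[test]
    fn node_record_sizes() {
        assert_eq!(size_of::<Header>(), 101);
        assert_eq!(size_of::<Item>(), 25);
        assert_eq!(size_of::<KeyPtr>(), 33);
    }
}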
#[derive(Debug, Clone)]
pub enum BTreeNode {
    Internal(BTreeInternalNode),
    Leaf(BTreeLeafNode),
}
impl BTreeNode {
    pub fn parse(bytes: &[u8]) -> Result<Self> {
        let offset = &mut 0;
        let header = bytes.gread::<Header>(offset)?;

        if header.level == 0 {
            Ok(Self::Leaf(BTreeLeafNode::parse(header, &bytes[*offset..])?))
        } else {
            Ok(Self::Internal(BTreeInternalNode::parse(
                header,
                &bytes[*offset..],
            )?))
        }
    }
}
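
// A minimal usage sketch, assuming `node` holds exactly one tree block (header
// included) as read from disk: parse it and report whether it is a leaf and how
// many entries it carries. `describe_node` is illustrative and not part of the
// on-disk format.
#[allow(dead_code)]
fn describe_node(node: &[u8]) -> Result<(bool, usize)> {
    Ok(match BTreeNode::parse(node)? {
        BTreeNode::Leaf(leaf) => (true, leaf.items.len()),
        BTreeNode::Internal(internal) => (false, internal.children.len()),
    })
}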