initial commit

This commit is contained in:
Janis 2022-11-23 01:34:16 +01:00
commit 8a754c135e
4 changed files with 789 additions and 0 deletions

2
.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
/target
/Cargo.lock

17
Cargo.toml Normal file
View file

@ -0,0 +1,17 @@
[package]
name = "winreg"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
default = ["alloc", "nightly"]
nightly = []
alloc = []
[dependencies]
log = "0.4"
bytemuck = {version = "1.12.3", features = ["derive", "min_const_generics"]}
bitfield = "0.14.0"
bitflags = "1.3.2"

2
rust-toolchain.toml Normal file
View file

@ -0,0 +1,2 @@
[toolchain]
channel = "nightly"

768
src/lib.rs Normal file
View file

@ -0,0 +1,768 @@
#![cfg_attr(feature = "nightly", feature(error_in_core))]
#![no_std]
pub mod error {
    use core::fmt::Display;

    /// Reasons a hive base block can fail [`verify`](crate::BaseBlock::verify).
    #[derive(Debug, PartialEq, Eq)]
    pub enum BaseBlockVerifyError {
        /// The signature dword is not `regf` (0x66676572).
        BadSignature,
        /// Major version is not 1.
        IncompatibleMajorVersion,
        /// Minor version is not 3.
        IncompatibleMinorVersion,
        /// File type field is not "primary file" (0).
        BadFileType,
        /// Format field is not "memory format" (1).
        BadFormat,
        /// Cluster field is not 1.
        Cluster,
    }

    impl Display for BaseBlockVerifyError {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            f.write_str(match self {
                BaseBlockVerifyError::BadSignature => "Bad Signature",
                BaseBlockVerifyError::IncompatibleMajorVersion => "Incompatible Major Version",
                BaseBlockVerifyError::IncompatibleMinorVersion => "Incompatible Minor Version",
                // Previously "Bad File Type (self.kind)" — a leftover of an
                // attempted interpolation; this fieldless variant cannot
                // carry the offending value, so don't pretend it does.
                BaseBlockVerifyError::BadFileType => "Bad File Type",
                BaseBlockVerifyError::BadFormat => "Bad Format",
                BaseBlockVerifyError::Cluster => "Not the correct Cluster",
            })
        }
    }

    #[cfg(feature = "nightly")]
    impl core::error::Error for BaseBlockVerifyError {}
}
pub mod ptr {
    use bytemuck::{Pod, Zeroable};

    /// Zero-sized marker placed at the end of a `#[repr(C)]` struct to name
    /// the address where a variable-length payload begins.
    #[repr(transparent)]
    #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)]
    pub struct ZstPtr;

    // SAFETY: a zero-sized type has exactly one value; "all zeroes" is it.
    unsafe impl Zeroable for ZstPtr {
        fn zeroed() -> Self {
            Self
        }
    }

    // SAFETY: zero-sized, no padding, no invalid bit patterns.
    unsafe impl Pod for ZstPtr {}

    impl ZstPtr {
        /// The address of this marker, i.e. of whatever data follows it.
        pub fn ptr(&self) -> *const () {
            let this: *const Self = self;
            this.cast()
        }

        /// The address of this marker as a mutable pointer.
        pub fn ptr_mut(&mut self) -> *mut () {
            let this: *mut Self = self;
            this.cast()
        }
    }

    #[test]
    fn zst_ptr() {
        use core::mem::size_of;
        assert_eq!(size_of::<ZstPtr>(), 0);
        let marker = ZstPtr;
        let borrowed = &marker;
        assert_eq!(marker.ptr(), borrowed as *const ZstPtr as *const ());
    }
}
pub mod cell_data {
use core::marker::PhantomData;
use bitfield::{Bit, BitRange};
use bitflags::bitflags;
use bytemuck::{Pod, Zeroable};
use crate::{ptr::ZstPtr, Offset};
/// Element of a fast leaf ("lf") subkey list: a cell offset plus the first
/// four bytes of the subkey name (presumably a cheap pre-filter for lookups
/// — TODO confirm against format docs).
#[repr(C)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
pub struct FastEntry {
offset: Offset,
name_hint: [u8; 4],
}
/// Element of a hash leaf ("lh") subkey list: a cell offset plus a 4-byte
/// hash of the subkey name.
#[repr(C)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
pub struct HashEntry {
offset: Offset,
name_hash: [u8; 4],
}
/// Key value ("vk") cell body. The fixed-size header below is followed in
/// memory by the value name bytes, addressed via `value_name`.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
pub struct KeyValue {
name_len: u16,
data_len: u32,
data: Offset,
// raw type id; decoded by [`KeyValue::data_type`]
data_type: u32,
flags: u16,
spare: u16,
// zero-sized marker for the trailing value-name bytes
value_name: ZstPtr,
}
/// KeySecurity items form a doubly linked list. A key security item may
/// act as a list header or a list entry (the only difference here is the
/// meaning of f_link and b_link fields).
/// When a key security item acts as a list header, flink and blink point to
/// the first and the last entries of this list respectively. If a list is
/// empty, flink and blink point to a list header (i.e. to a current cell).
/// When a key security item acts as a list entry, flink and blink point to
/// the next and the previous entries of this list respectively. If there is
/// no next entry in a list, flink points to a list header. If there is no
/// previous entry in a list, blink points to a list header.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
pub struct KeySecurity {
reserved: u16,
/// offset in bytes relative to the start of hive bins data.
f_link: Offset,
/// offset in bytes relative to the start of hive bins data.
b_link: Offset,
reference_count: u32,
security_descriptor_len: u32,
security_descriptor: ZstPtr,
}
/// The [`BigData`] cell type is used to reference data larger than 16344 bytes (when the Minor version field of the base block is greater than 3).
/// the data `segments` points at is a list of `segments_len` [Offsets](Offset).
/// A data segment is stored in the data field of a [Cell] pointed by the Data segment offset field. A data segment has the maximum size of 16344 bytes.
/// `segments` of a [BigData] record, except the last one, always have the maximum size.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
pub struct BigData {
segments_len: u16,
segments: Offset,
}
/// Cells contain data which is prefixed with a 2 byte ascii signature
/// indicating what kind of actual data is stored in this cell
#[repr(C, packed)]
#[derive(Debug, Clone, PartialEq, Eq, Copy, Zeroable, Pod)]
pub struct CellHeader {
/// 2 byte ASCII signature as represented by constants in CellData
signature: [u8; 2],
}
/// Parsed view of a [`super::RawCell`]: the two-byte signature split off
/// from the remaining payload bytes.
#[derive(Debug)]
pub struct Cell<'a> {
header: CellHeader,
data: &'a [u8],
}
impl<'a> From<&'a super::RawCell> for Cell<'a> {
    /// Views the bytes following a raw cell's size field as a signature +
    /// payload pair.
    fn from(raw: &'a super::RawCell) -> Self {
        // Use `RawCell::size()` (the absolute value) instead of casting the
        // raw `i32` to `usize`: the size field may be negative, and a
        // negative `i32` cast to `usize` wraps to an enormous length,
        // producing a wildly out-of-bounds slice.
        let data = unsafe {
            core::slice::from_raw_parts::<'a, u8>(raw.data.ptr().cast(), raw.size())
        };
        // First two bytes are the ASCII signature; the rest is the payload.
        let header = *bytemuck::from_bytes(&data[..2]);
        let data = &data[2..];
        Self { header, data }
    }
}
impl<'a> From<Cell<'a>> for AnyCell<'a> {
/// Dispatches on the two-byte signature to reinterpret the cell payload
/// as the concrete record type.
///
/// NOTE(review): `bytemuck::from_bytes` panics unless the payload length
/// equals the target type's size exactly, so cells carrying trailing
/// slack bytes will panic here — confirm callers slice accordingly.
fn from(cell: Cell<'a>) -> Self {
match cell.header {
CellHeader::FAST_LEAF => {
Self::FastLeaf(bytemuck::from_bytes::<Leaf<FastEntry>>(cell.data))
}
CellHeader::INDEX_LEAF => {
Self::IndexLeaf(bytemuck::from_bytes::<Leaf<Offset>>(cell.data))
}
CellHeader::HASH_LEAF => {
Self::HashLeaf(bytemuck::from_bytes::<Leaf<HashEntry>>(cell.data))
}
// Index roots reuse the offset-list layout of index leaves.
CellHeader::INDEX_ROOT => {
Self::IndexRoot(bytemuck::from_bytes::<Leaf<Offset>>(cell.data))
}
CellHeader::KEY_NODE => Self::KeyNode(bytemuck::from_bytes::<KeyNode>(cell.data)),
CellHeader::KEY_VALUE => {
Self::KeyValue(bytemuck::from_bytes::<KeyValue>(cell.data))
}
CellHeader::KEY_SECURITY => {
Self::KeySecurity(bytemuck::from_bytes::<KeySecurity>(cell.data))
}
CellHeader::BIG_DATA => Self::BigData(bytemuck::from_bytes::<BigData>(cell.data)),
// Unrecognized signatures are not handled yet.
_ => unimplemented!(),
}
}
}
/// Helper enum to match against a generic CellData
pub enum AnyCell<'a> {
IndexLeaf(&'a Leaf<Offset>),
FastLeaf(&'a Leaf<FastEntry>),
HashLeaf(&'a Leaf<HashEntry>),
IndexRoot(&'a Leaf<Offset>),
KeyNode(&'a KeyNode),
KeyValue(&'a KeyValue),
KeySecurity(&'a KeySecurity),
BigData(&'a BigData),
}
/// Key node ("nk") cell body. The fixed header below is followed in memory
/// by the key name bytes, addressed via `key_name`.
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
pub struct KeyNode {
flags: KeyNodeFlags,
// NOTE(review): presumably a FILETIME last-written stamp — confirm
last_written: u64,
access: u32,
parent: Offset,
subkeys_len: u32,
volatile_subkeys_len: u32,
subkeys: Offset,
volatile_subkeys: Offset,
key_values_len: u32,
key_values: Offset,
key_security: Offset,
class_name: Offset,
// packed dword; decoded by [`KeyNode::largest_subkey_name_length_vista_xpsp3`]
largest_subkey_len: u32,
largest_subkey_class_len: u32,
largest_value_name_len: u32,
largest_value_data_len: u32,
work_var: u32,
key_name_len: u16,
class_name_len: u16,
// zero-sized marker for the trailing key-name bytes
key_name: ZstPtr,
}
/// Decoded view of a [`KeyNode`]'s packed `largest_subkey_len` dword, as
/// produced by [`KeyNode::largest_subkey_name_length_vista_xpsp3`].
#[derive(Debug)]
pub struct SubkeyNameLengthField {
subkey_name_length: u16,
virt_flags: VirtualizationControlFlags,
user_flags: UserFlags,
debug: DebugFlags,
}
impl SubkeyNameLengthField {
/// Length of the largest subkey name.
pub fn subkey_name_length(&self) -> u16 {
self.subkey_name_length
}
/// Virtualization control flags packed into the same dword.
pub fn virt_flags(&self) -> VirtualizationControlFlags {
self.virt_flags
}
/// User flags packed into the same dword.
pub fn user_flags(&self) -> UserFlags {
self.user_flags
}
/// Debug flags packed into the same dword.
pub fn debug(&self) -> DebugFlags {
self.debug
}
}
bitflags! {
/// Virtualization control flags carried in a key node's packed
/// `largest_subkey_len` dword (see [`SubkeyNameLengthField`]).
#[repr(C)]
#[derive(Zeroable, Pod)]
pub struct VirtualizationControlFlags: u8 {
/// Don't virtualize this key
const DONT_VIRTUALIZE = 0x2;
/// Don't silently fail accesses to this key
const DONT_SILENT_FAIL = 0x4;
/// Apply to child keys as well
const RECURSE_FLAG = 0x8;
}
}
bitflags! {
#[repr(C)]
#[derive(Zeroable, Pod)]
pub struct KeyNodeFlags: u16 {
/// Is volatile (not used, a key node on a disk isn't expected to
/// have this flag set)
const VOLATILE = 0x0001;
/// Is the mount point of another hive (a key node on a disk isn't
/// expected to have this flag set)
const HIVE_EXIT = 0x0002;
/// Is the root key for this hive
const HIVE_ENTRY = 0x0004;
/// This key can't be deleted
const NO_DELETE = 0x0008;
/// This key is a symlink (a target key is specified as a UTF-16LE
/// string (REG_LINK) in a value named "SymbolicLinkValue", example:
/// \REGISTRY\MACHINE\SOFTWARE\Classes\Wow6432Node)
const SYM_LINK = 0x0010;
/// Key name is an ASCII string, possibly an extended ASCII string
/// (otherwise it is a UTF-16LE string)
const COMP_NAME = 0x0020;
/// Is a predefined handle (a handle is stored in the Number of key
/// values field)
const PREDEF_HANDLE = 0x0040;
/// This key was virtualized at least once
const VIRTUAL_SOURCE = 0x0080;
/// Is virtual
const VIRTUAL_TARGET = 0x0100;
/// Is a part of a virtual store path
const VIRTUAL_STORE = 0x0200;
}
}
bitflags! {
#[repr(C)]
#[derive(Zeroable, Pod)]
pub struct DebugFlags: u8 {
/// This key is opened
const BREAK_ON_OPEN = 0x01;
/// This key is deleted
const BREAK_ON_DELETE = 0x02;
/// A key security item is changed for this key
const BREAK_ON_SECURITY_CHANGE = 0x04;
/// A subkey of this key is created
const BREAK_ON_CREATE_SUBKEY = 0x08;
/// A subkey of this key is deleted
const BREAK_ON_DELETE_SUBKEY = 0x10;
/// A value is set to this key
const BREAK_ON_SET_VALUE = 0x20;
/// A value is deleted from this key
const BREAK_ON_DELETE_VALUE = 0x40;
/// This key is virtualized
const BREAK_ON_KEY_VIRTUALIZE = 0x80;
}
}
bitflags! {
#[repr(C)]
#[derive(Zeroable, Pod)]
pub struct UserFlags: u8 {
/// Is a 32-bit key: this key was created through the Wow64
/// subsystem or this key shall not be used by a 64-bit program
/// (e.g. by a 64-bit driver during the boot)
const KEY_32_BIT = 0x1;
/// This key was created by the reflection process (when reflecting
/// a key from another view)
const REFLECTION_KEY = 0x2;
/// Disable registry reflection for this key
const DISABLE_REFLECTION = 0x4;
/// In the old location of the User flags field: execute the int 3
/// instruction on an access to this key (both retail and checked
/// Windows kernels), this bit was superseded by the Debug field
/// (see below); in the new location of the User flags field:
/// disable registry reflection for this key if a corresponding key
/// exists in another view and it wasn't created by a caller (see
/// below)
const TRIGGER_INT3_OR_DISABLE_REFLECTION = 0x8;
}
}
bitflags! {
/// Flag byte of a key node's access-bits dword (low 8 bits; see
/// [`AccessBits`]).
#[repr(C)]
#[derive(Zeroable, Pod)]
pub struct InnerAccessBits: u8 {
// NOTE(review): names suggest "accessed before/after registry
// initialization during boot" — confirm against format docs.
const PRE_BOOT_REG_INIT = 0x1;
const POST_BOOT_REG_INIT = 0x2;
}
}
/// Decoded access-bits dword: a flag byte plus the layered-key bit fields
/// (built from a raw `u32` via `From<u32>`).
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
pub struct AccessBits {
access_bits: InnerAccessBits,
layered_key_bit_fields: LayeredKeyBitFields,
}
impl From<u32> for AccessBits {
    /// Splits a packed access-bits dword into its flag byte (bits 7..=0)
    /// and layered-key bit fields (bits 15..=8).
    fn from(value: u32) -> Self {
        // `bitfield::BitRange::bit_range(msb, lsb)` takes the *most*
        // significant bit first. The previous calls passed (0, 8)/(8, 16),
        // i.e. msb < lsb, which is not a valid range.
        let access_bits = InnerAccessBits::from_bits_truncate(value.bit_range(7, 0));
        let layered_key_bit_fields = LayeredKeyBitFields(value.bit_range(15, 8));
        Self {
            access_bits,
            layered_key_bit_fields,
        }
    }
}
/// Second byte of the access-bits dword: bit 0 is the inherit-class flag
/// and the top two bits encode the [`LayerSemantics`].
#[repr(C, packed)]
#[derive(Debug, Clone, Copy, Zeroable, Pod)]
struct LayeredKeyBitFields(u8);
/// Two-bit layer-semantics field of a layered key.
#[repr(C)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LayerSemantics {
Zero = 0x0,
IsTombstone,
IsSupersedeLocal,
IsSupersedeTree,
}
impl LayeredKeyBitFields {
    /// Bit 0 of the byte (the `inherit_class` flag).
    pub fn inherit_class(&self) -> bool {
        self.0.bit(0)
    }

    /// Layer semantics stored in the top two bits (7..=6) of the byte.
    pub fn layer_semantics(&self) -> LayerSemantics {
        // `bitfield::BitRange::bit_range(msb, lsb)` takes the most
        // significant bit first; the previous call passed (6, 8), i.e.
        // msb < lsb, which is not a valid range.
        match BitRange::<u8>::bit_range(&self.0, 7, 6) {
            0 => LayerSemantics::Zero,
            1 => LayerSemantics::IsTombstone,
            2 => LayerSemantics::IsSupersedeLocal,
            3 => LayerSemantics::IsSupersedeTree,
            // A two-bit field cannot hold any other value.
            _ => unreachable!(),
        }
    }
}
impl KeyNode {
    /// Decodes the packed `largest_subkey_len` dword using the layout
    /// introduced with Vista / XP SP3: subkey name length in bits 15..=0,
    /// virtualization flags in 19..=16, user flags in 23..=20 and debug
    /// flags in 31..=24.
    pub fn largest_subkey_name_length_vista_xpsp3(&self) -> SubkeyNameLengthField {
        // Copy out of the packed struct before taking bit ranges.
        let field = self.largest_subkey_len;
        // `bitfield::BitRange::bit_range(msb, lsb)` takes the most
        // significant bit first; the previous calls passed (lsb, msb)
        // pairs like (0, 16), which are not valid ranges.
        SubkeyNameLengthField {
            subkey_name_length: field.bit_range(15, 0),
            virt_flags: VirtualizationControlFlags::from_bits_truncate(field.bit_range(19, 16)),
            user_flags: UserFlags::from_bits_truncate(field.bit_range(23, 20)),
            debug: DebugFlags::from_bits_truncate(field.bit_range(31, 24)),
        }
    }
}
/// A general view of a *Leaf Cell, which can either be Index, Fast or Hash.
/// All 3 of these CellData structs share the same common layout and only
/// differ in what data represents their element
///
/// `len` elements of type `T` follow this header in memory, starting at
/// `elements`; no `T` is stored inline, hence the [`PhantomData`].
#[repr(C)]
#[derive(Debug)]
pub struct Leaf<T: Sized> {
len: u16,
elements: ZstPtr,
_phantom: PhantomData<T>,
}
// Manual Clone/Copy: a derive would add `T: Clone`/`T: Copy` bounds even
// though no `T` value is actually stored.
impl<T: Sized> Clone for Leaf<T> {
fn clone(&self) -> Self {
Self {
len: self.len,
elements: ZstPtr,
_phantom: PhantomData,
}
}
}
impl<T: Sized> Copy for Leaf<T> {}
// SAFETY: all fields (`u16`, two ZSTs) are zeroable.
unsafe impl<T: Sized> Zeroable for Leaf<T> {}
// SAFETY: `repr(C)` with one `u16` field plus zero-sized markers — no
// padding and no invalid bit patterns. Written by hand because bytemuck's
// derive does not handle the generic parameter here.
unsafe impl<T: Sized + 'static> Pod for Leaf<T> {}
impl KeyValue {
    /// Interprets the raw `data_type` dword as a [`DataType`], preserving
    /// unrecognized ids in [`DataType::Other`].
    pub fn data_type(&self) -> DataType {
        // The well-known registry value types 0x00..=0x0b, in id order.
        const KNOWN: [DataType; 12] = [
            DataType::None,
            DataType::Sz,
            DataType::ExpandSz,
            DataType::Binary,
            DataType::DWordLE,
            DataType::DWordBE,
            DataType::Link,
            DataType::MultiSz,
            DataType::ResourceList,
            DataType::FullResourceDescriptor,
            DataType::ResourceRequirementsList,
            DataType::QWordLE,
        ];
        // Copy the field out of the packed struct before using it.
        let raw = self.data_type;
        KNOWN
            .get(raw as usize)
            .copied()
            .unwrap_or(DataType::Other(raw))
    }
}
impl CellHeader {
/// index leaf ("li"): list of subkey offsets
const INDEX_LEAF: Self = Self::from_sig(b"li");
/// fast leaf ("lf"): offsets plus 4-byte name hints
const FAST_LEAF: Self = Self::from_sig(b"lf");
/// hash leaf ("lh"): offsets plus 4-byte name hashes
const HASH_LEAF: Self = Self::from_sig(b"lh");
/// index root ("ri")
const INDEX_ROOT: Self = Self::from_sig(b"ri");
/// key node ("nk")
const KEY_NODE: Self = Self::from_sig(b"nk");
/// key value ("vk")
const KEY_VALUE: Self = Self::from_sig(b"vk");
/// key security ("sk")
const KEY_SECURITY: Self = Self::from_sig(b"sk");
/// big data ("bd")
const BIG_DATA: Self = Self::from_sig(b"bd");
/// Builds a header from a two-byte ASCII signature.
#[allow(dead_code)]
const fn from_sig(sig: &[u8; 2]) -> Self {
Self { signature: *sig }
}
}
/// Registry value data types as stored in a key value's `data_type` field;
/// ids not listed here are preserved in [`DataType::Other`].
#[derive(Debug, Clone, Copy)]
pub enum DataType {
None,
Sz,
ExpandSz,
Binary,
DWordLE,
DWordBE,
Link,
MultiSz,
ResourceList,
FullResourceDescriptor,
ResourceRequirementsList,
QWordLE,
Other(u32),
}
#[cfg(test)]
mod test {
use super::*;
use core::mem::size_of;
/// The on-disk format fixes the sizes of these packed records exactly.
#[test]
fn sizes() {
assert_eq!(size_of::<FastEntry>(), 8, "size_of::<FastEntry>() != 8");
assert_eq!(size_of::<HashEntry>(), 8, "size_of::<HashEntry>() != 8");
assert_eq!(size_of::<KeyNode>(), 74, "size_of::<KeyNode>() != 74");
assert_eq!(size_of::<KeyValue>(), 18, "size_of::<KeyValue>() != 18");
}
}
}
use bytemuck::{Pod, Zeroable};
use cell_data::Cell;
use error::BaseBlockVerifyError;
use ptr::ZstPtr;
#[cfg(feature = "alloc")]
extern crate alloc;
/// Primary/secondary sequence-number pair of a [`BaseBlock`]; the two
/// differ exactly when the last write did not complete (see
/// [`BaseBlockSequence::dirty`]).
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, Pod, Zeroable)]
pub struct BaseBlockSequence {
before: u32,
after: u32,
}
/// 32-bit offset into the hive; `0xffff_ffff` is the "no offset" sentinel
/// (see [`Offset::offset`]).
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, Pod, Zeroable)]
pub struct Offset(u32);
/// The 4096-byte hive base block ("regf" header).
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, Pod, Zeroable)]
pub struct BaseBlock {
/// must equal [`BaseBlock::SIGNATURE`] (`regf`)
signature: u32,
sequence: BaseBlockSequence,
// NOTE(review): field name is a typo for `time_stamp`; renaming is
// safe layout-wise but touches nothing else in this file.
time_stmap: u64,
major: u32,
minor: u32,
/// file type; 0 = primary file
kind: u32,
/// base format; 1 = memory format
format: u32,
/// offset of the root cell
root_cell: Offset,
length: u32,
cluster: u32,
/// UTF-16 file name
file_name: [u16; 32],
reserved: [u32; 99],
/// XOR checksum of the first 508 bytes; see [`BaseBlock::calculate_checksum`]
checksum: u32,
reserved2: [u32; 0x37E],
boot_type: u32,
boot_recover: u32,
}
/// Header of a hive bin ("hbin" — NOTE(review): the signature is not
/// validated anywhere in this file; confirm intended).
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, Pod, Zeroable)]
pub struct BinHeader {
signature: [u8; 4],
offset: u32,
/// total size of the bin in bytes, header included (see [`RawBin::len`])
size: u32,
reserved: u64,
timestamp: u64,
_spare: u32,
}
/// Raw cell as laid out in a hive bin: a signed size dword followed by the
/// cell payload (addressed via `data`).
///
/// NOTE(review): sizes may be stored negated — the absolute value is used
/// everywhere here; confirm sign semantics against the hive format docs.
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, Pod, Zeroable)]
pub struct RawCell {
    size: i32,
    data: ZstPtr,
}

impl RawCell {
    /// Size of this cell in bytes, regardless of the sign of the stored
    /// size field.
    pub fn size(&self) -> usize {
        // `unsigned_abs` is total, unlike `i32::abs`, which overflows
        // (panicking in debug builds) on `i32::MIN`.
        self.size.unsigned_abs() as usize
    }
}
/// A hive bin as it sits in memory: a header followed by a sequence of
/// cells starting at `cells`.
#[repr(C, packed)]
#[derive(Debug, Copy, Clone, Pod, Zeroable)]
pub struct RawBin {
header: BinHeader,
cells: ZstPtr,
}
impl RawBin {
/// Number of cell bytes in this bin (total size minus the header).
///
/// NOTE(review): underflows if `header.size < 32`; assumes the header
/// was validated beforehand — confirm.
pub fn len(&self) -> usize {
self.header.size as usize - core::mem::size_of::<BinHeader>()
}
/// The raw cell bytes following the header.
///
/// Trusts `len()`, i.e. the on-disk `size` field, to describe memory
/// actually backing this bin.
pub fn cells_bytes<'a>(&'a self) -> &'a [u8] {
unsafe { core::slice::from_raw_parts::<'a, u8>(self.cells.ptr().cast(), self.len()) }
}
/// Iterates over the cells contained in this bin.
pub fn iter(&self) -> CellIterator {
CellIterator {
bin: self,
offset: 0,
}
}
}
/// Iterator over the cells of a [`RawBin`]; `offset` is the byte position
/// of the next cell within [`RawBin::cells_bytes`].
#[derive(Debug)]
pub struct CellIterator<'a> {
bin: &'a RawBin,
offset: usize,
}
impl<'a> Iterator for CellIterator<'a> {
    type Item = Cell<'a>;

    /// Yields the next cell, advancing by the cell's (absolute) size.
    fn next(&mut self) -> Option<Self::Item> {
        if self.offset >= self.bin.len() {
            return None;
        }
        let bytes = self.bin.cells_bytes();
        // `bytemuck::try_from_bytes` requires the slice length to equal
        // `size_of::<RawCell>()` exactly, so carve out exactly that many
        // bytes. The previous code passed the whole tail of the bin, which
        // only has the right length for a cell starting 4 bytes from the
        // end — every other call returned `Err` and ended iteration.
        let end = self.offset.checked_add(core::mem::size_of::<RawCell>())?;
        let next = bytemuck::try_from_bytes::<RawCell>(bytes.get(self.offset..end)?).ok()?;
        let advance = next.size();
        // A zero-sized cell would never advance the cursor; stop instead
        // of looping forever on corrupt input.
        if advance == 0 {
            return None;
        }
        self.offset += advance;
        Some(next.into())
    }
}
/// A bin with its cells eagerly collected (requires the `alloc` feature).
#[cfg(feature = "alloc")]
#[derive(Debug)]
pub struct Bin<'a> {
raw: &'a RawBin,
cells: alloc::vec::Vec<Cell<'a>>,
}
#[cfg(feature = "alloc")]
impl<'a> Bin<'a> {
/// The underlying bin header.
pub fn header(&self) -> &BinHeader {
&self.raw.header
}
/// The cells collected from this bin.
pub fn cells(&self) -> &[Cell] {
self.cells.as_ref()
}
}
#[cfg(feature = "alloc")]
impl<'a> From<&'a RawBin> for Bin<'a> {
/// Collects every cell of `bin` up front via [`RawBin::iter`].
fn from(bin: &'a RawBin) -> Self {
let cells = bin.iter().collect::<alloc::vec::Vec<_>>();
Self { raw: bin, cells }
}
}
impl Offset {
    /// The offset as a `usize`, or `None` for the all-ones sentinel that
    /// marks "no offset".
    pub fn offset(&self) -> Option<usize> {
        match self.0 {
            u32::MAX => None,
            raw => Some(raw as usize),
        }
    }
}
impl BaseBlock {
    /// `regf` in little-endian ASCII.
    const SIGNATURE: u32 = 0x66676572;
    const MAJOR: u32 = 1;
    const MINOR: u32 = 3;
    const FILE_TYPE_PRIMARY: u32 = 0;
    const BASE_FORMAT_MEMORY: u32 = 1;

    /// XORs the first 508 bytes (127 dwords) of the base block together,
    /// then remaps the two reserved results so the checksum is never 0 or
    /// 0xffff_ffff.
    pub fn calculate_checksum(&self) -> u32 {
        let bytes = bytemuck::bytes_of(self);
        // Read dwords with `from_le_bytes` rather than casting the byte
        // pointer to `*const u32`: `BaseBlock` is `repr(packed)` (align 1),
        // so a `u32` read through such a pointer may be misaligned, which
        // is undefined behavior. The dwords are little-endian on disk.
        let checksum = bytes[..508]
            .chunks_exact(4)
            .map(|dword| u32::from_le_bytes([dword[0], dword[1], dword[2], dword[3]]))
            .fold(0u32, |acc, dword| acc ^ dword);
        match checksum {
            0xffff_ffff => 0xffff_fffe,
            0 => 1,
            other => other,
        }
    }

    /// returns either any error encountered when verifying this base block or the dirtiness
    pub fn verify(&self) -> Result<bool, BaseBlockVerifyError> {
        if self.signature != Self::SIGNATURE {
            return Err(BaseBlockVerifyError::BadSignature);
        }
        if self.major != Self::MAJOR {
            return Err(BaseBlockVerifyError::IncompatibleMajorVersion);
        }
        if self.minor != Self::MINOR {
            return Err(BaseBlockVerifyError::IncompatibleMinorVersion);
        }
        if self.kind != Self::FILE_TYPE_PRIMARY {
            return Err(BaseBlockVerifyError::BadFileType);
        }
        if self.format != Self::BASE_FORMAT_MEMORY {
            return Err(BaseBlockVerifyError::BadFormat);
        }
        if self.cluster != 1 {
            // Was `BadSignature`, which mislabeled a cluster mismatch; the
            // `Cluster` variant existed but was never used.
            return Err(BaseBlockVerifyError::Cluster);
        }
        Ok(self.dirty())
    }

    /// returns true if this block is dirty
    ///
    /// A hive is considered to be dirty (i.e. requiring recovery) when a base
    /// block in a primary file contains a wrong checksum, or its primary
    /// sequence number doesn't match its secondary sequence number. If a hive
    /// isn't dirty, but a transaction log file (new format) contains subsequent
    /// log entries, they are ignored.
    pub fn dirty(&self) -> bool {
        self.sequence.dirty() || self.calculate_checksum() != self.checksum
    }
}
impl BaseBlockSequence {
    /// A mismatch between the two sequence numbers means the last write was
    /// interrupted, i.e. the hive is dirty.
    pub fn dirty(&self) -> bool {
        let (primary, secondary) = self.get();
        primary != secondary
    }

    /// The primary and secondary sequence numbers, in that order.
    pub fn get(&self) -> (u32, u32) {
        (self.before, self.after)
    }
}
/// Layout regression tests: the on-disk hive format fixes these sizes and
/// field offsets exactly, so any drift here is a parsing bug.
#[cfg(test)]
mod aligns_and_size {
use super::*;
use core::mem::size_of;
extern crate std;
#[test]
fn size_of_base_block() {
// The base block always occupies one 4 KiB page.
assert_eq!(size_of::<BaseBlock>(), 0x1000);
}
#[test]
fn size_of_base_block_sequence() {
assert_eq!(size_of::<BaseBlockSequence>(), 8);
}
#[test]
fn size_of_bin_header() {
assert_eq!(size_of::<BinHeader>(), 32);
}
#[test]
fn size_of_raw_cell() {
assert_eq!(size_of::<RawCell>(), 4);
}
#[test]
fn offset_of_cells() {
// Cell data must begin directly after the 32-byte bin header.
assert_eq!(bytemuck::offset_of!(RawBin::zeroed(), RawBin, cells), 32);
}
#[test]
fn offset_of_cell_data() {
// Cell payload must begin directly after the 4-byte size field.
assert_eq!(bytemuck::offset_of!(RawCell::zeroed(), RawCell, data), 4);
}
}