From f197fca6021ab0079194d8a06ac753c5ade0e4f2 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 22 Apr 2026 14:48:33 +0000 Subject: [PATCH 1/2] use checked arithmetic for HVCI/HEKI --- litebox_platform_lvbs/src/mshv/error.rs | 4 ++ litebox_platform_lvbs/src/mshv/vsm.rs | 66 +++++++++++++++---- .../src/mshv/vsm_intercept.rs | 7 +- 3 files changed, 64 insertions(+), 13 deletions(-) diff --git a/litebox_platform_lvbs/src/mshv/error.rs b/litebox_platform_lvbs/src/mshv/error.rs index 5d6274be5..56a424aed 100644 --- a/litebox_platform_lvbs/src/mshv/error.rs +++ b/litebox_platform_lvbs/src/mshv/error.rs @@ -27,6 +27,9 @@ pub enum VsmError { #[error("code page offset overflow when computing VTL return address")] CodePageOffsetOverflow, + #[error("integer overflow while processing VTL0-controlled range data")] + IntegerOverflow, + // End-of-Boot Restriction Errors #[error("{0} not allowed after end of boot")] OperationAfterEndOfBoot(&'static str), @@ -195,6 +198,7 @@ impl From for Errno { | VsmError::KexecSegmentRangeInvalid | VsmError::ModuleElfSizeExceeded { .. 
} | VsmError::CodePageOffsetOverflow + | VsmError::IntegerOverflow | VsmError::SymbolNameTooLong | VsmError::SymbolTableOutOfRange => Errno::ERANGE, diff --git a/litebox_platform_lvbs/src/mshv/vsm.rs b/litebox_platform_lvbs/src/mshv/vsm.rs index 483de17bb..4b7aa0346 100644 --- a/litebox_platform_lvbs/src/mshv/vsm.rs +++ b/litebox_platform_lvbs/src/mshv/vsm.rs @@ -84,11 +84,15 @@ pub(crate) fn init(is_bsp: bool) { if is_bsp { if let Ok((start, size)) = get_vtl1_memory_info() { - debug_serial_println!("VSM: Protect GPAs from {:#x} to {:#x}", start, start + size); + let end = start + .checked_add(size) + .ok_or(VsmError::IntegerOverflow) + .expect("VTL1 memory range overflow while protecting startup memory"); + debug_serial_println!("VSM: Protect GPAs from {:#x} to {:#x}", start, end); if protect_physical_memory_range( PhysFrame::range( PhysFrame::containing_address(PhysAddr::new(start)), - PhysFrame::containing_address(PhysAddr::new(start + size)), + PhysFrame::containing_address(PhysAddr::new(end)), ), MemAttr::empty(), ) @@ -904,11 +908,14 @@ fn apply_vtl0_text_patch(heki_patch: HekiPatch) -> Result<(), VsmError> { } fn mshv_vsm_allocate_ringbuffer_memory(phys_addr: u64, size: usize) -> Result { + let end = phys_addr + .checked_add(size as u64) + .ok_or(VsmError::IntegerOverflow)?; set_ringbuffer(PhysAddr::new(phys_addr), size); protect_physical_memory_range( PhysFrame::range( PhysFrame::containing_address(PhysAddr::new(phys_addr)), - PhysFrame::containing_address(PhysAddr::new(phys_addr + (size as u64))), + PhysFrame::containing_address(PhysAddr::new(end)), ), MemAttr::MEM_ATTR_READ, )?; @@ -1298,7 +1305,7 @@ fn copy_heki_pages_from_vtl0(pa: u64, nranges: u64) -> Option> { return None; } - range += heki_page.nranges; + range = range.checked_add(heki_page.nranges)?; next_pa = PhysAddr::new(heki_page.next_pa); heki_pages.push(*heki_page); } @@ -1425,9 +1432,10 @@ impl MemoryContainer { pub fn get_range(&self) -> Option> { let start_range = self.range.first()?; let 
end_range = self.range.last()?; + let end = end_range.addr.as_u64().checked_add(end_range.len)?; Some(Range { start: start_range.addr, - end: end_range.addr + end_range.len, + end: VirtAddr::try_new(end).ok()?, }) } @@ -1435,8 +1443,20 @@ impl MemoryContainer { let addr = VirtAddr::try_new(heki_range.va).map_err(|_| VsmError::InvalidVirtualAddress)?; let phys_addr = PhysAddr::try_new(heki_range.pa).map_err(|_| VsmError::InvalidPhysicalAddress)?; + let len = heki_range + .epa + .checked_sub(heki_range.pa) + .ok_or(VsmError::IntegerOverflow)?; if let Some(last_range) = self.range.last() - && last_range.addr + last_range.len != addr + && VirtAddr::try_new( + last_range + .addr + .as_u64() + .checked_add(last_range.len) + .ok_or(VsmError::IntegerOverflow)?, + ) + .map_err(|_| VsmError::InvalidVirtualAddress)? + != addr { debug_serial_println!("Discontiguous address found {heki_range:?}"); // NOTE: Intentionally not returning an error here. @@ -1446,7 +1466,7 @@ impl MemoryContainer { self.range.push(MemoryRange { addr, phys_addr, - len: heki_range.epa - heki_range.pa, + len, }); Ok(()) } @@ -1458,14 +1478,22 @@ impl MemoryContainer { if self.buf.is_empty() { for range in &self.range { let range_len: usize = range.len.truncate(); - len += range_len; + len = len + .checked_add(range_len) + .ok_or(MemoryContainerError::Overflow)?; } self.buf.reserve_exact(len); } let range = self.range.clone(); for range in range { - self.write_vtl0_phys_bytes(range.phys_addr, range.phys_addr + range.len)?; + let phys_end = range + .phys_addr + .as_u64() + .checked_add(range.len) + .and_then(|end| PhysAddr::try_new(end).ok()) + .ok_or(MemoryContainerError::Overflow)?; + self.write_vtl0_phys_bytes(range.phys_addr, phys_end)?; } Ok(()) } @@ -1492,7 +1520,11 @@ impl MemoryContainer { let src = &page.0[src_offset..src_offset + src_len]; self.buf.extend_from_slice(src); - phys_cur += src_len as u64; + phys_cur = phys_cur + .as_u64() + .checked_add(src_len as u64) + .and_then(|next| 
PhysAddr::try_new(next).ok()) + .ok_or(MemoryContainerError::Overflow)?; bytes_to_copy -= src_len; } Ok(()) @@ -1513,6 +1545,8 @@ impl core::ops::Deref for MemoryContainer { pub enum MemoryContainerError { #[error("failed to copy data from VTL0")] CopyFromVtl0Failed, + #[error("integer overflow while processing VTL0 memory")] + Overflow, } pub struct KexecMemoryMetadataWrapper { @@ -1733,7 +1767,11 @@ impl PatchDataMap { // Step 2 of `text_poke_bp_batch` where we only know the second to last bytes of the patch such // that cannot know the address of the first page. Details are in `validate_text_poke_bp_batch`. if !patch_target_pa_1.is_null() - && (patch_target_pa_0 + 1).is_aligned(Size4KiB::SIZE) + && patch_target_pa_0 + .as_u64() + .checked_add(1) + .and_then(|next| PhysAddr::try_new(next).ok()) + .is_some_and(|next| next.is_aligned(Size4KiB::SIZE)) { mod_mem_meta.insert_patch_target(patch_target_pa_1); inner.insert(patch_target_pa_1, patch); @@ -1744,7 +1782,11 @@ impl PatchDataMap { } else { inner.insert(patch_target_pa_0, patch); if !patch_target_pa_1.is_null() - && (patch_target_pa_0 + 1).is_aligned(Size4KiB::SIZE) + && patch_target_pa_0 + .as_u64() + .checked_add(1) + .and_then(|next| PhysAddr::try_new(next).ok()) + .is_some_and(|next| next.is_aligned(Size4KiB::SIZE)) { inner.insert(patch_target_pa_1, patch); } diff --git a/litebox_platform_lvbs/src/mshv/vsm_intercept.rs b/litebox_platform_lvbs/src/mshv/vsm_intercept.rs index 8ab2967d9..4a88b7df6 100644 --- a/litebox_platform_lvbs/src/mshv/vsm_intercept.rs +++ b/litebox_platform_lvbs/src/mshv/vsm_intercept.rs @@ -151,7 +151,12 @@ pub fn vsm_handle_intercept() { #[inline] fn advance_vtl0_rip(int_msg_hdr: &HvInterceptMessageHeader) -> Result { - let new_vtl0_rip = int_msg_hdr.rip + u64::from(int_msg_hdr.instruction_length); + let Some(new_vtl0_rip) = int_msg_hdr + .rip + .checked_add(u64::from(int_msg_hdr.instruction_length)) + else { + return raise_vtl0_gp_fault(); + }; 
hvcall_set_vp_vtl0_registers(HV_X64_REGISTER_RIP, new_vtl0_rip) } From 87b2decc3013dda108505745e47d677905c50ec7 Mon Sep 17 00:00:00 2001 From: Sangho Lee Date: Wed, 22 Apr 2026 17:07:57 +0000 Subject: [PATCH 2/2] use checked arithmetic in LVBS runner --- litebox_runner_lvbs/src/lib.rs | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/litebox_runner_lvbs/src/lib.rs b/litebox_runner_lvbs/src/lib.rs index ee598e4be..951827bcd 100644 --- a/litebox_runner_lvbs/src/lib.rs +++ b/litebox_runner_lvbs/src/lib.rs @@ -99,8 +99,18 @@ pub fn seed_initial_heap() { pub fn init(is_bsp: bool) -> Option<&'static Platform> { let ret = if is_bsp { let (start, size) = get_vtl1_memory_info().expect("Failed to get memory info"); + let min_vtl1_size = ((VTL1_REMAP_PDE_PAGE + 1) * PAGE_SIZE) as u64; + assert!( + size >= min_vtl1_size, + "VTL1 memory size is too small for fixed boot layout" + ); let vtl1_start = x86_64::PhysAddr::new(start); - let vtl1_end = x86_64::PhysAddr::new(start + size); + // This `checked_add` covers all `vtl1_start + offset` calculations within this function. + let vtl1_end = x86_64::PhysAddr::new( + start + .checked_add(size) + .expect("VTL1 memory range overflow in init()"), + ); // Re-compute the pre-populated region bounds needed for the // remaining-memory add after `Platform::new()` below. @@ -184,8 +194,10 @@ pub fn init(is_bsp: bool) -> Option<&'static Platform> { // Add the rest of the VTL1 memory to the global allocator once they are mapped to the base page table. 
let mem_fill_start = mem_fill_start + mem_fill_size; + let vtl1_base_va = Platform::pa_to_va(vtl1_start).as_u64(); let mem_fill_size = TruncateExt::::truncate( - size - (mem_fill_start as u64 - Platform::pa_to_va(vtl1_start).as_u64()), + size.checked_sub((mem_fill_start as u64) - vtl1_base_va) + .expect("remaining VTL1 memory size underflow in init()"), ); unsafe { Platform::mem_fill_pages(mem_fill_start, mem_fill_size); @@ -261,7 +273,9 @@ fn optee_smc_handler_entry_inner( smc_args_pfn: u64, ) -> Result { let smc_args_pfn: usize = smc_args_pfn.truncate(); - let smc_args_addr = smc_args_pfn << litebox_platform_lvbs::mshv::vtl1_mem_layout::PAGE_SHIFT; + let smc_args_addr = smc_args_pfn + .checked_mul(1usize << litebox_platform_lvbs::mshv::vtl1_mem_layout::PAGE_SHIFT) + .ok_or(litebox_common_linux::errno::Errno::EINVAL)?; let smc_args_updated = optee_smc_handler(smc_args_addr); // Write back the SMC arguments page to normal world memory. @@ -1287,8 +1301,9 @@ fn write_rpc_args_to_normal_world( let mut blob = vec![0u8; rpc_args_size]; rpc_args.serialize(&mut blob)?; - let rpc_pa: usize = - >::truncate(msg_args_phys_addr) + msg_args_size; // RPC args are placed right after the main msg_args blob + let rpc_pa: usize = >::truncate(msg_args_phys_addr) + .checked_add(msg_args_size) + .ok_or(OpteeSmcReturnCode::EBadAddr)?; // RPC args are placed right after the main msg_args blob let mut ptr = NormalWorldMutPtr::::with_contiguous_pages(rpc_pa, rpc_args_size)?; // SAFETY: Writing rpc_args back to normal world memory at a valid physical address. // The blob contains the serialized variable-length optee_msg_arg structure(s).