diff --git a/litebox_shim_optee/src/loader/elf.rs b/litebox_shim_optee/src/loader/elf.rs
index e75bbf6c7..13a511edc 100644
--- a/litebox_shim_optee/src/loader/elf.rs
+++ b/litebox_shim_optee/src/loader/elf.rs
@@ -41,7 +41,8 @@ fn read_at(elf: &ElfFileInMemory, offset: u64, buf: &mut [u8]) -> Result<(), Err
     if offset >= elf.buffer.len() {
         return Err(Errno::ENODATA);
     }
-    let end = core::cmp::min(offset + buf.len(), elf.buffer.len());
+    let available = elf.buffer.len() - offset;
+    let end = offset + core::cmp::min(buf.len(), available);
     let len = end - offset;
     buf[..len].copy_from_slice(&elf.buffer[offset..end]);
     Ok(())
 }
@@ -74,7 +75,9 @@ impl litebox_common_linux::loader::MapMemory for ElfFileInMemory<'_> {
     fn reserve(&mut self, len: usize, align: usize) -> Result {
         // Allocate a mapping large enough that even if it's maximally misaligned we can
        // still fit `len` bytes.
-        let mapping_len = len + (align.max(PAGE_SIZE) - PAGE_SIZE);
+        let mapping_len = len
+            .checked_add(align.max(PAGE_SIZE) - PAGE_SIZE)
+            .ok_or(Errno::ENOMEM)?;
         let mapping_ptr = self
             .task
             .sys_mmap(
@@ -87,9 +90,14 @@ impl litebox_common_linux::loader::MapMemory for ElfFileInMemory<'_> {
             )?
             .as_usize();
 
-        let ptr = mapping_ptr.next_multiple_of(align);
-        let end = ptr + len;
-        let mapping_end = mapping_ptr + mapping_len;
+        let ptr = mapping_ptr
+            .checked_next_multiple_of(align)
+            .ok_or(Errno::ENOMEM)?;
+        let end = ptr.checked_add(len).ok_or(Errno::ENOMEM)?;
+        let mapping_end = mapping_ptr.checked_add(mapping_len).ok_or(Errno::ENOMEM)?;
+        if end > mapping_end {
+            return Err(Errno::ENOMEM);
+        }
         if ptr != mapping_ptr {
             self.task
                 .sys_munmap(MutPtr::from_usize(mapping_ptr), ptr - mapping_ptr)?;
@@ -132,7 +140,8 @@ impl litebox_common_linux::loader::MapMemory for ElfFileInMemory<'_> {
         // MAP_ANONYMOUS ensures remaining bytes are zero if src is shorter than len.
         let offset: usize = offset.truncate();
         if len > 0 && offset < self.buffer.len() {
-            let end = core::cmp::min(offset + len, self.buffer.len());
+            let available = self.buffer.len() - offset;
+            let end = offset + core::cmp::min(len, available);
             let src = &self.buffer[offset..end];
             let user_ptr = UserMutPtr::<u8>::from_usize(mapped_addr);
             user_ptr
diff --git a/litebox_shim_optee/src/msg_handler.rs b/litebox_shim_optee/src/msg_handler.rs
index e83cd54bb..64307b62b 100644
--- a/litebox_shim_optee/src/msg_handler.rs
+++ b/litebox_shim_optee/src/msg_handler.rs
@@ -60,8 +60,9 @@ fn page_align_down(address: u64) -> u64 {
 }
 
 #[inline]
-fn page_align_up(len: u64) -> u64 {
-    len.next_multiple_of(PAGE_SIZE as u64)
+fn page_align_up(len: u64) -> Result<u64, OpteeSmcReturnCode> {
+    len.checked_next_multiple_of(PAGE_SIZE as u64)
+        .ok_or(OpteeSmcReturnCode::EBadAddr)
 }
 
 fn parse_optee_msg_args(
@@ -230,7 +231,10 @@ pub fn handle_optee_smc_args(
     if page_index >= shm_info.page_addrs.len() {
         return Err(OpteeSmcReturnCode::EBadAddr);
     }
-    let msg_args_addr = shm_info.page_addrs[page_index].as_usize() + offset_in_page;
+    let msg_args_addr = shm_info.page_addrs[page_index]
+        .as_usize()
+        .checked_add(offset_in_page)
+        .ok_or(OpteeSmcReturnCode::EBadAddr)?;
 
     Ok(OpteeSmcResult::CallWithArg {
         msg_args,
@@ -305,7 +309,10 @@ pub fn handle_optee_msg_args(msg_args: &OpteeMsgArgs) -> Result<(), OpteeSmcRetu
     // - The page offset of the first shared memory page (`pages_list[0]`)
     let shm_ref_pages_data_phys_addr = page_align_down(tmem.buf_ptr);
     let page_offset = tmem.buf_ptr - shm_ref_pages_data_phys_addr;
-    let aligned_size = page_align_up(page_offset + tmem.size);
+    let aligned_size = page_offset
+        .checked_add(tmem.size)
+        .ok_or(OpteeSmcReturnCode::EBadAddr)
+        .and_then(page_align_up)?;
     shm_ref_map().register_shm(
         shm_ref_pages_data_phys_addr,
         page_offset,
@@ -353,9 +360,6 @@ pub struct TaRequestInfo {
 /// memory to create `UteeParamOwned` structures to avoid potential data corruption during TA
 /// execution.
 ///
-/// # Panics
-///
-/// Panics if any conversion from `u64` to `usize` fails. OP-TEE shim doesn't support a 32-bit environment.
 pub fn decode_ta_request(
     msg_args: &OpteeMsgArgs,
 ) -> Result<Option<TaRequestInfo>, OpteeSmcReturnCode> {
@@ -399,6 +403,12 @@ pub fn decode_ta_request(
     };
 
     let num_params = msg_args.num_params as usize;
+    let client_params = num_params
+        .checked_sub(skip)
+        .ok_or(OpteeSmcReturnCode::EBadCmd)?;
+    if client_params > UteeParamOwned::TEE_NUM_PARAMS {
+        return Err(OpteeSmcReturnCode::EBadCmd);
+    }
     for (i, param) in msg_args
         .params
         .iter()
@@ -746,14 +756,20 @@ fn get_shm_info_from_optee_msg_param_tmem(
     let aligned_addr = phys_addr - page_offset as u64;
 
     // Calculate number of pages needed
-    let num_pages = (page_offset + size).div_ceil(PAGE_SIZE);
+    let num_pages = page_offset
+        .checked_add(size)
+        .ok_or(OpteeSmcReturnCode::EBadAddr)?
+        .div_ceil(PAGE_SIZE);
 
     // Build page address list
     let mut page_addrs = Vec::with_capacity(num_pages);
-    for i in 0..num_pages {
-        let page_addr = aligned_addr + (i * PAGE_SIZE) as u64;
+    let mut page_addr = aligned_addr;
+    for _ in 0..num_pages {
         page_addrs
             .push(PhysPageAddr::new(page_addr.truncate()).ok_or(OpteeSmcReturnCode::EBadAddr)?);
+        page_addr = page_addr
+            .checked_add(PAGE_SIZE as u64)
+            .ok_or(OpteeSmcReturnCode::EBadAddr)?;
     }
 
     ShmInfo::new(page_addrs.into_boxed_slice(), page_offset)
diff --git a/litebox_shim_optee/src/ptr.rs b/litebox_shim_optee/src/ptr.rs
index 2a54bac25..06a492506 100644
--- a/litebox_shim_optee/src/ptr.rs
+++ b/litebox_shim_optee/src/ptr.rs
@@ -94,11 +94,6 @@ fn align_down(address: usize, align: usize) -> usize {
     address & !(align - 1)
 }
 
-#[inline]
-fn align_up(len: usize, align: usize) -> usize {
-    len.next_multiple_of(align)
-}
-
 /// Represent a physical pointer to an object with on-demand mapping.
 /// - `pages`: An array of page-aligned physical addresses. We expect physical addresses in this array are
 /// virtually contiguous.
@@ -167,18 +162,23 @@ impl<const ALIGN: usize> PhysMutPtr<ALIGN> {
             ));
         }
         let start_page = align_down(pa, ALIGN);
-        let end_page = align_up(
-            pa.checked_add(bytes).ok_or(PhysPointerError::Overflow)?,
-            ALIGN,
-        );
-        let mut pages = alloc::vec::Vec::with_capacity((end_page - start_page) / ALIGN);
+        let end_page = pa
+            .checked_add(bytes)
+            .and_then(|end| end.checked_next_multiple_of(ALIGN))
+            .ok_or(PhysPointerError::Overflow)?;
+        let span = end_page
+            .checked_sub(start_page)
+            .ok_or(PhysPointerError::Overflow)?;
+        let mut pages = alloc::vec::Vec::with_capacity(span / ALIGN);
         let mut current_page = start_page;
         while current_page < end_page {
             pages.push(
                 PhysPageAddr::<ALIGN>::new(current_page)
                     .ok_or(PhysPointerError::InvalidPhysicalAddress(current_page))?,
             );
-            current_page += ALIGN;
+            current_page = current_page
+                .checked_add(ALIGN)
+                .ok_or(PhysPointerError::Overflow)?;
         }
         Self::new(&pages, pa - start_page)
     }
@@ -377,7 +377,10 @@ impl<const ALIGN: usize> PhysMutPtr<ALIGN> {
             )
             .ok_or(PhysPointerError::Overflow)?;
         let start = skip / ALIGN;
-        let end = (skip + size).div_ceil(ALIGN);
+        let end = skip
+            .checked_add(size)
+            .ok_or(PhysPointerError::Overflow)?
+            .div_ceil(ALIGN);
         unsafe {
             self.map_range(start, end, perms)?;
         }
diff --git a/litebox_shim_optee/src/syscalls/ldelf.rs b/litebox_shim_optee/src/syscalls/ldelf.rs
index 2d9d179ba..193780b07 100644
--- a/litebox_shim_optee/src/syscalls/ldelf.rs
+++ b/litebox_shim_optee/src/syscalls/ldelf.rs
@@ -14,6 +14,27 @@ fn align_down(addr: usize, align: usize) -> usize {
 }
 
 impl Task {
+    #[inline]
+    fn checked_map_size(
+        num_bytes: usize,
+        pad_begin: usize,
+        pad_end: usize,
+    ) -> Result<usize, TeeResult> {
+        num_bytes
+            .checked_add(pad_begin)
+            .and_then(|t| t.checked_add(pad_end))
+            .and_then(|t| t.checked_next_multiple_of(PAGE_SIZE))
+            .ok_or(TeeResult::BadParameters)
+    }
+
+    #[inline]
+    fn checked_pad_end_start(padded_start: usize, num_bytes: usize) -> Result<usize, TeeResult> {
+        padded_start
+            .checked_add(num_bytes)
+            .and_then(|end| end.checked_next_multiple_of(PAGE_SIZE))
+            .ok_or(TeeResult::BadParameters)
+    }
+
     /// OP-TEE's syscall to map zero-initialized memory with padding.
     /// This function pads `pad_begin` bytes before and `pad_end` bytes after the
     /// zero-initialized `num_bytes` bytes. `va` can contain a hint address which
@@ -48,11 +69,7 @@ impl Task {
         }
 
         // TODO: Check whether flags contains `LDELF_MAP_FLAG_SHAREABLE` once we support sharing of file-based mappings.
-        let total_size = num_bytes
-            .checked_add(pad_begin)
-            .and_then(|t| t.checked_add(pad_end))
-            .ok_or(TeeResult::BadParameters)?
-            .next_multiple_of(PAGE_SIZE);
+        let total_size = Self::checked_map_size(num_bytes, pad_begin, pad_end)?;
         if addr.checked_add(total_size).is_none() {
             return Err(TeeResult::BadParameters);
         }
@@ -67,7 +84,10 @@ impl Task {
         let addr = self
             .sys_mmap(addr, total_size, ProtFlags::PROT_READ_WRITE, flags, -1, 0)
            .map_err(|_| TeeResult::OutOfMemory)?;
-        let padded_start = addr.as_usize() + pad_begin;
+        let padded_start = addr
+            .as_usize()
+            .checked_add(pad_begin)
+            .ok_or(TeeResult::BadParameters)?;
 
         // Unmap the padding regions to free physical memory.
         // Using munmap instead of mprotect(PROT_NONE) actually deallocates the frames.
@@ -77,8 +97,11 @@ impl Task {
             let _ = self.sys_munmap(addr, pad_begin_end - addr.as_usize());
         }
         // pad_end region: [align_up(padded_start + num_bytes, PAGE_SIZE), addr + total_size)
-        let pad_end_start = (padded_start + num_bytes).next_multiple_of(PAGE_SIZE);
-        let region_end = addr.as_usize() + total_size;
+        let pad_end_start = Self::checked_pad_end_start(padded_start, num_bytes)?;
+        let region_end = addr
+            .as_usize()
+            .checked_add(total_size)
+            .ok_or(TeeResult::BadParameters)?;
         if pad_end_start < region_end {
             let _ = self.sys_munmap(
                 UserMutPtr::from_usize(pad_end_start),
@@ -172,11 +195,7 @@ impl Task {
             return Err(TeeResult::BadParameters);
         }
 
-        let total_size = num_bytes
-            .checked_add(pad_begin)
-            .and_then(|t| t.checked_add(pad_end))
-            .ok_or(TeeResult::BadParameters)?
-            .next_multiple_of(PAGE_SIZE);
+        let total_size = Self::checked_map_size(num_bytes, pad_begin, pad_end)?;
         if addr.checked_add(total_size).is_none() {
             return Err(TeeResult::BadParameters);
         }
@@ -200,7 +219,10 @@ impl Task {
                 0,
             )
             .map_err(|_| TeeResult::OutOfMemory)?;
-        let padded_start = addr.as_usize() + pad_begin;
+        let padded_start = addr
+            .as_usize()
+            .checked_add(pad_begin)
+            .ok_or(TeeResult::BadParameters)?;
         if padded_start == 0 {
             let _ = self.sys_munmap(addr, total_size).ok();
             return Err(TeeResult::BadFormat);
         }
@@ -225,13 +247,14 @@ impl Task {
         } else if flags.contains(LdelfMapFlags::LDELF_MAP_FLAG_EXECUTABLE) {
             prot |= ProtFlags::PROT_EXEC;
         }
+        let prot_start = align_down(padded_start, PAGE_SIZE);
+        let prot_len = padded_start
+            .checked_sub(prot_start)
+            .and_then(|offset| offset.checked_add(num_bytes))
+            .and_then(|len| len.checked_next_multiple_of(PAGE_SIZE))
+            .ok_or(TeeResult::BadParameters)?;
         if self
-            .sys_mprotect(
-                UserMutPtr::from_usize(align_down(padded_start, PAGE_SIZE)),
-                (num_bytes + padded_start - align_down(padded_start, PAGE_SIZE))
-                    .next_multiple_of(PAGE_SIZE),
-                prot,
-            )
+            .sys_mprotect(UserMutPtr::from_usize(prot_start), prot_len, prot)
             .is_err()
         {
             let _ = self.sys_munmap(addr, total_size).ok();
@@ -246,8 +269,11 @@ impl Task {
             let _ = self.sys_munmap(addr, pad_begin_end - addr.as_usize());
         }
         // pad_end region: [align_up(padded_start + num_bytes, PAGE_SIZE), addr + total_size)
-        let pad_end_start = (padded_start + num_bytes).next_multiple_of(PAGE_SIZE);
-        let region_end = addr.as_usize() + total_size;
+        let pad_end_start = Self::checked_pad_end_start(padded_start, num_bytes)?;
+        let region_end = addr
+            .as_usize()
+            .checked_add(total_size)
+            .ok_or(TeeResult::BadParameters)?;
         if pad_end_start < region_end {
             let _ = self.sys_munmap(
                 UserMutPtr::from_usize(pad_end_start),
diff --git a/litebox_shim_optee/src/syscalls/mm.rs b/litebox_shim_optee/src/syscalls/mm.rs
index 5bd3fa90a..116421b8f 100644
--- a/litebox_shim_optee/src/syscalls/mm.rs
+++ b/litebox_shim_optee/src/syscalls/mm.rs
@@ -9,9 +9,9 @@ use litebox_common_linux::{MapFlags, ProtFlags, errno::Errno};
 use crate::{Task, UserMutPtr};
 
 #[inline]
-fn align_up(addr: usize, align: usize) -> usize {
+fn align_up(addr: usize, align: usize) -> Option<usize> {
     debug_assert!(align.is_power_of_two());
-    (addr + align - 1) & !(align - 1)
+    addr.checked_next_multiple_of(align)
 }
 
 impl Task {
@@ -66,7 +66,7 @@
            return Err(Errno::EINVAL);
         }
 
-        let aligned_len = align_up(len, PAGE_SIZE);
+        let aligned_len = align_up(len, PAGE_SIZE).ok_or(Errno::ENOMEM)?;
         if aligned_len == 0 {
             return Err(Errno::ENOMEM);
         }
diff --git a/litebox_shim_optee/src/syscalls/tee.rs b/litebox_shim_optee/src/syscalls/tee.rs
index 25e8f593a..fdaf9424e 100644
--- a/litebox_shim_optee/src/syscalls/tee.rs
+++ b/litebox_shim_optee/src/syscalls/tee.rs
@@ -21,9 +21,9 @@ use crate::{
 };
 
 #[inline]
-fn align_up(addr: usize, align: usize) -> usize {
+fn align_up(addr: usize, align: usize) -> Option<usize> {
     debug_assert!(align.is_power_of_two());
-    (addr + align - 1) & !(align - 1)
+    addr.checked_next_multiple_of(align)
 }
 
 #[inline]
@@ -254,8 +254,10 @@ impl Task {
             let len = len
                 .checked_add(buf.as_usize() - align_down(buf.as_usize(), PAGE_SIZE))
                 .ok_or(TeeResult::AccessConflict)?;
-            NonZeroPageSize::<PAGE_SIZE>::new(align_up(len, PAGE_SIZE))
-                .ok_or(TeeResult::AccessConflict)?
+            NonZeroPageSize::<PAGE_SIZE>::new(
+                align_up(len, PAGE_SIZE).ok_or(TeeResult::AccessConflict)?,
+            )
+            .ok_or(TeeResult::AccessConflict)?
         };
         if let Some(perms) = self.global.pm.get_memory_permissions(start, aligned_len) {
             if (flags.contains(TeeMemoryAccessRights::TEE_MEMORY_ACCESS_READ)