diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 24404ae..776c5f2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -5,35 +5,23 @@ on: branches: - main - develop - workflow_dispatch: + pull_request: + branches: + - main + - develop schedule: - - cron: '15 4 */1 * *' - -env: - CARGO_TERM_COLOR: always + - cron: '0 3 * * *' jobs: build: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - - name: Install tools - run: | - rustup default nightly - rustup component add rust-src - - - name: Build Bootloader and Kernel - run: | - cd src - if [[ $(git branch --show-current) == "main" ]]; then - ./builder.rs build -r -p - else - ./builder.rs build - fi - - - name: Upload artifacts - uses: actions/upload-artifact@v4 + - name: Setup the environment + run: rustup default nightly + - name: Build + run: cd src && cargo xtask build + - uses: actions/upload-artifact@v4 with: name: MilvusVisor path: src/bin/ diff --git a/.github/workflows/build_stable.yml b/.github/workflows/build_stable.yml new file mode 100644 index 0000000..efe69be --- /dev/null +++ b/.github/workflows/build_stable.yml @@ -0,0 +1,24 @@ +name: Build Hypervisor (Stable) + +on: + push: + branches: + - main + pull_request: + branches: + - main + schedule: + - cron: '0 0 1 * *' + +jobs: + build_stable: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Build + run: cd src && cargo xtask build -r -p + - uses: actions/upload-artifact@v4 + with: + name: MilvusVisor + path: src/bin/ + compression-level: 9 diff --git a/.github/workflows/check.yml b/.github/workflows/rustfmt.yml similarity index 61% rename from .github/workflows/check.yml rename to .github/workflows/rustfmt.yml index d221fdf..17e3e86 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/rustfmt.yml @@ -1,4 +1,4 @@ -name: Check compile errors and miss formatting +name: Rustfmt on: pull_request: - main - develop -env: - CARGO_TERM_COLOR:
always - jobs: check: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - name: Install tools - run: | - rustup default nightly - rustup component add rust-src - rustup component add rustfmt - - name: Run cargo fmt id: fmt continue-on-error: true @@ -35,12 +26,4 @@ jobs: echo "\`\`\`" >> /tmp/message gh pr comment -F /tmp/message "$REF" if: steps.fmt.outcome != 'success' - - name: Build UEFI bootloader - run: | - cd src/hypervisor_bootloader - cargo build - - name: Build kernel - run: | - cd src/hypervisor_kernel - cargo build diff --git a/README.md b/README.md index fa25e1a..d1c2aff 100644 --- a/README.md +++ b/README.md @@ -16,9 +16,10 @@ to the users without virtualization overhead. Currently, MilvusVisor provides the following function. -You can build with enabling some functions by `./builder.rs build -f feature1,feautre2,...`.(`featureN` is described +You can build with enabling some functions by `cargo xtask build -f feature1,feautre2,...`.(`featureN` is described like `Feature Name: feature_name` in each section.) -If you want to build with extra features, you can build by `make custom_all FEATURES=default,feature1,feature2,...`. +If you want to build with extra features, you can build by `cargo xtask build -f default,feature1,feature2,...`. +If you want to build without any features, you can build by `cargo xtask build -f minimum`. - Protecting non-volatile data in devices from guest OS (e.g. Firmware, MAC address) - Intel I210 (Feature Name: `i210`) @@ -83,22 +84,23 @@ The following table shows which feature worked on which machines. 
## How to build the hypervisor -### By Rust toolchain +### By Rust toolchains #### Requirements -- `cargo`, `rustc`, and `rust-src` (you can install from https://rustup.rs/) - - Currently, nightly build is needed +- Rust 1.88 or later + - cargo + - rustc #### Steps (commands list) ```bash -rustup component add rust-src cd path/to/repo-root/src -./builder.rs build +cargo xtask build # Debug Build +cargo xtask build -r # Release build ``` -To customize build options, please see `./builder.rs help`. +To customize build options, please see `cargo xtask help`. Next [How to run the hypervisor](#how-to-run-the-hypervisor) @@ -115,7 +117,7 @@ Next [How to run the hypervisor](#how-to-run-the-hypervisor) ```bash cd path/to/repo-root/src -./build_by_docker.sh # You can add arguments to pass the make command, like as `-f FEATURES=...` +./build_by_docker.sh # You can add arguments to pass the cargo xtask command, like as `-f ...` ``` For more detail, please see the scripts. @@ -125,13 +127,16 @@ For more detail, please see the scripts. ### On QEMU First, please install QEMU that supports emulating `QEMU ARM Virtual Machine`, `a64fx` CPU. -Then, run the following command to run the built hypervisor. +Then, you should build the hypervisor by the above method. +After that, run the following command to run the built hypervisor. ```bash cd path/to/repo-root/src -./builder.rs run --bios /usr/share/qemu-efi/QEMU_EFI.fd #Please set the path of your QEMU_EFI.fd +cargo xtask run --bios /usr/share/qemu-efi/QEMU_EFI.fd #Please set the path of your QEMU_EFI.fd ``` +For more options, please see `cargo xtask help`. + ### On a physical machine from a USB memory stick #### Requirement @@ -142,13 +147,18 @@ cd path/to/repo-root/src #### Steps -1. Attach your USB memory stick to the development machine which built the hypervisor binary. -2. Identify the EFI partition (in the following description, `/dev/sdX1` is the EFI partition). -3. 
Run `sudo ./builder.rs write -d /dev/sdX1` to copy the binary. +1. Build the hypervisor by the above method. +2. Attach your USB memory stick to the development machine which built the hypervisor binary. +3. Identify the EFI partition (in the following description, `/dev/sdX1` is the EFI partition). +4. Run `cargo xtask write -d /dev/sdX1` to copy the binary. !! Please be careful not to specify a wrong partition as `DEVICE` because the script mount/unmount the partition and copies the binary file with root privilege.!! -4. Detach the USB memory from the development machine, and attach it to the physical machine to run the hypervisor. -5. Boot the physical machine with UEFI, and specify `BOOTAA64.EFI` in the EFI partition as the EFI application to boot. +5. Detach the USB memory from the development machine, and attach it to the physical machine to run the hypervisor. +6. Boot the physical machine with UEFI, and specify `BOOTAA64.EFI` in the EFI partition as the EFI application to boot. + +`cargo xtask write` uses `sudo` command. +If you can access the target device without the root privilege, you can add the `-u` option. +For more options, please see `cargo xtask help`. ### PXE Boot @@ -171,7 +181,7 @@ The default settings assume that files are deploy on tftp server likes below. #### Steps -1. Build MilvusVisor with tfp feature like `./builder.rs build -f default,tftp` +1. Build MilvusVisor with tftp feature like `cargo xtask build -f default,tftp` 2. Deploy `BOOTAA64.EFI` and `hypervisor_kernel` on tftp server.(you can rename `BOOTAA64.EFI`) 3. Modify DHCP setting to change the boot file to `BOOTAA64.EFI`(if you renamed, adjust the name). diff --git a/docs/raspberry_pi4.md b/docs/raspberry_pi4.md index e4eb92a..454be43 100644 --- a/docs/raspberry_pi4.md +++ b/docs/raspberry_pi4.md @@ -9,7 +9,7 @@ To enable Raspberry Pi 4 support, you need to add `raspberrypi` feature flag on bootloader.
```shell -./builder.rs -r -f raspberrypi +cargo xtask build -r -f raspberrypi ``` ## How to boot diff --git a/src/.cargo/config.toml b/src/.cargo/config.toml new file mode 100644 index 0000000..35049cb --- /dev/null +++ b/src/.cargo/config.toml @@ -0,0 +1,2 @@ +[alias] +xtask = "run --package xtask --" diff --git a/src/Cargo.toml b/src/Cargo.toml index 9992648..1e961b2 100644 --- a/src/Cargo.toml +++ b/src/Cargo.toml @@ -1,6 +1,17 @@ +# Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) +# All rights reserved. +# +# This software is released under the MIT License. +# http://opensource.org/licenses/mit-license.php [workspace] -members = ["common", "hypervisor_bootloader", "hypervisor_kernel", "uefi"] -resolver = "2" +default-members = ["hypervisor_bootloader", "hypervisor_kernel"] +members = ["common", "hypervisor_bootloader", "hypervisor_kernel", "uefi", "xtask"] +resolver = "3" + +[workspace.package] +version = "1.5.0" +edition = "2024" +license = "MIT" [profile.release.package.hypervisor_bootloader] strip = "symbols" diff --git a/src/Makefile.legacy b/src/Makefile.legacy deleted file mode 100644 index 6e02a88..0000000 --- a/src/Makefile.legacy +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (c) 2022 RIKEN -# Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) -# All rights reserved. -# -# This software is released under the MIT License. 
-# http://opensource.org/licenses/mit-license.php - -## Build System with make -## This is deprecated, please use builder.rs - -MAKE = make -COMMON_MODULE = common -UEFI_MODULE = uefi -BOOTLOADER = hypervisor_bootloader -KERNEL = hypervisor_kernel -CP = cp -MKDIR = mkdir -p -QEMU ?= qemu-system-aarch64 -RM = rm -rf -CARGO = cargo -export PROJECT_HASH := $(shell git rev-parse HEAD 2> /dev/null) -export RUSTC_VERSION := $(shell rustc --version 2> /dev/null) -CARGO_BUILD_OPTION = --release -MOUNT = mount -UMOUNT = umount -CD = cd -QEMU_EFI ?= QEMU_EFI.fd - -all: bootloader kernel - $(MKDIR) bin/EFI/BOOT/ - $(CP) target/*/release/$(BOOTLOADER).efi bin/EFI/BOOT/BOOTAA64.EFI - $(CP) target/*/release/$(KERNEL) bin/EFI/BOOT/$(KERNEL) - -minimum_all: export CARGO_BUILD_OPTION :=--no-default-features $(CARGO_BUILD_OPTION) -minimum_all: all - -custom_all: export CARGO_BUILD_OPTION :=--no-default-features --features $(FEATURES) $(CARGO_BUILD_OPTION) -custom_all: all - -bootloader: .FORCE - $(CD) $(BOOTLOADER) && $(CARGO) build $(CARGO_BUILD_OPTION) - -kernel: .FORCE - $(CD) $(KERNEL) && $(CARGO) build $(CARGO_BUILD_OPTION) - -clean: - $(RM) bin - $(CARGO) clean - -fmt: - $(CARGO) fmt - -run: all - $(QEMU) -m 1G -cpu a64fx -machine virt,virtualization=on,iommu=smmuv3 -smp 4 -nographic -bios $(QEMU_EFI) -drive file=fat:rw:bin/,format=raw,media=disk - -debug: all - $(QEMU) -m 1G -cpu a64fx -machine virt,virtualization=on,iommu=smmuv3 -smp 4 -monitor stdio -bios $(QEMU_EFI) -drive file=fat:rw:bin/,format=raw,media=disk - -write: - $(MOUNT) $(DEVICE) /mnt - $(CP) bin/EFI/BOOT/BOOTAA64.EFI /mnt/EFI/BOOT/BOOTAA64.EFI - $(CP) bin/EFI/BOOT/$(KERNEL) /mnt/EFI/BOOT/$(KERNEL) - $(UMOUNT) /mnt - -.FORCE: diff --git a/src/build_by_docker.sh b/src/build_by_docker.sh index e4529d8..fb75146 100755 --- a/src/build_by_docker.sh +++ b/src/build_by_docker.sh @@ -1,4 +1,4 @@ - #!/bin/bash +#!/bin/bash # Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) # All 
rights reserved. @@ -6,6 +6,6 @@ # This software is released under the MIT License. # http://opensource.org/licenses/mit-license.php -BUILD_CMDLINE="rustup default nightly && rustup component add rust-src && cd /workspace && ./builder.rs build $@" +BUILD_CMDLINE="cd /workspace && cargo xtask build $@" docker run -it --rm -v ${PWD}:/workspace rust:latest /bin/bash -c "${BUILD_CMDLINE}" diff --git a/src/common/Cargo.toml b/src/common/Cargo.toml index dd0e871..a85c4eb 100644 --- a/src/common/Cargo.toml +++ b/src/common/Cargo.toml @@ -6,9 +6,9 @@ # http://opensource.org/licenses/mit-license.php [package] name = "common" -version = "1.4.1" -edition = "2021" -resolver = "2" +edition.workspace = true +license.workspace = true +version.workspace = true [features] advanced_memory_manager = [] diff --git a/src/common/src/acpi.rs b/src/common/src/acpi.rs index 786125f..34a9347 100644 --- a/src/common/src/acpi.rs +++ b/src/common/src/acpi.rs @@ -18,7 +18,7 @@ pub mod madt; const RSDP_SIGNATURE: [u8; 8] = *b"RSD PTR "; const XSDT_SIGNATURE: [u8; 4] = *b"XSDT"; -pub const XSDT_STRUCT_SIZE: usize = core::mem::size_of::(); +pub const XSDT_STRUCT_SIZE: usize = size_of::(); #[repr(C, packed)] pub struct RSDP { diff --git a/src/common/src/acpi/iort.rs b/src/common/src/acpi/iort.rs index b9bf3c0..a8bab73 100644 --- a/src/common/src/acpi/iort.rs +++ b/src/common/src/acpi/iort.rs @@ -106,7 +106,7 @@ impl Iterator for IdMappingIter { } else { let a = self.p; self.n -= 1; - self.p += core::mem::size_of::(); + self.p += size_of::(); Some(unsafe { read_unaligned(a as *const Self::Item) }) } } diff --git a/src/common/src/acpi/madt.rs b/src/common/src/acpi/madt.rs index 742a3d0..a85410d 100644 --- a/src/common/src/acpi/madt.rs +++ b/src/common/src/acpi/madt.rs @@ -69,7 +69,7 @@ pub struct GicInterruptTranslationServiceStructureList { impl MADT { pub const SIGNATURE: [u8; 4] = *b"APIC"; - const STRUCT_SIZE: usize = core::mem::size_of::(); + const STRUCT_SIZE: usize = size_of::(); pub fn 
get_gic_list(&self) -> GicCpuInterfaceStructureList { let length = self.length as usize - Self::STRUCT_SIZE; @@ -97,7 +97,7 @@ impl MADT { } base_address += record_length as usize; } - return None; + None } pub fn get_gic_its_list(&self) -> GicInterruptTranslationServiceStructureList { diff --git a/src/common/src/cpu.rs b/src/common/src/cpu.rs index 6b24eec..2d3dde0 100644 --- a/src/common/src/cpu.rs +++ b/src/common/src/cpu.rs @@ -9,13 +9,25 @@ //! CPU Specified Assembly functions //! -use crate::{bitmask, PAGE_MASK, PAGE_SHIFT}; +use crate::{PAGE_MASK, PAGE_SHIFT, bitmask}; use core::arch::asm; #[derive(Clone)] pub struct InterruptFlag(u64); +impl Default for InterruptFlag { + fn default() -> Self { + Self::new() + } +} + +impl InterruptFlag { + pub const fn new() -> Self { + Self(0) + } +} + pub const AA64_INSTRUCTION_SIZE: usize = 4; /* DAIF */ @@ -28,24 +40,37 @@ pub const CNTHCTL_EL2_EL1PCTEN: u64 = 1 << 0; /* CPACR_EL1 */ pub const CPACR_EL1_TTA_BIT_OFFSET: u64 = 28; -//pub const CPACR_EL1_TTA: u64 = 1 << CPACR_EL1_TTA_BIT_OFFSET; +// pub const CPACR_EL1_TTA: u64 = 1 << CPACR_EL1_TTA_BIT_OFFSET; +pub const CPACR_EL1_SMEN_BITS_OFFSET: u64 = 24; +// pub const CPACR_EL1_SMEN: u64 = 0b11 << CPACR_EL1_SMEN_BITS_OFFSET; +pub const CPACR_EL1_SMEN_TRAP_ALL: u64 = 0b00 << CPACR_EL1_SMEN_BITS_OFFSET; +pub const CPACR_EL1_SMEN_TRAP_NONE: u64 = 0b11 << CPACR_EL1_SMEN_BITS_OFFSET; pub const CPACR_EL1_FPEN_BITS_OFFSET: u64 = 20; -//pub const CPACR_EL1_FPEN: u64 = 0b11 << CPACR_EL1_FPEN_BITS_OFFSET; +// pub const CPACR_EL1_FPEN: u64 = 0b11 << CPACR_EL1_FPEN_BITS_OFFSET; +pub const CPACR_EL1_FPEN_TRAP_ALL: u64 = 0b00 << CPACR_EL1_FPEN_BITS_OFFSET; +pub const CPACR_EL1_FPEN_TRAP_NONE: u64 = 0b11 << CPACR_EL1_FPEN_BITS_OFFSET; pub const CPACR_EL1_ZEN_BITS_OFFSET: u64 = 16; -//pub const CPACR_EL1_ZEN: u64 = 0b11 << CPACR_EL1_ZEN_BITS_OFFSET; +// pub const CPACR_EL1_ZEN: u64 = 0b11 << CPACR_EL1_ZEN_BITS_OFFSET; +pub const CPACR_EL1_ZEN_TRAP_NONE: u64 = 0b11 << 
CPACR_EL1_ZEN_BITS_OFFSET; +pub const CPACR_EL1_ZEN_TRAP_ALL: u64 = 0b00 << CPACR_EL1_ZEN_BITS_OFFSET; /* CPTR_EL2 */ -pub const CPTR_EL2_TTA_BIT_OFFSET_WITH_E2H: u64 = 28; -pub const CPTR_EL2_TTA_WITH_E2H: u64 = 1 << CPTR_EL2_TTA_BIT_OFFSET_WITH_E2H; +pub const CPTR_EL2_TCPAC_BIT_OFFSET: u64 = 31; +pub const CPTR_EL2_TCPAC: u64 = 1 << CPTR_EL2_TCPAC_BIT_OFFSET; +pub const CPTR_EL2_TAM_BIT_OFFSET: u64 = 30; +pub const CPTR_EL2_TAM: u64 = 1 << CPTR_EL2_TAM_BIT_OFFSET; pub const CPTR_EL2_TTA_BIT_OFFSET_WITHOUT_E2H: u64 = 20; pub const CPTR_EL2_TTA_WITHOUT_E2H: u64 = 1 << CPTR_EL2_TTA_BIT_OFFSET_WITHOUT_E2H; -pub const CPTR_EL2_FPEN_BITS_OFFSET: u64 = 20; -pub const CPTR_EL2_FPEN: u64 = 0b11 << CPTR_EL2_FPEN_BITS_OFFSET; -pub const CPTR_EL2_FPEN_NO_TRAP: u64 = 0b11 << CPTR_EL2_FPEN_BITS_OFFSET; -pub const CPTR_EL2_ZEN_BITS_OFFSET: u64 = 16; -pub const CPTR_EL2_ZEN: u64 = 0b11 << CPTR_EL2_ZEN_BITS_OFFSET; -pub const CPTR_EL2_ZEN_NO_TRAP: u64 = 0b11 << CPTR_EL2_ZEN_BITS_OFFSET; -//pub const CPTR_EL2_RES1: u64 = 0b11111111 | (1 << 9) | (0b11 << 12); +pub const CPTR_EL2_TSM_BIT_OFFSET: u64 = 12; +pub const CPTR_EL2_TSM: u64 = 1 << CPTR_EL2_TSM_BIT_OFFSET; +pub const CPTR_EL2_TSM_TRAP: u64 = 1 << CPTR_EL2_TSM_BIT_OFFSET; +pub const CPTR_EL2_TFP_BIT_OFFSET: u64 = 10; +pub const CPTR_EL2_TFP: u64 = 1 << CPTR_EL2_TFP_BIT_OFFSET; +pub const CPTR_EL2_TFP_TRAP: u64 = 1 << CPTR_EL2_TFP_BIT_OFFSET; +pub const CPTR_EL2_TZ_BIT_OFFSET: u64 = 8; +pub const CPTR_EL2_TZ: u64 = 1 << CPTR_EL2_TZ_BIT_OFFSET; +pub const CPTR_EL2_TZ_TRAP: u64 = 1 << CPTR_EL2_TZ_BIT_OFFSET; +pub const CPTR_EL2_RES1: u64 = (1 << 13) | (1 << 9) | bitmask!(7, 0); /* TCR_EL2 */ pub const TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H: u64 = 32; @@ -122,11 +147,15 @@ pub const VTCR_EL2_T0SZ: u64 = 0b111111 << VTCR_EL2_T0SZ_BITS_OFFSET; /* SPSR_EL2 */ pub const SPSR_EL2_M: u64 = 0b1111; pub const SPSR_EL2_M_EL0T: u64 = 0b0000; +pub const SPSR_EL2_DEFAULT: u64 = (1 << 7) | (1 << 6) | (1 << 2) | (1) /* EL1h(EL1 + Use SP_EL1) */; 
/* ID_AA64PFR0_EL1 */ pub const ID_AA64PFR0_EL1_SVE: u64 = 0b1111 << 32; pub const ID_AA64PFR0_EL1_GIC: u64 = 0b1111 << 24; +/* ID_AA64PFR1_EL1 */ +pub const ID_AA64PFR1_EL1_SME: u64 = 0b1111 << 24; + /* ID_AA64MMFR0_EL1 */ pub const ID_AA64MMFR0_EL1_PARANGE: u64 = 0b1111; @@ -145,48 +174,35 @@ pub const CCSIDR_EL1_LINE_SIZE: u64 = 0b111 << CCSIDR_EL1_LINE_SIZE_BITS_OFFSET; /* ZCR_EL2 */ pub const MAX_ZCR_EL2_LEN: u64 = 0x1ff; +/* ICC_SRE_EL2 */ +pub const ICC_SRE_EL2_ENABLE_BIT_OFFSET: u64 = 3; +pub const ICC_SRE_EL2_ENABLE: u64 = 1 << ICC_SRE_EL2_ENABLE_BIT_OFFSET; +pub const ICC_SRE_EL2_SRE_BIT_OFFSET: u64 = 0; +pub const ICC_SRE_EL2_SRE: u64 = 1 << ICC_SRE_EL2_SRE_BIT_OFFSET; + /// Execute SMC #0 with SMC Calling Convention 1.2 -pub fn secure_monitor_call( - x0: &mut u64, - x1: &mut u64, - x2: &mut u64, - x3: &mut u64, - x4: &mut u64, - x5: &mut u64, - x6: &mut u64, - x7: &mut u64, - x8: &mut u64, - x9: &mut u64, - x10: &mut u64, - x11: &mut u64, - x12: &mut u64, - x13: &mut u64, - x14: &mut u64, - x15: &mut u64, - x16: &mut u64, - x17: &mut u64, -) { +pub fn secure_monitor_call(regs: &mut [u64; 32]) { unsafe { asm!( "smc 0", - inout("x0") * x0, - inout("x1") * x1, - inout("x2") * x2, - inout("x3") * x3, - inout("x4") * x4, - inout("x5") * x5, - inout("x6") * x6, - inout("x7") * x7, - inout("x8") * x8, - inout("x9") * x9, - inout("x10") * x10, - inout("x11") * x11, - inout("x12") * x12, - inout("x13") * x13, - inout("x14") * x14, - inout("x15") * x15, - inout("x16") * x16, - inout("x17") * x17, + inout("x0") regs[0], + inout("x1") regs[1], + inout("x2") regs[2], + inout("x3") regs[3], + inout("x4") regs[4], + inout("x5") regs[5], + inout("x6") regs[6], + inout("x7") regs[7], + inout("x8") regs[8], + inout("x9") regs[9], + inout("x10") regs[10], + inout("x11") regs[11], + inout("x12") regs[12], + inout("x13") regs[13], + inout("x14") regs[14], + inout("x15") regs[15], + inout("x16") regs[16], + inout("x17") regs[17], clobber_abi("C") ) }; @@ -545,6 +561,13 @@ 
pub fn get_id_aa64pfr0_el1() -> u64 { id_aa64pfr0_el1 } +#[inline(always)] +pub fn get_id_aa64pfr1_el1() -> u64 { + let id_aa64pfr1_el1: u64; + unsafe { asm!("mrs {:x}, id_aa64pfr1_el1", out(reg) id_aa64pfr1_el1) }; + id_aa64pfr1_el1 +} + #[inline(always)] pub fn get_mpidr_el1() -> u64 { let mpidr_el1: u64; @@ -564,6 +587,16 @@ pub fn advance_elr_el2() { set_elr_el2(get_elr_el2() + AA64_INSTRUCTION_SIZE as u64); } +#[inline(always)] +pub fn set_icc_sre_el2(icc_sre_el2: u64) { + unsafe { asm!("msr icc_sre_el2, {:x}", in(reg) icc_sre_el2) }; +} + +#[inline(always)] +pub fn set_ich_hcr_el2(ich_hcr_el2: u64) { + unsafe { asm!("msr ich_hcr_el2, {:x}", in(reg) ich_hcr_el2) }; +} + #[inline(always)] pub fn flush_tlb_el2() { unsafe { @@ -708,24 +741,24 @@ pub fn local_irq_fiq_restore(f: InterruptFlag) { /// * virtual_address - the virtual address to convert /// /// # Result -/// If succeeded, returns Ok(physical_address), otherwise(the address is not accessible) returns Err(()) +/// If succeeded, returns Ok(physical_address), otherwise(the address is not accessible) returns Err(par_el1) pub fn convert_virtual_address_to_physical_address_el2_read( virtual_address: usize, -) -> Result { +) -> Result { let aligned_virtual_address = virtual_address & PAGE_MASK; let offset = virtual_address & !PAGE_MASK; let aligned_physical_address: usize; unsafe { asm!(" at S1E2R, {:x} mrs {:x}, par_el1", - in(reg) (aligned_virtual_address), + in(reg) aligned_virtual_address, out(reg) aligned_physical_address) }; if (aligned_physical_address & 1) == 0 { Ok((aligned_physical_address & bitmask!(51, PAGE_SHIFT)) + offset) } else { - Err(()) + Err(aligned_physical_address as u64) } } @@ -737,24 +770,24 @@ pub fn convert_virtual_address_to_physical_address_el2_read( /// * virtual_address - the virtual address to convert /// /// # Result -/// If succeeded, returns Ok(physical_address), otherwise(the address is not accessible) returns Err(()) +/// If succeeded, returns Ok(physical_address), 
otherwise(the address is not accessible) returns Err(par_el1) pub fn convert_virtual_address_to_physical_address_el2_write( virtual_address: usize, -) -> Result { +) -> Result { let aligned_virtual_address = virtual_address & PAGE_MASK; let offset = virtual_address & !PAGE_MASK; let aligned_physical_address: usize; unsafe { asm!(" at S1E2W, {:x} mrs {:x}, par_el1", - in(reg) (aligned_virtual_address), + in(reg) aligned_virtual_address, out(reg) aligned_physical_address) }; if (aligned_physical_address & 1) == 0 { Ok((aligned_physical_address & bitmask!(51, PAGE_SHIFT)) + offset) } else { - Err(()) + Err(aligned_physical_address as u64) } } @@ -767,24 +800,24 @@ pub fn convert_virtual_address_to_physical_address_el2_write( /// /// # Result /// If succeeded, returns Ok(intermediate_physical_address), -/// otherwise(the address is not accessible) returns Err(()) +/// otherwise(the address is not accessible) returns Err(par_el1) pub fn convert_virtual_address_to_intermediate_physical_address_el0_read( virtual_address: usize, -) -> Result { +) -> Result { let aligned_virtual_address = virtual_address & PAGE_MASK; let offset = virtual_address & !PAGE_MASK; let aligned_physical_address: usize; unsafe { asm!(" at S1E0R, {:x} mrs {:x}, par_el1", - in(reg) (aligned_virtual_address), + in(reg) aligned_virtual_address, out(reg) aligned_physical_address) }; if (aligned_physical_address & 1) == 0 { Ok((aligned_physical_address & bitmask!(51, PAGE_SHIFT)) + offset) } else { - Err(()) + Err(aligned_physical_address as u64) } } @@ -797,24 +830,24 @@ pub fn convert_virtual_address_to_intermediate_physical_address_el0_read( /// /// # Result /// If succeeded, returns Ok(intermediate_physical_address), -/// otherwise(the address is not accessible) returns Err(()) +/// otherwise(the address is not accessible) returns `Err(par_el1)` pub fn convert_virtual_address_to_intermediate_physical_address_el1_read( virtual_address: usize, -) -> Result { +) -> Result { let aligned_virtual_address = 
virtual_address & PAGE_MASK; let offset = virtual_address & !PAGE_MASK; let aligned_physical_address: usize; unsafe { asm!(" at S1E1R, {:x} mrs {:x}, par_el1", - in(reg) (aligned_virtual_address), + in(reg) aligned_virtual_address, out(reg) aligned_physical_address) }; if (aligned_physical_address & 1) == 0 { Ok((aligned_physical_address & bitmask!(51, PAGE_SHIFT)) + offset) } else { - Err(()) + Err(aligned_physical_address as u64) } } @@ -827,24 +860,24 @@ pub fn convert_virtual_address_to_intermediate_physical_address_el1_read( /// /// # Result /// If succeeded, returns Ok(intermediate_physical_address), -/// otherwise(the address is not accessible) returns Err(()) +/// otherwise(the address is not accessible) returns `Err(par_el1)` pub fn convert_virtual_address_to_intermediate_physical_address_el1_write( virtual_address: usize, -) -> Result { +) -> Result { let aligned_virtual_address = virtual_address & PAGE_MASK; let offset = virtual_address & !PAGE_MASK; let aligned_physical_address: usize; unsafe { asm!(" at S1E1W, {:x} mrs {:x}, par_el1", - in(reg) (aligned_virtual_address), + in(reg) aligned_virtual_address, out(reg) aligned_physical_address) }; if (aligned_physical_address & 1) == 0 { Ok((aligned_physical_address & bitmask!(51, PAGE_SHIFT)) + offset) } else { - Err(()) + Err(aligned_physical_address as u64) } } diff --git a/src/common/src/lib.rs b/src/common/src/lib.rs index 1e997f7..38af53e 100644 --- a/src/common/src/lib.rs +++ b/src/common/src/lib.rs @@ -10,14 +10,12 @@ pub mod acpi; pub mod cpu; -#[cfg(feature = "advanced_memory_manager")] -pub mod memory_allocator; +mod memory_allocator; pub mod paging; pub mod serial_port; pub mod smmu; pub mod spin_flag; -#[cfg(not(feature = "advanced_memory_manager"))] -pub mod stack_memory_allocator; +mod stack_memory_allocator; #[cfg(feature = "advanced_memory_manager")] pub use memory_allocator::MemoryAllocator; @@ -31,19 +29,19 @@ use core::num::NonZeroUsize; use core::ptr::NonNull; /// The name of this 
hypervisor -pub const HYPERVISOR_NAME: &'static str = "MilvusVisor"; +pub const HYPERVISOR_NAME: &str = "MilvusVisor"; /// The hash value of VCS from the environment variable pub const HYPERVISOR_HASH_INFO: Option<&'static str> = option_env!("PROJECT_HASH"); /// The compiler information from the environment variables pub const COMPILER_INFO: Option<&'static str> = option_env!("RUSTC_VERSION"); /// The path of hypervisor_kernel -pub const HYPERVISOR_PATH: &'static str = "\\EFI\\BOOT\\hypervisor_kernel"; +pub const HYPERVISOR_PATH: &str = "\\EFI\\BOOT\\hypervisor_kernel"; /// The path of DTB written -pub const DTB_WRITTEN_PATH: &'static str = "\\EFI\\BOOT\\dtb"; +pub const DTB_WRITTEN_PATH: &str = "\\EFI\\BOOT\\dtb"; /// The path of hypervisor_kernel of tftp -pub const HYPERVISOR_TFTP_PATH: &'static str = "/uefi/hypervisor_kernel"; +pub const HYPERVISOR_TFTP_PATH: &str = "/uefi/hypervisor_kernel"; /// The path of payload uefi application -pub const UEFI_PAYLOAD_PATH: &'static str = "/uefi/grubaa64.efi"; +pub const UEFI_PAYLOAD_PATH: &str = "/uefi/grubaa64.efi"; /// The virtual address to map hypervisor_kernel (same as hypervisor_kernel/config/linkerscript.ld) pub const HYPERVISOR_VIRTUAL_BASE_ADDRESS: usize = 0x7FC0000000; /// The virtual address of serial port MMIO @@ -64,10 +62,12 @@ pub const STACK_PAGES: usize = 16; pub type HypervisorKernelMainType = fn(&mut SystemInformation); +pub type GeneralPurposeRegisters = [u64; 32]; + #[macro_export] macro_rules! 
bitmask { ($high:expr,$low:expr) => { - ((1 << (($high - $low) + 1)) - 1) << $low + (((1 << (($high - $low) + 1)) - 1) << $low) }; } diff --git a/src/common/src/memory_allocator.rs b/src/common/src/memory_allocator.rs index 81fe620..5a3a8e0 100644 --- a/src/common/src/memory_allocator.rs +++ b/src/common/src/memory_allocator.rs @@ -42,7 +42,7 @@ impl MemoryAllocator { const NUM_OF_POOL_ENTRIES: usize = 64; const NUM_OF_FREE_LIST: usize = 12; - /// Setup myself with with allocated address + /// Setup myself with allocated address /// /// All members of Self are uninitialized. /// Please be careful when you assign some value into the member which has a drop trait. @@ -58,7 +58,7 @@ impl MemoryAllocator { /// * `allocated_address` - the base address allocated /// * `allocated_size` - the allocated size pub fn init(&mut self, allocated_address: usize, allocated_size: usize) { - use core::mem::{forget, replace}; + use core::mem::replace; /* Initialize members */ self.free_memory_size = 0; @@ -66,33 +66,7 @@ impl MemoryAllocator { let _ = replace(&mut self.free_list, [None; Self::NUM_OF_FREE_LIST]); for e in &mut self.memory_entry_pool { - forget(replace( - e, - MemoryEntry { - previous: None, - next: None, - list_prev: None, - list_next: None, - start: 0, - end: 0, - enabled: false, - }, - )); - } - - self.free(allocated_address, allocated_size) - .expect("Failed to init memory"); - } - - /* - /// Setup MemoryAllocator with allocated address, and return Self - pub fn create(allocated_address: usize, allocated_size: usize) -> Self { - use core::mem::MaybeUninit; - - let mut pool: [MaybeUninit; Self::NUM_OF_POOL_ENTRIES] = - MaybeUninit::uninit_array(); - for e in &mut pool { - e.write(MemoryEntry { + *e = MemoryEntry { previous: None, next: None, list_prev: None, @@ -100,21 +74,16 @@ impl MemoryAllocator { start: 0, end: 0, enabled: false, - }); + }; } - let mut s = Self { - free_memory_size: 0, - first_entry: core::ptr::null_mut(), - free_list: [None; 
Self::NUM_OF_FREE_LIST], - memory_entry_pool: unsafe { MaybeUninit::array_assume_init(pool) }, - }; - s.free(allocated_address, allocated_size) + self.free(allocated_address, allocated_size) .expect("Failed to init memory"); + } - return s; + pub fn get_all_memory(&mut self) -> (usize /*base_address*/, usize /* number of pages*/) { + unreachable!() } - */ fn create_memory_entry(&mut self) -> Result<&'static mut MemoryEntry, MemoryAllocationError> { for e in &mut self.memory_entry_pool { @@ -452,9 +421,7 @@ impl MemoryAllocator { self.free_list[new_order] = Some(entry as *mut _); } else { loop { - if let Some(next_entry) = - list_entry.list_next.and_then(|n| Some(unsafe { &mut *n })) - { + if let Some(next_entry) = list_entry.list_next.map(|n| unsafe { &mut *n }) { if next_entry.get_size() >= entry.get_size() { list_entry.list_next = Some(entry as *mut _); entry.list_prev = Some(list_entry as *mut _); @@ -619,7 +586,7 @@ impl MemoryEntry { } pub fn is_first_entry(&self) -> bool { - self.previous == None + self.previous.is_none() } pub fn unchain_from_freelist(&mut self) { @@ -646,7 +613,7 @@ impl Iterator for FreeListIterMut { type Item = &'static mut MemoryEntry; fn next(&mut self) -> Option { if let Some(address) = self.entry { - let entry = unsafe { &mut *(address as *mut MemoryEntry) }; + let entry = unsafe { &mut *(address) }; self.entry = entry.list_next; /* ATTENTION: get **free_list's** next */ Some(entry) } else { diff --git a/src/common/src/paging.rs b/src/common/src/paging.rs index 4b9f82a..2455a26 100644 --- a/src/common/src/paging.rs +++ b/src/common/src/paging.rs @@ -10,11 +10,11 @@ //! 
use crate::cpu::{ - get_mair_el2, TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H, TCR_EL2_DS_WITHOUT_E2H, + TCR_EL2_DS_BIT_OFFSET_WITHOUT_E2H, TCR_EL2_DS_WITHOUT_E2H, TCR_EL2_T0SZ_BITS_OFFSET_WITHOUT_E2H, TCR_EL2_T0SZ_WITHOUT_E2H, - TCR_EL2_TG0_BITS_OFFSET_WITHOUT_E2H, TCR_EL2_TG0_WITHOUT_E2H, + TCR_EL2_TG0_BITS_OFFSET_WITHOUT_E2H, TCR_EL2_TG0_WITHOUT_E2H, get_mair_el2, }; -use crate::{bitmask, PAGE_MASK, PAGE_SIZE, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE}; +use crate::{PAGE_MASK, PAGE_SIZE, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE, bitmask}; pub const PAGE_TABLE_SIZE: usize = 0x1000; @@ -34,8 +34,8 @@ pub const MEMORY_PERMISSION_WRITABLE_BIT: u8 = 1; pub const MEMORY_PERMISSION_EXECUTABLE_BIT: u8 = 2; const STAGE_2_PAGE_ENTRY_ATTRIBUTE: u64 = - 1 << 10 /* AF bit */| - 0b11 << 8 /* SH bits (Inner sharable) */| + 1 << 10 /* AF bit */ | + 0b11 << 8 /* SH bits (Inner sharable) */ | 0b1111 << 2 /* MemAttr(Write-back) */; #[derive(Copy, Clone, Eq, PartialEq)] @@ -180,14 +180,12 @@ pub const fn calculate_number_of_concatenated_page_tables( } } -pub const fn page_align_up(size: usize) -> usize { - //assert_ne!(size, 0); - assert!(size != 0); +pub fn page_align_up(size: usize) -> usize { + assert_ne!(size, 0); ((size - 1) & PAGE_MASK) + PAGE_SIZE } -pub const fn stage2_page_align_up(size: usize) -> usize { - //assert_ne!(size, 0); - assert!(size != 0); +pub fn stage2_page_align_up(size: usize) -> usize { + assert_ne!(size, 0); ((size - 1) & STAGE_2_PAGE_MASK) + STAGE_2_PAGE_SIZE } diff --git a/src/common/src/smmu.rs b/src/common/src/smmu.rs index c44c36f..303d2b0 100644 --- a/src/common/src/smmu.rs +++ b/src/common/src/smmu.rs @@ -88,7 +88,7 @@ pub const SMMU_CR0_VMW: u32 = 0b111 << 6; pub const SMMU_CR1_TABLE_SH_BITS_OFFSET: u32 = 10; pub const SMMU_CR1_QUEUE_SH: u32 = 0b11 << 4; pub const SMMU_CR1_QUEUE_OC: u32 = 0b11 << 2; -pub const SMMU_CR1_QUEUE_IC: u32 = 0b11 << 0; +pub const SMMU_CR1_QUEUE_IC: u32 = 0b11; pub const SMMU_CR2_E2H: u32 = 1; @@ -110,8 +110,8 @@ pub const 
SMMU_GBPA_SHCFG_INCOMING: u32 = 0b01 << 12; pub const SMMU_VATOS_SID_SUBSTREAM_ID: u64 = bitmask!(51, 32); pub type SteArrayBaseType = u64; -const STE_ARRAY_BASE_TYPE_BITS: SteArrayBaseType = - (core::mem::size_of::() * 8) as SteArrayBaseType; + +const STE_ARRAY_BASE_TYPE_BITS: SteArrayBaseType = SteArrayBaseType::BITS as SteArrayBaseType; pub const STE_V_INDEX: usize = 0; pub const STE_V: SteArrayBaseType = 1 << 0; @@ -205,9 +205,15 @@ const STE_S2TTB_INDEX: usize = (196 / STE_ARRAY_BASE_TYPE_BITS) as usize; const STE_S2TTB: SteArrayBaseType = (bitmask!(51, 4) >> 4) << STE_S2TTB_OFFSET; // MEMO: Set S2HWU** to 0 because the page table is shared with CPUs. -#[derive(Clone)] +#[derive(Clone, Copy)] pub struct StreamTableEntry([SteArrayBaseType; 8]); +impl Default for StreamTableEntry { + fn default() -> Self { + Self::new() + } +} + impl StreamTableEntry { pub const fn new() -> Self { Self([0; 8]) @@ -362,7 +368,7 @@ impl StreamTableEntry { } pub fn set_stage2_translation_table(&mut self, table_address: usize) { - assert_eq!(table_address & !(bitmask!(51, 4)), 0); + assert_eq!(table_address & !bitmask!(51, 4), 0); self.0[STE_S2TTB_INDEX] = (self.0[STE_S2TTB_INDEX] & (!STE_S2TTB)) | (table_address as SteArrayBaseType); self.set_s2aa64(true); @@ -376,11 +382,11 @@ impl StreamTableEntry { is_traffic_can_pass: bool, is_stage1_bypassed: bool, ) { + use crate::STAGE_2_PAGE_SIZE; use crate::cpu::{ VTCR_EL2_PS, VTCR_EL2_PS_BITS_OFFSET, VTCR_EL2_SL0, VTCR_EL2_SL0_BITS_OFFSET, VTCR_EL2_T0SZ, VTCR_EL2_T0SZ_BITS_OFFSET, }; - use crate::STAGE_2_PAGE_SIZE; self.set_s2hwu(0b0000); self.set_s2fwb(0); @@ -408,15 +414,11 @@ impl StreamTableEntry { /// This function will return false when the data is STE::config pub fn is_offset_configuration_about_stage2(offset: usize, data: SteArrayBaseType) -> bool { - assert_eq!(core::mem::size_of::(), 8); + assert_eq!(size_of::(), 8); match offset { 1 => { let mask: SteArrayBaseType = (0b1111 << (72 - 64)) | (1 << (89 - 64)); - if (data & mask) 
!= 0 { - true - } else { - false - } + (data & mask) != 0 } 2 | 3 => true, _ => false, @@ -428,11 +430,10 @@ pub const fn get_level1_table_size(log2_size: u32, split: u32) -> usize { } pub const fn get_level2_table_size(span: u64, _split: u32) -> usize { - (1usize << (span - 1)) * core::mem::size_of::() + (1usize << (span - 1)) * size_of::() } pub fn create_bitmask_of_stage2_configurations(ste_offset: usize) -> u64 { - use core::mem::size_of; let start_offset = ste_offset / size_of::(); let end_offset = (ste_offset + size_of::()) / size_of::(); let mut mask: u64 = 0; diff --git a/src/common/src/spin_flag.rs b/src/common/src/spin_flag.rs index 1131199..d417331 100644 --- a/src/common/src/spin_flag.rs +++ b/src/common/src/spin_flag.rs @@ -10,19 +10,24 @@ use crate::cpu::{clean_and_invalidate_data_cache, isb}; pub struct SpinLockFlag(AtomicBool); +impl Default for SpinLockFlag { + fn default() -> Self { + Self::new() + } +} + impl SpinLockFlag { pub const fn new() -> Self { Self(AtomicBool::new(false)) } #[inline(always)] - pub fn try_lock_weak(&self) -> Result<(), ()> { + pub fn try_lock_weak(&self) -> Result<(), bool> { clean_and_invalidate_data_cache(self.0.as_ptr() as usize); isb(); self.0 .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed) - .and_then(|old| if old == false { Ok(()) } else { Err(false) }) - .or(Err(())) + .and_then(|old| if !old { Ok(()) } else { Err(false) }) } #[inline(always)] diff --git a/src/common/src/stack_memory_allocator.rs b/src/common/src/stack_memory_allocator.rs index 16f11d8..4c1e241 100644 --- a/src/common/src/stack_memory_allocator.rs +++ b/src/common/src/stack_memory_allocator.rs @@ -8,7 +8,7 @@ //! Stack Style Memory Allocator //! 
-use crate::{paging::page_align_up, MemoryAllocationError, PAGE_SHIFT}; +use crate::{MemoryAllocationError, PAGE_SHIFT, paging::page_align_up}; /// Stack Style Memory Allocator /// @@ -21,7 +21,7 @@ pub struct MemoryAllocator { } impl MemoryAllocator { - /// Setup myself with with allocated address + /// Setup myself with allocated address /// /// All members of Self are uninitialized. /// Please be careful when you assign some value into the member which has a drop trait. diff --git a/src/hypervisor_bootloader/.cargo/config.toml b/src/hypervisor_bootloader/.cargo/config.toml index a62b650..55e9562 100644 --- a/src/hypervisor_bootloader/.cargo/config.toml +++ b/src/hypervisor_bootloader/.cargo/config.toml @@ -1,7 +1,3 @@ [build] target = "aarch64-unknown-uefi" target-dir = "../target/" - -[unstable] -build-std-features = ["compiler-builtins-mem"] -build-std = ["core", "compiler_builtins"] diff --git a/src/hypervisor_bootloader/Cargo.toml b/src/hypervisor_bootloader/Cargo.toml index afb7d08..817c922 100644 --- a/src/hypervisor_bootloader/Cargo.toml +++ b/src/hypervisor_bootloader/Cargo.toml @@ -6,28 +6,21 @@ # http://opensource.org/licenses/mit-license.php [package] name = "hypervisor_bootloader" -version = "1.4.1" -edition = "2021" -resolver = "2" +edition.workspace = true +license.workspace = true +version.workspace = true [features] -default = ["smmu", "i210", "mt27800", "fast_restore", "acpi_table_protection", "contiguous_bit", "advanced_memory_manager"] +default = ["smmu", "fast_restore", "contiguous_bit"] minimum = [] smmu = [] -i210 = [] -mt27800 = [] fast_restore = [] -acpi_table_protection = [] contiguous_bit = [] -mrs_msr_emulation = [] -a64fx = ["mrs_msr_emulation"] -advanced_memory_manager = [] # Bootloader uses stack style allocator tftp = [] +a64fx = [] edit_dtb_memory = [] save_dtb = [] raspberrypi = ["edit_dtb_memory", "save_dtb"] -virtio = [] -virtio_net = [] embed_kernel = [] [dependencies] diff --git a/src/hypervisor_bootloader/rust-toolchain.toml 
b/src/hypervisor_bootloader/rust-toolchain.toml deleted file mode 100644 index db77f1b..0000000 --- a/src/hypervisor_bootloader/rust-toolchain.toml +++ /dev/null @@ -1,4 +0,0 @@ -[toolchain] -channel = "nightly" -components = ["rust-src"] -targets = ["aarch64-unknown-none"] diff --git a/src/hypervisor_bootloader/src/console.rs b/src/hypervisor_bootloader/src/console.rs index 3c99101..14bc206 100644 --- a/src/hypervisor_bootloader/src/console.rs +++ b/src/hypervisor_bootloader/src/console.rs @@ -9,34 +9,27 @@ //! Console with UEFI Output Protocol //! -use uefi::{output::EfiOutputProtocol, EfiStatus}; +use uefi::{EfiStatus, output::EfiOutputProtocol}; use core::fmt; -use core::mem::MaybeUninit; -pub struct Console { - uefi_output_console: MaybeUninit<&'static EfiOutputProtocol>, - //write_lock: SpinLockFlag, // Currently, Bootloader runs only BSP. Therefore the lock is not necessary. +pub struct Console<'a> { + efi_output_protocol: &'a EfiOutputProtocol, } -pub static mut DEFAULT_CONSOLE: Console = Console::new(); +static mut DEFAULT_CONSOLE: Option = None; -impl Console { - pub const fn new() -> Self { +impl<'a> Console<'a> { + pub const fn new(efi_output_protocol: &'a EfiOutputProtocol) -> Self { Self { - uefi_output_console: MaybeUninit::uninit(), + efi_output_protocol, } } - - pub fn init(&mut self, efi_output_protocol: *const EfiOutputProtocol) { - self.uefi_output_console = MaybeUninit::new(unsafe { &*efi_output_protocol }); - } } -impl fmt::Write for Console { +impl fmt::Write for Console<'_> { fn write_str(&mut self, string: &str) -> fmt::Result { - let result = unsafe { self.uefi_output_console.assume_init().output(string) }; - if result == EfiStatus::EfiSuccess { + if self.efi_output_protocol.output(string) == EfiStatus::EfiSuccess { Ok(()) } else { Err(fmt::Error) @@ -44,11 +37,16 @@ impl fmt::Write for Console { } } +pub fn init_default_console(efi_output_protocol: &'static EfiOutputProtocol) { + unsafe { (&raw mut 
DEFAULT_CONSOLE).write(Some(Console::new(efi_output_protocol))) }; +} + pub fn print(args: fmt::Arguments) { use fmt::Write; - let result = unsafe { DEFAULT_CONSOLE.write_fmt(args) }; - if result.is_err() { - panic!("write_fmt was failed."); + if let Some(Some(console)) = unsafe { (&raw mut DEFAULT_CONSOLE).as_mut() } { + if console.write_fmt(args).is_err() { + panic!("write_fmt was failed."); + } } } diff --git a/src/hypervisor_bootloader/src/dtb.rs b/src/hypervisor_bootloader/src/dtb.rs index 8150d63..3d83568 100644 --- a/src/hypervisor_bootloader/src/dtb.rs +++ b/src/hypervisor_bootloader/src/dtb.rs @@ -70,14 +70,13 @@ impl DtbNode { while unsafe { *(*pointer as *const u32) } == FDT_NOP { *pointer += TOKEN_SIZE; } - return; } fn skip_padding(pointer: &mut usize) -> Result<(), ()> { while (*pointer & (TOKEN_SIZE - 1)) != 0 { *pointer += 1; } - return Ok(()); + Ok(()) } #[allow(dead_code)] @@ -124,7 +123,7 @@ impl DtbNode { FDT_PROP => { *pointer += TOKEN_SIZE; let property_len = u32::from_be(unsafe { *(*pointer as *const u32) }); - *pointer += core::mem::size_of::() * 2; + *pointer += size_of::() * 2; *pointer += property_len as usize; Self::skip_padding(pointer)?; } @@ -144,7 +143,7 @@ impl DtbNode { Self::skip_nop(pointer); } *pointer += TOKEN_SIZE; - return Ok(()); + Ok(()) } fn add_offset(&mut self, mut regs: usize, regs_len: u32) { @@ -155,7 +154,7 @@ impl DtbNode { address_cells |= u32::from_be(unsafe { *(regs as *const u32) }) as usize; regs += TOKEN_SIZE; } - self.address_offset += address_cells as usize; + self.address_offset += address_cells; } } @@ -186,9 +185,9 @@ impl DtbNode { FDT_PROP => { pointer += TOKEN_SIZE; let property_len = u32::from_be(unsafe { *(pointer as *const u32) }); - pointer += core::mem::size_of::(); - let name_segment_offset = u32::from_be(unsafe { *((pointer) as *const u32) }); - pointer += core::mem::size_of::(); + pointer += size_of::(); + let name_segment_offset = u32::from_be(unsafe { *(pointer as *const u32) }); + pointer += 
size_of::(); let prop_name = dtb.get_name(name_segment_offset)?; if Self::match_string(prop_name, PROP_ADDRESS_CELLS) { @@ -217,7 +216,7 @@ impl DtbNode { } Self::skip_nop(&mut pointer); } - return Ok(None); + Ok(None) } fn _search_device_by_node_name( @@ -263,9 +262,9 @@ impl DtbNode { FDT_PROP => { *pointer += TOKEN_SIZE; let property_len = u32::from_be(unsafe { *(*pointer as *const u32) }); - *pointer += core::mem::size_of::(); + *pointer += size_of::(); let name_segment_offset = u32::from_be(unsafe { *((*pointer) as *const u32) }); - *pointer += core::mem::size_of::(); + *pointer += size_of::(); let prop_name = dtb.get_name(name_segment_offset)?; if Self::match_string(prop_name, PROP_ADDRESS_CELLS) { @@ -314,7 +313,7 @@ impl DtbNode { return Ok(Some(self.clone())); } *pointer += TOKEN_SIZE; - return Ok(None); + Ok(None) } fn _search_device_by_compatible( @@ -360,9 +359,9 @@ impl DtbNode { FDT_PROP => { *pointer += TOKEN_SIZE; let property_len = u32::from_be(unsafe { *(*pointer as *const u32) }); - *pointer += core::mem::size_of::(); + *pointer += size_of::(); let name_segment_offset = u32::from_be(unsafe { *((*pointer) as *const u32) }); - *pointer += core::mem::size_of::(); + *pointer += size_of::(); let prop_name = dtb.get_name(name_segment_offset)?; if Self::match_string(prop_name, PROP_COMPATIBLE) { @@ -425,7 +424,7 @@ impl DtbNode { return Ok(Some((self.clone(), index))); } *pointer += TOKEN_SIZE; - return Ok(None); + Ok(None) } pub fn get_search_holder(&self) -> Result { @@ -474,10 +473,7 @@ impl DtbNode { let mut s = self.clone(); if let Some((p, len)) = s.search_pointer_to_property(prop_name, dtb)? 
{ Ok(Some(unsafe { - core::slice::from_raw_parts( - p as *const u32, - len as usize / core::mem::size_of::(), - ) + core::slice::from_raw_parts(p as *const u32, len as usize / size_of::()) })) } else { Ok(None) @@ -498,18 +494,18 @@ impl DtbNodeNameSearchHolder { if let Some(t) = &result { self.pointer = t.base_pointer; DtbNode::skip_to_end_of_node(&mut self.pointer)?; - } else { - if unsafe { *(self.pointer as *const u32) } != FDT_END { - if self.pointer >= dtb.get_struct_block_limit() { - println!("Broken DTB"); - return Err(()); - } - self.node = dtb.get_root_node(); - self.node.base_pointer = self.pointer; - return self.search_next_device_by_node_name(node_name, dtb); + Ok(result) + } else if unsafe { *(self.pointer as *const u32) } != FDT_END { + if self.pointer >= dtb.get_struct_block_limit() { + println!("Broken DTB"); + return Err(()); } + self.node = dtb.get_root_node(); + self.node.base_pointer = self.pointer; + self.search_next_device_by_node_name(node_name, dtb) + } else { + Ok(result) } - return Ok(result); } pub fn search_next_device_by_compatible( @@ -523,18 +519,18 @@ impl DtbNodeNameSearchHolder { if let Some((t, _)) = &result { self.pointer = t.base_pointer; DtbNode::skip_to_end_of_node(&mut self.pointer)?; - } else { - if unsafe { *(self.pointer as *const u32) } != FDT_END { - if self.pointer >= dtb.get_struct_block_limit() { - println!("Broken DTB"); - return Err(()); - } - self.node = dtb.get_root_node(); - self.node.base_pointer = self.pointer; - return self.search_next_device_by_compatible(compatible_devices, dtb); + Ok(result) + } else if unsafe { *(self.pointer as *const u32) } != FDT_END { + if self.pointer >= dtb.get_struct_block_limit() { + println!("Broken DTB"); + return Err(()); } + self.node = dtb.get_root_node(); + self.node.base_pointer = self.pointer; + self.search_next_device_by_compatible(compatible_devices, dtb) + } else { + Ok(result) } - return Ok(result); } } @@ -587,7 +583,7 @@ pub fn 
add_new_memory_reservation_entry_to_dtb( let original_dtb_header = unsafe { &*(original_base_address as *const DtbHeader) }; let new_dtb_header = unsafe { &mut *(new_base_address as *mut DtbHeader) }; - total_new_size += core::mem::size_of::(); + total_new_size += size_of::(); if new_size < total_new_size { return Err(()); } @@ -606,18 +602,18 @@ pub fn add_new_memory_reservation_entry_to_dtb( let mut pointer = original_reservation_block_address; loop { let address = unsafe { *(pointer as *const u64) }; - pointer += core::mem::size_of::(); + pointer += size_of::(); let size = unsafe { *(pointer as *const u64) }; - pointer += core::mem::size_of::(); + pointer += size_of::(); if address == 0 && size == 0 { break; } } // original reservation block size without terminal entry let reservation_block_section_size = - pointer - original_reservation_block_address - core::mem::size_of::() * 2; + pointer - original_reservation_block_address - size_of::() * 2; // new total size will be the size of original reservation block + new entry + terminal entry - total_new_size += reservation_block_section_size + core::mem::size_of::() * 4; + total_new_size += reservation_block_section_size + size_of::() * 4; if new_size < total_new_size { return Err(()); } @@ -633,16 +629,14 @@ pub fn add_new_memory_reservation_entry_to_dtb( // write new entries let new_reservation_entry_address_filed_address = new_reservation_block_address + reservation_block_section_size; - let new_reservation_entry_size_field_address = new_reservation_block_address - + reservation_block_section_size - + core::mem::size_of::(); + let new_reservation_entry_size_field_address = + new_reservation_block_address + reservation_block_section_size + size_of::(); *(new_reservation_entry_address_filed_address as *mut usize) = reserved_address.to_be(); *(new_reservation_entry_size_field_address as *mut usize) = reserved_size.to_be(); - let new_termianal_entry_address = new_reservation_block_address - + 
reservation_block_section_size - + core::mem::size_of::() * 2; + let new_termianal_entry_address = + new_reservation_block_address + reservation_block_section_size + size_of::() * 2; *(new_termianal_entry_address as *mut usize) = 0; - *((new_termianal_entry_address + core::mem::size_of::()) as *mut usize) = 0; + *((new_termianal_entry_address + size_of::()) as *mut usize) = 0; } // copy struct section diff --git a/src/hypervisor_bootloader/src/elf.rs b/src/hypervisor_bootloader/src/elf.rs index 76b1ef1..dce99c4 100644 --- a/src/hypervisor_bootloader/src/elf.rs +++ b/src/hypervisor_bootloader/src/elf.rs @@ -83,7 +83,7 @@ impl Elf64Header { println!("Unsupported ELF version: {}", self.e_version); return false; } - return true; + true } pub fn get_entry_point(&self) -> usize { diff --git a/src/hypervisor_bootloader/src/main.rs b/src/hypervisor_bootloader/src/main.rs index 7999c58..f16aa1a 100644 --- a/src/hypervisor_bootloader/src/main.rs +++ b/src/hypervisor_bootloader/src/main.rs @@ -15,11 +15,11 @@ use core::ptr::NonNull; use common::{cpu::*, *}; use uefi::{ - boot_service, boot_service::EfiBootServices, EfiConfigurationTable, EfiHandle, EfiSystemTable, - EFI_ACPI_20_TABLE_GUID, EFI_DTB_TABLE_GUID, + EFI_ACPI_20_TABLE_GUID, EFI_DTB_TABLE_GUID, EfiConfigurationTable, EfiHandle, EfiSystemTable, + boot_service, boot_service::EfiBootServices, }; #[cfg(feature = "tftp")] -use uefi::{pxe, EfiStatus}; +use uefi::{EfiStatus, pxe}; #[macro_use] mod console; @@ -34,7 +34,7 @@ mod smmu; static mut ORIGINAL_PAGE_TABLE: usize = 0; static mut ORIGINAL_VECTOR_BASE: u64 = 0; static mut ORIGINAL_TCR_EL2: u64 = 0; -static mut INTERRUPT_FLAG: MaybeUninit = MaybeUninit::uninit(); +static mut INTERRUPT_FLAG: InterruptFlag = InterruptFlag::new(); static mut IMAGE_HANDLE: EfiHandle = 0; static mut SYSTEM_TABLE: *const EfiSystemTable = core::ptr::null(); @@ -44,15 +44,15 @@ static mut MEMORY_ALLOCATOR: MaybeUninit = MaybeUninit::uninit( #[cfg(feature = "tftp")] static mut PXE_PROTOCOL: 
*const pxe::EfiPxeBaseCodeProtocol = core::ptr::null(); -#[no_mangle] +#[unsafe(no_mangle)] extern "C" fn efi_main(image_handle: EfiHandle, system_table: *mut EfiSystemTable) -> ! { let system_table = unsafe { &*system_table }; let b_s = unsafe { &*system_table.efi_boot_services }; unsafe { IMAGE_HANDLE = image_handle; SYSTEM_TABLE = system_table; - console::DEFAULT_CONSOLE.init((*system_table).console_output_protocol); } + console::init_default_console(unsafe { &*system_table.console_output_protocol }); if let Some(hash_info) = HYPERVISOR_HASH_INFO { println!( @@ -119,7 +119,7 @@ extern "C" fn efi_main(image_handle: EfiHandle, system_table: *mut EfiSystemTabl #[cfg(feature = "smmu")] let smmu_v3_base_address = if let Some(acpi_address) = unsafe { ACPI_20_TABLE_ADDRESS } { - smmu::detect_smmu(acpi_address.get()).and_then(|a| NonZeroUsize::new(a)) + smmu::detect_smmu(acpi_address.get()).and_then(NonZeroUsize::new) } else { None }; @@ -161,23 +161,37 @@ extern "C" fn efi_main(image_handle: EfiHandle, system_table: *mut EfiSystemTabl ); } + let exit_boot_service_address = if system_table.efi_boot_services as usize != 0 { + if let Some(b_s) = unsafe { (&raw const *system_table.efi_boot_services).as_ref() } { + NonZeroUsize::new(b_s.exit_boot_services as usize) + } else { + None + } + } else { + None + }; + println!("Call the hypervisor(Entry Point: {:#X})", entry_point); let mut system_info = SystemInformation { acpi_rsdp_address: unsafe { ACPI_20_TABLE_ADDRESS }, vbar_el2: 0, - available_memory_info: unsafe { MEMORY_ALLOCATOR.assume_init_mut().get_all_memory() }, + available_memory_info: unsafe { + (&raw mut MEMORY_ALLOCATOR) + .as_mut() + .unwrap() + .assume_init_mut() + } + .get_all_memory(), spin_table_info, memory_save_list, serial_port: serial, ecam_info, smmu_v3_base_address, - exit_boot_service_address: NonZeroUsize::new(unsafe { - (*(*SYSTEM_TABLE).efi_boot_services).exit_boot_services - } as usize), + exit_boot_service_address, }; unsafe { - 
(core::mem::transmute::(entry_point))(&mut system_info) + core::mem::transmute::(entry_point)(&mut system_info) }; /* Do not call allocate_memory/free_memory from here */ @@ -186,7 +200,7 @@ extern "C" fn efi_main(image_handle: EfiHandle, system_table: *mut EfiSystemTabl /* Disable IRQ/FIQ */ /* After disabling IRQ/FIQ, we should avoid calling UEFI functions */ - unsafe { INTERRUPT_FLAG.write(local_irq_fiq_save()) }; + unsafe { INTERRUPT_FLAG = local_irq_fiq_save() }; /* Setup registers */ unsafe { ORIGINAL_VECTOR_BASE = get_vbar_el2() }; @@ -198,7 +212,7 @@ extern "C" fn efi_main(image_handle: EfiHandle, system_table: *mut EfiSystemTabl el2_to_el1(el1_main as *const fn() as usize, stack_address); /* Never come here */ - local_irq_fiq_restore(unsafe { INTERRUPT_FLAG.assume_init_ref().clone() }); + local_irq_fiq_restore(unsafe { (&raw const INTERRUPT_FLAG).read() }); panic!("Failed to jump EL1"); } @@ -210,8 +224,7 @@ extern "C" fn efi_main(image_handle: EfiHandle, system_table: *mut EfiSystemTabl fn detect_acpi_and_dtb(system_table: &EfiSystemTable) { for i in 0..system_table.num_table_entries { let table = unsafe { - &*((system_table.configuration_table - + i * core::mem::size_of::()) + &*((system_table.configuration_table + i * size_of::()) as *const EfiConfigurationTable) }; pr_debug!("GUID: {:#X?}", table.vendor_guid); @@ -249,7 +262,9 @@ fn init_memory_pool(b_s: &EfiBootServices) -> usize { allocated_address + ALLOC_SIZE ); unsafe { - MEMORY_ALLOCATOR + (&raw mut MEMORY_ALLOCATOR) + .as_mut() + .unwrap() .assume_init_mut() .init(allocated_address, ALLOC_SIZE) }; @@ -297,7 +312,9 @@ fn map_memory_pool(allocated_memory_address: usize, alloc_size: usize) { /// If the allocation is succeeded, Ok(start_address), otherwise Err(()) pub fn allocate_memory(pages: usize, align: Option) -> Result { unsafe { - MEMORY_ALLOCATOR + (&raw mut MEMORY_ALLOCATOR) + .as_mut() + .unwrap() .assume_init_mut() .allocate(pages << PAGE_SHIFT, align.unwrap_or(PAGE_SHIFT)) } @@ -313,7 
+330,9 @@ pub fn allocate_memory(pages: usize, align: Option) -> Result Result<(), MemoryAllocationError> { unsafe { - MEMORY_ALLOCATOR + (&raw mut MEMORY_ALLOCATOR) + .as_mut() + .unwrap() .assume_init_mut() .free(address, pages << PAGE_SHIFT) } @@ -344,7 +363,7 @@ fn detect_spin_table( }; let base_address = ((u32::from_be(release_addr[0]) as usize) << u32::BITS) | (u32::from_be(release_addr[1]) as usize); - let mut length = core::mem::size_of::(); + let mut length = size_of::(); while let Ok(Some(node)) = search_holder.search_next_device_by_node_name(b"cpu", &dtb_analyzer) { let Ok(Some(release_addr)) = node.get_prop_as_u32(b"cpu-release-addr", &dtb_analyzer) @@ -353,16 +372,16 @@ fn detect_spin_table( }; let release_address = ((u32::from_be(release_addr[0]) as usize) << u32::BITS) | (u32::from_be(release_addr[1]) as usize); - length = release_address + core::mem::size_of::() - base_address; + length = release_address + size_of::() - base_address; pr_debug!("CPU Release Address: {:#X}", release_address); } Some((base_address, NonZeroUsize::new(length).unwrap())) } -/// Load hypervisor_kernel to [`common::HYPERVISOR_VIRTUAL_BASE_ADDRESS`] via TFTP +/// Load hypervisor_kernel to [`HYPERVISOR_VIRTUAL_BASE_ADDRESS`] via TFTP /// /// This function loads hypervisor_kernel according to ELF header. -/// The hypervisor_kernel will be loaded from [`common::HYPERVISOR_PATH`] +/// The hypervisor_kernel will be loaded from [`HYPERVISOR_PATH`] /// /// Before loads the hypervisor, this will save original TTBR0_EL2 into [`ORIGINAL_PAGE_TABLE`] and /// create new TTBR0_EL2 by copying original page table tree. 
@@ -447,7 +466,7 @@ fn load_hypervisor(image_handle: EfiHandle, b_s: &EfiBootServices) -> usize { if let Err(e) = b_s.free_pool(kernel_pool) { println!("Failed to free the pool: {:?}", e); } - return entry_point; + entry_point } /// Load hypervisor_kernel to [`common::HYPERVISOR_VIRTUAL_BASE_ADDRESS`] from the embedded binary @@ -547,7 +566,7 @@ fn load_hypervisor(image_handle: EfiHandle, b_s: &EfiBootServices) -> usize { entry_point } -/// Load hypervisor_kernel to [`common::HYPERVISOR_VIRTUAL_BASE_ADDRESS`] +/// Load hypervisor_kernel to [`HYPERVISOR_VIRTUAL_BASE_ADDRESS`] /// /// This function loads hypervisor_kernel according to ELF header. /// The data will be loaded by `read_data` @@ -573,7 +592,7 @@ where { /* Read ElfHeader */ let mut elf_header: MaybeUninit = MaybeUninit::uninit(); - const ELF64_HEADER_SIZE: usize = core::mem::size_of::(); + const ELF64_HEADER_SIZE: usize = size_of::(); let read_size = read_data(elf_header.as_mut_ptr() as *mut u8, 0, ELF64_HEADER_SIZE) .expect("Failed to read Elf header"); assert_eq!( @@ -686,7 +705,7 @@ where /// Save DTB /// -/// This function saves DTB into [`common::DTB_WRITTEN_PATH`] +/// This function saves DTB into [`DTB_WRITTEN_PATH`] /// This process needed by passing the edited DTB to U-Boot. 
/// /// # Panics @@ -733,7 +752,7 @@ fn create_memory_save_list(b_s: &EfiBootServices) -> &'static mut [MemorySaveLis allocate_memory(MEMORY_SAVE_LIST_PAGES, None) .expect("Failed to allocate memory for memory saving list") as *mut MemorySaveListEntry, - MEMORY_SAVE_LIST_SIZE / core::mem::size_of::(), + MEMORY_SAVE_LIST_SIZE / size_of::(), ) }; @@ -799,7 +818,7 @@ fn dump_memory_map(b_s: &EfiBootServices) { return; } }; - let default_descriptor_size = core::mem::size_of::(); + let default_descriptor_size = size_of::(); if default_descriptor_size != memory_map_info.actual_descriptor_size { println!( @@ -827,6 +846,8 @@ fn dump_memory_map(b_s: &EfiBootServices) { fn set_up_el1() { let is_e2h_enabled = (get_hcr_el2() & HCR_EL2_E2H) != 0; + let id_aa64pfr0_el1 = get_id_aa64pfr0_el1(); + let id_aa64pfr1_el1 = get_id_aa64pfr1_el1(); /* CNTHCTL_EL2 & CNTVOFF_EL2 */ set_cnthctl_el2(CNTHCTL_EL2_EL1PCEN | CNTHCTL_EL2_EL1PCTEN); @@ -850,66 +871,59 @@ fn set_up_el1() { /* Ignore it currently... */ /* CPACR_EL1 & CPTR_EL2 */ - #[cfg(feature = "raspberrypi")] - set_cptr_el2(0x0); - let cptr_el2_current = get_cptr_el2(); + let cptr_el2 = get_cptr_el2(); let mut cpacr_el1: u64 = 0; - cpacr_el1 |= ((((cptr_el2_current) & CPTR_EL2_ZEN) >> CPTR_EL2_ZEN_BITS_OFFSET) - << CPACR_EL1_ZEN_BITS_OFFSET) - | ((((cptr_el2_current) & CPTR_EL2_FPEN) >> CPTR_EL2_FPEN_BITS_OFFSET) - << CPACR_EL1_FPEN_BITS_OFFSET); - cpacr_el1 |= 0b11 << CPACR_EL1_FPEN_BITS_OFFSET; /* TODO: inspect why we must set 0b11 */ - if is_e2h_enabled { - cpacr_el1 |= ((cptr_el2_current & CPTR_EL2_TTA_WITH_E2H) - >> CPTR_EL2_TTA_BIT_OFFSET_WITH_E2H) - << CPACR_EL1_TTA_BIT_OFFSET; + cpacr_el1 = cptr_el2; + cpacr_el1 &= !(CPTR_EL2_TCPAC | CPTR_EL2_TAM); } else { - cpacr_el1 |= ((cptr_el2_current & CPTR_EL2_TTA_WITHOUT_E2H) - >> CPTR_EL2_TTA_BIT_OFFSET_WITHOUT_E2H) + cpacr_el1 |= ((cptr_el2 & CPTR_EL2_TTA_WITHOUT_E2H) >> CPTR_EL2_TTA_BIT_OFFSET_WITHOUT_E2H) << CPACR_EL1_TTA_BIT_OFFSET; + if (cptr_el2 & CPTR_EL2_TSM) == 
CPTR_EL2_TSM_TRAP { + cpacr_el1 |= CPACR_EL1_SMEN_TRAP_ALL; + } else { + cpacr_el1 |= CPACR_EL1_SMEN_TRAP_NONE; + } + if (cptr_el2 & CPTR_EL2_TFP) == CPTR_EL2_TFP_TRAP { + cpacr_el1 |= CPACR_EL1_FPEN_TRAP_ALL; + } else { + cpacr_el1 |= CPACR_EL1_FPEN_TRAP_NONE; + } + if (cptr_el2 & CPTR_EL2_TZ) == CPTR_EL2_TZ_TRAP { + cpacr_el1 |= CPACR_EL1_ZEN_TRAP_ALL; + } else { + cpacr_el1 |= CPACR_EL1_ZEN_TRAP_NONE; + } } - let mut cptr_el2: u64 = cptr_el2_current | CPTR_EL2_ZEN_NO_TRAP | CPTR_EL2_FPEN_NO_TRAP /*| CPTR_EL2_RES1*/; - cptr_el2 &= !((1 << 28) | (1 << 30) | (1 << 31)); - set_cpacr_el1(cpacr_el1); - isb(); - /* CPTR_EL2 will be set after HCR_EL2 */ - - let id_aa64pfr0_el1 = get_id_aa64pfr0_el1(); + let mut cptr_el2 = CPTR_EL2_RES1; + if (id_aa64pfr1_el1 & ID_AA64PFR1_EL1_SME) != 0 { + cptr_el2 &= !CPTR_EL2_TSM; + } if (id_aa64pfr0_el1 & ID_AA64PFR0_EL1_SVE) != 0 { - /* ZCR_EL2 */ - unsafe { - asm!(" mov {t}, 0x1ff - msr S3_4_C1_C2_0, {t}", t = out(reg) _) - }; + cptr_el2 &= !CPTR_EL2_TZ; + + let zcr_el2 = MAX_ZCR_EL2_LEN; + unsafe { asm!("msr S3_4_C1_C2_0, {:x}", in(reg)zcr_el2) }; } if (id_aa64pfr0_el1 & ID_AA64PFR0_EL1_GIC) != 0 { - /* GICv3~ */ - /*unsafe { - asm!(" mrs {t}, icc_sre_el2 - orr {t}, {t}, 1 << 0 - orr {t}, {t}, 1 << 3 - msr icc_sre_el2, {t} - isb - mrs {t}, icc_sre_el2 - tbz {t}, 0, 1f - msr ich_hcr_el2, xzr - 1:", t = out(reg) _) - };*/ + let icc_sre_el2 = ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE; + set_icc_sre_el2(icc_sre_el2); + isb(); + set_ich_hcr_el2(0); } /* MAIR_EL1(Copy MAIR_EL2) */ - set_mair_el1(get_mair_el2()); + let mair_el1 = get_mair_el2(); /* TTBR0_EL1 */ - set_ttbr0_el1(unsafe { ORIGINAL_PAGE_TABLE } as u64); + let ttbr0_el1 = unsafe { ORIGINAL_PAGE_TABLE } as u64; /* TCR_EL1 */ - if is_e2h_enabled { - set_tcr_el1(unsafe { ORIGINAL_TCR_EL2 }); + let tcr_el1 = if is_e2h_enabled { + unsafe { ORIGINAL_TCR_EL2 } } else { let mut tcr_el1: u64 = 0; let tcr_el2 = unsafe { ORIGINAL_TCR_EL2 }; @@ -936,15 +950,24 @@ fn set_up_el1() { << 
TCR_EL1_IPS_BITS_OFFSET; tcr_el1 |= TCR_EL1_EPD1; /* Disable TTBR1_EL1 */ - set_tcr_el1(tcr_el1); - } + tcr_el1 + }; /* SCTLR_EL1(Copy SCTLR_EL2) */ - set_sctlr_el1(get_sctlr_el2()); + let sctlr_el1 = get_sctlr_el2(); /* VBAR_EL1 */ - set_vbar_el1(unsafe { ORIGINAL_VECTOR_BASE }); + let vbar_el1 = unsafe { ORIGINAL_VECTOR_BASE }; + /* A64FX specific registers */ + #[cfg(feature = "a64fx")] + let mut imp_fj_tag_address_ctrl_el1: u32; + #[cfg(feature = "a64fx")] + let imp_sccr_ctrl_el1: u64; + #[cfg(feature = "a64fx")] + let imp_pf_ctrl_el1: u64; + #[cfg(feature = "a64fx")] + let imp_barrier_ctrl_el1: u64; #[cfg(feature = "a64fx")] { const IMP_FJ_TAG_ADDRESS_CTRL_EL2_TBO0_BIT_OFFSET: u32 = 0; @@ -967,18 +990,18 @@ fn set_up_el1() { const IMP_BARRIER_CTRL_EL1_EL0AE: u64 = 1 << 62; let mut imp_fj_tag_address_ctrl_el2: u32; - let mut imp_fj_tag_address_ctrl_el1: u32 = 0; /* Is it ok including IMP_SCCR_CTRL_EL1_EL0AE? */ - let imp_sccr_ctrl_el1: u64 = IMP_SCCR_CTRL_EL1_EL1AE | IMP_SCCR_CTRL_EL1_EL0AE; + imp_sccr_ctrl_el1 = IMP_SCCR_CTRL_EL1_EL1AE | IMP_SCCR_CTRL_EL1_EL0AE; /* Is it ok including IMP_PF_CTRL_EL1_EL0AE? */ - let imp_pf_ctrl_el1: u64 = IMP_PF_CTRL_EL1_EL1AE | IMP_PF_CTRL_EL1_EL0AE; + imp_pf_ctrl_el1 = IMP_PF_CTRL_EL1_EL1AE | IMP_PF_CTRL_EL1_EL0AE; /* Is it ok including IMP_BARRIER_CTRL_EL1_EL0AE? 
*/ - let imp_barrier_ctrl_el1: u64 = IMP_BARRIER_CTRL_EL1_EL1AE | IMP_BARRIER_CTRL_EL1_EL0AE; + imp_barrier_ctrl_el1 = IMP_BARRIER_CTRL_EL1_EL1AE | IMP_BARRIER_CTRL_EL1_EL0AE; unsafe { asm!("mrs {:x}, S3_4_C11_C2_0", out(reg) imp_fj_tag_address_ctrl_el2) }; if is_e2h_enabled { imp_fj_tag_address_ctrl_el1 = imp_fj_tag_address_ctrl_el2; } else { + imp_fj_tag_address_ctrl_el1 = 0; imp_fj_tag_address_ctrl_el1 |= ((imp_fj_tag_address_ctrl_el2 & IMP_FJ_TAG_ADDRESS_CTRL_EL2_TBO0) >> IMP_FJ_TAG_ADDRESS_CTRL_EL2_TBO0_BIT_OFFSET) @@ -994,17 +1017,29 @@ fn set_up_el1() { } imp_fj_tag_address_ctrl_el2 = 0; unsafe { asm!("msr S3_4_C11_C2_0, {:x}", in(reg) imp_fj_tag_address_ctrl_el2) }; - unsafe { asm!("msr S3_0_C11_C2_0, {:x}", in(reg) imp_fj_tag_address_ctrl_el1) }; - unsafe { asm!("msr S3_0_C11_C8_0, {:x}", in(reg) imp_sccr_ctrl_el1) }; - unsafe { asm!("msr S3_0_C11_C4_0, {:x}", in(reg) imp_pf_ctrl_el1) }; - unsafe { asm!("msr S3_0_C11_C12_0, {:x}", in(reg) imp_barrier_ctrl_el1) }; } /* HCR_EL2 */ let hcr_el2 = HCR_EL2_FIEN | HCR_EL2_API | HCR_EL2_APK | HCR_EL2_RW | HCR_EL2_TSC | HCR_EL2_VM; set_hcr_el2(hcr_el2); isb(); + + /* Now, HCR_EL2.E2H == 0 */ set_cptr_el2(cptr_el2); + set_cpacr_el1(cpacr_el1); + set_sctlr_el1(sctlr_el1); + set_vbar_el1(vbar_el1); + set_mair_el1(mair_el1); + set_tcr_el1(tcr_el1); + set_ttbr0_el1(ttbr0_el1); + + #[cfg(feature = "a64fx")] + { + unsafe { asm!("msr S3_0_C11_C2_0, {:x}", in(reg) imp_fj_tag_address_ctrl_el1) }; + unsafe { asm!("msr S3_0_C11_C8_0, {:x}", in(reg) imp_sccr_ctrl_el1) }; + unsafe { asm!("msr S3_0_C11_C4_0, {:x}", in(reg) imp_pf_ctrl_el1) }; + unsafe { asm!("msr S3_0_C11_C12_0, {:x}", in(reg) imp_barrier_ctrl_el1) }; + } } #[cfg(feature = "tftp")] @@ -1112,7 +1147,7 @@ fn exit_bootloader() -> ! { } extern "C" fn el1_main() -> ! 
{ - local_irq_fiq_restore(unsafe { INTERRUPT_FLAG.assume_init_ref().clone() }); + local_irq_fiq_restore(unsafe { (&raw const INTERRUPT_FLAG).read() }); assert_eq!(get_current_el() >> 2, 1, "Failed to jump to EL1"); println!("Hello,world! from EL1"); @@ -1120,20 +1155,21 @@ extern "C" fn el1_main() -> ! { exit_bootloader(); } -fn el2_to_el1(el1_entry_point: usize, el1_stack_pointer: usize) { +fn el2_to_el1(el1_entry_point: usize, el2_stack_pointer: usize) { unsafe { asm!(" msr elr_el2, {entry_point} mov {tmp}, sp msr sp_el1, {tmp} mov sp, {stack_pointer} - mov {tmp}, (1 << 7) |(1 << 6) | (1 << 2) | (1) // EL1h(EL1 + Use SP_EL1) + mov {tmp}, {SPSR_EL2} msr spsr_el2, {tmp} isb eret", tmp = in(reg) 0u64, entry_point = in(reg) el1_entry_point, - stack_pointer = in(reg) el1_stack_pointer, + stack_pointer = in(reg) el2_stack_pointer, + SPSR_EL2 = const SPSR_EL2_DEFAULT, options(noreturn) ) } diff --git a/src/hypervisor_bootloader/src/paging.rs b/src/hypervisor_bootloader/src/paging.rs index 41152ee..3696ef4 100644 --- a/src/hypervisor_bootloader/src/paging.rs +++ b/src/hypervisor_bootloader/src/paging.rs @@ -20,13 +20,9 @@ use common::{ fn _clone_page_table(table_address: usize, current_level: i8) -> usize { let cloned_table_address = allocate_memory(1, None).expect("Failed to allocate page table"); - let cloned_table = unsafe { - &mut *(cloned_table_address as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) - }; - unsafe { - *cloned_table = - *(table_address as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) - }; + let cloned_table = + unsafe { &mut *(cloned_table_address as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; + unsafe { *cloned_table = *(table_address as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; if current_level == 3 { return cloned_table_address; } @@ -37,7 +33,7 @@ fn _clone_page_table(table_address: usize, current_level: i8) -> usize { | (_clone_page_table(next_level_table_address, current_level + 1) as u64); } } - return 
cloned_table_address; + cloned_table_address } /// Clone TTBR0_EL2 @@ -53,7 +49,7 @@ pub fn clone_page_table() -> usize { let page_table_address = TTBR::new(get_ttbr0_el2()).get_base_address(); let tcr_el2 = get_tcr_el2(); let first_table_level = get_initial_page_table_level_and_bits_to_shift(tcr_el2).0; - return _clone_page_table(page_table_address, first_table_level); + _clone_page_table(page_table_address, first_table_level) } /// Map physical address recursively @@ -71,6 +67,7 @@ pub fn clone_page_table() -> usize { /// * `permission` - The attribute for memory, Bit0: is_readable, Bit1: is_writable, Bit2: is_executable /// * `memory_attribute` - The index of MAIR_EL2 to apply the mapping area /// * `t0sz` - The value of TCR_EL2::T0SZ +#[allow(clippy::too_many_arguments)] fn map_address_recursive( physical_address: &mut usize, virtual_address: &mut usize, @@ -85,9 +82,8 @@ fn map_address_recursive( let mut table_index = (*virtual_address >> shift_level) & 0x1FF; if table_level == 3 { - let current_table = unsafe { - &mut *(table_address as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) - }; + let current_table = + unsafe { &mut *(table_address as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; let num_of_pages = if *num_of_remaining_pages + table_index > 512 { 512 - table_index } else { @@ -95,17 +91,20 @@ fn map_address_recursive( }; let attributes = create_attributes_for_stage_1(permission, memory_attribute, false); - for index in table_index..(table_index + num_of_pages) { - current_table[index] = *physical_address as u64 | attributes; + for e in current_table + .iter_mut() + .skip(table_index) + .take(num_of_pages) + { + *e = *physical_address as u64 | attributes; *physical_address += PAGE_SIZE; *virtual_address += PAGE_SIZE; } *num_of_remaining_pages -= num_of_pages; return Ok(()); } - let current_table = unsafe { - &mut *(table_address as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) - }; + let current_table = + unsafe { &mut *(table_address 
as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; while *num_of_remaining_pages != 0 { pr_debug!( @@ -164,7 +163,7 @@ fn map_address_recursive( *target_descriptor ^ (block_physical_address as u64); let next_level_page = unsafe { &mut *(allocated_table_address - as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) + as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; if table_level + 1 == 3 { @@ -181,7 +180,7 @@ fn map_address_recursive( /* set_mem */ for e in unsafe { &mut *(allocated_table_address - as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) + as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) } { *e = 0; } @@ -208,7 +207,7 @@ fn map_address_recursive( } table_index += 1; } - return Ok(()); + Ok(()) } /// Map address @@ -280,9 +279,9 @@ pub fn map_address( &mut num_of_needed_pages, TTBR::new(get_ttbr0_el2()).get_base_address(), table_level, - (readable as u8) << MEMORY_PERMISSION_READABLE_BIT - | (writable as u8) << MEMORY_PERMISSION_WRITABLE_BIT - | (executable as u8) << MEMORY_PERMISSION_EXECUTABLE_BIT, + ((readable as u8) << MEMORY_PERMISSION_READABLE_BIT) + | ((writable as u8) << MEMORY_PERMISSION_WRITABLE_BIT) + | ((executable as u8) << MEMORY_PERMISSION_EXECUTABLE_BIT), get_suitable_memory_attribute_index_from_mair_el2(is_device), tcr_el2_t0sz as u8, )?; @@ -300,9 +299,10 @@ pub fn map_address( aligned_size, aligned_size >> PAGE_SHIFT ); - return Ok(()); + Ok(()) } +#[allow(clippy::too_many_arguments)] fn map_address_recursive_stage2( physical_address: &mut usize, virtual_address: &mut usize, @@ -320,8 +320,7 @@ fn map_address_recursive_stage2( (*virtual_address >> shift_level) & (0x200 * (concatenated_tables as usize) - 1); if table_level == 3 { - let table_len = - (PAGE_TABLE_SIZE * (concatenated_tables as usize)) / core::mem::size_of::(); + let table_len = (PAGE_TABLE_SIZE * (concatenated_tables as usize)) / size_of::(); let current_table = unsafe { core::slice::from_raw_parts_mut(table_address as *mut u64, table_len) }; @@ -337,20 +336,25 @@ fn 
map_address_recursive_stage2( } let attributes = create_attributes_for_stage_2(permission, is_dummy_page, is_unmap, false); let end_index = table_index + num_of_pages; - for index in table_index..end_index { + for (index, e) in current_table + .iter_mut() + .enumerate() + .take(end_index) + .skip(table_index) + { + *e = *physical_address as u64 | attributes; + + #[cfg(feature = "contiguous_bit")] if STAGE_2_PAGE_SIZE == 0x1000 && (index & 0xF) == 0 && !is_dummy_page && (end_index - index) >= 16 && (*physical_address & ((16 * STAGE_2_PAGE_SIZE) - 1)) == 0 - && cfg!(feature = "contiguous_bit") { pr_debug!("Enable CONTIGUOUS_BIT({:#X} ~ {:#X})", index, end_index); - current_table[index] = - *physical_address as u64 | attributes | PAGE_DESCRIPTORS_CONTIGUOUS; - } else { - current_table[index] = *physical_address as u64 | attributes; + *e |= PAGE_DESCRIPTORS_CONTIGUOUS; } + if !is_dummy_page { *physical_address += STAGE_2_PAGE_SIZE; } @@ -362,7 +366,7 @@ fn map_address_recursive_stage2( let current_table = unsafe { core::slice::from_raw_parts_mut( table_address as *mut u64, - (PAGE_TABLE_SIZE * concatenated_tables as usize) / core::mem::size_of::(), + (PAGE_TABLE_SIZE * concatenated_tables as usize) / size_of::(), ) }; @@ -424,7 +428,7 @@ fn map_address_recursive_stage2( *target_descriptor ^ (block_physical_address as u64); let next_level_page = unsafe { &mut *(allocated_table_address - as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) + as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; if table_level + 1 == 3 { @@ -441,7 +445,7 @@ fn map_address_recursive_stage2( /* set_mem */ for e in unsafe { &mut *(allocated_table_address - as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) + as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) } { *e = 0; } @@ -472,7 +476,7 @@ fn map_address_recursive_stage2( } table_index += 1; } - return Ok(()); + Ok(()) } /// Map address ~ (address + size) to dummy page @@ -482,7 +486,7 @@ fn map_address_recursive_stage2( /// # Arguments 
/// * `address` - The address to hide from EL1/EL0 /// * `size` - The size to hide -/// * `dummy_page` - [`common::PAGE_SIZE`] memory area to convert the access from EL1/EL0 +/// * `dummy_page` - [`PAGE_SIZE`] memory area to convert the access from EL1/EL0 /// /// # Result /// If mapping is succeeded, returns Ok(()), otherwise returns Err(()) @@ -525,7 +529,7 @@ pub fn map_dummy_page_into_vttbr_el2( assert_eq!(num_of_needed_pages, 0); assert_eq!(original_dummy_page, dummy_page); - return Ok(()); + Ok(()) } fn setup_stage_2_translation_recursive( @@ -538,7 +542,7 @@ fn setup_stage_2_translation_recursive( let page_table = unsafe { core::slice::from_raw_parts_mut( table_address as *mut u64, - (PAGE_TABLE_SIZE * number_of_tables) / core::mem::size_of::(), + (PAGE_TABLE_SIZE * number_of_tables) / size_of::(), ) }; let shift_level = table_level_to_table_shift(STAGE_2_PAGE_SHIFT, table_level); @@ -569,7 +573,7 @@ fn setup_stage_2_translation_recursive( *e = (next_table_address as u64) | 0b11; } } - return Ok(()); + Ok(()) } pub fn setup_stage_2_translation() -> Result<(), ()> { @@ -622,13 +626,13 @@ pub fn setup_stage_2_translation() -> Result<(), ()> { /* Setup VTCR_EL2 */ /* D13.2.148 VTCR_EL2, Virtualization Translation Control Register */ - let vtcr_el2: u64 = ((sl2 as u64)<< VTCR_EL2_SL2_BIT_OFFSET) | + let vtcr_el2: u64 = ((sl2 as u64) << VTCR_EL2_SL2_BIT_OFFSET) | VTCR_EL2_RES1 | - (0b1111 << VTCR_EL2_HWU_BITS_OFFSET) | - ((ps as u64) << VTCR_EL2_PS_BITS_OFFSET) | + (0b1111 << VTCR_EL2_HWU_BITS_OFFSET) | + (ps << VTCR_EL2_PS_BITS_OFFSET) | (0 << VTCR_EL2_TG0_BITS_OFFSET) /* 4KiB */ | - (0b11 < Result<(), ()> { set_vtcr_el2(vtcr_el2); set_vttbr_el2(table_address as u64); - return Ok(()); + Ok(()) } pub fn dump_page_table_recursive( @@ -680,7 +684,7 @@ pub fn dump_page_table_recursive( ); } *virtual_base_address += granule; - processing_descriptor_address += core::mem::size_of::(); + processing_descriptor_address += size_of::(); } } else { for _ in 0..512 { @@ 
-721,7 +725,7 @@ pub fn dump_page_table_recursive( granule >> 9, ); } - processing_descriptor_address += core::mem::size_of::(); + processing_descriptor_address += size_of::(); } } } @@ -834,7 +838,7 @@ fn allocate_page_table_for_stage_1( is_for_ttbr: bool, ) -> Result { let alignment = if is_for_ttbr { - ((64 - ((PAGE_SHIFT - 3) * (4 - look_up_level) as usize) - t0sz as usize).max(4)).min(12) + (64 - ((PAGE_SHIFT - 3) * (4 - look_up_level) as usize) - t0sz as usize).clamp(4, 12) } else { PAGE_SHIFT }; @@ -857,8 +861,7 @@ fn allocate_page_table_for_stage_2( ) -> Result { assert_ne!(number_of_tables, 0); let alignment = if is_for_ttbr { - ((64 - ((PAGE_SHIFT - 3) as usize * (4 - look_up_level) as usize) - t0sz as usize).max(4)) - .min(12) + (64 - ((PAGE_SHIFT - 3) * (4 - look_up_level) as usize) - t0sz as usize).clamp(4, 12) + (number_of_tables as usize - 1) } else { assert_eq!(number_of_tables, 1); diff --git a/src/hypervisor_bootloader/src/pci.rs b/src/hypervisor_bootloader/src/pci.rs index 7572b31..064c650 100644 --- a/src/hypervisor_bootloader/src/pci.rs +++ b/src/hypervisor_bootloader/src/pci.rs @@ -11,6 +11,7 @@ use common::acpi::get_acpi_table; use common::{EcamInfo, PAGE_MASK}; pub fn detect_pci_space(rsdp: usize) -> Option { + use core::ptr::read_unaligned; let mcfg = get_acpi_table(rsdp, b"MCFG"); if let Err(e) = mcfg { println!("Failed to get MCFG table: {:?}", e); @@ -18,9 +19,9 @@ pub fn detect_pci_space(rsdp: usize) -> Option { } let mcfg = mcfg.unwrap(); /* Currently, supporting only one ECAM Address */ - let ecam_address = unsafe { *((mcfg + 44) as *const u64) } as usize; - let start_bus = unsafe { *((mcfg + 54) as *const u8) }; - let end_bus = unsafe { *((mcfg + 55) as *const u8) }; + let ecam_address = unsafe { read_unaligned((mcfg + 44) as *const u64) } as usize; + let start_bus = unsafe { read_unaligned((mcfg + 54) as *const u8) }; + let end_bus = unsafe { read_unaligned((mcfg + 55) as *const u8) }; println!( "ECAM: BaseAddress: {:#X}, Bus: {:#X} 
~ {:#X}", ecam_address, start_bus, end_bus @@ -36,9 +37,9 @@ pub fn detect_pci_space(rsdp: usize) -> Option { true, ) .expect("Failed to map ECAM Space"); - return Some(EcamInfo { + Some(EcamInfo { address: ecam_address, start_bus, end_bus, - }); + }) } diff --git a/src/hypervisor_bootloader/src/serial_port.rs b/src/hypervisor_bootloader/src/serial_port.rs index d7dad0f..9405c51 100644 --- a/src/hypervisor_bootloader/src/serial_port.rs +++ b/src/hypervisor_bootloader/src/serial_port.rs @@ -5,7 +5,7 @@ // This software is released under the MIT License. // http://opensource.org/licenses/mit-license.php -use common::acpi::{get_acpi_table, GeneralAddressStructure}; +use common::acpi::{GeneralAddressStructure, get_acpi_table}; use common::serial_port::{SerialPortInfo, SerialPortType}; use core::num::NonZeroUsize; @@ -34,16 +34,16 @@ fn try_to_get_serial_info_from_acpi(rsdp_address: usize) -> Option Option { let dtb_analyser = crate::dtb::DtbAnalyser::new(dtb_address); - if let Err(_) = dtb_analyser { + if dtb_analyser.is_err() { println!("Invalid DTB"); return None; } @@ -83,7 +83,7 @@ fn try_to_get_serial_info_from_dtb(dtb_address: usize) -> Option } } - return None; + None } #[cfg(feature = "raspberrypi")] diff --git a/src/hypervisor_bootloader/src/smmu.rs b/src/hypervisor_bootloader/src/smmu.rs index 400e927..79b141d 100644 --- a/src/hypervisor_bootloader/src/smmu.rs +++ b/src/hypervisor_bootloader/src/smmu.rs @@ -15,7 +15,7 @@ use crate::allocate_memory; use crate::paging::map_address; use common::cpu::{get_vtcr_el2, get_vttbr_el2}; -use common::{acpi, paging::page_align_up, smmu::*, PAGE_SHIFT}; +use common::{PAGE_SHIFT, acpi, paging::page_align_up, smmu::*}; use core::ptr::{read_volatile, write_volatile}; @@ -36,7 +36,7 @@ use core::ptr::{read_volatile, write_volatile}; /// * acpi_address: RSDP of ACPI 2.0 or later /// /// # Result -/// If the initialization is succeed, return Some(smmuv3_base_address), otherwise none +/// If the initialization is succeeded, 
return Some(smmuv3_base_address), otherwise none pub fn detect_smmu(acpi_address: usize) -> Option { let iort = match acpi::get_acpi_table(acpi_address, &acpi::iort::IORT::SIGNATURE) { Ok(address) => unsafe { &*(address as *const acpi::iort::IORT) }, @@ -109,15 +109,14 @@ pub fn detect_smmu(acpi_address: usize) -> Option { const STREAM_TABLE_SPLIT: u32 = 6; let level2_table_address = allocate_memory( - page_align_up((1 << STREAM_TABLE_SPLIT) * core::mem::size_of::()) - >> PAGE_SHIFT, + page_align_up((1 << STREAM_TABLE_SPLIT) * size_of::()) >> PAGE_SHIFT, None, ) .expect("Failed to allocate memory for Level2 Stream Table"); let level2_table = unsafe { &mut *(level2_table_address as *mut [StreamTableEntry; 1 << STREAM_TABLE_SPLIT]) }; for e in level2_table { - core::mem::forget(core::mem::replace(e, ste.clone())); + *e = ste; } /* Find max_stream_id */ @@ -144,8 +143,7 @@ pub fn detect_smmu(acpi_address: usize) -> Option { /* Create Stream Table (Level1)*/ let number_of_level1_context_descriptors = (max_stream_id + 1) >> STREAM_TABLE_SPLIT; let level1_table_address = allocate_memory( - page_align_up(number_of_level1_context_descriptors * core::mem::size_of::()) - >> PAGE_SHIFT, + page_align_up(number_of_level1_context_descriptors * size_of::()) >> PAGE_SHIFT, None, ) .expect("Failed to allocate memory for Level1 Stream Table"); diff --git a/src/hypervisor_kernel/.cargo/config.toml b/src/hypervisor_kernel/.cargo/config.toml index 0026296..44acaf1 100644 --- a/src/hypervisor_kernel/.cargo/config.toml +++ b/src/hypervisor_kernel/.cargo/config.toml @@ -1,7 +1,4 @@ [build] -target = "aarch64-unknown-none" -rustflags = ["-C", "link-arg=-Thypervisor_kernel/config/linkerscript.ld", "-C", "target-feature=-neon,-sve", "-C", "soft-float=yes"] +target = "aarch64-unknown-none-softfloat" +rustflags = ["-C", "link-arg=-Thypervisor_kernel/config/linkerscript.ld"] target-dir = "../target/" - -[unstable] -build-std = ["core", "compiler_builtins"] diff --git 
a/src/hypervisor_kernel/Cargo.toml b/src/hypervisor_kernel/Cargo.toml index 5dc7823..c247e01 100644 --- a/src/hypervisor_kernel/Cargo.toml +++ b/src/hypervisor_kernel/Cargo.toml @@ -6,9 +6,9 @@ # http://opensource.org/licenses/mit-license.php [package] name = "hypervisor_kernel" -version = "1.4.1" -edition = "2021" -resolver = "2" +edition.workspace = true +license.workspace = true +version.workspace = true [features] default = ["smmu", "i210", "mt27800", "fast_restore", "acpi_table_protection", "contiguous_bit", "advanced_memory_manager"] @@ -22,10 +22,6 @@ contiguous_bit = [] mrs_msr_emulation = [] a64fx = ["mrs_msr_emulation"] advanced_memory_manager = ["common/advanced_memory_manager"] -tftp = [] -edit_dtb_memory = [] -save_dtb = [] -raspberrypi = ["edit_dtb_memory", "save_dtb"] virtio = [] virtio_net = ["virtio"] embed_kernel = [] diff --git a/src/hypervisor_kernel/rust-toolchain.toml b/src/hypervisor_kernel/rust-toolchain.toml deleted file mode 100644 index db77f1b..0000000 --- a/src/hypervisor_kernel/rust-toolchain.toml +++ /dev/null @@ -1,4 +0,0 @@ -[toolchain] -channel = "nightly" -components = ["rust-src"] -targets = ["aarch64-unknown-none"] diff --git a/src/hypervisor_kernel/src/acpi_protect.rs b/src/hypervisor_kernel/src/acpi_protect.rs index 3b7c592..5cfa481 100644 --- a/src/hypervisor_kernel/src/acpi_protect.rs +++ b/src/hypervisor_kernel/src/acpi_protect.rs @@ -9,13 +9,12 @@ //! 
use common::acpi::{RSDP, XSDT, XSDT_STRUCT_SIZE}; -use common::{STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE}; +use common::{GeneralPurposeRegisters, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE}; use crate::memory_hook::{ - add_memory_store_access_handler, StoreAccessHandlerEntry, StoreHookResult, + StoreAccessHandlerEntry, StoreHookResult, add_memory_store_access_handler, }; use crate::paging::add_memory_access_trap; -use crate::StoredRegisters; const EXCEPT_TABLE: [&[u8; 4]; 0] = []; @@ -93,7 +92,7 @@ fn register_acpi_table(table_address: usize, table_length: Option) { pub fn acpi_table_store_handler( _: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, _: u64, _: &StoreAccessHandlerEntry, diff --git a/src/hypervisor_kernel/src/drivers.rs b/src/hypervisor_kernel/src/drivers.rs index 9b71f6a..ab145f5 100644 --- a/src/hypervisor_kernel/src/drivers.rs +++ b/src/hypervisor_kernel/src/drivers.rs @@ -14,6 +14,6 @@ pub mod serial_port; pub mod i210; pub mod mt27800; #[cfg(feature = "virtio")] -mod virtio; +pub mod virtio; #[cfg(feature = "virtio_net")] pub mod virtio_net; diff --git a/src/hypervisor_kernel/src/drivers/i210.rs b/src/hypervisor_kernel/src/drivers/i210.rs index c7ab6d5..ae10cd1 100644 --- a/src/hypervisor_kernel/src/drivers/i210.rs +++ b/src/hypervisor_kernel/src/drivers/i210.rs @@ -8,11 +8,11 @@ //! Intel(R) Ethernet Controller I210 //! 
-use common::{bitmask, PAGE_SIZE, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE}; +use common::{GeneralPurposeRegisters, PAGE_SIZE, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE, bitmask}; use crate::memory_hook::*; +use crate::paging; use crate::pci::{get_configuration_space_data, get_ecam_target_address}; -use crate::{paging, StoredRegisters}; pub const VENDOR_ID: u16 = 0x8086; pub const DEVICE_ID: u16 = 0x1533; @@ -83,7 +83,7 @@ pub fn setup_device(ecam_address: usize, bus: u8, device: u8, function: u8) { let memory_bar = ((get_configuration_space_data(ecam_address, bus, device, function, 0x10, 4)) & !0b1111) as usize | (if is_64bit_bar { - ((get_configuration_space_data(ecam_address, bus, device, function, 0x14, 4)) as usize) + (get_configuration_space_data(ecam_address, bus, device, function, 0x14, 4) as usize) << 32 } else { 0 @@ -175,14 +175,14 @@ fn setup_memory_trap(new_memory_bar: usize) { /* Set up load access handlers */ for e in &I210_LOAD_HANDLERS { - let mut e = e.clone(); + let mut e = *e; e.set_target_address(e.get_target_address() + new_memory_bar); add_memory_load_access_handler(e).expect("Failed to set up the load handler"); } /* Set up store access handlers */ for e in &I210_STORE_HANDLERS { - let mut e = e.clone(); + let mut e = *e; e.set_target_address(e.get_target_address() + new_memory_bar); add_memory_store_access_handler(e).expect("Failed to set up the store handler"); } @@ -205,14 +205,14 @@ fn remove_memory_trap(bar_address: usize) { /* Remove load access handlers */ for e in &I210_LOAD_HANDLERS { - let mut e = e.clone(); + let mut e = *e; e.set_target_address(e.get_target_address() + bar_address); remove_memory_load_access_handler(e).expect("Failed to remove the load handler"); } /* Remove store access handlers */ for e in &I210_STORE_HANDLERS { - let mut e = e.clone(); + let mut e = *e; e.set_target_address(e.get_target_address() + bar_address); remove_memory_store_access_handler(e).expect("Failed to remove the store handler"); } @@ -244,7 +244,7 @@ fn 
remove_expansion_rom_memory_trap(expansion_rom_bar: usize) { fn i210_pci_bar_address_store_handler( accessing_memory_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -282,7 +282,7 @@ fn i210_pci_bar_address_store_handler( fn i210_pci_expansion_rom_bar_address_store_handler( _: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -300,7 +300,7 @@ fn i210_pci_expansion_rom_bar_address_store_handler( fn i210_eeprom_write_register_load_handler( _: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, _: bool, _: bool, @@ -314,7 +314,7 @@ fn i210_eeprom_write_register_load_handler( fn i210_eeprom_write_register_store_handler( _: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -337,7 +337,7 @@ fn i210_eeprom_write_register_store_handler( fn i210_i_nvm_data_store_handler( accessing_memory_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -352,7 +352,7 @@ fn i210_i_nvm_data_store_handler( fn i210_i_flash_burst_registers_store_handler( accessing_memory_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -371,7 +371,7 @@ fn i210_i_flash_burst_registers_store_handler( fn i210_expansion_rom_store_handler( _: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, _data: u64, _: &StoreAccessHandlerEntry, diff --git a/src/hypervisor_kernel/src/drivers/mt27800.rs b/src/hypervisor_kernel/src/drivers/mt27800.rs index acd182d..dfa2186 100644 --- a/src/hypervisor_kernel/src/drivers/mt27800.rs +++ b/src/hypervisor_kernel/src/drivers/mt27800.rs @@ -11,11 +11,11 @@ use core::sync::atomic::{AtomicBool, Ordering}; -use common::{bitmask, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE}; +use 
common::{GeneralPurposeRegisters, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE, bitmask}; use crate::memory_hook::*; +use crate::paging; use crate::pci::{get_configuration_space_data, get_ecam_target_address}; -use crate::{paging, StoredRegisters}; static mut CURRENT_EXPANSION_ROM_BAR: usize = 0; static mut EXPANSION_ROM_SIZE: usize = 0; @@ -116,7 +116,7 @@ fn remove_expansion_rom_memory_trap(expansion_rom_bar: usize) { fn mt27800_pci_expansion_rom_bar_address_store_handler( _: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -134,7 +134,7 @@ fn mt27800_pci_expansion_rom_bar_address_store_handler( fn mt27800_expansion_rom_store_handler( _: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, _: u64, _: &StoreAccessHandlerEntry, @@ -147,7 +147,7 @@ static IS_WRITE_CANCELED: AtomicBool = AtomicBool::new(false); fn mt27800_address_and_data_load_handler( accessing_memory_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, _: bool, _: bool, @@ -166,7 +166,7 @@ fn mt27800_address_and_data_load_handler( fn mt27800_address_and_data_store_handler( accessing_memory_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -187,5 +187,5 @@ fn mt27800_address_and_data_store_handler( IS_WRITE_CANCELED.store(false, Ordering::Relaxed); } } - return StoreHookResult::PassThrough; + StoreHookResult::PassThrough } diff --git a/src/hypervisor_kernel/src/drivers/serial_port.rs b/src/hypervisor_kernel/src/drivers/serial_port.rs index 4106538..901c6c7 100644 --- a/src/hypervisor_kernel/src/drivers/serial_port.rs +++ b/src/hypervisor_kernel/src/drivers/serial_port.rs @@ -93,7 +93,7 @@ impl SerialPort { } core::hint::spin_loop(); } - return Ok(()); + Ok(()) } /// For panic_handler @@ -102,13 +102,13 @@ impl SerialPort { } } -pub unsafe fn init_default_serial_port(info: SerialPortInfo) { - 
DEFAULT_SERIAL_PORT = Some(SerialPort::new(info)); +pub fn init_default_serial_port(info: SerialPortInfo) { + unsafe { DEFAULT_SERIAL_PORT = Some(SerialPort::new(info)) }; } pub unsafe fn force_release_serial_port_lock() { - if let Some(e) = &mut *core::ptr::addr_of_mut!(DEFAULT_SERIAL_PORT) { - e.force_release_write_lock(); + if let Some(e) = unsafe { (&raw mut DEFAULT_SERIAL_PORT).as_mut() }.unwrap() { + unsafe { e.force_release_write_lock() }; } } @@ -131,14 +131,14 @@ impl fmt::Write for SerialPort { } } self.write_lock.unlock(); - return Ok(()); + Ok(()) } } static mut DEFAULT_SERIAL_PORT: Option = None; pub fn print(args: fmt::Arguments) { - if let Some(s) = unsafe { &mut *core::ptr::addr_of_mut!(DEFAULT_SERIAL_PORT) } { + if let Some(s) = unsafe { (&raw mut DEFAULT_SERIAL_PORT).as_mut().unwrap() } { use fmt::Write; let _ = s.write_fmt(args); } diff --git a/src/hypervisor_kernel/src/drivers/serial_port/arm_pl011.rs b/src/hypervisor_kernel/src/drivers/serial_port/arm_pl011.rs index 3a07017..9c8f87f 100644 --- a/src/hypervisor_kernel/src/drivers/serial_port/arm_pl011.rs +++ b/src/hypervisor_kernel/src/drivers/serial_port/arm_pl011.rs @@ -24,7 +24,7 @@ impl SerialPortDevice for SerialArmPl011 { fn write_char(&mut self, c: u8) -> Result<(), ()> { unsafe { ptr::write_volatile((self.base_address + UART_DR) as *mut u8, c) }; - return Ok(()); + Ok(()) } fn is_write_fifo_full(&self) -> bool { diff --git a/src/hypervisor_kernel/src/drivers/serial_port/arm_sbsa_generic_uart.rs b/src/hypervisor_kernel/src/drivers/serial_port/arm_sbsa_generic_uart.rs index 96f368e..e807b0b 100644 --- a/src/hypervisor_kernel/src/drivers/serial_port/arm_sbsa_generic_uart.rs +++ b/src/hypervisor_kernel/src/drivers/serial_port/arm_sbsa_generic_uart.rs @@ -24,7 +24,7 @@ impl SerialPortDevice for SerialSbsaUart { fn write_char(&mut self, c: u8) -> Result<(), ()> { unsafe { ptr::write_volatile((self.base_address + UART_DR) as *mut u8, c) }; - return Ok(()); + Ok(()) } fn 
is_write_fifo_full(&self) -> bool { diff --git a/src/hypervisor_kernel/src/drivers/serial_port/meson_gx_uart.rs b/src/hypervisor_kernel/src/drivers/serial_port/meson_gx_uart.rs index 91217f7..b2a8862 100644 --- a/src/hypervisor_kernel/src/drivers/serial_port/meson_gx_uart.rs +++ b/src/hypervisor_kernel/src/drivers/serial_port/meson_gx_uart.rs @@ -24,7 +24,7 @@ impl SerialPortDevice for SerialMesonGxUart { fn write_char(&mut self, c: u8) -> Result<(), ()> { unsafe { ptr::write_volatile((self.base_address + UART_WR_FIFO) as *mut u32, c as u32) }; - return Ok(()); + Ok(()) } fn is_write_fifo_full(&self) -> bool { diff --git a/src/hypervisor_kernel/src/drivers/virtio.rs b/src/hypervisor_kernel/src/drivers/virtio.rs index da252e5..c910be6 100644 --- a/src/hypervisor_kernel/src/drivers/virtio.rs +++ b/src/hypervisor_kernel/src/drivers/virtio.rs @@ -9,8 +9,6 @@ //! This module provides virtio common interface and virtio queue //! -use core::mem::size_of; - pub(super) const VIRTIO_MMIO_MAGIC: usize = 0x000; pub(super) const VIRTIO_MMIO_MAGIC_VALUE: u32 = 0x74726976; pub(super) const VIRTIO_MMIO_VERSION: usize = 0x04; @@ -92,7 +90,7 @@ impl VirtQueue { } #[allow(dead_code)] - pub(super) const fn is_avail_ring_empty(&self) -> bool { + pub(super) fn is_avail_ring_empty(&self) -> bool { self.last_avail_id == unsafe { &*self.avail_ring }.idx } @@ -155,7 +153,7 @@ impl VirtQueue { + size_of::() * (used_id as usize)) as *mut VirtQueueUsedElement, VirtQueueUsedElement { - id: (id as u32), + id: id as u32, length, }, ) @@ -164,7 +162,7 @@ impl VirtQueue { } } -pub(super) fn append_virtio_ssdt( +pub fn append_virtio_ssdt( rsdp_address: usize, device_name: [u8; 4], mmio_address: usize, @@ -186,9 +184,7 @@ pub(super) fn append_virtio_ssdt( const INTID_OFFSET: usize = 69; /* Modify AML */ - for i in 0..4 { - ssdt_template[DEVICE_NAME_OFFSET + i] = device_name[i]; - } + ssdt_template[DEVICE_NAME_OFFSET..(4 + DEVICE_NAME_OFFSET)].copy_from_slice(&device_name); if mmio_address > u32::MAX 
as usize { println!( "MMIO Base Address({:#X}) is not 32bit address", diff --git a/src/hypervisor_kernel/src/drivers/virtio_net.rs b/src/hypervisor_kernel/src/drivers/virtio_net.rs index dde481b..5e83284 100644 --- a/src/hypervisor_kernel/src/drivers/virtio_net.rs +++ b/src/hypervisor_kernel/src/drivers/virtio_net.rs @@ -12,18 +12,20 @@ //! It may be not able to create virtio device on some devices. //! -use core::mem::size_of; use core::num::NonZeroUsize; +use common::GeneralPurposeRegisters; use common::spin_flag::SpinLockFlag; use crate::memory_hook::*; -use crate::{paging, StoredRegisters}; +use crate::paging; use super::virtio::*; const VIRTIO_NET_F_MRG_RXBUF: u32 = 1 << 15; +pub type Callback = fn(frame: &[u8], send_frame: &mut dyn FnMut(&[u8]) -> Result<(), ()>); + /// Virtio Network Entry Descriptor #[repr(C)] struct VirtioNetHdr { @@ -79,7 +81,7 @@ pub struct VirtioNetwork { mac_address: [u8; 6], features_select: u8, //base_address: usize, - callback: fn(frame: &[u8], send_frame: &mut dyn FnMut(&[u8]) -> Result<(), ()>), + callback: Callback, } impl VirtioNetwork { @@ -110,7 +112,7 @@ impl VirtioNetwork { pub fn create_new_device( base_address: usize, int_id: u32, - receive_callback: fn(frame: &[u8], send_frame: &mut dyn FnMut(&[u8]) -> Result<(), ()>), + receive_callback: Callback, rsdp_address: Option, device_suffix: u8, ) -> Result<&'static mut Self, ()> { @@ -398,7 +400,7 @@ impl VirtioNetwork { fn load_handler( accessing_memory_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, _: bool, _: bool, @@ -462,19 +464,17 @@ impl VirtioNetwork { } _ => { /* Ignore */ } } - } else { - if 0x100 <= offset && offset <= 0x106 { - net.lock.lock(); - value = net.mac_address[offset - 0x100] as u64; - net.lock.unlock(); - } + } else if (0x100..=0x106).contains(&offset) { + net.lock.lock(); + value = net.mac_address[offset - 0x100] as u64; + net.lock.unlock(); } LoadHookResult::Data(value) } fn store_handler( accessing_memory_address: 
usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, entry: &StoreAccessHandlerEntry, @@ -538,7 +538,7 @@ impl VirtioNetwork { } _ => { /* Ignore */ } } - if 0x100 <= offset && offset <= 0x106 { + if (0x100..=0x106).contains(&offset) { net.lock.lock(); net.mac_address[offset - 0x100] = data as u8; net.lock.unlock(); diff --git a/src/hypervisor_kernel/src/emulation.rs b/src/hypervisor_kernel/src/emulation.rs index dca0605..55ce57d 100644 --- a/src/hypervisor_kernel/src/emulation.rs +++ b/src/hypervisor_kernel/src/emulation.rs @@ -12,31 +12,39 @@ use core::arch::asm; use common::cpu::{ - convert_virtual_address_to_intermediate_physical_address_el0_read, + SPSR_EL2_M, SPSR_EL2_M_EL0T, convert_virtual_address_to_intermediate_physical_address_el0_read, convert_virtual_address_to_intermediate_physical_address_el1_read, convert_virtual_address_to_intermediate_physical_address_el1_write, convert_virtual_address_to_physical_address_el2_read, - convert_virtual_address_to_physical_address_el2_write, SPSR_EL2_M, SPSR_EL2_M_EL0T, + convert_virtual_address_to_physical_address_el2_write, }; -use common::{bitmask, PAGE_MASK, PAGE_SIZE}; +use common::{GeneralPurposeRegisters, PAGE_MASK, PAGE_SIZE, bitmask}; + pub use load::read_memory; -use crate::{handler_panic, paging::map_address, StoredRegisters}; +use crate::paging::map_address; mod load; mod store; const REGISTER_NUMBER_XZR: u8 = 31; +#[derive(Copy, Clone, Debug)] +pub enum EmulationError { + InvalidAddress, + Unsupported, + AlignmentError, +} + #[allow(unused_variables)] pub fn data_abort_handler( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, esr: u64, elr: u64, far: u64, hpfar: u64, spsr: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { #[cfg(debug_assertions)] if (esr & (1 << 24)) != 0 { let sas = (esr >> 22) & 0b11; @@ -89,16 +97,17 @@ pub fn data_abort_handler( } fn emulate_instruction( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, 
target_instruction: u32, _elr: u64, far: u64, hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { /* ARM DDI 0487G.a ID011921 C4-280 */ let op0 = ((target_instruction & bitmask!(28, 25)) >> 25) as u8; if (op0 & 0b0101) != 0b0100 { - handler_panic!(s_r, "Not Load/Store Instruction: {:#X}", target_instruction); + println!("Not Load/Store Instruction: {:#X}", target_instruction); + return Err(EmulationError::Unsupported); } let op1 = (op0 >> 1) & 1; let op0 = ((target_instruction & bitmask!(31, 28)) >> 28) as u8; @@ -120,7 +129,8 @@ fn emulate_instruction( /* unsigned immediate (No post|pre indexing) */ if op1 != 0 { /* V */ - handler_panic!(s_r, "SIMD is not supported: {:#X}", target_instruction); + println!("SIMD is not supported: {:#X}", target_instruction); + return Err(EmulationError::Unsupported); } let opc = ((target_instruction & bitmask!(23, 22)) >> 22) as u8; return if opc == 0b00 { @@ -144,7 +154,8 @@ fn emulate_instruction( pr_debug!("Load/Store Register Offset"); if op1 != 0 { /* V */ - handler_panic!(s_r, "SIMD is not supported: {:#X}", target_instruction); + println!("SIMD is not supported: {:#X}", target_instruction); + return Err(EmulationError::Unsupported); } let opc = ((target_instruction & bitmask!(23, 22)) >> 22) as u8; return if opc == 0b00 { @@ -173,7 +184,8 @@ fn emulate_instruction( } else { if op1 != 0 { /* V */ - handler_panic!(s_r, "SIMD is not supported: {:#X}", target_instruction); + println!("SIMD is not supported: {:#X}", target_instruction); + return Err(EmulationError::Unsupported); } let opc = ((target_instruction & bitmask!(23, 22)) >> 22) as u8; return if opc == 0b00 { @@ -189,7 +201,8 @@ fn emulate_instruction( pr_debug!("Load/Store Register Pair"); if op1 != 0 { /* V */ - handler_panic!(s_r, "SIMD is not supported: {:#X}", target_instruction); + println!("SIMD is not supported: {:#X}", target_instruction); + return Err(EmulationError::Unsupported); } return if (target_instruction & (1 << 22)) != 0 { 
pr_debug!("LDP"); @@ -204,18 +217,19 @@ fn emulate_instruction( pr_debug!("Load Register Literal"); if op1 != 0 { /* V */ - handler_panic!(s_r, "SIMD is not supported: {:#X}", target_instruction); + println!("SIMD is not supported: {:#X}", target_instruction); + return Err(EmulationError::Unsupported); } return load::emulate_literal_load_register(s_r, target_instruction, far, hpfar); } } } println!("Unknown Instruction: {:#X}", target_instruction); - Err(()) + Err(EmulationError::Unsupported) } #[cfg(feature = "mrs_msr_emulation")] -pub fn mrs_msr_handler(s_r: &mut StoredRegisters, esr: u64) -> Result<(), ()> { +pub fn mrs_msr_handler(s_r: &mut GeneralPurposeRegisters, esr: u64) -> Result<(), EmulationError> { let op0 = ((esr & bitmask!(21, 20)) >> 20) as u8; let op2 = ((esr & bitmask!(19, 17)) >> 17) as u8; let op1 = ((esr & bitmask!(16, 14)) >> 14) as u8; @@ -227,7 +241,7 @@ pub fn mrs_msr_handler(s_r: &mut StoredRegisters, esr: u64) -> Result<(), ()> { let reg = if target_register == REGISTER_NUMBER_XZR { &mut xzr } else { - get_register_reference_mut(s_r, target_register) + &mut s_r[target_register as usize] }; macro_rules! 
emulate_mrs_msr_if_matched { @@ -277,28 +291,30 @@ pub fn mrs_msr_handler(s_r: &mut StoredRegisters, esr: u64) -> Result<(), ()> { "Unknown Register: S{}_{}_C{}_C{}_{}", op0, op1, crn, crm, op2 ); - Err(()) + Err(EmulationError::Unsupported) } -fn faulting_va_to_ipa_load(far: u64) -> Result { +fn faulting_va_to_ipa_load(far: u64) -> Result { convert_virtual_address_to_intermediate_physical_address_el1_read(far as usize) + .or(Err(EmulationError::InvalidAddress)) } -fn faulting_va_to_ipa_store(far: u64) -> Result { +fn faulting_va_to_ipa_store(far: u64) -> Result { convert_virtual_address_to_intermediate_physical_address_el1_write(far as usize) + .or(Err(EmulationError::InvalidAddress)) } fn get_virtual_address_to_access_ipa( intermediate_physical_address: usize, is_write_access: bool, -) -> Result { +) -> Result { if is_write_access { if let Ok(pa) = convert_virtual_address_to_physical_address_el2_write(intermediate_physical_address) { return if pa != intermediate_physical_address { println!("IPA({:#X}) != VA({:#X})", intermediate_physical_address, pa); - Err(()) + Err(EmulationError::InvalidAddress) } else { Ok(intermediate_physical_address) }; @@ -308,7 +324,7 @@ fn get_virtual_address_to_access_ipa( { return if pa != intermediate_physical_address { println!("IPA({:#X}) != VA({:#X})", intermediate_physical_address, pa); - Err(()) + Err(EmulationError::InvalidAddress) } else { Ok(intermediate_physical_address) }; @@ -322,27 +338,19 @@ fn get_virtual_address_to_access_ipa( true, false, true, - )?; + ) + .or(Err(EmulationError::InvalidAddress))?; Ok(intermediate_physical_address) } -fn get_register_reference_mut(s_r: &mut StoredRegisters, index: u8) -> &mut u64 { - unsafe { - &mut core::mem::transmute::< - &mut StoredRegisters, - &mut [u64; core::mem::size_of::() / core::mem::size_of::()], - >(s_r)[index as usize] - } -} - fn write_back_index_register_imm9(base_register: &mut u64, imm9_u32: u32) { unsafe { asm!(" sbfx {imm9}, {imm9}, #0, #9 add {base_reg}, 
{base_reg}, {imm9} ", - imm9 = inout(reg) (imm9_u32 as u64) => _ , - base_reg = inout(reg) *base_register) + imm9 = inout(reg) imm9_u32 as u64 => _, + base_reg = inout(reg) *base_register) }; } @@ -352,7 +360,7 @@ fn write_back_index_register_imm7(base_register: &mut u64, imm7_u32: u32) { sbfx {imm7}, {imm7}, #0, #7 add {base_reg}, {base_reg}, {imm7} ", - imm7 = inout(reg) (imm7_u32 as u64) => _ , + imm7 = inout(reg) imm7_u32 as u64 => _, base_reg = inout(reg) *base_register) }; } diff --git a/src/hypervisor_kernel/src/emulation/load.rs b/src/hypervisor_kernel/src/emulation/load.rs index 62fc26a..05560a9 100644 --- a/src/hypervisor_kernel/src/emulation/load.rs +++ b/src/hypervisor_kernel/src/emulation/load.rs @@ -11,22 +11,22 @@ //! Supported: ldr, ldp (except Atomic, SIMD) //! -use common::{bitmask, cpu::advance_elr_el2, STAGE_2_PAGE_SHIFT}; +use common::{GeneralPurposeRegisters, STAGE_2_PAGE_SHIFT, bitmask, cpu::advance_elr_el2}; -use crate::memory_hook::{memory_load_hook_handler, LoadHookResult}; -use crate::StoredRegisters; +use crate::memory_hook::{LoadHookResult, memory_load_hook_handler}; use super::{ - faulting_va_to_ipa_load, get_register_reference_mut, get_virtual_address_to_access_ipa, - write_back_index_register_imm7, write_back_index_register_imm9, REGISTER_NUMBER_XZR, + EmulationError, REGISTER_NUMBER_XZR, faulting_va_to_ipa_load, + get_virtual_address_to_access_ipa, write_back_index_register_imm7, + write_back_index_register_imm9, }; pub fn emulate_load_register( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let target_register = (target_instruction & bitmask!(4, 0)) as u8; let intermediate_physical_load_address = faulting_va_to_ipa_load(far)?; //let op2 = ((target_instruction & bitmask!(24, 23)) >> 23) as u8; @@ -51,7 +51,8 @@ pub fn emulate_load_register( } ); if op4 == 0b10 { - unimplemented!("UnPrivileged Access is not 
implemented..."); + println!("UnPrivileged Access is not implemented..."); + return Err(EmulationError::Unsupported); } let size = (target_instruction >> 30) as u8; @@ -71,21 +72,20 @@ pub fn emulate_load_register( if (op4 & 1) != 0 { pr_debug!("Post/Pre Indexed"); let imm9 = (target_instruction & bitmask!(20, 12)) >> 12; - let base_register = - get_register_reference_mut(s_r, ((target_instruction & bitmask!(9, 5)) >> 5) as u8); + let base_register = &mut s_r[((target_instruction & bitmask!(9, 5)) >> 5) as usize]; write_back_index_register_imm9(base_register, imm9); } advance_elr_el2(); - return Ok(()); + Ok(()) } pub fn emulate_unsigned_immediate_load_register( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let target_register = (target_instruction & bitmask!(4, 0)) as u8; let intermediate_physical_load_address = faulting_va_to_ipa_load(far)?; @@ -108,15 +108,15 @@ pub fn emulate_unsigned_immediate_load_register( sse, )?; advance_elr_el2(); - return Ok(()); + Ok(()) } pub fn emulate_load_register_register_offset( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let target_register = (target_instruction & bitmask!(4, 0)) as u8; let intermediate_physical_load_address = faulting_va_to_ipa_load(far)?; @@ -139,15 +139,15 @@ pub fn emulate_load_register_register_offset( sse, )?; advance_elr_el2(); - return Ok(()); + Ok(()) } pub fn emulate_literal_load_register( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let target_register = (target_instruction & bitmask!(4, 0)) as u8; let intermediate_physical_load_address = faulting_va_to_ipa_load(far)?; @@ -171,15 +171,15 @@ pub fn emulate_literal_load_register( 
sse, )?; advance_elr_el2(); - return Ok(()); + Ok(()) } pub fn emulate_load_pair( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let op2 = ((target_instruction & bitmask!(24, 23)) >> 23) as u8; let opc = (target_instruction >> 30) as u8; let sf = (opc & (1 << 1)) != 0; @@ -212,7 +212,7 @@ pub fn emulate_load_pair( >> STAGE_2_PAGE_SHIFT) { println!("LDP alignment error."); - return Err(()); + return Err(EmulationError::AlignmentError); } load_from_address_and_store_into_register( s_r, @@ -234,22 +234,21 @@ pub fn emulate_load_pair( if is_pre_or_post_indexed { pr_debug!("Post/Pre Indexed"); let imm7 = (target_instruction & bitmask!(21, 15)) >> 15; - let base_register = - get_register_reference_mut(s_r, ((target_instruction & bitmask!(9, 5)) >> 5) as u8); + let base_register = &mut s_r[((target_instruction & bitmask!(9, 5)) >> 5) as usize]; write_back_index_register_imm7(base_register, imm7); } advance_elr_el2(); - return Ok(()); + Ok(()) } fn load_from_address_and_store_into_register( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, intermediate_physical_load_address: usize, target_register: u8, size: u8, sf: bool, /* sf is usable only if sse is true */ sse: bool, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let sf = !sse || sf; pr_debug!( @@ -264,7 +263,7 @@ fn load_from_address_and_store_into_register( if !sf && size == 0b11 { println!("Invalid Instruction: Loading a 64bit data into the 32bit register."); - return Err(()); + return Err(EmulationError::Unsupported); } let data = @@ -281,9 +280,9 @@ fn load_from_address_and_store_into_register( pr_debug!("Data: {:#X}", data); if target_register != REGISTER_NUMBER_XZR { - *get_register_reference_mut(s_r, target_register) = data; + s_r[target_register as usize] = data; } - return Ok(()); + Ok(()) } fn _read_memory(load_virtual_address: usize, access_size: u8) -> u64 
{ diff --git a/src/hypervisor_kernel/src/emulation/store.rs b/src/hypervisor_kernel/src/emulation/store.rs index 3c59414..090d3be 100644 --- a/src/hypervisor_kernel/src/emulation/store.rs +++ b/src/hypervisor_kernel/src/emulation/store.rs @@ -11,22 +11,22 @@ //! Supported: str, stp (except Atomic, SIMD) //! -use common::{bitmask, cpu::advance_elr_el2, STAGE_2_PAGE_SHIFT}; +use common::{GeneralPurposeRegisters, STAGE_2_PAGE_SHIFT, bitmask, cpu::advance_elr_el2}; -use crate::memory_hook::{memory_store_hook_handler, StoreHookResult}; -use crate::{handler_panic, StoredRegisters}; +use crate::memory_hook::{StoreHookResult, memory_store_hook_handler}; use super::{ - faulting_va_to_ipa_store, get_register_reference_mut, get_virtual_address_to_access_ipa, - write_back_index_register_imm7, write_back_index_register_imm9, REGISTER_NUMBER_XZR, + EmulationError, REGISTER_NUMBER_XZR, faulting_va_to_ipa_store, + get_virtual_address_to_access_ipa, write_back_index_register_imm7, + write_back_index_register_imm9, }; pub fn emulate_store_register( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let target_register = (target_instruction & bitmask!(4, 0)) as u8; let intermediate_physical_store_address = faulting_va_to_ipa_store(far)?; //let op2 = ((target_instruction & bitmask!(24, 23)) >> 23) as u8; @@ -47,12 +47,17 @@ pub fn emulate_store_register( } ); if op4 == 0b10 { - unimplemented!("UnPrivileged Access is not implemented..."); + println!( + "UnPrivileged Access is not supported: {:#X}", + target_instruction + ); + return Err(EmulationError::Unsupported); } if ((target_instruction >> 26) & 1) != 0 { /* V */ - handler_panic!(s_r, "SIMD is not supported: {:#X}", target_instruction); + println!("SIMD is not supported: {:#X}", target_instruction); + return Err(EmulationError::Unsupported); } pr_debug!("Size: {:#b}", size); @@ -65,21 +70,20 @@ pub fn 
emulate_store_register( if (op4 & 1) != 0 { pr_debug!("Post/Pre Indexed"); let imm9 = (target_instruction & bitmask!(20, 12)) >> 12; - let base_register = - get_register_reference_mut(s_r, ((target_instruction & bitmask!(9, 5)) >> 5) as u8); + let base_register = &mut s_r[((target_instruction & bitmask!(9, 5)) >> 5) as usize]; write_back_index_register_imm9(base_register, imm9); } advance_elr_el2(); - return Ok(()); + Ok(()) } pub fn emulate_unsigned_immediate_store_register( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let target_register = (target_instruction & bitmask!(4, 0)) as u8; let intermediate_physical_store_address = faulting_va_to_ipa_store(far)?; let size = (target_instruction >> 30) as u8; @@ -90,15 +94,15 @@ pub fn emulate_unsigned_immediate_store_register( size, )?; advance_elr_el2(); - return Ok(()); + Ok(()) } pub fn emulate_store_register_register_offset( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let target_register = (target_instruction & bitmask!(4, 0)) as u8; let intermediate_physical_store_address = faulting_va_to_ipa_store(far)?; let size = (target_instruction >> 30) as u8; @@ -109,15 +113,15 @@ pub fn emulate_store_register_register_offset( size, )?; advance_elr_el2(); - return Ok(()); + Ok(()) } pub fn emulate_store_pair( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, target_instruction: u32, far: u64, _hpfar: u64, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let op2 = ((target_instruction & bitmask!(24, 23)) >> 23) as u8; let opc = (target_instruction >> 30) as u8; let sf = (opc & (1 << 1)) != 0; @@ -150,10 +154,11 @@ pub fn emulate_store_pair( >> STAGE_2_PAGE_SHIFT) { println!("STP alignment error."); - return Err(()); + return 
Err(EmulationError::AlignmentError); } if sse { - unimplemented!(); + println!("SSE is not supported: {:#X}", target_instruction); + return Err(EmulationError::Unsupported); } store_register_into_address( s_r, @@ -171,20 +176,19 @@ pub fn emulate_store_pair( if is_pre_or_post_indexed { pr_debug!("Post/Pre Indexed"); let imm7 = (target_instruction & bitmask!(21, 15)) >> 15; - let base_register = - get_register_reference_mut(s_r, ((target_instruction & bitmask!(9, 5)) >> 5) as u8); + let base_register = &mut s_r[((target_instruction & bitmask!(9, 5)) >> 5) as usize]; write_back_index_register_imm7(base_register, imm7); } advance_elr_el2(); - return Ok(()); + Ok(()) } fn store_register_into_address( - s_r: &mut StoredRegisters, + s_r: &mut GeneralPurposeRegisters, intermediate_physical_store_address: usize, target_register: u8, size: u8, -) -> Result<(), ()> { +) -> Result<(), EmulationError> { let virtual_address_to_store = get_virtual_address_to_access_ipa(intermediate_physical_store_address, true)?; @@ -204,7 +208,7 @@ fn store_register_into_address( let reg_data = if target_register == REGISTER_NUMBER_XZR { 0 } else { - *get_register_reference_mut(s_r, target_register) + s_r[target_register as usize] }; let data = @@ -218,11 +222,11 @@ fn store_register_into_address( }; pr_debug!("Data: {:#X}", data); - _write_memory(virtual_address_to_store, size, data); - return Ok(()); + write_memory(virtual_address_to_store, size, data); + Ok(()) } -pub fn _write_memory(store_address: usize, access_size: u8, data: u64) { +pub fn write_memory(store_address: usize, access_size: u8, data: u64) { use core::ptr::write_volatile; match access_size { 0b00 => unsafe { write_volatile(store_address as *mut u8, data as u8) }, @@ -232,13 +236,3 @@ pub fn _write_memory(store_address: usize, access_size: u8, data: u64) { _ => unreachable!(), }; } - -#[allow(dead_code)] -pub fn write_memory(intermediate_physical_store_address: usize, access_size: u8, data: u64) { - _write_memory( - 
get_virtual_address_to_access_ipa(intermediate_physical_store_address, true) - .expect("Failed to convert Address"), - access_size, - data, - ) -} diff --git a/src/hypervisor_kernel/src/fast_restore.rs b/src/hypervisor_kernel/src/fast_restore.rs index 4810314..e30a403 100644 --- a/src/hypervisor_kernel/src/fast_restore.rs +++ b/src/hypervisor_kernel/src/fast_restore.rs @@ -8,13 +8,11 @@ use crate::paging::{ add_memory_access_trap, map_address, remake_stage2_page_table, remove_memory_access_trap, }; -use crate::{ - allocate_memory, free_memory, gic, multi_core, psci, smmu, StoredRegisters, BSP_MPIDR, -}; +use crate::{BSP_MPIDR, allocate_memory, free_memory, gic, multi_core, psci, smmu}; use common::{ - cpu, paging, MemorySaveListEntry, MEMORY_SAVE_ADDRESS_ONDEMAND_FLAG, PAGE_MASK, PAGE_SHIFT, - PAGE_SIZE, STACK_PAGES, + GeneralPurposeRegisters, MEMORY_SAVE_ADDRESS_ONDEMAND_FLAG, MemorySaveListEntry, PAGE_MASK, + PAGE_SHIFT, PAGE_SIZE, STACK_PAGES, cpu, paging, }; use core::mem::MaybeUninit; @@ -24,14 +22,15 @@ use core::sync::atomic::{AtomicBool, Ordering}; static IS_RESTORE_NEEDED: AtomicBool = AtomicBool::new(false); static mut MEMORY_SAVE_LIST: MaybeUninit<&'static mut [MemorySaveListEntry]> = MaybeUninit::uninit(); -static mut SAVED_SYSTEM_REGISTERS: MaybeUninit = MaybeUninit::uninit(); -static mut SAVED_REGISTERS: MaybeUninit = MaybeUninit::uninit(); +static mut SAVED_SYSTEM_REGISTERS: SavedSystemRegisters = SavedSystemRegisters::new(); +static mut SAVED_REGISTERS: GeneralPurposeRegisters = [0; 32]; static mut ORIGINAL_VTTBR_EL2: u64 = 0; pub const HVC_EXIT_BOOT_SERVICE_TRAP: u16 = 0xFFF0; pub const HVC_AFTER_EXIT_BOOT_SERVICE_TRAP: u16 = 0xFFF1; -struct SavedRegisters { +#[derive(Clone)] +struct SavedSystemRegisters { cpacr_el1: u64, ttbr0_el1: u64, /*ttbr1_el1: u64,*/ @@ -44,15 +43,41 @@ struct SavedRegisters { sp_el1: u64, } +impl SavedSystemRegisters { + const fn new() -> Self { + Self { + cpacr_el1: 0, + ttbr0_el1: 0, + tcr_el1: 0, + mair_el1: 0, + 
sctlr_el1: 0, + vbar_el1: 0, + spsr_el2: 0, + elr_el2: 0, + sp_el1: 0, + } + } +} + pub fn add_memory_save_list(list: *mut [MemorySaveListEntry]) { - unsafe { MEMORY_SAVE_LIST.write(&mut *list) }; + unsafe { + (&raw mut MEMORY_SAVE_LIST) + .as_mut() + .unwrap() + .write(&mut *list) + }; } pub fn create_memory_trap_for_save_memory() { unsafe { ORIGINAL_VTTBR_EL2 = cpu::get_vttbr_el2() }; let page_table = remake_stage2_page_table().expect("Failed to remake page table."); cpu::set_vttbr_el2(page_table as u64); - let list = unsafe { MEMORY_SAVE_LIST.assume_init_read() }; + let list = unsafe { + (&raw const MEMORY_SAVE_LIST) + .as_ref() + .unwrap() + .assume_init_read() + }; for e in list { if e.num_of_pages == 0 && e.memory_start == 0 { break; @@ -76,7 +101,7 @@ pub fn check_memory_access_for_memory_save_list(ec: u8, far_el2: u64) -> bool { add_memory_area_to_memory_save_list(far_el2); return true; } - return false; + false } fn compress_memory_save_list(list: &mut [MemorySaveListEntry]) -> Option { @@ -107,7 +132,7 @@ fn compress_memory_save_list(list: &mut [MemorySaveListEntry]) -> Option } } } - return None; + None } #[inline(never)] @@ -119,12 +144,20 @@ fn add_memory_area_to_memory_save_list(far_el2: u64) { pr_debug!("Fault Address: {:#X}", fault_address); let mut available_entry: Option<*mut MemorySaveListEntry> = None; - let list_length = unsafe { MEMORY_SAVE_LIST.assume_init_read() }.len(); - - for (i, e) in unsafe { MEMORY_SAVE_LIST.assume_init_read() } - .iter_mut() - .enumerate() - { + let list_ptr = &raw const MEMORY_SAVE_LIST; + let mut is_registered = false; + let mut should_clear_next = false; + + for e in unsafe { list_ptr.as_ref().unwrap().assume_init_read() } { + if should_clear_next { + *e = MemorySaveListEntry { + memory_start: 0, + saved_address: 0, + num_of_pages: 0, + }; + assert!(is_registered); + break; + } if e.num_of_pages == 0 && e.memory_start == 0 { let new_entry = MemorySaveListEntry { memory_start: fault_address, @@ -133,42 +166,41 @@ fn 
add_memory_area_to_memory_save_list(far_el2: u64) { }; if let Some(available_entry) = available_entry { unsafe { *available_entry = new_entry }; + is_registered = true; + break; } else { *e = new_entry; - if i + 1 != list_length { - unsafe { - MEMORY_SAVE_LIST.assume_init_read()[i + 1] = MemorySaveListEntry { - memory_start: 0, - saved_address: 0, - num_of_pages: 0, - } - }; - } + is_registered = true; + should_clear_next = true; + continue; } - break; } if e.saved_address == MEMORY_SAVE_ADDRESS_ONDEMAND_FLAG { available_entry = Some(e); } else if e.memory_start == fault_address + PAGE_SIZE { e.memory_start = fault_address; e.num_of_pages += 1; + is_registered = true; break; } else if e.memory_start + ((e.num_of_pages as usize) << PAGE_SHIFT) == fault_address { e.num_of_pages += 1; + is_registered = true; break; } - if i + 1 == list_length { - let list = unsafe { MEMORY_SAVE_LIST.assume_init_read() }; - if let Some(i) = compress_memory_save_list(list) { - list[i] = MemorySaveListEntry { + } + if !is_registered { + if let Some(i) = + compress_memory_save_list(unsafe { list_ptr.as_ref().unwrap().assume_init_read() }) + { + unsafe { + list_ptr.as_ref().unwrap().assume_init_read()[i] = MemorySaveListEntry { memory_start: fault_address, saved_address: 0, num_of_pages: 1, - }; - break; - } else { - panic!("There is no available entry"); - } + } + }; + } else { + panic!("There is no available entry"); } } remove_memory_access_trap(fault_address, PAGE_SIZE).expect("Failed to remove memory trap"); @@ -239,7 +271,7 @@ pub fn save_original_instruction_and_insert_hvc(address: usize, hvc_number: u16) ) .expect("Failed to map memory"); } - let hvc_instruction = 0b11010100000 << 21 | (hvc_number as u32) << 5 | 0b00010; + let hvc_instruction = (0b11010100000 << 21) | ((hvc_number as u32) << 5) | 0b00010; unsafe { ORIGINAL_INSTRUCTION = *(address as *const u32); *(address as *mut u32) = hvc_instruction; @@ -253,7 +285,7 @@ pub fn add_trap_to_exit_boot_service(address: usize) { 
save_original_instruction_and_insert_hvc(address, HVC_EXIT_BOOT_SERVICE_TRAP); } -pub fn exit_boot_service_trap_main(regs: &mut StoredRegisters, elr: u64) { +pub fn exit_boot_service_trap_main(regs: &mut GeneralPurposeRegisters, elr: u64) { if unsafe { ORIGINAL_INSTRUCTION } == 0 { return; } @@ -261,10 +293,10 @@ pub fn exit_boot_service_trap_main(regs: &mut StoredRegisters, elr: u64) { let hvc_address = elr as usize - cpu::AA64_INSTRUCTION_SIZE; unsafe { *(hvc_address as *mut u32) = ORIGINAL_INSTRUCTION }; cpu::set_elr_el2(hvc_address as u64); - save_original_instruction_and_insert_hvc(regs.x30 as usize, HVC_AFTER_EXIT_BOOT_SERVICE_TRAP); + save_original_instruction_and_insert_hvc(regs[30] as usize, HVC_AFTER_EXIT_BOOT_SERVICE_TRAP); } -pub fn after_exit_boot_service_trap_main(regs: &mut StoredRegisters, elr: u64) { +pub fn after_exit_boot_service_trap_main(regs: &mut GeneralPurposeRegisters, elr: u64) { if unsafe { ORIGINAL_INSTRUCTION } == 0 { return; } @@ -277,15 +309,20 @@ pub fn after_exit_boot_service_trap_main(regs: &mut StoredRegisters, elr: u64) { cpu::set_elr_el2(hvc_address as u64); cpu::flush_tlb_el1(); cpu::clear_instruction_cache_all(); - pr_debug!("ExitBootServiceStatus: {:#X}", regs.x0); - if regs.x0 != 0 { + pr_debug!("ExitBootServiceStatus: {:#X}", regs[0]); + if regs[0] != 0 { panic!("ExitBootService is failed(TODO: reset trap point and continue...)"); } /* Save current status */ - save_memory(unsafe { MEMORY_SAVE_LIST.assume_init_read() }); + save_memory(unsafe { + (&raw mut MEMORY_SAVE_LIST) + .as_ref() + .unwrap() + .assume_init_read() + }); /* Store registers */ - let r = SavedRegisters { + let r = SavedSystemRegisters { cpacr_el1: cpu::get_cpacr_el1(), ttbr0_el1: cpu::get_ttbr0_el1(), tcr_el1: cpu::get_tcr_el1(), @@ -296,15 +333,15 @@ pub fn after_exit_boot_service_trap_main(regs: &mut StoredRegisters, elr: u64) { elr_el2: cpu::get_elr_el2(), sp_el1: cpu::get_sp_el1(), }; - unsafe { SAVED_SYSTEM_REGISTERS.write(r) }; - unsafe { 
SAVED_REGISTERS.write(regs.clone()) }; + unsafe { *(&raw mut SAVED_SYSTEM_REGISTERS).as_mut().unwrap() = r }; + unsafe { *(&raw mut SAVED_REGISTERS).as_mut().unwrap() = *regs }; cpu::set_vttbr_el2(unsafe { ORIGINAL_VTTBR_EL2 }); /* TODO: free old page table */ unsafe { ORIGINAL_VTTBR_EL2 = 0 }; pr_debug!("Remove page table for memory save"); } /// If you disable all entries of Stage2 Page Table, -/// don't call [`super::paging::add_memory_access_trap`] and [`super::paging::remove_memory_access_trap`] +/// don't call [`add_memory_access_trap`] and [`remove_memory_access_trap`] /// until you re-enable the entries. fn modify_all_enable_bit_of_stage2_top_level_entries(is_enabled: bool) { let stage_2_page_table = paging::TTBR::new(cpu::get_vttbr_el2()).get_base_address(); @@ -324,7 +361,7 @@ fn modify_all_enable_bit_of_stage2_top_level_entries(is_enabled: bool) { let table = unsafe { core::slice::from_raw_parts_mut( stage_2_page_table as *mut u64, - (paging::PAGE_TABLE_SIZE / core::mem::size_of::()) * num_of_pages as usize, + (paging::PAGE_TABLE_SIZE / size_of::()) * num_of_pages as usize, ) }; if is_enabled { @@ -372,7 +409,10 @@ pub fn perform_restore_if_needed() { fn restore_main() -> ! { cpu::local_irq_fiq_save(); if unsafe { BSP_MPIDR } != cpu::get_mpidr_el1() { - pr_debug!("This CPU(MPIDR: {:#X}) is not BSP, currently perform CPU_OFF(TODO: use AP to copy memory)", cpu::get_mpidr_el1()); + pr_debug!( + "This CPU(MPIDR: {:#X}) is not BSP, currently perform CPU_OFF(TODO: use AP to copy memory)", + cpu::get_mpidr_el1() + ); let result = multi_core::power_off_cpu(); panic!( "Failed to call CPU_OFF: {:#X?}", @@ -410,7 +450,12 @@ fn restore_main() -> ! 
{ smmu::restore_smmu_status(); /* Restore saved registers */ - let saved_registers = unsafe { SAVED_SYSTEM_REGISTERS.assume_init_read() }; + let saved_registers = unsafe { + (&raw const SAVED_SYSTEM_REGISTERS) + .as_ref() + .unwrap() + .clone() + }; cpu::set_cpacr_el1(saved_registers.cpacr_el1); cpu::set_ttbr0_el1(saved_registers.ttbr0_el1); cpu::set_tcr_el1(saved_registers.tcr_el1); @@ -424,7 +469,12 @@ fn restore_main() -> ! { /* Restore memory */ pr_debug!("Restore the memory"); - restore_memory(unsafe { MEMORY_SAVE_LIST.assume_init_read() }); + restore_memory(unsafe { + (&raw const MEMORY_SAVE_LIST) + .as_ref() + .unwrap() + .assume_init_read() + }); cpu::flush_tlb_el1(); cpu::clear_instruction_cache_all(); @@ -448,6 +498,6 @@ fn restore_main() -> ! { ldp x4, x5, [x0, #( 2 * 16)] ldp x2, x3, [x0, #( 1 * 16)] ldp x0, x1, [x0, #( 0 * 16)] - eret", in("x0") SAVED_REGISTERS.as_ptr() as usize, options(noreturn)) + eret", in("x0") (&raw const SAVED_REGISTERS) as usize, options(noreturn)) } } diff --git a/src/hypervisor_kernel/src/gic.rs b/src/hypervisor_kernel/src/gic.rs index 72e0f94..fbb4c6b 100644 --- a/src/hypervisor_kernel/src/gic.rs +++ b/src/hypervisor_kernel/src/gic.rs @@ -8,11 +8,10 @@ use core::ptr::{read_volatile, write_volatile}; use common::acpi::{get_acpi_table, madt::MADT}; use common::paging::{page_align_up, stage2_page_align_up}; -use common::{cpu, PAGE_SIZE}; +use common::{GeneralPurposeRegisters, PAGE_SIZE, cpu}; use crate::memory_hook::*; use crate::paging::{add_memory_access_trap, map_address, remove_memory_access_trap}; -use crate::StoredRegisters; const GICR_MAP_SIZE: usize = 0x1000; @@ -65,7 +64,7 @@ pub fn set_interrupt_pending(int_id: u32) { println!("Distributor is not available."); return; } - let register_index = ((int_id / u32::BITS) as usize) * core::mem::size_of::(); + let register_index = ((int_id / u32::BITS) as usize) * size_of::(); let register_offset = int_id & (u32::BITS - 1); unsafe { write_volatile( @@ -121,10 +120,9 @@ pub fn 
remove_sgi() { } match version { - 2 | 3 | 4 => { + 2..=4 => { for i in 0..4 { - let r = - (distributor + GICD_CPENDSGIR + i * core::mem::size_of::()) as *mut u32; + let r = (distributor + GICD_CPENDSGIR + i * size_of::()) as *mut u32; unsafe { write_volatile(r, read_volatile(r)) }; } } @@ -219,7 +217,7 @@ pub fn restore_gic(acpi_address: usize) { fn gic_redistributor_fast_restore_load_handler( accessing_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, _: bool, _: bool, @@ -237,7 +235,7 @@ fn gic_redistributor_fast_restore_load_handler( fn gic_redistributor_fast_restore_store_handler( accessing_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, diff --git a/src/hypervisor_kernel/src/main.rs b/src/hypervisor_kernel/src/main.rs index 5c13951..ae73bf0 100644 --- a/src/hypervisor_kernel/src/main.rs +++ b/src/hypervisor_kernel/src/main.rs @@ -7,7 +7,6 @@ #![no_std] #![no_main] -#![feature(naked_functions)] use core::arch::global_asm; use core::mem::MaybeUninit; @@ -16,8 +15,8 @@ use core::num::NonZeroUsize; use common::cpu::*; use common::spin_flag::SpinLockFlag; use common::{ - acpi, bitmask, MemoryAllocationError, MemoryAllocator, SystemInformation, COMPILER_INFO, - HYPERVISOR_HASH_INFO, HYPERVISOR_NAME, PAGE_SHIFT, + COMPILER_INFO, GeneralPurposeRegisters, HYPERVISOR_HASH_INFO, HYPERVISOR_NAME, + MemoryAllocationError, MemoryAllocator, PAGE_SHIFT, SystemInformation, acpi, bitmask, }; #[macro_use] @@ -46,43 +45,6 @@ static mut MEMORY_ALLOCATOR: (SpinLockFlag, MaybeUninit) = static mut ACPI_RSDP: Option = None; static mut BSP_MPIDR: u64 = 0; -#[repr(C)] -#[derive(Clone, Debug)] -pub struct StoredRegisters { - x0: u64, - x1: u64, - x2: u64, - x3: u64, - x4: u64, - x5: u64, - x6: u64, - x7: u64, - x8: u64, - x9: u64, - x10: u64, - x11: u64, - x12: u64, - x13: u64, - x14: u64, - x15: u64, - x16: u64, - x17: u64, - x18: u64, - x19: u64, - x20: u64, - x21: u64, - 
x22: u64, - x23: u64, - x24: u64, - x25: u64, - x26: u64, - x27: u64, - x28: u64, - x29: u64, - x30: u64, - sp: u64, -} - #[macro_export] macro_rules! handler_panic { ($s_r:expr, $($t:tt)*) => { @@ -90,21 +52,23 @@ macro_rules! handler_panic { }; } -#[no_mangle] +#[unsafe(no_mangle)] fn hypervisor_main(system_information: &mut SystemInformation) { if let Some(s_info) = &system_information.serial_port { - unsafe { drivers::serial_port::init_default_serial_port(s_info.clone()) }; + drivers::serial_port::init_default_serial_port(s_info.clone()); } show_kernel_info(); - unsafe { - MEMORY_ALLOCATOR.1.assume_init_mut().init( - system_information.available_memory_info.0, - system_information.available_memory_info.1 << PAGE_SHIFT, - ); - ACPI_RSDP = system_information.acpi_rsdp_address; - } + let (lock, allocator) = unsafe { (&raw mut MEMORY_ALLOCATOR).as_mut() }.unwrap(); + lock.lock(); + unsafe { allocator.assume_init_mut() }.init( + system_information.available_memory_info.0, + system_information.available_memory_info.1 << PAGE_SHIFT, + ); + lock.unlock(); + + unsafe { ACPI_RSDP = system_information.acpi_rsdp_address }; memory_hook::init_memory_access_handler(); @@ -146,8 +110,9 @@ fn hypervisor_main(system_information: &mut SystemInformation) { } unsafe { BSP_MPIDR = get_mpidr_el1() }; - extern "C" { + unsafe extern "C" { fn vector_table_el2(); + } system_information.vbar_el2 = vector_table_el2 as *const fn() as usize as u64; } @@ -206,15 +171,12 @@ fn show_kernel_info() { /// # Result /// If the allocation is succeeded, Ok(start_address), otherwise Err(()) pub fn allocate_memory(pages: usize, align: Option) -> Result { - unsafe { - MEMORY_ALLOCATOR.0.lock(); - let result = MEMORY_ALLOCATOR - .1 - .assume_init_mut() - .allocate(pages << PAGE_SHIFT, align.unwrap_or(PAGE_SHIFT)); - MEMORY_ALLOCATOR.0.unlock(); - return result; - } + let (lock, allocator) = unsafe { (&raw mut MEMORY_ALLOCATOR).as_mut() }.unwrap(); + lock.lock(); + let result = unsafe { 
allocator.assume_init_mut() } + .allocate(pages << PAGE_SHIFT, align.unwrap_or(PAGE_SHIFT)); + lock.unlock(); + result } /// Free memory to memory pool @@ -226,19 +188,15 @@ pub fn allocate_memory(pages: usize, align: Option) -> Result Result<(), MemoryAllocationError> { - unsafe { - MEMORY_ALLOCATOR.0.lock(); - let result = MEMORY_ALLOCATOR - .1 - .assume_init_mut() - .free(address, pages << PAGE_SHIFT); - MEMORY_ALLOCATOR.0.unlock(); - return result; - } + let (lock, allocator) = unsafe { (&raw mut MEMORY_ALLOCATOR).as_mut() }.unwrap(); + lock.lock(); + let result = unsafe { allocator.assume_init_mut() }.free(address, pages << PAGE_SHIFT); + lock.unlock(); + result } -#[no_mangle] -extern "C" fn synchronous_exception_handler(regs: &mut StoredRegisters) { +#[unsafe(no_mangle)] +extern "C" fn synchronous_exception_handler(regs: &mut GeneralPurposeRegisters) { let esr_el2 = get_esr_el2(); let elr_el2 = get_elr_el2(); let far_el2 = get_far_el2(); @@ -285,30 +243,11 @@ extern "C" fn synchronous_exception_handler(regs: &mut StoredRegisters) { pr_debug!("SecureMonitor Call: {:#X}", smc_number); pr_debug!("Registers: {:#X?}", regs); if smc_number == 0 { - if let Ok(psci_function_id) = psci::PsciFunctionId::try_from(regs.x0) { + if let Ok(psci_function_id) = psci::PsciFunctionId::try_from(regs[0]) { psci::handle_psci_call(psci_function_id, regs); } else { - pr_debug!("Unknown Secure Monitor Call: {:#X}", regs.x0); - secure_monitor_call( - &mut regs.x0, - &mut regs.x1, - &mut regs.x2, - &mut regs.x3, - &mut regs.x4, - &mut regs.x5, - &mut regs.x6, - &mut regs.x7, - &mut regs.x8, - &mut regs.x9, - &mut regs.x10, - &mut regs.x11, - &mut regs.x12, - &mut regs.x13, - &mut regs.x14, - &mut regs.x15, - &mut regs.x16, - &mut regs.x17, - ); + pr_debug!("Unknown Secure Monitor Call: {:#X}", regs[0]); + secure_monitor_call(regs); } } else { handler_panic!(regs, "SMC {:#X} is not implemented.", smc_number); @@ -336,13 +275,13 @@ extern "C" fn synchronous_exception_handler(regs: &mut 
StoredRegisters) { pr_debug!("Return to EL1."); } -#[no_mangle] -extern "C" fn s_error_exception_handler(regs: &mut StoredRegisters) { +#[unsafe(no_mangle)] +extern "C" fn s_error_exception_handler(regs: &mut GeneralPurposeRegisters) { handler_panic!(regs, "S Error Exception!!"); } #[track_caller] -fn interrupt_handler_panic(s_r: &StoredRegisters, f: core::fmt::Arguments) -> ! { +fn interrupt_handler_panic(s_r: &GeneralPurposeRegisters, f: core::fmt::Arguments) -> ! { let esr_el2 = get_esr_el2(); let elr_el2 = get_elr_el2(); let far_el2 = get_far_el2(); @@ -545,4 +484,4 @@ lower_aa64_restore_registers_and_eret: isb eret .size lower_aa64_restore_registers_and_eret, . - lower_aa64_restore_registers_and_eret -", SR_SIZE = const core::mem::size_of::()); +", SR_SIZE = const size_of::()); diff --git a/src/hypervisor_kernel/src/memory_hook.rs b/src/hypervisor_kernel/src/memory_hook.rs index a5572b2..73c8d75 100644 --- a/src/hypervisor_kernel/src/memory_hook.rs +++ b/src/hypervisor_kernel/src/memory_hook.rs @@ -11,7 +11,7 @@ use core::mem::MaybeUninit; -use crate::StoredRegisters; +use common::GeneralPurposeRegisters; const DEFAULT_LOAD_EMULATION_RESULT: LoadHookResult = LoadHookResult::PassThrough; const DEFAULT_STORE_EMULATION_RESULT: StoreHookResult = StoreHookResult::PassThrough; @@ -39,7 +39,7 @@ pub enum StoreHookResult { pub type LoadAccessHandler = fn( accessing_memory_address: usize, - stored_registers: &mut StoredRegisters, + regs: &mut GeneralPurposeRegisters, access_size: u8, is_64bit_register: bool, is_sign_extend_required: bool, @@ -48,7 +48,7 @@ pub type LoadAccessHandler = fn( pub type StoreAccessHandler = fn( accessing_memory_address: usize, - stored_registers: &mut StoredRegisters, + regs: &mut GeneralPurposeRegisters, access_size: u8, data: u64, entry: &StoreAccessHandlerEntry, @@ -161,9 +161,10 @@ static mut NUM_OF_STORE_HANDLER_ENABLED_ENTRIES: usize = 0; macro_rules! 
get_load_handler_list { () => { unsafe { - (&*core::ptr::addr_of!(LOAD_HANDLER_LIST)) + (&raw const LOAD_HANDLER_LIST) + .as_ref() + .unwrap() .assume_init() - .iter() } }; } @@ -171,9 +172,10 @@ macro_rules! get_load_handler_list { macro_rules! get_load_handler_list_mut { () => { unsafe { - (&mut *core::ptr::addr_of_mut!(LOAD_HANDLER_LIST)) + (&raw mut LOAD_HANDLER_LIST) + .as_mut() + .unwrap() .assume_init_mut() - .iter_mut() } }; } @@ -181,9 +183,10 @@ macro_rules! get_load_handler_list_mut { macro_rules! get_store_handler_list { () => { unsafe { - (&*core::ptr::addr_of!(STORE_HANDLER_LIST)) + (&raw const STORE_HANDLER_LIST) + .as_ref() + .unwrap() .assume_init() - .iter() } }; } @@ -191,19 +194,22 @@ macro_rules! get_store_handler_list { macro_rules! get_store_handler_list_mut { () => { unsafe { - (&mut *core::ptr::addr_of_mut!(STORE_HANDLER_LIST)) + (&raw mut STORE_HANDLER_LIST) + .as_mut() + .unwrap() .assume_init_mut() - .iter_mut() } }; } pub fn init_memory_access_handler() { - for e in get_load_handler_list_mut!() { + let list = get_load_handler_list_mut!(); + for e in list.iter_mut() { e.target_address = 0; e.range = 0; } - for e in get_store_handler_list_mut!() { + let list = get_store_handler_list_mut!(); + for e in list.iter_mut() { e.target_address = 0; e.range = 0; } @@ -219,7 +225,8 @@ pub fn add_memory_load_access_handler(entry: LoadAccessHandlerEntry) -> Result<( if entry.range == 0 { return Err(()); } - for e in get_load_handler_list_mut!() { + let list = get_load_handler_list_mut!(); + for e in list.iter_mut() { if e.range == 0 { *e = entry; unsafe { NUM_OF_LOAD_HANDLER_ENABLED_ENTRIES += 1 }; @@ -232,14 +239,15 @@ pub fn add_memory_load_access_handler(entry: LoadAccessHandlerEntry) -> Result<( /// Register StoreAccessHandlerEntry /// /// This function will add StoreAccessHandlerEntry into list. -/// Function will return Err if entry.range == 0 or list is full. +/// Function will return Err if `entry.range == 0` or list is full. 
/// /// This function **does not** add paging trap. Please call [`crate::paging::add_memory_access_trap`]. pub fn add_memory_store_access_handler(entry: StoreAccessHandlerEntry) -> Result<(), ()> { if entry.range == 0 { return Err(()); } - for e in get_store_handler_list_mut!() { + let list = get_store_handler_list_mut!(); + for e in list.iter_mut() { if e.range == 0 { *e = entry; unsafe { NUM_OF_STORE_HANDLER_ENABLED_ENTRIES += 1 }; @@ -257,7 +265,8 @@ pub fn add_memory_store_access_handler(entry: StoreAccessHandlerEntry) -> Result /// This function **does not** remove paging trap. Please call [`crate::paging::remove_memory_access_trap`]. /// If you call [`crate::paging::remove_memory_access_trap`], be careful if other handlers need the page trap. pub fn remove_memory_load_access_handler(entry: LoadAccessHandlerEntry) -> Result<(), ()> { - for e in get_load_handler_list_mut!() { + let list = get_load_handler_list_mut!(); + for e in list.iter_mut() { if e.target_address == entry.target_address && e.range == entry.range { e.target_address = 0; e.range = 0; @@ -276,7 +285,8 @@ pub fn remove_memory_load_access_handler(entry: LoadAccessHandlerEntry) -> Resul /// This function **does not** remove paging trap. Please call [`crate::paging::remove_memory_access_trap`]. /// If you call [`crate::paging::remove_memory_access_trap`], be careful if other handlers need the page trap. 
pub fn remove_memory_store_access_handler(entry: StoreAccessHandlerEntry) -> Result<(), ()> { - for e in get_store_handler_list_mut!() { + let list = get_store_handler_list_mut!(); + for e in list.iter_mut() { if e.target_address == entry.target_address && e.range == entry.range { e.target_address = 0; e.range = 0; @@ -289,20 +299,21 @@ pub fn remove_memory_store_access_handler(entry: StoreAccessHandlerEntry) -> Res pub fn memory_load_hook_handler( accessing_memory_address: usize, - stored_registers: &mut StoredRegisters, + regs: &mut GeneralPurposeRegisters, access_size: u8, is_64bit_register: bool, is_sign_extend_required: bool, ) -> LoadHookResult { let mut num_of_check_entries = 0; - for e in get_load_handler_list!() { + let list = get_load_handler_list!(); + for e in list.iter() { if e.range == 0 { continue; } if (e.target_address..(e.target_address + e.range)).contains(&accessing_memory_address) { return (e.handler)( accessing_memory_address, - stored_registers, + regs, access_size, is_64bit_register, is_sign_extend_required, @@ -319,20 +330,15 @@ pub fn memory_load_hook_handler( pub fn memory_store_hook_handler( accessing_memory_address: usize, - stored_registers: &mut StoredRegisters, + regs: &mut GeneralPurposeRegisters, access_size: u8, data: u64, ) -> StoreHookResult { let mut num_of_check_entries = 0; - for e in get_store_handler_list!() { + let list = get_store_handler_list!(); + for e in list.iter() { if (e.target_address..(e.target_address + e.range)).contains(&accessing_memory_address) { - return (e.handler)( - accessing_memory_address, - stored_registers, - access_size, - data, - e, - ); + return (e.handler)(accessing_memory_address, regs, access_size, data, e); } num_of_check_entries += 1; if num_of_check_entries == unsafe { NUM_OF_STORE_HANDLER_ENABLED_ENTRIES } { diff --git a/src/hypervisor_kernel/src/multi_core.rs b/src/hypervisor_kernel/src/multi_core.rs index e89b612..be8bf62 100644 --- a/src/hypervisor_kernel/src/multi_core.rs +++ 
b/src/hypervisor_kernel/src/multi_core.rs @@ -9,16 +9,17 @@ //! MultiCore Handling Functions //! +use core::arch::naked_asm; use core::sync::atomic::{AtomicUsize, Ordering}; -use common::{cpu, PAGE_SHIFT, PAGE_SIZE, STACK_PAGES}; +use common::{GeneralPurposeRegisters, PAGE_SHIFT, PAGE_SIZE, STACK_PAGES, cpu}; use crate::memory_hook::{ - add_memory_store_access_handler, StoreAccessHandlerEntry, StoreHookResult, + StoreAccessHandlerEntry, StoreHookResult, add_memory_store_access_handler, }; use crate::paging::{add_memory_access_trap, map_address}; -use crate::psci::{call_psci_function, PsciFunctionId, PsciReturnCode}; -use crate::{allocate_memory, free_memory, StoredRegisters}; +use crate::psci::{PsciFunctionId, PsciReturnCode, call_psci_function}; +use crate::{allocate_memory, free_memory}; pub static NUMBER_OF_RUNNING_AP: AtomicUsize = AtomicUsize::new(0); pub static STACK_TO_FREE_LATER: AtomicUsize = AtomicUsize::new(0); @@ -39,14 +40,14 @@ struct HypervisorRegisters { el1_context_id: u64, } -pub fn setup_new_cpu(regs: &mut StoredRegisters) { +pub fn setup_new_cpu(regs: &mut GeneralPurposeRegisters) { let stack_address = (allocate_memory(STACK_PAGES, Some(STACK_PAGES)) .expect("Failed to allocate stack") + (STACK_PAGES << PAGE_SHIFT)) as u64; /* Write System Registers */ let register_buffer = unsafe { - &mut *((stack_address as usize - core::mem::size_of::()) + &mut *((stack_address as usize - size_of::()) as *mut HypervisorRegisters) }; register_buffer.cnthctl_el2 = cpu::get_cnthctl_el2(); @@ -59,8 +60,8 @@ pub fn setup_new_cpu(regs: &mut StoredRegisters) { register_buffer.vtcr_el2 = cpu::get_vtcr_el2(); register_buffer.sctlr_el2 = cpu::get_sctlr_el2(); register_buffer.hcr_el2 = cpu::get_hcr_el2(); - register_buffer.el1_entry_point = regs.x2; - register_buffer.el1_context_id = regs.x3; + register_buffer.el1_entry_point = regs[2]; + register_buffer.el1_context_id = regs[3]; cpu::dsb(); /* Flush Memory Cache for Application Processors */ @@ -70,13 +71,13 @@ pub fn 
setup_new_cpu(regs: &mut StoredRegisters) { cpu::convert_virtual_address_to_physical_address_el2_read(cpu_boot as *const fn() as usize) .expect("Failed to convert virtual address to real address"); - regs.x0 = call_psci_function( + regs[0] = call_psci_function( PsciFunctionId::CpuOn, - regs.x1, + regs[1], cpu_boot_address_real_address as u64, register_buffer as *const _ as usize as u64, ); - if regs.x0 as i32 != PsciReturnCode::Success as i32 { + if regs[0] as i32 != PsciReturnCode::Success as i32 { if let Err(err) = free_memory( stack_address as usize - (STACK_PAGES << PAGE_SHIFT), STACK_PAGES, @@ -85,8 +86,8 @@ pub fn setup_new_cpu(regs: &mut StoredRegisters) { } println!( "Failed to power on the cpu (MPIDR: {:#X}): {:?}", - regs.x1, - PsciReturnCode::try_from(regs.x0 as i32) + regs[1], + PsciReturnCode::try_from(regs[0] as i32) ); return; } @@ -151,7 +152,7 @@ pub fn setup_spin_table(base_address: usize, length: usize) { fn spin_table_store_access_handler( accessing_memory_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -163,7 +164,7 @@ fn spin_table_store_access_handler( /* Write System Registers */ let register_buffer = unsafe { - &mut *((stack_address as usize - core::mem::size_of::()) + &mut *((stack_address as usize - size_of::()) as *mut HypervisorRegisters) }; register_buffer.cnthctl_el2 = cpu::get_cnthctl_el2(); @@ -224,39 +225,9 @@ fn spin_table_store_access_handler( } /* cpu_boot must use position-relative code */ -#[naked] +#[unsafe(naked)] extern "C" fn cpu_boot() { - unsafe { - core::arch::naked_asm!(" - // MIDR_EL1 & MPIDR_EL1 - mrs x15, midr_el1 - msr vpidr_el2, x15 - mrs x16, mpidr_el1 - msr vmpidr_el2, x16 - - // SVE - mrs x17, id_aa64pfr0_el1 - ubfx x18, x17, 32, 4 - cbz x18, 2f - mov x15, {MAX_ZCR_EL2_LEN} - msr S3_4_C1_C2_0, x15 // ZCR_EL2 - -2: - // GICv3~ - mrs x15, icc_sre_el2 - and x16, x15, 1 - cbz x16, 3f - mov x17, 0xf - msr icc_sre_el2, x16 - msr 
ich_hcr_el2, xzr - isb -3: - // A64FX - mov x15, {A64FX} - cbz x15, 4f - msr S3_4_C11_C2_0, xzr // IMP_FJ_TAG_ADDRESS_CTRL_EL2 - -4: + naked_asm!(" ldp x1, x2, [x0, 16 * 0] ldp x3, x4, [x0, 16 * 1] ldp x5, x6, [x0, 16 * 2] @@ -278,15 +249,49 @@ extern "C" fn cpu_boot() { msr sctlr_el2, x9 msr hcr_el2, x10 - mov x1, (1 << 7) |(1 << 6) | (1 << 2) | (1) // EL1h(EL1 + Use SP_EL1) - msr spsr_el2, x1 - msr elr_el2, x11 - mov x0, x12 + // Do not use x11 and x12 + + // MIDR_EL1 & MPIDR_EL1 + mrs x15, midr_el1 + msr vpidr_el2, x15 + mrs x16, mpidr_el1 + msr vmpidr_el2, x16 + + mrs x17, id_aa64pfr0_el1 + + // SVE + ubfx x18, x17, 32, 4 + cbz x18, 2f + mov x15, {MAX_ZCR_EL2_LEN} + msr S3_4_C1_C2_0, x15 // ZCR_EL2 + +2: // GIC v3/4/4.1 + ubfx x18, x17, 24, 4 + cbz x18, 3f + mrs x15, icc_sre_el2 + and x16, x15, 1 + cbz x16, 3f + mov x18, {ICC_SRE_EL2} + msr icc_sre_el2, x18 + isb + msr ich_hcr_el2, xzr + +3: // A64FX + mov x15, {A64FX} + cbz x15, 4f + msr S3_4_C11_C2_0, xzr // IMP_FJ_TAG_ADDRESS_CTRL_EL2 + +4: + mov x1, {SPSR_EL2} + msr spsr_el2, x1 + msr elr_el2, x11 + mov x0, x12 isb eret", - MAX_ZCR_EL2_LEN = const cpu::MAX_ZCR_EL2_LEN, - A64FX = const cfg!(feature = "a64fx") as u64) - } + MAX_ZCR_EL2_LEN = const cpu::MAX_ZCR_EL2_LEN, + ICC_SRE_EL2 = const (cpu::ICC_SRE_EL2_ENABLE | cpu::ICC_SRE_EL2_SRE), + A64FX = const cfg!(feature = "a64fx") as u64, + SPSR_EL2 = const cpu::SPSR_EL2_DEFAULT) } /// # ATTENTION @@ -294,10 +299,9 @@ extern "C" fn cpu_boot() { /// adjust `SPIN_TABLE_STACK_ADDRESS_OFFSET` at spin_table_store_access_handler. /// # TODO /// Use atomic instructions(Currently "stlxr" fails to write zero). 
-#[naked] +#[unsafe(naked)] extern "C" fn spin_table_boot() { - unsafe { - core::arch::naked_asm!(" + naked_asm!(" .align 3 adr x1, 3f 2: @@ -311,6 +315,5 @@ extern "C" fn spin_table_boot() { b {CPU_BOOT} 3: .quad 0", - CPU_BOOT = sym cpu_boot) - } + CPU_BOOT = sym cpu_boot) } diff --git a/src/hypervisor_kernel/src/paging.rs b/src/hypervisor_kernel/src/paging.rs index 1b579e3..f0806c3 100644 --- a/src/hypervisor_kernel/src/paging.rs +++ b/src/hypervisor_kernel/src/paging.rs @@ -27,7 +27,7 @@ fn _remake_stage2_page_table( let page_table = unsafe { core::slice::from_raw_parts_mut( table_address as *mut u64, - (PAGE_TABLE_SIZE * number_of_tables) / core::mem::size_of::(), + (PAGE_TABLE_SIZE * number_of_tables) / size_of::(), ) }; let shift_level = table_level_to_table_shift(STAGE_2_PAGE_SHIFT, table_level); @@ -58,7 +58,7 @@ fn _remake_stage2_page_table( *e = (next_table_address as u64) | 0b11; } } - return Ok(()); + Ok(()) } pub fn remake_stage2_page_table() -> Result { @@ -91,7 +91,7 @@ pub fn remake_stage2_page_table() -> Result { vtcr_el2_t0sz, )?; - return Ok(table_address); + Ok(table_address) } /// Map physical address recursively @@ -109,6 +109,7 @@ pub fn remake_stage2_page_table() -> Result { /// * `permission` - The attribute for memory, Bit0: is_readable, Bit1: is_writable, Bit2: is_executable /// * `memory_attribute` - The index of MAIR_EL2 to apply the mapping area /// * `t0sz` - The value of TCR_EL2::T0SZ +#[allow(clippy::too_many_arguments)] fn map_address_recursive( physical_address: &mut usize, virtual_address: &mut usize, @@ -123,9 +124,8 @@ fn map_address_recursive( let mut table_index = (*virtual_address >> shift_level) & 0x1FF; if table_level == 3 { - let current_table = unsafe { - &mut *(table_address as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) - }; + let current_table = + unsafe { &mut *(table_address as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; let num_of_pages = if *num_of_remaining_pages + table_index > 512 { 512 - 
table_index } else { @@ -133,17 +133,20 @@ fn map_address_recursive( }; let attributes = create_attributes_for_stage_1(permission, memory_attribute, false); - for index in table_index..(table_index + num_of_pages) { - current_table[index] = *physical_address as u64 | attributes; + for e in current_table + .iter_mut() + .skip(table_index) + .take(num_of_pages) + { + *e = *physical_address as u64 | attributes; *physical_address += PAGE_SIZE; *virtual_address += PAGE_SIZE; } *num_of_remaining_pages -= num_of_pages; return Ok(()); } - let current_table = unsafe { - &mut *(table_address as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) - }; + let current_table = + unsafe { &mut *(table_address as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; while *num_of_remaining_pages != 0 { pr_debug!( @@ -202,7 +205,7 @@ fn map_address_recursive( *target_descriptor ^ (block_physical_address as u64); let next_level_page = unsafe { &mut *(allocated_table_address - as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) + as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; if table_level + 1 == 3 { @@ -219,7 +222,7 @@ fn map_address_recursive( /* set_mem */ for e in unsafe { &mut *(allocated_table_address - as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) + as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) } { *e = 0; } @@ -247,7 +250,7 @@ fn map_address_recursive( } table_index += 1; } - return Ok(()); + Ok(()) } /// Map address @@ -312,9 +315,9 @@ pub fn map_address( &mut num_of_needed_pages, TTBR::new(get_ttbr0_el2()).get_base_address(), table_level, - (readable as u8) << MEMORY_PERMISSION_READABLE_BIT - | (writable as u8) << MEMORY_PERMISSION_WRITABLE_BIT - | (executable as u8) << MEMORY_PERMISSION_EXECUTABLE_BIT, + ((readable as u8) << MEMORY_PERMISSION_READABLE_BIT) + | ((writable as u8) << MEMORY_PERMISSION_WRITABLE_BIT) + | ((executable as u8) << MEMORY_PERMISSION_EXECUTABLE_BIT), get_suitable_memory_attribute_index_from_mair_el2(is_device), tcr_el2_t0sz as u8, )?; @@ 
-329,12 +332,13 @@ pub fn map_address( aligned_size >> PAGE_SHIFT ); flush_tlb_el2(); - return Ok(()); + Ok(()) } /// Map physical Address Recursively into Stage2 translation table /// /// permission: Bit0:Readable, Bit1: Writable, Bit2: Executable +#[allow(clippy::too_many_arguments)] fn map_address_recursive_stage2( physical_address: &mut usize, virtual_address: &mut usize, @@ -352,8 +356,7 @@ fn map_address_recursive_stage2( (*virtual_address >> shift_level) & (0x200 * (concatenated_tables as usize) - 1); if table_level == 3 { - let table_len = - (PAGE_TABLE_SIZE * (concatenated_tables as usize)) / core::mem::size_of::(); + let table_len = (PAGE_TABLE_SIZE * (concatenated_tables as usize)) / size_of::(); let current_table = unsafe { core::slice::from_raw_parts_mut(table_address as *mut u64, table_len) }; @@ -369,19 +372,23 @@ fn map_address_recursive_stage2( } let attributes = create_attributes_for_stage_2(permission, is_dummy_page, is_unmap, false); let end_index = table_index + num_of_pages; - for index in table_index..end_index { + for (index, e) in current_table + .iter_mut() + .enumerate() + .take(end_index) + .skip(table_index) + { + *e = *physical_address as u64 | attributes; + + #[cfg(feature = "contiguous_bit")] if STAGE_2_PAGE_SIZE == 0x1000 && (index & 0xF) == 0 && !is_dummy_page && (end_index - index) >= 16 && (*physical_address & ((16 * STAGE_2_PAGE_SIZE) - 1)) == 0 - && cfg!(feature = "contiguous_bit") { pr_debug!("Enable CONTIGUOUS_BIT({:#X} ~ {:#X})", index, end_index); - current_table[index] = - *physical_address as u64 | attributes | PAGE_DESCRIPTORS_CONTIGUOUS; - } else { - current_table[index] = *physical_address as u64 | attributes; + *e |= PAGE_DESCRIPTORS_CONTIGUOUS; } if !is_dummy_page { *physical_address += STAGE_2_PAGE_SIZE; @@ -395,7 +402,7 @@ fn map_address_recursive_stage2( let current_table = unsafe { core::slice::from_raw_parts_mut( table_address as *mut u64, - (PAGE_TABLE_SIZE * concatenated_tables as usize) / 
core::mem::size_of::(), + (PAGE_TABLE_SIZE * concatenated_tables as usize) / size_of::(), ) }; @@ -435,9 +442,6 @@ fn map_address_recursive_stage2( } *physical_address += 1 << shift_level; - /*for i in 0..(1 << (shift_level - STAGE_2_PAGE_SHIFT)) { - flush_tlb_ipa_is((*virtual_address + (i << STAGE_2_PAGE_SHIFT)) as u64); - }*/ *virtual_address += 1 << shift_level; *num_of_remaining_pages -= 512usize.pow((3 - table_level) as u32); } else { @@ -459,7 +463,7 @@ fn map_address_recursive_stage2( *target_descriptor ^ (block_physical_address as u64); let next_level_page = unsafe { &mut *(allocated_table_address - as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) + as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) }; if table_level + 1 == 3 { @@ -476,7 +480,7 @@ fn map_address_recursive_stage2( /* set_mem */ for e in unsafe { &mut *(allocated_table_address - as *mut [u64; PAGE_TABLE_SIZE / core::mem::size_of::()]) + as *mut [u64; PAGE_TABLE_SIZE / size_of::()]) } { *e = 0; } @@ -507,7 +511,7 @@ fn map_address_recursive_stage2( } table_index += 1; } - return Ok(()); + Ok(()) } /// Set up to trap memory access from EL1/EL0 @@ -576,7 +580,7 @@ pub fn add_memory_access_trap( assert_eq!(physical_address, address); pr_debug!("Unmapped {:#X} Bytes({} Pages)", size, size >> PAGE_SHIFT); flush_tlb_el1(); - return Ok(()); + Ok(()) } /// Remove the trap of memory access from EL1/EL0 @@ -630,7 +634,7 @@ pub fn remove_memory_access_trap(mut address: usize, size: usize) -> Result<(), assert_eq!(num_of_needed_pages, 0); pr_debug!("Unmapped {:#X} Bytes({} Pages)", size, size >> PAGE_SHIFT); flush_tlb_el1(); - return Ok(()); + Ok(()) } /// Allocate page table for stage 1 with suitable address alignment @@ -641,7 +645,7 @@ fn allocate_page_table_for_stage_1( is_for_ttbr: bool, ) -> Result { let alignment = if is_for_ttbr { - ((64 - ((PAGE_SHIFT - 3) * (4 - look_up_level) as usize) - t0sz as usize).max(4)).min(12) + (64 - ((PAGE_SHIFT - 3) * (4 - look_up_level) as usize) - t0sz as 
usize).clamp(4, 12) } else { PAGE_SHIFT }; @@ -664,8 +668,7 @@ fn allocate_page_table_for_stage_2( ) -> Result { assert_ne!(number_of_tables, 0); let alignment = if is_for_ttbr { - ((64 - ((PAGE_SHIFT - 3) as usize * (4 - look_up_level) as usize) - t0sz as usize).max(4)) - .min(12) + (64 - ((PAGE_SHIFT - 3) * (4 - look_up_level) as usize) - t0sz as usize).clamp(4, 12) + (number_of_tables as usize - 1) } else { assert_eq!(number_of_tables, 1); @@ -862,6 +865,7 @@ pub fn dump_page_table_stage2( ); } +#[allow(clippy::too_many_arguments)] fn dump_page_table_recursive( table_address: usize, start_virtual_address: usize, @@ -915,7 +919,7 @@ fn dump_page_table_recursive( } } *virtual_base_address += granule; - processing_descriptor_address += core::mem::size_of::(); + processing_descriptor_address += size_of::(); } } else { for _ in 0..number_of_entries { @@ -968,7 +972,7 @@ fn dump_page_table_recursive( 512, ); } - processing_descriptor_address += core::mem::size_of::(); + processing_descriptor_address += size_of::(); } } } diff --git a/src/hypervisor_kernel/src/psci.rs b/src/hypervisor_kernel/src/psci.rs index 64f4c9e..80acbbe 100644 --- a/src/hypervisor_kernel/src/psci.rs +++ b/src/hypervisor_kernel/src/psci.rs @@ -10,10 +10,11 @@ //! //! 
Supported Version: ~2.0 +use common::GeneralPurposeRegisters; use common::cpu::{get_mpidr_el1, secure_monitor_call}; +use crate::handler_panic; use crate::multi_core::{power_off_cpu, setup_new_cpu}; -use crate::{handler_panic, StoredRegisters}; /// PSCI Function ID List /// @@ -114,16 +115,16 @@ impl TryFrom for PsciReturnCode { } } -pub fn handle_psci_call(function_id: PsciFunctionId, stored_registers: &mut StoredRegisters) { +pub fn handle_psci_call(function_id: PsciFunctionId, regs: &mut GeneralPurposeRegisters) { pr_debug!("PSCI Function Call: {:?}", function_id); if function_id == PsciFunctionId::CpuOn { - pr_debug!("CPU ON: MPIDR: {:#X}", stored_registers.x1); - setup_new_cpu(stored_registers); + pr_debug!("CPU ON: MPIDR: {:#X}", regs[1]); + setup_new_cpu(regs); } else if function_id == PsciFunctionId::CpuOff { let result = power_off_cpu(); handler_panic!( - stored_registers, + regs, "Failed to power off the cpu (MPIDR: {:#X}): {:?}", get_mpidr_el1(), PsciReturnCode::try_from(result) @@ -137,39 +138,16 @@ pub fn handle_psci_call(function_id: PsciFunctionId, stored_registers: &mut Stor println!("Trap power_off/reboot"); crate::fast_restore::enter_restore_process(); } - secure_monitor_call( - &mut stored_registers.x0, - &mut stored_registers.x1, - &mut stored_registers.x2, - &mut stored_registers.x3, - &mut stored_registers.x4, - &mut stored_registers.x5, - &mut stored_registers.x6, - &mut stored_registers.x7, - &mut stored_registers.x8, - &mut stored_registers.x9, - &mut stored_registers.x10, - &mut stored_registers.x11, - &mut stored_registers.x12, - &mut stored_registers.x13, - &mut stored_registers.x14, - &mut stored_registers.x15, - &mut stored_registers.x16, - &mut stored_registers.x17, - ); + secure_monitor_call(regs); } } -pub fn call_psci_function( - function_id: PsciFunctionId, - mut arg0: u64, - mut arg1: u64, - mut arg2: u64, -) -> u64 { - let mut x0 = function_id as u64; - secure_monitor_call( - &mut x0, &mut arg0, &mut arg1, &mut arg2, &mut 0, 
&mut 0, &mut 0, &mut 0, &mut 0, &mut 0, - &mut 0, &mut 0, &mut 0, &mut 0, &mut 0, &mut 0, &mut 0, &mut 0, - ); - x0 +pub fn call_psci_function(function_id: PsciFunctionId, arg0: u64, arg1: u64, arg2: u64) -> u64 { + let mut regs: GeneralPurposeRegisters = [0; 32]; + regs[0] = function_id as u64; + regs[1] = arg0; + regs[2] = arg1; + regs[3] = arg2; + secure_monitor_call(&mut regs); + regs[0] } diff --git a/src/hypervisor_kernel/src/smmu.rs b/src/hypervisor_kernel/src/smmu.rs index 3d3afae..0a598bf 100644 --- a/src/hypervisor_kernel/src/smmu.rs +++ b/src/hypervisor_kernel/src/smmu.rs @@ -9,16 +9,14 @@ //! System Memory Management Unit //! -use core::mem::size_of; - use common::cpu::{dsb, get_vtcr_el2, get_vttbr_el2}; use common::paging::{page_align_up, stage2_page_align_up}; use common::smmu::*; -use common::{bitmask, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE}; +use common::{GeneralPurposeRegisters, STAGE_2_PAGE_MASK, STAGE_2_PAGE_SIZE, bitmask}; +use crate::emulation; use crate::memory_hook::*; use crate::paging::{add_memory_access_trap, map_address, remove_memory_access_trap}; -use crate::{emulation, StoredRegisters}; #[inline(always)] fn read_smmu_register(base: usize, offset: usize) -> T { @@ -81,7 +79,7 @@ static mut SMMU_BASE_ADDRESS: usize = 0; /// If adding memory access handler is failed, this function panics. 
/// /// # Arguments -/// * `smmu_registers_base_address` - The base address of SMMU registers([`common::smmu::SMMU_MEMORY_MAP_SIZE`] must be mapped and accessible) +/// * `smmu_registers_base_address` - The base address of SMMU registers([`SMMU_MEMORY_MAP_SIZE`] must be mapped and accessible) /// * `iort_address` - The address of IORT(Optional) pub fn init_smmu(smmu_base_address: usize, _iort_address: Option) { #[cfg(feature = "fast_restore")] @@ -129,7 +127,7 @@ fn backup_default_smmu_settings(base_address: usize) { fn smmu_registers_load_handler( accessing_memory_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, _: u8, _: bool, _: bool, @@ -164,7 +162,7 @@ fn smmu_registers_load_handler( fn smmu_registers_store_handler( accessing_memory_address: usize, - _stored_registers: &mut StoredRegisters, + _regs: &mut GeneralPurposeRegisters, access_size: u8, data: u64, entry: &StoreAccessHandlerEntry, @@ -680,7 +678,7 @@ fn process_level2_table_entry(entry_base: usize, _id: u32, should_check_entry: b fn level1_table_store_handler( accessing_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, access_size: u8, data: u64, _: &StoreAccessHandlerEntry, @@ -704,7 +702,7 @@ fn level1_table_store_handler( fn level2_table_load_handler( accessing_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, access_size: u8, _: bool, _: bool, @@ -719,7 +717,7 @@ fn level2_table_load_handler( fn level2_table_store_handler( accessing_address: usize, - _: &mut StoredRegisters, + _: &mut GeneralPurposeRegisters, access_size: u8, data: u64, _: &StoreAccessHandlerEntry, diff --git a/src/rust-toolchain.toml b/src/rust-toolchain.toml index db77f1b..94a9cb5 100644 --- a/src/rust-toolchain.toml +++ b/src/rust-toolchain.toml @@ -1,4 +1,3 @@ [toolchain] -channel = "nightly" -components = ["rust-src"] -targets = ["aarch64-unknown-none"] +components = ["rustc", "cargo"] +targets = ["aarch64-unknown-uefi", 
"aarch64-unknown-none-softfloat"] diff --git a/src/uefi/Cargo.toml b/src/uefi/Cargo.toml index dec83c8..87ffc5d 100644 --- a/src/uefi/Cargo.toml +++ b/src/uefi/Cargo.toml @@ -6,8 +6,8 @@ # http://opensource.org/licenses/mit-license.php [package] name = "uefi" -version = "1.4.1" -edition = "2021" -resolver = "2" +edition.workspace = true +license.workspace = true +version.workspace = true [dependencies] diff --git a/src/uefi/src/boot_service/memory_service.rs b/src/uefi/src/boot_service/memory_service.rs index c63dfd5..ae0dd45 100644 --- a/src/uefi/src/boot_service/memory_service.rs +++ b/src/uefi/src/boot_service/memory_service.rs @@ -99,7 +99,7 @@ impl EfiBootServices { Ok(()) } - /// Allocate highest memory which matches the demanded size and `border_address` + /// Allocate the highest memory which matches the demanded size and `border_address` /// /// # Arguments /// * `b_s` - EfiBootService diff --git a/src/uefi/src/device_path.rs b/src/uefi/src/device_path.rs index a3d6b90..83681f1 100644 --- a/src/uefi/src/device_path.rs +++ b/src/uefi/src/device_path.rs @@ -8,8 +8,8 @@ //! EFI Device Path Protocol //! -use crate::loaded_image::{EfiLoadedImageProtocol, EFI_LOADED_IMAGE_PROTOCOL_GUID}; -use crate::{boot_service, EfiHandle, EfiStatus, Guid}; +use crate::loaded_image::{EFI_LOADED_IMAGE_PROTOCOL_GUID, EfiLoadedImageProtocol}; +use crate::{EfiHandle, EfiStatus, Guid, boot_service}; const EFI_DEVICE_PATH_PROTOCOL_GUID: Guid = Guid { d1: 0x09576e91, diff --git a/src/uefi/src/file.rs b/src/uefi/src/file.rs index 46efd4d..bab0fb1 100644 --- a/src/uefi/src/file.rs +++ b/src/uefi/src/file.rs @@ -9,8 +9,8 @@ //! EFI Simple File System Protocol //! 
-use crate::boot_service::{EfiBootServices, EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL}; -use crate::loaded_image::{EfiLoadedImageProtocol, EFI_LOADED_IMAGE_PROTOCOL_GUID}; +use crate::boot_service::{EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL, EfiBootServices}; +use crate::loaded_image::{EFI_LOADED_IMAGE_PROTOCOL_GUID, EfiLoadedImageProtocol}; use crate::{EfiHandle, EfiStatus, EfiTime, Guid}; use core::mem::MaybeUninit; @@ -213,7 +213,7 @@ impl EfiFileProtocol { pub fn get_file_info(&self) -> Result { let mut result = MaybeUninit::::uninit(); - let mut read_size = core::mem::size_of::(); + let mut read_size = size_of::(); let status = (self.get_info)( self, &EFI_FILE_INFO_GUID, @@ -253,7 +253,7 @@ impl EfiFileProtocol { } pub fn close_file(&'static self) -> Result<(), EfiStatus> { - let s = ((*self).close)(self); + let s = (self.close)(self); if s == EfiStatus::EfiSuccess { Ok(()) } else { diff --git a/src/uefi/src/lib.rs b/src/uefi/src/lib.rs index e6c8854..aa522df 100644 --- a/src/uefi/src/lib.rs +++ b/src/uefi/src/lib.rs @@ -35,7 +35,7 @@ macro_rules! efi_warn { } #[repr(usize)] -#[allow(dead_code)] +#[allow(dead_code, clippy::enum_clike_unportable_variant)] #[derive(Eq, PartialEq, Debug)] pub enum EfiStatus { EfiSuccess = 0, diff --git a/src/uefi/src/loaded_image.rs b/src/uefi/src/loaded_image.rs index d99ca1f..22162da 100644 --- a/src/uefi/src/loaded_image.rs +++ b/src/uefi/src/loaded_image.rs @@ -9,7 +9,7 @@ //! EFI Loaded Image Protocol //! -use crate::{boot_service::EfiMemoryType, EfiHandle, EfiStatus, EfiSystemTable, Guid}; +use crate::{EfiHandle, EfiStatus, EfiSystemTable, Guid, boot_service::EfiMemoryType}; pub const EFI_LOADED_IMAGE_PROTOCOL_GUID: Guid = Guid { d1: 0x5B1B31A1, diff --git a/src/uefi/src/pxe.rs b/src/uefi/src/pxe.rs index c44b637..af209f5 100644 --- a/src/uefi/src/pxe.rs +++ b/src/uefi/src/pxe.rs @@ -8,8 +8,8 @@ //! EFI PXE Base Code Protocol //! 
-use crate::boot_service::{EfiBootServices, EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL}; -use crate::loaded_image::{EfiLoadedImageProtocol, EFI_LOADED_IMAGE_PROTOCOL_GUID}; +use crate::boot_service::{EFI_OPEN_PROTOCOL_BY_HANDLE_PROTOCOL, EfiBootServices}; +use crate::loaded_image::{EFI_LOADED_IMAGE_PROTOCOL_GUID, EfiLoadedImageProtocol}; use crate::{EfiHandle, EfiStatus, Guid}; const DEFAULT_BLOCK_SIZE: usize = 2048; @@ -156,17 +156,17 @@ impl EfiPxeBaseCodeProtocol { Ok(unsafe { &*pxe_protocol }) } - pub fn get_server_ip_v4(&self) -> Result<[u8; 4], ()> { + pub fn get_server_ip_v4(&self) -> Result<[u8; 4], EfiStatus> { if self.mode.is_null() { - return Err(()); + return Err(EfiStatus::EfiUnsupported); } if unsafe { (*(self.mode)).dhcp_discover_valid } { Ok(unsafe { - &*(&(*self.mode).dhcp_ack as *const _ as usize as *const EfiPxeBaseCodeDhcpv4Packet) + &*(&(*self.mode).dhcp_ack as *const _ as *const EfiPxeBaseCodeDhcpv4Packet) } .bootp_si_addr) } else { - Err(()) + Err(EfiStatus::EfiUnsupported) } } diff --git a/src/xtask/Cargo.toml b/src/xtask/Cargo.toml new file mode 100644 index 0000000..8312b7c --- /dev/null +++ b/src/xtask/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "xtask" +version.workspace = true +edition.workspace = true +license.workspace = true + +[dependencies] +toml = "0.8" diff --git a/src/builder.rs b/src/xtask/src/main.rs similarity index 62% rename from src/builder.rs rename to src/xtask/src/main.rs index 73b6c7f..b8d00f5 100755 --- a/src/builder.rs +++ b/src/xtask/src/main.rs @@ -1,21 +1,27 @@ -#!/usr/bin/env -S cargo -q -Zscript - // Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) // All rights reserved. // // This software is released under the MIT License. 
// http://opensource.org/licenses/mit-license.php +extern crate toml; + use std::env::Args; -use std::process::{exit, Command}; +use std::fs; +use std::path::Path; +use std::process::{Command, Stdio, exit}; + +const HYPERVISOR_BOOTLOADER_NAME: &str = "hypervisor_bootloader"; +const HYPERVISOR_KERNEL_NAME: &str = "hypervisor_kernel"; +const CARGO_FILE_NAME: &str = "Cargo.toml"; +const HYPERVISOR_BOOTLOADER_TRIPLE: &str = "aarch64-unknown-uefi"; +const HYPERVISOR_KERNEL_TRIPLE: &str = "aarch64-unknown-none-softfloat"; fn main() { // default settings let mut cargo_path = "cargo".to_string(); - let mut args = std::env::args(); - - // Skip build script binary path + // Skip xtask binary path let _ = args.next().unwrap(); // Parse global option @@ -69,36 +75,90 @@ macro_rules! try_get_argument { }; } +fn get_features(cargo_toml_path: &str) -> Vec { + toml::from_str::(fs::read_to_string(cargo_toml_path).unwrap().as_str()) + .unwrap() + .get("features") + .expect("Failed to get `[features]`") + .as_table() + .expect("`[features]` is invalid.") + .iter() + .map(|(k, _)| k.clone()) + .collect::>() +} + fn build(mut args: Args, cargo_path: &String) { - // default settings + // Default settings let mut is_parallel = false; let mut is_release = false; let mut is_kernel_embedded = false; - let mut cargo_args: Vec = vec!["build".to_string()]; + let mut bootloader_cargo_args: Vec = vec!["build".to_string()]; + let mut kernel_cargo_args: Vec = vec!["build".to_string()]; let mut output_directory = "bin/EFI/BOOT".to_string(); - let hypervisor_bootloader_name = "hypervisor_bootloader"; - let hypervisor_kernel_name = "hypervisor_kernel"; let hypervisor_bootloader_suffix = ".efi"; let hypervisor_bootloader_output_name = "BOOTAA64.EFI"; - let hypervisor_bootloader_triple = "aarch64-unknown-uefi"; - let hypervisor_kernel_triple = "aarch64-unknown-none"; + + // Path + let mut bootloader_path = std::env::current_dir().unwrap(); + let mut kernel_path = std::env::current_dir().unwrap(); + 
bootloader_path.push(HYPERVISOR_BOOTLOADER_NAME); + kernel_path.push(HYPERVISOR_KERNEL_NAME); + let bootloader_path = bootloader_path; + let kernel_path = kernel_path; // Parse options while let Some(v) = args.next() { if v == "-f" || v == "--features" { - cargo_args.push("--no-default-features".to_string()); - cargo_args.push("--features".to_string()); - let f; + // List up supported features + let mut bootloader_cargo_toml_path = bootloader_path.clone(); + let mut kernel_cargo_toml_path = kernel_path.clone(); + bootloader_cargo_toml_path.push(CARGO_FILE_NAME); + kernel_cargo_toml_path.push(CARGO_FILE_NAME); + let bootloader_features = get_features(bootloader_cargo_toml_path.to_str().unwrap()); + let kernel_features = get_features(kernel_cargo_toml_path.to_str().unwrap()); + + // Get features from command line + let f: String; + let mut hypervisor_bootloader_features_list = Vec::::new(); + let mut hypervisor_kernel_features_list = Vec::::new(); try_get_argument!(args, f, "Failed to get features", 1); - if f.contains("embed_kernel") { - is_kernel_embedded = true; + + for feature in f.split_terminator(',') { + let feature = feature.to_string(); + let mut is_supported = false; + + if bootloader_features.contains(&feature) { + hypervisor_bootloader_features_list.push(feature.clone()); + is_supported = true; + } + if kernel_features.contains(&feature) { + hypervisor_kernel_features_list.push(feature.clone()); + is_supported = true; + } + if !is_supported { + eprintln!( + "'{feature}' is unknown feature\nSupported features:\n\tLoader: {:?},\n\tKernel: {:?}", + bootloader_features, kernel_features + ); + exit(1); + } + if feature == "embed_kernel" { + is_kernel_embedded = true; + } } - cargo_args.push(f); + + bootloader_cargo_args.push("--no-default-features".to_string()); + kernel_cargo_args.push("--no-default-features".to_string()); + bootloader_cargo_args.push("--features".to_string()); + kernel_cargo_args.push("--features".to_string()); + 
bootloader_cargo_args.push(hypervisor_bootloader_features_list.join(",")); + kernel_cargo_args.push(hypervisor_kernel_features_list.join(",")); } else if v == "-p" || v == "--parallel" { is_parallel = true; } else if v == "-r" || v == "--release" { is_release = true; - cargo_args.push("--release".to_string()); + bootloader_cargo_args.push("--release".to_string()); + kernel_cargo_args.push("--release".to_string()); } else if v == "-o" || v == "--output-dir" { try_get_argument!( args, @@ -108,24 +168,22 @@ fn build(mut args: Args, cargo_path: &String) { ); } } + + // Create the command line let mut bootloader_command = Command::new(cargo_path); let mut kernel_command = Command::new(cargo_path); // Set working directory - let mut bootloader_path = std::env::current_dir().unwrap(); - let mut kernel_path = std::env::current_dir().unwrap(); - bootloader_path.push(hypervisor_bootloader_name); - kernel_path.push(hypervisor_kernel_name); bootloader_command.current_dir(bootloader_path); kernel_command.current_dir(kernel_path); // Set arguments - bootloader_command.args(cargo_args.clone()); - kernel_command.args(cargo_args); + bootloader_command.args(bootloader_cargo_args); + kernel_command.args(kernel_cargo_args); if is_kernel_embedded && is_parallel { is_parallel = false; - eprintln!("Parallel build is disabled by \"embed_kernel\""); + eprintln!("Parallel build is disabled by 'embed_kernel'"); } if is_parallel { @@ -185,9 +243,9 @@ fn build(mut args: Args, cargo_path: &String) { let mut hypervisor_kernel_new_name = bin_dir_path.clone(); hypervisor_bootloader_binary_path.push("target"); - hypervisor_bootloader_binary_path.push(hypervisor_bootloader_triple); + hypervisor_bootloader_binary_path.push(HYPERVISOR_BOOTLOADER_TRIPLE); hypervisor_kernel_binary_path.push("target"); - hypervisor_kernel_binary_path.push(hypervisor_kernel_triple); + hypervisor_kernel_binary_path.push(HYPERVISOR_KERNEL_TRIPLE); if is_release { hypervisor_bootloader_binary_path.push("release"); @@ -197,8 
+255,8 @@ fn build(mut args: Args, cargo_path: &String) { hypervisor_kernel_binary_path.push("debug"); } hypervisor_bootloader_binary_path - .push(hypervisor_bootloader_name.to_string() + hypervisor_bootloader_suffix); - hypervisor_kernel_binary_path.push(hypervisor_kernel_name); + .push(HYPERVISOR_BOOTLOADER_NAME.to_string() + hypervisor_bootloader_suffix); + hypervisor_kernel_binary_path.push(HYPERVISOR_KERNEL_NAME); // Build bootloader if kernel should be embedded if is_kernel_embedded { @@ -221,7 +279,7 @@ fn build(mut args: Args, cargo_path: &String) { // Move hypervisor_bootloader_new_name.push(hypervisor_bootloader_output_name); - hypervisor_kernel_new_name.push(hypervisor_kernel_name); + hypervisor_kernel_new_name.push(HYPERVISOR_KERNEL_NAME); std::fs::rename( hypervisor_bootloader_binary_path, hypervisor_bootloader_new_name, @@ -236,6 +294,7 @@ fn run(mut args: Args) { let mut qemu = "qemu-system-aarch64".to_string(); let mut mount_directory = "bin/".to_string(); let mut smp = "4".to_string(); + let mut memory = "1G".to_string(); let mut qemu_efi = "QEMU_EFI.fd".to_string(); let mut is_debug = false; @@ -245,6 +304,8 @@ fn run(mut args: Args) { try_get_argument!(args, qemu, "Failed to get the emulator path", 1); } else if v == "-p" || v == "--smp" { try_get_argument!(args, smp, "Failed to get the number of processors", 1); + } else if v == "-m" || v == "--memory" { + try_get_argument!(args, memory, "Failed to get the memory size", 1); } else if v == "-d" || v == "--mount-directory" { try_get_argument!( args, @@ -261,6 +322,10 @@ fn run(mut args: Args) { let mut qemu_command = Command::new(qemu); qemu_command.args([ + "-m", + memory.as_str(), + "-cpu", + "a64fx", "-machine", "virt,virtualization=on,iommu=smmuv3", "-smp", @@ -276,20 +341,42 @@ fn run(mut args: Args) { } let exit_status = qemu_command - .spawn() - .expect("Failed to run the emulator") - .wait() - .unwrap(); - if !exit_status.success() { - exit(exit_status.code().unwrap()); + 
.stdin(Stdio::inherit()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .output() + .expect("Failed to run the emulator"); + if !exit_status.status.success() { + exit(exit_status.status.code().unwrap()); + } +} + +fn write_bin_all(from: &Path, to: &Path, path: &Path) -> std::io::Result<()> { + use std::fs; + let f = from.join(path); + let t = to.join(path); + + fs::create_dir_all(&t)?; + for e in fs::read_dir(f)? { + let e = e?; + if e.file_type()?.is_dir() { + write_bin_all(from, to, path.join(e.file_name()).as_path())?; + } else { + // `std::fs::copy` may be failed on Linux + let mut original = fs::File::open(e.path())?; + let mut created = fs::File::create(t.join(e.file_name()))?; + std::io::copy(&mut original, &mut created)?; + } } + Ok(()) } fn write_bin(mut args: Args) { // default settings let mut mount_directory = "/mnt/".to_string(); - let mut output_directory = "bin/EFI".to_string(); + let mut output_directory = "bin".to_string(); let mut device = "".to_string(); + let mut should_use_sudo = true; // Parse options while let Some(v) = args.next() { @@ -299,6 +386,8 @@ fn write_bin(mut args: Args) { try_get_argument!(args, output_directory, "Failed to get the path to copy", 1); } else if v == "-p" || v == "--mount-point" { try_get_argument!(args, mount_directory, "Failed to get the path to mount", 1); + } else if v == "-u" || v == "--user" { + should_use_sudo = false; } } @@ -309,36 +398,59 @@ fn write_bin(mut args: Args) { } // Mount - let status = Command::new("mount") + let mut command; + if should_use_sudo { + command = Command::new("sudo"); + command.args(["mount", "-o", "users,rw,umask=000"]); + } else { + command = Command::new("mount"); + } + let status = command .args([device.as_str(), mount_directory.as_str()]) - .spawn() - .expect("Failed to mount") - .wait() + .stdin(Stdio::inherit()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .output() .expect("Failed to mount"); - if !status.success() { + + if !status.status.success() 
{ eprintln!("Failed to mount the device"); - exit(status.code().unwrap()); + exit(status.status.code().unwrap()); } - // Copy - let result = std::fs::copy(output_directory, mount_directory.as_str()); + // Copy files + let result = write_bin_all( + Path::new(&output_directory), + Path::new(&mount_directory), + Path::new(""), + ); // Umount - let status = Command::new("umount") + let mut command; + if should_use_sudo { + command = Command::new("sudo"); + command.arg("umount"); + } else { + command = Command::new("umount"); + } + let status = command .arg(mount_directory.as_str()) - .spawn() - .expect("Failed to umount") - .wait() + .stdin(Stdio::inherit()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .output() .expect("Failed to umount"); - if !status.success() { - eprintln!("Failed to umount the device"); - exit(status.code().unwrap()); + + if !status.status.success() { + eprintln!("Failed to unmount the device"); + exit(status.status.code().unwrap()); } if result.is_err() { eprintln!("Failed to copy binaries: {:?}", result.unwrap_err()); exit(1); } + println!("Success"); } fn show_help() { @@ -346,7 +458,7 @@ fn show_help() { Hypervisor Builder Copyright (c) 2022 National Institute of Advanced Industrial Science and Technology (AIST) All rights reserved. 
-Usage: ./build.rs [global options] command [command options] +Usage: cargo xtask [global options] command [command options] Global Options: (-m | --manager) package_manager_path : Specify the path of package manager like cargo @@ -370,6 +482,7 @@ run: (-e | --emulator) qemu_path : Modify qemu path (-o | --output-dir) directory : Modify the output directory of built binaries (-p | --smp) smp : Modify the number of virtual processors + (-m | --memory) size : Modify the memory size (-d | --mount-directory) : Modify the directory path to mount as virtual FAT device --bios path : Specify the OVMF image --debug : Enable debug system @@ -378,5 +491,6 @@ write: (-d | --device) : Specify the device to write (Required) (-p | --mount-point) : Modify the path to mount (-o | --output-dir) directory : Modify the output directory of built binaries + (-u | --user) : Access the device without sudo "); }