diff --git a/pkg/abi/linux/prctl.go b/pkg/abi/linux/prctl.go
index c1928ae641..d82b28f311 100644
--- a/pkg/abi/linux/prctl.go
+++ b/pkg/abi/linux/prctl.go
@@ -159,6 +159,10 @@ const (
 	// specified) to ptrace the current task.
 	PR_SET_PTRACER     = 0x59616d61
 	PR_SET_PTRACER_ANY = -1
+
+	PR_SET_TAGGED_ADDR_CTRL = 55
+	PR_GET_TAGGED_ADDR_CTRL = 56
+	PR_TAGGED_ADDR_ENABLE   = (1 << 0)
 )
 
 // From
diff --git a/pkg/sentry/mm/io.go b/pkg/sentry/mm/io.go
index 1173020cfa..f4c1acf1f7 100644
--- a/pkg/sentry/mm/io.go
+++ b/pkg/sentry/mm/io.go
@@ -107,7 +107,7 @@ func translateIOError(ctx context.Context, err error) error {
 
 // CopyOut implements usermem.IO.CopyOut.
 func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
-	_, ok := mm.CheckIORange(addr, int64(len(src)))
+	ar, ok := mm.CheckIORange(addr, int64(len(src)))
 	if !ok {
 		return 0, linuxerr.EFAULT
 	}
@@ -118,7 +118,7 @@ func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []
 
 	// Do AddressSpace IO if applicable.
 	if mm.asioEnabled(opts) && len(src) < copyMapMinBytes {
-		return mm.asCopyOut(ctx, addr, src, opts)
+		return mm.asCopyOut(ctx, ar, src, opts)
 	}
 
 	// Go through internal mappings.
@@ -127,19 +127,18 @@ func (mm *MemoryManager) CopyOut(ctx context.Context, addr hostarch.Addr, src []
 	// traverse an unnecessary layer of buffering. This can be fixed by
 	// inlining mm.withInternalMappings() and passing src subslices directly to
 	// memmap.File.BufferWriteAt().
-	return mm.imCopyOut(ctx, addr, src, opts)
+	return mm.imCopyOut(ctx, ar, src, opts)
 }
 
-func (mm *MemoryManager) asCopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
+func (mm *MemoryManager) asCopyOut(ctx context.Context, ar hostarch.AddrRange, src []byte, opts usermem.IOOpts) (int, error) {
 	var done int
 	for {
-		n, err := mm.as.CopyOut(addr+hostarch.Addr(done), src[done:])
+		n, err := mm.as.CopyOut(ar.Start+hostarch.Addr(done), src[done:])
 		done += n
 		if err == nil {
 			return done, nil
 		}
 		if f, ok := err.(platform.SegmentationFault); ok {
-			ar, _ := addr.ToRange(uint64(len(src)))
 			if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil {
 				return done, err
 			}
@@ -147,14 +146,14 @@ func (mm *MemoryManager) asCopyOut(ctx context.Context, addr hostarch.Addr, src
 		}
 		if _, ok := err.(platform.AddressSpaceIOUnavailable); ok {
 			// Fall back to using internal mappings.
-			return mm.imCopyOut(ctx, addr+hostarch.Addr(done), src[done:], opts)
+			return mm.imCopyOut(ctx, hostarch.AddrRange{ar.Start + hostarch.Addr(done), ar.End}, src[done:], opts)
 		}
 		return done, translateIOError(ctx, err)
 	}
 }
 
-func (mm *MemoryManager) imCopyOut(ctx context.Context, addr hostarch.Addr, src []byte, opts usermem.IOOpts) (int, error) {
-	n64, err := mm.withInternalMappings(ctx, addr.MustToRange(uint64(len(src))), hostarch.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+func (mm *MemoryManager) imCopyOut(ctx context.Context, ar hostarch.AddrRange, src []byte, opts usermem.IOOpts) (int, error) {
+	n64, err := mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
 		n, err := safemem.CopySeq(ims, safemem.BlockSeqOf(safemem.BlockFromSafeSlice(src)))
 		return n, translateIOError(ctx, err)
 	})
@@ -163,7 +162,7 @@ func (mm *MemoryManager) imCopyOut(ctx context.Context, addr hostarch.Addr, src
 
 // CopyIn implements usermem.IO.CopyIn.
 func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
-	_, ok := mm.CheckIORange(addr, int64(len(dst)))
+	ar, ok := mm.CheckIORange(addr, int64(len(dst)))
 	if !ok {
 		return 0, linuxerr.EFAULT
 	}
@@ -174,7 +173,7 @@ func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []b
 
 	// Do AddressSpace IO if applicable.
 	if mm.asioEnabled(opts) && len(dst) < copyMapMinBytes {
-		return mm.asCopyIn(ctx, addr, dst, opts)
+		return mm.asCopyIn(ctx, ar, dst, opts)
 	}
 
 	// Go through internal mappings.
@@ -183,19 +182,18 @@ func (mm *MemoryManager) CopyIn(ctx context.Context, addr hostarch.Addr, dst []b
 	// traverse an unnecessary layer of buffering. This can be fixed by
 	// inlining mm.withInternalMappings() and passing dst subslices directly to
 	// memmap.File.BufferReadAt().
-	return mm.imCopyIn(ctx, addr, dst, opts)
+	return mm.imCopyIn(ctx, ar, dst, opts)
 }
 
-func (mm *MemoryManager) asCopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
+func (mm *MemoryManager) asCopyIn(ctx context.Context, ar hostarch.AddrRange, dst []byte, opts usermem.IOOpts) (int, error) {
 	var done int
 	for {
-		n, err := mm.as.CopyIn(addr+hostarch.Addr(done), dst[done:])
+		n, err := mm.as.CopyIn(ar.Start+hostarch.Addr(done), dst[done:])
 		done += n
 		if err == nil {
 			return done, nil
 		}
 		if f, ok := err.(platform.SegmentationFault); ok {
-			ar, _ := addr.ToRange(uint64(len(dst)))
 			if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Read); err != nil {
 				return done, err
 			}
@@ -203,14 +201,14 @@ func (mm *MemoryManager) asCopyIn(ctx context.Context, addr hostarch.Addr, dst [
 		}
 		if _, ok := err.(platform.AddressSpaceIOUnavailable); ok {
 			// Fall back to using internal mappings.
-			return mm.imCopyIn(ctx, addr+hostarch.Addr(done), dst[done:], opts)
+			return mm.imCopyIn(ctx, hostarch.AddrRange{ar.Start + hostarch.Addr(done), ar.End}, dst[done:], opts)
 		}
 		return done, translateIOError(ctx, err)
 	}
 }
 
-func (mm *MemoryManager) imCopyIn(ctx context.Context, addr hostarch.Addr, dst []byte, opts usermem.IOOpts) (int, error) {
-	n64, err := mm.withInternalMappings(ctx, addr.MustToRange(uint64(len(dst))), hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
+func (mm *MemoryManager) imCopyIn(ctx context.Context, ar hostarch.AddrRange, dst []byte, opts usermem.IOOpts) (int, error) {
+	n64, err := mm.withInternalMappings(ctx, ar, hostarch.Read, opts.IgnorePermissions, func(ims safemem.BlockSeq) (uint64, error) {
 		n, err := safemem.CopySeq(safemem.BlockSeqOf(safemem.BlockFromSafeSlice(dst)), ims)
 		return n, translateIOError(ctx, err)
 	})
@@ -219,7 +217,7 @@ func (mm *MemoryManager) imCopyIn(ctx context.Context, addr hostarch.Addr, dst [
 
 // ZeroOut implements usermem.IO.ZeroOut.
 func (mm *MemoryManager) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
-	_, ok := mm.CheckIORange(addr, toZero)
+	ar, ok := mm.CheckIORange(addr, toZero)
 	if !ok {
 		return 0, linuxerr.EFAULT
 	}
@@ -230,23 +228,22 @@ func (mm *MemoryManager) ZeroOut(ctx context.Context, addr hostarch.Addr, toZero
 
 	// Do AddressSpace IO if applicable.
 	if mm.asioEnabled(opts) && toZero < copyMapMinBytes {
-		return mm.asZeroOut(ctx, addr, toZero, opts)
+		return mm.asZeroOut(ctx, ar, opts)
 	}
 
 	// Go through internal mappings.
-	return mm.imZeroOut(ctx, addr, toZero, opts)
+	return mm.imZeroOut(ctx, ar, opts)
 }
 
-func (mm *MemoryManager) asZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
+func (mm *MemoryManager) asZeroOut(ctx context.Context, ar hostarch.AddrRange, opts usermem.IOOpts) (int64, error) {
 	var done int64
 	for {
-		n, err := mm.as.ZeroOut(addr+hostarch.Addr(done), uintptr(toZero-done))
+		n, err := mm.as.ZeroOut(ar.Start+hostarch.Addr(done), uintptr(int64(ar.Length())-done))
 		done += int64(n)
 		if err == nil {
 			return done, nil
 		}
 		if f, ok := err.(platform.SegmentationFault); ok {
-			ar, _ := addr.ToRange(uint64(toZero))
 			if err := mm.handleASIOFault(ctx, f.Addr, ar, hostarch.Write); err != nil {
 				return done, err
 			}
@@ -254,14 +251,14 @@ func (mm *MemoryManager) asZeroOut(ctx context.Context, addr hostarch.Addr, toZe
 		}
 		if _, ok := err.(platform.AddressSpaceIOUnavailable); ok {
 			// Fall back to using internal mappings.
-			return mm.imZeroOut(ctx, addr+hostarch.Addr(done), toZero-done, opts)
+			return mm.imZeroOut(ctx, hostarch.AddrRange{ar.Start + hostarch.Addr(done), ar.End}, opts)
 		}
 		return done, translateIOError(ctx, err)
 	}
 }
 
-func (mm *MemoryManager) imZeroOut(ctx context.Context, addr hostarch.Addr, toZero int64, opts usermem.IOOpts) (int64, error) {
-	return mm.withInternalMappings(ctx, addr.MustToRange(uint64(toZero)), hostarch.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) {
+func (mm *MemoryManager) imZeroOut(ctx context.Context, ar hostarch.AddrRange, opts usermem.IOOpts) (int64, error) {
+	return mm.withInternalMappings(ctx, ar, hostarch.Write, opts.IgnorePermissions, func(dsts safemem.BlockSeq) (uint64, error) {
 		n, err := safemem.ZeroSeq(dsts)
 		return n, translateIOError(ctx, err)
 	})
@@ -297,7 +294,7 @@ func (mm *MemoryManager) CopyOutFrom(ctx context.Context, ars hostarch.AddrRange
 		if cplen > int64(bufN)-done {
 			cplen = int64(bufN) - done
 		}
-		n, err := mm.asCopyOut(ctx, ar.Start, buf[int(done):int(done+cplen)], opts)
+		n, err := mm.asCopyOut(ctx, ar, buf[int(done):int(done+cplen)], opts)
 		done += int64(n)
 		if err != nil {
 			return done, err
@@ -330,7 +327,7 @@ func (mm *MemoryManager) CopyInTo(ctx context.Context, ars hostarch.AddrRangeSeq
 	for !ars.IsEmpty() {
 		ar := ars.Head()
 		var n int
-		n, bufErr = mm.asCopyIn(ctx, ar.Start, buf[done:done+int(ar.Length())], opts)
+		n, bufErr = mm.asCopyIn(ctx, ar, buf[done:done+int(ar.Length())], opts)
 		done += n
 		if bufErr != nil {
 			break
@@ -377,7 +374,7 @@ func (mm *MemoryManager) SwapUint32(ctx context.Context, addr hostarch.Addr, new
 	// Do AddressSpace IO if applicable.
 	if mm.asioEnabled(opts) {
 		for {
-			old, err := mm.as.SwapUint32(addr, new)
+			old, err := mm.as.SwapUint32(ar.Start, new)
 			if err == nil {
 				return old, nil
 			}
@@ -424,7 +421,7 @@ func (mm *MemoryManager) CompareAndSwapUint32(ctx context.Context, addr hostarch
 	// Do AddressSpace IO if applicable.
 	if mm.asioEnabled(opts) {
 		for {
-			prev, err := mm.as.CompareAndSwapUint32(addr, old, new)
+			prev, err := mm.as.CompareAndSwapUint32(ar.Start, old, new)
 			if err == nil {
 				return prev, nil
 			}
@@ -471,7 +468,7 @@ func (mm *MemoryManager) LoadUint32(ctx context.Context, addr hostarch.Addr, opt
 	// Do AddressSpace IO if applicable.
 	if mm.asioEnabled(opts) {
 		for {
-			val, err := mm.as.LoadUint32(addr)
+			val, err := mm.as.LoadUint32(ar.Start)
 			if err == nil {
 				return val, nil
 			}
diff --git a/pkg/sentry/syscalls/linux/sys_prctl.go b/pkg/sentry/syscalls/linux/sys_prctl.go
index 1a8553df12..45cc621348 100644
--- a/pkg/sentry/syscalls/linux/sys_prctl.go
+++ b/pkg/sentry/syscalls/linux/sys_prctl.go
@@ -263,6 +263,19 @@ func Prctl(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr,
 		}
 		return 0, nil, t.MemoryManager().SetVMAAnonName(args[2].Pointer(), args[3].Uint64(), name, nameIsNil)
 
+	// gVisor always uses the tagged address ABI, so these operations are no-ops.
+	case linux.PR_SET_TAGGED_ADDR_CTRL:
+		if t.Arch().Arch() != arch.ARM64 || args[1].Uint64() != linux.PR_TAGGED_ADDR_ENABLE || args[2].Uint64() != 0 || args[3].Uint64() != 0 || args[4].Uint64() != 0 {
+			return 0, nil, linuxerr.EINVAL
+		}
+		return 0, nil, nil
+
+	case linux.PR_GET_TAGGED_ADDR_CTRL:
+		if t.Arch().Arch() != arch.ARM64 || args[1].Uint64() != 0 || args[2].Uint64() != 0 || args[3].Uint64() != 0 || args[4].Uint64() != 0 {
+			return 0, nil, linuxerr.EINVAL
+		}
+		return linux.PR_TAGGED_ADDR_ENABLE, nil, nil
+
 	case linux.PR_GET_TIMING,
 		linux.PR_SET_TIMING,
 		linux.PR_GET_TSC,
diff --git a/pkg/sentry/syscalls/linux/sys_random.go b/pkg/sentry/syscalls/linux/sys_random.go
index fed7930d27..c41a1ceb27 100644
--- a/pkg/sentry/syscalls/linux/sys_random.go
+++ b/pkg/sentry/syscalls/linux/sys_random.go
@@ -51,7 +51,7 @@ func GetRandom(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintp
 	if length > math.MaxInt32 {
 		length = math.MaxInt32
 	}
-	ar, ok := addr.ToRange(uint64(length))
+	ar, ok := t.MemoryManager().CheckIORange(addr, int64(length))
 	if !ok {
 		return 0, nil, linuxerr.EFAULT
 	}
diff --git a/test/syscalls/linux/prctl.cc b/test/syscalls/linux/prctl.cc
index f76a34fb3b..133be108c6 100644
--- a/test/syscalls/linux/prctl.cc
+++ b/test/syscalls/linux/prctl.cc
@@ -307,6 +307,20 @@ TEST(PrctlTest, OrphansReparentedToSubreaper) {
   EXPECT_TRUE(got_sigchild);
 }
 
+TEST(PrctlTest, TaggedAddrCtrl) {
+#if defined(__aarch64__)
+  EXPECT_THAT(prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0),
+              SyscallSucceeds());
+  EXPECT_THAT(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
+              SyscallSucceedsWithValue(PR_TAGGED_ADDR_ENABLE));
+#else
+  EXPECT_THAT(prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0),
+              SyscallFailsWithErrno(EINVAL));
+  EXPECT_THAT(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0),
+              SyscallFailsWithErrno(EINVAL));
+#endif
+}
+
 }  // namespace
 
 }  // namespace testing
diff --git a/test/syscalls/linux/processes.cc b/test/syscalls/linux/processes.cc
index 14362e632b..2c6ab911d4 100644
--- a/test/syscalls/linux/processes.cc
+++ b/test/syscalls/linux/processes.cc
@@ -275,7 +275,10 @@ int ExecSwapPostExec() {
 // ExecSwapPreClone is the first part of the ExecSwapThreadGroupLeader test.
 // It is called after the test has fork()'d.
 // It calls clone() to run ExecSwapPreExec.
-[[noreturn]] void ExecSwapPreClone(ExecSwapArg* exec_swap_arg) {
+// We need to disable HWASan for this function because we cannot tag the
+// stack region used by the clone() call.
+[[noreturn]] __attribute__((no_sanitize("hwaddress"))) void ExecSwapPreClone(
+    ExecSwapArg* exec_swap_arg) {
   pid_t pid = getpid();
   TEST_PCHECK(pid > 0);
   pid_t tid = gettid();
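
A note on why the mm/io.go changes above thread the hostarch.AddrRange returned by mm.CheckIORange through asCopyOut/asCopyIn/asZeroOut instead of re-deriving ranges from the raw addr: under the ARM64 tagged address ABI (built on the hardware's Top Byte Ignore feature), a user pointer may carry an arbitrary tag in bits 63:56. The diff suggests that CheckIORange is where the range is canonicalized, so all subsequent address arithmetic and fault handling must start from ar.Start rather than from the possibly-tagged addr. Below is a minimal sketch of the tag-stripping rule only; untag is a hypothetical helper for illustration, not gVisor's actual CheckIORange:

package main

import "fmt"

// untag clears bits 63:56 of a user address, mirroring the ARM64 Top Byte
// Ignore (TBI) rule that PR_TAGGED_ADDR_ENABLE exposes to userspace.
// Hypothetical helper for illustration only.
func untag(addr uint64) uint64 {
	return addr &^ (uint64(0xff) << 56)
}

func main() {
	// A pointer tagged with 0x2a in its top byte refers to the same
	// memory as its untagged form once the tag is cleared.
	tagged := uint64(0x2a)<<56 | 0xdeadbeef
	fmt.Printf("tagged:   %#x\n", tagged)
	fmt.Printf("untagged: %#x\n", untag(tagged))
}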
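The AddressSpace IO helpers in this patch all share one retry pattern: copy optimistically, and when the platform reports a SegmentationFault, fault the mapping in and resume from the progress already made, so no byte is copied twice. Here is a simplified, runnable sketch of that loop; segFault, rawCopy, and mapIn are hypothetical stand-ins for platform.SegmentationFault, mm.as.CopyOut, and mm.handleASIOFault:

package main

import (
	"errors"
	"fmt"
)

// segFault stands in for platform.SegmentationFault: it reports the exact
// address at which AddressSpace IO stopped.
type segFault struct{ addr uint64 }

func (f segFault) Error() string { return fmt.Sprintf("fault at %#x", f.addr) }

// copyRetrying mirrors the loop in asCopyOut/asCopyIn: copy as far as
// possible, map in the faulting page on a segFault, then resume at the
// offset already reached.
func copyRetrying(start uint64, src []byte,
	rawCopy func(dst uint64, src []byte) (int, error),
	mapIn func(faultAddr uint64) error) (int, error) {
	done := 0
	for {
		n, err := rawCopy(start+uint64(done), src[done:])
		done += n // keep partial progress across retries
		if err == nil {
			return done, nil
		}
		var f segFault
		if errors.As(err, &f) {
			if err := mapIn(f.addr); err != nil {
				return done, err
			}
			continue // retry from the new offset
		}
		return done, err
	}
}

func main() {
	dst := make([]byte, 8)
	faulted := false
	rawCopy := func(addr uint64, src []byte) (int, error) {
		if !faulted {
			faulted = true
			copy(dst[addr:], src[:4])
			return 4, segFault{addr: addr + 4} // fault halfway through
		}
		copy(dst[addr:], src)
		return len(src), nil
	}
	mapIn := func(uint64) error { return nil } // pretend to map the page
	n, err := copyRetrying(0, []byte("abcdefgh"), rawCopy, mapIn)
	fmt.Println(n, err, string(dst)) // 8 <nil> abcdefgh
}

The real code adds one more branch: when the platform reports AddressSpaceIOUnavailable, it falls back to internal mappings, passing the remaining subrange {ar.Start + done, ar.End} so the fallback operates on exactly the bytes not yet transferred.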