From 4a9a0d07f7376b439430e686721e8176c7b56ce7 Mon Sep 17 00:00:00 2001
From: ReinUsesLisp
Date: Mon, 28 Dec 2020 02:18:31 -0300
Subject: [PATCH] backend/{a32,a64}_emit_x64: Add config entry to mask page
 table pointers

Add config entry to mask out the lower bits in page table pointers.
This is intended to allow users of Dynarmic to pack small integers
inside pointers and update the pair atomically without locks. These
lower bits can be masked out due to the expected alignment of pointers
inside the page table.

For the given usage, using AND on the pointer acts the same way as a
TEST instruction. That said, when the mask value is zero, TEST is still
emitted to keep the same behavior.
---
 include/dynarmic/A32/config.h    | 5 +++++
 include/dynarmic/A64/config.h    | 5 +++++
 src/backend/x64/a32_emit_x64.cpp | 6 +++++-
 src/backend/x64/a64_emit_x64.cpp | 6 +++++-
 4 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/include/dynarmic/A32/config.h b/include/dynarmic/A32/config.h
index a0091841..d7af62c6 100644
--- a/include/dynarmic/A32/config.h
+++ b/include/dynarmic/A32/config.h
@@ -137,6 +137,11 @@ struct UserConfig {
     /// So there might be wrongly faulted pages which maps to nullptr.
     /// This can be avoided by carefully allocating the memory region.
     bool absolute_offset_page_table = false;
+    /// Masks out the lower N bits in host pointers from the page table.
+    /// The intention behind this is to allow users of Dynarmic to pack attributes in the
+    /// same integer and update the pointer-attribute pair atomically.
+    /// If the configured value is 3, all pointers will be forcefully aligned to 8 bytes.
+    int page_table_pointer_mask_bits = 0;
     /// Determines if we should detect memory accesses via page_table that straddle are
     /// misaligned. Accesses that straddle page boundaries will fallback to the relevant
     /// memory callback.
diff --git a/include/dynarmic/A64/config.h b/include/dynarmic/A64/config.h
index 6c607717..e6f48ef6 100644
--- a/include/dynarmic/A64/config.h
+++ b/include/dynarmic/A64/config.h
@@ -188,6 +188,11 @@ struct UserConfig {
     /// Determines the size of page_table. Valid values are between 12 and 64 inclusive.
     /// This is only used if page_table is not nullptr.
     size_t page_table_address_space_bits = 36;
+    /// Masks out the lower N bits in host pointers from the page table.
+    /// The intention behind this is to allow users of Dynarmic to pack attributes in the
+    /// same integer and update the pointer-attribute pair atomically.
+    /// If the configured value is 3, all pointers will be forcefully aligned to 8 bytes.
+    int page_table_pointer_mask_bits = 0;
     /// Determines what happens if the guest accesses an entry that is off the end of the
     /// page table. If true, Dynarmic will silently mirror page_table's address space. If
     /// false, accessing memory outside of page_table bounds will result in a call to the
diff --git a/src/backend/x64/a32_emit_x64.cpp b/src/backend/x64/a32_emit_x64.cpp
index a532ccb1..099dfdd0 100644
--- a/src/backend/x64/a32_emit_x64.cpp
+++ b/src/backend/x64/a32_emit_x64.cpp
@@ -935,7 +935,11 @@ Xbyak::RegExp EmitVAddrLookup(BlockOfCode& code, A32EmitContext& ctx, size_t bit
     code.mov(tmp, vaddr.cvt32());
     code.shr(tmp, static_cast<int>(page_bits));
     code.mov(page, qword[r14 + tmp.cvt64() * sizeof(void*)]);
-    code.test(page, page);
+    if (ctx.conf.page_table_pointer_mask_bits == 0) {
+        code.test(page, page);
+    } else {
+        code.and_(page, ~u32(0) << ctx.conf.page_table_pointer_mask_bits);
+    }
     code.jz(abort, code.T_NEAR);
     if (ctx.conf.absolute_offset_page_table) {
         return page + vaddr;
diff --git a/src/backend/x64/a64_emit_x64.cpp b/src/backend/x64/a64_emit_x64.cpp
index ade6711b..c8be6fe1 100644
--- a/src/backend/x64/a64_emit_x64.cpp
+++ b/src/backend/x64/a64_emit_x64.cpp
@@ -815,7 +815,11 @@ Xbyak::RegExp EmitVAddrLookup(BlockOfCode& code, A64EmitContext& ctx, size_t bit
         code.jnz(abort, code.T_NEAR);
     }
     code.mov(page, qword[r14 + tmp * sizeof(void*)]);
-    code.test(page, page);
+    if (ctx.conf.page_table_pointer_mask_bits == 0) {
+        code.test(page, page);
+    } else {
+        code.and_(page, ~u32(0) << ctx.conf.page_table_pointer_mask_bits);
+    }
     code.jz(abort, code.T_NEAR);
     if (ctx.conf.absolute_offset_page_table) {
         return page + vaddr;
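A minimal sketch of the embedder-side usage this enables, assuming page_table_pointer_mask_bits = 3 and host pointers aligned to at least 8 bytes. The helpers below (PackEntry, UnpackPointer, UnpackAttributes, UpdateEntry) are hypothetical and not part of Dynarmic's API:

// Hypothetical embedder-side helpers; not part of Dynarmic's API.
#include <atomic>
#include <cassert>
#include <cstdint>

// With UserConfig::page_table_pointer_mask_bits = 3, the JIT masks off the
// low 3 bits of every page-table entry before using it, so an 8-byte-aligned
// host pointer can carry 3 attribute bits in those positions.
constexpr std::uintptr_t kAttributeMask = 0b111;

std::uintptr_t PackEntry(void* host_page, std::uintptr_t attributes) {
    const auto ptr = reinterpret_cast<std::uintptr_t>(host_page);
    assert((ptr & kAttributeMask) == 0);  // Requires >= 8-byte alignment.
    return ptr | (attributes & kAttributeMask);
}

void* UnpackPointer(std::uintptr_t entry) {
    return reinterpret_cast<void*>(entry & ~kAttributeMask);
}

std::uintptr_t UnpackAttributes(std::uintptr_t entry) {
    return entry & kAttributeMask;
}

// Pointer and attributes are replaced with a single atomic store, so readers
// never observe a mixed pointer/attribute pair and no lock is required.
void UpdateEntry(std::atomic<std::uintptr_t>& entry, void* new_page, std::uintptr_t new_attrs) {
    entry.store(PackEntry(new_page, new_attrs), std::memory_order_release);
}

Because the pointer and its attribute bits live in one word, one atomic store replaces both at once; the AND emitted in EmitVAddrLookup strips the attribute bits and, since it sets ZF like TEST would, also doubles as the null-entry check before the page pointer is dereferenced.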