From 4c4b2516abaa63f0e038559fbf0bd588df7bc3d4 Mon Sep 17 00:00:00 2001 From: strongtz Date: Wed, 27 May 2020 12:49:01 +0800 Subject: [PATCH] Use custom ArmMmuLib due to sdm845 specific issue --- sdm845Pkg/CommonDsc.dsc.inc | 4 +- .../Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 772 ++++++++++++++++ .../ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S | 74 ++ .../AArch64/ArmMmuPeiLibConstructor.c | 55 ++ .../Library/ArmMmuLib/Arm/ArmMmuLibCore.c | 841 ++++++++++++++++++ .../ArmMmuLib/Arm/ArmMmuLibV7Support.S | 29 + .../ArmMmuLib/Arm/ArmMmuLibV7Support.asm | 26 + sdm845Pkg/Library/ArmMmuLib/ArmMmuBaseLib.inf | 39 + sdm845Pkg/Library/ArmMmuLib/ArmMmuPeiLib.inf | 32 + 9 files changed, 1870 insertions(+), 2 deletions(-) create mode 100644 sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c create mode 100644 sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S create mode 100644 sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuPeiLibConstructor.c create mode 100644 sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c create mode 100644 sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S create mode 100644 sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm create mode 100644 sdm845Pkg/Library/ArmMmuLib/ArmMmuBaseLib.inf create mode 100644 sdm845Pkg/Library/ArmMmuLib/ArmMmuPeiLib.inf diff --git a/sdm845Pkg/CommonDsc.dsc.inc b/sdm845Pkg/CommonDsc.dsc.inc index 7d39577..a77f452 100644 --- a/sdm845Pkg/CommonDsc.dsc.inc +++ b/sdm845Pkg/CommonDsc.dsc.inc @@ -62,7 +62,7 @@ ArmGicArchLib|ArmPkg/Library/ArmGicArchLib/ArmGicArchLib.inf ArmPlatformStackLib|ArmPlatformPkg/Library/ArmPlatformStackLib/ArmPlatformStackLib.inf ArmSmcLib|ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf - ArmMmuLib|ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf + ArmMmuLib|sdm845Pkg/Library/ArmMmuLib/ArmMmuBaseLib.inf ResetSystemLib|ArmPkg/Library/ArmSmcPsciResetSystemLib/ArmSmcPsciResetSystemLib.inf @@ -163,7 +163,7 @@ ## Fixed compile error after upgrade to 14.10 PlatformPeiLib|ArmPlatformPkg/PlatformPei/PlatformPeiLib.inf PcdLib|MdePkg/Library/PeiPcdLib/PeiPcdLib.inf - ArmMmuLib|ArmPkg/Library/ArmMmuLib/ArmMmuPeiLib.inf + ArmMmuLib|sdm845Pkg/Library/ArmMmuLib/ArmMmuPeiLib.inf BaseMemoryLib|MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf [LibraryClasses.common.DXE_CORE] diff --git a/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c new file mode 100644 index 0000000..e635836 --- /dev/null +++ b/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c @@ -0,0 +1,772 @@ +/** @file +* File managing the MMU for ARMv8 architecture +* +* Copyright (c) 2011-2014, ARM Limited. All rights reserved. +* Copyright (c) 2016, Linaro Limited. All rights reserved. +* Copyright (c) 2017, Intel Corporation. All rights reserved.
+*
+* SPDX-License-Identifier: BSD-2-Clause-Patent
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/ArmMmuLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+
+// We use this index definition to define an invalid block entry
+#define TT_ATTR_INDX_INVALID    ((UINT32)~0)
+
+STATIC
+UINT64
+ArmMemoryAttributeToPageAttribute (
+  IN ARM_MEMORY_REGION_ATTRIBUTES  Attributes
+  )
+{
+  switch (Attributes) {
+  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
+  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
+    return TT_ATTR_INDX_MEMORY_WRITE_BACK;
+
+  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+    return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
+
+  case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+    return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
+
+  // Uncached and device mappings are treated as outer shareable by default.
+  case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+    return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+
+  default:
+    ASSERT(0);
+  case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+  case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+    if (ArmReadCurrentEL () == AARCH64_EL2)
+      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
+    else
+      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
+  }
+}
+
+UINT64
+PageAttributeToGcdAttribute (
+  IN UINT64 PageAttributes
+  )
+{
+  UINT64  GcdAttributes;
+
+  switch (PageAttributes & TT_ATTR_INDX_MASK) {
+  case TT_ATTR_INDX_DEVICE_MEMORY:
+    GcdAttributes = EFI_MEMORY_UC;
+    break;
+  case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
+    GcdAttributes = EFI_MEMORY_WC;
+    break;
+  case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
+    GcdAttributes = EFI_MEMORY_WT;
+    break;
+  case TT_ATTR_INDX_MEMORY_WRITE_BACK:
+    GcdAttributes = EFI_MEMORY_WB;
+    break;
+  default:
+    DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
+    ASSERT (0);
+    // The Global Coherency Domain (GCD) value is defined as a bit set.
+    // Returning 0 means no attribute has been set.
+ GcdAttributes = 0; + } + + // Determine protection attributes + if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) { + // Read only cases map to write-protect + GcdAttributes |= EFI_MEMORY_RO; + } + + // Process eXecute Never attribute + if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) { + GcdAttributes |= EFI_MEMORY_XP; + } + + return GcdAttributes; +} + +#define MIN_T0SZ 16 +#define BITS_PER_LEVEL 9 + +VOID +GetRootTranslationTableInfo ( + IN UINTN T0SZ, + OUT UINTN *TableLevel, + OUT UINTN *TableEntryCount + ) +{ + // Get the level of the root table + if (TableLevel) { + *TableLevel = (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL; + } + + if (TableEntryCount) { + *TableEntryCount = 1UL << (BITS_PER_LEVEL - (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL); + } +} + +/* +STATIC +VOID +ReplaceLiveEntry ( + IN UINT64 *Entry, + IN UINT64 Value, + IN UINT64 RegionStart + ) +{ + if (!ArmMmuEnabled ()) { + *Entry = Value; + } else { + ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart); + } +} +*/ + +STATIC +VOID +LookupAddresstoRootTable ( + IN UINT64 MaxAddress, + OUT UINTN *T0SZ, + OUT UINTN *TableEntryCount + ) +{ + UINTN TopBit; + + // Check the parameters are not NULL + ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL)); + + // Look for the highest bit set in MaxAddress + for (TopBit = 63; TopBit != 0; TopBit--) { + if ((1ULL << TopBit) & MaxAddress) { + // MaxAddress top bit is found + TopBit = TopBit + 1; + break; + } + } + ASSERT (TopBit != 0); + + // Calculate T0SZ from the top bit of the MaxAddress + *T0SZ = 64 - TopBit; + + // Get the Table info from T0SZ + GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount); +} + +STATIC +UINT64* +GetBlockEntryListFromAddress ( + IN UINT64 *RootTable, + IN UINT64 RegionStart, + OUT UINTN *TableLevel, + IN OUT UINT64 *BlockEntrySize, + OUT UINT64 **LastBlockEntry + ) +{ + UINTN RootTableLevel; + UINTN RootTableEntryCount; + UINT64 *TranslationTable; + UINT64 *BlockEntry; + UINT64 *SubTableBlockEntry; + UINT64 BlockEntryAddress; + UINTN BaseAddressAlignment; + UINTN PageLevel; + UINTN Index; + UINTN IndexLevel; + UINTN T0SZ; + UINT64 Attributes; + UINT64 TableAttributes; + + // Initialize variable + BlockEntry = NULL; + + // Ensure the parameters are valid + if (!(TableLevel && BlockEntrySize && LastBlockEntry)) { + ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER); + return NULL; + } + + // Ensure the Region is aligned on 4KB boundary + if ((RegionStart & (SIZE_4KB - 1)) != 0) { + ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER); + return NULL; + } + + // Ensure the required size is aligned on 4KB boundary and not 0 + if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) { + ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER); + return NULL; + } + + T0SZ = ArmGetTCR () & TCR_T0SZ_MASK; + // Get the Table info from T0SZ + GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount); + + // If the start address is 0x0 then we use the size of the region to identify the alignment + if (RegionStart == 0) { + // Identify the highest possible alignment for the Region Size + BaseAddressAlignment = LowBitSet64 (*BlockEntrySize); + } else { + // Identify the highest possible alignment for the Base Address + BaseAddressAlignment = LowBitSet64 (RegionStart); + } + + // Identify the Page Level the RegionStart must belong to. 
Note that PageLevel
+  // should be at least 1 since block translations are not supported at level 0
+  PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
+
+  // If the required size is smaller than the current block size then we need to go to the page below.
+  // The PageLevel was calculated on the Base Address alignment but did not take into account the alignment
+  // of the allocation size
+  while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
+    // It does not fit so we need to go a page level above
+    PageLevel++;
+  }
+
+  //
+  // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
+  //
+
+  TranslationTable = RootTable;
+  for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
+    BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
+
+    if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
+      // Go to the next table
+      TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+
+      // If we are at the last level then update the last level to next level
+      if (IndexLevel == PageLevel) {
+        // Enter the next level
+        PageLevel++;
+      }
+    } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
+      // If we are not at the last level then we need to split this BlockEntry
+      if (IndexLevel != PageLevel) {
+        // Retrieve the attributes from the block entry
+        Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
+
+        // Convert the block entry attributes into Table descriptor attributes
+        TableAttributes = TT_TABLE_AP_NO_PERMISSION;
+        if (Attributes & TT_NS) {
+          TableAttributes = TT_TABLE_NS;
+        }
+
+        // Get the address corresponding at this entry
+        BlockEntryAddress = RegionStart;
+        BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
+        // Shift back to right to set zero before the effective address
+        BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
+
+        // Set the correct entry type for the next page level
+        if ((IndexLevel + 1) == 3) {
+          Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
+        } else {
+          Attributes |= TT_TYPE_BLOCK_ENTRY;
+        }
+
+        // Create a new translation table
+        TranslationTable = AllocatePages (1);
+        if (TranslationTable == NULL) {
+          return NULL;
+        }
+
+        // Populate the newly created lower level table
+        SubTableBlockEntry = TranslationTable;
+        for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
+          *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
+          SubTableBlockEntry++;
+        }
+
+        // Fill the BlockEntry with the new TranslationTable
+        *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;
+      }
+    } else {
+      if (IndexLevel != PageLevel) {
+        //
+        // Case when we have an Invalid Entry and we are at a page level above the one targeted.
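+        // (A new zeroed level table is allocated below and linked into this entry.)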
+ // + + // Create a new translation table + TranslationTable = AllocatePages (1); + if (TranslationTable == NULL) { + return NULL; + } + + ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64)); + + // Fill the new BlockEntry with the TranslationTable + *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY; + } + } + } + + // Expose the found PageLevel to the caller + *TableLevel = PageLevel; + + // Now, we have the Table Level we can get the Block Size associated to this table + *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel); + + // The last block of the root table depends on the number of entry in this table, + // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table. + *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable, + (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT); + + return BlockEntry; +} + +STATIC +EFI_STATUS +UpdateRegionMapping ( + IN UINT64 *RootTable, + IN UINT64 RegionStart, + IN UINT64 RegionLength, + IN UINT64 Attributes, + IN UINT64 BlockEntryMask + ) +{ + UINT32 Type; + UINT64 *BlockEntry; + UINT64 *LastBlockEntry; + UINT64 BlockEntrySize; + UINTN TableLevel; + + // Ensure the Length is aligned on 4KB boundary + if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) { + ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER); + return EFI_INVALID_PARAMETER; + } + + do { + // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor + // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor + BlockEntrySize = RegionLength; + BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry); + if (BlockEntry == NULL) { + // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables + return EFI_OUT_OF_RESOURCES; + } + + if (TableLevel != 3) { + Type = TT_TYPE_BLOCK_ENTRY; + } else { + Type = TT_TYPE_BLOCK_ENTRY_LEVEL3; + } + + do { + // Fill the Block Entry with attribute and output block address + *BlockEntry &= BlockEntryMask; + *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type; + + ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart); + + // Go to the next BlockEntry + RegionStart += BlockEntrySize; + RegionLength -= BlockEntrySize; + BlockEntry++; + + // Break the inner loop when next block is a table + // Rerun GetBlockEntryListFromAddress to avoid page table memory leak + if (TableLevel != 3 && BlockEntry <= LastBlockEntry && + (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) { + break; + } + } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry)); + } while (RegionLength != 0); + + return EFI_SUCCESS; +} + +STATIC +EFI_STATUS +FillTranslationTable ( + IN UINT64 *RootTable, + IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion + ) +{ + return UpdateRegionMapping ( + RootTable, + MemoryRegion->VirtualBase, + MemoryRegion->Length, + ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF, + 0 + ); +} + +STATIC +UINT64 +GcdAttributeToPageAttribute ( + IN UINT64 GcdAttributes + ) +{ + UINT64 PageAttributes; + + switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) { + case EFI_MEMORY_UC: + PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY; + break; + case EFI_MEMORY_WC: + PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE; + break; + case EFI_MEMORY_WT: + PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | 
TT_SH_INNER_SHAREABLE; + break; + case EFI_MEMORY_WB: + PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE; + break; + default: + PageAttributes = TT_ATTR_INDX_MASK; + break; + } + + if ((GcdAttributes & EFI_MEMORY_XP) != 0 || + (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) { + if (ArmReadCurrentEL () == AARCH64_EL2) { + PageAttributes |= TT_XN_MASK; + } else { + PageAttributes |= TT_UXN_MASK | TT_PXN_MASK; + } + } + + if ((GcdAttributes & EFI_MEMORY_RO) != 0) { + PageAttributes |= TT_AP_RO_RO; + } + + return PageAttributes | TT_AF; +} + +EFI_STATUS +ArmSetMemoryAttributes ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length, + IN UINT64 Attributes + ) +{ + EFI_STATUS Status; + UINT64 *TranslationTable; + UINT64 PageAttributes; + UINT64 PageAttributeMask; + + PageAttributes = GcdAttributeToPageAttribute (Attributes); + PageAttributeMask = 0; + + if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) { + // + // No memory type was set in Attributes, so we are going to update the + // permissions only. + // + PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK; + PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK | + TT_PXN_MASK | TT_XN_MASK); + } + + TranslationTable = ArmGetTTBR0BaseAddress (); + + Status = UpdateRegionMapping ( + TranslationTable, + BaseAddress, + Length, + PageAttributes, + PageAttributeMask); + if (EFI_ERROR (Status)) { + return Status; + } + + return EFI_SUCCESS; +} + +STATIC +EFI_STATUS +SetMemoryRegionAttribute ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length, + IN UINT64 Attributes, + IN UINT64 BlockEntryMask + ) +{ + EFI_STATUS Status; + UINT64 *RootTable; + + RootTable = ArmGetTTBR0BaseAddress (); + + Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask); + if (EFI_ERROR (Status)) { + return Status; + } + + return EFI_SUCCESS; +} + +EFI_STATUS +ArmSetMemoryRegionNoExec ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length + ) +{ + UINT64 Val; + + if (ArmReadCurrentEL () == AARCH64_EL1) { + Val = TT_PXN_MASK | TT_UXN_MASK; + } else { + Val = TT_XN_MASK; + } + + return SetMemoryRegionAttribute ( + BaseAddress, + Length, + Val, + ~TT_ADDRESS_MASK_BLOCK_ENTRY); +} + +EFI_STATUS +ArmClearMemoryRegionNoExec ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length + ) +{ + UINT64 Mask; + + // XN maps to UXN in the EL1&0 translation regime + Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK); + + return SetMemoryRegionAttribute ( + BaseAddress, + Length, + 0, + Mask); +} + +EFI_STATUS +ArmSetMemoryRegionReadOnly ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length + ) +{ + return SetMemoryRegionAttribute ( + BaseAddress, + Length, + TT_AP_RO_RO, + ~TT_ADDRESS_MASK_BLOCK_ENTRY); +} + +EFI_STATUS +ArmClearMemoryRegionReadOnly ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length + ) +{ + return SetMemoryRegionAttribute ( + BaseAddress, + Length, + TT_AP_RW_RW, + ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK)); +} + +EFI_STATUS +EFIAPI +ArmConfigureMmu ( + IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable, + OUT VOID **TranslationTableBase OPTIONAL, + OUT UINTN *TranslationTableSize OPTIONAL + ) +{ + VOID* TranslationTable; + UINT32 TranslationTableAttribute; + UINT64 MaxAddress; + UINTN T0SZ; + UINTN RootTableEntryCount; + UINT64 TCR; + EFI_STATUS Status; + + if(MemoryTable == NULL) { + ASSERT (MemoryTable != NULL); + return EFI_INVALID_PARAMETER; + } + + // + // Limit the virtual address space to what we can actually use: UEFI + // mandates a 
1:1 mapping, so no point in making the virtual address
+  // space larger than the physical address space. We also have to take
+  // into account the architectural limitations that result from UEFI's
+  // use of 4 KB pages.
+  //
+  MaxAddress = MIN (LShiftU64 (1ULL, ArmGetPhysicalAddressBits ()) - 1,
+                    MAX_ALLOC_ADDRESS);
+
+  // Lookup the Table Level to get the information
+  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
+
+  //
+  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
+  //
+  // Ideally we will be running at EL2, but should support EL1 as well.
+  // UEFI should not run at EL3.
+  if (ArmReadCurrentEL () == AARCH64_EL2) {
+    // Note: Bits 23 and 31 are reserved (RES1) bits in TCR_EL2
+    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
+
+    // Set the Physical Address Size using MaxAddress
+    if (MaxAddress < SIZE_4GB) {
+      TCR |= TCR_PS_4GB;
+    } else if (MaxAddress < SIZE_64GB) {
+      TCR |= TCR_PS_64GB;
+    } else if (MaxAddress < SIZE_1TB) {
+      TCR |= TCR_PS_1TB;
+    } else if (MaxAddress < SIZE_4TB) {
+      TCR |= TCR_PS_4TB;
+    } else if (MaxAddress < SIZE_16TB) {
+      TCR |= TCR_PS_16TB;
+    } else if (MaxAddress < SIZE_256TB) {
+      TCR |= TCR_PS_256TB;
+    } else {
+      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
+      ASSERT (0); // Address spaces larger than 48 bits are not supported
+      return EFI_UNSUPPORTED;
+    }
+  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
+    // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
+    TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;
+
+    // Set the Physical Address Size using MaxAddress
+    if (MaxAddress < SIZE_4GB) {
+      TCR |= TCR_IPS_4GB;
+    } else if (MaxAddress < SIZE_64GB) {
+      TCR |= TCR_IPS_64GB;
+    } else if (MaxAddress < SIZE_1TB) {
+      TCR |= TCR_IPS_1TB;
+    } else if (MaxAddress < SIZE_4TB) {
+      TCR |= TCR_IPS_4TB;
+    } else if (MaxAddress < SIZE_16TB) {
+      TCR |= TCR_IPS_16TB;
+    } else if (MaxAddress < SIZE_256TB) {
+      TCR |= TCR_IPS_256TB;
+    } else {
+      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
+      ASSERT (0); // Address spaces larger than 48 bits are not supported
+      return EFI_UNSUPPORTED;
+    }
+  } else {
+    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Translation table walks are always cache coherent on ARMv8-A, so cache
+  // maintenance on page tables is never needed. Since there is a risk of
+  // loss of coherency when using mismatched attributes, and given that memory
+  // is mapped cacheable except for extraordinary cases (such as non-coherent
+  // DMA), have the page table walker perform cached accesses as well, and
+  // assert below that that matches the attributes we use for CPU accesses to
+  // the region.
+  //
+  TCR |= TCR_SH_INNER_SHAREABLE |
+         TCR_RGN_OUTER_WRITE_BACK_ALLOC |
+         TCR_RGN_INNER_WRITE_BACK_ALLOC;
+
+  // Set TCR
+  ArmSetTCR (TCR);
+
+  // Allocate pages for translation table
+  TranslationTable = AllocatePages (1);
+  if (TranslationTable == NULL) {
+    return EFI_OUT_OF_RESOURCES;
+  }
+  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
+  // functions without needing to pass this value across the functions. The MMU is only enabled
+  // after the translation tables are populated.
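+  // (ArmSetMemoryAttributes () and SetMemoryRegionAttribute () below locate the
+  // root table via ArmGetTTBR0BaseAddress (), so TTBR0 must be set first.)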
+  ArmSetTTBR0 (TranslationTable);
+
+  if (TranslationTableBase != NULL) {
+    *TranslationTableBase = TranslationTable;
+  }
+
+  if (TranslationTableSize != NULL) {
+    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
+  }
+
+  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
+
+  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
+  ArmDisableMmu ();
+  ArmDisableDataCache ();
+  ArmDisableInstructionCache ();
+
+  // Make sure nothing sneaked into the cache
+  ArmCleanInvalidateDataCache ();
+  ArmInvalidateInstructionCache ();
+
+  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
+  UINTN Idx = 0;
+  while (MemoryTable->Length != 0) {
+    if ((MemoryTable->Length & (SIZE_4KB - 1)) != 0) {
+      DEBUG ((DEBUG_ERROR, "ArmConfigureMmu: MemoryTable[%d] length is not 4 KB aligned\n", Idx));
+    }
+
+    DEBUG_CODE_BEGIN ();
+      // Find the memory attribute for the Translation Table
+      if ((UINTN)TranslationTable >= MemoryTable->PhysicalBase &&
+          (UINTN)TranslationTable + EFI_PAGE_SIZE <= MemoryTable->PhysicalBase +
+                                                     MemoryTable->Length) {
+        TranslationTableAttribute = MemoryTable->Attributes;
+      }
+    DEBUG_CODE_END ();
+
+    Status = FillTranslationTable (TranslationTable, MemoryTable);
+    if (EFI_ERROR (Status)) {
+      goto FREE_TRANSLATION_TABLE;
+    }
+    MemoryTable++;
+    Idx++;
+  }
+
+  ASSERT (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK ||
+          TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK);
+
+  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |                      // mapped to EFI_MEMORY_UC
+              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
+              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
+              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK));       // mapped to EFI_MEMORY_WB
+
+  ArmDisableAlignmentCheck ();
+  ArmEnableStackAlignmentCheck ();
+  ArmEnableInstructionCache ();
+  ArmEnableDataCache ();
+
+  ArmEnableMmu ();
+  return EFI_SUCCESS;
+
+FREE_TRANSLATION_TABLE:
+  FreePages (TranslationTable, 1);
+  return Status;
+}
+
+RETURN_STATUS
+EFIAPI
+ArmMmuBaseLibConstructor (
+  VOID
+  )
+{
+  extern UINT32 ArmReplaceLiveTranslationEntrySize;
+
+  //
+  // The ArmReplaceLiveTranslationEntry () helper function may be invoked
+  // with the MMU off so we have to ensure that it gets cleaned to the PoC
+  //
+  WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
+    ArmReplaceLiveTranslationEntrySize);
+
+  return RETURN_SUCCESS;
+}
diff --git a/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S b/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
new file mode 100644
index 0000000..66ebca5
--- /dev/null
+++ b/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
@@ -0,0 +1,74 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2016, Linaro Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+#include <AsmMacroIoLibV8.h>
+
+  .set CTRL_M_BIT,      (1 << 0)
+
+  .macro __replace_entry, el
+
+  // disable the MMU
+  mrs   x8, sctlr_el\el
+  bic   x9, x8, #CTRL_M_BIT
+  msr   sctlr_el\el, x9
+  isb
+
+  // write updated entry
+  str   x1, [x0]
+
+  // invalidate again to get rid of stale clean cachelines that may
+  // have been filled speculatively since the last invalidate
+  dmb   sy
+  dc    ivac, x0
+
+  // flush translations for the target address from the TLBs
+  lsr   x2, x2, #12
+  .if   \el == 1
+  tlbi  vaae1, x2
+  .else
+  tlbi  vae\el, x2
+  .endif
+  dsb   nsh
+
+  // re-enable the MMU
+  msr   sctlr_el\el, x8
+  isb
+  .endm
+
+//VOID
+//ArmReplaceLiveTranslationEntry (
+//  IN  UINT64  *Entry,
+//  IN  UINT64  Value,
+//  IN  UINT64  Address
+//  )
+ASM_FUNC(ArmReplaceLiveTranslationEntry)
+
+  // disable interrupts
+  mrs   x4, daif
+  msr   daifset, #0xf
+  isb
+
+  // clean and invalidate first so that we don't clobber
+  // adjacent entries that are dirty in the caches
+  dc    civac, x0
+  dsb   nsh
+
+  EL1_OR_EL2_OR_EL3(x3)
+1:__replace_entry 1
+  b     4f
+2:__replace_entry 2
+  b     4f
+3:__replace_entry 3
+
+4:msr   daif, x4
+  ret
+
+ASM_GLOBAL ASM_PFX(ArmReplaceLiveTranslationEntrySize)
+
+ASM_PFX(ArmReplaceLiveTranslationEntrySize):
+  .long . - ArmReplaceLiveTranslationEntry
diff --git a/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuPeiLibConstructor.c b/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuPeiLibConstructor.c
new file mode 100644
index 0000000..8031792
--- /dev/null
+++ b/sdm845Pkg/Library/ArmMmuLib/AArch64/ArmMmuPeiLibConstructor.c
@@ -0,0 +1,55 @@
+#/* @file
+#
+#  Copyright (c) 2016, Linaro Limited. All rights reserved.
+#
+#  SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#*/
+
+#include <PiPei.h>
+
+#include <Library/ArmLib.h>
+#include <Library/ArmMmuLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/DebugLib.h>
+
+EFI_STATUS
+EFIAPI
+ArmMmuPeiLibConstructor (
+  IN       EFI_PEI_FILE_HANDLE   FileHandle,
+  IN CONST EFI_PEI_SERVICES      **PeiServices
+  )
+{
+  extern UINT32 ArmReplaceLiveTranslationEntrySize;
+
+  EFI_FV_FILE_INFO    FileInfo;
+  EFI_STATUS          Status;
+
+  ASSERT (FileHandle != NULL);
+
+  Status = (*PeiServices)->FfsGetFileInfo (FileHandle, &FileInfo);
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // Some platforms do not cope very well with cache maintenance being
+  // performed on regions backed by NOR flash. Since the firmware image
+  // can be assumed to be clean to the PoC when running XIP, even when PEI
+  // is executing from DRAM, we only need to perform the cache maintenance
+  // when not executing in place.
+  //
+  if ((UINTN)FileInfo.Buffer <= (UINTN)ArmReplaceLiveTranslationEntry &&
+      ((UINTN)FileInfo.Buffer + FileInfo.BufferSize >=
+       (UINTN)ArmReplaceLiveTranslationEntry + ArmReplaceLiveTranslationEntrySize)) {
+    DEBUG ((EFI_D_INFO, "ArmMmuLib: skipping cache maintenance on XIP PEIM\n"));
+  } else {
+    DEBUG ((EFI_D_INFO, "ArmMmuLib: performing cache maintenance on shadowed PEIM\n"));
+    //
+    // The ArmReplaceLiveTranslationEntry () helper function may be invoked
+    // with the MMU off so we have to ensure that it gets cleaned to the PoC
+    //
+    WriteBackDataCacheRange (ArmReplaceLiveTranslationEntry,
+      ArmReplaceLiveTranslationEntrySize);
+  }
+
+  return RETURN_SUCCESS;
+}
diff --git a/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c b/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c
new file mode 100644
index 0000000..74ac31d
--- /dev/null
+++ b/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c
@@ -0,0 +1,841 @@
+/** @file
+* File managing the MMU for ARMv7 architecture
+*
+* Copyright (c) 2011-2016, ARM Limited. All rights reserved.
+*
+* SPDX-License-Identifier: BSD-2-Clause-Patent
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/ArmV7.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/PcdLib.h>
+
+#define ID_MMFR0_SHARELVL_SHIFT       12
+#define ID_MMFR0_SHARELVL_MASK       0xf
+#define ID_MMFR0_SHARELVL_ONE          0
+#define ID_MMFR0_SHARELVL_TWO          1
+
+#define ID_MMFR0_INNERSHR_SHIFT       28
+#define ID_MMFR0_INNERSHR_MASK       0xf
+#define ID_MMFR0_OUTERSHR_SHIFT        8
+#define ID_MMFR0_OUTERSHR_MASK       0xf
+
+#define ID_MMFR0_SHR_IMP_UNCACHED      0
+#define ID_MMFR0_SHR_IMP_HW_COHERENT   1
+#define ID_MMFR0_SHR_IGNORED         0xf
+
+#define __EFI_MEMORY_RWX               0    // no restrictions
+
+#define CACHE_ATTRIBUTE_MASK   (EFI_MEMORY_UC | \
+                                EFI_MEMORY_WC | \
+                                EFI_MEMORY_WT | \
+                                EFI_MEMORY_WB | \
+                                EFI_MEMORY_UCE | \
+                                EFI_MEMORY_WP)
+
+UINTN
+EFIAPI
+ArmReadIdMmfr0 (
+  VOID
+  );
+
+BOOLEAN
+EFIAPI
+ArmHasMpExtensions (
+  VOID
+  );
+
+UINT32
+ConvertSectionAttributesToPageAttributes (
+  IN UINT32   SectionAttributes,
+  IN BOOLEAN  IsLargePage
+  )
+{
+  UINT32 PageAttributes;
+
+  PageAttributes = 0;
+  PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_CACHE_POLICY (SectionAttributes, IsLargePage);
+  PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_AP (SectionAttributes);
+  PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_XN (SectionAttributes, IsLargePage);
+  PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_NG (SectionAttributes);
+  PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_S (SectionAttributes);
+
+  return PageAttributes;
+}
+
+STATIC
+BOOLEAN
+PreferNonshareableMemory (
+  VOID
+  )
+{
+  UINTN   Mmfr;
+  UINTN   Val;
+
+  if (FeaturePcdGet (PcdNormalMemoryNonshareableOverride)) {
+    return TRUE;
+  }
+
+  //
+  // Check whether the innermost level of shareability (the level we will use
+  // by default to map normal memory) is implemented with hardware coherency
+  // support. Otherwise, revert to mapping as non-shareable.
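+  // (A shareability field of ID_MMFR0_SHR_IMP_UNCACHED means the shareable
+  // attribute is implemented by disabling the caches, which we want to avoid.)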
+ // + Mmfr = ArmReadIdMmfr0 (); + switch ((Mmfr >> ID_MMFR0_SHARELVL_SHIFT) & ID_MMFR0_SHARELVL_MASK) { + case ID_MMFR0_SHARELVL_ONE: + // one level of shareability + Val = (Mmfr >> ID_MMFR0_OUTERSHR_SHIFT) & ID_MMFR0_OUTERSHR_MASK; + break; + case ID_MMFR0_SHARELVL_TWO: + // two levels of shareability + Val = (Mmfr >> ID_MMFR0_INNERSHR_SHIFT) & ID_MMFR0_INNERSHR_MASK; + break; + default: + // unexpected value -> shareable is the safe option + ASSERT (FALSE); + return FALSE; + } + return Val != ID_MMFR0_SHR_IMP_HW_COHERENT; +} + +STATIC +VOID +PopulateLevel2PageTable ( + IN UINT32 *SectionEntry, + IN UINT32 PhysicalBase, + IN UINT32 RemainLength, + IN ARM_MEMORY_REGION_ATTRIBUTES Attributes + ) +{ + UINT32* PageEntry; + UINT32 Pages; + UINT32 Index; + UINT32 PageAttributes; + UINT32 SectionDescriptor; + UINT32 TranslationTable; + UINT32 BaseSectionAddress; + UINT32 FirstPageOffset; + + switch (Attributes) { + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK: + PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK; + break; + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE: + PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK; + PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED; + break; + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH: + PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_THROUGH; + break; + case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE: + PageAttributes = TT_DESCRIPTOR_PAGE_DEVICE; + break; + case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED: + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED: + PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED; + break; + default: + PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED; + break; + } + + if (PreferNonshareableMemory ()) { + PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED; + } + + // Check if the Section Entry has already been populated. Otherwise attach a + // Level 2 Translation Table to it + if (*SectionEntry != 0) { + // The entry must be a page table. 
Otherwise the memory map contains overlapping regions
+    if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(*SectionEntry)) {
+      TranslationTable = *SectionEntry & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK;
+    } else if ((*SectionEntry & TT_DESCRIPTOR_SECTION_TYPE_MASK) == TT_DESCRIPTOR_SECTION_TYPE_SECTION) {
+      // Case where a virtual memory map descriptor overlapped a section entry
+
+      // Allocate a Level2 Page Table for this Section
+      TranslationTable = (UINTN)AllocatePages(EFI_SIZE_TO_PAGES(TRANSLATION_TABLE_PAGE_SIZE + TRANSLATION_TABLE_PAGE_ALIGNMENT));
+      TranslationTable = ((UINTN)TranslationTable + TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK;
+
+      // Translate the Section Descriptor into Page Descriptor
+      SectionDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (*SectionEntry, FALSE);
+
+      BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(*SectionEntry);
+
+      // Populate the new Level2 Page Table for the section
+      PageEntry = (UINT32*)TranslationTable;
+      for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
+        PageEntry[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseSectionAddress + (Index << 12)) | SectionDescriptor;
+      }
+
+      // Overwrite the section entry to point to the new Level2 Translation Table
+      *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
+                      (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |
+                      TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
+    } else {
+      // We do not support the other section type (16MB Section)
+      ASSERT(0);
+      return;
+    }
+  } else {
+    TranslationTable = (UINTN)AllocatePages(EFI_SIZE_TO_PAGES(TRANSLATION_TABLE_PAGE_SIZE + TRANSLATION_TABLE_PAGE_ALIGNMENT));
+    TranslationTable = ((UINTN)TranslationTable + TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_PAGE_ALIGNMENT_MASK;
+
+    ZeroMem ((VOID *)TranslationTable, TRANSLATION_TABLE_PAGE_SIZE);
+
+    *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
+                    (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ?
(1 << 3) : 0) | + TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE; + } + + FirstPageOffset = (PhysicalBase & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT; + PageEntry = (UINT32 *)TranslationTable + FirstPageOffset; + Pages = RemainLength / TT_DESCRIPTOR_PAGE_SIZE; + + ASSERT (FirstPageOffset + Pages <= TRANSLATION_TABLE_PAGE_COUNT); + + for (Index = 0; Index < Pages; Index++) { + *PageEntry++ = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(PhysicalBase) | PageAttributes; + PhysicalBase += TT_DESCRIPTOR_PAGE_SIZE; + } + +} + +STATIC +VOID +FillTranslationTable ( + IN UINT32 *TranslationTable, + IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion + ) +{ + UINT32 *SectionEntry; + UINT32 Attributes; + UINT32 PhysicalBase; + UINT64 RemainLength; + UINT32 PageMapLength; + + ASSERT(MemoryRegion->Length > 0); + + if (MemoryRegion->PhysicalBase >= SIZE_4GB) { + return; + } + + PhysicalBase = MemoryRegion->PhysicalBase; + RemainLength = MIN(MemoryRegion->Length, SIZE_4GB - PhysicalBase); + + switch (MemoryRegion->Attributes) { + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK: + Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(0); + break; + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE: + Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(0); + Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED; + break; + case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH: + Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(0); + break; + case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE: + Attributes = TT_DESCRIPTOR_SECTION_DEVICE(0); + break; + case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED: + Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0); + break; + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK: + Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(1); + break; + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE: + Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(1); + Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED; + break; + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH: + Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(1); + break; + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE: + Attributes = TT_DESCRIPTOR_SECTION_DEVICE(1); + break; + case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED: + Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(1); + break; + default: + Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0); + break; + } + + if (PreferNonshareableMemory ()) { + Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED; + } + + // Get the first section entry for this mapping + SectionEntry = TRANSLATION_TABLE_ENTRY_FOR_VIRTUAL_ADDRESS(TranslationTable, MemoryRegion->VirtualBase); + + while (RemainLength != 0) { + if (PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE == 0 && + RemainLength >= TT_DESCRIPTOR_SECTION_SIZE) { + // Case: Physical address aligned on the Section Size (1MB) && the length + // is greater than the Section Size + *SectionEntry++ = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(PhysicalBase) | Attributes; + PhysicalBase += TT_DESCRIPTOR_SECTION_SIZE; + RemainLength -= TT_DESCRIPTOR_SECTION_SIZE; + } else { + PageMapLength = MIN (RemainLength, TT_DESCRIPTOR_SECTION_SIZE - + (PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE)); + + // Case: Physical address aligned on the Section Size (1MB) && the length + // does not fill a section + // Case: Physical address NOT aligned on the Section Size (1MB) + PopulateLevel2PageTable (SectionEntry++, PhysicalBase, PageMapLength, + MemoryRegion->Attributes); + + // If it is the last entry + if (RemainLength < TT_DESCRIPTOR_SECTION_SIZE) { + break; + } + + PhysicalBase += 
PageMapLength; + RemainLength -= PageMapLength; + } + } +} + +RETURN_STATUS +EFIAPI +ArmConfigureMmu ( + IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable, + OUT VOID **TranslationTableBase OPTIONAL, + OUT UINTN *TranslationTableSize OPTIONAL + ) +{ + VOID* TranslationTable; + ARM_MEMORY_REGION_ATTRIBUTES TranslationTableAttribute; + UINT32 TTBRAttributes; + + // Allocate pages for translation table. + TranslationTable = AllocatePages (EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_SECTION_SIZE + TRANSLATION_TABLE_SECTION_ALIGNMENT)); + if (TranslationTable == NULL) { + return RETURN_OUT_OF_RESOURCES; + } + TranslationTable = (VOID*)(((UINTN)TranslationTable + TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK) & ~TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK); + + if (TranslationTableBase != NULL) { + *TranslationTableBase = TranslationTable; + } + + if (TranslationTableSize != NULL) { + *TranslationTableSize = TRANSLATION_TABLE_SECTION_SIZE; + } + + ZeroMem (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE); + + // By default, mark the translation table as belonging to a uncached region + TranslationTableAttribute = ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED; + while (MemoryTable->Length != 0) { + // Find the memory attribute for the Translation Table + if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) && ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) { + TranslationTableAttribute = MemoryTable->Attributes; + } + + FillTranslationTable (TranslationTable, MemoryTable); + MemoryTable++; + } + + // Translate the Memory Attributes into Translation Table Register Attributes + if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) || + (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) { + TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_WRITE_BACK_ALLOC : TTBR_WRITE_BACK_ALLOC; + } else { + // Page tables must reside in memory mapped as write-back cacheable + ASSERT (0); + return RETURN_UNSUPPORTED; + } + + if (TTBRAttributes & TTBR_SHAREABLE) { + if (PreferNonshareableMemory ()) { + TTBRAttributes ^= TTBR_SHAREABLE; + } else { + // + // Unlike the S bit in the short descriptors, which implies inner shareable + // on an implementation that supports two levels, the meaning of the S bit + // in the TTBR depends on the NOS bit, which defaults to Outer Shareable. + // However, we should only set this bit after we have confirmed that the + // implementation supports multiple levels, or else the NOS bit is UNK/SBZP + // + if (((ArmReadIdMmfr0 () >> 12) & 0xf) != 0) { + TTBRAttributes |= TTBR_NOT_OUTER_SHAREABLE; + } + } + } + + ArmCleanInvalidateDataCache (); + ArmInvalidateInstructionCache (); + + ArmDisableDataCache (); + ArmDisableInstructionCache(); + // TLBs are also invalidated when calling ArmDisableMmu() + ArmDisableMmu (); + + // Make sure nothing sneaked into the cache + ArmCleanInvalidateDataCache (); + ArmInvalidateInstructionCache (); + + ArmSetTTBR0 ((VOID *)(UINTN)(((UINTN)TranslationTable & ~TRANSLATION_TABLE_SECTION_ALIGNMENT_MASK) | (TTBRAttributes & 0x7F))); + + // + // The TTBCR register value is undefined at reset in the Non-Secure world. + // Writing 0 has the effect of: + // Clearing EAE: Use short descriptors, as mandated by specification. + // Clearing PD0 and PD1: Translation Table Walk Disable is off. + // Clearing N: Perform all translation table walks through TTBR0. + // (0 is the default reset value in systems not implementing + // the Security Extensions.) 
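+  // With N cleared, TTBR0 covers the entire address space, so every walk goes
+  // through the table populated above.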
+ // + ArmSetTTBCR (0); + + ArmSetDomainAccessControl (DOMAIN_ACCESS_CONTROL_NONE(15) | + DOMAIN_ACCESS_CONTROL_NONE(14) | + DOMAIN_ACCESS_CONTROL_NONE(13) | + DOMAIN_ACCESS_CONTROL_NONE(12) | + DOMAIN_ACCESS_CONTROL_NONE(11) | + DOMAIN_ACCESS_CONTROL_NONE(10) | + DOMAIN_ACCESS_CONTROL_NONE( 9) | + DOMAIN_ACCESS_CONTROL_NONE( 8) | + DOMAIN_ACCESS_CONTROL_NONE( 7) | + DOMAIN_ACCESS_CONTROL_NONE( 6) | + DOMAIN_ACCESS_CONTROL_NONE( 5) | + DOMAIN_ACCESS_CONTROL_NONE( 4) | + DOMAIN_ACCESS_CONTROL_NONE( 3) | + DOMAIN_ACCESS_CONTROL_NONE( 2) | + DOMAIN_ACCESS_CONTROL_NONE( 1) | + DOMAIN_ACCESS_CONTROL_CLIENT(0)); + + ArmEnableInstructionCache(); + ArmEnableDataCache(); + ArmEnableMmu(); + return RETURN_SUCCESS; +} + +STATIC +EFI_STATUS +ConvertSectionToPages ( + IN EFI_PHYSICAL_ADDRESS BaseAddress + ) +{ + UINT32 FirstLevelIdx; + UINT32 SectionDescriptor; + UINT32 PageTableDescriptor; + UINT32 PageDescriptor; + UINT32 Index; + + volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable; + volatile ARM_PAGE_TABLE_ENTRY *PageTable; + + DEBUG ((EFI_D_PAGE, "Converting section at 0x%x to pages\n", (UINTN)BaseAddress)); + + // Obtain page table base + FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress (); + + // Calculate index into first level translation table for start of modification + FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT; + ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT); + + // Get section attributes and convert to page attributes + SectionDescriptor = FirstLevelTable[FirstLevelIdx]; + PageDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (SectionDescriptor, FALSE); + + // Allocate a page table for the 4KB entries (we use up a full page even though we only need 1KB) + PageTable = (volatile ARM_PAGE_TABLE_ENTRY *)AllocatePages (1); + if (PageTable == NULL) { + return EFI_OUT_OF_RESOURCES; + } + + // Write the page table entries out + for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) { + PageTable[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseAddress + (Index << 12)) | PageDescriptor; + } + + // Formulate page table entry, Domain=0, NS=0 + PageTableDescriptor = (((UINTN)PageTable) & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) | TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE; + + // Write the page table entry out, replacing section entry + FirstLevelTable[FirstLevelIdx] = PageTableDescriptor; + + return EFI_SUCCESS; +} + +STATIC +EFI_STATUS +UpdatePageEntries ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length, + IN UINT64 Attributes, + OUT BOOLEAN *FlushTlbs OPTIONAL + ) +{ + EFI_STATUS Status; + UINT32 EntryValue; + UINT32 EntryMask; + UINT32 FirstLevelIdx; + UINT32 Offset; + UINT32 NumPageEntries; + UINT32 Descriptor; + UINT32 p; + UINT32 PageTableIndex; + UINT32 PageTableEntry; + UINT32 CurrentPageTableEntry; + VOID *Mva; + + volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable; + volatile ARM_PAGE_TABLE_ENTRY *PageTable; + + Status = EFI_SUCCESS; + + // EntryMask: bitmask of values to change (1 = change this value, 0 = leave alone) + // EntryValue: values at bit positions specified by EntryMask + EntryMask = TT_DESCRIPTOR_PAGE_TYPE_MASK | TT_DESCRIPTOR_PAGE_AP_MASK; + if (Attributes & EFI_MEMORY_XP) { + EntryValue = TT_DESCRIPTOR_PAGE_TYPE_PAGE_XN; + } else { + EntryValue = TT_DESCRIPTOR_PAGE_TYPE_PAGE; + } + + // Although the PI spec is unclear on this, the GCD guarantees that only + // one Attribute bit is set at a time, so the order of the conditionals below 
is irrelevant. If no memory attribute is specified, we preserve whatever
+  // memory type is set in the page tables, and update the permission attributes
+  // only.
+  if (Attributes & EFI_MEMORY_UC) {
+    // modify cacheability attributes
+    EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+    // map to strongly ordered
+    EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_STRONGLY_ORDERED; // TEX[2:0] = 0, C=0, B=0
+  } else if (Attributes & EFI_MEMORY_WC) {
+    // modify cacheability attributes
+    EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+    // map to normal non-cacheable
+    EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_NON_CACHEABLE; // TEX [2:0]= 001 = 0x2, B=0, C=0
+  } else if (Attributes & EFI_MEMORY_WT) {
+    // modify cacheability attributes
+    EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+    // write through with no-allocate
+    EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_WRITE_THROUGH_NO_ALLOC; // TEX [2:0] = 0, C=1, B=0
+  } else if (Attributes & EFI_MEMORY_WB) {
+    // modify cacheability attributes
+    EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+    // write back (with allocate)
+    EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_WRITE_BACK_ALLOC; // TEX [2:0] = 001, C=1, B=1
+  } else if (Attributes & CACHE_ATTRIBUTE_MASK) {
+    // catch unsupported memory type attributes
+    ASSERT (FALSE);
+    return EFI_UNSUPPORTED;
+  }
+
+  if (Attributes & EFI_MEMORY_RO) {
+    EntryValue |= TT_DESCRIPTOR_PAGE_AP_RO_RO;
+  } else {
+    EntryValue |= TT_DESCRIPTOR_PAGE_AP_RW_RW;
+  }
+
+  // Obtain page table base
+  FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
+
+  // Calculate number of 4KB page table entries to change
+  NumPageEntries = Length / TT_DESCRIPTOR_PAGE_SIZE;
+
+  // Iterate for the number of 4KB pages to change
+  Offset = 0;
+  for (p = 0; p < NumPageEntries; p++) {
+    // Calculate index into first level translation table for page table value
+
+    FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress + Offset) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+    ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
+
+    // Read the descriptor from the first level page table
+    Descriptor = FirstLevelTable[FirstLevelIdx];
+
+    // Does this descriptor need to be converted from section entry to 4K pages?
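+    // (Attribute changes are applied at 4 KB granularity here, so a 1 MB
+    // section mapping must first be split into a level 2 page table.)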
+ if (!TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(Descriptor)) { + Status = ConvertSectionToPages (FirstLevelIdx << TT_DESCRIPTOR_SECTION_BASE_SHIFT); + if (EFI_ERROR(Status)) { + // Exit for loop + break; + } + + // Re-read descriptor + Descriptor = FirstLevelTable[FirstLevelIdx]; + if (FlushTlbs != NULL) { + *FlushTlbs = TRUE; + } + } + + // Obtain page table base address + PageTable = (ARM_PAGE_TABLE_ENTRY *)TT_DESCRIPTOR_PAGE_BASE_ADDRESS(Descriptor); + + // Calculate index into the page table + PageTableIndex = ((BaseAddress + Offset) & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT; + ASSERT (PageTableIndex < TRANSLATION_TABLE_PAGE_COUNT); + + // Get the entry + CurrentPageTableEntry = PageTable[PageTableIndex]; + + // Mask off appropriate fields + PageTableEntry = CurrentPageTableEntry & ~EntryMask; + + // Mask in new attributes and/or permissions + PageTableEntry |= EntryValue; + + if (CurrentPageTableEntry != PageTableEntry) { + Mva = (VOID *)(UINTN)((((UINTN)FirstLevelIdx) << TT_DESCRIPTOR_SECTION_BASE_SHIFT) + (PageTableIndex << TT_DESCRIPTOR_PAGE_BASE_SHIFT)); + + // Only need to update if we are changing the entry + PageTable[PageTableIndex] = PageTableEntry; + ArmUpdateTranslationTableEntry ((VOID *)&PageTable[PageTableIndex], Mva); + } + + Status = EFI_SUCCESS; + Offset += TT_DESCRIPTOR_PAGE_SIZE; + + } // End first level translation table loop + + return Status; +} + +STATIC +EFI_STATUS +UpdateSectionEntries ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length, + IN UINT64 Attributes + ) +{ + EFI_STATUS Status = EFI_SUCCESS; + UINT32 EntryMask; + UINT32 EntryValue; + UINT32 FirstLevelIdx; + UINT32 NumSections; + UINT32 i; + UINT32 CurrentDescriptor; + UINT32 Descriptor; + VOID *Mva; + volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable; + + // EntryMask: bitmask of values to change (1 = change this value, 0 = leave alone) + // EntryValue: values at bit positions specified by EntryMask + + // Make sure we handle a section range that is unmapped + EntryMask = TT_DESCRIPTOR_SECTION_TYPE_MASK | TT_DESCRIPTOR_SECTION_XN_MASK | + TT_DESCRIPTOR_SECTION_AP_MASK; + EntryValue = TT_DESCRIPTOR_SECTION_TYPE_SECTION; + + // Although the PI spec is unclear on this, the GCD guarantees that only + // one Attribute bit is set at a time, so the order of the conditionals below + // is irrelevant. If no memory attribute is specified, we preserve whatever + // memory type is set in the page tables, and update the permission attributes + // only. 
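+  // The cacheability cases below mirror UpdatePageEntries (), using the
+  // section descriptor encodings instead of the page ones.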
+  if (Attributes & EFI_MEMORY_UC) {
+    // modify cacheability attributes
+    EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+    // map to strongly ordered
+    EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_STRONGLY_ORDERED; // TEX[2:0] = 0, C=0, B=0
+  } else if (Attributes & EFI_MEMORY_WC) {
+    // modify cacheability attributes
+    EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+    // map to normal non-cacheable
+    EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_NON_CACHEABLE; // TEX [2:0]= 001 = 0x2, B=0, C=0
+  } else if (Attributes & EFI_MEMORY_WT) {
+    // modify cacheability attributes
+    EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+    // write through with no-allocate
+    EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_THROUGH_NO_ALLOC; // TEX [2:0] = 0, C=1, B=0
+  } else if (Attributes & EFI_MEMORY_WB) {
+    // modify cacheability attributes
+    EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+    // write back (with allocate)
+    EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_BACK_ALLOC; // TEX [2:0] = 001, C=1, B=1
+  } else if (Attributes & CACHE_ATTRIBUTE_MASK) {
+    // catch unsupported memory type attributes
+    ASSERT (FALSE);
+    return EFI_UNSUPPORTED;
+  }
+
+  if (Attributes & EFI_MEMORY_RO) {
+    EntryValue |= TT_DESCRIPTOR_SECTION_AP_RO_RO;
+  } else {
+    EntryValue |= TT_DESCRIPTOR_SECTION_AP_RW_RW;
+  }
+
+  if (Attributes & EFI_MEMORY_XP) {
+    EntryValue |= TT_DESCRIPTOR_SECTION_XN_MASK;
+  }
+
+  // obtain page table base
+  FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
+
+  // calculate index into first level translation table for start of modification
+  FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+  ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
+
+  // calculate number of 1MB first level entries this applies to
+  NumSections = Length / TT_DESCRIPTOR_SECTION_SIZE;
+
+  // iterate through each descriptor
+  for (i = 0; i < NumSections; i++) {
+    CurrentDescriptor = FirstLevelTable[FirstLevelIdx + i];
+
+    // has this descriptor already been converted to pages?
+    if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(CurrentDescriptor)) {
+      // forward this 1MB range to page table function instead
+      Status = UpdatePageEntries (
+                 (FirstLevelIdx + i) << TT_DESCRIPTOR_SECTION_BASE_SHIFT,
+                 TT_DESCRIPTOR_SECTION_SIZE,
+                 Attributes,
+                 NULL);
+    } else {
+      // still a section entry
+
+      if (CurrentDescriptor != 0) {
+        // mask off appropriate fields
+        Descriptor = CurrentDescriptor & ~EntryMask;
+      } else {
+        Descriptor = ((UINTN)FirstLevelIdx + i) << TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+      }
+
+      // mask in new attributes and/or permissions
+      Descriptor |= EntryValue;
+
+      if (CurrentDescriptor != Descriptor) {
+        Mva = (VOID *)(UINTN)(((UINTN)FirstLevelIdx + i) << TT_DESCRIPTOR_SECTION_BASE_SHIFT);
+
+        // Only need to update if we are changing the descriptor
+        FirstLevelTable[FirstLevelIdx + i] = Descriptor;
+        ArmUpdateTranslationTableEntry ((VOID *)&FirstLevelTable[FirstLevelIdx + i], Mva);
+      }
+
+      Status = EFI_SUCCESS;
+    }
+  }
+
+  return Status;
+}
+
+EFI_STATUS
+ArmSetMemoryAttributes (
+  IN EFI_PHYSICAL_ADDRESS      BaseAddress,
+  IN UINT64                    Length,
+  IN UINT64                    Attributes
+  )
+{
+  EFI_STATUS    Status;
+  UINT64        ChunkLength;
+  BOOLEAN       FlushTlbs;
+
+  if (BaseAddress > (UINT64)MAX_ADDRESS) {
+    return EFI_UNSUPPORTED;
+  }
+
+  Length = MIN (Length, (UINT64)MAX_ADDRESS - BaseAddress + 1);
+  if (Length == 0) {
+    return EFI_SUCCESS;
+  }
+
+  FlushTlbs = FALSE;
+  while (Length > 0) {
+    if ((BaseAddress % TT_DESCRIPTOR_SECTION_SIZE == 0) &&
+        Length >= TT_DESCRIPTOR_SECTION_SIZE) {
+
+      ChunkLength = Length - Length % TT_DESCRIPTOR_SECTION_SIZE;
+
+      DEBUG ((DEBUG_PAGE,
+        "SetMemoryAttributes(): MMU section 0x%lx length 0x%lx to %lx\n",
+        BaseAddress, ChunkLength, Attributes));
+
+      Status = UpdateSectionEntries (BaseAddress, ChunkLength, Attributes);
+
+      FlushTlbs = TRUE;
+    } else {
+
+      //
+      // Process page by page until the next section boundary, but only if
+      // we have more than a section's worth of area to deal with after that.
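+      // (Shorter runs are handed to UpdatePageEntries () in a single chunk.)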
+ // + ChunkLength = TT_DESCRIPTOR_SECTION_SIZE - + (BaseAddress % TT_DESCRIPTOR_SECTION_SIZE); + if (ChunkLength + TT_DESCRIPTOR_SECTION_SIZE > Length) { + ChunkLength = Length; + } + + DEBUG ((DEBUG_PAGE, + "SetMemoryAttributes(): MMU page 0x%lx length 0x%lx to %lx\n", + BaseAddress, ChunkLength, Attributes)); + + Status = UpdatePageEntries (BaseAddress, ChunkLength, Attributes, + &FlushTlbs); + } + + if (EFI_ERROR (Status)) { + break; + } + + BaseAddress += ChunkLength; + Length -= ChunkLength; + } + + if (FlushTlbs) { + ArmInvalidateTlb (); + } + return Status; +} + +EFI_STATUS +ArmSetMemoryRegionNoExec ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length + ) +{ + return ArmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_XP); +} + +EFI_STATUS +ArmClearMemoryRegionNoExec ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length + ) +{ + return ArmSetMemoryAttributes (BaseAddress, Length, __EFI_MEMORY_RWX); +} + +EFI_STATUS +ArmSetMemoryRegionReadOnly ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length + ) +{ + return ArmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_RO); +} + +EFI_STATUS +ArmClearMemoryRegionReadOnly ( + IN EFI_PHYSICAL_ADDRESS BaseAddress, + IN UINT64 Length + ) +{ + return ArmSetMemoryAttributes (BaseAddress, Length, __EFI_MEMORY_RWX); +} + +RETURN_STATUS +EFIAPI +ArmMmuBaseLibConstructor ( + VOID + ) +{ + return RETURN_SUCCESS; +} diff --git a/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S b/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S new file mode 100644 index 0000000..a97e3fa --- /dev/null +++ b/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S @@ -0,0 +1,29 @@ +#------------------------------------------------------------------------------ +# +# Copyright (c) 2016, Linaro Limited. All rights reserved. +# +# SPDX-License-Identifier: BSD-2-Clause-Patent +# +#------------------------------------------------------------------------------ + +#include + +.text +.align 2 + +GCC_ASM_EXPORT (ArmReadIdMmfr0) +GCC_ASM_EXPORT (ArmHasMpExtensions) + +#------------------------------------------------------------------------------ + +ASM_PFX (ArmHasMpExtensions): + mrc p15,0,R0,c0,c0,5 + // Get Multiprocessing extension (bit31) + lsr R0, R0, #31 + bx LR + +ASM_PFX(ArmReadIdMmfr0): + mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 Register + bx lr + +ASM_FUNCTION_REMOVE_IF_UNREFERENCED diff --git a/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm b/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm new file mode 100644 index 0000000..a65e95d --- /dev/null +++ b/sdm845Pkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm @@ -0,0 +1,26 @@ +//------------------------------------------------------------------------------ +// +// Copyright (c) 2016, Linaro Limited. All rights reserved. 
+// +// SPDX-License-Identifier: BSD-2-Clause-Patent +// +//------------------------------------------------------------------------------ + + + + INCLUDE AsmMacroExport.inc + + +//------------------------------------------------------------------------------ + + RVCT_ASM_EXPORT ArmHasMpExtensions + mrc p15,0,R0,c0,c0,5 + // Get Multiprocessing extension (bit31) + lsr R0, R0, #31 + bx LR + + RVCT_ASM_EXPORT ArmReadIdMmfr0 + mrc p15, 0, r0, c0, c1, 4 ; Read ID_MMFR0 Register + bx lr + + END diff --git a/sdm845Pkg/Library/ArmMmuLib/ArmMmuBaseLib.inf b/sdm845Pkg/Library/ArmMmuLib/ArmMmuBaseLib.inf new file mode 100644 index 0000000..5028a95 --- /dev/null +++ b/sdm845Pkg/Library/ArmMmuLib/ArmMmuBaseLib.inf @@ -0,0 +1,39 @@ +#/** @file +# +# Copyright (c) 2016 Linaro Ltd. All rights reserved. +# +# SPDX-License-Identifier: BSD-2-Clause-Patent +# +# +#**/ + +[Defines] + INF_VERSION = 0x00010005 + BASE_NAME = ArmMmuBaseLib + FILE_GUID = da8f0232-fb14-42f0-922c-63104d2c70bd + MODULE_TYPE = BASE + VERSION_STRING = 1.0 + LIBRARY_CLASS = ArmMmuLib + CONSTRUCTOR = ArmMmuBaseLibConstructor + +[Sources.AARCH64] + AArch64/ArmMmuLibCore.c + AArch64/ArmMmuLibReplaceEntry.S + +[Sources.ARM] + Arm/ArmMmuLibCore.c + Arm/ArmMmuLibV7Support.S |GCC + Arm/ArmMmuLibV7Support.asm |RVCT + +[Packages] + ArmPkg/ArmPkg.dec + EmbeddedPkg/EmbeddedPkg.dec + MdePkg/MdePkg.dec + +[LibraryClasses] + ArmLib + CacheMaintenanceLib + MemoryAllocationLib + +[Pcd.ARM] + gArmTokenSpaceGuid.PcdNormalMemoryNonshareableOverride diff --git a/sdm845Pkg/Library/ArmMmuLib/ArmMmuPeiLib.inf b/sdm845Pkg/Library/ArmMmuLib/ArmMmuPeiLib.inf new file mode 100644 index 0000000..ce9674e --- /dev/null +++ b/sdm845Pkg/Library/ArmMmuLib/ArmMmuPeiLib.inf @@ -0,0 +1,32 @@ +#/** @file +# +# Copyright (c) 2016 Linaro Ltd. All rights reserved. +# +# SPDX-License-Identifier: BSD-2-Clause-Patent +# +# +#**/ + +[Defines] + INF_VERSION = 0x00010005 + BASE_NAME = ArmMmuPeiLib + FILE_GUID = b50d8d53-1ad1-44ea-9e69-8c89d4a6d08b + MODULE_TYPE = PEIM + VERSION_STRING = 1.0 + LIBRARY_CLASS = ArmMmuLib|PEIM + CONSTRUCTOR = ArmMmuPeiLibConstructor + +[Sources.AARCH64] + AArch64/ArmMmuLibCore.c + AArch64/ArmMmuPeiLibConstructor.c + AArch64/ArmMmuLibReplaceEntry.S + +[Packages] + ArmPkg/ArmPkg.dec + EmbeddedPkg/EmbeddedPkg.dec + MdePkg/MdePkg.dec + +[LibraryClasses] + ArmLib + CacheMaintenanceLib + MemoryAllocationLib -- 2.45.2