diff --git a/common/board_r.c b/common/board_r.c
index 6432d23..2cb6836 100644
--- a/common/board_r.c
+++ b/common/board_r.c
@@ -795,6 +795,9 @@ init_fnc_t init_sequence_r[] = {
 #ifdef CONFIG_CLOCKS
 	set_cpu_clk_info, /* Setup clock information */
 #endif
+#ifdef CONFIG_EFI_LOADER
+	efi_memory_init,
+#endif
 	stdio_init_tables,
 	initr_serial,
 	initr_announce,
diff --git a/include/efi_loader.h b/include/efi_loader.h
index be3c28a..11be685 100644
--- a/include/efi_loader.h
+++ b/include/efi_loader.h
@@ -110,6 +110,25 @@ efi_status_t efi_exit_func(efi_status_t ret);
 /* Call this to relocate the runtime section to an address space */
 void efi_runtime_relocate(ulong offset, struct efi_mem_desc *map);
 
+/* Generic EFI memory allocator, call this to get memory */
+void *efi_alloc(uint64_t len, int memory_type);
+/* More specific EFI memory allocator, called by EFI payloads */
+efi_status_t efi_allocate_pages(int type, int memory_type, unsigned long pages,
+				uint64_t *memory);
+/* EFI memory free function. Not implemented today */
+efi_status_t efi_free_pages(uint64_t memory, unsigned long pages);
+/* Returns the EFI memory map */
+efi_status_t efi_get_memory_map(unsigned long *memory_map_size,
+				struct efi_mem_desc *memory_map,
+				unsigned long *map_key,
+				unsigned long *descriptor_size,
+				uint32_t *descriptor_version);
+/* Adds a range into the EFI memory map */
+uint64_t efi_add_memory_map(uint64_t start, uint64_t pages, int memory_type,
+			    bool overlap_only_ram);
+/* Called by board init to initialize the EFI memory map */
+int efi_memory_init(void);
+
 /*
  * Use these to indicate that your code / data should go into the EFI runtime
  * section and thus still be available when the OS is running
diff --git a/lib/efi_loader/efi_memory.c b/lib/efi_loader/efi_memory.c
new file mode 100644
index 0000000..c82b53f
--- /dev/null
+++ b/lib/efi_loader/efi_memory.c
@@ -0,0 +1,319 @@
+/*
+ * EFI application memory management
+ *
+ * Copyright (c) 2016 Alexander Graf
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+/* #define DEBUG_EFI */
+
+#include <common.h>
+#include <efi_loader.h>
+#include <malloc.h>
+#include <asm/global_data.h>
+#include <libfdt_env.h>
+#include <linux/list_sort.h>
+#include <inttypes.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct efi_mem_list {
+	struct list_head link;
+	struct efi_mem_desc desc;
+};
+
+/* This list contains all memory map items */
+LIST_HEAD(efi_mem);
+
+/*
+ * Unmaps all memory occupied by the carve_desc region from the
+ * list entry pointed to by map.
+ *
+ * Returns 1 if carving was performed or 0 if the regions don't overlap.
+ * Returns -1 if it would affect non-RAM regions but overlap_only_ram is set.
+ * Carving is only guaranteed to complete when all regions return 0.
+ */
+static int efi_mem_carve_out(struct efi_mem_list *map,
+			     struct efi_mem_desc *carve_desc,
+			     bool overlap_only_ram)
+{
+	struct efi_mem_list *newmap;
+	struct efi_mem_desc *map_desc = &map->desc;
+	uint64_t map_start = map_desc->physical_start;
+	uint64_t map_end = map_start + (map_desc->num_pages << EFI_PAGE_SHIFT);
+	uint64_t carve_start = carve_desc->physical_start;
+	uint64_t carve_end = carve_start +
+			     (carve_desc->num_pages << EFI_PAGE_SHIFT);
+
+	/* check whether we're overlapping */
+	if ((carve_end <= map_start) || (carve_start >= map_end))
+		return 0;
+
+	/* We're overlapping with non-RAM, warn the caller if desired */
+	if (overlap_only_ram && (map_desc->type != EFI_CONVENTIONAL_MEMORY))
+		return -1;
+
+	/* Sanitize carve_start and carve_end to lie within our bounds */
+	carve_start = max(carve_start, map_start);
+	carve_end = min(carve_end, map_end);
+
+	/* Carving at the beginning of our map? Just move it! */
+	if (carve_start == map_start) {
+		if (map_end == carve_end) {
+			/* Full overlap, just remove map */
+			list_del(&map->link);
+		}
+
+		map_desc->physical_start = carve_end;
+		map_desc->num_pages = (map_end - carve_end) >> EFI_PAGE_SHIFT;
+		return 1;
+	}
+
+	/*
+	 * Overlapping maps, just split the list map at carve_start,
+	 * it will get moved or removed in the next iteration.
+	 *
+	 * [ map_desc |__carve_start__| newmap ]
+	 */
+
+	/* Create a new map from [ carve_start ... map_end ] */
+	newmap = calloc(1, sizeof(*newmap));
+	newmap->desc = map->desc;
+	newmap->desc.physical_start = carve_start;
+	newmap->desc.num_pages = (map_end - carve_start) >> EFI_PAGE_SHIFT;
+	list_add_tail(&newmap->link, &efi_mem);
+
+	/* Shrink the map to [ map_start ... carve_start ] */
+	map_desc->num_pages = (carve_start - map_start) >> EFI_PAGE_SHIFT;
+
+	return 1;
+}
+
+uint64_t efi_add_memory_map(uint64_t start, uint64_t pages, int memory_type,
+			    bool overlap_only_ram)
+{
+	struct list_head *lhandle;
+	struct efi_mem_list *newlist;
+	bool do_carving;
+
+	if (!pages)
+		return start;
+
+	newlist = calloc(1, sizeof(*newlist));
+	newlist->desc.type = memory_type;
+	newlist->desc.physical_start = start;
+	newlist->desc.virtual_start = start;
+	newlist->desc.num_pages = pages;
+
+	switch (memory_type) {
+	case EFI_RUNTIME_SERVICES_CODE:
+	case EFI_RUNTIME_SERVICES_DATA:
+		newlist->desc.attribute = (1 << EFI_MEMORY_WB_SHIFT) |
+					  (1ULL << EFI_MEMORY_RUNTIME_SHIFT);
+		break;
+	case EFI_MMAP_IO:
+		newlist->desc.attribute = 1ULL << EFI_MEMORY_RUNTIME_SHIFT;
+		break;
+	default:
+		newlist->desc.attribute = 1 << EFI_MEMORY_WB_SHIFT;
+		break;
+	}
+
+	/* Add our new map */
+	do {
+		do_carving = false;
+		list_for_each(lhandle, &efi_mem) {
+			struct efi_mem_list *lmem;
+			int r;
+
+			lmem = list_entry(lhandle, struct efi_mem_list, link);
+			r = efi_mem_carve_out(lmem, &newlist->desc,
+					      overlap_only_ram);
+			if (r < 0) {
+				return 0;
+			} else if (r) {
+				do_carving = true;
+				break;
+			}
+		}
+	} while (do_carving);
+
+	/* Add our new map */
+	list_add_tail(&newlist->link, &efi_mem);
+
+	return start;
+}
+
+static uint64_t efi_find_free_memory(uint64_t len, uint64_t max_addr)
+{
+	struct list_head *lhandle;
+
+	list_for_each(lhandle, &efi_mem) {
+		struct efi_mem_list *lmem = list_entry(lhandle,
+			struct efi_mem_list, link);
+		struct efi_mem_desc *desc = &lmem->desc;
+		uint64_t desc_len = desc->num_pages << EFI_PAGE_SHIFT;
+		uint64_t desc_end = desc->physical_start + desc_len;
+		uint64_t curmax = min(max_addr, desc_end);
+		uint64_t ret = curmax - len;
+
+		/* We only take memory from free RAM */
+		if (desc->type != EFI_CONVENTIONAL_MEMORY)
+			continue;
+
+		/* Out of bounds for max_addr */
+		if ((ret + len) > max_addr)
+			continue;
+
+		/* Out of bounds for upper map limit */
+		if ((ret + len) > desc_end)
+			continue;
+
+		/* Out of bounds for lower map limit */
+		if (ret < desc->physical_start)
+			continue;
+
+		/* Return the highest address in this map within bounds */
+		return ret;
+	}
+
+	return 0;
+}
+
+efi_status_t efi_allocate_pages(int type, int memory_type,
+				unsigned long pages, uint64_t *memory)
+{
+	u64 len = pages << EFI_PAGE_SHIFT;
+	efi_status_t r = EFI_SUCCESS;
+	uint64_t addr;
+
+	switch (type) {
+	case 0:
+		/* Any page */
+		addr = efi_find_free_memory(len, gd->ram_top);
+		if (!addr) {
+			r = EFI_NOT_FOUND;
+			break;
+		}
+		break;
+	case 1:
+		/* Max address */
+		addr = efi_find_free_memory(len, *memory);
+		if (!addr) {
+			r = EFI_NOT_FOUND;
+			break;
+		}
+		break;
+	case 2:
+		/* Exact address, reserve it. The addr is already in *memory. */
+		addr = *memory;
+		break;
+	default:
+		/* UEFI doesn't specify other allocation types */
+		r = EFI_INVALID_PARAMETER;
+		break;
+	}
+
+	if (r == EFI_SUCCESS) {
+		uint64_t ret;
+
+		/* Reserve that map in our memory maps */
+		ret = efi_add_memory_map(addr, pages, memory_type, true);
+		if (ret == addr) {
+			*memory = addr;
+		} else {
+			/* Map would overlap, bail out */
+			r = EFI_OUT_OF_RESOURCES;
+		}
+	}
+
+	return r;
+}
+
+void *efi_alloc(uint64_t len, int memory_type)
+{
+	uint64_t ret = 0;
+	uint64_t pages = (len + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
+	efi_status_t r;
+
+	r = efi_allocate_pages(0, memory_type, pages, &ret);
+	if (r == EFI_SUCCESS)
+		return (void*)(uintptr_t)ret;
+
+	return NULL;
+}
+
+efi_status_t efi_free_pages(uint64_t memory, unsigned long pages)
+{
+	/* We don't free, let's cross our fingers we have plenty RAM */
+	return EFI_SUCCESS;
+}
+
+efi_status_t efi_get_memory_map(unsigned long *memory_map_size,
+				struct efi_mem_desc *memory_map,
+				unsigned long *map_key,
+				unsigned long *descriptor_size,
+				uint32_t *descriptor_version)
+{
+	ulong map_size = 0;
+	struct list_head *lhandle;
+
+	list_for_each(lhandle, &efi_mem)
+		map_size += sizeof(struct efi_mem_desc);
+
+	*memory_map_size = map_size;
+
+	if (descriptor_size)
+		*descriptor_size = sizeof(struct efi_mem_desc);
+
+	if (*memory_map_size < map_size)
+		return EFI_BUFFER_TOO_SMALL;
+
+	/* Copy list into array */
+	if (memory_map) {
+		list_for_each(lhandle, &efi_mem) {
+			struct efi_mem_list *lmem;
+
+			lmem = list_entry(lhandle, struct efi_mem_list, link);
+			*memory_map = lmem->desc;
+			memory_map++;
+		}
+	}
+
+	return EFI_SUCCESS;
+}
+
+int efi_memory_init(void)
+{
+	uint64_t runtime_start, runtime_end, runtime_pages;
+	uint64_t uboot_start, uboot_pages;
+	uint64_t uboot_stack_size = 16 * 1024 * 1024;
+	int i;
+
+	/* Add RAM */
+	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
+		u64 ram_start = gd->bd->bi_dram[i].start;
+		u64 ram_size = gd->bd->bi_dram[i].size;
+		u64 start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
+		u64 pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
+
+		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
+				   false);
+	}
+
+	/* Add U-Boot */
+	uboot_start = (gd->start_addr_sp - uboot_stack_size) & ~EFI_PAGE_MASK;
+	uboot_pages = (gd->ram_top - uboot_start) >> EFI_PAGE_SHIFT;
+	efi_add_memory_map(uboot_start, uboot_pages, EFI_LOADER_DATA, false);
+
+	/* Add Runtime Services */
+	runtime_start = (ulong)&__efi_runtime_start & ~EFI_PAGE_MASK;
+	runtime_end = (ulong)&__efi_runtime_stop;
+	runtime_end = (runtime_end + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
+	runtime_pages = (runtime_end - runtime_start) >> EFI_PAGE_SHIFT;
+	efi_add_memory_map(runtime_start, runtime_pages,
+			   EFI_RUNTIME_SERVICES_CODE, false);
+
+	return 0;
+}
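
Below is a minimal usage sketch of the allocator this patch adds, written against only the interfaces declared in efi_loader.h above. reserve_mmio_window(), the MMIO base address and the sizes are hypothetical and exist purely to illustrate the call pattern; they are not part of the patch.

/*
 * Illustrative caller, not part of the patch: reserve_mmio_window() is a
 * hypothetical helper that marks an MMIO range in the EFI memory map and
 * then grabs a loader buffer from free conventional RAM.
 */
#include <common.h>
#include <efi_loader.h>

static int reserve_mmio_window(void)
{
	/* Hypothetical, page-aligned 64 KiB MMIO region */
	uint64_t mmio_base = 0x10000000;
	uint64_t mmio_pages = 0x10000 >> EFI_PAGE_SHIFT;
	void *buf;

	/*
	 * Carve the MMIO range out of whatever map entries it overlaps.
	 * overlap_only_ram is false because this range is not normal RAM.
	 */
	if (efi_add_memory_map(mmio_base, mmio_pages, EFI_MMAP_IO,
			       false) != mmio_base)
		return -1;

	/* Allocate one page of loader data below gd->ram_top */
	buf = efi_alloc(4096, EFI_LOADER_DATA);
	if (!buf)
		return -1;

	debug("EFI loader buffer at %p\n", buf);

	return 0;
}

Note that efi_free_pages() in this patch is a stub, so anything reserved this way stays reserved for the lifetime of U-Boot.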