/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>

#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/psci.h>

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

static const char *cpu_name;
static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

void __init early_print(const char *str, ...)
{
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

	printk("%s", buf);
}

static void __init setup_processor(void)
{
	struct cpu_info *cpu_info;
	u64 features, block;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc.S
	 */
	cpu_info = lookup_processor_type(read_cpuid_id());
	if (!cpu_info) {
		printk("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = cpu_info->cpu_name;

	printk("CPU: %s [%08x] revision %d\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);

	sprintf(init_utsname()->machine, "aarch64");
	elf_hwcap = 0;

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
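	/*
	 * Note: the switch statements below rely on intentional
	 * fall-through, so that a field value of 2 (PMULL) also
	 * sets the corresponding AES capability bit.
	 */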
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
		case 1:
			elf_hwcap |= HWCAP_AES;
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		elf_hwcap |= HWCAP_CRC32;

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the AArch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = (features >> 4) & 0xf;
	if (!(block & 0x8)) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
		case 0:
			break;
		}
	}

	block = (features >> 8) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	block = (features >> 12) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	block = (features >> 16) & 0xf;
	if (block && !(block & 0x8))
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	struct boot_param_header *devtree;
	unsigned long dt_root;

	/* Check we have a non-NULL DT pointer */
	if (!dt_phys) {
		early_print("\n"
			"Error: NULL or invalid device tree blob\n"
			"The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
			"\nPlease check your bootloader.\n");

		while (true)
			cpu_relax();
	}

	devtree = phys_to_virt(dt_phys);

	/* Check device tree validity */
	if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
		early_print("\n"
			"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
			"Expected 0x%x, found 0x%x\n"
			"\nPlease check your bootloader.\n",
			dt_phys, devtree, OF_DT_HEADER,
			be32_to_cpu(devtree->magic));

		while (true)
			cpu_relax();
	}

	initial_boot_params = devtree;
	dt_root = of_get_flat_dt_root();

	machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
	if (!machine_name)
		machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
	if (!machine_name)
		machine_name = "<unknown>";
	pr_info("Machine: %s\n", machine_name);

	/* Retrieve various information from the /chosen node */
	of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
	/* Initialize {size,address}-cells info */
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	/* Setup memory, calling early_init_dt_add_memory_arch */
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);
}
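/*
 * Memory ranges discovered in the FDT are registered with memblock here.
 * Ranges lying entirely below PHYS_OFFSET are ignored and ranges straddling
 * it are trimmed, since the linear mapping cannot cover memory below
 * PHYS_OFFSET.
 */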
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
	base &= PAGE_MASK;
	size &= PAGE_MASK;
	if (base + size < PHYS_OFFSET) {
		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
			   base, base + size);
		return;
	}
	if (base < PHYS_OFFSET) {
		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
			   base, PHYS_OFFSET);
		size -= PHYS_OFFSET - base;
		base = PHYS_OFFSET;
	}
	memblock_add(base, size);
}

void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __va(memblock_alloc(size, align));
}

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	phys_addr_t limit;

	if (!p)
		return 1;

	limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", limit >> 20);

	memblock_enforce_memory_limit(limit);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	setup_machine_fdt(__fdt_pointer);

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	parse_early_param();

	arm64_memblock_init();

	paging_init();
	request_standard_resources();

	unflatten_device_tree();

	psci_init();

	cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
#ifdef CONFIG_SMP
	smp_init_cpus();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
}

static int __init arm64_device_init(void)
{
	of_clk_init(NULL);
	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
	return 0;
}
arch_initcall(arm64_device_init);

static DEFINE_PER_CPU(struct cpu, cpu_data);

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);
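/*
 * Strings reported in /proc/cpuinfo.  Their order must match the arm64
 * HWCAP_* bit positions, since c_show() selects them with
 * "elf_hwcap & (1 << i)".
 */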
"8" : #endif "AArch64"); seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); seq_puts(m, "\n"); seq_printf(m, "Hardware\t: %s\n", machine_name); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { return *pos < 1 ? (void *)1 : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return NULL; } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = c_show };