From 32f0bb680d49ad35794e7109d2d5104d7802ebe0 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 9 Aug 2013 17:58:15 -0400 Subject: [PATCH 01/18] Add secure_modules() call Provide a single call to allow kernel code to determine whether the system has been configured to either disable module loading entirely or to load only modules signed with a trusted key. Bugzilla: N/A Upstream-status: Fedora mustard. Replaced by securelevels, but that was nak'd Signed-off-by: Matthew Garrett --- include/linux/module.h | 6 ++++++ kernel/module.c | 10 ++++++++++ 2 files changed, 16 insertions(+) diff --git a/include/linux/module.h b/include/linux/module.h index 0c3207d26ac0c4..c8b4ea0fc8b061 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -629,6 +629,8 @@ static inline bool module_requested_async_probing(struct module *module) return module && module->async_probe_requested; } +extern bool secure_modules(void); + #ifdef CONFIG_LIVEPATCH static inline bool is_livepatch_module(struct module *mod) { @@ -750,6 +752,10 @@ static inline bool module_requested_async_probing(struct module *module) return false; } +static inline bool secure_modules(void) +{ + return false; +} #endif /* CONFIG_MODULES */ #ifdef CONFIG_SYSFS diff --git a/kernel/module.c b/kernel/module.c index 0e54d5bf0097f5..085b720d1dfd3f 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -4285,3 +4285,13 @@ void module_layout(struct module *mod, } EXPORT_SYMBOL(module_layout); #endif + +bool secure_modules(void) +{ +#ifdef CONFIG_MODULE_SIG + return (sig_enforce || modules_disabled); +#else + return modules_disabled; +#endif +} +EXPORT_SYMBOL(secure_modules); From 8cc7c3a3fa3777e525b587f6bfa199843bfde412 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Thu, 8 Mar 2012 10:10:38 -0500 Subject: [PATCH 02/18] PCI: Lock down BAR access when module security is enabled Any hardware that can potentially generate DMA has to be locked down from userspace in order to avoid it being possible for an attacker to modify kernel code, allowing them to circumvent disabled module loading or module signing. Default to paranoid - in future we can potentially relax this for sufficiently IOMMU-isolated devices. 
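As a minimal sketch of how the new helper is meant to be consumed (the handler name below is hypothetical, not code from any of these patches), a driver entry point that exposes raw hardware or memory access would simply refuse the operation whenever secure_modules() reports a locked-down configuration:

/*
 * Hypothetical example only: a character-device write handler that could
 * reach hardware registers, gated with secure_modules() in the same way the
 * following patches gate PCI config space, port I/O, MSRs and /dev/mem.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/uaccess.h>

static ssize_t example_raw_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	/* Deny raw access when modules are disabled or must be signed. */
	if (secure_modules())
		return -EPERM;

	/* ... copy_from_user() and perform the privileged write here ... */
	return count;
}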
Signed-off-by: Matthew Garrett --- drivers/pci/pci-sysfs.c | 10 ++++++++++ drivers/pci/proc.c | 8 +++++++- drivers/pci/syscall.c | 3 ++- 3 files changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index bcd10c795284cf..a950301496f355 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "pci.h" static int sysfs_initialized; /* = 0 */ @@ -716,6 +717,9 @@ static ssize_t pci_write_config(struct file *filp, struct kobject *kobj, loff_t init_off = off; u8 *data = (u8 *) buf; + if (secure_modules()) + return -EPERM; + if (off > dev->cfg_size) return 0; if (off + count > dev->cfg_size) { @@ -1007,6 +1011,9 @@ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, resource_size_t start, end; int i; + if (secure_modules()) + return -EPERM; + for (i = 0; i < PCI_ROM_RESOURCE; i++) if (res == &pdev->resource[i]) break; @@ -1106,6 +1113,9 @@ static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { + if (secure_modules()) + return -EPERM; + return pci_resource_io(filp, kobj, attr, buf, off, count, true); } diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 2408abe4ee8c68..59f321c56c1884 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -116,6 +116,9 @@ static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf, int size = dev->cfg_size; int cnt; + if (secure_modules()) + return -EPERM; + if (pos >= size) return 0; if (nbytes >= size) @@ -195,6 +198,9 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, #endif /* HAVE_PCI_MMAP */ int ret = 0; + if (secure_modules()) + return -EPERM; + switch (cmd) { case PCIIOC_CONTROLLER: ret = pci_domain_nr(dev->bus); @@ -233,7 +239,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) struct pci_filp_private *fpriv = file->private_data; int i, ret, write_combine; - if (!capable(CAP_SYS_RAWIO)) + if (!capable(CAP_SYS_RAWIO) || secure_modules()) return -EPERM; /* Make sure the caller is mapping a real resource for this device */ diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c index b91c4da6836574..98f5637304d1ba 100644 --- a/drivers/pci/syscall.c +++ b/drivers/pci/syscall.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include "pci.h" @@ -92,7 +93,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, u32 dword; int err = 0; - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN) || secure_modules()) return -EPERM; dev = pci_get_bus_and_slot(bus, dfn); From a191b452265caa2ed0d5e48de302ee6546751695 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Thu, 8 Mar 2012 10:35:59 -0500 Subject: [PATCH 03/18] x86: Lock down IO port access when module security is enabled IO port access would permit users to gain access to PCI configuration registers, which in turn (on a lot of hardware) give access to MMIO register space. This would potentially permit root to trigger arbitrary DMA, so lock it down by default. 
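To make concrete what this check closes off, here is a hedged userspace sketch (not code from the patch) of the path the commit message describes: with ioperm() a root process can drive the legacy 0xCF8/0xCFC PCI configuration ports and from there locate MMIO BARs. With the change below applied on a locked-down kernel, the ioperm() call itself now fails with EPERM:

/*
 * Illustrative userspace sketch, not part of the patch: read a PCI config
 * dword via the legacy CF8h/CFCh port pair. Under this lockdown the
 * ioperm() call returns -1 with errno == EPERM even for root.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/io.h>

int main(void)
{
	if (ioperm(0xCF8, 8, 1) < 0) {		/* ports 0xCF8-0xCFF */
		perror("ioperm");
		return 1;
	}

	/* Bus 0, device 0, function 0, register 0: vendor/device ID. */
	outl(0x80000000u, 0xCF8);	/* enable bit | bus/dev/fn | register */
	printf("vendor/device: %08x\n", inl(0xCFC));
	return 0;
}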
Signed-off-by: Matthew Garrett --- arch/x86/kernel/ioport.c | 5 +++-- drivers/char/mem.c | 4 ++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c index 589b3193f1020b..ab8372443efbf0 100644 --- a/arch/x86/kernel/ioport.c +++ b/arch/x86/kernel/ioport.c @@ -15,6 +15,7 @@ #include #include #include +#include #include /* @@ -28,7 +29,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on) if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) return -EINVAL; - if (turn_on && !capable(CAP_SYS_RAWIO)) + if (turn_on && (!capable(CAP_SYS_RAWIO) || secure_modules())) return -EPERM; /* @@ -108,7 +109,7 @@ SYSCALL_DEFINE1(iopl, unsigned int, level) return -EINVAL; /* Trying to gain more privileges? */ if (level > old) { - if (!capable(CAP_SYS_RAWIO)) + if (!capable(CAP_SYS_RAWIO) || secure_modules()) return -EPERM; } regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 7e4a9d1296bb7f..83cca9fee42fe0 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -600,6 +601,9 @@ static ssize_t write_port(struct file *file, const char __user *buf, unsigned long i = *ppos; const char __user *tmp = buf; + if (secure_modules()) + return -EPERM; + if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; while (count-- > 0 && i < 65536) { From f022ee289333f94e068ed51521e95a738418ad5d Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 9 Mar 2012 08:39:37 -0500 Subject: [PATCH 04/18] ACPI: Limit access to custom_method custom_method effectively allows arbitrary access to system memory, making it possible for an attacker to circumvent restrictions on module loading. Disable it if any such restrictions have been enabled. Signed-off-by: Matthew Garrett --- drivers/acpi/custom_method.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c index c68e72414a67a9..4277938af700d1 100644 --- a/drivers/acpi/custom_method.c +++ b/drivers/acpi/custom_method.c @@ -29,6 +29,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, struct acpi_table_header table; acpi_status status; + if (secure_modules()) + return -EPERM; + if (!(*ppos)) { /* parse the table header to get the table length */ if (count <= sizeof(struct acpi_table_header)) From ee2b27f64622d25827cc78298580477d5fdb2286 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 9 Mar 2012 08:46:50 -0500 Subject: [PATCH 05/18] asus-wmi: Restrict debugfs interface when module loading is restricted We have no way of validating what all of the Asus WMI methods do on a given machine, and there's a risk that some will allow hardware state to be manipulated in such a way that arbitrary code can be executed in the kernel, circumventing module loading restrictions. Prevent that if any of these features are enabled. 
Signed-off-by: Matthew Garrett --- drivers/platform/x86/asus-wmi.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 8499d3ae4257b6..837849180851b7 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -1900,6 +1900,9 @@ static int show_dsts(struct seq_file *m, void *data) int err; u32 retval = -1; + if (secure_modules()) + return -EPERM; + err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval); if (err < 0) @@ -1916,6 +1919,9 @@ static int show_devs(struct seq_file *m, void *data) int err; u32 retval = -1; + if (secure_modules()) + return -EPERM; + err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param, &retval); @@ -1940,6 +1946,9 @@ static int show_call(struct seq_file *m, void *data) union acpi_object *obj; acpi_status status; + if (secure_modules()) + return -EPERM; + status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID, 1, asus->debug.method_id, &input, &output); From d09cc7fd2ac8ce28700c9c67319f16266a926049 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 9 Mar 2012 09:28:15 -0500 Subject: [PATCH 06/18] Restrict /dev/mem and /dev/kmem when module loading is restricted Allowing users to write to address space makes it possible for the kernel to be subverted, avoiding module loading restrictions. Prevent this when any restrictions have been imposed on loading modules. Signed-off-by: Matthew Garrett --- drivers/char/mem.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 83cca9fee42fe0..e19f4838192c07 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -180,6 +180,9 @@ static ssize_t write_mem(struct file *file, const char __user *buf, if (p != *ppos) return -EFBIG; + if (secure_modules()) + return -EPERM; + if (!valid_phys_addr_range(p, count)) return -EFAULT; @@ -536,6 +539,9 @@ static ssize_t write_kmem(struct file *file, const char __user *buf, char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ int err = 0; + if (secure_modules()) + return -EPERM; + if (p < (unsigned long) high_memory) { unsigned long to_write = min_t(unsigned long, count, (unsigned long)high_memory - p); From e7ba6c6e6690ec17a80f591320a245c735aa2950 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Mon, 25 Jun 2012 19:57:30 -0400 Subject: [PATCH 07/18] acpi: Ignore acpi_rsdp kernel parameter when module loading is restricted This option allows userspace to pass the RSDP address to the kernel, which makes it possible for a user to circumvent any restrictions imposed on loading modules. Disable it in that case. 
Signed-off-by: Josh Boyer --- drivers/acpi/osl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 416953a4251094..4887e343c7fdf5 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -191,7 +192,7 @@ early_param("acpi_rsdp", setup_acpi_rsdp); acpi_physical_address __init acpi_os_get_root_pointer(void) { #ifdef CONFIG_KEXEC - if (acpi_rsdp) + if (acpi_rsdp && !secure_modules()) return acpi_rsdp; #endif From 14b62c45fb9d7e42e2b5c0c62f45eaea02c9b83f Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Thu, 19 Nov 2015 18:55:53 -0800 Subject: [PATCH 08/18] kexec: Disable at runtime if the kernel enforces module loading restrictions kexec permits the loading and execution of arbitrary code in ring 0, which is something that module signing enforcement is meant to prevent. It makes sense to disable kexec in this situation. Signed-off-by: Matthew Garrett --- kernel/kexec.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/kexec.c b/kernel/kexec.c index 980936a90ee6ea..a0e4cb36f553d9 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "kexec_internal.h" @@ -190,7 +191,7 @@ SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, int result; /* We only trust the superuser with rebooting the system. */ - if (!capable(CAP_SYS_BOOT) || kexec_load_disabled) + if (!capable(CAP_SYS_BOOT) || kexec_load_disabled || secure_modules()) return -EPERM; /* From 6bc71c742f2f411e934c808682df8c1d390213bc Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 8 Feb 2013 11:12:13 -0800 Subject: [PATCH 09/18] x86: Restrict MSR access when module loading is restricted Writing to MSRs should not be allowed if module loading is restricted, since it could lead to execution of arbitrary code in kernel mode. Based on a patch by Kees Cook. Cc: Kees Cook Signed-off-by: Matthew Garrett --- arch/x86/kernel/msr.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 7f3550acde1b8c..963ba40119234a 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c @@ -83,6 +83,9 @@ static ssize_t msr_write(struct file *file, const char __user *buf, int err = 0; ssize_t bytes = 0; + if (secure_modules()) + return -EPERM; + if (count % 8) return -EINVAL; /* Invalid chunk size */ @@ -130,6 +133,10 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg) err = -EBADF; break; } + if (secure_modules()) { + err = -EPERM; + break; + } if (copy_from_user(®s, uregs, sizeof regs)) { err = -EFAULT; break; From 635d13f6050257dde63468b255eb0a71593a4774 Mon Sep 17 00:00:00 2001 From: Matthew Garrett Date: Fri, 9 Aug 2013 18:36:30 -0400 Subject: [PATCH 10/18] Add option to automatically enforce module signatures when in Secure Boot mode UEFI Secure Boot provides a mechanism for ensuring that the firmware will only load signed bootloaders and kernels. Certain use cases may also require that all kernel modules also be signed. Add a configuration option that enforces this automatically when enabled. 
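The boot stub change below queries the SecureBoot and SetupMode variables through the EFI runtime services before ExitBootServices(). Purely as an illustration (this is not part of the patch), the same SecureBoot variable can be inspected from userspace after boot through efivarfs, where the first four bytes of each file carry the variable attributes and the payload follows:

/*
 * Illustrative userspace sketch, not part of the patch: read the SecureBoot
 * variable via efivarfs. Bytes 0-3 are the EFI variable attributes; byte 4
 * is the value (1 = Secure Boot enabled).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const char *path = "/sys/firmware/efi/efivars/"
			   "SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c";
	uint8_t buf[5];
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror("fopen");	/* absent on non-UEFI systems */
		return 1;
	}
	if (fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
		fclose(f);
		return 1;
	}
	printf("SecureBoot = %u\n", buf[4]);
	fclose(f);
	return 0;
}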
Signed-off-by: Matthew Garrett --- Documentation/x86/zero-page.txt | 2 ++ arch/x86/Kconfig | 10 ++++++++ arch/x86/boot/compressed/eboot.c | 35 +++++++++++++++++++++++++++ arch/x86/include/uapi/asm/bootparam.h | 3 ++- arch/x86/kernel/setup.c | 6 +++++ include/linux/module.h | 6 +++++ kernel/module.c | 7 ++++++ 7 files changed, 68 insertions(+), 1 deletion(-) diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt index 95a4d34af3fdd7..b8527c6b76461c 100644 --- a/Documentation/x86/zero-page.txt +++ b/Documentation/x86/zero-page.txt @@ -31,6 +31,8 @@ Offset Proto Name Meaning 1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below) 1EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer (below) +1EB/001 ALL kbd_status Numlock is enabled +1EC/001 ALL secure_boot Secure boot is enabled in the firmware 1EF/001 ALL sentinel Used to detect broken bootloaders 290/040 ALL edd_mbr_sig_buffer EDD MBR signatures 2D0/A00 ALL e820_map E820 memory map table diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index bada636d1065c5..882da2bd162804 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1786,6 +1786,16 @@ config EFI_MIXED If unsure, say N. +config EFI_SECURE_BOOT_SIG_ENFORCE + def_bool n + prompt "Force module signing when UEFI Secure Boot is enabled" + ---help--- + UEFI Secure Boot provides a mechanism for ensuring that the + firmware will only load signed bootloaders and kernels. Certain + use cases may also require that all kernel modules also be signed. + Say Y here to automatically enable module signature enforcement + when a system boots with UEFI Secure Boot enabled. + config SECCOMP def_bool y prompt "Enable seccomp to safely compute untrusted bytecode" diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index cc69e37548db22..17b376596c9690 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "../string.h" #include "eboot.h" @@ -537,6 +538,36 @@ static void setup_efi_pci(struct boot_params *params) efi_call_early(free_pool, pci_handle); } +static int get_secure_boot(void) +{ + u8 sb, setup; + unsigned long datasize = sizeof(sb); + efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID; + efi_status_t status; + + status = efi_early->call((unsigned long)sys_table->runtime->get_variable, + L"SecureBoot", &var_guid, NULL, &datasize, &sb); + + if (status != EFI_SUCCESS) + return 0; + + if (sb == 0) + return 0; + + + status = efi_early->call((unsigned long)sys_table->runtime->get_variable, + L"SetupMode", &var_guid, NULL, &datasize, + &setup); + + if (status != EFI_SUCCESS) + return 0; + + if (setup == 1) + return 0; + + return 1; +} + static efi_status_t setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height) { @@ -1094,6 +1125,10 @@ struct boot_params *efi_main(struct efi_config *c, else setup_boot_services32(efi_early); + sanitize_boot_params(boot_params); + + boot_params->secure_boot = get_secure_boot(); + setup_graphics(boot_params); setup_efi_pci(boot_params); diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index c18ce67495fadf..2b3e5427097b4f 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -134,7 +134,8 @@ struct boot_params { __u8 eddbuf_entries; /* 0x1e9 */ __u8 edd_mbr_sig_buf_entries; /* 0x1ea */ __u8 kbd_status; /* 0x1eb */ - __u8 _pad5[3]; /* 0x1ec */ + __u8 secure_boot; /* 0x1ec */ + __u8 _pad5[2]; /* 
0x1ed */ /* * The sentinel is set to a nonzero value (0xff) in header.S. * diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 9c337b0e8ba7c4..f7f369bcf7eebd 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1160,6 +1160,12 @@ void __init setup_arch(char **cmdline_p) io_delay_init(); +#ifdef CONFIG_EFI_SECURE_BOOT_SIG_ENFORCE + if (boot_params.secure_boot) { + enforce_signed_modules(); + } +#endif + /* * Parse the ACPI tables for possible boot-time SMP configuration. */ diff --git a/include/linux/module.h b/include/linux/module.h index c8b4ea0fc8b061..8918ef47519610 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -260,6 +260,12 @@ extern const typeof(name) __mod_##type##__##name##_device_table \ struct notifier_block; +#ifdef CONFIG_MODULE_SIG +extern void enforce_signed_modules(void); +#else +static inline void enforce_signed_modules(void) {}; +#endif + #ifdef CONFIG_MODULES extern int modules_disabled; /* for sysctl */ diff --git a/kernel/module.c b/kernel/module.c index 085b720d1dfd3f..e0c6216f770065 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -4286,6 +4286,13 @@ void module_layout(struct module *mod, EXPORT_SYMBOL(module_layout); #endif +#ifdef CONFIG_MODULE_SIG +void enforce_signed_modules(void) +{ + sig_enforce = true; +} +#endif + bool secure_modules(void) { #ifdef CONFIG_MODULE_SIG From 69f25598cb7d1f60b2e0dfb41452d5ddb71c5599 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Tue, 27 Aug 2013 13:28:43 -0400 Subject: [PATCH 11/18] efi: Make EFI_SECURE_BOOT_SIG_ENFORCE depend on EFI The functionality of the config option is dependent upon the platform being UEFI based. Reflect this in the config deps. Signed-off-by: Josh Boyer --- arch/x86/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 882da2bd162804..d666ef8b616c73 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1787,7 +1787,8 @@ config EFI_MIXED If unsure, say N. config EFI_SECURE_BOOT_SIG_ENFORCE - def_bool n + def_bool n + depends on EFI prompt "Force module signing when UEFI Secure Boot is enabled" ---help--- UEFI Secure Boot provides a mechanism for ensuring that the From cbc0cd9ff1f0d7583a7bf638ab0f1a56ec5147a4 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Tue, 27 Aug 2013 13:33:03 -0400 Subject: [PATCH 12/18] efi: Add EFI_SECURE_BOOT bit UEFI machines can be booted in Secure Boot mode. Add a EFI_SECURE_BOOT bit for use with efi_enabled. Signed-off-by: Josh Boyer --- arch/x86/kernel/setup.c | 2 ++ include/linux/efi.h | 1 + 2 files changed, 3 insertions(+) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index f7f369bcf7eebd..60dccc24554a30 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1162,7 +1162,9 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_EFI_SECURE_BOOT_SIG_ENFORCE if (boot_params.secure_boot) { + set_bit(EFI_SECURE_BOOT, &efi.flags); enforce_signed_modules(); + pr_info("Secure boot enabled\n"); } #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index cba7177cbec74e..0d767057a998fa 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1045,6 +1045,7 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_ARCH_1 7 /* First arch-specific bit */ #define EFI_DBG 8 /* Print additional debug info at runtime */ #define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ +#define EFI_SECURE_BOOT 10 /* Are we in Secure Boot mode? 
*/ #ifdef CONFIG_EFI /* From c80f8705b887ad846a82dfb67c1d5610bef23ffd Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Fri, 20 Jun 2014 08:53:24 -0400 Subject: [PATCH 13/18] hibernate: Disable in a signed modules environment There is currently no way to verify the resume image when returning from hibernate. This might compromise the signed modules trust model, so until we can work with signed hibernate images we disable it in a secure modules environment. Signed-off-by: Josh Boyer --- kernel/power/hibernate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index b26dbc48c75b9f..ab187ad3fc6152 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include "power.h" @@ -67,7 +68,7 @@ static const struct platform_hibernation_ops *hibernation_ops; bool hibernation_available(void) { - return (nohibernate == 0); + return ((nohibernate == 0) && !secure_modules()); } /** From c9f538d3f8b9f6726f3e4e91514450763c65fd66 Mon Sep 17 00:00:00 2001 From: Vito Caputo Date: Wed, 25 Nov 2015 02:59:45 -0800 Subject: [PATCH 14/18] kbuild: derive relative path for KBUILD_SRC from CURDIR This enables relocating source and build trees to different roots, provided they stay reachable relative to one another. Useful for builds done within a sandbox where the eventual root is prefixed by some undesirable path component. --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 50436f502d81c6..cff393b50d1b54 100644 --- a/Makefile +++ b/Makefile @@ -147,7 +147,8 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make @: sub-make: - $(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \ + $(Q)$(MAKE) -C $(KBUILD_OUTPUT) \ + KBUILD_SRC=$(shell realpath --relative-to=$(KBUILD_OUTPUT) $(CURDIR)) \ -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS)) # Leave processing to above invocation of make From e1fde4f0da73c1f5e70a548f5eee9f185f056fdc Mon Sep 17 00:00:00 2001 From: Geoff Levand Date: Fri, 11 Nov 2016 17:28:52 -0800 Subject: [PATCH 15/18] Add arm64 coreos verity hash Signed-off-by: Geoff Levand --- arch/arm64/kernel/head.S | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 332e33193ccf15..964bae1fc5d72a 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -195,6 +195,11 @@ section_table: .short 0 // NumberOfLineNumbers (0 for executables) .long 0xe0500020 // Characteristics (section flags) + /* CoreOS 64 byte verity hash value. */ + .org _head + 512 + .ascii "verity-hash" + .org _head + 512 + 64 + /* * EFI will load .text onwards at the 4k section alignment * described in the PE/COFF header. To ensure that instruction From 33f3d95300f6f9426cb77324f0183ba595dc6ee9 Mon Sep 17 00:00:00 2001 From: Stephen Smalley Date: Mon, 9 Jan 2017 10:07:31 -0500 Subject: [PATCH 16/18] selinux: allow context mounts on tmpfs, ramfs, devpts within user namespaces commit aad82892af261b9903cc11c55be3ecf5f0b0b4f8 ("selinux: Add support for unprivileged mounts from user namespaces") prohibited any use of context mount options within non-init user namespaces. However, this breaks use of context mount options for tmpfs mounts within user namespaces, which are being used by Docker/runc. There is no reason to block such usage for tmpfs, ramfs or devpts. Exempt these filesystem types from this restriction. 
Before: sh$ userns_child_exec -p -m -U -M '0 1000 1' -G '0 1000 1' bash sh# mount -t tmpfs -o context=system_u:object_r:user_tmp_t:s0:c13 none /tmp mount: tmpfs is write-protected, mounting read-only mount: cannot mount tmpfs read-only After: sh$ userns_child_exec -p -m -U -M '0 1000 1' -G '0 1000 1' bash sh# mount -t tmpfs -o context=system_u:object_r:user_tmp_t:s0:c13 none /tmp sh# ls -Zd /tmp unconfined_u:object_r:user_tmp_t:s0:c13 /tmp Signed-off-by: Stephen Smalley Signed-off-by: Paul Moore --- security/selinux/hooks.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index c2da45ae5b2a04..799a69148cef4a 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -832,10 +832,14 @@ static int selinux_set_mnt_opts(struct super_block *sb, } /* - * If this is a user namespace mount, no contexts are allowed - * on the command line and security labels must be ignored. + * If this is a user namespace mount and the filesystem type is not + * explicitly whitelisted, then no contexts are allowed on the command + * line and security labels must be ignored. */ - if (sb->s_user_ns != &init_user_ns) { + if (sb->s_user_ns != &init_user_ns && + strcmp(sb->s_type->name, "tmpfs") && + strcmp(sb->s_type->name, "ramfs") && + strcmp(sb->s_type->name, "devpts")) { if (context_sid || fscontext_sid || rootcontext_sid || defcontext_sid) { rc = -EACCES; From 4453acc4fe142267c17502e4e8552a1d0f3d8597 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 Jan 2017 23:58:38 +0100 Subject: [PATCH 17/18] perf/x86/intel/rapl: Make package handling more robust The package management code in RAPL relies on package mapping being available before a CPU is started. This changed with: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") because the ACPI/BIOS information turned out to be unreliable, but that left RAPL in broken state. This was not noticed because on a regular boot all CPUs are online before RAPL is initialized. A possible fix would be to reintroduce the mess which allocates a package data structure in CPU prepare and when it turns out to already exist in starting throw it away later in the CPU online callback. But that's a horrible hack and not required at all because RAPL becomes functional for perf only in the CPU online callback. That's correct because user space is not yet informed about the CPU being onlined, so nothing can rely on RAPL being available on that particular CPU. Move the allocation to the CPU online callback and simplify the hotplug handling. At this point the package mapping is established and correct. This also adds a missing check for available package data in the event_init() function.
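The fix leans on a guarantee of the cpuhp state machine: an online callback registered with cpuhp_setup_state() is invoked for every CPU already online at setup time and for every CPU onlined later, so per-package allocation can be done lazily there instead of in a separate prepare state. A minimal sketch of that callback pattern, with hypothetical "demo" names and a dynamically allocated state rather than the RAPL-specific one, looks like this:

/*
 * Minimal illustrative sketch of the hotplug pattern this patch converts
 * RAPL to; the "demo" names are hypothetical. The online callback runs for
 * every present and future online CPU, so lazy per-package allocation can
 * live entirely inside it.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int demo_cpu_online(unsigned int cpu)
{
	/* Allocate per-package state here if it does not exist yet. */
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	/* Drop the reference, migrate the event collector to another CPU. */
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "perf/x86/demo:online",
				demo_cpu_online, demo_cpu_offline);
	return ret < 0 ? ret : 0;
}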
Reported-by: Yasuaki Ishimatsu Signed-off-by: Thomas Gleixner Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sebastian Siewior Cc: Stephane Eranian Cc: Vince Weaver Fixes: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") Link: http://lkml.kernel.org/r/20170131230141.212593966@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/intel/rapl.c | 60 ++++++++++++++++-------------------- include/linux/cpuhotplug.h | 1 - 2 files changed, 26 insertions(+), 35 deletions(-) diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 0a535cea8ff31a..1dba3c2c90c59b 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -161,7 +161,13 @@ static u64 rapl_timer_ms; static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) { - return rapl_pmus->pmus[topology_logical_package_id(cpu)]; + unsigned int pkgid = topology_logical_package_id(cpu); + + /* + * The unsigned check also catches the '-1' return value for non + * existent mappings in the topology map. + */ + return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL; } static inline u64 rapl_read_counter(struct perf_event *event) @@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event) /* must be done before validate_group */ pmu = cpu_to_rapl_pmu(event->cpu); + if (!pmu) + return -EINVAL; event->cpu = pmu->cpu; event->pmu_private = pmu; event->hw.event_base = msr; @@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu) struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); int target; + if (!pmu) { + pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); + if (!pmu) + return -ENOMEM; + + raw_spin_lock_init(&pmu->lock); + INIT_LIST_HEAD(&pmu->active_list); + pmu->pmu = &rapl_pmus->pmu; + pmu->timer_interval = ms_to_ktime(rapl_timer_ms); + rapl_hrtimer_init(pmu); + + rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu; + } + /* * Check if there is an online cpu in the package which collects rapl * events already. @@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu) return 0; } -static int rapl_cpu_prepare(unsigned int cpu) -{ - struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); - - if (pmu) - return 0; - - pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); - if (!pmu) - return -ENOMEM; - - raw_spin_lock_init(&pmu->lock); - INIT_LIST_HEAD(&pmu->active_list); - pmu->pmu = &rapl_pmus->pmu; - pmu->timer_interval = ms_to_ktime(rapl_timer_ms); - pmu->cpu = -1; - rapl_hrtimer_init(pmu); - rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu; - return 0; -} - static int rapl_check_hw_unit(bool apply_quirk) { u64 msr_rapl_power_unit_bits; @@ -802,29 +803,21 @@ static int __init rapl_pmu_init(void) /* * Install callbacks. Core will call them for each online cpu. 
*/ - - ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP", - rapl_cpu_prepare, NULL); - if (ret) - goto out; - ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, "AP_PERF_X86_RAPL_ONLINE", rapl_cpu_online, rapl_cpu_offline); if (ret) - goto out1; + goto out; ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); if (ret) - goto out2; + goto out1; rapl_advertise(); return 0; -out2: - cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); out1: - cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP); + cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); out: pr_warn("Initialization failed (%d), disabled\n", ret); cleanup_rapl_pmus(); @@ -835,7 +828,6 @@ module_init(rapl_pmu_init); static void __exit intel_rapl_exit(void) { cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); - cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP); perf_pmu_unregister(&rapl_pmus->pmu); cleanup_rapl_pmus(); } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index ba1cad7b97cfa1..965cc5693a46d9 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -10,7 +10,6 @@ enum cpuhp_state { CPUHP_PERF_X86_PREPARE, CPUHP_PERF_X86_UNCORE_PREP, CPUHP_PERF_X86_AMD_UNCORE_PREP, - CPUHP_PERF_X86_RAPL_PREP, CPUHP_PERF_BFIN, CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, From d339b36b166fe63a339a95aa6747978b5b7c093a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 31 Jan 2017 23:58:40 +0100 Subject: [PATCH 18/18] perf/x86/intel/uncore: Make package handling more robust The package management code in uncore relies on package mapping being available before a CPU is started. This changed with: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") because the ACPI/BIOS information turned out to be unreliable, but that left uncore in broken state. This was not noticed because on a regular boot all CPUs are online before uncore is initialized. Move the allocation to the CPU online callback and simplify the hotplug handling. At this point the package mapping is established and correct. Signed-off-by: Thomas Gleixner Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sebastian Siewior Cc: Stephane Eranian Cc: Vince Weaver Cc: Yasuaki Ishimatsu Fixes: 9d85eb9119f4 ("x86/smpboot: Make logical package management more robust") Link: http://lkml.kernel.org/r/20170131230141.377156255@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore.c | 196 +++++++++++++++------------------ include/linux/cpuhotplug.h | 2 - 2 files changed, 91 insertions(+), 107 deletions(-) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 19d646a783fd6c..f2d760dd080adf 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj, struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) { - return pmu->boxes[topology_logical_package_id(cpu)]; + unsigned int pkgid = topology_logical_package_id(cpu); + + /* + * The unsigned check also catches the '-1' return value for non + * existent mappings in the topology map. + */ + return pkgid < max_packages ? 
pmu->boxes[pkgid] : NULL; } u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) @@ -1033,76 +1039,6 @@ static void uncore_pci_exit(void) } } -static int uncore_cpu_dying(unsigned int cpu) -{ - struct intel_uncore_type *type, **types = uncore_msr_uncores; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, pkg; - - pkg = topology_logical_package_id(cpu); - for (; *types; types++) { - type = *types; - pmu = type->pmus; - for (i = 0; i < type->num_boxes; i++, pmu++) { - box = pmu->boxes[pkg]; - if (box && atomic_dec_return(&box->refcnt) == 0) - uncore_box_exit(box); - } - } - return 0; -} - -static int uncore_cpu_starting(unsigned int cpu) -{ - struct intel_uncore_type *type, **types = uncore_msr_uncores; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, pkg; - - pkg = topology_logical_package_id(cpu); - for (; *types; types++) { - type = *types; - pmu = type->pmus; - for (i = 0; i < type->num_boxes; i++, pmu++) { - box = pmu->boxes[pkg]; - if (!box) - continue; - /* The first cpu on a package activates the box */ - if (atomic_inc_return(&box->refcnt) == 1) - uncore_box_init(box); - } - } - - return 0; -} - -static int uncore_cpu_prepare(unsigned int cpu) -{ - struct intel_uncore_type *type, **types = uncore_msr_uncores; - struct intel_uncore_pmu *pmu; - struct intel_uncore_box *box; - int i, pkg; - - pkg = topology_logical_package_id(cpu); - for (; *types; types++) { - type = *types; - pmu = type->pmus; - for (i = 0; i < type->num_boxes; i++, pmu++) { - if (pmu->boxes[pkg]) - continue; - /* First cpu of a package allocates the box */ - box = uncore_alloc_box(type, cpu_to_node(cpu)); - if (!box) - return -ENOMEM; - box->pmu = pmu; - box->pkgid = pkg; - pmu->boxes[pkg] = box; - } - } - return 0; -} - static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu, int new_cpu) { @@ -1142,12 +1078,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores, static int uncore_event_cpu_offline(unsigned int cpu) { - int target; + struct intel_uncore_type *type, **types = uncore_msr_uncores; + struct intel_uncore_pmu *pmu; + struct intel_uncore_box *box; + int i, pkg, target; /* Check if exiting cpu is used for collecting uncore events */ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) - return 0; - + goto unref; /* Find a new cpu to collect uncore events */ target = cpumask_any_but(topology_core_cpumask(cpu), cpu); @@ -1159,12 +1097,82 @@ static int uncore_event_cpu_offline(unsigned int cpu) uncore_change_context(uncore_msr_uncores, cpu, target); uncore_change_context(uncore_pci_uncores, cpu, target); + +unref: + /* Clear the references */ + pkg = topology_logical_package_id(cpu); + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[pkg]; + if (box && atomic_dec_return(&box->refcnt) == 0) + uncore_box_exit(box); + } + } return 0; } +static int allocate_boxes(struct intel_uncore_type **types, + unsigned int pkg, unsigned int cpu) +{ + struct intel_uncore_box *box, *tmp; + struct intel_uncore_type *type; + struct intel_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[pkg]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + box->pkgid = pkg; + list_add(&box->active_list, 
&allocated); + } + } + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[pkg] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + static int uncore_event_cpu_online(unsigned int cpu) { - int target; + struct intel_uncore_type *type, **types = uncore_msr_uncores; + struct intel_uncore_pmu *pmu; + struct intel_uncore_box *box; + int i, ret, pkg, target; + + pkg = topology_logical_package_id(cpu); + ret = allocate_boxes(types, pkg, cpu); + if (ret) + return ret; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[pkg]; + if (!box && atomic_inc_return(&box->refcnt) == 1) + uncore_box_init(box); + } + } /* * Check if there is an online cpu in the package @@ -1354,33 +1362,13 @@ static int __init intel_uncore_init(void) if (cret && pret) return -ENODEV; - /* - * Install callbacks. Core will call them for each online cpu. - * - * The first online cpu of each package allocates and takes - * the refcounts for all other online cpus in that package. - * If msrs are not enabled no allocation is required and - * uncore_cpu_prepare() is not called for each online cpu. - */ - if (!cret) { - ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP, - "PERF_X86_UNCORE_PREP", - uncore_cpu_prepare, NULL); - if (ret) - goto err; - } else { - cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP, - "PERF_X86_UNCORE_PREP", - uncore_cpu_prepare, NULL); - } - - cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING, - "AP_PERF_X86_UNCORE_STARTING", - uncore_cpu_starting, uncore_cpu_dying); - - cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, - "AP_PERF_X86_UNCORE_ONLINE", - uncore_event_cpu_online, uncore_event_cpu_offline); + /* Install hotplug callbacks to setup the targets for each package */ + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, + "AP_PERF_X86_UNCORE_ONLINE", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret) + goto err; return 0; err: @@ -1392,9 +1380,7 @@ module_init(intel_uncore_init); static void __exit intel_uncore_exit(void) { - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING); - cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP); + cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); uncore_types_exit(uncore_msr_uncores); uncore_pci_exit(); } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 965cc5693a46d9..ce831190f31517 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -8,7 +8,6 @@ enum cpuhp_state { CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, - CPUHP_PERF_X86_UNCORE_PREP, CPUHP_PERF_X86_AMD_UNCORE_PREP, CPUHP_PERF_BFIN, CPUHP_PERF_POWER, @@ -63,7 +62,6 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_CASC_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, - CPUHP_AP_PERF_X86_UNCORE_STARTING, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
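One detail of the uncore conversion worth calling out is allocate_boxes(): because the online callback may need several boxes and must not leave the pmu array half-populated on failure, the boxes are first collected on a local list and only published once every allocation has succeeded. A generic, hypothetical sketch of that all-or-nothing pattern (struct item and publish_items() are illustrative, not uncore code):

/*
 * Hypothetical sketch of the staged-allocation pattern used by
 * allocate_boxes() above: allocate onto a private list, publish only when
 * everything succeeded, otherwise free the partial set. "struct item" and
 * the fixed-size array are illustrative; count must not exceed its size.
 */
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	int id;
};

static struct item *published[8];

static int publish_items(int count)
{
	struct item *it, *tmp;
	LIST_HEAD(staged);
	int i;

	for (i = 0; i < count; i++) {
		it = kzalloc(sizeof(*it), GFP_KERNEL);
		if (!it)
			goto cleanup;
		it->id = i;
		list_add(&it->node, &staged);
	}

	/* All allocations succeeded: make them visible in one pass. */
	list_for_each_entry_safe(it, tmp, &staged, node) {
		list_del_init(&it->node);
		published[it->id] = it;
	}
	return 0;

cleanup:
	list_for_each_entry_safe(it, tmp, &staged, node) {
		list_del_init(&it->node);
		kfree(it);
	}
	return -ENOMEM;
}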