diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt index 2f9c5a5fcb25ff..8afb29a8604a55 100644 --- a/Documentation/power/suspend-and-interrupts.txt +++ b/Documentation/power/suspend-and-interrupts.txt @@ -40,8 +40,10 @@ but also to IPIs and to some other special-purpose interrupts. The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when requesting a special-purpose interrupt. It causes suspend_device_irqs() to -leave the corresponding IRQ enabled so as to allow the interrupt to work all -the time as expected. +leave the corresponding IRQ enabled so as to allow the interrupt to work as +expected during the suspend-resume cycle, but does not guarantee that the +interrupt will wake the system from a suspended state -- for such cases it is +necessary to use enable_irq_wake(). Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one user of it. Thus, if the IRQ is shared, all of the interrupt handlers installed @@ -110,8 +112,9 @@ any special interrupt handling logic for it to work. IRQF_NO_SUSPEND and enable_irq_wake() ------------------------------------- -There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND -flag on the same IRQ. +There are very few valid reasons to use both enable_irq_wake() and the +IRQF_NO_SUSPEND flag on the same IRQ, and it is never valid to use both for the +same device. First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND interrupts (interrupt handlers are invoked after suspend_device_irqs()) are @@ -120,4 +123,13 @@ handlers are not invoked after suspend_device_irqs()). Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not to individual interrupt handlers, so sharing an IRQ between a system wakeup -interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense. +interrupt source and an IRQF_NO_SUSPEND interrupt source does not generally +make sense. + +In rare cases an IRQ can be shared between a wakeup device driver and an +IRQF_NO_SUSPEND user. In order for this to be safe, the wakeup device driver +must be able to discern spurious IRQs from genuine wakeup events (signalling +the latter to the core with pm_system_wakeup()), must use enable_irq_wake() to +ensure that the IRQ will function as a wakeup source, and must request the IRQ +with IRQF_COND_SUSPEND to tell the core that it meets these requirements. If +these requirements are not met, it is not valid to use IRQF_COND_SUSPEND. diff --git a/MAINTAINERS b/MAINTAINERS index 42f686f2e4b241..6239a305dff0d9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8480,6 +8480,14 @@ S: Supported L: netdev@vger.kernel.org F: drivers/net/ethernet/samsung/sxgbe/ +SAMSUNG THERMAL DRIVER +M: Lukasz Majewski +L: linux-pm@vger.kernel.org +L: linux-samsung-soc@vger.kernel.org +S: Supported +T: https://github.com/lmajewski/linux-samsung-thermal.git +F: drivers/thermal/samsung/ + SAMSUNG USB2 PHY DRIVER M: Kamil Debski L: linux-kernel@vger.kernel.org diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c2fb8a87dccb29..b7d31ca5518744 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -499,6 +499,7 @@ config X86_INTEL_QUARK depends on X86_IO_APIC select IOSF_MBI select INTEL_IMR + select COMMON_CLK ---help--- Select to include support for Quark X1000 SoC. 
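A minimal sketch of the shared-wakeup-IRQ pattern described in the suspend-and-interrupts.txt hunk above. The foo_* names, struct and helpers are hypothetical; only request_irq(), the IRQF_SHARED/IRQF_COND_SUSPEND flags, enable_irq_wake() and pm_system_wakeup() are the interfaces the documentation refers to, and the real drivers in this series serialize the suspended flag with a spinlock.

#include <linux/interrupt.h>
#include <linux/suspend.h>

struct foo_device { int irq; };                         /* hypothetical */
extern bool foo_event_pending(struct foo_device *foo);  /* hypothetical */
extern void foo_handle_event(struct foo_device *foo);   /* hypothetical */

static bool foo_suspended;  /* set by the suspend callback, cleared on resume */

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
    struct foo_device *foo = dev_id;

    if (!foo_event_pending(foo))        /* shared line, not our interrupt */
        return IRQ_NONE;

    if (foo_suspended) {
        /* Genuine wakeup event: report it to the PM core rather than
         * doing the normal processing while the system is suspended. */
        pm_system_wakeup();
        return IRQ_HANDLED;
    }

    foo_handle_event(foo);
    return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_device *foo)
{
    /* IRQF_COND_SUSPEND tells the core this handler copes with being
     * invoked between suspend_device_irqs() and resume_device_irqs(). */
    return request_irq(foo->irq, foo_irq_handler,
                       IRQF_SHARED | IRQF_COND_SUSPEND, "foo", foo);
}

static int foo_suspend(struct foo_device *foo)
{
    foo_suspended = true;               /* handler now only signals wakeups */
    return enable_irq_wake(foo->irq);   /* make the line a wakeup source */
}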
Say Y here if you have a Quark based system such as the Arduino diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h index 5fa9770035dc93..c9a6d68b8d623b 100644 --- a/arch/x86/include/asm/xsave.h +++ b/arch/x86/include/asm/xsave.h @@ -82,18 +82,15 @@ static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask) if (boot_cpu_has(X86_FEATURE_XSAVES)) asm volatile("1:"XSAVES"\n\t" "2:\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) + xstate_fault + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); else asm volatile("1:"XSAVE"\n\t" "2:\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) + xstate_fault + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); - - asm volatile(xstate_fault - : "0" (0) - : "memory"); - return err; } @@ -112,18 +109,15 @@ static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask) if (boot_cpu_has(X86_FEATURE_XSAVES)) asm volatile("1:"XRSTORS"\n\t" "2:\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) + xstate_fault + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); else asm volatile("1:"XRSTOR"\n\t" "2:\n\t" - : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) + xstate_fault + : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); - - asm volatile(xstate_fault - : "0" (0) - : "memory"); - return err; } @@ -149,9 +143,9 @@ static inline int xsave_state(struct xsave_struct *fx, u64 mask) */ alternative_input_2( "1:"XSAVE, - "1:"XSAVEOPT, + XSAVEOPT, X86_FEATURE_XSAVEOPT, - "1:"XSAVES, + XSAVES, X86_FEATURE_XSAVES, [fx] "D" (fx), "a" (lmask), "d" (hmask) : "memory"); @@ -178,7 +172,7 @@ static inline int xrstor_state(struct xsave_struct *fx, u64 mask) */ alternative_input( "1: " XRSTOR, - "1: " XRSTORS, + XRSTORS, X86_FEATURE_XSAVES, "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) : "memory"); diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 10074ad9ebf85e..1d74d161687c9f 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -269,11 +269,14 @@ ENTRY(ret_from_fork) testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? jz 1f - testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET - jnz int_ret_from_sys_call - - RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET - jmp ret_from_sys_call # go to the SYSRET fastpath + /* + * By the time we get here, we have no idea whether our pt_regs, + * ti flags, and ti status came from the 64-bit SYSCALL fast path, + * the slow path, or one of the ia32entry paths. + * Use int_ret_from_sys_call to return, since it can safely handle + * all of the above. 
+ */ + jmp int_ret_from_sys_call 1: subq $REST_SKIP, %rsp # leave space for volatiles diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 6ac273832f2846..e4695985f9de85 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -331,7 +331,7 @@ static void probe_pci_root_info(struct pci_root_info *info, struct list_head *list) { int ret; - struct resource_entry *entry; + struct resource_entry *entry, *tmp; sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); info->bridge = device; @@ -345,8 +345,13 @@ static void probe_pci_root_info(struct pci_root_info *info, dev_dbg(&device->dev, "no IO and memory resources present in _CRS\n"); else - resource_list_for_each_entry(entry, list) - entry->res->name = info->name; + resource_list_for_each_entry_safe(entry, tmp, list) { + if ((entry->res->flags & IORESOURCE_WINDOW) == 0 || + (entry->res->flags & IORESOURCE_DISABLED)) + resource_list_destroy_entry(entry); + else + entry->res->name = info->name; + } } struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index c723668e3e277d..5589a6e2a02346 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -42,8 +42,10 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io) * CHECKME: len might be required to check versus a minimum * length as well. 1 for io is fine, but for memory it does * not make any sense at all. + * Note: some BIOSes report incorrect length for ACPI address space + * descriptor, so remove check of 'reslen == len' to avoid regression. */ - if (len && reslen && reslen == len && start <= end) + if (len && reslen && start <= end) return true; pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n", diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index debd30917010a1..26eb70c8f5184f 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -2110,7 +2110,8 @@ static int __init intel_opregion_present(void) int acpi_video_register(void) { - int result = 0; + int ret; + if (register_count) { /* * if the function of acpi_video_register is already called, @@ -2122,9 +2123,9 @@ int acpi_video_register(void) mutex_init(&video_list_lock); INIT_LIST_HEAD(&video_bus_head); - result = acpi_bus_register_driver(&acpi_video_bus); - if (result < 0) - return -ENODEV; + ret = acpi_bus_register_driver(&acpi_video_bus); + if (ret) + return ret; /* * When the acpi_video_bus is loaded successfully, increase @@ -2176,6 +2177,17 @@ EXPORT_SYMBOL(acpi_video_unregister_backlight); static int __init acpi_video_init(void) { + /* + * Let the module load even if ACPI is disabled (e.g. due to + * a broken BIOS) so that i915.ko can still be loaded on such + * old systems without an AcpiOpRegion. + * + * acpi_video_register() will report -ENODEV later as well due + * to acpi_disabled when i915.ko tries to register itself afterwards. 
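The arch/x86/pci/acpi.c hunk above switches to the _safe variant of the resource list iterator so that unusable entries can be destroyed while the list is being walked. A small sketch of that pattern, assuming the list has already been populated elsewhere; only the iteration helpers and flag bits come from the kernel API.

#include <linux/ioport.h>
#include <linux/resource_ext.h>

/* Drop descriptors we cannot use and label the ones we keep (sketch). */
static void filter_root_resources(struct list_head *list, const char *name)
{
    struct resource_entry *entry, *tmp;

    resource_list_for_each_entry_safe(entry, tmp, list) {
        struct resource *res = entry->res;

        if (!(res->flags & IORESOURCE_WINDOW) ||
            (res->flags & IORESOURCE_DISABLED))
            resource_list_destroy_entry(entry);  /* unlinks and frees */
        else
            res->name = name;
    }
}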
+ */ + if (acpi_disabled) + return 0; + dmi_check_system(video_dmi_table); if (intel_opregion_present()) diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index ba4abbe4693c3e..45937f88e77c88 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2242,7 +2242,7 @@ static void rtpm_status_str(struct seq_file *s, struct device *dev) } static int pm_genpd_summary_one(struct seq_file *s, - struct generic_pm_domain *gpd) + struct generic_pm_domain *genpd) { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", @@ -2256,26 +2256,26 @@ static int pm_genpd_summary_one(struct seq_file *s, struct gpd_link *link; int ret; - ret = mutex_lock_interruptible(&gpd->lock); + ret = mutex_lock_interruptible(&genpd->lock); if (ret) return -ERESTARTSYS; - if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup))) + if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup))) goto exit; - seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]); + seq_printf(s, "%-30s %-15s ", genpd->name, status_lookup[genpd->status]); /* * Modifications on the list require holding locks on both * master and slave, so we are safe. - * Also gpd->name is immutable. + * Also genpd->name is immutable. */ - list_for_each_entry(link, &gpd->master_links, master_node) { + list_for_each_entry(link, &genpd->master_links, master_node) { seq_printf(s, "%s", link->slave->name); - if (!list_is_last(&link->master_node, &gpd->master_links)) + if (!list_is_last(&link->master_node, &genpd->master_links)) seq_puts(s, ", "); } - list_for_each_entry(pm_data, &gpd->dev_list, list_node) { + list_for_each_entry(pm_data, &genpd->dev_list, list_node) { kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL); if (kobj_path == NULL) continue; @@ -2287,14 +2287,14 @@ static int pm_genpd_summary_one(struct seq_file *s, seq_puts(s, "\n"); exit: - mutex_unlock(&gpd->lock); + mutex_unlock(&genpd->lock); return 0; } static int pm_genpd_summary_show(struct seq_file *s, void *data) { - struct generic_pm_domain *gpd; + struct generic_pm_domain *genpd; int ret = 0; seq_puts(s, " domain status slaves\n"); @@ -2305,8 +2305,8 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) if (ret) return -ERESTARTSYS; - list_for_each_entry(gpd, &gpd_list, gpd_list_node) { - ret = pm_genpd_summary_one(s, gpd); + list_for_each_entry(genpd, &gpd_list, gpd_list_node) { + ret = pm_genpd_summary_one(s, genpd); if (ret) break; } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index c2744b30d5d92e..aab7158d2afea8 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -730,6 +730,7 @@ void pm_system_wakeup(void) pm_abort_suspend = true; freeze_wake(); } +EXPORT_SYMBOL_GPL(pm_system_wakeup); void pm_wakeup_clear(void) { diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index f07c8152e5cc42..3f27d21fb7297e 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -89,12 +89,29 @@ static int pmc_irq_set_type(struct irq_data *d, unsigned type) return 0; } +static void pmc_irq_suspend(struct irq_data *d) +{ + struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); + + pmc->imr = pmc_read(pmc, AT91_PMC_IMR); + pmc_write(pmc, AT91_PMC_IDR, pmc->imr); +} + +static void pmc_irq_resume(struct irq_data *d) +{ + struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); + + pmc_write(pmc, AT91_PMC_IER, pmc->imr); +} + static struct irq_chip pmc_irq = { .name = "PMC", .irq_disable = pmc_irq_mask, .irq_mask = pmc_irq_mask, .irq_unmask = pmc_irq_unmask, 
.irq_set_type = pmc_irq_set_type, + .irq_suspend = pmc_irq_suspend, + .irq_resume = pmc_irq_resume, }; static struct lock_class_key pmc_lock_class; @@ -224,7 +241,8 @@ static struct at91_pmc *__init at91_pmc_init(struct device_node *np, goto out_free_pmc; pmc_write(pmc, AT91_PMC_IDR, 0xffffffff); - if (request_irq(pmc->virq, pmc_irq_handler, IRQF_SHARED, "pmc", pmc)) + if (request_irq(pmc->virq, pmc_irq_handler, + IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc)) goto out_remove_irqdomain; return pmc; diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 52d2041fa3f635..69abb08cf14651 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -33,6 +33,7 @@ struct at91_pmc { spinlock_t lock; const struct at91_pmc_caps *caps; struct irq_domain *irqdomain; + u32 imr; }; static inline void pmc_lock(struct at91_pmc *pmc) diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 5e98c6b1f284b6..82d2fbb20f7eb7 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -159,7 +159,7 @@ static struct cpufreq_driver exynos_driver = { static int exynos_cpufreq_probe(struct platform_device *pdev) { - struct device_node *cpus, *np; + struct device_node *cpu0; int ret = -EINVAL; exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); @@ -206,28 +206,19 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) if (ret) goto err_cpufreq_reg; - cpus = of_find_node_by_path("/cpus"); - if (!cpus) { - pr_err("failed to find cpus node\n"); + cpu0 = of_get_cpu_node(0, NULL); + if (!cpu0) { + pr_err("failed to find cpu0 node\n"); return 0; } - np = of_get_next_child(cpus, NULL); - if (!np) { - pr_err("failed to find cpus child node\n"); - of_node_put(cpus); - return 0; - } - - if (of_find_property(np, "#cooling-cells", NULL)) { - cdev = of_cpufreq_cooling_register(np, + if (of_find_property(cpu0, "#cooling-cells", NULL)) { + cdev = of_cpufreq_cooling_register(cpu0, cpu_present_mask); if (IS_ERR(cdev)) pr_err("running cpufreq without cooling device: %ld\n", PTR_ERR(cdev)); } - of_node_put(np); - of_node_put(cpus); return 0; diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index bee5df7794d33d..7cb4b766cf948d 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c @@ -22,6 +22,8 @@ #include #include +#include /* for get_hard_smp_processor_id() in UP configs */ + /** * struct cpu_data - per CPU data struct * @parent: the parent node of cpu clock diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 4d534582514e01..080bd2dbde4ba5 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -44,6 +44,12 @@ void disable_cpuidle(void) off = 1; } +bool cpuidle_not_available(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + return off || !initialized || !drv || !dev || !dev->enabled; +} + /** * cpuidle_play_dead - cpu off-lining * @@ -66,14 +72,8 @@ int cpuidle_play_dead(void) return -ENODEV; } -/** - * cpuidle_find_deepest_state - Find deepest state meeting specific conditions. - * @drv: cpuidle driver for the given CPU. - * @dev: cpuidle device for the given CPU. - * @freeze: Whether or not the state should be suitable for suspend-to-idle. - */ -static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, - struct cpuidle_device *dev, bool freeze) +static int find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev, bool freeze) { unsigned int latency_req = 0; int i, ret = freeze ? 
-1 : CPUIDLE_DRIVER_STATE_START - 1; @@ -92,6 +92,17 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, return ret; } +/** + * cpuidle_find_deepest_state - Find the deepest available idle state. + * @drv: cpuidle driver for the given CPU. + * @dev: cpuidle device for the given CPU. + */ +int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{ + return find_deepest_state(drv, dev, false); +} + static void enter_freeze_proper(struct cpuidle_driver *drv, struct cpuidle_device *dev, int index) { @@ -113,15 +124,14 @@ static void enter_freeze_proper(struct cpuidle_driver *drv, /** * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. + * @drv: cpuidle driver for the given CPU. + * @dev: cpuidle device for the given CPU. * * If there are states with the ->enter_freeze callback, find the deepest of - * them and enter it with frozen tick. Otherwise, find the deepest state - * available and enter it normally. + * them and enter it with frozen tick. */ -void cpuidle_enter_freeze(void) +int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) { - struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); - struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); int index; /* @@ -129,24 +139,11 @@ void cpuidle_enter_freeze(void) * that interrupts won't be enabled when it exits and allows the tick to * be frozen safely. */ - index = cpuidle_find_deepest_state(drv, dev, true); - if (index >= 0) { - enter_freeze_proper(drv, dev, index); - return; - } - - /* - * It is not safe to freeze the tick, find the deepest state available - * at all and try to enter it normally. - */ - index = cpuidle_find_deepest_state(drv, dev, false); + index = find_deepest_state(drv, dev, true); if (index >= 0) - cpuidle_enter(drv, dev, index); - else - arch_cpu_idle(); + enter_freeze_proper(drv, dev, index); - /* Interrupts are enabled again here. */ - local_irq_disable(); + return index; } /** @@ -205,12 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, */ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) { - if (off || !initialized) - return -ENODEV; - - if (!drv || !dev || !dev->enabled) - return -EBUSY; - return cpuidle_curr_governor->select(drv, dev); } diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index c5f7b4e9eb6c6e..69fac068669fde 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c @@ -78,7 +78,7 @@ static const char * __init dmi_string(const struct dmi_header *dm, u8 s) * We have to be cautious here. 
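The cpuidle.c rework above turns cpuidle_enter_freeze() into a helper that only handles states with an ->enter_freeze callback and leaves the fallback decisions to the caller. A rough sketch of how an idle-loop caller might combine the changed helpers; the surrounding loop shape and the arch_cpu_idle() fallback are assumptions about the caller, not part of this diff.

#include <linux/cpu.h>
#include <linux/cpuidle.h>

static void idle_enter_sketch(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev,
                              bool suspend_to_idle)
{
    int index;

    if (cpuidle_not_available(drv, dev)) {
        arch_cpu_idle();                /* no usable cpuidle driver */
        return;
    }

    if (suspend_to_idle) {
        /* Prefer a state that can be entered with the tick frozen. */
        index = cpuidle_enter_freeze(drv, dev);
        if (index >= 0)
            return;

        /* Otherwise enter the deepest ordinary state normally. */
        index = cpuidle_find_deepest_state(drv, dev);
        if (index >= 0) {
            cpuidle_enter(drv, dev, index);
            return;
        }

        arch_cpu_idle();
        return;
    }

    /* Normal idle: let the governor pick a state. */
    index = cpuidle_select(drv, dev);
    if (index >= 0)
        cpuidle_enter(drv, dev, index);
}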
We have seen BIOSes with DMI pointers * pointing to completely the wrong place for example */ -static void dmi_table(u8 *buf, int len, int num, +static void dmi_table(u8 *buf, u32 len, int num, void (*decode)(const struct dmi_header *, void *), void *private_data) { @@ -92,12 +92,6 @@ static void dmi_table(u8 *buf, int len, int num, while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) { const struct dmi_header *dm = (const struct dmi_header *)data; - /* - * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] - */ - if (dm->type == DMI_ENTRY_END_OF_TABLE) - break; - /* * We want to know the total length (formatted area and * strings) before decoding to make sure we won't run off the @@ -108,13 +102,20 @@ static void dmi_table(u8 *buf, int len, int num, data++; if (data - buf < len - 1) decode(dm, private_data); + + /* + * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0] + */ + if (dm->type == DMI_ENTRY_END_OF_TABLE) + break; + data += 2; i++; } } static phys_addr_t dmi_base; -static u16 dmi_len; +static u32 dmi_len; static u16 dmi_num; static int __init dmi_walk_early(void (*decode)(const struct dmi_header *, diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index 2fe195002021d0..f07d4a67fa76b3 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -179,12 +179,12 @@ efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, start = desc->phys_addr; end = start + desc->num_pages * (1UL << EFI_PAGE_SHIFT); - if ((start + size) > end || (start + size) > max) - continue; - - if (end - size > max) + if (end > max) end = max; + if ((start + size) > end) + continue; + if (round_down(end - size, align) < start) continue; diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index 1ec694a52379ea..464bf492ee2ae3 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c @@ -80,7 +80,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, if (err) return err; - resource_list_for_each_entry(win, res, list) { + resource_list_for_each_entry(win, res) { struct resource *parent, *res = win->res; switch (resource_type(res)) { diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 70a5d94cc766af..b4f7744f67510b 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include "rtc-at91rm9200.h" @@ -54,6 +55,10 @@ static void __iomem *at91_rtc_regs; static int irq; static DEFINE_SPINLOCK(at91_rtc_lock); static u32 at91_rtc_shadow_imr; +static bool suspended; +static DEFINE_SPINLOCK(suspended_lock); +static unsigned long cached_events; +static u32 at91_rtc_imr; static void at91_rtc_write_ier(u32 mask) { @@ -290,7 +295,9 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) struct rtc_device *rtc = platform_get_drvdata(pdev); unsigned int rtsr; unsigned long events = 0; + int ret = IRQ_NONE; + spin_lock(&suspended_lock); rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read_imr(); if (rtsr) { /* this interrupt is shared! Is it ours? 
*/ if (rtsr & AT91_RTC_ALARM) @@ -304,14 +311,22 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */ - rtc_update_irq(rtc, 1, events); + if (!suspended) { + rtc_update_irq(rtc, 1, events); - dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", __func__, - events >> 8, events & 0x000000FF); + dev_dbg(&pdev->dev, "%s(): num=%ld, events=0x%02lx\n", + __func__, events >> 8, events & 0x000000FF); + } else { + cached_events |= events; + at91_rtc_write_idr(at91_rtc_imr); + pm_system_wakeup(); + } - return IRQ_HANDLED; + ret = IRQ_HANDLED; } - return IRQ_NONE; /* not handled */ + spin_lock(&suspended_lock); + + return ret; } static const struct at91_rtc_config at91rm9200_config = { @@ -401,8 +416,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev) AT91_RTC_CALEV); ret = devm_request_irq(&pdev->dev, irq, at91_rtc_interrupt, - IRQF_SHARED, - "at91_rtc", pdev); + IRQF_SHARED | IRQF_COND_SUSPEND, + "at91_rtc", pdev); if (ret) { dev_err(&pdev->dev, "IRQ %d already in use.\n", irq); return ret; @@ -454,8 +469,6 @@ static void at91_rtc_shutdown(struct platform_device *pdev) /* AT91RM9200 RTC Power management control */ -static u32 at91_rtc_imr; - static int at91_rtc_suspend(struct device *dev) { /* this IRQ is shared with DBGU and other hardware which isn't @@ -464,21 +477,42 @@ static int at91_rtc_suspend(struct device *dev) at91_rtc_imr = at91_rtc_read_imr() & (AT91_RTC_ALARM|AT91_RTC_SECEV); if (at91_rtc_imr) { - if (device_may_wakeup(dev)) + if (device_may_wakeup(dev)) { + unsigned long flags; + enable_irq_wake(irq); - else + + spin_lock_irqsave(&suspended_lock, flags); + suspended = true; + spin_unlock_irqrestore(&suspended_lock, flags); + } else { at91_rtc_write_idr(at91_rtc_imr); + } } return 0; } static int at91_rtc_resume(struct device *dev) { + struct rtc_device *rtc = dev_get_drvdata(dev); + if (at91_rtc_imr) { - if (device_may_wakeup(dev)) + if (device_may_wakeup(dev)) { + unsigned long flags; + + spin_lock_irqsave(&suspended_lock, flags); + + if (cached_events) { + rtc_update_irq(rtc, 1, cached_events); + cached_events = 0; + } + + suspended = false; + spin_unlock_irqrestore(&suspended_lock, flags); + disable_irq_wake(irq); - else - at91_rtc_write_ier(at91_rtc_imr); + } + at91_rtc_write_ier(at91_rtc_imr); } return 0; } diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index 2183fd2750abd9..5ccaee32df7223 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c @@ -23,6 +23,7 @@ #include #include #include +#include #include /* @@ -77,6 +78,9 @@ struct sam9_rtc { unsigned int gpbr_offset; int irq; struct clk *sclk; + bool suspended; + unsigned long events; + spinlock_t lock; }; #define rtt_readl(rtc, field) \ @@ -271,14 +275,9 @@ static int at91_rtc_proc(struct device *dev, struct seq_file *seq) return 0; } -/* - * IRQ handler for the RTC - */ -static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) +static irqreturn_t at91_rtc_cache_events(struct sam9_rtc *rtc) { - struct sam9_rtc *rtc = _rtc; u32 sr, mr; - unsigned long events = 0; /* Shared interrupt may be for another device. Note: reading * SR clears it, so we must only read it in this irq handler! 
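Both AT91 RTC drivers above use the same suspend/resume bookkeeping around their shared, wakeup-capable IRQ: the handler caches events and calls pm_system_wakeup() while suspended, and resume replays what was cached. A condensed sketch of the suspend/resume side with placeholder foo_* names; the reporting helper is hypothetical (the RTC drivers use rtc_update_irq()).

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);
static bool foo_suspended;
static unsigned long foo_cached_events;
static int foo_irq;                                     /* set at probe time */

extern void foo_report_events(struct device *dev, unsigned long events); /* hypothetical */

static int foo_suspend(struct device *dev)
{
    unsigned long flags;

    if (device_may_wakeup(dev)) {
        enable_irq_wake(foo_irq);

        spin_lock_irqsave(&foo_lock, flags);
        foo_suspended = true;           /* handler caches events from now on */
        spin_unlock_irqrestore(&foo_lock, flags);
    }
    return 0;
}

static int foo_resume(struct device *dev)
{
    unsigned long flags;

    if (device_may_wakeup(dev)) {
        spin_lock_irqsave(&foo_lock, flags);
        if (foo_cached_events) {
            /* Deliver whatever arrived while we were suspended. */
            foo_report_events(dev, foo_cached_events);
            foo_cached_events = 0;
        }
        foo_suspended = false;
        spin_unlock_irqrestore(&foo_lock, flags);

        disable_irq_wake(foo_irq);
    }
    return 0;
}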
@@ -290,18 +289,54 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) /* alarm status */ if (sr & AT91_RTT_ALMS) - events |= (RTC_AF | RTC_IRQF); + rtc->events |= (RTC_AF | RTC_IRQF); /* timer update/increment */ if (sr & AT91_RTT_RTTINC) - events |= (RTC_UF | RTC_IRQF); + rtc->events |= (RTC_UF | RTC_IRQF); + + return IRQ_HANDLED; +} + +static void at91_rtc_flush_events(struct sam9_rtc *rtc) +{ + if (!rtc->events) + return; - rtc_update_irq(rtc->rtcdev, 1, events); + rtc_update_irq(rtc->rtcdev, 1, rtc->events); + rtc->events = 0; pr_debug("%s: num=%ld, events=0x%02lx\n", __func__, - events >> 8, events & 0x000000FF); + rtc->events >> 8, rtc->events & 0x000000FF); +} - return IRQ_HANDLED; +/* + * IRQ handler for the RTC + */ +static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc) +{ + struct sam9_rtc *rtc = _rtc; + int ret; + + spin_lock(&rtc->lock); + + ret = at91_rtc_cache_events(rtc); + + /* We're called in suspended state */ + if (rtc->suspended) { + /* Mask irqs coming from this peripheral */ + rtt_writel(rtc, MR, + rtt_readl(rtc, MR) & + ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); + /* Trigger a system wakeup */ + pm_system_wakeup(); + } else { + at91_rtc_flush_events(rtc); + } + + spin_unlock(&rtc->lock); + + return ret; } static const struct rtc_class_ops at91_rtc_ops = { @@ -421,7 +456,8 @@ static int at91_rtc_probe(struct platform_device *pdev) /* register irq handler after we know what name we'll use */ ret = devm_request_irq(&pdev->dev, rtc->irq, at91_rtc_interrupt, - IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); + IRQF_SHARED | IRQF_COND_SUSPEND, + dev_name(&rtc->rtcdev->dev), rtc); if (ret) { dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); return ret; @@ -482,7 +518,12 @@ static int at91_rtc_suspend(struct device *dev) rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); if (rtc->imr) { if (device_may_wakeup(dev) && (mr & AT91_RTT_ALMIEN)) { + unsigned long flags; + enable_irq_wake(rtc->irq); + spin_lock_irqsave(&rtc->lock, flags); + rtc->suspended = true; + spin_unlock_irqrestore(&rtc->lock, flags); /* don't let RTTINC cause wakeups */ if (mr & AT91_RTT_RTTINCIEN) rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); @@ -499,10 +540,18 @@ static int at91_rtc_resume(struct device *dev) u32 mr; if (rtc->imr) { + unsigned long flags; + if (device_may_wakeup(dev)) disable_irq_wake(rtc->irq); mr = rtt_readl(rtc, MR); rtt_writel(rtc, MR, mr | rtc->imr); + + spin_lock_irqsave(&rtc->lock, flags); + rtc->suspended = false; + at91_rtc_cache_events(rtc); + at91_rtc_flush_events(rtc); + spin_unlock_irqrestore(&rtc->lock, flags); } return 0; diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c index f88b0887702568..1e25133d35e2cb 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c @@ -208,7 +208,7 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, trip_cnt, GFP_KERNEL); if (!int34x_thermal_zone->aux_trips) { ret = -ENOMEM; - goto free_mem; + goto err_trip_alloc; } trip_mask = BIT(trip_cnt) - 1; int34x_thermal_zone->aux_trip_nr = trip_cnt; @@ -248,14 +248,15 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, 0, 0); if (IS_ERR(int34x_thermal_zone->zone)) { ret = PTR_ERR(int34x_thermal_zone->zone); - goto free_lpat; + goto err_thermal_zone; } return int34x_thermal_zone; -free_lpat: +err_thermal_zone: acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); 
-free_mem: + kfree(int34x_thermal_zone->aux_trips); +err_trip_alloc: kfree(int34x_thermal_zone); return ERR_PTR(ret); } @@ -266,6 +267,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone { thermal_zone_device_unregister(int34x_thermal_zone->zone); acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); + kfree(int34x_thermal_zone->aux_trips); kfree(int34x_thermal_zone); } EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 1fc54ab911d206..1d30b097565154 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -682,6 +682,7 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) if (on) { con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); + con |= (1 << EXYNOS7_PD_DET_EN_SHIFT); interrupt_en = (of_thermal_is_trip_valid(tz, 7) << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | @@ -704,9 +705,9 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on) interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; } else { con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); + con &= ~(1 << EXYNOS7_PD_DET_EN_SHIFT); interrupt_en = 0; /* Disable all interrupts */ } - con |= 1 << EXYNOS7_PD_DET_EN_SHIFT; writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); writel(con, data->base + EXYNOS_TMU_REG_CONTROL); diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 48491d1a81d650..174d3bcf8bd7a1 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -899,6 +899,22 @@ thermal_cooling_device_trip_point_show(struct device *dev, return sprintf(buf, "%d\n", instance->trip); } +static struct attribute *cooling_device_attrs[] = { + &dev_attr_cdev_type.attr, + &dev_attr_max_state.attr, + &dev_attr_cur_state.attr, + NULL, +}; + +static const struct attribute_group cooling_device_attr_group = { + .attrs = cooling_device_attrs, +}; + +static const struct attribute_group *cooling_device_attr_groups[] = { + &cooling_device_attr_group, + NULL, +}; + /* Device management */ /** @@ -1130,6 +1146,7 @@ __thermal_cooling_device_register(struct device_node *np, cdev->ops = ops; cdev->updated = false; cdev->device.class = &thermal_class; + cdev->device.groups = cooling_device_attr_groups; cdev->devdata = devdata; dev_set_name(&cdev->device, "cooling_device%d", cdev->id); result = device_register(&cdev->device); @@ -1139,21 +1156,6 @@ __thermal_cooling_device_register(struct device_node *np, return ERR_PTR(result); } - /* sys I/F */ - if (type) { - result = device_create_file(&cdev->device, &dev_attr_cdev_type); - if (result) - goto unregister; - } - - result = device_create_file(&cdev->device, &dev_attr_max_state); - if (result) - goto unregister; - - result = device_create_file(&cdev->device, &dev_attr_cur_state); - if (result) - goto unregister; - /* Add 'this' new cdev to the global cdev list */ mutex_lock(&thermal_list_lock); list_add(&cdev->node, &thermal_cdev_list); @@ -1163,11 +1165,6 @@ __thermal_cooling_device_register(struct device_node *np, bind_cdev(cdev); return cdev; - -unregister: - release_idr(&thermal_cdev_idr, &thermal_idr_lock, cdev->id); - device_unregister(&cdev->device); - return ERR_PTR(result); } /** diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 846552bff67d6f..4e959c43f6804d 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include @@ -173,6 +174,12 @@ struct 
atmel_uart_port { bool ms_irq_enabled; bool is_usart; /* usart or uart */ struct timer_list uart_timer; /* uart timer */ + + bool suspended; + unsigned int pending; + unsigned int pending_status; + spinlock_t lock_suspended; + int (*prepare_rx)(struct uart_port *port); int (*prepare_tx)(struct uart_port *port); void (*schedule_rx)(struct uart_port *port); @@ -1179,12 +1186,15 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) { struct uart_port *port = dev_id; struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - unsigned int status, pending, pass_counter = 0; + unsigned int status, pending, mask, pass_counter = 0; bool gpio_handled = false; + spin_lock(&atmel_port->lock_suspended); + do { status = atmel_get_lines_status(port); - pending = status & UART_GET_IMR(port); + mask = UART_GET_IMR(port); + pending = status & mask; if (!gpio_handled) { /* * Dealing with GPIO interrupt @@ -1206,11 +1216,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) if (!pending) break; + if (atmel_port->suspended) { + atmel_port->pending |= pending; + atmel_port->pending_status = status; + UART_PUT_IDR(port, mask); + pm_system_wakeup(); + break; + } + atmel_handle_receive(port, pending); atmel_handle_status(port, pending, status); atmel_handle_transmit(port, pending); } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); + spin_unlock(&atmel_port->lock_suspended); + return pass_counter ? IRQ_HANDLED : IRQ_NONE; } @@ -1742,7 +1762,8 @@ static int atmel_startup(struct uart_port *port) /* * Allocate the IRQ */ - retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, + retval = request_irq(port->irq, atmel_interrupt, + IRQF_SHARED | IRQF_COND_SUSPEND, tty ? tty->name : "atmel_serial", port); if (retval) { dev_err(port->dev, "atmel_startup - Can't get irq\n"); @@ -2513,8 +2534,14 @@ static int atmel_serial_suspend(struct platform_device *pdev, /* we can not wake up if we're running on slow clock */ atmel_port->may_wakeup = device_may_wakeup(&pdev->dev); - if (atmel_serial_clk_will_stop()) + if (atmel_serial_clk_will_stop()) { + unsigned long flags; + + spin_lock_irqsave(&atmel_port->lock_suspended, flags); + atmel_port->suspended = true; + spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); device_set_wakeup_enable(&pdev->dev, 0); + } uart_suspend_port(&atmel_uart, port); @@ -2525,6 +2552,18 @@ static int atmel_serial_resume(struct platform_device *pdev) { struct uart_port *port = platform_get_drvdata(pdev); struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + unsigned long flags; + + spin_lock_irqsave(&atmel_port->lock_suspended, flags); + if (atmel_port->pending) { + atmel_handle_receive(port, atmel_port->pending); + atmel_handle_status(port, atmel_port->pending, + atmel_port->pending_status); + atmel_handle_transmit(port, atmel_port->pending); + atmel_port->pending = 0; + } + atmel_port->suspended = false; + spin_unlock_irqrestore(&atmel_port->lock_suspended, flags); uart_resume_port(&atmel_uart, port); device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup); @@ -2593,6 +2632,8 @@ static int atmel_serial_probe(struct platform_device *pdev) port->backup_imr = 0; port->uart.line = ret; + spin_lock_init(&port->lock_suspended); + ret = atmel_init_gpios(port, &pdev->dev); if (ret < 0) dev_err(&pdev->dev, "%s", diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c index 6df940528fd21b..1443b3c391de49 100644 --- a/drivers/watchdog/at91sam9_wdt.c +++ b/drivers/watchdog/at91sam9_wdt.c @@ -208,7 +208,8 @@ static int at91_wdt_init(struct 
platform_device *pdev, struct at91wdt *wdt) if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) { err = request_irq(wdt->irq, wdt_interrupt, - IRQF_SHARED | IRQF_IRQPOLL, + IRQF_SHARED | IRQF_IRQPOLL | + IRQF_NO_SUSPEND, pdev->name, wdt); if (err) return err; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 993642199326a7..6d67f32e648df7 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1645,14 +1645,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, parent_nritems = btrfs_header_nritems(parent); blocksize = root->nodesize; - end_slot = parent_nritems; + end_slot = parent_nritems - 1; - if (parent_nritems == 1) + if (parent_nritems <= 1) return 0; btrfs_set_lock_blocking(parent); - for (i = start_slot; i < end_slot; i++) { + for (i = start_slot; i <= end_slot; i++) { int close = 1; btrfs_node_key(parent, &disk_key, i); @@ -1669,7 +1669,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, other = btrfs_node_blockptr(parent, i - 1); close = close_blocks(blocknr, other, blocksize); } - if (!close && i < end_slot - 2) { + if (!close && i < end_slot) { other = btrfs_node_blockptr(parent, i + 1); close = close_blocks(blocknr, other, blocksize); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 571f402d3fc46e..6f080451fcb111 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3208,6 +3208,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, return 0; } + if (trans->aborted) + return 0; again: inode = lookup_free_space_inode(root, block_group, path); if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { @@ -3243,6 +3245,20 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, */ BTRFS_I(inode)->generation = 0; ret = btrfs_update_inode(trans, root, inode); + if (ret) { + /* + * So theoretically we could recover from this, simply set the + * super cache generation to 0 so we know to invalidate the + * cache, but then we'd have to keep track of the block groups + * that fail this way so we know we _have_ to reset this cache + * before the next commit or risk reading stale cache. So to + * limit our exposure to horrible edge cases lets just abort the + * transaction, this only happens in really bad situations + * anyway. + */ + btrfs_abort_transaction(trans, root, ret); + goto out_put; + } WARN_ON(ret); if (i_size_read(inode) > 0) { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b78bbbac900db8..30982bbd31c30c 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1811,22 +1811,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, mutex_unlock(&inode->i_mutex); /* - * we want to make sure fsync finds this change - * but we haven't joined a transaction running right now. - * - * Later on, someone is sure to update the inode and get the - * real transid recorded. - * - * We set last_trans now to the fs_info generation + 1, - * this will either be one more than the running transaction - * or the generation used for the next transaction if there isn't - * one running right now. - * * We also have to set last_sub_trans to the current log transid, * otherwise subsequent syncs to a file that's been synced in this * transaction will appear to have already occured. 
*/ - BTRFS_I(inode)->last_trans = root->fs_info->generation + 1; BTRFS_I(inode)->last_sub_trans = root->log_transid; if (num_written > 0) { err = generic_write_sync(file, pos, num_written); @@ -1959,25 +1947,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) atomic_inc(&root->log_batch); /* - * check the transaction that last modified this inode - * and see if its already been committed - */ - if (!BTRFS_I(inode)->last_trans) { - mutex_unlock(&inode->i_mutex); - goto out; - } - - /* - * if the last transaction that changed this file was before - * the current transaction, we can bail out now without any - * syncing + * If the last transaction that changed this file was before the current + * transaction and we have the full sync flag set in our inode, we can + * bail out now without any syncing. + * + * Note that we can't bail out if the full sync flag isn't set. This is + * because when the full sync flag is set we start all ordered extents + * and wait for them to fully complete - when they complete they update + * the inode's last_trans field through: + * + * btrfs_finish_ordered_io() -> + * btrfs_update_inode_fallback() -> + * btrfs_update_inode() -> + * btrfs_set_inode_last_trans() + * + * So we are sure that last_trans is up to date and can do this check to + * bail out safely. For the fast path, when the full sync flag is not + * set in our inode, we can not do it because we start only our ordered + * extents and don't wait for them to complete (that is when + * btrfs_finish_ordered_io runs), so here at this point their last_trans + * value might be less than or equals to fs_info->last_trans_committed, + * and setting a speculative last_trans for an inode when a buffered + * write is made (such as fs_info->generation + 1 for example) would not + * be reliable since after setting the value and before fsync is called + * any number of transactions can start and commit (transaction kthread + * commits the current transaction periodically), and a transaction + * commit does not start nor waits for ordered extents to complete. */ smp_mb(); if (btrfs_inode_in_log(inode, root->fs_info->generation) || - BTRFS_I(inode)->last_trans <= - root->fs_info->last_trans_committed) { - BTRFS_I(inode)->last_trans = 0; - + (full_sync && BTRFS_I(inode)->last_trans <= + root->fs_info->last_trans_committed)) { /* * We'v had everything committed since the last time we were * modified so clear this flag in case it was set for whatever @@ -2275,6 +2275,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) bool same_page; bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES); u64 ino_size; + bool truncated_page = false; + bool updated_inode = false; ret = btrfs_wait_ordered_range(inode, offset, len); if (ret) @@ -2306,13 +2308,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) * entire page. 
*/ if (same_page && len < PAGE_CACHE_SIZE) { - if (offset < ino_size) + if (offset < ino_size) { + truncated_page = true; ret = btrfs_truncate_page(inode, offset, len, 0); + } else { + ret = 0; + } goto out_only_mutex; } /* zero back part of the first page */ if (offset < ino_size) { + truncated_page = true; ret = btrfs_truncate_page(inode, offset, 0, 0); if (ret) { mutex_unlock(&inode->i_mutex); @@ -2348,6 +2355,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) if (!ret) { /* zero the front end of the last page */ if (tail_start + tail_len < ino_size) { + truncated_page = true; ret = btrfs_truncate_page(inode, tail_start + tail_len, 0, 1); if (ret) @@ -2357,8 +2365,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) } if (lockend < lockstart) { - mutex_unlock(&inode->i_mutex); - return 0; + ret = 0; + goto out_only_mutex; } while (1) { @@ -2506,6 +2514,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) trans->block_rsv = &root->fs_info->trans_block_rsv; ret = btrfs_update_inode(trans, root, inode); + updated_inode = true; btrfs_end_transaction(trans, root); btrfs_btree_balance_dirty(root); out_free: @@ -2515,6 +2524,22 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state, GFP_NOFS); out_only_mutex: + if (!updated_inode && truncated_page && !ret && !err) { + /* + * If we only end up zeroing part of a page, we still need to + * update the inode item, so that all the time fields are + * updated as well as the necessary btrfs inode in memory fields + * for detecting, at fsync time, if the inode isn't yet in the + * log tree or it's there but not up to date. + */ + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) { + err = PTR_ERR(trans); + } else { + err = btrfs_update_inode(trans, root, inode); + ret = btrfs_end_transaction(trans, root); + } + } mutex_unlock(&inode->i_mutex); if (ret && !err) err = ret; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a85c23dfcddbcf..da828cf5e8f885 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7285,7 +7285,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && em->block_start != EXTENT_MAP_HOLE)) { int type; - int ret; u64 block_start, orig_start, orig_block_len, ram_bytes; if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 534544e08f7694..157cc54fc63486 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -452,9 +452,7 @@ void btrfs_get_logged_extents(struct inode *inode, continue; if (entry_end(ordered) <= start) break; - if (!list_empty(&ordered->log_list)) - continue; - if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) + if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) continue; list_add(&ordered->log_list, logged_list); atomic_inc(&ordered->refs); @@ -511,8 +509,7 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans, wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)); - if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags)) - list_add_tail(&ordered->trans_list, &trans->ordered); + list_add_tail(&ordered->trans_list, &trans->ordered); spin_lock_irq(&log->log_extents_lock[index]); } spin_unlock_irq(&log->log_extents_lock[index]); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index fe5857223515d1..d6033f540cc75c 
100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -230,6 +230,7 @@ struct pending_dir_move { u64 parent_ino; u64 ino; u64 gen; + bool is_orphan; struct list_head update_refs; }; @@ -2984,7 +2985,8 @@ static int add_pending_dir_move(struct send_ctx *sctx, u64 ino_gen, u64 parent_ino, struct list_head *new_refs, - struct list_head *deleted_refs) + struct list_head *deleted_refs, + const bool is_orphan) { struct rb_node **p = &sctx->pending_dir_moves.rb_node; struct rb_node *parent = NULL; @@ -2999,6 +3001,7 @@ static int add_pending_dir_move(struct send_ctx *sctx, pm->parent_ino = parent_ino; pm->ino = ino; pm->gen = ino_gen; + pm->is_orphan = is_orphan; INIT_LIST_HEAD(&pm->list); INIT_LIST_HEAD(&pm->update_refs); RB_CLEAR_NODE(&pm->node); @@ -3131,16 +3134,20 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) rmdir_ino = dm->rmdir_ino; free_waiting_dir_move(sctx, dm); - ret = get_first_ref(sctx->parent_root, pm->ino, - &parent_ino, &parent_gen, name); - if (ret < 0) - goto out; - - ret = get_cur_path(sctx, parent_ino, parent_gen, - from_path); - if (ret < 0) - goto out; - ret = fs_path_add_path(from_path, name); + if (pm->is_orphan) { + ret = gen_unique_name(sctx, pm->ino, + pm->gen, from_path); + } else { + ret = get_first_ref(sctx->parent_root, pm->ino, + &parent_ino, &parent_gen, name); + if (ret < 0) + goto out; + ret = get_cur_path(sctx, parent_ino, parent_gen, + from_path); + if (ret < 0) + goto out; + ret = fs_path_add_path(from_path, name); + } if (ret < 0) goto out; @@ -3150,7 +3157,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) LIST_HEAD(deleted_refs); ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, - &pm->update_refs, &deleted_refs); + &pm->update_refs, &deleted_refs, + pm->is_orphan); if (ret < 0) goto out; if (rmdir_ino) { @@ -3283,6 +3291,127 @@ static int apply_children_dir_moves(struct send_ctx *sctx) return ret; } +/* + * We might need to delay a directory rename even when no ancestor directory + * (in the send root) with a higher inode number than ours (sctx->cur_ino) was + * renamed. This happens when we rename a directory to the old name (the name + * in the parent root) of some other unrelated directory that got its rename + * delayed due to some ancestor with higher number that got renamed. + * + * Example: + * + * Parent snapshot: + * . (ino 256) + * |---- a/ (ino 257) + * | |---- file (ino 260) + * | + * |---- b/ (ino 258) + * |---- c/ (ino 259) + * + * Send snapshot: + * . (ino 256) + * |---- a/ (ino 258) + * |---- x/ (ino 259) + * |---- y/ (ino 257) + * |----- file (ino 260) + * + * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257 + * from 'a' to 'x/y' happening first, which in turn depends on the rename of + * inode 259 from 'c' to 'x'. So the order of rename commands the send stream + * must issue is: + * + * 1 - rename 259 from 'c' to 'x' + * 2 - rename 257 from 'a' to 'x/y' + * 3 - rename 258 from 'b' to 'a' + * + * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can + * be done right away and < 0 on error. 
+ */ +static int wait_for_dest_dir_move(struct send_ctx *sctx, + struct recorded_ref *parent_ref, + const bool is_orphan) +{ + struct btrfs_path *path; + struct btrfs_key key; + struct btrfs_key di_key; + struct btrfs_dir_item *di; + u64 left_gen; + u64 right_gen; + int ret = 0; + + if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) + return 0; + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + + key.objectid = parent_ref->dir; + key.type = BTRFS_DIR_ITEM_KEY; + key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len); + + ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); + if (ret < 0) { + goto out; + } else if (ret > 0) { + ret = 0; + goto out; + } + + di = btrfs_match_dir_item_name(sctx->parent_root, path, + parent_ref->name, parent_ref->name_len); + if (!di) { + ret = 0; + goto out; + } + /* + * di_key.objectid has the number of the inode that has a dentry in the + * parent directory with the same name that sctx->cur_ino is being + * renamed to. We need to check if that inode is in the send root as + * well and if it is currently marked as an inode with a pending rename, + * if it is, we need to delay the rename of sctx->cur_ino as well, so + * that it happens after that other inode is renamed. + */ + btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); + if (di_key.type != BTRFS_INODE_ITEM_KEY) { + ret = 0; + goto out; + } + + ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL, + &left_gen, NULL, NULL, NULL, NULL); + if (ret < 0) + goto out; + ret = get_inode_info(sctx->send_root, di_key.objectid, NULL, + &right_gen, NULL, NULL, NULL, NULL); + if (ret < 0) { + if (ret == -ENOENT) + ret = 0; + goto out; + } + + /* Different inode, no need to delay the rename of sctx->cur_ino */ + if (right_gen != left_gen) { + ret = 0; + goto out; + } + + if (is_waiting_for_move(sctx, di_key.objectid)) { + ret = add_pending_dir_move(sctx, + sctx->cur_ino, + sctx->cur_inode_gen, + di_key.objectid, + &sctx->new_refs, + &sctx->deleted_refs, + is_orphan); + if (!ret) + ret = 1; + } +out: + btrfs_free_path(path); + return ret; +} + static int wait_for_parent_move(struct send_ctx *sctx, struct recorded_ref *parent_ref) { @@ -3349,7 +3478,8 @@ static int wait_for_parent_move(struct send_ctx *sctx, sctx->cur_inode_gen, ino, &sctx->new_refs, - &sctx->deleted_refs); + &sctx->deleted_refs, + false); if (!ret) ret = 1; } @@ -3372,6 +3502,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) int did_overwrite = 0; int is_orphan = 0; u64 last_dir_ino_rm = 0; + bool can_rename = true; verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); @@ -3490,12 +3621,22 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); } } + if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { + ret = wait_for_dest_dir_move(sctx, cur, is_orphan); + if (ret < 0) + goto out; + if (ret == 1) { + can_rename = false; + *pending_move = 1; + } + } + /* * link/move the ref to the new place. If we have an orphan * inode, move it and update valid_path. If not, link or move * it depending on the inode mode. */ - if (is_orphan) { + if (is_orphan && can_rename) { ret = send_rename(sctx, valid_path, cur->full_path); if (ret < 0) goto out; @@ -3503,7 +3644,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); ret = fs_path_copy(valid_path, cur->full_path); if (ret < 0) goto out; - } else { + } else if (can_rename) { if (S_ISDIR(sctx->cur_inode_mode)) { /* * Dirs can't be linked, so move it. 
For moved diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 7e80f32550a663..88e51aded6bda9 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1052,9 +1052,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); if (ret) return ret; - ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); - if (ret) - return ret; } return 0; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a37f8b39bae90..c5b8ba37f88e31 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1012,7 +1012,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, base = btrfs_item_ptr_offset(leaf, path->slots[0]); while (cur_offset < item_size) { - extref = (struct btrfs_inode_extref *)base + cur_offset; + extref = (struct btrfs_inode_extref *)(base + cur_offset); victim_name_len = btrfs_inode_extref_name_len(leaf, extref); diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 47b19465f0dc64..883b93623bc568 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -111,6 +111,8 @@ static int do_setxattr(struct btrfs_trans_handle *trans, name, name_len, -1); if (!di && (flags & XATTR_REPLACE)) ret = -ENODATA; + else if (IS_ERR(di)) + ret = PTR_ERR(di); else if (di) ret = btrfs_delete_one_dir_name(trans, root, path, di); goto out; @@ -127,10 +129,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans, ASSERT(mutex_is_locked(&inode->i_mutex)); di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, name_len, 0); - if (!di) { + if (!di) ret = -ENODATA; + else if (IS_ERR(di)) + ret = PTR_ERR(di); + if (ret) goto out; - } btrfs_release_path(path); di = NULL; } diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 90d1882b306fac..5ba029e627cc22 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -124,7 +124,7 @@ ecryptfs_get_key_payload_data(struct key *key) } #define ECRYPTFS_MAX_KEYSET_SIZE 1024 -#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32 +#define ECRYPTFS_MAX_CIPHER_NAME_SIZE 31 #define ECRYPTFS_MAX_NUM_ENC_KEYS 64 #define ECRYPTFS_MAX_IV_BYTES 16 /* 128 bits */ #define ECRYPTFS_SALT_BYTES 2 @@ -237,7 +237,7 @@ struct ecryptfs_crypt_stat { struct crypto_ablkcipher *tfm; struct crypto_hash *hash_tfm; /* Crypto context for generating * the initialization vectors */ - unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE]; + unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1]; unsigned char key[ECRYPTFS_MAX_KEY_BYTES]; unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES]; struct list_head keysig_list; diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index b07731e68c0b4d..fd39bad6f1bdf8 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -303,9 +303,22 @@ ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct file *lower_file = ecryptfs_file_to_lower(file); long rc = -ENOTTY; - if (lower_file->f_op->unlocked_ioctl) + if (!lower_file->f_op->unlocked_ioctl) + return rc; + + switch (cmd) { + case FITRIM: + case FS_IOC_GETFLAGS: + case FS_IOC_SETFLAGS: + case FS_IOC_GETVERSION: + case FS_IOC_SETVERSION: rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg); - return rc; + fsstack_copy_attr_all(file_inode(file), file_inode(lower_file)); + + return rc; + default: + return rc; + } } #ifdef CONFIG_COMPAT @@ -315,9 +328,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct file *lower_file = ecryptfs_file_to_lower(file); long rc = -ENOIOCTLCMD; 
- if (lower_file->f_op->compat_ioctl) + if (!lower_file->f_op->compat_ioctl) + return rc; + + switch (cmd) { + case FITRIM: + case FS_IOC32_GETFLAGS: + case FS_IOC32_SETFLAGS: + case FS_IOC32_GETVERSION: + case FS_IOC32_SETVERSION: rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg); - return rc; + fsstack_copy_attr_all(file_inode(file), file_inode(lower_file)); + + return rc; + default: + return rc; + } } #endif diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 917bd5c9776aab..6bd67e2011f083 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -891,7 +891,7 @@ struct ecryptfs_parse_tag_70_packet_silly_stack { struct blkcipher_desc desc; char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1]; char iv[ECRYPTFS_MAX_IV_BYTES]; - char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE]; + char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1]; }; /** diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 1895d60f4122c2..c095d32642599f 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -407,7 +407,7 @@ static int ecryptfs_parse_options(struct ecryptfs_sb_info *sbi, char *options, if (!cipher_name_set) { int cipher_name_len = strlen(ECRYPTFS_DEFAULT_CIPHER); - BUG_ON(cipher_name_len >= ECRYPTFS_MAX_CIPHER_NAME_SIZE); + BUG_ON(cipher_name_len > ECRYPTFS_MAX_CIPHER_NAME_SIZE); strcpy(mount_crypt_stat->global_default_cipher_name, ECRYPTFS_DEFAULT_CIPHER); } diff --git a/fs/locks.c b/fs/locks.c index 365c82e1b3a9a6..f1bad681fc1ca3 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1665,7 +1665,8 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr } if (my_fl != NULL) { - error = lease->fl_lmops->lm_change(my_fl, arg, &dispose); + lease = my_fl; + error = lease->fl_lmops->lm_change(lease, arg, &dispose); if (error) goto out; goto out_setup; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index f9f4845db989e5..19874151e95c99 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -433,7 +433,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat static bool nfs_client_init_is_complete(const struct nfs_client *clp) { - return clp->cl_cons_state != NFS_CS_INITING; + return clp->cl_cons_state <= NFS_CS_READY; } int nfs_wait_client_init_complete(const struct nfs_client *clp) diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index a1f0685b42ff7d..a6ad6886588034 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -181,8 +181,8 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); spin_unlock(&delegation->lock); - put_rpccred(oldcred); rcu_read_unlock(); + put_rpccred(oldcred); trace_nfs4_reclaim_delegation(inode, res->delegation_type); } else { /* We appear to have raced with a delegation return. 
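The first fs/nfs/delegation.c hunk above only drops the old credential after leaving the RCU read-side section, because put_rpccred() may sleep. A stripped-down sketch of that ordering, with the foo_* structures standing in for the NFS-specific ones; get_rpccred()/put_rpccred() and the RCU primitives are the real interfaces.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/sunrpc/auth.h>

struct foo_delegation {                         /* placeholder for the real struct */
    spinlock_t lock;
    struct rpc_cred *cred;
};

struct foo_inode {                              /* placeholder for the real struct */
    struct foo_delegation __rcu *delegation;
};

static void foo_swap_cred(struct foo_inode *fi, struct rpc_cred *newcred)
{
    struct foo_delegation *delegation;
    struct rpc_cred *oldcred = NULL;

    rcu_read_lock();
    delegation = rcu_dereference(fi->delegation);
    if (delegation) {
        spin_lock(&delegation->lock);
        oldcred = delegation->cred;
        delegation->cred = get_rpccred(newcred);
        spin_unlock(&delegation->lock);
    }
    rcu_read_unlock();

    if (oldcred)
        put_rpccred(oldcred);   /* may sleep, so not under rcu_read_lock() */
}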
*/ @@ -370,7 +370,10 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct delegation = NULL; goto out; } - freeme = nfs_detach_delegation_locked(nfsi, + if (test_and_set_bit(NFS_DELEGATION_RETURNING, + &old_delegation->flags)) + goto out; + freeme = nfs_detach_delegation_locked(nfsi, old_delegation, clp); if (freeme == NULL) goto out; @@ -433,6 +436,8 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation) { bool ret = false; + if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) + goto out; if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) ret = true; if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) { @@ -444,6 +449,7 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation) ret = true; spin_unlock(&delegation->lock); } +out: return ret; } @@ -471,14 +477,20 @@ int nfs_client_return_marked_delegations(struct nfs_client *clp) super_list) { if (!nfs_delegation_need_return(delegation)) continue; - inode = nfs_delegation_grab_inode(delegation); - if (inode == NULL) + if (!nfs_sb_active(server->super)) continue; + inode = nfs_delegation_grab_inode(delegation); + if (inode == NULL) { + rcu_read_unlock(); + nfs_sb_deactive(server->super); + goto restart; + } delegation = nfs_start_delegation_return_locked(NFS_I(inode)); rcu_read_unlock(); err = nfs_end_delegation_return(inode, delegation, 0); iput(inode); + nfs_sb_deactive(server->super); if (!err) goto restart; set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); @@ -809,19 +821,30 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp) list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { list_for_each_entry_rcu(delegation, &server->delegations, super_list) { + if (test_bit(NFS_DELEGATION_RETURNING, + &delegation->flags)) + continue; if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) continue; - inode = nfs_delegation_grab_inode(delegation); - if (inode == NULL) + if (!nfs_sb_active(server->super)) continue; - delegation = nfs_detach_delegation(NFS_I(inode), - delegation, server); + inode = nfs_delegation_grab_inode(delegation); + if (inode == NULL) { + rcu_read_unlock(); + nfs_sb_deactive(server->super); + goto restart; + } + delegation = nfs_start_delegation_return_locked(NFS_I(inode)); rcu_read_unlock(); - - if (delegation != NULL) - nfs_free_delegation(delegation); + if (delegation != NULL) { + delegation = nfs_detach_delegation(NFS_I(inode), + delegation, server); + if (delegation != NULL) + nfs_free_delegation(delegation); + } iput(inode); + nfs_sb_deactive(server->super); goto restart; } } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 9b0c55cb2a2eaa..c19e16f0b2d049 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -408,14 +408,22 @@ static int xdr_decode(nfs_readdir_descriptor_t *desc, return 0; } +/* Match file and dirent using either filehandle or fileid + * Note: caller is responsible for checking the fsid + */ static int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) { + struct nfs_inode *nfsi; + if (dentry->d_inode == NULL) goto different; - if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0) - goto different; - return 1; + + nfsi = NFS_I(dentry->d_inode); + if (entry->fattr->fileid == nfsi->fileid) + return 1; + if (nfs_compare_fh(entry->fh, &nfsi->fh) == 0) + return 1; different: return 0; } @@ -469,6 +477,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) struct inode *inode; int status; + if (!(entry->fattr->valid & 
NFS_ATTR_FATTR_FILEID)) + return; + if (!(entry->fattr->valid & NFS_ATTR_FATTR_FSID)) + return; if (filename.name[0] == '.') { if (filename.len == 1) return; @@ -479,6 +491,10 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) dentry = d_lookup(parent, &filename); if (dentry != NULL) { + /* Is there a mountpoint here? If so, just exit */ + if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid, + &entry->fattr->fsid)) + goto out; if (nfs_same_file(dentry, entry)) { nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); status = nfs_refresh_inode(dentry->d_inode, entry->fattr); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 94712fc781fa58..e679d24c39d3a5 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -178,7 +178,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) iocb->ki_filp, iov_iter_count(to), (unsigned long) iocb->ki_pos); - result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping); + result = nfs_revalidate_mapping_protected(inode, iocb->ki_filp->f_mapping); if (!result) { result = generic_file_read_iter(iocb, to); if (result > 0) @@ -199,7 +199,7 @@ nfs_file_splice_read(struct file *filp, loff_t *ppos, dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n", filp, (unsigned long) count, (unsigned long long) *ppos); - res = nfs_revalidate_mapping(inode, filp->f_mapping); + res = nfs_revalidate_mapping_protected(inode, filp->f_mapping); if (!res) { res = generic_file_splice_read(filp, ppos, pipe, count, flags); if (res > 0) @@ -372,6 +372,10 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping, nfs_wait_bit_killable, TASK_KILLABLE); if (ret) return ret; + /* + * Wait for O_DIRECT to complete + */ + nfs_inode_dio_wait(mapping->host); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -619,6 +623,9 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) /* make sure the cache has finished storing the page */ nfs_fscache_wait_on_page_write(NFS_I(inode), page); + wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING, + nfs_wait_bit_killable, TASK_KILLABLE); + lock_page(page); mapping = page_file_mapping(page); if (mapping != inode->i_mapping) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 83107be3dd0109..d42dff6d5e983f 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -556,6 +556,7 @@ EXPORT_SYMBOL_GPL(nfs_setattr); * This is a copy of the common vmtruncate, but with the locking * corrected to take into account the fact that NFS requires * inode->i_size to be updated under the inode->i_lock. + * Note: must be called with inode->i_lock held! */ static int nfs_vmtruncate(struct inode * inode, loff_t offset) { @@ -565,14 +566,14 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset) if (err) goto out; - spin_lock(&inode->i_lock); i_size_write(inode, offset); /* Optimisation */ if (offset == 0) NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA; - spin_unlock(&inode->i_lock); + spin_unlock(&inode->i_lock); truncate_pagecache(inode, offset); + spin_lock(&inode->i_lock); out: return err; } @@ -585,10 +586,15 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset) * Note: we do this in the *proc.c in order to ensure that * it works for things like exclusive creates too. */ -void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr) +void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, + struct nfs_fattr *fattr) { + /* Barrier: bump the attribute generation count. 
*/ + nfs_fattr_set_barrier(fattr); + + spin_lock(&inode->i_lock); + NFS_I(inode)->attr_gencount = fattr->gencount; if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) { - spin_lock(&inode->i_lock); if ((attr->ia_valid & ATTR_MODE) != 0) { int mode = attr->ia_mode & S_IALLUGO; mode |= inode->i_mode & ~S_IALLUGO; @@ -600,12 +606,13 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr) inode->i_gid = attr->ia_gid; nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL); - spin_unlock(&inode->i_lock); } if ((attr->ia_valid & ATTR_SIZE) != 0) { nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC); nfs_vmtruncate(inode, attr->ia_size); } + nfs_update_inode(inode, fattr); + spin_unlock(&inode->i_lock); } EXPORT_SYMBOL_GPL(nfs_setattr_update_inode); @@ -1028,6 +1035,7 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map if (mapping->nrpages != 0) { if (S_ISREG(inode->i_mode)) { + unmap_mapping_range(mapping, 0, 0, 0); ret = nfs_sync_mapping(mapping); if (ret < 0) return ret; @@ -1060,11 +1068,14 @@ static bool nfs_mapping_need_revalidate_inode(struct inode *inode) } /** - * nfs_revalidate_mapping - Revalidate the pagecache + * __nfs_revalidate_mapping - Revalidate the pagecache * @inode - pointer to host inode * @mapping - pointer to mapping + * @may_lock - take inode->i_mutex? */ -int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) +static int __nfs_revalidate_mapping(struct inode *inode, + struct address_space *mapping, + bool may_lock) { struct nfs_inode *nfsi = NFS_I(inode); unsigned long *bitlock = &nfsi->flags; @@ -1113,7 +1124,12 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) nfsi->cache_validity &= ~NFS_INO_INVALID_DATA; spin_unlock(&inode->i_lock); trace_nfs_invalidate_mapping_enter(inode); - ret = nfs_invalidate_mapping(inode, mapping); + if (may_lock) { + mutex_lock(&inode->i_mutex); + ret = nfs_invalidate_mapping(inode, mapping); + mutex_unlock(&inode->i_mutex); + } else + ret = nfs_invalidate_mapping(inode, mapping); trace_nfs_invalidate_mapping_exit(inode, ret); clear_bit_unlock(NFS_INO_INVALIDATING, bitlock); @@ -1123,6 +1139,29 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) return ret; } +/** + * nfs_revalidate_mapping - Revalidate the pagecache + * @inode - pointer to host inode + * @mapping - pointer to mapping + */ +int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping) +{ + return __nfs_revalidate_mapping(inode, mapping, false); +} + +/** + * nfs_revalidate_mapping_protected - Revalidate the pagecache + * @inode - pointer to host inode + * @mapping - pointer to mapping + * + * Differs from nfs_revalidate_mapping() in that it grabs the inode->i_mutex + * while invalidating the mapping. 
+ */ +int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping) +{ + return __nfs_revalidate_mapping(inode, mapping, true); +} + static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) { struct nfs_inode *nfsi = NFS_I(inode); @@ -1231,13 +1270,6 @@ static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fat return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0; } -static int nfs_size_need_update(const struct inode *inode, const struct nfs_fattr *fattr) -{ - if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) - return 0; - return nfs_size_to_loff_t(fattr->size) > i_size_read(inode); -} - static atomic_long_t nfs_attr_generation_counter; static unsigned long nfs_read_attr_generation_counter(void) @@ -1249,6 +1281,7 @@ unsigned long nfs_inc_attr_generation_counter(void) { return atomic_long_inc_return(&nfs_attr_generation_counter); } +EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter); void nfs_fattr_init(struct nfs_fattr *fattr) { @@ -1260,6 +1293,22 @@ void nfs_fattr_init(struct nfs_fattr *fattr) } EXPORT_SYMBOL_GPL(nfs_fattr_init); +/** + * nfs_fattr_set_barrier + * @fattr: attributes + * + * Used to set a barrier after an attribute was updated. This + * barrier ensures that older attributes from RPC calls that may + * have raced with our update cannot clobber these new values. + * Note that you are still responsible for ensuring that other + * operations which change the attribute on the server do not + * collide. + */ +void nfs_fattr_set_barrier(struct nfs_fattr *fattr) +{ + fattr->gencount = nfs_inc_attr_generation_counter(); +} + struct nfs_fattr *nfs_alloc_fattr(void) { struct nfs_fattr *fattr; @@ -1370,7 +1419,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || nfs_ctime_need_update(inode, fattr) || - nfs_size_need_update(inode, fattr) || ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); } @@ -1460,6 +1508,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr) int status; spin_lock(&inode->i_lock); + nfs_fattr_set_barrier(fattr); status = nfs_post_op_update_inode_locked(inode, fattr); spin_unlock(&inode->i_lock); @@ -1468,7 +1517,7 @@ int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr) EXPORT_SYMBOL_GPL(nfs_post_op_update_inode); /** - * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache + * nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache * @inode - pointer to inode * @fattr - updated attributes * @@ -1478,11 +1527,10 @@ EXPORT_SYMBOL_GPL(nfs_post_op_update_inode); * * This function is mainly designed to be used by the ->write_done() functions. 
*/ -int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr) +int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr) { int status; - spin_lock(&inode->i_lock); /* Don't do a WCC update if these attributes are already stale */ if ((fattr->valid & NFS_ATTR_FATTR) == 0 || !nfs_inode_attrs_need_update(inode, fattr)) { @@ -1514,6 +1562,27 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa } out_noforce: status = nfs_post_op_update_inode_locked(inode, fattr); + return status; +} + +/** + * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache + * @inode - pointer to inode + * @fattr - updated attributes + * + * After an operation that has changed the inode metadata, mark the + * attribute cache as being invalid, then try to update it. Fake up + * weak cache consistency data, if none exist. + * + * This function is mainly designed to be used by the ->write_done() functions. + */ +int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr) +{ + int status; + + spin_lock(&inode->i_lock); + nfs_fattr_set_barrier(fattr); + status = nfs_post_op_update_inode_force_wcc_locked(inode, fattr); spin_unlock(&inode->i_lock); return status; } @@ -1715,6 +1784,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE); nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); nfsi->attrtimeo_timestamp = now; + /* Set barrier to be more recent than all outstanding updates */ nfsi->attr_gencount = nfs_inc_attr_generation_counter(); } else { if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) { @@ -1722,6 +1792,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode); nfsi->attrtimeo_timestamp = now; } + /* Set the barrier to be more recent than this fattr */ + if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0) + nfsi->attr_gencount = fattr->gencount; } invalid &= ~NFS_INO_INVALID_ATTR; /* Don't invalidate the data if we were to blame */ diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index b802fb3a2d99ff..9e6475bc5ba22b 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -459,6 +459,7 @@ void nfs_mark_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo, u32 ds_commit_idx); int nfs_write_need_commit(struct nfs_pgio_header *); +void nfs_writeback_update_inode(struct nfs_pgio_header *hdr); int nfs_generic_commit_list(struct inode *inode, struct list_head *head, int how, struct nfs_commit_info *cinfo); void nfs_retry_commit(struct list_head *page_list, diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 78e557c3ab87d0..1f11d2533ee410 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -138,7 +138,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, nfs_fattr_init(fattr); status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); if (status == 0) - nfs_setattr_update_inode(inode, sattr); + nfs_setattr_update_inode(inode, sattr, fattr); dprintk("NFS reply setattr: %d\n", status); return status; } @@ -834,7 +834,7 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) if (nfs3_async_handle_jukebox(task, inode)) return -EAGAIN; if (task->tk_status >= 0) - nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr); + nfs_writeback_update_inode(hdr); return 0; } diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 
2a932fdc57cb37..53852a4bd88be6 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1987,6 +1987,11 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (entry->fattr->valid & NFS_ATTR_FATTR_V3) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); + if (entry->fattr->fileid != entry->ino) { + entry->fattr->mounted_on_fileid = entry->ino; + entry->fattr->valid |= NFS_ATTR_FATTR_MOUNTED_ON_FILEID; + } + /* In fact, a post_op_fh3: */ p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 8646af9b11d2e1..86d6214ea022fe 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -621,6 +621,9 @@ int nfs41_walk_client_list(struct nfs_client *new, spin_lock(&nn->nfs_client_lock); list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { + if (pos == new) + goto found; + if (pos->rpc_ops != new->rpc_ops) continue; @@ -639,10 +642,6 @@ int nfs41_walk_client_list(struct nfs_client *new, prev = pos; status = nfs_wait_client_init_complete(pos); - if (pos->cl_cons_state == NFS_CS_SESSION_INITING) { - nfs4_schedule_lease_recovery(pos); - status = nfs4_wait_clnt_recover(pos); - } spin_lock(&nn->nfs_client_lock); if (status < 0) break; @@ -668,7 +667,7 @@ int nfs41_walk_client_list(struct nfs_client *new, */ if (!nfs4_match_client_owner_id(pos, new)) continue; - +found: atomic_inc(&pos->cl_count); *result = pos; status = 0; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 88180ac5ea0eeb..627f37c4445675 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -901,6 +901,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo) if (!cinfo->atomic || cinfo->before != dir->i_version) nfs_force_lookup_revalidate(dir); dir->i_version = cinfo->after; + nfsi->attr_gencount = nfs_inc_attr_generation_counter(); nfs_fscache_invalidate(dir); spin_unlock(&dir->i_lock); } @@ -1552,6 +1553,9 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod opendata->o_arg.open_flags = 0; opendata->o_arg.fmode = fmode; + opendata->o_arg.share_access = nfs4_map_atomic_open_share( + NFS_SB(opendata->dentry->d_sb), + fmode, 0); memset(&opendata->o_res, 0, sizeof(opendata->o_res)); memset(&opendata->c_res, 0, sizeof(opendata->c_res)); nfs4_init_opendata_res(opendata); @@ -2413,8 +2417,8 @@ static int _nfs4_do_open(struct inode *dir, opendata->o_res.f_attr, sattr, state, label, olabel); if (status == 0) { - nfs_setattr_update_inode(state->inode, sattr); - nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); + nfs_setattr_update_inode(state->inode, sattr, + opendata->o_res.f_attr); nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel); } } @@ -2651,7 +2655,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) case -NFS4ERR_BAD_STATEID: case -NFS4ERR_EXPIRED: if (!nfs4_stateid_match(&calldata->arg.stateid, - &state->stateid)) { + &state->open_stateid)) { rpc_restart_call_prepare(task); goto out_release; } @@ -2687,7 +2691,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); - nfs4_stateid_copy(&calldata->arg.stateid, &state->stateid); + nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid); /* Calculate the change in open mode */ calldata->arg.fmode = 0; if (state->n_rdwr == 0) { @@ -3288,7 +3292,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct 
nfs_fattr *fattr, status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label); if (status == 0) { - nfs_setattr_update_inode(inode, sattr); + nfs_setattr_update_inode(inode, sattr, fattr); nfs_setsecurity(inode, fattr, label); } nfs4_label_free(label); @@ -4234,7 +4238,7 @@ static int nfs4_write_done_cb(struct rpc_task *task, } if (task->tk_status >= 0) { renew_lease(NFS_SERVER(inode), hdr->timestamp); - nfs_post_op_update_inode_force_wcc(inode, &hdr->fattr); + nfs_writeback_update_inode(hdr); } return 0; } @@ -6893,9 +6897,13 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, if (status == 0) { clp->cl_clientid = res.clientid; - clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R); - if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) + clp->cl_exchange_flags = res.flags; + /* Client ID is not confirmed */ + if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) { + clear_bit(NFS4_SESSION_ESTABLISHED, + &clp->cl_session->session_state); clp->cl_seqid = res.seqid; + } kfree(clp->cl_serverowner); clp->cl_serverowner = res.server_owner; @@ -7227,6 +7235,9 @@ static void nfs4_update_session(struct nfs4_session *session, struct nfs41_create_session_res *res) { nfs4_copy_sessionid(&session->sess_id, &res->sessionid); + /* Mark client id and session as being confirmed */ + session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; + set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state); session->flags = res->flags; memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); if (res->flags & SESSION4_BACK_CHAN) @@ -7322,8 +7333,8 @@ int nfs4_proc_destroy_session(struct nfs4_session *session, dprintk("--> nfs4_proc_destroy_session\n"); /* session is still being setup */ - if (session->clp->cl_cons_state != NFS_CS_READY) - return status; + if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state)) + return 0; status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); trace_nfs4_destroy_session(session->clp, status); diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index fc46c745589863..e3ea2c5324d68e 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -70,6 +70,7 @@ struct nfs4_session { enum nfs4_session_state { NFS4_SESSION_INITING, + NFS4_SESSION_ESTABLISHED, }; extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5ad908e9ce9c33..f95e3b58bbc304 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -346,9 +346,23 @@ int nfs41_discover_server_trunking(struct nfs_client *clp, status = nfs4_proc_exchange_id(clp, cred); if (status != NFS4_OK) return status; - set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); - return nfs41_walk_client_list(clp, result, cred); + status = nfs41_walk_client_list(clp, result, cred); + if (status < 0) + return status; + if (clp != *result) + return 0; + + /* Purge state if the client id was established in a prior instance */ + if (clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R) + set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state); + else + set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); + nfs4_schedule_state_manager(clp); + status = nfs_wait_client_init_complete(clp); + if (status < 0) + nfs_put_client(clp); + return status; } #endif /* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index b09cc23d6f433b..c63189acd0523c 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -139,7 +139,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, nfs_fattr_init(fattr); 
status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0); if (status == 0) - nfs_setattr_update_inode(inode, sattr); + nfs_setattr_update_inode(inode, sattr, fattr); dprintk("NFS reply setattr: %d\n", status); return status; } @@ -609,10 +609,8 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task, static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) { - struct inode *inode = hdr->inode; - if (task->tk_status >= 0) - nfs_post_op_update_inode_force_wcc(inode, hdr->res.fattr); + nfs_writeback_update_inode(hdr); return 0; } diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 595d81e354d189..849ed784d6ac1f 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1377,6 +1377,36 @@ static int nfs_should_remove_suid(const struct inode *inode) return 0; } +static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, + struct nfs_fattr *fattr) +{ + struct nfs_pgio_args *argp = &hdr->args; + struct nfs_pgio_res *resp = &hdr->res; + + if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) + return; + if (argp->offset + resp->count != fattr->size) + return; + if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) + return; + /* Set attribute barrier */ + nfs_fattr_set_barrier(fattr); +} + +void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) +{ + struct nfs_fattr *fattr = hdr->res.fattr; + struct inode *inode = hdr->inode; + + if (fattr == NULL) + return; + spin_lock(&inode->i_lock); + nfs_writeback_check_extend(hdr, fattr); + nfs_post_op_update_inode_force_wcc_locked(inode, fattr); + spin_unlock(&inode->i_lock); +} +EXPORT_SYMBOL_GPL(nfs_writeback_update_inode); + /* * This function is called when the WRITE call is complete. */ diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index f551a9299ac987..306178d7309f19 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -126,6 +126,8 @@ struct cpuidle_driver { #ifdef CONFIG_CPU_IDLE extern void disable_cpuidle(void); +extern bool cpuidle_not_available(struct cpuidle_driver *drv, + struct cpuidle_device *dev); extern int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev); @@ -150,11 +152,17 @@ extern void cpuidle_resume(void); extern int cpuidle_enable_device(struct cpuidle_device *dev); extern void cpuidle_disable_device(struct cpuidle_device *dev); extern int cpuidle_play_dead(void); -extern void cpuidle_enter_freeze(void); +extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev); +extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, + struct cpuidle_device *dev); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); #else static inline void disable_cpuidle(void) { } +static inline bool cpuidle_not_available(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return true; } static inline int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) {return -ENODEV; } @@ -183,7 +191,12 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev) {return -ENODEV; } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline int cpuidle_play_dead(void) {return -ENODEV; } -static inline void cpuidle_enter_freeze(void) { } +static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return -ENODEV; } +static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, + struct cpuidle_device *dev) +{return -ENODEV; } static inline struct cpuidle_driver *cpuidle_get_cpu_driver( struct cpuidle_device 
*dev) {return NULL; } #endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index d9b05b5bf8c795..2e88580194f023 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -52,11 +52,17 @@ * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. - * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend + * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee + * that this interrupt will wake the system from a suspended + * state. See Documentation/power/suspend-and-interrupts.txt * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set * IRQF_NO_THREAD - Interrupt cannot be threaded * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device * resume time. + * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this + * interrupt handler after suspending interrupts. For system + * wakeup devices users need to implement wakeup detection in + * their interrupt handlers. */ #define IRQF_DISABLED 0x00000020 #define IRQF_SHARED 0x00000080 @@ -70,6 +76,7 @@ #define IRQF_FORCE_RESUME 0x00008000 #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 +#define IRQF_COND_SUSPEND 0x00040000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index faf433af425e41..dd1109fb241e42 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -78,6 +78,7 @@ struct irq_desc { #ifdef CONFIG_PM_SLEEP unsigned int nr_actions; unsigned int no_suspend_depth; + unsigned int cond_suspend_depth; unsigned int force_resume_depth; #endif #ifdef CONFIG_PROC_FS diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 2f77e0c651c898..b01ccf371fdcaf 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -343,6 +343,7 @@ extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr); +extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr); extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *); extern void nfs_access_set_mask(struct nfs_access_entry *, u32); @@ -355,8 +356,9 @@ extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode); extern int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode); extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *); extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); +extern int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping); extern int nfs_setattr(struct dentry *, struct iattr *); -extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); +extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, struct nfs4_label *label); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); @@ -369,6 +371,7 @@ extern struct nfs_lock_context 
*nfs_get_lock_context(struct nfs_open_context *ct extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); extern u64 nfs_compat_user_ino64(u64 fileid); extern void nfs_fattr_init(struct nfs_fattr *fattr); +extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr); extern unsigned long nfs_inc_attr_generation_counter(void); extern struct nfs_fattr *nfs_alloc_fattr(void); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 196a06fbc122fe..886d09e691d5a8 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1474,8 +1474,13 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, * otherwise we'll have trouble later trying to figure out * which interrupt is which (messes up the interrupt freeing * logic etc). + * + * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and + * it cannot be set along with IRQF_NO_SUSPEND. */ - if ((irqflags & IRQF_SHARED) && !dev_id) + if (((irqflags & IRQF_SHARED) && !dev_id) || + (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || + ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) return -EINVAL; desc = irq_to_desc(irq); diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 3ca53259270455..5204a6d1b9854f 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -43,9 +43,12 @@ void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) if (action->flags & IRQF_NO_SUSPEND) desc->no_suspend_depth++; + else if (action->flags & IRQF_COND_SUSPEND) + desc->cond_suspend_depth++; WARN_ON_ONCE(desc->no_suspend_depth && - desc->no_suspend_depth != desc->nr_actions); + (desc->no_suspend_depth + + desc->cond_suspend_depth) != desc->nr_actions); } /* @@ -61,6 +64,8 @@ void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) if (action->flags & IRQF_NO_SUSPEND) desc->no_suspend_depth--; + else if (action->flags & IRQF_COND_SUSPEND) + desc->cond_suspend_depth--; } static bool suspend_device_irq(struct irq_desc *desc, int irq) diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 782172f073c5ed..01ca08804f5115 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -248,11 +248,12 @@ static int klp_find_external_symbol(struct module *pmod, const char *name, /* first, check if it's an exported symbol */ preempt_disable(); sym = find_symbol(name, NULL, NULL, true, true); - preempt_enable(); if (sym) { *addr = sym->value; + preempt_enable(); return 0; } + preempt_enable(); /* otherwise check if it's in another .o within the patch module */ return klp_find_object_symbol(pmod->name, name, addr); diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 94b2d7b88a2728..80014a17834214 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -82,6 +82,7 @@ static void cpuidle_idle_call(void) struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); int next_state, entered_state; unsigned int broadcast; + bool reflect; /* * Check if the idle task must be rescheduled. If it is the @@ -105,6 +106,9 @@ static void cpuidle_idle_call(void) */ rcu_idle_enter(); + if (cpuidle_not_available(drv, dev)) + goto use_default; + /* * Suspend-to-idle ("freeze") is a system state in which all user space * has been frozen, all I/O devices have been suspended and the only @@ -115,30 +119,24 @@ static void cpuidle_idle_call(void) * until a proper wakeup interrupt happens. 
*/ if (idle_should_freeze()) { - cpuidle_enter_freeze(); - local_irq_enable(); - goto exit_idle; - } + entered_state = cpuidle_enter_freeze(drv, dev); + if (entered_state >= 0) { + local_irq_enable(); + goto exit_idle; + } - /* - * Ask the cpuidle framework to choose a convenient idle state. - * Fall back to the default arch idle method on errors. - */ - next_state = cpuidle_select(drv, dev); - if (next_state < 0) { -use_default: + reflect = false; + next_state = cpuidle_find_deepest_state(drv, dev); + } else { + reflect = true; /* - * We can't use the cpuidle framework, let's use the default - * idle routine. + * Ask the cpuidle framework to choose a convenient idle state. */ - if (current_clr_polling_and_test()) - local_irq_enable(); - else - arch_cpu_idle(); - - goto exit_idle; + next_state = cpuidle_select(drv, dev); } - + /* Fall back to the default arch idle method on errors. */ + if (next_state < 0) + goto use_default; /* * The idle task must be scheduled, it is pointless to @@ -183,7 +181,8 @@ static void cpuidle_idle_call(void) /* * Give the governor an opportunity to reflect on the outcome */ - cpuidle_reflect(dev, entered_state); + if (reflect) + cpuidle_reflect(dev, entered_state); exit_idle: __current_set_polling(); @@ -196,6 +195,19 @@ static void cpuidle_idle_call(void) rcu_idle_exit(); start_critical_timings(); + return; + +use_default: + /* + * We can't use the cpuidle framework, let's use the default + * idle routine. + */ + if (current_clr_polling_and_test()) + local_irq_enable(); + else + arch_cpu_idle(); + + goto exit_idle; } /* diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 7e9acd9361c55b..91ffde82fa0c49 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -738,8 +738,9 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep) struct rpc_xprt *xprt = rep->rr_xprt; struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); __be32 *iptr; - int credits, rdmalen, status; + int rdmalen, status; unsigned long cwnd; + u32 credits; /* Check status. If bad, signal disconnect and return rep to pool */ if (rep->rr_len == ~0U) { diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index d1b70397c60f0d..0a16fb6f088590 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -285,7 +285,7 @@ rpcr_to_rdmar(struct rpc_rqst *rqst) */ struct rpcrdma_buffer { spinlock_t rb_lock; /* protects indexes */ - int rb_max_requests;/* client max requests */ + u32 rb_max_requests;/* client max requests */ struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */ struct list_head rb_all; int rb_send_index; diff --git a/sound/drivers/opl3/opl3_midi.c b/sound/drivers/opl3/opl3_midi.c index f62780ed64adcc..7821b07415a785 100644 --- a/sound/drivers/opl3/opl3_midi.c +++ b/sound/drivers/opl3/opl3_midi.c @@ -105,6 +105,8 @@ static void snd_opl3_calc_pitch(unsigned char *fnum, unsigned char *blocknum, int pitchbend = chan->midi_pitchbend; int segment; + if (pitchbend < -0x2000) + pitchbend = -0x2000; if (pitchbend > 0x1FFF) pitchbend = 0x1FFF; diff --git a/sound/firewire/dice/dice-interface.h b/sound/firewire/dice/dice-interface.h index 27b044f84c816f..de7602bd69b569 100644 --- a/sound/firewire/dice/dice-interface.h +++ b/sound/firewire/dice/dice-interface.h @@ -298,24 +298,24 @@ */ #define RX_ISOCHRONOUS 0x008 -/* - * Index of first quadlet to be interpreted; read/write. 
If > 0, that many - * quadlets at the beginning of each data block will be ignored, and all the - * audio and MIDI quadlets will follow. - */ -#define RX_SEQ_START 0x00c - /* * The number of audio channels; read-only. There will be one quadlet per * channel. */ -#define RX_NUMBER_AUDIO 0x010 +#define RX_NUMBER_AUDIO 0x00c /* * The number of MIDI ports, 0-8; read-only. If > 0, there will be one * additional quadlet in each data block, following the audio quadlets. */ -#define RX_NUMBER_MIDI 0x014 +#define RX_NUMBER_MIDI 0x010 + +/* + * Index of first quadlet to be interpreted; read/write. If > 0, that many + * quadlets at the beginning of each data block will be ignored, and all the + * audio and MIDI quadlets will follow. + */ +#define RX_SEQ_START 0x014 /* * Names of all audio channels; read-only. Quadlets are byte-swapped. Names diff --git a/sound/firewire/dice/dice-proc.c b/sound/firewire/dice/dice-proc.c index f5c1d1bced59fe..ecfe20fd4de578 100644 --- a/sound/firewire/dice/dice-proc.c +++ b/sound/firewire/dice/dice-proc.c @@ -99,9 +99,9 @@ static void dice_proc_read(struct snd_info_entry *entry, } tx; struct { u32 iso; - u32 seq_start; u32 number_audio; u32 number_midi; + u32 seq_start; char names[RX_NAMES_SIZE]; u32 ac3_caps; u32 ac3_enable; @@ -204,10 +204,10 @@ static void dice_proc_read(struct snd_info_entry *entry, break; snd_iprintf(buffer, "rx %u:\n", stream); snd_iprintf(buffer, " iso channel: %d\n", (int)buf.rx.iso); - snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); snd_iprintf(buffer, " audio channels: %u\n", buf.rx.number_audio); snd_iprintf(buffer, " midi ports: %u\n", buf.rx.number_midi); + snd_iprintf(buffer, " sequence start: %u\n", buf.rx.seq_start); if (quadlets >= 68) { dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE); snd_iprintf(buffer, " names: %s\n", buf.rx.names); diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c index 29ccb3637164f8..e6757cd8572422 100644 --- a/sound/firewire/oxfw/oxfw-stream.c +++ b/sound/firewire/oxfw/oxfw-stream.c @@ -171,9 +171,10 @@ static int start_stream(struct snd_oxfw *oxfw, struct amdtp_stream *stream, } /* Wait first packet */ - err = amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT); - if (err < 0) + if (!amdtp_stream_wait_callback(stream, CALLBACK_TIMEOUT)) { stop_stream(oxfw, stream); + err = -ETIMEDOUT; + } end: return err; } diff --git a/sound/isa/msnd/msnd_pinnacle_mixer.c b/sound/isa/msnd/msnd_pinnacle_mixer.c index 17e49a071af449..b408540798c164 100644 --- a/sound/isa/msnd/msnd_pinnacle_mixer.c +++ b/sound/isa/msnd/msnd_pinnacle_mixer.c @@ -306,11 +306,12 @@ int snd_msndmix_new(struct snd_card *card) spin_lock_init(&chip->mixer_lock); strcpy(card->mixername, "MSND Pinnacle Mixer"); - for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) + for (idx = 0; idx < ARRAY_SIZE(snd_msnd_controls); idx++) { err = snd_ctl_add(card, snd_ctl_new1(snd_msnd_controls + idx, chip)); if (err < 0) return err; + } return 0; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b2b24a8b3dac8c..526398a4a4428d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5209,6 +5209,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x17, 0x40000000}, {0x1d, 0x40700001}, {0x21, 0x02211040}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC255_STANDARD_PINS, + {0x12, 0x90a60170}, + {0x14, 0x90170140}, + {0x17, 0x40000000}, + {0x1d, 0x40700001}, + {0x21, 0x02211050}), 
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, {0x12, 0x90a60130}, {0x13, 0x40000000}, diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c index f5ad214663f98b..8de836165cf2ed 100644 --- a/sound/soc/atmel/sam9g20_wm8731.c +++ b/sound/soc/atmel/sam9g20_wm8731.c @@ -46,8 +46,6 @@ #include #include -#include - #include "../codecs/wm8731.h" #include "atmel-pcm.h" #include "atmel_ssc_dai.h" @@ -171,9 +169,7 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) int ret; if (!np) { - if (!(machine_is_at91sam9g20ek() || - machine_is_at91sam9g20ek_2mmc())) - return -ENODEV; + return -ENODEV; } ret = atmel_ssc_set_audio(0); @@ -210,39 +206,37 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev) card->dev = &pdev->dev; /* Parse device node info */ - if (np) { - ret = snd_soc_of_parse_card_name(card, "atmel,model"); - if (ret) - goto err; - - ret = snd_soc_of_parse_audio_routing(card, - "atmel,audio-routing"); - if (ret) - goto err; - - /* Parse codec info */ - at91sam9g20ek_dai.codec_name = NULL; - codec_np = of_parse_phandle(np, "atmel,audio-codec", 0); - if (!codec_np) { - dev_err(&pdev->dev, "codec info missing\n"); - return -EINVAL; - } - at91sam9g20ek_dai.codec_of_node = codec_np; - - /* Parse dai and platform info */ - at91sam9g20ek_dai.cpu_dai_name = NULL; - at91sam9g20ek_dai.platform_name = NULL; - cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0); - if (!cpu_np) { - dev_err(&pdev->dev, "dai and pcm info missing\n"); - return -EINVAL; - } - at91sam9g20ek_dai.cpu_of_node = cpu_np; - at91sam9g20ek_dai.platform_of_node = cpu_np; - - of_node_put(codec_np); - of_node_put(cpu_np); + ret = snd_soc_of_parse_card_name(card, "atmel,model"); + if (ret) + goto err; + + ret = snd_soc_of_parse_audio_routing(card, + "atmel,audio-routing"); + if (ret) + goto err; + + /* Parse codec info */ + at91sam9g20ek_dai.codec_name = NULL; + codec_np = of_parse_phandle(np, "atmel,audio-codec", 0); + if (!codec_np) { + dev_err(&pdev->dev, "codec info missing\n"); + return -EINVAL; + } + at91sam9g20ek_dai.codec_of_node = codec_np; + + /* Parse dai and platform info */ + at91sam9g20ek_dai.cpu_dai_name = NULL; + at91sam9g20ek_dai.platform_name = NULL; + cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0); + if (!cpu_np) { + dev_err(&pdev->dev, "dai and pcm info missing\n"); + return -EINVAL; } + at91sam9g20ek_dai.cpu_of_node = cpu_np; + at91sam9g20ek_dai.platform_of_node = cpu_np; + + of_node_put(codec_np); + of_node_put(cpu_np); ret = snd_soc_register_card(card); if (ret) { diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig index 7b7fbcd49e5e42..c7cd60f009e93e 100644 --- a/sound/soc/cirrus/Kconfig +++ b/sound/soc/cirrus/Kconfig @@ -16,7 +16,7 @@ config SND_EP93XX_SOC_AC97 config SND_EP93XX_SOC_SNAPPERCL15 tristate "SoC Audio support for Bluewater Systems Snapper CL15 module" - depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 + depends on SND_EP93XX_SOC && MACH_SNAPPER_CL15 && I2C select SND_EP93XX_SOC_I2S select SND_SOC_TLV320AIC23_I2C help diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 064e6c18e10923..ea9f0e31f9d40d 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -69,7 +69,7 @@ config SND_SOC_ALL_CODECS select SND_SOC_MAX98088 if I2C select SND_SOC_MAX98090 if I2C select SND_SOC_MAX98095 if I2C - select SND_SOC_MAX98357A + select SND_SOC_MAX98357A if GPIOLIB select SND_SOC_MAX9850 if I2C select SND_SOC_MAX9768 if I2C select SND_SOC_MAX9877 if I2C diff --git 
a/sound/soc/codecs/max98357a.c b/sound/soc/codecs/max98357a.c index 1806333ea29e5a..e9e6efbc21dd52 100644 --- a/sound/soc/codecs/max98357a.c +++ b/sound/soc/codecs/max98357a.c @@ -12,9 +12,19 @@ * max98357a.c -- MAX98357A ALSA SoC Codec driver */ -#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #define DRV_NAME "max98357a" diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c index e1a4a45c57e229..fd102613d20d87 100644 --- a/sound/soc/codecs/rt5670.c +++ b/sound/soc/codecs/rt5670.c @@ -225,7 +225,6 @@ static bool rt5670_volatile_register(struct device *dev, unsigned int reg) case RT5670_ADC_EQ_CTRL1: case RT5670_EQ_CTRL1: case RT5670_ALC_CTRL_1: - case RT5670_IRQ_CTRL1: case RT5670_IRQ_CTRL2: case RT5670_INT_IRQ_ST: case RT5670_IL_CMD: @@ -2703,6 +2702,12 @@ static int rt5670_i2c_probe(struct i2c_client *i2c, regmap_write(rt5670->regmap, RT5670_RESET, 0); + regmap_read(rt5670->regmap, RT5670_VENDOR_ID, &val); + if (val >= 4) + regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0980); + else + regmap_write(rt5670->regmap, RT5670_GPIO_CTRL3, 0x0d00); + ret = regmap_register_patch(rt5670->regmap, init_list, ARRAY_SIZE(init_list)); if (ret != 0) diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index 5d0bb8748dd1df..fb9c20eace3fbe 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c @@ -3284,8 +3284,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { { "IB45 Bypass Mux", "Bypass", "IB45 Mux" }, { "IB45 Bypass Mux", "Pass SRC", "IB45 Mux" }, - { "IB6 Mux", "IF1 DAC 6", "IF1 DAC6" }, - { "IB6 Mux", "IF2 DAC 6", "IF2 DAC6" }, + { "IB6 Mux", "IF1 DAC 6", "IF1 DAC6 Mux" }, + { "IB6 Mux", "IF2 DAC 6", "IF2 DAC6 Mux" }, { "IB6 Mux", "SLB DAC 6", "SLB DAC6" }, { "IB6 Mux", "STO4 ADC MIX L", "Stereo4 ADC MIXL" }, { "IB6 Mux", "IF4 DAC L", "IF4 DAC L" }, @@ -3293,8 +3293,8 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { { "IB6 Mux", "STO2 ADC MIX L", "Stereo2 ADC MIXL" }, { "IB6 Mux", "STO3 ADC MIX L", "Stereo3 ADC MIXL" }, - { "IB7 Mux", "IF1 DAC 7", "IF1 DAC7" }, - { "IB7 Mux", "IF2 DAC 7", "IF2 DAC7" }, + { "IB7 Mux", "IF1 DAC 7", "IF1 DAC7 Mux" }, + { "IB7 Mux", "IF2 DAC 7", "IF2 DAC7 Mux" }, { "IB7 Mux", "SLB DAC 7", "SLB DAC7" }, { "IB7 Mux", "STO4 ADC MIX R", "Stereo4 ADC MIXR" }, { "IB7 Mux", "IF4 DAC R", "IF4 DAC R" }, @@ -3635,15 +3635,15 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { { "DAC1 FS", NULL, "DAC1 MIXL" }, { "DAC1 FS", NULL, "DAC1 MIXR" }, - { "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2" }, - { "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2" }, + { "DAC2 L Mux", "IF1 DAC 2", "IF1 DAC2 Mux" }, + { "DAC2 L Mux", "IF2 DAC 2", "IF2 DAC2 Mux" }, { "DAC2 L Mux", "IF3 DAC L", "IF3 DAC L" }, { "DAC2 L Mux", "IF4 DAC L", "IF4 DAC L" }, { "DAC2 L Mux", "SLB DAC 2", "SLB DAC2" }, { "DAC2 L Mux", "OB 2", "OutBound2" }, - { "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3" }, - { "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3" }, + { "DAC2 R Mux", "IF1 DAC 3", "IF1 DAC3 Mux" }, + { "DAC2 R Mux", "IF2 DAC 3", "IF2 DAC3 Mux" }, { "DAC2 R Mux", "IF3 DAC R", "IF3 DAC R" }, { "DAC2 R Mux", "IF4 DAC R", "IF4 DAC R" }, { "DAC2 R Mux", "SLB DAC 3", "SLB DAC3" }, @@ -3651,29 +3651,29 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { { "DAC2 R Mux", "Haptic Generator", "Haptic Generator" }, { "DAC2 R Mux", "VAD ADC", "VAD ADC Mux" }, - { "DAC3 L Mux", "IF1 DAC 4", "IF1 DAC4" }, - { "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4" }, + { "DAC3 L 
Mux", "IF1 DAC 4", "IF1 DAC4 Mux" }, + { "DAC3 L Mux", "IF2 DAC 4", "IF2 DAC4 Mux" }, { "DAC3 L Mux", "IF3 DAC L", "IF3 DAC L" }, { "DAC3 L Mux", "IF4 DAC L", "IF4 DAC L" }, { "DAC3 L Mux", "SLB DAC 4", "SLB DAC4" }, { "DAC3 L Mux", "OB 4", "OutBound4" }, - { "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC4" }, - { "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC4" }, + { "DAC3 R Mux", "IF1 DAC 5", "IF1 DAC5 Mux" }, + { "DAC3 R Mux", "IF2 DAC 5", "IF2 DAC5 Mux" }, { "DAC3 R Mux", "IF3 DAC R", "IF3 DAC R" }, { "DAC3 R Mux", "IF4 DAC R", "IF4 DAC R" }, { "DAC3 R Mux", "SLB DAC 5", "SLB DAC5" }, { "DAC3 R Mux", "OB 5", "OutBound5" }, - { "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6" }, - { "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6" }, + { "DAC4 L Mux", "IF1 DAC 6", "IF1 DAC6 Mux" }, + { "DAC4 L Mux", "IF2 DAC 6", "IF2 DAC6 Mux" }, { "DAC4 L Mux", "IF3 DAC L", "IF3 DAC L" }, { "DAC4 L Mux", "IF4 DAC L", "IF4 DAC L" }, { "DAC4 L Mux", "SLB DAC 6", "SLB DAC6" }, { "DAC4 L Mux", "OB 6", "OutBound6" }, - { "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7" }, - { "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7" }, + { "DAC4 R Mux", "IF1 DAC 7", "IF1 DAC7 Mux" }, + { "DAC4 R Mux", "IF2 DAC 7", "IF2 DAC7 Mux" }, { "DAC4 R Mux", "IF3 DAC R", "IF3 DAC R" }, { "DAC4 R Mux", "IF4 DAC R", "IF4 DAC R" }, { "DAC4 R Mux", "SLB DAC 7", "SLB DAC7" }, diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c index 3a1343fa109b48..007a0e3bc2735c 100644 --- a/sound/soc/codecs/sta32x.c +++ b/sound/soc/codecs/sta32x.c @@ -106,13 +106,11 @@ static const struct reg_default sta32x_regs[] = { }; static const struct regmap_range sta32x_write_regs_range[] = { - regmap_reg_range(STA32X_CONFA, STA32X_AUTO2), - regmap_reg_range(STA32X_C1CFG, STA32X_FDRC2), + regmap_reg_range(STA32X_CONFA, STA32X_FDRC2), }; static const struct regmap_range sta32x_read_regs_range[] = { - regmap_reg_range(STA32X_CONFA, STA32X_AUTO2), - regmap_reg_range(STA32X_C1CFG, STA32X_FDRC2), + regmap_reg_range(STA32X_CONFA, STA32X_FDRC2), }; static const struct regmap_range sta32x_volatile_regs_range[] = { diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 2595611e8a6ded..b9fabbf69db6c9 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -603,10 +603,6 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, factor = (div2 + 1) * (7 * psr + 1) * 2; for (i = 0; i < 255; i++) { - /* The bclk rate must be smaller than 1/5 sysclk rate */ - if (factor * (i + 1) < 5) - continue; - tmprate = freq * factor * (i + 2); if (baudclk_is_used) @@ -614,6 +610,13 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream, else clkrate = clk_round_rate(ssi_private->baudclk, tmprate); + /* + * Hardware limitation: The bclk rate must be + * never greater than 1/5 IPG clock rate + */ + if (clkrate * 5 > clk_get_rate(ssi_private->clk)) + continue; + clkrate /= factor; afreq = clkrate / (i + 1); diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index f7c6734bd5daee..fb550b5869d21f 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -372,6 +372,11 @@ static int asoc_simple_card_dai_link_of(struct device_node *node, strlen(dai_link->cpu_dai_name) + strlen(dai_link->codec_dai_name) + 2, GFP_KERNEL); + if (!name) { + ret = -ENOMEM; + goto dai_link_of_err; + } + sprintf(name, "%s-%s", dai_link->cpu_dai_name, dai_link->codec_dai_name); dai_link->name = dai_link->stream_name = name; diff --git a/sound/soc/intel/sst-atom-controls.h b/sound/soc/intel/sst-atom-controls.h index dfebfdd5eb2aaa..daecc58f28afaf 100644 --- 
a/sound/soc/intel/sst-atom-controls.h +++ b/sound/soc/intel/sst-atom-controls.h @@ -150,7 +150,7 @@ enum sst_cmd_type { enum sst_task { SST_TASK_SBA = 1, - SST_TASK_MMX, + SST_TASK_MMX = 3, }; enum sst_type { diff --git a/sound/soc/intel/sst/sst.c b/sound/soc/intel/sst/sst.c index 8a8d56a146e75a..11c578651c1c8c 100644 --- a/sound/soc/intel/sst/sst.c +++ b/sound/soc/intel/sst/sst.c @@ -350,7 +350,9 @@ static inline void sst_save_shim64(struct intel_sst_drv *ctx, spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); - shim_regs->imrx = sst_shim_read64(shim, SST_IMRX), + shim_regs->imrx = sst_shim_read64(shim, SST_IMRX); + shim_regs->csr = sst_shim_read64(shim, SST_CSR); + spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); } @@ -367,6 +369,7 @@ static inline void sst_restore_shim64(struct intel_sst_drv *ctx, */ spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); sst_shim_write64(shim, SST_IMRX, shim_regs->imrx), + sst_shim_write64(shim, SST_CSR, shim_regs->csr), spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); } @@ -379,6 +382,10 @@ void sst_configure_runtime_pm(struct intel_sst_drv *ctx) * initially active. So change the state to active before * enabling the pm */ + + if (!acpi_disabled) + pm_runtime_set_active(ctx->dev); + pm_runtime_enable(ctx->dev); if (acpi_disabled) @@ -409,6 +416,7 @@ static int intel_sst_runtime_suspend(struct device *dev) synchronize_irq(ctx->irq_num); flush_workqueue(ctx->post_msg_wq); + ctx->ops->reset(ctx); /* save the shim registers because PMC doesn't save state */ sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64); diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c index ccfb41c22e53b1..f7eb42aa3f3893 100644 --- a/sound/soc/omap/omap-hdmi-audio.c +++ b/sound/soc/omap/omap-hdmi-audio.c @@ -352,6 +352,9 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev) return ret; card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); + if (!card) + return -ENOMEM; + card->name = devm_kasprintf(dev, GFP_KERNEL, "HDMI %s", dev_name(ad->dssdev)); card->owner = THIS_MODULE; diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index c7eb9dd67f608c..fd99d89de6a854 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c @@ -530,8 +530,19 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai, case OMAP_MCBSP_SYSCLK_CLKX_EXT: regs->srgr2 |= CLKSM; + regs->pcr0 |= SCLKME; + /* + * If McBSP is master but yet the CLKX/CLKR pin drives the SRG, + * disable output on those pins. This enables to inject the + * reference clock through CLKX/CLKR. For this to work + * set_dai_sysclk() _needs_ to be called after set_dai_fmt(). 
+ */ + regs->pcr0 &= ~CLKXM; + break; case OMAP_MCBSP_SYSCLK_CLKR_EXT: regs->pcr0 |= SCLKME; + /* Disable ouput on CLKR pin in master mode */ + regs->pcr0 &= ~CLKRM; break; default: err = -ENODEV; diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c index f4b05bc23e4bfb..1343ecbf0bd5ee 100644 --- a/sound/soc/omap/omap-pcm.c +++ b/sound/soc/omap/omap-pcm.c @@ -201,7 +201,7 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd) struct snd_pcm *pcm = rtd->pcm; int ret; - ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64)); + ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32)); if (ret) return ret; diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig index 3cebf6ca03dfb9..0632a36852c847 100644 --- a/sound/soc/samsung/Kconfig +++ b/sound/soc/samsung/Kconfig @@ -174,7 +174,7 @@ config SND_SOC_SMDK_WM8994_PCM config SND_SOC_SPEYSIDE tristate "Audio support for Wolfson Speyside" - depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 + depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C && SPI_MASTER select SND_SAMSUNG_I2S select SND_SOC_WM8996 select SND_SOC_WM9081 @@ -189,7 +189,7 @@ config SND_SOC_TOBERMORY config SND_SOC_BELLS tristate "Audio support for Wolfson Bells" - depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA + depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && MFD_ARIZONA && I2C && SPI_MASTER select SND_SAMSUNG_I2S select SND_SOC_WM5102 select SND_SOC_WM5110 @@ -206,7 +206,7 @@ config SND_SOC_LOWLAND config SND_SOC_LITTLEMILL tristate "Audio support for Wolfson Littlemill" - depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 + depends on SND_SOC_SAMSUNG && MACH_WLF_CRAGG_6410 && I2C select SND_SAMSUNG_I2S select MFD_WM8994 select SND_SOC_WM8994 @@ -223,7 +223,7 @@ config SND_SOC_SNOW config SND_SOC_ODROIDX2 tristate "Audio support for Odroid-X2 and Odroid-U3" - depends on SND_SOC_SAMSUNG + depends on SND_SOC_SAMSUNG && I2C select SND_SOC_MAX98090 select SND_SAMSUNG_I2S help @@ -231,6 +231,6 @@ config SND_SOC_ODROIDX2 config SND_SOC_ARNDALE_RT5631_ALC5631 tristate "Audio support for RT5631(ALC5631) on Arndale Board" - depends on SND_SOC_SAMSUNG + depends on SND_SOC_SAMSUNG && I2C select SND_SAMSUNG_I2S select SND_SOC_RT5631 diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 1b53605f715439..110577c523179c 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -1252,6 +1252,8 @@ static int rsnd_probe(struct platform_device *pdev) goto exit_snd_probe; } + dev_set_drvdata(dev, priv); + /* * asoc register */ @@ -1268,8 +1270,6 @@ static int rsnd_probe(struct platform_device *pdev) goto exit_snd_soc; } - dev_set_drvdata(dev, priv); - pm_runtime_enable(dev); dev_info(dev, "probed\n"); diff --git a/sound/usb/line6/playback.c b/sound/usb/line6/playback.c index 05dee690f48761..97ed593f6010f6 100644 --- a/sound/usb/line6/playback.c +++ b/sound/usb/line6/playback.c @@ -39,7 +39,7 @@ static void change_volume(struct urb *urb_out, int volume[], for (; p < buf_end; ++p) { short pv = le16_to_cpu(*p); int val = (pv * volume[chn & 1]) >> 8; - pv = clamp(val, 0x7fff, -0x8000); + pv = clamp(val, -0x8000, 0x7fff); *p = cpu_to_le16(pv); ++chn; } @@ -54,7 +54,7 @@ static void change_volume(struct urb *urb_out, int volume[], val = p[0] + (p[1] << 8) + ((signed char)p[2] << 16); val = (val * volume[chn & 1]) >> 8; - val = clamp(val, 0x7fffff, -0x800000); + val = clamp(val, -0x800000, 0x7fffff); p[0] = val; p[1] = val >> 8; p[2] = val >> 16; @@ -126,7 +126,7 @@ static void add_monitor_signal(struct 
urb *urb_out, unsigned char *signal, short pov = le16_to_cpu(*po); short piv = le16_to_cpu(*pi); int val = pov + ((piv * volume) >> 8); - pov = clamp(val, 0x7fff, -0x8000); + pov = clamp(val, -0x8000, 0x7fff); *po = cpu_to_le16(pov); } }
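
The clamp() argument-order fix in the sound/usb/line6/playback.c hunks above is worth spelling out: the kernel macro is clamp(val, lo, hi), i.e. min(max(val, lo), hi), so the old call clamp(val, 0x7fff, -0x8000) pinned every sample to the lower bound. A minimal user-space sketch (the clamp_int() helper and the sample value are ours, chosen only to mirror the macro's behaviour):

#include <stdio.h>

/* Mirrors the kernel's clamp(val, lo, hi) == min(max(val, lo), hi). */
static int clamp_int(int val, int lo, int hi)
{
	int v = val < lo ? lo : val;	/* max(val, lo) */
	return v > hi ? hi : v;		/* min(v, hi)   */
}

int main(void)
{
	int val = 1000;	/* an ordinary in-range 16-bit sample */

	/* Old, swapped bounds: every sample collapses to -0x8000. */
	printf("swapped: %d\n", clamp_int(val, 0x7fff, -0x8000));

	/* Fixed bounds: in-range samples pass through unchanged. */
	printf("fixed:   %d\n", clamp_int(val, -0x8000, 0x7fff));
	return 0;
}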
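
The request_threaded_irq() hunk in kernel/irq/manage.c above tightens the flag sanity check: IRQF_COND_SUSPEND is only meaningful on shared IRQs and may not be combined with IRQF_NO_SUSPEND. A standalone restatement of that rule, using the IRQF_COND_SUSPEND value added by the interrupt.h hunk; IRQF_NO_SUSPEND's value is not visible in this excerpt and is assumed to be mainline's 0x00004000, and the helper name and test cases are ours:

#include <stdbool.h>
#include <stdio.h>

#define IRQF_SHARED		0x00000080
#define IRQF_NO_SUSPEND		0x00004000	/* assumed mainline value */
#define IRQF_COND_SUSPEND	0x00040000	/* added by this patch */

/* Returns true if the flag combination would be rejected with -EINVAL. */
static bool irqflags_invalid(unsigned long irqflags, const void *dev_id)
{
	/* Shared IRQs need a dev_id so they can be told apart on free. */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return true;
	/* IRQF_COND_SUSPEND only makes sense for shared interrupts... */
	if (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND))
		return true;
	/* ...and it cannot be set along with IRQF_NO_SUSPEND. */
	if ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))
		return true;
	return false;
}

int main(void)
{
	int token;	/* stand-in dev_id */

	/* 0: shared wakeup user with conditional suspend is allowed */
	printf("%d\n", irqflags_invalid(IRQF_SHARED | IRQF_COND_SUSPEND, &token));
	/* 1: conditional suspend on a non-shared IRQ is rejected */
	printf("%d\n", irqflags_invalid(IRQF_COND_SUSPEND, &token));
	/* 1: combining it with IRQF_NO_SUSPEND is rejected */
	printf("%d\n", irqflags_invalid(IRQF_SHARED | IRQF_NO_SUSPEND |
					IRQF_COND_SUSPEND, &token));
	return 0;
}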
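
Several of the fs/nfs/inode.c hunks above revolve around one idea: stamp each nfs_fattr with a generation number ("barrier") and only let it update the inode if it is newer than what the inode already carries, so replies from older RPC calls cannot clobber fresher attributes. Below is a toy model of just that ordering rule; the struct and function names are simplified stand-ins rather than the kernel API, and the real nfs_inode_attrs_need_update() also considers ctime and counter wrap:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long attr_generation_counter;	/* global, like nfs_attr_generation_counter */

struct toy_fattr { long gencount; };
struct toy_inode { long attr_gencount; };

static long inc_attr_generation(void)
{
	return atomic_fetch_add(&attr_generation_counter, 1) + 1;
}

/* Mirrors nfs_fattr_set_barrier(): stamp the reply with a fresh generation. */
static void fattr_set_barrier(struct toy_fattr *fattr)
{
	fattr->gencount = inc_attr_generation();
}

/* Accept a reply only if it is newer than what the inode already has. */
static bool attrs_need_update(const struct toy_inode *inode,
			      const struct toy_fattr *fattr)
{
	return fattr->gencount - inode->attr_gencount > 0;
}

int main(void)
{
	struct toy_inode inode = { .attr_gencount = 0 };
	struct toy_fattr old_reply, new_reply;

	fattr_set_barrier(&old_reply);	/* reply from an older RPC call */
	fattr_set_barrier(&new_reply);	/* reply from a later setattr/write */

	/* The newer reply wins and moves the inode's barrier forward... */
	if (attrs_need_update(&inode, &new_reply))
		inode.attr_gencount = new_reply.gencount;

	/* ...after which the stale reply is rejected (prints 0). */
	printf("stale reply accepted: %d\n", attrs_need_update(&inode, &old_reply));
	return 0;
}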