F: drivers/net/ethernet/dlink/sundance.c
SUPERH
+M: Yoshinori Sato <ysato@users.sourceforge.jp>
+M: Rich Felker <dalias@libc.org>
L: linux-sh@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-sh/list/
-S: Orphan
+S: Maintained
F: Documentation/sh/
F: arch/sh/
F: drivers/sh/
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 1
+SUBLEVEL = 3
EXTRAVERSION =
NAME = Blurry Fish Butt
gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
};
- reg_usb2_1_vbus: v5-vbus1 {
- compatible = "regulator-fixed";
- regulator-name = "v5.0-vbus1";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- enable-active-high;
- regulator-always-on;
- gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
- };
-
reg_sata0: pwr-sata0 {
compatible = "regulator-fixed";
regulator-name = "pwr_en_sata0";
macb0: ethernet@f8020000 {
phy-mode = "rmii";
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
phy0: ethernet-phy@1 {
interrupt-parent = <&pioE>;
- interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
reg = <1>;
};
};
atmel,pins =
<AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
+ pinctrl_macb0_phy_irq: macb0_phy_irq_0 {
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
};
};
};
};
macb0: ethernet@f8020000 {
+ pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
phy-mode = "rmii";
status = "okay";
+
+ ethernet-phy@1 {
+ reg = <0x1>;
+ interrupt-parent = <&pioE>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ };
};
mmc1: mmc@fc000000 {
pinctrl@fc06a000 {
board {
+ pinctrl_macb0_phy_irq: macb0_phy_irq {
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
pinctrl_mmc0_cd: mmc0_cd {
atmel,pins =
<AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
interrupt-parent = <&gpio5>;
interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
ref-clock-frequency = <26000000>;
+ tcxo-clock-frequency = <26000000>;
};
};
};
};
+&gpio8 {
+ /* TI trees use GPIO instead of msecure, see also muxing */
+ p234 {
+ gpio-hog;
+ gpios = <10 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "gpio8_234/msecure";
+ };
+};
+
&omap5_pmx_core {
pinctrl-names = "default";
pinctrl-0 = <
>;
};
+ /* TI trees use GPIO mode; msecure mode does not work reliably? */
+ palmas_msecure_pins: palmas_msecure_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x180, PIN_OUTPUT | MUX_MODE6) /* gpio8_234 */
+ >;
+ };
+
usbhost_pins: pinmux_usbhost_pins {
pinctrl-single,pins = <
0x84 (PIN_INPUT | MUX_MODE0) /* usbb2_hsic_strobe */
&usbhost_wkup_pins
>;
+ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+ pinctrl-single,pins = <
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+ >;
+ };
+
usbhost_wkup_pins: pinmux_usbhost_wkup_pins {
pinctrl-single,pins = <
0x1A (PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
interrupt-controller;
#interrupt-cells = <2>;
ti,system-power-controller;
+ pinctrl-names = "default";
+ pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>;
extcon_usb3: palmas_usb {
compatible = "ti,palmas-usb-vid";
#clock-cells = <0>;
};
+ rtc {
+ compatible = "ti,palmas-rtc";
+ interrupt-parent = <&palmas>;
+ interrupts = <8 IRQ_TYPE_NONE>;
+ ti,backup-battery-chargeable;
+ ti,backup-battery-charge-high-current;
+ };
+
palmas_pmic {
compatible = "ti,palmas-pmic";
interrupt-parent = <&palmas>;
dbgu: serial@fc069000 {
compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
reg = <0xfc069000 0x200>;
- interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
+ interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dbgu>;
clocks = <&dbgu_clk>;
};
mmcsd_default_mode: mmcsd_default {
mmcsd_default_cfg1 {
- /* MCCLK */
- pins = "GPIO8_B10";
- ste,output = <0>;
- };
- mmcsd_default_cfg2 {
- /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
- pins = "GPIO10_C11", "GPIO15_A12",
- "GPIO16_C13", "GPIO23_D15";
- ste,output = <1>;
- };
- mmcsd_default_cfg3 {
- /* MCCMD, MCDAT3-0, MCMSFBCLK */
- pins = "GPIO9_A10", "GPIO11_B11",
- "GPIO12_A11", "GPIO13_C12",
- "GPIO14_B12", "GPIO24_C15";
- ste,input = <1>;
+ /*
+ * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
+ * MCCMD, MCDAT3-0, MCMSFBCLK
+ */
+ pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
+ "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
+ "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
+ ste,output = <2>;
};
};
};
clock-names = "mclk", "apb_pclk";
interrupt-parent = <&vica>;
interrupts = <22>;
- max-frequency = <48000000>;
+ max-frequency = <400000>;
bus-width = <4>;
cap-mmc-highspeed;
cap-sd-highspeed;
+ full-pwr-cycle;
+ /*
+ * The STw4811 circuit used with the Nomadik strictly
+ * requires that all of these signal direction pins be
+ * routed and used for its 4-bit levelshifter.
+ */
+ st,sig-dir-dat0;
+ st,sig-dir-dat2;
+ st,sig-dir-dat31;
+ st,sig-dir-cmd;
+ st,sig-pin-fbclk;
pinctrl-names = "default";
pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
vmmc-supply = <&vmmc_regulator>;
*/
#include <linux/module.h>
#include <linux/kernel.h>
-
+#include <asm/div64.h>
#include <asm/hardware/icst.h>
/*
unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
{
- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+ u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+ u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
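+	/*
+	 * do_div() divides a 64-bit dividend in place by a 32-bit
+	 * divisor; doing the maths in 64 bits avoids the overflow the
+	 * plain 32-bit expression could hit with high reference clocks.
+	 */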
+ do_div(dividend, divisor);
+ return (unsigned long)dividend;
}
EXPORT_SYMBOL(icst_hz);
if (f > p->vco_min && f <= p->vco_max)
break;
+ i++;
} while (i < 8);
if (i >= 8)
stmfd sp!, {lr} @ save registers on stack
/* Setup so that we will disable and enable l2 */
mov r1, #0x1
- adrl r2, l2dis_3630 @ may be too distant for plain adr
- str r1, [r2]
+ adrl r3, l2dis_3630_offset @ may be too distant for plain adr
+ ldr r2, [r3] @ value for offset
+ str r1, [r2, r3] @ write to l2dis_3630
ldmfd sp!, {pc} @ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)
- .text
-/* Function to call rom code to save secure ram context */
+/*
+ * Function to call rom code to save secure ram context. This gets
+ * relocated to SRAM, so it can be all in .data section. Otherwise
+ * we need to initialize api_params separately.
+ */
+ .data
.align 3
ENTRY(save_secure_ram_context)
stmfd sp!, {r4 - r11, lr} @ save registers on stack
ENTRY(save_secure_ram_context_sz)
.word . - save_secure_ram_context
+ .text
+
/*
* ======================
* == Idle entry point ==
bic r5, r5, #0x40
str r5, [r4]
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaing this code.
- */
-
is_dll_in_lock_mode:
/* Is dll in lock mode? */
ldr r4, sdrc_dlla_ctrl
tst r5, #0x4
bne exit_nonoff_modes @ Return if locked
/* wait till dll locks */
- adr r7, kick_counter
wait_dll_lock_timed:
- ldr r4, wait_dll_lock_counter
- add r4, r4, #1
- str r4, [r7, #wait_dll_lock_counter - kick_counter]
ldr r4, sdrc_dlla_status
/* Wait 20uS for lock */
mov r6, #8
orr r6, r6, #(1<<3) @ enable dll
str r6, [r4]
dsb
- ldr r4, kick_counter
- add r4, r4, #1
- str r4, [r7] @ kick_counter
b wait_dll_lock_timed
exit_nonoff_modes:
.word SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
.word SDRC_DLLA_CTRL_V
- /*
- * When exporting to userspace while the counters are in SRAM,
- * these 2 words need to be at the end to facilitate retrival!
- */
-kick_counter:
- .word 0
-wait_dll_lock_counter:
- .word 0
-
ENTRY(omap3_do_wfi_sz)
.word . - omap3_do_wfi
cmp r2, #0x0 @ Check if target power state was OFF or RET
bne logic_l1_restore
- ldr r0, l2dis_3630
+ adr r1, l2dis_3630_offset @ address for offset
+ ldr r0, [r1] @ value for offset
+ ldr r0, [r1, r0] @ value at l2dis_3630
cmp r0, #0x1 @ should we disable L2 on 3630?
bne skipl2dis
mrc p15, 0, r0, c1, c0, 1
and r1, #0x700
cmp r1, #0x300
beq l2_inv_gp
+ adr r0, l2_inv_api_params_offset
+ ldr r3, [r0]
+ add r3, r3, r0 @ r3 points to dummy parameters
mov r0, #40 @ set service ID for PPA
mov r12, r0 @ copy secure Service ID in r12
mov r1, #0 @ set task id for ROM code in r1
mov r2, #4 @ set some flags in r2, r6
mov r6, #0xff
- adr r3, l2_inv_api_params @ r3 points to dummy parameters
dsb @ data write barrier
dmb @ data memory barrier
smc #1 @ call SMI monitor (smi #1)
b logic_l1_restore
.align
-l2_inv_api_params:
- .word 0x1, 0x00
+l2_inv_api_params_offset:
+ .long l2_inv_api_params - .
l2_inv_gp:
/* Execute smi to invalidate L2 cache */
mov r12, #0x1 @ set up to invalidate L2
mov r12, #0x2
smc #0 @ Call SMI monitor (smieq)
logic_l1_restore:
- ldr r1, l2dis_3630
+	adr	r0, l2dis_3630_offset	@ address for offset
+ ldr r1, [r0] @ value for offset
+ ldr r1, [r0, r1] @ value at l2dis_3630
cmp r1, #0x1 @ Test if L2 re-enable needed on 3630
bne skipl2reen
mrc p15, 0, r1, c1, c0, 1
.word CONTROL_STAT
control_mem_rta:
.word CONTROL_MEM_RTA_CTRL
+l2dis_3630_offset:
+ .long l2dis_3630 - .
+
+ .data
l2dis_3630:
.word 0
+ .data
+l2_inv_api_params:
+ .word 0x1, 0x00
+
/*
* Internal functions
*/
dsb
.endm
-ppa_zero_params:
- .word 0x0
-
-ppa_por_params:
- .word 1, 0
-
#ifdef CONFIG_ARCH_OMAP4
/*
beq skip_ns_smp_enable
ppa_actrl_retry:
mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
- adr r3, ppa_zero_params @ Pointer to parameters
+ adr r1, ppa_zero_params_offset
+ ldr r3, [r1]
+ add r3, r3, r1 @ Pointer to ppa_zero_params
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
mov r6, #0xff
ldr r0, =OMAP4_PPA_L2_POR_INDEX
ldr r1, =OMAP44XX_SAR_RAM_BASE
ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
- adr r3, ppa_por_params
+ adr r1, ppa_por_params_offset
+ ldr r3, [r1]
+ add r3, r3, r1 @ Pointer to ppa_por_params
str r4, [r3, #0x04]
mov r1, #0x0 @ Process ID
mov r2, #0x4 @ Flag
#endif
b cpu_resume @ Jump to generic resume
+ppa_por_params_offset:
+ .long ppa_por_params - .
ENDPROC(omap4_cpu_resume)
#endif /* CONFIG_ARCH_OMAP4 */
nop
ldmfd sp!, {pc}
+ppa_zero_params_offset:
+ .long ppa_zero_params - .
ENDPROC(omap_do_wfi)
+
+ .data
+ppa_zero_params:
+ .word 0
+
+ppa_por_params:
+ .word 1, 0
ret = register_iommu_dma_ops_notifier(&platform_bus_type);
if (!ret)
ret = register_iommu_dma_ops_notifier(&amba_bustype);
+
+ /* handle devices queued before this arch_initcall */
+ if (!ret)
+ __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
return ret;
}
arch_initcall(__iommu_dma_init);
if (end < MODULES_VADDR || end >= MODULES_END)
return -EINVAL;
+ if (!numpages)
+ return 0;
+
data.set_mask = set_mask;
data.clear_mask = clear_mask;
};
unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
+
unsigned long memory_end;
+EXPORT_SYMBOL(memory_end);
void __init setup_arch(char **);
int get_cpuinfo(char *);
return pte_wrprotect(pte);
}
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t old_pte = *ptep;
- set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
-}
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
-{
- int changed = !pte_same(*ptep, pte);
- if (changed) {
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
- flush_tlb_page(vma, addr);
- }
- return changed;
-}
+ pte_t pte, int dirty);
static inline pte_t huge_ptep_get(pte_t *ptep)
{
#ifndef _PARISC_SIGINFO_H
#define _PARISC_SIGINFO_H
+#if defined(__LP64__)
+#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#endif
+
#include <asm-generic/siginfo.h>
#undef NSIGTRAP
addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
- mtsp(mm->context, 1);
- pdtlb(addr);
- if (unlikely(split_tlb))
- pitlb(addr);
+ purge_tlb_entries(mm, addr);
addr += (1UL << REAL_HPAGE_SHIFT);
}
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
+static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned long addr_start;
addr_start = addr;
for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
- /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
- * instead, but then we get double locking on pa_tlb_lock. */
- *ptep = entry;
+ set_pte(ptep, entry);
ptep++;
- /* Drop the PAGE_SIZE/non-huge tlb entry */
- purge_tlb_entries(mm, addr);
-
addr += PAGE_SIZE;
pte_val(entry) += PAGE_SIZE;
}
purge_tlb_entries_huge(mm, addr_start);
}
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry)
+{
+ unsigned long flags;
+
+ purge_tlb_start(flags);
+ __set_huge_pte_at(mm, addr, ptep, entry);
+ purge_tlb_end(flags);
+}
+
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
+ unsigned long flags;
pte_t entry;
+ purge_tlb_start(flags);
entry = *ptep;
- set_huge_pte_at(mm, addr, ptep, __pte(0));
+ __set_huge_pte_at(mm, addr, ptep, __pte(0));
+ purge_tlb_end(flags);
return entry;
}
+
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long flags;
+ pte_t old_pte;
+
+ purge_tlb_start(flags);
+ old_pte = *ptep;
+ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ purge_tlb_end(flags);
+}
+
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ unsigned long flags;
+ int changed;
+
+ purge_tlb_start(flags);
+ changed = !pte_same(*ptep, pte);
+ if (changed) {
+ __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ }
+ purge_tlb_end(flags);
+ return changed;
+}
+
int pmd_huge(pmd_t pmd)
{
return 0;
#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
+#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
struct eeh_pe {
int type; /* PE type: PHB/Bus/Device */
*/
eeh_pe_state_mark(pe, EEH_PE_KEEP);
if (bus) {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_lock_rescan_remove();
pcibios_remove_pci_devices(bus);
pci_unlock_rescan_remove();
 * their PCI config any more.
*/
if (frozen_bus) {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
pci_lock_rescan_remove();
continue;
/* Notify all devices to be down */
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
bus = eeh_pe_bus_get(phb_pe);
eeh_pe_dev_traverse(pe,
eeh_report_failure, NULL);
const char *eeh_pe_loc_get(struct eeh_pe *pe)
{
struct pci_bus *bus = eeh_pe_bus_get(pe);
- struct device_node *dn = pci_bus_to_OF_node(bus);
+ struct device_node *dn;
const char *loc = NULL;
- if (!dn)
- goto out;
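+	/* Walk up towards the root bus until a location code is found. */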
+ while (bus) {
+ dn = pci_bus_to_OF_node(bus);
+ if (!dn) {
+ bus = bus->parent;
+ continue;
+ }
- /* PHB PE or root PE ? */
- if (pci_is_root_bus(bus)) {
- loc = of_get_property(dn, "ibm,loc-code", NULL);
- if (!loc)
+ if (pci_is_root_bus(bus))
loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
+ else
+ loc = of_get_property(dn, "ibm,slot-location-code",
+ NULL);
+
if (loc)
- goto out;
+ return loc;
- /* Check the root port */
- dn = dn->child;
- if (!dn)
- goto out;
+ bus = bus->parent;
}
- loc = of_get_property(dn, "ibm,loc-code", NULL);
- if (!loc)
- loc = of_get_property(dn, "ibm,slot-location-code", NULL);
-
-out:
- return loc ? loc : "N/A";
+ return "N/A";
}
/**
bus = pe->phb->bus;
} else if (pe->type & EEH_PE_BUS ||
pe->type & EEH_PE_DEVICE) {
- if (pe->bus) {
+ if (pe->state & EEH_PE_PRI_BUS) {
bus = pe->bus;
goto out;
}
/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
- rlwimi r5, r4, 1, DAWRX_WT
+ rlwimi r5, r4, 2, DAWRX_WT
clrrdi r4, r4, 3
std r4, VCPU_DAWR(r3)
std r5, VCPU_DAWRX(r3)
r = -ENXIO;
break;
}
- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
break;
case KVM_REG_PPC_VRSAVE:
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
- r = -ENXIO;
- break;
- }
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ val = get_reg_val(reg->id, vcpu->arch.vrsave);
break;
#endif /* CONFIG_ALTIVEC */
default:
r = -ENXIO;
break;
}
- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
break;
case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.vrsave = set_reg_val(reg->id, val);
break;
#endif /* CONFIG_ALTIVEC */
default:
* PCI devices of the PE are expected to be removed prior
* to PE reset.
*/
- if (!edev->pe->bus)
+ if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
edev->pe->bus = pci_find_bus(hose->global_number,
pdn->busno);
+ if (edev->pe->bus)
+ edev->pe->state |= EEH_PE_PRI_BUS;
+ }
/*
* Enable EEH explicitly so that we will do EEH check
static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.dma_dev_setup = pnv_pci_dma_dev_setup,
+ .dma_bus_setup = pnv_pci_dma_bus_setup,
#ifdef CONFIG_PCI_MSI
.setup_msi_irqs = pnv_setup_msi_irqs,
.teardown_msi_irqs = pnv_teardown_msi_irqs,
u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
long i;
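+
+	/* A TCE that allows write access must allow read access as well,
+	 * otherwise the hardware may treat the entry as invalid. */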
+ if (proto_tce & TCE_PCI_WRITE)
+ proto_tce |= TCE_PCI_READ;
+
for (i = 0; i < npages; i++) {
unsigned long newtce = proto_tce |
((rpn + i) << tbl->it_page_shift);
BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
+ if (newtce & TCE_PCI_WRITE)
+ newtce |= TCE_PCI_READ;
+
oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
*hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
*direction = iommu_tce_direction(oldtce);
phb->dma_dev_setup(phb, pdev);
}
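+/*
+ * Refresh the cached primary bus of each PE covering this bus: after a
+ * hotplug rescan pe->pbus may point at a bus that no longer exists, so
+ * match PEs by bus number and update the pointer.
+ */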
+void pnv_pci_dma_bus_setup(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pe;
+
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
+ continue;
+
+ if (!pe->pbus)
+ continue;
+
+ if (bus->number == ((pe->rid >> 8) & 0xFF)) {
+ pe->pbus = bus;
+ break;
+ }
+ }
+}
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
+extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
# done with the slightly better performing SSSE3 byte shuffling,
# 7/12-bit word rotation uses traditional shift+OR.
- sub $0x40,%rsp
+ mov %rsp,%r11
+ sub $0x80,%rsp
+ and $~63,%rsp
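+	# The kernel guarantees only 8-byte stack alignment, so align the
+	# stack to 64 bytes for the aligned SSE stores used below; the
+	# original %rsp is preserved in %r11 and restored before ret.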
# x0..15[0-3] = s0..3[0..3]
movq 0x00(%rdi),%xmm1
pxor %xmm1,%xmm15
movdqu %xmm15,0xf0(%rsi)
- add $0x40,%rsp
+ mov %r11,%rsp
ret
ENDPROC(chacha20_4block_xor_ssse3)
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
+ pgprotval_t val = pgprot_val(pgprot);
pgprot_t new;
- unsigned long val;
- val = pgprot_val(pgprot);
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
return new;
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
+ pgprotval_t val = pgprot_val(pgprot);
pgprot_t new;
- unsigned long val;
- val = pgprot_val(pgprot);
pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
((val & _PAGE_PAT_LARGE) >>
(_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
/*
* copy_user_nocache - Uncached memory copy with exception handling
- * This will force destination/source out of cache for more performance.
+ * This will force destination out of cache for more performance.
+ *
+ * Note: Cached memory copy is used when destination or size is not
+ * naturally aligned. That is:
+ * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
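+ *
+ * Non-temporal stores (movnti) bypass the cache, so an sfence is
+ * issued once copying is done to order them against later stores.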
*/
ENTRY(__copy_user_nocache)
ASM_STAC
+
+ /* If size is less than 8 bytes, go to 4-byte copy */
cmpl $8,%edx
- jb 20f /* less then 8 bytes, go to byte copy loop */
+ jb .L_4b_nocache_copy_entry
+
+ /* If destination is not 8-byte aligned, "cache" copy to align it */
ALIGN_DESTINATION
+
+ /* Set 4x8-byte copy count and remainder */
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz 17f
+ jz .L_8b_nocache_copy_entry /* jump if count is 0 */
+
+ /* Perform 4x8-byte nocache loop-copy */
+.L_4x8b_nocache_copy_loop:
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
decl %ecx
- jnz 1b
-17: movl %edx,%ecx
+ jnz .L_4x8b_nocache_copy_loop
+
+ /* Set 8-byte copy count and remainder */
+.L_8b_nocache_copy_entry:
+ movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
- jz 20f
-18: movq (%rsi),%r8
-19: movnti %r8,(%rdi)
+ jz .L_4b_nocache_copy_entry /* jump if count is 0 */
+
+ /* Perform 8-byte nocache loop-copy */
+.L_8b_nocache_copy_loop:
+20: movq (%rsi),%r8
+21: movnti %r8,(%rdi)
leaq 8(%rsi),%rsi
leaq 8(%rdi),%rdi
decl %ecx
- jnz 18b
-20: andl %edx,%edx
- jz 23f
+ jnz .L_8b_nocache_copy_loop
+
+ /* If no byte left, we're done */
+.L_4b_nocache_copy_entry:
+ andl %edx,%edx
+ jz .L_finish_copy
+
+ /* If destination is not 4-byte aligned, go to byte copy: */
+ movl %edi,%ecx
+ andl $3,%ecx
+ jnz .L_1b_cache_copy_entry
+
+ /* Set 4-byte copy count (1 or 0) and remainder */
movl %edx,%ecx
-21: movb (%rsi),%al
-22: movb %al,(%rdi)
+ andl $3,%edx
+ shrl $2,%ecx
+ jz .L_1b_cache_copy_entry /* jump if count is 0 */
+
+ /* Perform 4-byte nocache copy: */
+30: movl (%rsi),%r8d
+31: movnti %r8d,(%rdi)
+ leaq 4(%rsi),%rsi
+ leaq 4(%rdi),%rdi
+
+ /* If no bytes left, we're done: */
+ andl %edx,%edx
+ jz .L_finish_copy
+
+ /* Perform byte "cache" loop-copy for the remainder */
+.L_1b_cache_copy_entry:
+ movl %edx,%ecx
+.L_1b_cache_copy_loop:
+40: movb (%rsi),%al
+41: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
- jnz 21b
-23: xorl %eax,%eax
+ jnz .L_1b_cache_copy_loop
+
+ /* Finished copying; fence the prior stores */
+.L_finish_copy:
+ xorl %eax,%eax
ASM_CLAC
sfence
ret
.section .fixup,"ax"
-30: shll $6,%ecx
+.L_fixup_4x8b_copy:
+ shll $6,%ecx
addl %ecx,%edx
- jmp 60f
-40: lea (%rdx,%rcx,8),%rdx
- jmp 60f
-50: movl %ecx,%edx
-60: sfence
+ jmp .L_fixup_handle_tail
+.L_fixup_8b_copy:
+ lea (%rdx,%rcx,8),%rdx
+ jmp .L_fixup_handle_tail
+.L_fixup_4b_copy:
+ lea (%rdx,%rcx,4),%rdx
+ jmp .L_fixup_handle_tail
+.L_fixup_1b_copy:
+ movl %ecx,%edx
+.L_fixup_handle_tail:
+ sfence
jmp copy_user_handle_tail
.previous
- _ASM_EXTABLE(1b,30b)
- _ASM_EXTABLE(2b,30b)
- _ASM_EXTABLE(3b,30b)
- _ASM_EXTABLE(4b,30b)
- _ASM_EXTABLE(5b,30b)
- _ASM_EXTABLE(6b,30b)
- _ASM_EXTABLE(7b,30b)
- _ASM_EXTABLE(8b,30b)
- _ASM_EXTABLE(9b,30b)
- _ASM_EXTABLE(10b,30b)
- _ASM_EXTABLE(11b,30b)
- _ASM_EXTABLE(12b,30b)
- _ASM_EXTABLE(13b,30b)
- _ASM_EXTABLE(14b,30b)
- _ASM_EXTABLE(15b,30b)
- _ASM_EXTABLE(16b,30b)
- _ASM_EXTABLE(18b,40b)
- _ASM_EXTABLE(19b,40b)
- _ASM_EXTABLE(21b,50b)
- _ASM_EXTABLE(22b,50b)
+ _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(20b,.L_fixup_8b_copy)
+ _ASM_EXTABLE(21b,.L_fixup_8b_copy)
+ _ASM_EXTABLE(30b,.L_fixup_4b_copy)
+ _ASM_EXTABLE(31b,.L_fixup_4b_copy)
+ _ASM_EXTABLE(40b,.L_fixup_1b_copy)
+ _ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
if (!pmd_k)
return -1;
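+	/* A huge pmd maps this address directly; there is no pte level
+	 * to inspect and the mapping is present, so the fault is handled. */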
+ if (pmd_huge(*pmd_k))
+ return 0;
+
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
* 64-bit:
*
* Handle a fault on the vmalloc area
- *
- * This assumes no large pages in there.
*/
static noinline int vmalloc_fault(unsigned long address)
{
if (pud_none(*pud_ref))
return -1;
- if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+ if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
BUG();
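+	/* A huge pud maps the range directly; no lower levels exist. */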
+ if (pud_huge(*pud))
+ return 0;
+
pmd = pmd_offset(pud, address);
pmd_ref = pmd_offset(pud_ref, address);
if (pmd_none(*pmd_ref))
return -1;
- if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+ if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
BUG();
+ if (pmd_huge(*pmd))
+ return 0;
+
pte_ref = pte_offset_kernel(pmd_ref, address);
if (!pte_present(*pte_ref))
return -1;
pgd_t *pgd;
pgprot_t mask_set;
pgprot_t mask_clr;
- int numpages;
+ unsigned long numpages;
int flags;
unsigned long pfn;
unsigned force_split : 1;
* CPA operation. Either a large page has been
* preserved or a single page update happened.
*/
- BUG_ON(cpa->numpages > numpages);
+ BUG_ON(cpa->numpages > numpages || !cpa->numpages);
numpages -= cpa->numpages;
if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
cpa->curpage++;
return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
+static inline unsigned get_max_io_size(struct request_queue *q,
+ struct bio *bio)
+{
+ unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+ unsigned mask = queue_logical_block_size(q) - 1;
+
+ /* aligned to logical block size */
+ sectors &= ~(mask >> 9);
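+	/* e.g. 4096-byte logical blocks: mask = 0xfff, mask >> 9 = 7,
+	 * so sectors is rounded down to a multiple of 8 */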
+
+ return sectors;
+}
+
static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *bio,
struct bio_set *bs,
unsigned front_seg_size = bio->bi_seg_front_size;
bool do_split = true;
struct bio *new = NULL;
+ const unsigned max_sectors = get_max_io_size(q, bio);
bio_for_each_segment(bv, bio, iter) {
- if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
- goto split;
-
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
goto split;
+ if (sectors + (bv.bv_len >> 9) > max_sectors) {
+ /*
+ * Consider this a new segment if we're splitting in
+ * the middle of this vector.
+ */
+ if (nsegs < queue_max_segments(q) &&
+ sectors < max_sectors) {
+ nsegs++;
+ sectors = max_sectors;
+ }
+ if (sectors)
+ goto split;
+			/* Make this single bvec the first segment */
+ }
+
if (bvprvp && blk_queue_cluster(q)) {
if (seg_size + bv.bv_len > queue_max_segment_size(q))
goto new_segment;
goto unlock;
type->ops->owner = THIS_MODULE;
+ if (type->ops_nokey)
+ type->ops_nokey->owner = THIS_MODULE;
node->type = type;
list_add(&node->list, &alg_types);
err = 0;
}
EXPORT_SYMBOL_GPL(af_alg_release);
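+/*
+ * Drop a child socket's hold on its parent: nokey children each pin
+ * the parent individually, keyed children share a single reference;
+ * the parent is put once the last hold is gone.
+ */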
+void af_alg_release_parent(struct sock *sk)
+{
+ struct alg_sock *ask = alg_sk(sk);
+ unsigned int nokey = ask->nokey_refcnt;
+ bool last = nokey && !ask->refcnt;
+
+ sk = ask->parent;
+ ask = alg_sk(sk);
+
+ lock_sock(sk);
+ ask->nokey_refcnt -= nokey;
+ if (!last)
+ last = !--ask->refcnt;
+ release_sock(sk);
+
+ if (last)
+ sock_put(sk);
+}
+EXPORT_SYMBOL_GPL(af_alg_release_parent);
+
static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
const u32 forbidden = CRYPTO_ALG_INTERNAL;
struct sockaddr_alg *sa = (void *)uaddr;
const struct af_alg_type *type;
void *private;
+ int err;
if (sock->state == SS_CONNECTED)
return -EINVAL;
return PTR_ERR(private);
}
+ err = -EBUSY;
lock_sock(sk);
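+	/* Re-binding is only allowed while no child sockets exist. */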
+ if (ask->refcnt | ask->nokey_refcnt)
+ goto unlock;
swap(ask->type, type);
swap(ask->private, private);
+ err = 0;
+
+unlock:
release_sock(sk);
alg_do_release(type, private);
- return 0;
+ return err;
}
static int alg_setkey(struct sock *sk, char __user *ukey,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
- int err = -ENOPROTOOPT;
+ int err = -EBUSY;
lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock;
+
type = ask->type;
+ err = -ENOPROTOOPT;
if (level != SOL_ALG || !type)
goto unlock;
struct alg_sock *ask = alg_sk(sk);
const struct af_alg_type *type;
struct sock *sk2;
+ unsigned int nokey;
int err;
lock_sock(sk);
security_sk_clone(sk, sk2);
err = type->accept(ask->private, sk2);
- if (err) {
- sk_free(sk2);
+
+ nokey = err == -ENOKEY;
+ if (nokey && type->accept_nokey)
+ err = type->accept_nokey(ask->private, sk2);
+
+ if (err)
goto unlock;
- }
sk2->sk_family = PF_ALG;
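+	/* Each nokey child pins the parent individually; keyed children
+	 * share one reference taken by the first of them. */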
- sock_hold(sk);
+ if (nokey || !ask->refcnt++)
+ sock_hold(sk);
+ ask->nokey_refcnt += nokey;
alg_sk(sk2)->parent = sk;
alg_sk(sk2)->type = type;
+ alg_sk(sk2)->nokey_refcnt = nokey;
newsock->ops = type->ops;
newsock->state = SS_CONNECTED;
+ if (nokey)
+ newsock->ops = type->ops_nokey;
+
err = 0;
unlock:
struct ahash_alg *alg = crypto_ahash_alg(hash);
hash->setkey = ahash_nosetkey;
+ hash->has_setkey = false;
hash->export = ahash_no_export;
hash->import = ahash_no_import;
hash->finup = alg->finup ?: ahash_def_finup;
hash->digest = alg->digest;
- if (alg->setkey)
+ if (alg->setkey) {
hash->setkey = alg->setkey;
+ hash->has_setkey = true;
+ }
if (alg->export)
hash->export = alg->export;
if (alg->import)
struct ahash_request req;
};
+struct algif_hash_tfm {
+ struct crypto_ahash *hash;
+ bool has_key;
+};
+
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
lock_sock(sk);
if (!ctx->more) {
- err = crypto_ahash_init(&ctx->req);
+ err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
+ &ctx->completion);
if (err)
goto unlock;
}
} else {
if (!ctx->more) {
err = crypto_ahash_init(&ctx->req);
+ err = af_alg_wait_for_completion(err, &ctx->completion);
if (err)
goto unlock;
}
.accept = hash_accept,
};
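+/*
+ * Runs before every nokey operation: if a key has since been set on
+ * the parent tfm, promote this socket to a keyed one (taking a keyed
+ * reference on the parent and dropping the nokey hold); otherwise
+ * fail with -ENOKEY.
+ */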
+static int hash_check_key(struct socket *sock)
+{
+ int err = 0;
+ struct sock *psk;
+ struct alg_sock *pask;
+ struct algif_hash_tfm *tfm;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+
+ lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock_child;
+
+ psk = ask->parent;
+ pask = alg_sk(ask->parent);
+ tfm = pask->private;
+
+ err = -ENOKEY;
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+ if (!tfm->has_key)
+ goto unlock;
+
+ if (!pask->refcnt++)
+ sock_hold(psk);
+
+ ask->refcnt = 1;
+ sock_put(psk);
+
+ err = 0;
+
+unlock:
+ release_sock(psk);
+unlock_child:
+ release_sock(sk);
+
+ return err;
+}
+
+static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t size)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_sendmsg(sock, msg, size);
+}
+
+static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_sendpage(sock, page, offset, size, flags);
+}
+
+static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_recvmsg(sock, msg, ignored, flags);
+}
+
+static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
+ int flags)
+{
+ int err;
+
+ err = hash_check_key(sock);
+ if (err)
+ return err;
+
+ return hash_accept(sock, newsock, flags);
+}
+
+static struct proto_ops algif_hash_ops_nokey = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .setsockopt = sock_no_setsockopt,
+ .poll = sock_no_poll,
+
+ .release = af_alg_release,
+ .sendmsg = hash_sendmsg_nokey,
+ .sendpage = hash_sendpage_nokey,
+ .recvmsg = hash_recvmsg_nokey,
+ .accept = hash_accept_nokey,
+};
+
static void *hash_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_ahash(name, type, mask);
+ struct algif_hash_tfm *tfm;
+ struct crypto_ahash *hash;
+
+ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+ if (!tfm)
+ return ERR_PTR(-ENOMEM);
+
+ hash = crypto_alloc_ahash(name, type, mask);
+ if (IS_ERR(hash)) {
+ kfree(tfm);
+ return ERR_CAST(hash);
+ }
+
+ tfm->hash = hash;
+
+ return tfm;
}
static void hash_release(void *private)
{
- crypto_free_ahash(private);
+ struct algif_hash_tfm *tfm = private;
+
+ crypto_free_ahash(tfm->hash);
+ kfree(tfm);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_ahash_setkey(private, key, keylen);
+ struct algif_hash_tfm *tfm = private;
+ int err;
+
+ err = crypto_ahash_setkey(tfm->hash, key, keylen);
+ tfm->has_key = !err;
+
+ return err;
}
static void hash_sock_destruct(struct sock *sk)
af_alg_release_parent(sk);
}
-static int hash_accept_parent(void *private, struct sock *sk)
+static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
struct hash_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
- unsigned ds = crypto_ahash_digestsize(private);
+ struct algif_hash_tfm *tfm = private;
+ struct crypto_ahash *hash = tfm->hash;
+ unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
+ unsigned ds = crypto_ahash_digestsize(hash);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
ask->private = ctx;
- ahash_request_set_tfm(&ctx->req, private);
+ ahash_request_set_tfm(&ctx->req, hash);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
return 0;
}
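+/*
+ * Keyed accept path: refuse to create a child socket when the
+ * algorithm has a setkey operation but no key has been set yet.
+ */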
+static int hash_accept_parent(void *private, struct sock *sk)
+{
+ struct algif_hash_tfm *tfm = private;
+
+ if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
+ return -ENOKEY;
+
+ return hash_accept_parent_nokey(private, sk);
+}
+
static const struct af_alg_type algif_type_hash = {
.bind = hash_bind,
.release = hash_release,
.setkey = hash_setkey,
.accept = hash_accept_parent,
+ .accept_nokey = hash_accept_parent_nokey,
.ops = &algif_hash_ops,
+ .ops_nokey = &algif_hash_ops_nokey,
.name = "hash",
.owner = THIS_MODULE
};
struct scatterlist sg[0];
};
+struct skcipher_tfm {
+ struct crypto_skcipher *skcipher;
+ bool has_key;
+};
+
struct skcipher_ctx {
struct list_head tsgl;
struct af_alg_sgl rsgl;
struct skcipher_async_rsgl first_sgl;
struct list_head list;
struct scatterlist *tsg;
- char iv[];
+ atomic_t *inflight;
+ struct skcipher_request req;
};
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
- crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
- struct sock *sk = req->data;
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+ struct skcipher_async_req *sreq = req->data;
struct kiocb *iocb = sreq->iocb;
- atomic_dec(&ctx->inflight);
+ atomic_dec(sreq->inflight);
skcipher_free_async_sgls(sreq);
- kfree(req);
+ kzfree(sreq);
iocb->ki_complete(iocb, err, err);
}
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
sg = sgl->sg;
- sg_unmark_end(sg + sgl->cur);
+ if (sgl->cur)
+ sg_unmark_end(sg + sgl->cur - 1);
do {
i = sgl->cur;
plen = min_t(int, len, PAGE_SIZE);
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
- unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
- unsigned int reqlen = sizeof(struct skcipher_async_req) +
- GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+ unsigned int txbufs = 0, len = 0, tx_nents;
+ unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
int err = -ENOMEM;
bool mark = false;
+ char *iv;
- lock_sock(sk);
- req = kmalloc(reqlen, GFP_KERNEL);
- if (unlikely(!req))
- goto unlock;
+ sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+ if (unlikely(!sreq))
+ goto out;
- sreq = GET_SREQ(req, ctx);
+ req = &sreq->req;
+ iv = (char *)(req + 1) + reqsize;
sreq->iocb = msg->msg_iocb;
- memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
INIT_LIST_HEAD(&sreq->list);
+ sreq->inflight = &ctx->inflight;
+
+ lock_sock(sk);
+ tx_nents = skcipher_all_sg_nents(ctx);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
- if (unlikely(!sreq->tsg)) {
- kfree(req);
+ if (unlikely(!sreq->tsg))
goto unlock;
- }
sg_init_table(sreq->tsg, tx_nents);
- memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
- skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- skcipher_async_cb, sk);
+ memcpy(iv, ctx->iv, ivsize);
+ skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ skcipher_async_cb, sreq);
while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
sg_mark_end(sreq->tsg + txbufs - 1);
skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
- len, sreq->iv);
+ len, iv);
err = ctx->enc ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
+ sreq = NULL;
goto unlock;
}
free:
skcipher_free_async_sgls(sreq);
- kfree(req);
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
+ kzfree(sreq);
+out:
return err;
}
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
- unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
- &ctx->req));
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
+ unsigned bs = crypto_skcipher_blocksize(tfm);
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
int err = -EAGAIN;
lock_sock(sk);
while (msg_data_left(msg)) {
- sgl = list_first_entry(&ctx->tsgl,
- struct skcipher_sg_list, list);
- sg = sgl->sg;
-
- while (!sg->length)
- sg++;
-
if (!ctx->used) {
err = skcipher_wait_for_data(sk, flags);
if (err)
if (!used)
goto free;
+ sgl = list_first_entry(&ctx->tsgl,
+ struct skcipher_sg_list, list);
+ sg = sgl->sg;
+
+ while (!sg->length)
+ sg++;
+
skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
ctx->iv);
.poll = skcipher_poll,
};
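+/*
+ * As in algif_hash: promote a nokey socket to a keyed one once a key
+ * has been set on the parent tfm, or fail with -ENOKEY.
+ */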
+static int skcipher_check_key(struct socket *sock)
+{
+ int err = 0;
+ struct sock *psk;
+ struct alg_sock *pask;
+ struct skcipher_tfm *tfm;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+
+ lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock_child;
+
+ psk = ask->parent;
+ pask = alg_sk(ask->parent);
+ tfm = pask->private;
+
+ err = -ENOKEY;
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+ if (!tfm->has_key)
+ goto unlock;
+
+ if (!pask->refcnt++)
+ sock_hold(psk);
+
+ ask->refcnt = 1;
+ sock_put(psk);
+
+ err = 0;
+
+unlock:
+ release_sock(psk);
+unlock_child:
+ release_sock(sk);
+
+ return err;
+}
+
+static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t size)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_sendmsg(sock, msg, size);
+}
+
+static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_sendpage(sock, page, offset, size, flags);
+}
+
+static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+{
+ int err;
+
+ err = skcipher_check_key(sock);
+ if (err)
+ return err;
+
+ return skcipher_recvmsg(sock, msg, ignored, flags);
+}
+
+static struct proto_ops algif_skcipher_ops_nokey = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+
+ .release = af_alg_release,
+ .sendmsg = skcipher_sendmsg_nokey,
+ .sendpage = skcipher_sendpage_nokey,
+ .recvmsg = skcipher_recvmsg_nokey,
+ .poll = skcipher_poll,
+};
+
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_skcipher(name, type, mask);
+ struct skcipher_tfm *tfm;
+ struct crypto_skcipher *skcipher;
+
+ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+ if (!tfm)
+ return ERR_PTR(-ENOMEM);
+
+ skcipher = crypto_alloc_skcipher(name, type, mask);
+ if (IS_ERR(skcipher)) {
+ kfree(tfm);
+ return ERR_CAST(skcipher);
+ }
+
+ tfm->skcipher = skcipher;
+
+ return tfm;
}
static void skcipher_release(void *private)
{
- crypto_free_skcipher(private);
+ struct skcipher_tfm *tfm = private;
+
+ crypto_free_skcipher(tfm->skcipher);
+ kfree(tfm);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_skcipher_setkey(private, key, keylen);
+ struct skcipher_tfm *tfm = private;
+ int err;
+
+ err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
+ tfm->has_key = !err;
+
+ return err;
}
static void skcipher_wait(struct sock *sk)
af_alg_release_parent(sk);
}
-static int skcipher_accept_parent(void *private, struct sock *sk)
+static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);
+ struct skcipher_tfm *tfm = private;
+ struct crypto_skcipher *skcipher = tfm->skcipher;
+ unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
+ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_skcipher_ivsize(private));
+ memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));
INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;
ask->private = ctx;
- skcipher_request_set_tfm(&ctx->req, private);
- skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ skcipher_request_set_tfm(&ctx->req, skcipher);
+ skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
sk->sk_destruct = skcipher_sock_destruct;
return 0;
}
+static int skcipher_accept_parent(void *private, struct sock *sk)
+{
+ struct skcipher_tfm *tfm = private;
+
+ if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
+ return -ENOKEY;
+
+ return skcipher_accept_parent_nokey(private, sk);
+}
+
static const struct af_alg_type algif_type_skcipher = {
.bind = skcipher_bind,
.release = skcipher_release,
.setkey = skcipher_setkey,
.accept = skcipher_accept_parent,
+ .accept_nokey = skcipher_accept_parent_nokey,
.ops = &algif_skcipher_ops,
+ .ops_nokey = &algif_skcipher_ops_nokey,
.name = "skcipher",
.owner = THIS_MODULE
};
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
MODULE_ALIAS_CRYPTO("crc32c-generic");
-MODULE_SOFTDEP("pre: crc32c");
if (link->dump == NULL)
return -EINVAL;
+ down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;
.done = link->done,
.min_dump_alloc = dump_alloc,
};
- return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+ err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
+ up_read(&crypto_alg_sem);
+
+ return err;
}
err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
+ crt->setkey = shash_async_setkey;
+
+ crt->has_setkey = alg->setkey != shash_no_setkey;
- if (alg->setkey)
- crt->setkey = shash_async_setkey;
if (alg->export)
crt->export = shash_async_export;
if (alg->import)
skcipher->decrypt = skcipher_decrypt_blkcipher;
skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
+ skcipher->has_setkey = calg->cra_blkcipher.max_keysize;
return 0;
}
skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
sizeof(struct ablkcipher_request);
+ skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;
return 0;
}
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
}
}
- /* fabricate port_map from cap.nr_ports */
- if (!port_map) {
+ /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+ if (!port_map && vers < 0x10300) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
return ret;
ret = dev_pm_domain_attach(_dev, true);
- if (ret != -EPROBE_DEFER && drv->probe) {
- ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
+ if (ret != -EPROBE_DEFER) {
+ if (drv->probe) {
+ ret = drv->probe(dev);
+ if (ret)
+ dev_pm_domain_detach(_dev, true);
+ } else {
+ /* don't fail if just dev_pm_domain_attach failed */
+ ret = 0;
+ }
}
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
*/
static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
{
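+	/*
+	 * Streams may be allocated from the zram I/O path (e.g. a swap
+	 * write under memory pressure), so use GFP_NOIO to avoid
+	 * recursing into reclaim through this very device.
+	 */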
- struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
+ struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_NOIO);
if (!zstrm)
return NULL;
* allocate 2 pages. 1 for compressed data, plus 1 extra for the
* case when compressed size is larger than the original one
*/
- zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+ zstrm->buffer = (void *)__get_free_pages(GFP_NOIO | __GFP_ZERO, 1);
if (!zstrm->private || !zstrm->buffer) {
zcomp_strm_free(comp, zstrm);
zstrm = NULL;
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lz4.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "zcomp_lz4.h"
static void *zcomp_lz4_create(void)
{
- return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
+ void *ret;
+
+ /*
+	 * This function can be called from the swapout/fs write path,
+	 * so we cannot use GFP_FS|IO. It also assumes that zram
+	 * initialization has already allocated at least one stream, so
+	 * we make no great effort to allocate more streams here: a
+	 * single default stream works well enough without multiple
+	 * streams. That is why we use NORETRY | NOWARN.
+ */
+ ret = kzalloc(LZ4_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOWARN);
+ if (!ret)
+ ret = __vmalloc(LZ4_MEM_COMPRESS,
+ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ return ret;
}
static void zcomp_lz4_destroy(void *private)
{
- kfree(private);
+ kvfree(private);
}
static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lzo.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "zcomp_lzo.h"
static void *lzo_create(void)
{
- return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ void *ret;
+
+ /*
+	 * This function can be called from the swapout/fs write path,
+	 * so we cannot use GFP_FS|IO. It also assumes that zram
+	 * initialization has already allocated at least one stream, so
+	 * we make no great effort to allocate more streams here: a
+	 * single default stream works well enough without multiple
+	 * streams. That is why we use NORETRY | NOWARN.
+ */
+ ret = kzalloc(LZO1X_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOWARN);
+ if (!ret)
+ ret = __vmalloc(LZO1X_MEM_COMPRESS,
+ GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_HIGHMEM,
+ PAGE_KERNEL);
+ return ret;
}
static void lzo_destroy(void *private)
{
- kfree(private);
+ kvfree(private);
}
static int lzo_compress(const unsigned char *src, unsigned char *dst,
pr_info("Removed device: %s\n", zram->disk->disk_name);
- idr_remove(&zram_index_idr, zram->disk->first_minor);
blk_cleanup_queue(zram->disk->queue);
del_gendisk(zram->disk);
put_disk(zram->disk);
mutex_lock(&zram_index_mutex);
zram = idr_find(&zram_index_idr, dev_id);
- if (zram)
+ if (zram) {
ret = zram_remove(zram);
- else
+		if (!ret)
+			idr_remove(&zram_index_idr, dev_id);
+ } else {
ret = -ENODEV;
+ }
mutex_unlock(&zram_index_mutex);
return ret ? ret : count;
dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
- clk_disable_unprepare(dd->iclk);
+ clk_disable(dd->iclk);
if (req->base.complete)
req->base.complete(&req->base, err);
{
int err;
- err = clk_prepare_enable(dd->iclk);
+ err = clk_enable(dd->iclk);
if (err)
return err;
dev_info(dd->dev,
"version: 0x%x\n", dd->hw_version);
- clk_disable_unprepare(dd->iclk);
+ clk_disable(dd->iclk);
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
goto res_err;
}
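+	/* Prepare the clock once at probe time; the request paths then
+	 * only call clk_enable()/clk_disable(), which unlike
+	 * clk_prepare() are safe in atomic context. */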
+ err = clk_prepare(sha_dd->iclk);
+ if (err)
+ goto res_err;
+
atmel_sha_hw_version_init(sha_dd);
atmel_sha_get_cap(sha_dd);
if (IS_ERR(pdata)) {
dev_err(&pdev->dev, "platform data not available\n");
err = PTR_ERR(pdata);
- goto res_err;
+ goto iclk_unprepare;
}
}
if (!pdata->dma_slave) {
err = -ENXIO;
- goto res_err;
+ goto iclk_unprepare;
}
err = atmel_sha_dma_init(sha_dd, pdata);
if (err)
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
+iclk_unprepare:
+ clk_unprepare(sha_dd->iclk);
res_err:
tasklet_kill(&sha_dd->done_task);
sha_dd_err:
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
- iounmap(sha_dd->io_base);
-
- clk_put(sha_dd->iclk);
-
- if (sha_dd->irq >= 0)
- free_irq(sha_dd->irq, sha_dd);
+ clk_unprepare(sha_dd->iclk);
return 0;
}
* long pointers in master configuration register
*/
clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
- MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
- MCFGR_LONG_PTR : 0));
+ MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE |
+ (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
/*
 * Read the Compile Time parameters and SCFGR to determine
return -ENOMEM;
dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
- if (!dma->cache_pool)
+ if (!dma->padding_pool)
return -ENOMEM;
cesa->dma = dma;
.import = sun4i_hash_import_md5,
.halg = {
.digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct md5_state),
.base = {
.cra_name = "md5",
.cra_driver_name = "md5-sun4i-ss",
.import = sun4i_hash_import_sha1,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-sun4i-ss",
break;
}
- td->inputmode = field->report->id;
- td->inputmode_index = usage->usage_index;
+ if (td->inputmode < 0) {
+ td->inputmode = field->report->id;
+ td->inputmode_index = usage->usage_index;
+ } else {
+ /*
+ * Some elan panels wrongly declare 2 input mode
+ * features, and silently ignore when we set the
+ * value in the second field. Skip the second feature
+ * and hope for the best.
+ */
+ dev_info(&hdev->dev,
+ "Ignoring the extra HID_DG_INPUTMODE\n");
+ }
break;
case HID_DG_CONTACTMAX:
struct usbhid_device *usbhid = hid->driver_data;
int unplug = 0, status = urb->status;
- spin_lock(&usbhid->lock);
-
switch (status) {
case 0: /* success */
if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
+ spin_lock(&usbhid->lock);
+
if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
} else {
hwlock = radix_tree_deref_slot(slot);
if (unlikely(!hwlock))
continue;
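+		/* An indirect pointer means the tree was modified under
+		 * us; retry this slot. */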
+ if (radix_tree_is_indirect_ptr(hwlock)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
if (hwlock->bank->dev->of_node == args.np) {
ret = 0;
config STK8BA50
tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
depends on I2C
+ depends on IIO_TRIGGER
help
Say yes here to get support for the Sensortek STK8BA50 3-axis
accelerometer.
config VF610_ADC
tristate "Freescale vf610 ADC driver"
depends on OF
+ depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
goto error_kfifo_free;
indio_dev->setup_ops = setup_ops;
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+ indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
return 0;
data->client = client;
indio_dev->dev.parent = &client->dev;
+ indio_dev->name = id->name;
indio_dev->info = &mcp4725_info;
indio_dev->channels = &mcp4725_channel;
indio_dev->num_channels = 1;
return -ENOMEM;
rx = adis->buffer;
- tx = rx + indio_dev->scan_bytes;
+ tx = rx + scan_count;
spi_message_init(&adis->msg);
void iio_channel_release(struct iio_channel *channel)
{
+ if (!channel)
+ return;
iio_device_put(channel->indio_dev);
kfree(channel);
}
.realbits = 32,
.storagebits = 32,
},
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ /* _RAW is here for backward ABI compatibility */
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_PROCESSED),
},
};
s32 temp_val;
int ret;
- if (mask != IIO_CHAN_INFO_RAW)
+ if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
return -EINVAL;
/* we support only illumination (_ALI) so far. */
{500000, 2000000}
};
-static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
+static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
int len, int val, int val2)
{
int i, freq;
*val = ret >> 6;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OFFSET:
- *val = 605;
+ *val = -605;
*val2 = 750000;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SCALE:
input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
ETP_WMAX_V2, 0, 0);
}
- input_mt_init_slots(dev, 2, 0);
+ input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
break;
priv->abs_dev = abs_dev;
psmouse->private = priv;
- input_set_capability(rel_dev, EV_REL, REL_WHEEL);
-
/* Set up and register absolute device */
snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
psmouse->ps2dev.serio->phys);
abs_dev->id.version = psmouse->model;
abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
- error = input_register_device(priv->abs_dev);
- if (error)
- goto init_fail;
-
/* Set absolute device capabilities */
input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
+ error = input_register_device(priv->abs_dev);
+ if (error)
+ goto init_fail;
+
+ /* Add wheel capability to the relative device */
+ input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+
psmouse->protocol_handler = vmmouse_process_byte;
psmouse->disconnect = vmmouse_disconnect;
psmouse->reconnect = vmmouse_reconnect;
DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
},
},
+ {
+ /* Fujitsu Lifebook U745 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
+ },
+ },
{
/* Fujitsu T70H */
.matches = {
/* Update device table */
set_dte_entry(dev_data->devid, domain, ats);
if (alias != dev_data->devid)
- set_dte_entry(dev_data->devid, domain, ats);
+ set_dte_entry(alias, domain, ats);
device_flush_dte(dev_data);
}
raw_spin_lock_irqsave(&iommu->register_lock, flags);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_QIES))
goto end;
{
struct pci_dev *pdev;
- if (dev_is_pci(info->dev))
+ if (!dev_is_pci(info->dev))
return;
pdev = to_pci_dev(info->dev);
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+ struct intel_svm_dev *sdev;
+ /* This might end up being called from exit_mmap(), *before* the page
+ * tables are cleared. And __mmu_notifier_release() will delete us from
+ * the list of notifiers so that our invalidate_range() callback doesn't
+ * get called when the page tables are cleared. So we need to protect
+ * against hardware accessing those page tables.
+ *
+ * We do it by clearing the entry in the PASID table and then flushing
+ * the IOTLB and the PASID table caches. This might upset hardware;
+ * perhaps we'll want to point the PASID to a dummy PGD (like the zero
+ * page) so that we end up taking a fault that the hardware really
+ * *has* to handle gracefully without affecting other processes.
+ */
svm->iommu->pasid_table[svm->pasid].val = 0;
+ wmb();
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ intel_flush_pasid_dev(svm, sdev, svm->pasid);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ }
+ rcu_read_unlock();
- /* There's no need to do any flush because we can't get here if there
- * are any devices left anyway. */
- WARN_ON(!list_empty(&svm->devs));
}
static const struct mmu_notifier_ops intel_mmuops = {
goto out;
}
iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
- mm = NULL;
} else
iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
wmb();
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
- mmu_notifier_unregister(&svm->notifier, svm->mm);
idr_remove(&svm->iommu->pasid_idr, svm->pasid);
if (svm->mm)
- mmput(svm->mm);
+ mmu_notifier_unregister(&svm->notifier, svm->mm);
+
/* We mandate that no page faults may be outstanding
* for the PASID when intel_svm_unbind_mm() is called.
* If that is not obeyed, subtle errors will happen.
struct intel_svm *svm = NULL;
int head, tail, handled = 0;
+ /* Clear PPR bit before reading head/tail registers, to
+ * ensure that we get a new interrupt if needed. */
+ writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
+
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
while (head != tail) {
* any faults on kernel addresses. */
if (!svm->mm)
goto bad_req;
+ /* If the mm is already defunct, don't handle faults. */
+ if (!atomic_inc_not_zero(&svm->mm->mm_users))
+ goto bad_req;
down_read(&svm->mm->mmap_sem);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
result = QI_RESP_SUCCESS;
invalid:
up_read(&svm->mm->mmap_sem);
+ mmput(svm->mm);
bad_req:
/* Accounting for major/minor faults? */
rcu_read_lock();
raw_spin_lock_irqsave(&iommu->register_lock, flags);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_IRES))
goto end;
arm_lpae_iopte *start, *end;
unsigned long table_size;
- /* Only leaf entries at the last level */
- if (lvl == ARM_LPAE_MAX_LEVELS - 1)
- return;
-
if (lvl == ARM_LPAE_START_LVL(data))
table_size = data->pgd_size;
else
table_size = 1UL << data->pg_shift;
start = ptep;
- end = (void *)ptep + table_size;
+
+ /* Only leaf entries at the last level */
+ if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+ end = ptep;
+ else
+ end = (void *)ptep + table_size;
while (ptep != end) {
arm_lpae_iopte pte = *ptep++;
}
EXPORT_SYMBOL(md_integrity_register);
-/* Disable data integrity if non-capable/non-matching disk is being added */
-void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
+/*
+ * Attempt to add an rdev, but only if it is consistent with the current
+ * integrity profile
+ */
+int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
struct blk_integrity *bi_rdev;
struct blk_integrity *bi_mddev;
+ char name[BDEVNAME_SIZE];
if (!mddev->gendisk)
- return;
+ return 0;
bi_rdev = bdev_get_integrity(rdev->bdev);
bi_mddev = blk_get_integrity(mddev->gendisk);
if (!bi_mddev) /* nothing to do */
- return;
- if (rdev->raid_disk < 0) /* skip spares */
- return;
- if (bi_rdev && blk_integrity_compare(mddev->gendisk,
- rdev->bdev->bd_disk) >= 0)
- return;
- WARN_ON_ONCE(!mddev->suspended);
- printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
- blk_integrity_unregister(mddev->gendisk);
+ return 0;
+
+ if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
+ printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
+ mdname(mddev), bdevname(rdev->bdev, name));
+ return -ENXIO;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(md_integrity_add_rdev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(struct mddev *mddev);
extern int md_integrity_register(struct mddev *mddev);
-extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
+extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void mddev_init(struct mddev *mddev);
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
+ err = md_integrity_add_rdev(rdev, mddev);
+ if (err)
+ break;
spin_lock_irq(&conf->device_lock);
mddev->degraded--;
rdev->raid_disk = path;
spin_unlock_irq(&conf->device_lock);
rcu_assign_pointer(p->rdev, rdev);
err = 0;
- mddev_suspend(mddev);
- md_integrity_add_rdev(rdev, mddev);
- mddev_resume(mddev);
break;
}
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
+ if (md_integrity_add_rdev(rdev, mddev))
+ return -ENXIO;
+
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
break;
}
}
- mddev_suspend(mddev);
- md_integrity_add_rdev(rdev, mddev);
- mddev_resume(mddev);
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
return -EINVAL;
+ if (md_integrity_add_rdev(rdev, mddev))
+ return -ENXIO;
+
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
rcu_assign_pointer(p->rdev, rdev);
break;
}
- mddev_suspend(mddev);
- md_integrity_add_rdev(rdev, mddev);
- mddev_resume(mddev);
if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
{ "ir_rx_z8f0811_hdpvr", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
static struct i2c_driver ir_kbd_driver = {
.driver = {
static int alsa_device_exit(struct saa7134_dev *dev)
{
+ if (!snd_saa7134_cards[dev->nr])
+ return 1;
snd_card_free(snd_saa7134_cards[dev->nr]);
snd_saa7134_cards[dev->nr] = NULL;
int idx;
for (idx = 0; idx < SNDRV_CARDS; idx++) {
- snd_card_free(snd_saa7134_cards[idx]);
+ if (snd_saa7134_cards[idx])
+ snd_card_free(snd_saa7134_cards[idx]);
}
saa7134_dmasound_init = NULL;
return ret;
}
+ if (!mtd->name && mtd->dev.parent)
+ mtd->name = dev_name(mtd->dev.parent);
+
/* Set the default functions */
nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
case COUNTRY_CODE_SPAIN:
case COUNTRY_CODE_FRANCE:
case COUNTRY_CODE_ISRAEL:
- case COUNTRY_CODE_WORLD_WIDE_13:
return &rtl_regdom_12_13;
case COUNTRY_CODE_MKK:
case COUNTRY_CODE_MKK1:
return &rtl_regdom_14_60_64;
case COUNTRY_CODE_GLOBAL_DOMAIN:
return &rtl_regdom_14;
+ case COUNTRY_CODE_WORLD_WIDE_13:
case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
return &rtl_regdom_12_13_5g_all;
default:
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
rtl8821ae_bt_reg_init(hw);
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
rtlpriv->dm.dm_initialgain_enable = 1;
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
- rtlpci->msi_support = rtlpriv->cfg->mod_params->int_clear;
+ rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
+ rtlpriv->cfg->mod_params->disable_watchdog =
+ rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
rtlpriv->psc.reg_max_lps_awakeintvl = 5;
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
/* for ASPM, you can close aspm through
* set const_support_pciaspm = 0
static inline void wl1271_power_off(struct wl1271 *wl)
{
- int ret;
+ int ret = 0;
if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
return;
- ret = wl->if_ops->power(wl->dev, false);
+ if (wl->if_ops->power)
+ ret = wl->if_ops->power(wl->dev, false);
if (!ret)
clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static inline int wl1271_power_on(struct wl1271 *wl)
{
- int ret = wl->if_ops->power(wl->dev, true);
+ int ret = 0;
+
+ if (wl->if_ops->power)
+ ret = wl->if_ops->power(wl->dev, true);
if (ret == 0)
set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
*/
#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
-#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
+/* Maximum number of SPI write chunks */
+#define WSPI_MAX_NUM_OF_CHUNKS \
+ ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
+
struct wl12xx_spi_glue {
struct device *dev;
void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
- struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
+ /* SPI write buffers - 2 for each chunk */
+ struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
struct spi_message m;
- u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
+ u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
u32 *cmd;
u32 chunk_len;
int i;
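The extra chunk accounts for the aggregation buffer not being an exact multiple of the chunk size. Assuming 4 KiB pages and the wlcore glue's WSPI_MAX_CHUNK_SIZE of 4092 bytes:

	SPI_AGGR_BUFFER_SIZE = 4 * 4096 = 16384
	16384 / 4092 = 4 full chunks + 16 bytes remainder

so a maximal write splits into 5 chunks, along the lines of (a sketch, not the driver's exact loop):

	int chunks = len / WSPI_MAX_CHUNK_SIZE + !!(len % WSPI_MAX_CHUNK_SIZE);

The old commands[] array, sized by the truncating division alone, was one element short for such a write.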
return dev ? dev->type == &namespace_io_device_type : false;
}
+static int is_uuid_busy(struct device *dev, void *data)
+{
+ u8 *uuid1 = data, *uuid2 = NULL;
+
+ if (is_namespace_pmem(dev)) {
+ struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
+
+ uuid2 = nspm->uuid;
+ } else if (is_namespace_blk(dev)) {
+ struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
+
+ uuid2 = nsblk->uuid;
+ } else if (is_nd_btt(dev)) {
+ struct nd_btt *nd_btt = to_nd_btt(dev);
+
+ uuid2 = nd_btt->uuid;
+ } else if (is_nd_pfn(dev)) {
+ struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+ uuid2 = nd_pfn->uuid;
+ }
+
+ if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int is_namespace_uuid_busy(struct device *dev, void *data)
+{
+ if (is_nd_pmem(dev) || is_nd_blk(dev))
+ return device_for_each_child(dev, data, is_uuid_busy);
+ return 0;
+}
+
+/**
+ * nd_is_uuid_unique - verify that no other namespace has @uuid
+ * @dev: any device on a nvdimm_bus
+ * @uuid: uuid to check
+ */
+bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+
+ if (!nvdimm_bus)
+ return false;
+ WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
+ if (device_for_each_child(&nvdimm_bus->dev, uuid,
+ is_namespace_uuid_busy) != 0)
+ return false;
+ return true;
+}
+
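The walk relies on device_for_each_child() stopping as soon as the callback returns non-zero and propagating that value, so the -EBUSY from is_uuid_busy() bubbles up through is_namespace_uuid_busy() to nd_is_uuid_unique(), which maps any non-zero result to false. The same short-circuit in miniature (hypothetical names):

	static int match_name(struct device *child, void *data)
	{
		/* a non-zero return terminates the walk */
		return strcmp(dev_name(child), data) == 0 ? -EBUSY : 0;
	}

	static bool name_is_unique(struct device *parent, const char *name)
	{
		return device_for_each_child(parent, (void *)name, match_name) == 0;
	}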
bool pmem_should_map_pages(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
}
EXPORT_SYMBOL(nd_region_to_nstype);
-static int is_uuid_busy(struct device *dev, void *data)
-{
- struct nd_region *nd_region = to_nd_region(dev->parent);
- u8 *uuid = data;
-
- switch (nd_region_to_nstype(nd_region)) {
- case ND_DEVICE_NAMESPACE_PMEM: {
- struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
-
- if (!nspm->uuid)
- break;
- if (memcmp(uuid, nspm->uuid, NSLABEL_UUID_LEN) == 0)
- return -EBUSY;
- break;
- }
- case ND_DEVICE_NAMESPACE_BLK: {
- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
- if (!nsblk->uuid)
- break;
- if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) == 0)
- return -EBUSY;
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
-static int is_namespace_uuid_busy(struct device *dev, void *data)
-{
- if (is_nd_pmem(dev) || is_nd_blk(dev))
- return device_for_each_child(dev, data, is_uuid_busy);
- return 0;
-}
-
-/**
- * nd_is_uuid_unique - verify that no other namespace has @uuid
- * @dev: any device on a nvdimm_bus
- * @uuid: uuid to check
- */
-bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
-{
- struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
-
- if (!nvdimm_bus)
- return false;
- WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
- if (device_for_each_child(&nvdimm_bus->dev, uuid,
- is_namespace_uuid_busy) != 0)
- return false;
- return true;
-}
-
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
type_mask |= IORESOURCE_TYPE_BITS;
pci_bus_for_each_resource(bus, r, i) {
+ resource_size_t min_used = min;
+
if (!r)
continue;
* overrides "min".
*/
if (avail.start)
- min = avail.start;
+ min_used = avail.start;
max = avail.end;
/* Ok, try it out.. */
- ret = allocate_resource(r, res, size, min, max,
+ ret = allocate_resource(r, res, size, min_used, max,
align, alignf, alignf_data);
if (ret == 0)
return 0;
}
ret = devm_request_irq(&pdev->dev, pp->irq,
- dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
+ dra7xx_pcie_msi_irq_handler,
+ IRQF_SHARED | IRQF_NO_THREAD,
"dra7-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
exynos_pcie_msi_irq_handler,
- IRQF_SHARED, "exynos-pcie", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "exynos-pcie", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request msi irq\n");
return ret;
ret = devm_request_irq(&pdev->dev, pp->msi_irq,
imx6_pcie_msi_handler,
- IRQF_SHARED, "mx6-pcie-msi", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "mx6-pcie-msi", pp);
if (ret) {
dev_err(&pdev->dev, "failed to request MSI irq\n");
return ret;
msi->irq = err;
- err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
+ err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
tegra_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
/* Two irqs are for MSI, but they are also used for non-MSI irqs */
err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
goto err;
}
err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
- IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ rcar_msi_irq_chip.name, pcie);
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
goto err;
return -ENODEV;
}
ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
- IRQF_SHARED, "spear1340-pcie", pp);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "spear1340-pcie", pp);
if (ret) {
dev_err(dev, "failed to request irq %d\n", pp->irq);
return ret;
port->irq = irq_of_parse_and_map(node, 0);
err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
- IRQF_SHARED, "xilinx-pcie", port);
+ IRQF_SHARED | IRQF_NO_THREAD,
+ "xilinx-pcie", port);
if (err) {
dev_err(dev, "unable to request irq %d\n", port->irq);
return err;
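All of the handlers above demultiplex a summary interrupt into per-vector generic_handle_irq() calls, which must happen in hard-irq context; without IRQF_NO_THREAD, booting with threadirqs (or running an RT kernel) would force-thread the shared handler and break the chained dispatch. The shape of such a handler, sketched with illustrative names (foo_pcie and FOO_MSI_STATUS are not from any driver above):

	#define FOO_MSI_STATUS	0x50	/* hypothetical register offset */

	struct foo_pcie {
		void __iomem *base;
		struct irq_domain *msi_domain;
	};

	static irqreturn_t foo_msi_demux(int irq, void *data)
	{
		struct foo_pcie *pcie = data;
		unsigned long status = readl(pcie->base + FOO_MSI_STATUS);
		int bit;

		for_each_set_bit(bit, &status, 32)
			generic_handle_irq(irq_find_mapping(pcie->msi_domain, bit));

		return status ? IRQ_HANDLED : IRQ_NONE;
	}

requested with IRQF_SHARED | IRQF_NO_THREAD exactly as in the hunks above.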
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
struct twl4030_usb *twl = platform_get_drvdata(pdev);
int val;
+ usb_remove_phy(&twl->phy);
pm_runtime_get_sync(twl->dev);
cancel_delayed_work(&twl->id_workaround_work);
device_remove_file(twl->dev, &dev_attr_vbus);
/* set transceiver mode to power on defaults */
twl4030_usb_set_mode(twl, -1);
+ /* idle ulpi before powering off */
+ if (cable_present(twl->linkstat))
+ pm_runtime_put_noidle(twl->dev);
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put_sync_suspend(twl->dev);
+ pm_runtime_disable(twl->dev);
+
/* autogate 60MHz ULPI clock,
* clear dpll clock request for i2c access,
* disable 32KHz
/* disable complete OTG block */
twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
- if (cable_present(twl->linkstat))
- pm_runtime_put_noidle(twl->dev);
- pm_runtime_mark_last_busy(twl->dev);
- pm_runtime_put(twl->dev);
-
return 0;
}
static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
{
- int count = data->count;
+ unsigned int count = data->count;
if (count == 0 || count == 3 || count > 4)
return -EINVAL;
/*
* Command Lock contention
*/
- err = SCSI_DH_RETRY;
+ err = SCSI_DH_IMM_RETRY;
break;
default:
break;
err = mode_select_handle_sense(sdev, h->sense);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
+ if (err == SCSI_DH_IMM_RETRY)
+ goto retry;
}
if (err == SCSI_DH_OK) {
h->state = RDAC_STATE_ACTIVE;
{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+ {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
void scsi_remove_target(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev->parent);
- struct scsi_target *starget;
+ struct scsi_target *starget, *last_target = NULL;
unsigned long flags;
restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
- if (starget->state == STARGET_DEL)
+ if (starget->state == STARGET_DEL ||
+ starget == last_target)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
kref_get(&starget->reap_ref);
+ last_target = starget;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_target(starget);
scsi_target_reap(starget);
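last_target plus the restart label is the usual drop-lock-and-rescan idiom: __scsi_remove_target() may sleep, so the host lock is dropped around it and the list is re-walked from the head; remembering the entry just handled prevents looping forever on a target whose state never becomes STARGET_DEL. The idiom in the abstract (a sketch, not scsi code):

	restart:
		spin_lock_irqsave(lock, flags);
		list_for_each_entry(pos, head, list) {
			if (is_done(pos) || pos == last)
				continue;
			last = pos;
			spin_unlock_irqrestore(lock, flags);
			process(pos);		/* may sleep */
			goto restart;
		}
		spin_unlock_irqrestore(lock, flags);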
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
- if (!sdkp)
- return 0; /* this can happen */
+ if (!sdkp) /* E.g.: runtime suspend following sd_remove() */
+ return 0;
if (sdkp->WCE && sdkp->media_present) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
+ return 0;
+
if (!sdkp->device->manage_start_stop)
return 0;
}
sfp->mmap_called = 1;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
return 0;
{
struct scsi_cd *cd = dev_get_drvdata(dev);
+ if (!cd) /* E.g.: runtime suspend following sr_remove() */
+ return 0;
+
if (cd->media_present)
return -EBUSY;
else
scsi_autopm_get_device(cd->device);
del_gendisk(cd->disk);
+ dev_set_drvdata(dev, NULL);
mutex_lock(&sr_ref_mutex);
kref_put(&cd->kref, sr_kref_release);
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
- ld = tty_ldisc_ref_wait(tty);
+ ld = tty_ldisc_ref(tty);
+ if (!ld)
+ goto tty_unref;
tty_buffer_lock_exclusive(&vc->port);
add_wait_queue(&vc->paste_wait, &wait);
tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
+tty_unref:
tty_kref_put(tty);
}
}
/*
- * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
+ * May be called with spin_lock_irq(struct se_portal_group->session_lock)
+ * held, depending on the caller.
*
* Also, this function calls iscsit_inc_session_usage_count() on the
* struct iscsi_session in question.
static int lio_tpg_shutdown_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
+ struct se_portal_group *se_tpg = se_sess->se_tpg;
+ bool local_lock = false;
+
+ if (!spin_is_locked(&se_tpg->session_lock)) {
+ spin_lock_irq(&se_tpg->session_lock);
+ local_lock = true;
+ }
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
+ if (local_lock)
+			spin_unlock_irq(&se_tpg->session_lock);
return 0;
}
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
+ spin_unlock_irq(&se_tpg->session_lock);
+
iscsit_stop_session(sess, 1, 1);
+ if (!local_lock)
+ spin_lock_irq(&se_tpg->session_lock);
return 1;
}
static void n_tty_check_unthrottle(struct tty_struct *tty)
{
- if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
- tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
return;
if (!tty->count)
return;
n_tty_kick_worker(tty);
- n_tty_write_wakeup(tty->link);
- if (waitqueue_active(&tty->link->write_wait))
- wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
+ tty_wakeup(tty->link);
return;
}
/* this is called once with whichever end is closed last */
static void pty_unix98_shutdown(struct tty_struct *tty)
{
- devpts_kill_index(tty->driver_data, tty->index);
+ struct inode *ptmx_inode;
+
+ if (tty->driver->subtype == PTY_TYPE_MASTER)
+ ptmx_inode = tty->driver_data;
+ else
+ ptmx_inode = tty->link->driver_data;
+ devpts_kill_index(ptmx_inode, tty->index);
+ devpts_del_ref(ptmx_inode);
}
static const struct tty_operations ptm_unix98_ops = {
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
tty->driver_data = inode;
+ /*
+ * In the case where all references to ptmx inode are dropped and we
+ * still have /dev/tty opened pointing to the master/slave pair (ptmx
+ * is closed/released before /dev/tty), we must make sure that the inode
+ * is still valid when we call the final pty_unix98_shutdown, thus we
+ * hold an additional reference to the ptmx inode. For the same /dev/tty
+ * last close case, we also need to make sure the super_block isn't
+ * destroyed (devpts instance unmounted), before /dev/tty is closed and
+ * on its release devpts_kill_index is called.
+ */
+ devpts_add_ref(inode);
+
tty_add_file(tty, filp);
slave_inode = devpts_pty_new(inode,
#define PCI_DEVICE_ID_INTEL_BSW_UART1 0x228a
#define PCI_DEVICE_ID_INTEL_BSW_UART2 0x228c
+#define PCI_DEVICE_ID_INTEL_BDW_UART1 0x9ce3
+#define PCI_DEVICE_ID_INTEL_BDW_UART2 0x9ce4
+
#define BYT_PRV_CLK 0x800
#define BYT_PRV_CLK_EN (1 << 0)
#define BYT_PRV_CLK_M_VAL_SHIFT 1
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_BYT_UART1:
case PCI_DEVICE_ID_INTEL_BSW_UART1:
+ case PCI_DEVICE_ID_INTEL_BDW_UART1:
rx_param->src_id = 3;
tx_param->dst_id = 2;
break;
case PCI_DEVICE_ID_INTEL_BYT_UART2:
case PCI_DEVICE_ID_INTEL_BSW_UART2:
+ case PCI_DEVICE_ID_INTEL_BDW_UART2:
rx_param->src_id = 5;
tx_param->dst_id = 4;
break;
#define PCIE_VENDOR_ID_WCH 0x1c00
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
+#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
#define PCI_VENDOR_ID_PERICOM 0x12D8
#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
.subdevice = PCI_ANY_ID,
.setup = byt_serial_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BDW_UART1,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_BDW_UART2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = byt_serial_setup,
+ },
/*
* ITE
*/
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
+ /* WCH CH382 2S card (16850 clone) */
+ {
+ .vendor = PCIE_VENDOR_ID_WCH,
+ .device = PCIE_DEVICE_ID_WCH_CH382_2S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch38x_setup,
+ },
/* WCH CH382 2S1P card (16850 clone) */
{
.vendor = PCIE_VENDOR_ID_WCH,
pbn_fintek_4,
pbn_fintek_8,
pbn_fintek_12,
+ pbn_wch382_2,
pbn_wch384_4,
pbn_pericom_PI7C9X7951,
pbn_pericom_PI7C9X7952,
.base_baud = 115200,
.first_offset = 0x40,
},
+ [pbn_wch382_2] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ .first_offset = 0xC0,
+ },
[pbn_wch384_4] = {
.flags = FL_BASE0,
.num_ports = 4,
PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
pbn_byt },
+ /* Intel Broadwell */
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ pbn_byt },
+
/*
* Intel Quark x1000
*/
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
+ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_wch382_2 },
+
{ PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_4 },
/* Enable or disable the rs485 support */
static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
up->ier = 0;
serial_out(up, UART_IER, 0);
+ /* Clamp the delays to [0, 100ms] */
+ rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+ rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
/* store new config */
- port->rs485 = *rs485conf;
+ port->rs485 = *rs485;
/*
* Just as a precaution, only allow rs485
{
struct tty_driver *driver = tty->driver;
- if (!tty->count)
- return -EIO;
-
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
return -EIO;
+ if (!tty->count)
+ return -EAGAIN;
+
if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
return -EBUSY;
if (tty) {
mutex_unlock(&tty_mutex);
- tty_lock(tty);
+ retval = tty_lock_interruptible(tty);
+ if (retval) {
+ if (retval == -EINTR)
+ retval = -ERESTARTSYS;
+ goto err_unref;
+ }
/* safe to drop the kref from tty_driver_lookup_tty() */
tty_kref_put(tty);
retval = tty_reopen(tty);
if (IS_ERR(tty)) {
retval = PTR_ERR(tty);
- goto err_file;
+ if (retval != -EAGAIN || signal_pending(current))
+ goto err_file;
+ tty_free_file(filp);
+ schedule();
+ goto retry_open;
}
tty_add_file(tty, filp);
return 0;
err_unlock:
mutex_unlock(&tty_mutex);
+err_unref:
/* after locks to avoid deadlock */
if (!IS_ERR_OR_NULL(driver))
tty_driver_kref_put(driver);
return ret;
}
+/**
+ * tiocgetd - get line discipline
+ * @tty: tty device
+ * @p: pointer to user data
+ *
+ * Retrieves the line discipline id directly from the ldisc.
+ *
+ * Locking: waits for ldisc reference (in case the line discipline
+ * is changing or the tty is being hungup)
+ */
+
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+ struct tty_ldisc *ld;
+ int ret;
+
+ ld = tty_ldisc_ref_wait(tty);
+ ret = put_user(ld->ops->num, p);
+ tty_ldisc_deref(ld);
+ return ret;
+}
+
/**
* send_break - perform a timed break
* @tty: device to break on
case TIOCGSID:
return tiocgsid(tty, real_tty, p);
case TIOCGETD:
- return put_user(tty->ldisc->ops->num, (int __user *)p);
+ return tiocgetd(tty, p);
case TIOCSETD:
return tiocsetd(tty, p);
case TIOCVHANGUP:
}
EXPORT_SYMBOL(tty_lock);
+int tty_lock_interruptible(struct tty_struct *tty)
+{
+ if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
+ return -EIO;
+ tty_kref_get(tty);
+ return mutex_lock_interruptible(&tty->legacy_mutex);
+}
+
void __lockfunc tty_unlock(struct tty_struct *tty)
{
if (tty->magic != TTY_MAGIC) {
set_bit(rb->index, &acm->read_urbs_free);
dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
__func__, status);
- return;
+ if ((status != -ENOENT) || (urb->actual_length == 0))
+ return;
}
usb_mark_last_busy(acm->dev);
usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
NULL, acm->writesize, acm_write_bulk, snd);
snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ if (quirks & SEND_ZERO_PACKET)
+ snd->urb->transfer_flags |= URB_ZERO_PACKET;
snd->instance = acm;
}
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
+ { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+
{ }
};
#define IGNORE_DEVICE BIT(5)
#define QUIRK_CONTROL_LINE_STATE BIT(6)
#define CLEAR_HALT_CONDITIONS BIT(7)
+#define SEND_ZERO_PACKET BIT(8)
}
bos = udev->bos;
- udev->bos = NULL;
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
- usb_release_bos_descriptor(udev);
- udev->bos = bos;
+ /* release the new BOS descriptor allocated by hub_port_init() */
+ if (udev->bos != bos) {
+ usb_release_bos_descriptor(udev);
+ udev->bos = bos;
+ }
return 0;
re_enumerate:
#include "xhci.h"
#include "xhci-trace.h"
-#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define SSIC_PORT_NUM 2
+#define SSIC_PORT_CFG2 0x880c
+#define SSIC_PORT_CFG2_OFFSET 0x30
#define PROG_DONE (1 << 30)
#define SSIC_PORT_UNUSED (1 << 31)
#define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
#define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
static const char hcd_name[] = "xhci_hcd";
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
u32 val;
void __iomem *reg;
+ int i;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
- reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
-
- /* Notify SSIC that SSIC profile programming is not done */
- val = readl(reg) & ~PROG_DONE;
- writel(val, reg);
-
- /* Mark SSIC port as unused(suspend) or used(resume) */
- val = readl(reg);
- if (suspend)
- val |= SSIC_PORT_UNUSED;
- else
- val &= ~SSIC_PORT_UNUSED;
- writel(val, reg);
-
- /* Notify SSIC that SSIC profile programming is done */
- val = readl(reg) | PROG_DONE;
- writel(val, reg);
- readl(reg);
+ for (i = 0; i < SSIC_PORT_NUM; i++) {
+ reg = (void __iomem *) xhci->cap_regs +
+ SSIC_PORT_CFG2 +
+ i * SSIC_PORT_CFG2_OFFSET;
+
+ /*
+ * Notify SSIC that SSIC profile programming
+ * is not done.
+ */
+ val = readl(reg) & ~PROG_DONE;
+ writel(val, reg);
+
+ /* Mark SSIC port as unused(suspend) or used(resume) */
+ val = readl(reg);
+ if (suspend)
+ val |= SSIC_PORT_UNUSED;
+ else
+ val &= ~SSIC_PORT_UNUSED;
+ writel(val, reg);
+
+ /* Notify SSIC that SSIC profile programming is done */
+ val = readl(reg) | PROG_DONE;
+ writel(val, reg);
+ readl(reg);
+ }
}
reg = (void __iomem *) xhci->cap_regs + 0x80a4;
}
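The loop is a generalization, not just a rename: with SSIC_PORT_CFG2 at 0x880c and a 0x30 stride, the two iterations program

	port 0: 0x880c + 0 * 0x30 = 0x880c
	port 1: 0x880c + 1 * 0x30 = 0x883c

and 0x883c is exactly the old PORT2_SSIC_CONFIG_REG2, so the second SSIC port keeps its previous treatment while the first is now covered as well.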
/* Fast path - was this the last TRB in the TD for this URB? */
} else if (event_trb == td->last_trb) {
- if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
- return finish_td(xhci, td, event_trb, event, ep,
- status, false);
-
if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
td->urb->actual_length =
td->urb->transfer_buffer_length -
td->urb->actual_length +=
TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
- if (trb_comp_code == COMP_SHORT_TX) {
- xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
- td->urb_length_set = true;
- return 0;
- }
}
return finish_td(xhci, td, event_trb, event, ep, status, false);
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HW died, freeing TD.");
urb_priv = urb->hcpriv;
- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ for (i = urb_priv->td_cnt;
+ i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+ i++) {
td = urb_priv->td[i];
if (!list_empty(&td->td_list))
list_del_init(&td->td_list);
&motg->id.nb);
if (ret < 0) {
dev_err(&pdev->dev, "register ID notifier failed\n");
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
return ret;
}
if (!motg)
return -ENOMEM;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- if (!np)
- return -ENXIO;
- ret = msm_otg_read_dt(pdev, motg);
- if (ret)
- return ret;
- }
-
motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
GFP_KERNEL);
if (!motg->phy.otg)
if (!motg->regs)
return -ENOMEM;
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ if (!np)
+ return -ENXIO;
+ ret = msm_otg_read_dt(pdev, motg);
+ if (ret)
+ return ret;
+ }
+
/*
* NOTE: The PHYs can be multiplexed between the chipidea controller
* and the dwc3 controller, using a single bit. It is important that
*/
if (motg->phy_number) {
phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
- if (!phy_select)
- return -ENOMEM;
+ if (!phy_select) {
+ ret = -ENOMEM;
+ goto unregister_extcon;
+ }
/* Enable second PHY with the OTG port */
writel(0x1, phy_select);
}
motg->irq = platform_get_irq(pdev, 0);
if (motg->irq < 0) {
dev_err(&pdev->dev, "platform_get_irq failed\n");
- return motg->irq;
+ ret = motg->irq;
+ goto unregister_extcon;
}
regs[0].supply = "vddcx";
ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
if (ret)
- return ret;
+ goto unregister_extcon;
motg->vddcx = regs[0].consumer;
motg->v3p3 = regs[1].consumer;
clk_disable_unprepare(motg->clk);
if (!IS_ERR(motg->core_clk))
clk_disable_unprepare(motg->core_clk);
+unregister_extcon:
+ extcon_unregister_notifier(motg->id.extcon,
+ EXTCON_USB_HOST, &motg->id.nb);
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
+
return ret;
}
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+ { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
{ USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
{ USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
+ { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
/* Papouch devices based on FTDI chip */
*/
#define RATOC_VENDOR_ID 0x0584
#define RATOC_PRODUCT_ID_USB60F 0xb020
+#define RATOC_PRODUCT_ID_SCU18 0xb03a
/*
* Infineon Technologies
#define TELIT_PRODUCT_CC864_SINGLE 0x1006
#define TELIT_PRODUCT_DE910_DUAL 0x1010
#define TELIT_PRODUCT_UE910_V2 0x1012
+#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
+#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+ .sendsetup = BIT(2),
+ .reserved = BIT(0) | BIT(1) | BIT(3),
+};
+
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(2) | BIT(3),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
(serial->num_interrupt_in == 0))
return 0;
+ if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
+ dev_err(&serial->interface->dev, "missing endpoints\n");
+ return -ENODEV;
+ }
+
/*
* It appears that Treos and Kyoceras want to use the
* 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
*/
/* some sanity check */
- if (serial->num_ports < 2)
- return -1;
+ if (serial->num_bulk_out < 2) {
+ dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
+ return -ENODEV;
+ }
/* port 0 now uses the modified endpoint Address */
port = serial->port[0];
read_extent_buffer(eb, dest + bytes_left,
name_off, name_len);
if (eb != eb_in) {
- btrfs_tree_read_unlock_blocking(eb);
+ if (!path->skip_locking)
+ btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
}
ret = btrfs_find_item(fs_root, path, parent, 0,
eb = path->nodes[0];
/* make sure we can use eb after releasing the path */
if (eb != eb_in) {
- atomic_inc(&eb->refs);
- btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ if (!path->skip_locking)
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ path->nodes[0] = NULL;
+ path->locks[0] = 0;
}
btrfs_release_path(path);
iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
*
*/
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list)
+ struct list_head *ins_list, bool *emitted)
{
struct btrfs_dir_item *di;
struct btrfs_delayed_item *curr, *next;
if (over)
return 1;
+ *emitted = true;
}
return 0;
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list);
+ struct list_head *ins_list, bool *emitted);
/* for init */
int __init btrfs_delayed_inode_init(void);
int again;
struct btrfs_trans_handle *trans;
- set_freezable();
do {
again = 0;
char *name_ptr;
int name_len;
int is_curr = 0; /* ctx->pos points to the current index? */
+ bool emitted;
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
if (ret < 0)
goto err;
+ emitted = false;
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
if (over)
goto nopos;
+ emitted = true;
di_len = btrfs_dir_name_len(leaf, di) +
btrfs_dir_data_len(leaf, di) + sizeof(*di);
di_cur += di_len;
if (key_type == BTRFS_DIR_INDEX_KEY) {
if (is_curr)
ctx->pos++;
- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
+ ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
if (ret)
goto nopos;
}
+ /*
+ * If we haven't emitted any dir entry, we must not touch ctx->pos as
+ * it was set to the termination value in the previous call. We assume
+ * that "." and ".." were emitted if we reach this point and set the
+ * termination value as well for an empty directory.
+ */
+ if (ctx->pos > 2 && !emitted)
+ goto nopos;
+
/* Reached end of directory/root. Bump pos past the last item. */
ctx->pos++;
kfree(dip);
+ dio_bio->bi_error = bio->bi_error;
dio_end_io(dio_bio, bio->bi_error);
if (io_bio->end_io)
kfree(dip);
+ dio_bio->bi_error = bio->bi_error;
dio_end_io(dio_bio, bio->bi_error);
bio_put(bio);
}
static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
{
struct page *page;
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
page = grab_cache_page(inode->i_mapping, index);
if (!page)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (!PageUptodate(page)) {
- if (extent_read_full_page_nolock(tree, page, btrfs_get_extent,
- 0))
- return NULL;
+ int ret;
+
+ ret = btrfs_readpage(NULL, page);
+ if (ret)
+ return ERR_PTR(ret);
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
page_cache_release(page);
- return NULL;
+ return ERR_PTR(-EIO);
+ }
+ if (page->mapping != inode->i_mapping) {
+ unlock_page(page);
+ page_cache_release(page);
+ return ERR_PTR(-EAGAIN);
}
}
- unlock_page(page);
return page;
}
pgoff_t index = off >> PAGE_CACHE_SHIFT;
for (i = 0; i < num_pages; i++) {
+again:
pages[i] = extent_same_get_page(inode, index + i);
- if (!pages[i])
- return -ENOMEM;
+ if (IS_ERR(pages[i])) {
+ int err = PTR_ERR(pages[i]);
+
+ if (err == -EAGAIN)
+ goto again;
+ pages[i] = NULL;
+ return err;
+ }
}
return 0;
}
-static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
+static int lock_extent_range(struct inode *inode, u64 off, u64 len,
+ bool retry_range_locking)
{
- /* do any pending delalloc/csum calc on src, one way or
- another, and lock file content */
+ /*
+ * Do any pending delalloc/csum calculations on inode, one way or
+ * another, and lock file content.
+ * The locking order is:
+ *
+ * 1) pages
+ * 2) range in the inode's io tree
+ */
while (1) {
struct btrfs_ordered_extent *ordered;
lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
if (ordered)
btrfs_put_ordered_extent(ordered);
+ if (!retry_range_locking)
+ return -EAGAIN;
btrfs_wait_ordered_range(inode, off, len);
}
+ return 0;
}
static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}
-static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
+static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len,
+ bool retry_range_locking)
{
+ int ret;
+
if (inode1 < inode2) {
swap(inode1, inode2);
swap(loff1, loff2);
}
- lock_extent_range(inode1, loff1, len);
- lock_extent_range(inode2, loff2, len);
+ ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
+ if (ret)
+ return ret;
+ ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
+ if (ret)
+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
+ loff1 + len - 1);
+ return ret;
}
struct cmp_pages {
for (i = 0; i < cmp->num_pages; i++) {
pg = cmp->src_pages[i];
- if (pg)
+ if (pg) {
+ unlock_page(pg);
page_cache_release(pg);
+ }
pg = cmp->dst_pages[i];
- if (pg)
+ if (pg) {
+ unlock_page(pg);
page_cache_release(pg);
+ }
}
kfree(cmp->src_pages);
kfree(cmp->dst_pages);
src_page = cmp->src_pages[i];
dst_page = cmp->dst_pages[i];
+ ASSERT(PageLocked(src_page));
+ ASSERT(PageLocked(dst_page));
addr = kmap_atomic(src_page);
dst_addr = kmap_atomic(dst_page);
goto out_unlock;
}
+again:
ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
if (ret)
goto out_unlock;
if (same_inode)
- lock_extent_range(src, same_lock_start, same_lock_len);
+ ret = lock_extent_range(src, same_lock_start, same_lock_len,
+ false);
else
- btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
+ ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
+ false);
+ /*
+ * If one of the inodes has dirty pages in the respective range or
+ * ordered extents, we need to flush delalloc and wait for all ordered
+ * extents in the range. We must unlock the pages and the ranges in the
+ * io trees to avoid deadlocks when flushing delalloc (requires locking
+ * pages) and when waiting for ordered extents to complete (they require
+ * range locking).
+ */
+ if (ret == -EAGAIN) {
+ /*
+ * Ranges in the io trees already unlocked. Now unlock all
+ * pages before waiting for all IO to complete.
+ */
+ btrfs_cmp_data_free(&cmp);
+ if (same_inode) {
+ btrfs_wait_ordered_range(src, same_lock_start,
+ same_lock_len);
+ } else {
+ btrfs_wait_ordered_range(src, loff, len);
+ btrfs_wait_ordered_range(dst, dst_loff, len);
+ }
+ goto again;
+ }
+ ASSERT(ret == 0);
+ if (WARN_ON(ret)) {
+ /* ranges in the io trees already unlocked */
+ btrfs_cmp_data_free(&cmp);
+ return ret;
+ }
/* pass original length for comparison so we stay within i_size */
ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
u64 lock_start = min_t(u64, off, destoff);
u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
- lock_extent_range(src, lock_start, lock_len);
+ ret = lock_extent_range(src, lock_start, lock_len, true);
} else {
- btrfs_double_extent_lock(src, off, inode, destoff, len);
+ ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
+ true);
+ }
+ ASSERT(ret == 0);
+ if (WARN_ON(ret)) {
+ /* ranges in the io trees already unlocked */
+ goto out_unlock;
}
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
int ret;
int slot;
struct extent_buffer *l;
+ u64 min_search_start;
+
+ /*
+ * We don't want to overwrite the superblock on the drive nor any area
+ * used by the boot loader (grub for example), so we make sure to start
+ * at an offset of at least 1MB.
+ */
+ min_search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
+ search_start = max(search_start, min_search_start);
path = btrfs_alloc_path();
if (!path)
struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *len)
{
- struct btrfs_root *root = device->dev_root;
- u64 search_start;
-
/* FIXME use last free of some kind */
-
- /*
- * we don't want to overwrite the superblock on the drive,
- * so we make sure to start at an offset of at least 1MB
- */
- search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
return find_free_dev_extent_start(trans->transaction, device,
- num_bytes, search_start, start, len);
+ num_bytes, 0, start, len);
}
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
goto out_short_read;
num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+ if (!num_stripes) {
+ printk(KERN_ERR
+ "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
+ num_stripes, cur_offset);
+ ret = -EIO;
+ break;
+ }
+
len = btrfs_chunk_item_size(num_stripes);
if (cur_offset + len > array_size)
goto out_short_read;
vaf.fmt = fmt;
vaf.va = &args;
- pr_err("CIFS VFS: %pV", &vaf);
+ pr_err_ratelimited("CIFS VFS: %pV", &vaf);
va_end(args);
}
/* information message: e.g., configuration, major event */
#define cifs_dbg(type, fmt, ...) \
do { \
- if (type == FYI) { \
- if (cifsFYI & CIFS_INFO) { \
- pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__); \
- } \
+ if (type == FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ratelimited("%s: " \
+ fmt, __FILE__, ##__VA_ARGS__); \
} else if (type == VFS) { \
cifs_vfs_err(fmt, ##__VA_ARGS__); \
} else if (type == NOISY && type != 0) { \
- pr_debug(fmt, ##__VA_ARGS__); \
+ pr_debug_ratelimited(fmt, ##__VA_ARGS__); \
} \
} while (0)
ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
if (!ses->auth_key.response) {
- rc = ENOMEM;
+ rc = -ENOMEM;
ses->auth_key.len = 0;
goto setup_ntlmv2_rsp_ret;
}
server->session_key.response = NULL;
server->session_key.len = 0;
server->lstrp = jiffies;
- mutex_unlock(&server->srv_mutex);
/* mark submitted MIDs for retry and issue callback */
INIT_LIST_HEAD(&retry_list);
list_move(&mid_entry->qhead, &retry_list);
}
spin_unlock(&GlobalMid_Lock);
+ mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
list_for_each_safe(tmp, tmp2, &retry_list) {
* if buggy server returns . and .. late do we want to
* check for that here?
*/
+ *tmp_buf = 0;
rc = cifs_filldir(current_entry, file, ctx,
tmp_buf, max_len);
if (rc) {
cifs_in_send_dec(server);
cifs_save_when_sent(mid);
- if (rc < 0)
+ if (rc < 0) {
server->sequence_number -= 2;
+ cifs_delete_mid(mid);
+ }
+
mutex_unlock(&server->srv_mutex);
if (rc == 0)
return 0;
- cifs_delete_mid(mid);
add_credits_and_wake_if(server, credits, optype);
return rc;
}
mutex_unlock(&allocated_ptys_lock);
}
+/*
+ * pty code needs to hold extra references in case of last /dev/tty close
+ */
+
+void devpts_add_ref(struct inode *ptmx_inode)
+{
+ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+ atomic_inc(&sb->s_active);
+ ihold(ptmx_inode);
+}
+
+void devpts_del_ref(struct inode *ptmx_inode)
+{
+ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+ iput(ptmx_inode);
+ deactivate_super(sb);
+}
+
/**
* devpts_pty_new -- create a new inode in /dev/pts/
* @ptmx_inode: inode of the master
/* If checksum is bad mark all blocks used to prevent allocation
* essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- ext4_error(sb, "Checksum bad for group %u", block_group);
grp = ext4_get_group_info(sb, block_group);
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
percpu_counter_sub(&sbi->s_freeclusters_counter,
}
ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err)
+ if (err) {
+ ext4_error(sb, "Failed to init block bitmap for group "
+ "%u: %d", block_group, err);
goto out;
+ }
goto verify;
}
ext4_unlock_group(sb, block_group);
res = -ENOKEY;
goto out;
}
+ down_read(&keyring_key->sem);
ukp = user_key_payload(keyring_key);
if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
res = -EINVAL;
+ up_read(&keyring_key->sem);
goto out;
}
master_key = (struct ext4_encryption_key *)ukp->data;
"ext4: key size incorrect: %d\n",
master_key->size);
res = -ENOKEY;
+ up_read(&keyring_key->sem);
goto out;
}
res = ext4_derive_key_aes(ctx.nonce, master_key->raw,
raw_key);
+ up_read(&keyring_key->sem);
if (res)
goto out;
got_key:
/* If checksum is bad mark all blocks and inodes use to prevent
* allocation, essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- ext4_error(sb, "Checksum bad for group %u", block_group);
grp = ext4_get_group_info(sb, block_group);
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
percpu_counter_sub(&sbi->s_freeclusters_counter,
set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err)
+ if (err) {
+ ext4_error(sb, "Failed to init inode bitmap for group "
+ "%u: %d", block_group, err);
goto out;
+ }
return bh;
}
ext4_unlock_group(sb, block_group);
ext4_lblk_t orig_blk_offset, donor_blk_offset;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
unsigned int tmp_data_size, data_size, replaced_size;
- int err2, jblocks, retries = 0;
+ int i, err2, jblocks, retries = 0;
int replaced_count = 0;
int from = data_offset_in_page << orig_inode->i_blkbits;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
struct super_block *sb = orig_inode->i_sb;
+ struct buffer_head *bh = NULL;
/*
* It needs twice the amount of ordinary journal buffers because
}
/* Perform all necessary steps similar write_begin()/write_end()
* but keeping in mind that i_size will not change */
- *err = __block_write_begin(pagep[0], from, replaced_size,
- ext4_get_block);
+ if (!page_has_buffers(pagep[0]))
+ create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
+ bh = page_buffers(pagep[0]);
+ for (i = 0; i < data_offset_in_page; i++)
+ bh = bh->b_this_page;
+ for (i = 0; i < block_len_in_page; i++) {
+ *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
+ if (*err < 0)
+ break;
+ }
if (!*err)
*err = block_commit_write(pagep[0], from, from + replaced_size);
if (flex_gd == NULL)
goto out3;
- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
+ if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
goto out2;
flex_gd->count = flexbg_size;
*/
vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
unsigned long v_offset;
+ unsigned long v_end;
/*
* Can the expression below overflow on 32-bit arches?
else
v_offset = 0;
- if (end) {
- end = ((end - start) << PAGE_SHIFT) +
- vma->vm_start + v_offset;
- if (end > vma->vm_end)
- end = vma->vm_end;
- } else
- end = vma->vm_end;
+ if (!end)
+ v_end = vma->vm_end;
+ else {
+ v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
+ + vma->vm_start;
+ if (v_end > vma->vm_end)
+ v_end = vma->vm_end;
+ }
- unmap_hugepage_range(vma, vma->vm_start + v_offset, end, NULL);
+ unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
+ NULL);
}
}
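One way to see the bug, with made-up numbers and 4 KiB pages: a vma with vm_start = 0x100000, vm_end = 0x10a000, vm_pgoff = 10 (mapping file pages 10-19), and a truncation range start = 0, end = 15. The new code unmaps up to

	v_end = ((15 - 10) << 12) + 0x100000 = 0x105000

i.e. exactly file pages 10-14. The old expression gave ((15 - 0) << 12) + 0x100000 = 0x10f000, clamped to vm_end, so pages 15-19 were unmapped too, although they lie outside the truncation range.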
return false;
for (i = 0; i < m1->fh_versions_cnt; i++) {
bool found_fh = false;
- for (j = 0; j < m2->fh_versions_cnt; i++) {
+ for (j = 0; j < m2->fh_versions_cnt; j++) {
if (nfs_compare_fh(&m1->fh_versions[i],
&m2->fh_versions[j]) == 0) {
found_fh = true;
start = xdr_reserve_space(xdr, 4);
BUG_ON(!start);
- if (ff_layout_encode_ioerr(flo, xdr, args))
- goto out;
-
+ ff_layout_encode_ioerr(flo, xdr, args);
ff_layout_encode_iostats(flo, xdr, args);
-out:
+
*start = cpu_to_be32((xdr->p - start - 1) * 4);
dprintk("%s: Return\n", __func__);
}
unsigned long invalid = 0;
unsigned long now = jiffies;
unsigned long save_cache_validity;
+ bool cache_revalidated = true;
dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
__func__, inode->i_sb->s_id, inode->i_ino,
nfs_force_lookup_revalidate(inode);
inode->i_version = fattr->change_attr;
}
- } else
+ } else {
nfsi->cache_validity |= save_cache_validity;
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
- } else if (server->caps & NFS_CAP_MTIME)
+ } else if (server->caps & NFS_CAP_MTIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
- } else if (server->caps & NFS_CAP_CTIME)
+ } else if (server->caps & NFS_CAP_CTIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
/* Check if our cached file size is stale */
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
(long long)cur_isize,
(long long)new_isize);
}
- } else
+ } else {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_PAGECACHE
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
- else if (server->caps & NFS_CAP_ATIME)
+ else if (server->caps & NFS_CAP_ATIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATIME
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
inode->i_mode = newmode;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
}
- } else if (server->caps & NFS_CAP_MODE)
+ } else if (server->caps & NFS_CAP_MODE) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
if (!uid_eq(inode->i_uid, fattr->uid)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_uid = fattr->uid;
}
- } else if (server->caps & NFS_CAP_OWNER)
+ } else if (server->caps & NFS_CAP_OWNER) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
if (!gid_eq(inode->i_gid, fattr->gid)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_gid = fattr->gid;
}
- } else if (server->caps & NFS_CAP_OWNER_GROUP)
+ } else if (server->caps & NFS_CAP_OWNER_GROUP) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
if (inode->i_nlink != fattr->nlink) {
invalid |= NFS_INO_INVALID_DATA;
set_nlink(inode, fattr->nlink);
}
- } else if (server->caps & NFS_CAP_NLINK)
+ } else if (server->caps & NFS_CAP_NLINK) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
+ cache_revalidated = false;
+ }
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
/*
* report the blocks in 512-byte units
*/
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
- }
- if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
+ } else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
+ else
+ cache_revalidated = false;
/* Update attrtimeo value if we're out of the unstable period */
if (invalid & NFS_INO_INVALID_ATTR) {
/* Set barrier to be more recent than all outstanding updates */
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
} else {
- if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
- if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
- nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
+ if (cache_revalidated) {
+ if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
+ nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
+ nfsi->attrtimeo <<= 1;
+ if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
+ nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
+ }
nfsi->attrtimeo_timestamp = now;
}
/* Set the barrier to be more recent than this fattr */
}
/* Don't declare attrcache up to date if there were no attrs! */
- if (fattr->valid != 0)
+ if (cache_revalidated)
invalid &= ~NFS_INO_INVALID_ATTR;
/* Don't invalidate the data if we were to blame */
* Protect the call to nfs4_state_set_mode_locked and
* serialise the stateid update
*/
+ spin_lock(&state->owner->so_lock);
write_seqlock(&state->seqlock);
if (deleg_stateid != NULL) {
nfs4_stateid_copy(&state->stateid, deleg_stateid);
if (open_stateid != NULL)
nfs_set_open_stateid_locked(state, open_stateid, fmode);
write_sequnlock(&state->seqlock);
- spin_lock(&state->owner->so_lock);
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
}
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
pnfs_clear_layoutreturn_waitbit(lo);
- lo->plh_block_lgets--;
spin_unlock(&lo->plh_inode->i_lock);
pnfs_free_lseg_list(&freeme);
pnfs_put_layout_hdr(lrp->args.layout);
spin_lock(&dlm->master_lock);
ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
namelen, target, dlm->node_num);
+ /* get an extra reference on the mle.
+ * otherwise the assert_master from the new
+ * master will destroy this.
+ */
+ dlm_get_mle_inuse(mle);
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
if (mle_added) {
dlm_mle_detach_hb_events(dlm, mle);
dlm_put_mle(mle);
+ dlm_put_mle_inuse(mle);
} else if (mle) {
kmem_cache_free(dlm_mle_cache, mle);
mle = NULL;
* ensure that all assert_master work is flushed. */
flush_workqueue(dlm->dlm_worker);
- /* get an extra reference on the mle.
- * otherwise the assert_master from the new
- * master will destroy this.
- * also, make sure that all callers of dlm_get_mle
- * take both dlm->spinlock and dlm->master_lock */
- spin_lock(&dlm->spinlock);
- spin_lock(&dlm->master_lock);
- dlm_get_mle_inuse(mle);
- spin_unlock(&dlm->master_lock);
- spin_unlock(&dlm->spinlock);
-
/* notify new node and send all lock state */
/* call send_one_lockres with migration flag.
* this serves as notice to the target node that a
mle->new_master != dead_node)
continue;
+ if (mle->new_master == dead_node && mle->inuse) {
+ mlog(ML_NOTICE, "%s: target %u died during "
+ "migration from %u, the MLE is "
+ "still keep used, ignore it!\n",
+ dlm->name, dead_node,
+ mle->master);
+ continue;
+ }
+
/* If we have reached this point, this mle needs to be
* removed from the list and freed. */
dlm_clean_migration_mle(dlm, mle);
break;
}
}
+ dlm_lockres_clear_refmap_bit(dlm, res,
+ dead_node);
spin_unlock(&res->spinlock);
continue;
}
unsigned int gen;
int noqueue_attempted = 0;
int dlm_locked = 0;
+ int kick_dc = 0;
if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
mlog_errno(-EINVAL);
unlock:
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+ /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
+ kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
+
spin_unlock_irqrestore(&lockres->l_lock, flags);
+ if (kick_dc)
+ ocfs2_wake_downconvert_thread(osb);
out:
/*
* This is helping work around a lock inversion between the page lock
int ovl_copy_xattr(struct dentry *old, struct dentry *new)
{
- ssize_t list_size, size;
- char *buf, *name, *value;
- int error;
+ ssize_t list_size, size, value_size = 0;
+ char *buf, *name, *value = NULL;
+ int uninitialized_var(error);
if (!old->d_inode->i_op->getxattr ||
!new->d_inode->i_op->getxattr)
if (!buf)
return -ENOMEM;
- error = -ENOMEM;
- value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
- if (!value)
- goto out;
-
list_size = vfs_listxattr(old, buf, list_size);
if (list_size <= 0) {
error = list_size;
- goto out_free_value;
+ goto out;
}
for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
- size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
- if (size <= 0) {
+retry:
+ size = vfs_getxattr(old, name, value, value_size);
+ if (size == -ERANGE)
+ size = vfs_getxattr(old, name, NULL, 0);
+
+ if (size < 0) {
error = size;
- goto out_free_value;
+ break;
+ }
+
+ if (size > value_size) {
+ void *new;
+
+ new = krealloc(value, size, GFP_KERNEL);
+ if (!new) {
+ error = -ENOMEM;
+ break;
+ }
+ value = new;
+ value_size = size;
+ goto retry;
}
+
error = vfs_setxattr(new, name, value, size, 0);
if (error)
- goto out_free_value;
+ break;
}
-
-out_free_value:
kfree(value);
out:
kfree(buf);
int err;
struct dentry *upperdentry;
+ /*
+ * Check for permissions before trying to copy-up. This is redundant
+ * since it will be rechecked later by ->setattr() on upper dentry. But
+ * without this, copy-up can be triggered by just about anybody.
+ *
+ * We don't initialize inode->size, which just means that
+ * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
+ * check for a swapfile (which this won't be anyway).
+ */
+ err = inode_change_ok(dentry->d_inode, attr);
+ if (err)
+ return err;
+
err = ovl_want_write(dentry);
if (err)
goto out;
(int) PTR_ERR(dentry));
continue;
}
- ovl_cleanup(upper->d_inode, dentry);
+ if (dentry->d_inode)
+ ovl_cleanup(upper->d_inode, dentry);
dput(dentry);
}
mutex_unlock(&upper->d_inode->i_mutex);
#include <linux/fs.h>
#include <linux/namei.h>
+#include <linux/pagemap.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/mount.h>
}
sb->s_stack_depth = 0;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
if (ufs->config.upperdir) {
if (!ufs->config.workdir) {
pr_err("overlayfs: missing 'workdir'\n");
root_dentry->d_fsdata = oe;
+ ovl_copyattr(ovl_dentry_real(root_dentry)->d_inode,
+ root_dentry->d_inode);
+
sb->s_magic = OVERLAYFS_SUPER_MAGIC;
sb->s_op = &ovl_super_operations;
sb->s_root = root_dentry;
state = *get_task_state(task);
vsize = eip = esp = 0;
- permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
+ permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
mm = get_task_mm(task);
if (mm) {
vsize = task_vsize(mm);
static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
- struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
+ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
if (mm && !IS_ERR(mm)) {
unsigned int nwords = 0;
do {
wchan = get_wchan(task);
- if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
+ if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
+ && !lookup_symbol_name(wchan, symname))
seq_printf(m, "%s", symname);
else
seq_putc(m, '0');
int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
if (err)
return err;
- if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
mutex_unlock(&task->signal->cred_guard_mutex);
return -EPERM;
}
*/
task = get_proc_task(inode);
if (task) {
- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
+ allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
put_task_struct(task);
}
return allowed;
return true;
if (in_group_p(pid->pid_gid))
return true;
- return ptrace_may_access(task, PTRACE_MODE_READ);
+ return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
}
struct mm_struct *mm = ERR_PTR(-ESRCH);
if (task) {
- mm = mm_access(task, mode);
+ mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
put_task_struct(task);
if (!IS_ERR_OR_NULL(mm)) {
if (!task)
goto out_notask;
- mm = mm_access(task, PTRACE_MODE_READ);
+ mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
if (IS_ERR_OR_NULL(mm))
goto out;
goto out;
result = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
goto out_put_task;
result = -ENOENT;
goto out;
ret = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
goto out_put_task;
ret = 0;
if (result)
return result;
- if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
result = -EACCES;
goto out_unlock;
}
if (!task)
return error;
- if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+ if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
error = ns_get_path(&ns_path, task, ns_ops);
if (!error)
nd_jump_link(&ns_path);
if (!task)
return res;
- if (ptrace_may_access(task, PTRACE_MODE_READ)) {
+ if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
res = ns_get_name(name, sizeof(name), task, ns_ops);
if (res >= 0)
res = readlink_copy(buffer, buflen, name);
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
+ pte_t huge_pte = huge_ptep_get(pte);
struct numa_maps *md;
struct page *page;
- if (!pte_present(*pte))
+ if (!pte_present(huge_pte))
return 0;
- page = pte_page(*pte);
+ page = pte_page(huge_pte);
if (!page)
return 0;
md = walk->private;
- gather_stats(page, md, pte_dirty(*pte), 1);
+ gather_stats(page, md, pte_dirty(huge_pte), 1);
return 0;
}
if (isalarm(ctx))
remaining = alarm_expires_remaining(&ctx->t.alarm);
else
- remaining = hrtimer_expires_remaining(&ctx->t.tmr);
+ remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
}
epos->offset += adsize;
}
+/*
+ * Only 1 indirect extent in a row really makes sense, but allow up to 16 in
+ * case someone does something weird.
+ */
+#define UDF_MAX_INDIR_EXTS 16
+
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int8_t etype;
+ unsigned int indirections = 0;
while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
(EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
int block;
+
+ if (++indirections > UDF_MAX_INDIR_EXTS) {
+ udf_err(inode->i_sb,
+ "too many indirect extents in inode %lu\n",
+ inode->i_ino);
+ return -1;
+ }
+
epos->block = *eloc;
epos->offset = sizeof(struct allocExtDesc);
brelse(epos->bh);
if (c < 0x80U)
utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
else if (c < 0x800U) {
+ if (utf_o->u_len > (UDF_NAME_LEN - 4))
+ break;
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xc0 | (c >> 6));
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0x80 | (c & 0x3f));
} else {
+ if (utf_o->u_len > (UDF_NAME_LEN - 5))
+ break;
utf_o->u_name[utf_o->u_len++] =
(uint8_t)(0xe0 | (c >> 12));
utf_o->u_name[utf_o->u_len++] =
static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
{
unsigned c, i, max_val, utf_char;
- int utf_cnt, u_len;
+ int utf_cnt, u_len, u_ch;
memset(ocu, 0, sizeof(dstring) * length);
ocu[0] = 8;
max_val = 0xffU;
+ u_ch = 1;
try_again:
u_len = 0U;
utf_char = 0U;
utf_cnt = 0U;
for (i = 0U; i < utf->u_len; i++) {
+ /* Name didn't fit? */
+ if (u_len + 1 + u_ch >= length)
+ return 0;
+
c = (uint8_t)utf->u_name[i];
/* Complete a multi-byte UTF-8 character */
if (max_val == 0xffU) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
+ u_ch = 2;
goto try_again;
}
goto error_out;
c = (c << 8) | ocu[i++];
len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
- UDF_NAME_LEN - utf_o->u_len);
+ UDF_NAME_LEN - 2 - utf_o->u_len);
/* Valid character? */
if (len >= 0)
utf_o->u_len += len;
int len;
unsigned i, max_val;
uint16_t uni_char;
- int u_len;
+ int u_len, u_ch;
memset(ocu, 0, sizeof(dstring) * length);
ocu[0] = 8;
max_val = 0xffU;
+ u_ch = 1;
try_again:
u_len = 0U;
for (i = 0U; i < uni->u_len; i++) {
+ /* Name didn't fit? */
+ if (u_len + 1 + u_ch >= length)
+ return 0;
len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
if (!len)
continue;
if (uni_char > max_val) {
max_val = 0xffffU;
ocu[0] = (uint8_t)0x10U;
+ u_ch = 2;
goto try_again;
}
__be64 agfl_lsn;
__be32 agfl_crc;
__be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
-} xfs_agfl_t;
+} __attribute__((packed)) xfs_agfl_t;
#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
* has not had the inode cores stamped into it. Hence for readahead, the buffer
* may be potentially invalid.
*
- * If the readahead buffer is invalid, we don't want to mark it with an error,
- * but we do want to clear the DONE status of the buffer so that a followup read
- * will re-read it from disk. This will ensure that we don't get an unnecessary
- * warnings during log recovery and we don't get unnecssary panics on debug
- * kernels.
+ * If the readahead buffer is invalid, we need to mark it with an error and
+ * clear the DONE status of the buffer so that a followup read will re-read it
+ * from disk. We don't report the error otherwise to avoid warnings during log
+ * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
+ * because all we want to do is say readahead failed; there is no one to report
+ * the error to, so this will distinguish it from a non-ra verifier failure.
*/
static void
xfs_inode_buf_verify(
XFS_RANDOM_ITOBP_INOTOBP))) {
if (readahead) {
bp->b_flags &= ~XBF_DONE;
+ xfs_buf_ioerror(bp, -EIO);
return;
}
}
}
+ /*
+ * Clear b_error if this is a lookup from a caller that doesn't expect
+ * valid data to be found in the buffer.
+ */
+ if (!(flags & XBF_READ))
+ xfs_buf_ioerror(bp, 0);
+
XFS_STATS_INC(target->bt_mount, xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
return bp;
LIST_HEAD(dispose);
int loop = 0;
+ /*
+ * We need to flush the buffer workqueue to ensure that all IO
+ * completion processing is 100% done. Just waiting on buffer locks is
+ * not sufficient for async IO as the reference count held over IO is
+ * not released until after the buffer lock is dropped. Hence we need to
+ * ensure here that all reference counts have been dropped before we
+ * start walking the LRU list.
+ */
+ drain_workqueue(btp->bt_mount->m_buf_workqueue);
+
/* loop until there is nothing left on the lru list. */
while (list_lru_count(&btp->bt_lru)) {
list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
long tout = 0; /* milliseconds */
current->flags |= PF_MEMALLOC;
- set_freezable();
while (!kthread_should_stop()) {
if (tout && tout <= 20)
unsigned int keylen);
unsigned int reqsize;
+ bool has_setkey;
struct crypto_tfm base;
};
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
+{
+ return tfm->has_setkey;
+}
+
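
As a sketch of how the new flag is meant to be consumed (the algif_hash changes elsewhere in this series do the equivalent with a separate nokey ops table), a caller can refuse to use a keyed hash before a key has been supplied; key_set below is a hypothetical state flag, not part of the patch:

	/* illustrative only: reject use of a keyed hash until setkey() ran */
	if (crypto_ahash_has_setkey(tfm) && !key_set)
		return -ENOKEY;
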
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
struct sock *parent;
+ unsigned int refcnt;
+ unsigned int nokey_refcnt;
+
const struct af_alg_type *type;
void *private;
};
void (*release)(void *private);
int (*setkey)(void *private, const u8 *key, unsigned int keylen);
int (*accept)(void *private, struct sock *sk);
+ int (*accept_nokey)(void *private, struct sock *sk);
int (*setauthsize)(void *private, unsigned int authsize);
struct proto_ops *ops;
+ struct proto_ops *ops_nokey;
struct module *owner;
char name[14];
};
int af_alg_unregister_type(const struct af_alg_type *type);
int af_alg_release(struct socket *sock);
+void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock);
int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
return (struct alg_sock *)sk;
}
-static inline void af_alg_release_parent(struct sock *sk)
-{
- sock_put(alg_sk(sk)->parent);
-}
-
static inline void af_alg_init_completion(struct af_alg_completion *completion)
{
init_completion(&completion->completion);
unsigned int ivsize;
unsigned int reqsize;
+ bool has_setkey;
+
struct crypto_tfm base;
};
return tfm->setkey(tfm, key, keylen);
}
+static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
+{
+ return tfm->has_setkey;
+}
+
/**
* crypto_skcipher_reqtfm() - obtain cipher handle from request
* @req: skcipher_request out of which the cipher handle is to be obtained
*/
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
- if (__builtin_constant_p((cond)) ? !!(cond) : \
+ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \
extern void console_unlock(void);
extern void console_conditional_schedule(void);
extern void console_unblank(void);
+extern void console_flush_on_panic(void);
extern struct tty_driver *console_device(int *);
extern void console_stop(struct console *);
extern void console_start(struct console *);
int devpts_new_index(struct inode *ptmx_inode);
void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
/* mknod in devpts */
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
void *priv);
/* Dummy stubs in the no-pty case */
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
dev_t device, int index, void *priv)
{
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
- * @start_pid: timer statistics field to store the pid of the task which
+ * @is_rel: Set if the timer was armed relative
+ * @start_pid: timer statistics field to store the pid of the task which
* started the timer
* @start_site: timer statistics field to store the site where the timer
* was started
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
- unsigned long state;
+ u8 state;
+ u8 is_rel;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
#endif
+static inline ktime_t
+__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
+{
+ ktime_t rem = ktime_sub(timer->node.expires, now);
+
+ /*
+ * Adjust relative timers for the extra we added in
+ * hrtimer_start_range_ns() to prevent short timeouts.
+ */
+ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
+ rem.tv64 -= hrtimer_resolution;
+ return rem;
+}
+
+static inline ktime_t
+hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
+{
+ return __hrtimer_expires_remaining_adjusted(timer,
+ timer->base->get_time());
+}
+
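
For context, a minimal sketch of the asymmetry these helpers compensate for, assuming CONFIG_TIME_LOW_RES=y and an already-initialized timer t (both assumptions, not part of the patch):

	/* arming pads the relative expiry by one jiffy (see
	 * hrtimer_update_lowres() later in this series) */
	hrtimer_start(&t, ktime_set(0, 10 * NSEC_PER_MSEC), HRTIMER_MODE_REL);

	/* the raw readout would still include that extra jiffy;
	 * the adjusted variant subtracts it again */
	rem = hrtimer_expires_remaining_adjusted(&t);
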
extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
}
/* Query timers: */
-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+
+static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+{
+ return __hrtimer_get_remaining(timer, false);
+}
extern u64 hrtimer_get_next_event(void);
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+/* PRS_REG */
+#define DMA_PRS_PPR ((u32)1)
+
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-/* Returns true on success, false on denial. */
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+
+/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
+ * be set in @mode to specify whether the access was requested through
+ * a filesystem syscall (should use effective capabilities and fsuid
+ * of the caller) or through an explicit syscall such as
+ * process_vm_writev or ptrace (and should use the real credentials).
+ */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
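
As a sketch of the new contract (illustrative, not part of the patch): callers must now pick one credential mode explicitly, mirroring the conversions elsewhere in this series.

	/* procfs-style (filesystem) access check */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		return -EACCES;

	/* explicit syscall such as process_vm_readv */
	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm))
		return IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
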
static inline int ptrace_reparented(struct task_struct *child)
void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags);
+/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter: iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true. If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+ iter->next_index = iter->index;
+ return NULL;
+}
+
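
A minimal sketch of the intended retry pattern, matching the gang-lookup fixes later in this patch; process() is a hypothetical consumer:

	void **slot;
	struct radix_tree_iter iter;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		void *entry = rcu_dereference_raw(*slot);

		if (!entry)
			continue;
		/* raced with creation/deletion: redo this chunk */
		if (radix_tree_is_indirect_ptr(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		process(entry);
	}
	rcu_read_unlock();
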
/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
* Returns: current chunk size
*/
-static __always_inline unsigned
+static __always_inline long
radix_tree_chunk_size(struct radix_tree_iter *iter)
{
return iter->next_index - iter->index;
return slot + offset + 1;
}
} else {
- unsigned size = radix_tree_chunk_size(iter) - 1;
+ long size = radix_tree_chunk_size(iter);
- while (size--) {
+ while (--size > 0) {
slot++;
iter->index++;
if (likely(*slot))
__put_anon_vma(anon_vma);
}
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma)
- down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
- struct anon_vma *anon_vma = vma->anon_vma;
- if (anon_vma)
- up_write(&anon_vma->root->rwsem);
-}
-
static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
down_write(&anon_vma->root->rwsem);
* See the file COPYING for more details.
*/
+#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/static_key.h>
void *it_func; \
void *__data; \
\
+ if (!cpu_online(raw_smp_processor_id())) \
+ return; \
+ \
if (!(cond)) \
return; \
prercu; \
/* tty_mutex.c */
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(struct tty_struct *tty);
+extern int tty_lock_interruptible(struct tty_struct *tty);
extern void __lockfunc tty_unlock(struct tty_struct *tty);
extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
+ int count);
/* main midi functions */
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
/*
- * We raced in the idr lookup or with shm_destroy(). Either way, the
- * ID is busted.
+ * Callers of shm_lock() must validate the status of the returned ipc
+ * object pointer (as returned by ipc_lock()), and error out as
+ * appropriate.
*/
- WARN_ON(IS_ERR(ipcp));
-
+ if (IS_ERR(ipcp))
+ return (void *)ipcp;
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
}
-/* This is called by fork, once for every shm attach. */
-static void shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
shp = shm_lock(sfd->ns, sfd->id);
+
+ if (IS_ERR(shp))
+ return PTR_ERR(shp);
+
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
shm_unlock(shp);
+ return 0;
+}
+
+/* This is called by fork, once for every shm attach. */
+static void shm_open(struct vm_area_struct *vma)
+{
+ int err = __shm_open(vma);
+ /*
+ * We raced in the idr lookup or with shm_destroy().
+ * Either way, the ID is busted.
+ */
+ WARN_ON_ONCE(err);
}
/*
down_write(&shm_ids(ns).rwsem);
/* remove from the list of attaches of the shm segment */
shp = shm_lock(ns, sfd->id);
+
+ /*
+ * We raced in the idr lookup or with shm_destroy().
+ * Either way, the ID is busted.
+ */
+ if (WARN_ON_ONCE(IS_ERR(shp)))
+ goto done; /* no-op */
+
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
shm_destroy(ns, shp);
else
shm_unlock(shp);
+done:
up_write(&shm_ids(ns).rwsem);
}
struct shm_file_data *sfd = shm_file_data(file);
int ret;
+ /*
+ * In case of remap_file_pages() emulation, the file can represent a
+ * removed IPC ID: propagate the shm_lock() error to the caller.
+ */
+ ret = __shm_open(vma);
+ if (ret)
+ return ret;
+
ret = sfd->file->f_op->mmap(sfd->file, vma);
- if (ret != 0)
+ if (ret) {
+ shm_close(vma);
return ret;
+ }
sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
WARN_ON(!sfd->vm_ops->fault);
#endif
vma->vm_ops = &shm_vm_ops;
- shm_open(vma);
-
- return ret;
+ return 0;
}
static int shm_release(struct inode *ino, struct file *file)
/* Reuse ptrace permission checks for now. */
err = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ))
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
goto errout;
return task;
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+ */
+ free_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
}
} else {
}
ret = -EPERM;
- if (!ptrace_may_access(p, PTRACE_MODE_READ))
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
goto err_unlock;
head = p->robust_list;
}
ret = -EPERM;
- if (!ptrace_may_access(p, PTRACE_MODE_READ))
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
goto err_unlock;
head = p->compat_robust_list;
&task2->signal->cred_guard_mutex);
if (ret)
goto err;
- if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
- !ptrace_may_access(task2, PTRACE_MODE_READ)) {
+ if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
+ !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
ret = -EPERM;
goto err_unlock;
}
static void devm_memremap_release(struct device *dev, void *res)
{
- memunmap(res);
+ memunmap(*(void **)res);
}
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
/* Module is ready to execute: parsing args may do that. */
after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
- -32768, 32767, NULL,
+ -32768, 32767, mod,
unknown_module_param_cb);
if (IS_ERR(after_dashes)) {
err = PTR_ERR(after_dashes);
&& (str[2] == '\0' || str[2] == '.');
}
+static const char *symname(struct module *mod, unsigned int symnum)
+{
+ return mod->strtab + mod->symtab[symnum].st_name;
+}
+
static const char *get_ksymbol(struct module *mod,
unsigned long addr,
unsigned long *size,
/* We ignore unnamed symbols: they're uninformative
* and inserted at a whim. */
+ if (*symname(mod, i) == '\0'
+ || is_arm_mapping_symbol(symname(mod, i)))
+ continue;
+
if (mod->symtab[i].st_value <= addr
- && mod->symtab[i].st_value > mod->symtab[best].st_value
- && *(mod->strtab + mod->symtab[i].st_name) != '\0'
- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+ && mod->symtab[i].st_value > mod->symtab[best].st_value)
best = i;
if (mod->symtab[i].st_value > addr
- && mod->symtab[i].st_value < nextval
- && *(mod->strtab + mod->symtab[i].st_name) != '\0'
- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+ && mod->symtab[i].st_value < nextval)
nextval = mod->symtab[i].st_value;
}
*size = nextval - mod->symtab[best].st_value;
if (offset)
*offset = addr - mod->symtab[best].st_value;
- return mod->strtab + mod->symtab[best].st_name;
+ return symname(mod, best);
}
/* For kallsyms to ask for address resolution. NULL means not found. Careful
if (symnum < mod->num_symtab) {
*value = mod->symtab[symnum].st_value;
*type = mod->symtab[symnum].st_info;
- strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
- KSYM_NAME_LEN);
+ strlcpy(name, symname(mod, symnum), KSYM_NAME_LEN);
strlcpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, *value, mod);
preempt_enable();
unsigned int i;
for (i = 0; i < mod->num_symtab; i++)
- if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
+ if (strcmp(name, symname(mod, i)) == 0 &&
mod->symtab[i].st_info != 'U')
return mod->symtab[i].st_value;
return 0;
if (mod->state == MODULE_STATE_UNFORMED)
continue;
for (i = 0; i < mod->num_symtab; i++) {
- ret = fn(data, mod->strtab + mod->symtab[i].st_name,
+ ret = fn(data, symname(mod, i),
mod, mod->symtab[i].st_value);
if (ret != 0)
return ret;
* panic() is not being called from OOPS.
*/
debug_locks_off();
- console_trylock();
- console_unlock();
+ console_flush_on_panic();
if (!panic_blink)
panic_blink = no_blink;
static u64 seen_seq;
unsigned long flags;
bool wake_klogd = false;
- bool retry;
+ bool do_cond_resched, retry;
if (console_suspended) {
up_console_sem();
return;
}
+ /*
+ * Console drivers are called under logbuf_lock, so
+ * @console_may_schedule should be cleared before; however, we may
+ * end up dumping a lot of lines, for example, if called from
+ * console registration path, and should invoke cond_resched()
+ * between lines if allowable. Not doing so can cause a very long
+ * scheduling stall on a slow console, leading to RCU stalls and
+ * softlockup warnings that generate still more messages, practically
+ * incapacitating the system.
+ */
+ do_cond_resched = console_may_schedule;
console_may_schedule = 0;
/* flush buffered message fragment immediately to console */
call_console_drivers(level, ext_text, ext_len, text, len);
start_critical_timings();
local_irq_restore(flags);
+
+ if (do_cond_resched)
+ cond_resched();
}
console_locked = 0;
console_unlock();
}
+/**
+ * console_flush_on_panic - flush console content on panic
+ *
+ * Immediately output all pending messages no matter what.
+ */
+void console_flush_on_panic(void)
+{
+ /*
+ * If someone else is holding the console lock, trylock will fail
+ * and may_schedule may be set. Ignore and proceed to unlock so
+ * that messages are flushed out. As this can be called from any
+ * context and we don't want to get preempted while flushing,
+ * ensure may_schedule is cleared.
+ */
+ console_trylock();
+ console_may_schedule = 0;
+ console_unlock();
+}
+
/*
* Return the console tty driver structure and its associated index
*/
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
+ int dumpable = 0;
+ kuid_t caller_uid;
+ kgid_t caller_gid;
+
+ if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
+ WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
+ return -EPERM;
+ }
/* May we inspect the given task?
* This check is used both for attaching with ptrace
* because setting up the necessary parent/child relationship
* or halting the specified task is impossible.
*/
- int dumpable = 0;
+
/* Don't let security modules deny introspection */
if (same_thread_group(task, current))
return 0;
rcu_read_lock();
+ if (mode & PTRACE_MODE_FSCREDS) {
+ caller_uid = cred->fsuid;
+ caller_gid = cred->fsgid;
+ } else {
+ /*
+ * Using the euid would make more sense here, but something
+ * in userland might rely on the old behavior, and this
+ * shouldn't be a security problem since
+ * PTRACE_MODE_REALCREDS implies that the caller explicitly
+ * used a syscall that requests access to another process
+ * (and not a filesystem syscall to procfs).
+ */
+ caller_uid = cred->uid;
+ caller_gid = cred->gid;
+ }
tcred = __task_cred(task);
- if (uid_eq(cred->uid, tcred->euid) &&
- uid_eq(cred->uid, tcred->suid) &&
- uid_eq(cred->uid, tcred->uid) &&
- gid_eq(cred->gid, tcred->egid) &&
- gid_eq(cred->gid, tcred->sgid) &&
- gid_eq(cred->gid, tcred->gid))
+ if (uid_eq(caller_uid, tcred->euid) &&
+ uid_eq(caller_uid, tcred->suid) &&
+ uid_eq(caller_uid, tcred->uid) &&
+ gid_eq(caller_gid, tcred->egid) &&
+ gid_eq(caller_gid, tcred->sgid) &&
+ gid_eq(caller_gid, tcred->gid))
goto ok;
if (ptrace_has_cap(tcred->user_ns, mode))
goto ok;
goto out;
task_lock(task);
- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
task_unlock(task);
if (retval)
goto unlock_creds;
sched_domains_numa_masks[i][j] = mask;
- for (k = 0; k < nr_node_ids; k++) {
+ for_each_node(k) {
if (node_distance(j, k) > sched_domains_numa_distance[i])
continue;
user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
}
- if (prctl_map.exe_fd != (u32)-1)
+ if (prctl_map.exe_fd != (u32)-1) {
error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
- down_read(&mm->mmap_sem);
- if (error)
- goto out;
+ if (error)
+ return error;
+ }
+
+ down_write(&mm->mmap_sem);
/*
* We don't validate if these members are pointing to
if (prctl_map.auxv_size)
memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
- error = 0;
-out:
- up_read(&mm->mmap_sem);
- return error;
+ up_write(&mm->mmap_sem);
+ return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
error = -EINVAL;
- down_read(&mm->mmap_sem);
+ down_write(&mm->mmap_sem);
vma = find_vma(mm, addr);
prctl_map.start_code = mm->start_code;
error = 0;
out:
- up_read(&mm->mmap_sem);
+ up_write(&mm->mmap_sem);
return error;
}
*/
static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_clock_base *base,
- unsigned long newstate, int reprogram)
+ u8 newstate, int reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
- unsigned int state = timer->state;
+ u8 state = timer->state;
timer->state = newstate;
if (!(state & HRTIMER_STATE_ENQUEUED))
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
if (hrtimer_is_queued(timer)) {
- unsigned long state = timer->state;
+ u8 state = timer->state;
int reprogram;
/*
return 0;
}
+static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+{
+#ifdef CONFIG_TIME_LOW_RES
+ /*
+ * CONFIG_TIME_LOW_RES indicates that the system has no way to return
+ * granular time values. For relative timers we add hrtimer_resolution
+ * (i.e. one jiffy) to prevent short timeouts.
+ */
+ timer->is_rel = mode & HRTIMER_MODE_REL;
+ if (timer->is_rel)
+ tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
+#endif
+ return tim;
+}
+
/**
* hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
* @timer: the timer to be added
/* Remove an active timer from the queue: */
remove_hrtimer(timer, base, true);
- if (mode & HRTIMER_MODE_REL) {
+ if (mode & HRTIMER_MODE_REL)
tim = ktime_add_safe(tim, base->get_time());
- /*
- * CONFIG_TIME_LOW_RES is a temporary way for architectures
- * to signal that they simply return xtime in
- * do_gettimeoffset(). In this case we want to round up by
- * resolution when starting a relative timer, to avoid short
- * timeouts. This will go away with the GTOD framework.
- */
-#ifdef CONFIG_TIME_LOW_RES
- tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
-#endif
- }
+
+ tim = hrtimer_update_lowres(timer, tim, mode);
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
/**
* hrtimer_get_remaining - get remaining time for the timer
* @timer: the timer to read
+ * @adjust: adjust relative timers when CONFIG_TIME_LOW_RES=y
*/
-ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
{
unsigned long flags;
ktime_t rem;
lock_hrtimer_base(timer, &flags);
- rem = hrtimer_expires_remaining(timer);
+ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
+ rem = hrtimer_expires_remaining_adjusted(timer);
+ else
+ rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
return rem;
}
-EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
+EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
#ifdef CONFIG_NO_HZ_COMMON
/**
timer_stats_account_hrtimer(timer);
fn = timer->function;
+ /*
+ * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
+ * timer is restarted with a period then it becomes an absolute
+ * timer. If it's not restarted, it does not matter.
+ */
+ if (IS_ENABLED(CONFIG_TIME_LOW_RES))
+ timer->is_rel = false;
+
/*
* Because we run timers from hardirq context, there is no chance
* they get migrated to another cpu, therefore its safe to unlock
*/
static struct timeval itimer_get_remtime(struct hrtimer *timer)
{
- ktime_t rem = hrtimer_get_remaining(timer);
+ ktime_t rem = __hrtimer_get_remaining(timer, true);
/*
* Racy but safe: if the itimer expires after the above
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
- remaining = ktime_sub(hrtimer_get_expires(timer), now);
+ remaining = __hrtimer_expires_remaining_adjusted(timer, now);
/* Return 0 only, when the timer is expired and not pending */
if (remaining.tv64 <= 0) {
/*
print_name_offset(m, taddr);
SEQ_printf(m, ", ");
print_name_offset(m, timer->function);
- SEQ_printf(m, ", S:%02lx", timer->state);
+ SEQ_printf(m, ", S:%02x", timer->state);
#ifdef CONFIG_TIMER_STATS
SEQ_printf(m, ", ");
print_name_offset(m, timer->start_site);
{
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
+ ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
break;
}
+ /*
+ * Some archs may not have the passed in ip in the dump.
+ * If that happens, we need to show everything.
+ */
+ if (i == stack_trace_max.nr_entries)
+ i = 0;
+
/*
* Now find where in the stack these are.
*/
static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
{
- if (overlap(addr, len, _text, _etext) ||
+ if (overlap(addr, len, _stext, _etext) ||
overlap(addr, len, __start_rodata, __end_rodata))
err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
}
asmlinkage __visible void dump_stack(void)
{
+ unsigned long flags;
int was_locked;
int old;
int cpu;
* Permit this cpu to perform nested stack dumps while serialising
* against other CPUs
*/
- preempt_disable();
-
retry:
+ local_irq_save(flags);
cpu = smp_processor_id();
old = atomic_cmpxchg(&dump_lock, -1, cpu);
if (old == -1) {
} else if (old == cpu) {
was_locked = 1;
} else {
+ local_irq_restore(flags);
cpu_relax();
goto retry;
}
if (!was_locked)
atomic_set(&dump_lock, -1);
- preempt_enable();
+ local_irq_restore(flags);
}
#else
asmlinkage __visible void dump_stack(void)
struct klist_node *n)
{
i->i_klist = k;
- i->i_cur = n;
- if (n)
- kref_get(&n->n_ref);
+ i->i_cur = NULL;
+ if (n && kref_get_unless_zero(&n->n_ref))
+ i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
MODULE_LICENSE("GPL");
+MODULE_SOFTDEP("pre: crc32c");
return 0;
radix_tree_for_each_slot(slot, root, &iter, first_index) {
- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+ results[ret] = rcu_dereference_raw(*slot);
if (!results[ret])
continue;
+ if (radix_tree_is_indirect_ptr(results[ret])) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
if (++ret == max_items)
break;
}
return 0;
radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
- results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+ results[ret] = rcu_dereference_raw(*slot);
if (!results[ret])
continue;
+ if (radix_tree_is_indirect_ptr(results[ret])) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
if (++ret == max_items)
break;
}
[STRING_UNITS_10] = 1000,
[STRING_UNITS_2] = 1024,
};
- int i, j;
- u32 remainder = 0, sf_cap, exp;
+ static const unsigned int rounding[] = { 500, 50, 5 };
+ int i = 0, j;
+ u32 remainder = 0, sf_cap;
char tmp[8];
const char *unit;
tmp[0] = '\0';
- i = 0;
- if (!size)
+
+ if (blk_size == 0)
+ size = 0;
+ if (size == 0)
goto out;
- while (blk_size >= divisor[units]) {
- remainder = do_div(blk_size, divisor[units]);
+ /* This is Napier's algorithm. Reduce the original block size to
+ *
+ * coefficient * divisor[units]^i
+ *
+ * we do the reduction so both coefficients are just under 32 bits so
+ * that multiplying them together won't overflow 64 bits and we keep
+ * as much precision as possible in the numbers.
+ *
+ * Note: it's safe to throw away the remainders here because all the
+ * precision is in the coefficients.
+ */
+ while (blk_size >> 32) {
+ do_div(blk_size, divisor[units]);
i++;
}
- exp = divisor[units] / (u32)blk_size;
- /*
- * size must be strictly greater than exp here to ensure that remainder
- * is greater than divisor[units] coming out of the if below.
- */
- if (size > exp) {
- remainder = do_div(size, divisor[units]);
- remainder *= blk_size;
+ while (size >> 32) {
+ do_div(size, divisor[units]);
i++;
- } else {
- remainder *= size;
}
+ /* now perform the actual multiplication, keeping i as the sum of the
+ * two logarithms */
size *= blk_size;
- size += remainder / divisor[units];
- remainder %= divisor[units];
+ /* and logarithmically reduce it until it's just under the divisor */
while (size >= divisor[units]) {
remainder = do_div(size, divisor[units]);
i++;
}
+ /* work out in j how many digits of precision we need from the
+ * remainder */
sf_cap = size;
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
- if (j) {
+ if (units == STRING_UNITS_2) {
+ /* express the remainder as a decimal. It's currently the
+ * numerator of a fraction whose denominator is
+ * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
remainder *= 1000;
- remainder /= divisor[units];
+ remainder >>= 10;
+ }
+
+ /* add a 5 to the digit below what will be printed to ensure
+ * an arithmetical round up and carry it through to size */
+ remainder += rounding[j];
+ if (remainder >= 1000) {
+ remainder -= 1000;
+ size += 1;
+ }
+
+ if (j) {
snprintf(tmp, sizeof(tmp), ".%03u", remainder);
tmp[j+1] = '\0';
}
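
A worked example under the rewritten reduction, assuming the kernel's string_get_size() signature: with 4096 blocks of 512 bytes in STRING_UNITS_2, neither coefficient needs pre-reduction (both fit in 32 bits), the product 2097152 is divided down by 1024 twice (i = 2, remainder 0 each time), sf_cap = 2 yields j = 2, the rounding term 5 does not carry, and the fraction ".005" is truncated to two digits:

	char buf[16];

	string_get_size(4096, 512, STRING_UNITS_2, buf, sizeof(buf));
	/* buf == "2.00 MiB" (4096 * 512 = 2097152 = 2 * 1024^2) */
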
* here rather than calling cond_resched().
*/
if (current->flags & PF_WQ_WORKER)
- schedule_timeout(1);
+ schedule_timeout_uninterruptible(1);
else
cond_resched();
* Did it turn free?
*/
ret = __get_any_page(page, pfn, 0);
- if (!PageLRU(page)) {
+ if (ret == 1 && !PageLRU(page)) {
/* Drop page reference which is from __get_any_page() */
put_hwpoison_page(page);
pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
*/
unsigned int munlock_vma_page(struct page *page)
{
- unsigned int nr_pages;
+ int nr_pages;
struct zone *zone = page_zone(page);
/* For try_to_munlock() and to serialize with page migration */
struct vm_area_struct *vma = mm->mmap;
while (vma) {
+ struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
- vma_lock_anon_vma(vma);
- list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
- anon_vma_interval_tree_verify(avc);
- vma_unlock_anon_vma(vma);
+ if (anon_vma) {
+ anon_vma_lock_read(anon_vma);
+ list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+ anon_vma_interval_tree_verify(avc);
+ anon_vma_unlock_read(anon_vma);
+ }
+
highest_address = vma->vm_end;
vma = vma->vm_next;
i++;
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
- int error;
+ int error = 0;
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
+ /* Guard against wrapping around to address 0. */
+ if (address < PAGE_ALIGN(address+4))
+ address = PAGE_ALIGN(address+4);
+ else
+ return -ENOMEM;
+
+ /* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma)))
return -ENOMEM;
- vma_lock_anon_vma(vma);
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
- * Also guard against wrapping around to address 0.
*/
- if (address < PAGE_ALIGN(address+4))
- address = PAGE_ALIGN(address+4);
- else {
- vma_unlock_anon_vma(vma);
- return -ENOMEM;
- }
- error = 0;
+ anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address > vma->vm_end) {
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
- * vma_lock_anon_vma() doesn't help here, as
+ * anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
}
}
}
- vma_unlock_anon_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
struct mm_struct *mm = vma->vm_mm;
int error;
- /*
- * We must make sure the anon_vma is allocated
- * so that the anon_vma locking is not a noop.
- */
- if (unlikely(anon_vma_prepare(vma)))
- return -ENOMEM;
-
address &= PAGE_MASK;
error = security_mmap_addr(address);
if (error)
return error;
- vma_lock_anon_vma(vma);
+ /* We must make sure the anon_vma is allocated. */
+ if (unlikely(anon_vma_prepare(vma)))
+ return -ENOMEM;
/*
* vma->vm_start/vm_end cannot change under us because the caller
* is required to hold the mmap_sem in read mode. We need the
* anon_vma lock to serialize against concurrent expand_stacks.
*/
+ anon_vma_lock_write(vma->anon_vma);
/* Somebody else might have raced and expanded it already */
if (address < vma->vm_start) {
* updates, but we only hold a shared mmap_sem
* lock here, so we need to protect against
* concurrent vma expansions.
- * vma_lock_anon_vma() doesn't help here, as
+ * anon_vma_lock_write() doesn't help here, as
* we don't guarantee that all growable vmas
* in a mm share the same root anon vma.
* So, we reuse mm->page_table_lock to guard
}
}
}
- vma_unlock_anon_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma_merge(vma, vma->vm_flags);
validate_mm(mm);
return error;
if (!vma || !(vma->vm_flags & VM_SHARED))
goto out;
- if (start < vma->vm_start || start + size > vma->vm_end)
+ if (start < vma->vm_start)
goto out;
- if (pgoff == linear_page_index(vma, start)) {
- ret = 0;
- goto out;
+ if (start + size > vma->vm_end) {
+ struct vm_area_struct *next;
+
+ for (next = vma->vm_next; next; next = next->vm_next) {
+ /* hole between vmas ? */
+ if (next->vm_start != next->vm_prev->vm_end)
+ goto out;
+
+ if (next->vm_file != vma->vm_file)
+ goto out;
+
+ if (next->vm_flags != vma->vm_flags)
+ goto out;
+
+ if (start + size <= next->vm_end)
+ break;
+ }
+
+ if (!next)
+ goto out;
}
prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
flags &= MAP_NONBLOCK;
flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
if (vma->vm_flags & VM_LOCKED) {
+ struct vm_area_struct *tmp;
flags |= MAP_LOCKED;
+
/* drop PG_Mlocked flag for over-mapped range */
- munlock_vma_pages_range(vma, start, start + size);
+ for (tmp = vma; tmp->vm_start >= start + size;
+ tmp = tmp->vm_next) {
+ munlock_vma_pages_range(tmp,
+ max(tmp->vm_start, start),
+ min(tmp->vm_end, start + size));
+ }
}
file = get_file(vma->vm_file);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
- flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+ /* collapse entails shooting down ptes, not the pmd */
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
#endif
goto free_proc_pages;
}
- mm = mm_access(task, PTRACE_MODE_ATTACH);
+ mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
if (!mm || IS_ERR(mm)) {
rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
/*
static void record_obj(unsigned long handle, unsigned long obj)
{
- *(unsigned long *)handle = obj;
+ /*
+ * The lsb of @obj represents the handle lock while the other bits
+ * represent the object value the handle points to, so the update
+ * must not tear the store.
+ */
+ WRITE_ONCE(*(unsigned long *)handle, obj);
}
/* zpool driver */
free_obj = obj_malloc(d_page, class, handle);
zs_object_copy(free_obj, used_obj, class);
index++;
+ /*
+ * record_obj updates the handle's value to free_obj and will
+ * invalidate the lock bit (i.e., HANDLE_PIN_BIT) of the handle,
+ * which breaks synchronization using pin_tag (e.g., zs_free),
+ * so let's keep the lock bit set.
+ */
+ free_obj |= BIT(HANDLE_PIN_BIT);
record_obj(handle, free_obj);
unpin_tag(handle);
obj_free(pool, class, used_obj);
if (unlikely(!sock))
return -ENOTSOCK;
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags);
if (base != 0) {
addr = NULL;
addrlen = 0;
struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
transport->inet->sk_write_pending--;
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
}
/**
/* Don't race with disconnect */
if (xprt_connected(xprt)) {
- if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) {
- /*
- * Notify TCP that we're limited by the application
- * window size
- */
- set_bit(SOCK_NOSPACE, &transport->sock->flags);
- sk->sk_write_pending++;
- /* ...and wait for more buffer space */
- xprt_wait_for_buffer_space(task, xs_nospace_callback);
- }
- } else {
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
+ /* wait for more buffer space */
+ sk->sk_write_pending++;
+ xprt_wait_for_buffer_space(task, xs_nospace_callback);
+ } else
ret = -ENOTCONN;
- }
spin_unlock_bh(&xprt->transport_lock);
case -EAGAIN:
status = xs_nospace(task);
break;
- default:
- dprintk("RPC: sendmsg returned unrecognized error %d\n",
- -status);
case -ENETUNREACH:
case -ENOBUFS:
case -EPIPE:
case -EPERM:
/* When the server has died, an ICMP port unreachable message
* prompts ECONNREFUSED. */
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
+ break;
+ default:
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
}
return status;
case -EAGAIN:
status = xs_nospace(task);
break;
- default:
- dprintk("RPC: sendmsg returned unrecognized error %d\n",
- -status);
case -ECONNRESET:
case -ECONNREFUSED:
case -ENOTCONN:
case -EADDRINUSE:
case -ENOBUFS:
case -EPIPE:
- clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags);
+ break;
+ default:
+ dprintk("RPC: sendmsg returned unrecognized error %d\n",
+ -status);
}
return status;
static void xs_write_space(struct sock *sk)
{
- struct socket *sock;
+ struct socket_wq *wq;
struct rpc_xprt *xprt;
- if (unlikely(!(sock = sk->sk_socket)))
+ if (!sk->sk_socket)
return;
- clear_bit(SOCK_NOSPACE, &sock->flags);
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
if (unlikely(!(xprt = xprt_from_sock(sk))))
return;
- if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0)
- return;
+ rcu_read_lock();
+ wq = rcu_dereference(sk->sk_wq);
+ if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
+ goto out;
xprt_write_space(xprt);
+out:
+ rcu_read_unlock();
}
/**
delta.sort()
delta.reverse()
-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
- (add, remove, grow, shrink, up, -down, up-down)
-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
+print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
+ (add, remove, grow, shrink, up, -down, up-down))
+print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
for d, n in delta:
- if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
+ if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
{
int ret = 0;
const struct cred *cred, *child_cred;
+ const kernel_cap_t *caller_caps;
rcu_read_lock();
cred = current_cred();
child_cred = __task_cred(child);
+ if (mode & PTRACE_MODE_FSCREDS)
+ caller_caps = &cred->cap_effective;
+ else
+ caller_caps = &cred->cap_permitted;
if (cred->user_ns == child_cred->user_ns &&
- cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
+ cap_issubset(child_cred->cap_permitted, *caller_caps))
goto out;
if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
goto out;
#include <linux/integrity.h>
#include <linux/evm.h>
#include <crypto/hash.h>
+#include <crypto/algapi.h>
#include "evm.h"
int evm_initialized;
xattr_value_len, calc.digest);
if (rc)
break;
- rc = memcmp(xattr_data->digest, calc.digest,
+ rc = crypto_memneq(xattr_data->digest, calc.digest,
sizeof(calc.digest));
if (rc)
rc = -EINVAL;
#include <sound/compress_offload.h>
#include <sound/compress_driver.h>
+/* struct snd_compr_codec_caps overflows the ioctl bit size for some
+ * architectures, so we need to disable the relevant ioctls.
+ */
+#if _IOC_SIZEBITS < 14
+#define COMPR_CODEC_CAPS_OVERFLOW
+#endif
+
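
The reason for the cutoff, stated as an illustration: an ioctl number encodes its argument size in a field _IOC_SIZEBITS bits wide, so the largest argument _IOWR() can describe is (1 << _IOC_SIZEBITS) - 1 bytes, and on architectures where that field is narrower than 14 bits struct snd_compr_codec_caps no longer fits. A hypothetical compile-time restatement of the same constraint (it would intentionally break the build on the affected architectures, which is why the runtime opt-out above is used instead):

	BUILD_BUG_ON(sizeof(struct snd_compr_codec_caps) >=
		     (1 << _IOC_SIZEBITS));
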
/* TODO:
* - add substream support for multiple devices in case of
* SND_DYNAMIC_MINORS is not used
return retval;
}
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
static int
snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
{
kfree(caps);
return retval;
}
+#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
/* revisit this with snd_pcm_preallocate_xxx */
static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
retval = snd_compr_get_caps(stream, arg);
break;
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
retval = snd_compr_get_codec_caps(stream, arg);
break;
+#endif
case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
retval = snd_compr_set_params(stream, arg);
break;
return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
}
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+ bool trylock)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hw_params *params, *sparams;
struct snd_mask sformat_mask;
struct snd_mask mask;
- if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ if (trylock) {
+ if (!(mutex_trylock(&runtime->oss.params_lock)))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
return -EINTR;
sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
params = kmalloc(sizeof(*params), GFP_KERNEL);
if (asubstream == NULL)
asubstream = substream;
if (substream->runtime->oss.params) {
- err = snd_pcm_oss_change_params(substream);
+ err = snd_pcm_oss_change_params(substream, false);
if (err < 0)
return err;
}
return 0;
runtime = substream->runtime;
if (runtime->oss.params) {
- err = snd_pcm_oss_change_params(substream);
+ err = snd_pcm_oss_change_params(substream, false);
if (err < 0)
return err;
}
runtime = substream->runtime;
if (runtime->oss.params &&
- (err = snd_pcm_oss_change_params(substream)) < 0)
+ (err = snd_pcm_oss_change_params(substream, false)) < 0)
return err;
info.fragsize = runtime->oss.period_bytes;
return -EIO;
if (runtime->oss.params) {
- if ((err = snd_pcm_oss_change_params(substream)) < 0)
+ /* use mutex_trylock() for params_lock to avoid a deadlock
+ * between mmap_sem and params_lock taken by
+ * copy_from/to_user() in snd_pcm_oss_write/read()
+ */
+ err = snd_pcm_oss_change_params(substream, true);
+ if (err < 0)
return err;
}
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);
+/* Writer in rwsem may block readers even during its waiting in queue,
+ * and this may lead to a deadlock when the code path takes read sem
+ * twice (e.g. one in snd_pcm_action_nonatomic() and another in
+ * snd_pcm_stream_lock()). As a (suboptimal) workaround, let the writer
+ * spin until it gets the lock.
+ */
+static inline void down_write_nonblock(struct rw_semaphore *lock)
+{
+ while (!down_write_trylock(lock))
+ cond_resched();
+}
+
/**
* snd_pcm_stream_lock - Lock the PCM stream
* @substream: PCM substream
res = -ENOMEM;
goto _nolock;
}
- down_write(&snd_pcm_link_rwsem);
+ down_write_nonblock(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
substream->runtime->status->state != substream1->runtime->status->state ||
struct snd_pcm_substream *s;
int res = 0;
- down_write(&snd_pcm_link_rwsem);
+ down_write_nonblock(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (!snd_pcm_stream_linked(substream)) {
res = -EALREADY;
unsigned long flags;
long result = 0, count1;
struct snd_rawmidi_runtime *runtime = substream->runtime;
+ unsigned long appl_ptr;
+ spin_lock_irqsave(&runtime->lock, flags);
while (count > 0 && runtime->avail) {
count1 = runtime->buffer_size - runtime->appl_ptr;
if (count1 > count)
count1 = count;
- spin_lock_irqsave(&runtime->lock, flags);
if (count1 > (int)runtime->avail)
count1 = runtime->avail;
+
+ /* update runtime->appl_ptr before unlocking for userbuf */
+ appl_ptr = runtime->appl_ptr;
+ runtime->appl_ptr += count1;
+ runtime->appl_ptr %= runtime->buffer_size;
+ runtime->avail -= count1;
+
if (kernelbuf)
- memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
+ memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
if (userbuf) {
spin_unlock_irqrestore(&runtime->lock, flags);
if (copy_to_user(userbuf + result,
- runtime->buffer + runtime->appl_ptr, count1)) {
+ runtime->buffer + appl_ptr, count1)) {
return result > 0 ? result : -EFAULT;
}
spin_lock_irqsave(&runtime->lock, flags);
}
- runtime->appl_ptr += count1;
- runtime->appl_ptr %= runtime->buffer_size;
- runtime->avail -= count1;
- spin_unlock_irqrestore(&runtime->lock, flags);
result += count1;
count -= count1;
}
+ spin_unlock_irqrestore(&runtime->lock, flags);
return result;
}
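/*
 * Editor's sketch of the pattern adopted above; ring_claim_and_copy()
 * is a hypothetical condensed helper, not code from the patch.  The
 * ring indices are advanced while the lock is held and a local
 * snapshot addresses the buffer during the unlocked, possibly faulting
 * copy, so a concurrent reader can never hand out the same bytes.
 */
static long ring_claim_and_copy(struct snd_rawmidi_runtime *runtime,
				char __user *userbuf, long count1)
{
	unsigned long flags, appl_ptr;

	spin_lock_irqsave(&runtime->lock, flags);
	appl_ptr = runtime->appl_ptr;		/* snapshot for the copy */
	runtime->appl_ptr = (appl_ptr + count1) % runtime->buffer_size;
	runtime->avail -= count1;
	spin_unlock_irqrestore(&runtime->lock, flags);

	if (copy_to_user(userbuf, runtime->buffer + appl_ptr, count1))
		return -EFAULT;
	return count1;
}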
EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
/**
- * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
* @substream: the rawmidi substream
* @buffer: the buffer pointer
* @count: data size to transfer
*
- * Copies data from the internal output buffer to the given buffer.
- *
- * Call this in the interrupt handler when the midi output is ready,
- * and call snd_rawmidi_transmit_ack() after the transmission is
- * finished.
- *
- * Return: The size of copied data, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_peek() that does not take
+ * the spinlock; the caller must hold runtime->lock.
*/
-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count)
{
- unsigned long flags;
int result, count1;
struct snd_rawmidi_runtime *runtime = substream->runtime;
return -EINVAL;
}
result = 0;
- spin_lock_irqsave(&runtime->lock, flags);
if (runtime->avail >= runtime->buffer_size) {
/* warning: lowlevel layer MUST trigger down the hardware */
goto __skip;
}
}
__skip:
+ return result;
+}
+EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
+
+/**
+ * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * @substream: the rawmidi substream
+ * @buffer: the buffer pointer
+ * @count: data size to transfer
+ *
+ * Copies data from the internal output buffer to the given buffer.
+ *
+ * Call this in the interrupt handler when the midi output is ready,
+ * and call snd_rawmidi_transmit_ack() after the transmission is
+ * finished.
+ *
+ * Return: The size of copied data, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ unsigned char *buffer, int count)
+{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&runtime->lock, flags);
+ result = __snd_rawmidi_transmit_peek(substream, buffer, count);
spin_unlock_irqrestore(&runtime->lock, flags);
return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
/**
- * snd_rawmidi_transmit_ack - acknowledge the transmission
+ * __snd_rawmidi_transmit_ack - acknowledge the transmission
* @substream: the rawmidi substream
* @count: the transferred count
*
- * Advances the hardware pointer for the internal output buffer with
- * the given size and updates the condition.
- * Call after the transmission is finished.
- *
- * Return: The advanced size if successful, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_ack() that does not take
+ * the spinlock; the caller must hold runtime->lock.
*/
-int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
{
- unsigned long flags;
struct snd_rawmidi_runtime *runtime = substream->runtime;
if (runtime->buffer == NULL) {
"snd_rawmidi_transmit_ack: output is not active!!!\n");
return -EINVAL;
}
- spin_lock_irqsave(&runtime->lock, flags);
snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
runtime->hw_ptr += count;
runtime->hw_ptr %= runtime->buffer_size;
if (runtime->drain || snd_rawmidi_ready(substream))
wake_up(&runtime->sleep);
}
- spin_unlock_irqrestore(&runtime->lock, flags);
return count;
}
+EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
+
+/**
+ * snd_rawmidi_transmit_ack - acknowledge the transmission
+ * @substream: the rawmidi substream
+ * @count: the transferred count
+ *
+ * Advances the hardware pointer for the internal output buffer with
+ * the given size and updates the condition.
+ * Call after the transmission is finished.
+ *
+ * Return: The advanced size if successful, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&runtime->lock, flags);
+ result = __snd_rawmidi_transmit_ack(substream, count);
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ return result;
+}
EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
/**
int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count)
{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ int result;
+ unsigned long flags;
+
+ spin_lock_irqsave(&runtime->lock, flags);
if (!substream->opened)
- return -EBADFD;
- count = snd_rawmidi_transmit_peek(substream, buffer, count);
- if (count < 0)
- return count;
- return snd_rawmidi_transmit_ack(substream, count);
+ result = -EBADFD;
+ else {
+ count = __snd_rawmidi_transmit_peek(substream, buffer, count);
+ if (count <= 0)
+ result = count;
+ else
+ result = __snd_rawmidi_transmit_ack(substream, count);
+ }
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ return result;
}
EXPORT_SYMBOL(snd_rawmidi_transmit);
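/*
 * Editor's note: the old snd_rawmidi_transmit() took and released
 * runtime->lock once inside peek and again inside ack, so another
 * context could consume or rewrite the buffer in between and the ack
 * would advance hw_ptr past data that was never actually sent.  The
 * __-prefixed variants exist precisely so this function can hold the
 * lock across both steps.
 */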
unsigned long flags;
long count1, result;
struct snd_rawmidi_runtime *runtime = substream->runtime;
+ unsigned long appl_ptr;
- if (snd_BUG_ON(!kernelbuf && !userbuf))
+ if (!kernelbuf && !userbuf)
return -EINVAL;
if (snd_BUG_ON(!runtime->buffer))
return -EINVAL;
count1 = count;
if (count1 > (long)runtime->avail)
count1 = runtime->avail;
+
+ /* update runtime->appl_ptr before unlocking for userbuf */
+ appl_ptr = runtime->appl_ptr;
+ runtime->appl_ptr += count1;
+ runtime->appl_ptr %= runtime->buffer_size;
+ runtime->avail -= count1;
+
if (kernelbuf)
- memcpy(runtime->buffer + runtime->appl_ptr,
+ memcpy(runtime->buffer + appl_ptr,
kernelbuf + result, count1);
else if (userbuf) {
spin_unlock_irqrestore(&runtime->lock, flags);
- if (copy_from_user(runtime->buffer + runtime->appl_ptr,
+ if (copy_from_user(runtime->buffer + appl_ptr,
userbuf + result, count1)) {
spin_lock_irqsave(&runtime->lock, flags);
result = result > 0 ? result : -EFAULT;
}
spin_lock_irqsave(&runtime->lock, flags);
}
- runtime->appl_ptr += count1;
- runtime->appl_ptr %= runtime->buffer_size;
- runtime->avail -= count1;
result += count1;
count -= count1;
}
dp->index = i;
if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
- pr_err("ALSA: seq_oss: too many applications\n");
+ pr_debug("ALSA: seq_oss: too many applications\n");
rc = -ENOMEM;
goto _error;
}
struct seq_oss_synth *rec;
struct seq_oss_synthinfo *info;
- if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
+ if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
return;
for (i = 0; i < dp->max_synthdev; i++) {
info = &dp->synths[i];
else
down_read(&grp->list_mutex);
list_for_each_entry(subs, &grp->list_head, src_list) {
+ /* both ports ready? */
+ if (atomic_read(&subs->ref_count) != 2)
+ continue;
event->dest = subs->info.dest;
if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
/* convert time according to flag with subscription */
if (snd_BUG_ON(!pool))
return -EINVAL;
- if (pool->ptr) /* should be atomic? */
- return 0;
- pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
- if (!pool->ptr)
+ cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+ if (!cellptr)
return -ENOMEM;
/* add new cells to the free cell list */
spin_lock_irqsave(&pool->lock, flags);
+ if (pool->ptr) {
+ spin_unlock_irqrestore(&pool->lock, flags);
+ vfree(cellptr);
+ return 0;
+ }
+
+ pool->ptr = cellptr;
pool->free = NULL;
for (cell = 0; cell < pool->size; cell++) {
}
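/*
 * Editor's sketch of the pattern above; pool_init_once() is a
 * hypothetical condensed helper, not code from the patch.  A sleeping
 * allocator such as vmalloc() must not run under a spinlock, so the
 * buffer is allocated first and the lock is taken only to re-check
 * and publish it; the loser of the race frees its copy.
 */
static int pool_init_once(struct snd_seq_pool *pool, size_t size)
{
	void *cellptr;
	unsigned long flags;

	cellptr = vmalloc(size);	/* may sleep: no lock held yet */
	if (!cellptr)
		return -ENOMEM;
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr) {		/* lost the race: already set up */
		spin_unlock_irqrestore(&pool->lock, flags);
		vfree(cellptr);
		return 0;
	}
	pool->ptr = cellptr;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}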
/* */
-enum group_type {
- SRC_LIST, DEST_LIST
-};
-
static int subscribe_port(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
return NULL;
}
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_subscribers *subs,
+ bool is_src, bool ack);
+
+static inline struct snd_seq_subscribers *
+get_subscriber(struct list_head *p, bool is_src)
+{
+ if (is_src)
+ return list_entry(p, struct snd_seq_subscribers, src_list);
+ else
+ return list_entry(p, struct snd_seq_subscribers, dest_list);
+}
+
/*
* remove all subscribers on the list
* this is called from port_delete, for each src and dest list.
static void clear_subscriber_list(struct snd_seq_client *client,
struct snd_seq_client_port *port,
struct snd_seq_port_subs_info *grp,
- int grptype)
+ int is_src)
{
struct list_head *p, *n;
struct snd_seq_client *c;
struct snd_seq_client_port *aport;
- if (grptype == SRC_LIST) {
- subs = list_entry(p, struct snd_seq_subscribers, src_list);
+ subs = get_subscriber(p, is_src);
+ if (is_src)
aport = get_client_port(&subs->info.dest, &c);
- } else {
- subs = list_entry(p, struct snd_seq_subscribers, dest_list);
+ else
aport = get_client_port(&subs->info.sender, &c);
- }
- list_del(p);
- unsubscribe_port(client, port, grp, &subs->info, 0);
+ delete_and_unsubscribe_port(client, port, subs, is_src, false);
+
if (!aport) {
/* looks like the connected port is being deleted.
* we decrease the counter, and when both ports are deleted
*/
if (atomic_dec_and_test(&subs->ref_count))
kfree(subs);
- } else {
- /* ok we got the connected port */
- struct snd_seq_port_subs_info *agrp;
- agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
- down_write(&agrp->list_mutex);
- if (grptype == SRC_LIST)
- list_del(&subs->dest_list);
- else
- list_del(&subs->src_list);
- up_write(&agrp->list_mutex);
- unsubscribe_port(c, aport, agrp, &subs->info, 1);
- kfree(subs);
- snd_seq_port_unlock(aport);
- snd_seq_client_unlock(c);
+ continue;
}
+
+ /* ok we got the connected port */
+ delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
+ kfree(subs);
+ snd_seq_port_unlock(aport);
+ snd_seq_client_unlock(c);
}
}
snd_use_lock_sync(&port->use_lock);
/* clear subscribers info */
- clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
- clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
+ clear_subscriber_list(client, port, &port->c_src, true);
+ clear_subscriber_list(client, port, &port->c_dest, false);
if (port->private_free)
port->private_free(port->private_data);
return 0;
}
-
-/* connect two ports */
-int snd_seq_port_connect(struct snd_seq_client *connector,
- struct snd_seq_client *src_client,
- struct snd_seq_client_port *src_port,
- struct snd_seq_client *dest_client,
- struct snd_seq_client_port *dest_port,
- struct snd_seq_port_subscribe *info)
+static int check_and_subscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_subscribers *subs,
+ bool is_src, bool exclusive, bool ack)
{
- struct snd_seq_port_subs_info *src = &src_port->c_src;
- struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
- struct snd_seq_subscribers *subs, *s;
- int err, src_called = 0;
- unsigned long flags;
- int exclusive;
-
- subs = kzalloc(sizeof(*subs), GFP_KERNEL);
- if (! subs)
- return -ENOMEM;
-
- subs->info = *info;
- atomic_set(&subs->ref_count, 2);
+ struct snd_seq_port_subs_info *grp;
+ struct list_head *p;
+ struct snd_seq_subscribers *s;
+ int err;
- down_write(&src->list_mutex);
- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
- exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
+ grp = is_src ? &port->c_src : &port->c_dest;
err = -EBUSY;
+ down_write(&grp->list_mutex);
if (exclusive) {
- if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
+ if (!list_empty(&grp->list_head))
goto __error;
} else {
- if (src->exclusive || dest->exclusive)
+ if (grp->exclusive)
goto __error;
/* check whether already exists */
- list_for_each_entry(s, &src->list_head, src_list) {
- if (match_subs_info(info, &s->info))
- goto __error;
- }
- list_for_each_entry(s, &dest->list_head, dest_list) {
- if (match_subs_info(info, &s->info))
+ list_for_each(p, &grp->list_head) {
+ s = get_subscriber(p, is_src);
+ if (match_subs_info(&subs->info, &s->info))
goto __error;
}
}
- if ((err = subscribe_port(src_client, src_port, src, info,
- connector->number != src_client->number)) < 0)
- goto __error;
- src_called = 1;
-
- if ((err = subscribe_port(dest_client, dest_port, dest, info,
- connector->number != dest_client->number)) < 0)
+ err = subscribe_port(client, port, grp, &subs->info, ack);
+ if (err < 0) {
+ grp->exclusive = 0;
goto __error;
+ }
/* add to list */
- write_lock_irqsave(&src->list_lock, flags);
- // write_lock(&dest->list_lock); // no other lock yet
- list_add_tail(&subs->src_list, &src->list_head);
- list_add_tail(&subs->dest_list, &dest->list_head);
- // write_unlock(&dest->list_lock); // no other lock yet
- write_unlock_irqrestore(&src->list_lock, flags);
+ write_lock_irq(&grp->list_lock);
+ if (is_src)
+ list_add_tail(&subs->src_list, &grp->list_head);
+ else
+ list_add_tail(&subs->dest_list, &grp->list_head);
+ grp->exclusive = exclusive;
+ atomic_inc(&subs->ref_count);
+ write_unlock_irq(&grp->list_lock);
+ err = 0;
- src->exclusive = dest->exclusive = exclusive;
+ __error:
+ up_write(&grp->list_mutex);
+ return err;
+}
+
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+ struct snd_seq_client_port *port,
+ struct snd_seq_subscribers *subs,
+ bool is_src, bool ack)
+{
+ struct snd_seq_port_subs_info *grp;
+ struct list_head *list;
+ bool empty;
+
+ grp = is_src ? &port->c_src : &port->c_dest;
+ list = is_src ? &subs->src_list : &subs->dest_list;
+ down_write(&grp->list_mutex);
+ write_lock_irq(&grp->list_lock);
+ empty = list_empty(list);
+ if (!empty)
+ list_del_init(list);
+ grp->exclusive = 0;
+ write_unlock_irq(&grp->list_lock);
+ up_write(&grp->list_mutex);
+
+ if (!empty)
+ unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+/* connect two ports */
+int snd_seq_port_connect(struct snd_seq_client *connector,
+ struct snd_seq_client *src_client,
+ struct snd_seq_client_port *src_port,
+ struct snd_seq_client *dest_client,
+ struct snd_seq_client_port *dest_port,
+ struct snd_seq_port_subscribe *info)
+{
+ struct snd_seq_subscribers *subs;
+ bool exclusive;
+ int err;
+
+ subs = kzalloc(sizeof(*subs), GFP_KERNEL);
+ if (!subs)
+ return -ENOMEM;
+
+ subs->info = *info;
+ atomic_set(&subs->ref_count, 0);
+ INIT_LIST_HEAD(&subs->src_list);
+ INIT_LIST_HEAD(&subs->dest_list);
+
+ exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
+
+ err = check_and_subscribe_port(src_client, src_port, subs, true,
+ exclusive,
+ connector->number != src_client->number);
+ if (err < 0)
+ goto error;
+ err = check_and_subscribe_port(dest_client, dest_port, subs, false,
+ exclusive,
+ connector->number != dest_client->number);
+ if (err < 0)
+ goto error_dest;
- up_write(&dest->list_mutex);
- up_write(&src->list_mutex);
return 0;
- __error:
- if (src_called)
- unsubscribe_port(src_client, src_port, src, info,
- connector->number != src_client->number);
+ error_dest:
+ delete_and_unsubscribe_port(src_client, src_port, subs, true,
+ connector->number != src_client->number);
+ error:
kfree(subs);
- up_write(&dest->list_mutex);
- up_write(&src->list_mutex);
return err;
}
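/*
 * Editor's note on the ref_count protocol introduced above: the count
 * now starts at 0 and each successful check_and_subscribe_port()
 * increments it, so event delivery can skip half-connected entries
 * with the "ref_count != 2" test added earlier, and disconnect marks
 * an entry stale by decrementing the count before the lists are torn
 * down.
 */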
-
/* remove the connection */
int snd_seq_port_disconnect(struct snd_seq_client *connector,
struct snd_seq_client *src_client,
struct snd_seq_port_subscribe *info)
{
struct snd_seq_port_subs_info *src = &src_port->c_src;
- struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
struct snd_seq_subscribers *subs;
int err = -ENOENT;
- unsigned long flags;
down_write(&src->list_mutex);
- down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
/* look for the connection */
list_for_each_entry(subs, &src->list_head, src_list) {
if (match_subs_info(info, &subs->info)) {
- write_lock_irqsave(&src->list_lock, flags);
- // write_lock(&dest->list_lock); // no lock yet
- list_del(&subs->src_list);
- list_del(&subs->dest_list);
- // write_unlock(&dest->list_lock);
- write_unlock_irqrestore(&src->list_lock, flags);
- src->exclusive = dest->exclusive = 0;
- unsubscribe_port(src_client, src_port, src, info,
- connector->number != src_client->number);
- unsubscribe_port(dest_client, dest_port, dest, info,
- connector->number != dest_client->number);
- kfree(subs);
+ atomic_dec(&subs->ref_count); /* mark as not ready */
err = 0;
break;
}
}
-
- up_write(&dest->list_mutex);
up_write(&src->list_mutex);
- return err;
+ if (err < 0)
+ return err;
+
+ delete_and_unsubscribe_port(src_client, src_port, subs, true,
+ connector->number != src_client->number);
+ delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
+ connector->number != dest_client->number);
+ kfree(subs);
+ return 0;
}
void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tmr->lock, flags);
/* setup defaults */
tmr->ppq = 96; /* 96 PPQ */
tmr->tempo = 500000; /* 120 BPM */
tmr->preferred_resolution = seq_default_timer_resolution;
tmr->skew = tmr->skew_base = SKEW_BASE;
+ spin_unlock_irqrestore(&tmr->lock, flags);
}
-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
+static void seq_timer_reset(struct snd_seq_timer *tmr)
{
- unsigned long flags;
-
- spin_lock_irqsave(&tmr->lock, flags);
-
/* reset time & songposition */
tmr->cur_time.tv_sec = 0;
tmr->cur_time.tv_nsec = 0;
tmr->tick.cur_tick = 0;
tmr->tick.fraction = 0;
+}
+
+void snd_seq_timer_reset(struct snd_seq_timer *tmr)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&tmr->lock, flags);
+ seq_timer_reset(tmr);
spin_unlock_irqrestore(&tmr->lock, flags);
}
tmr = q->timer;
if (tmr == NULL)
return;
- if (!tmr->running)
+ spin_lock_irqsave(&tmr->lock, flags);
+ if (!tmr->running) {
+ spin_unlock_irqrestore(&tmr->lock, flags);
return;
+ }
resolution *= ticks;
if (tmr->skew != tmr->skew_base) {
(((resolution & 0xffff) * tmr->skew) >> 16);
}
- spin_lock_irqsave(&tmr->lock, flags);
-
/* update timer */
snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
t->callback = snd_seq_timer_interrupt;
t->callback_data = q;
t->flags |= SNDRV_TIMER_IFLG_AUTO;
+ spin_lock_irq(&tmr->lock);
tmr->timeri = t;
+ spin_unlock_irq(&tmr->lock);
return 0;
}
int snd_seq_timer_close(struct snd_seq_queue *q)
{
struct snd_seq_timer *tmr;
+ struct snd_timer_instance *t;
tmr = q->timer;
if (snd_BUG_ON(!tmr))
return -EINVAL;
- if (tmr->timeri) {
- snd_timer_stop(tmr->timeri);
- snd_timer_close(tmr->timeri);
- tmr->timeri = NULL;
- }
+ spin_lock_irq(&tmr->lock);
+ t = tmr->timeri;
+ tmr->timeri = NULL;
+ spin_unlock_irq(&tmr->lock);
+ if (t)
+ snd_timer_close(t);
return 0;
}
-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+static int seq_timer_stop(struct snd_seq_timer *tmr)
{
if (! tmr->timeri)
return -EINVAL;
return 0;
}
+int snd_seq_timer_stop(struct snd_seq_timer *tmr)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ err = seq_timer_stop(tmr);
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ return err;
+}
+
static int initialize_timer(struct snd_seq_timer *tmr)
{
struct snd_timer *t;
return 0;
}
-int snd_seq_timer_start(struct snd_seq_timer * tmr)
+static int seq_timer_start(struct snd_seq_timer *tmr)
{
if (! tmr->timeri)
return -EINVAL;
if (tmr->running)
- snd_seq_timer_stop(tmr);
- snd_seq_timer_reset(tmr);
+ seq_timer_stop(tmr);
+ seq_timer_reset(tmr);
if (initialize_timer(tmr) < 0)
return -EINVAL;
snd_timer_start(tmr->timeri, tmr->ticks);
return 0;
}
-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+int snd_seq_timer_start(struct snd_seq_timer *tmr)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ err = seq_timer_start(tmr);
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ return err;
+}
+
+static int seq_timer_continue(struct snd_seq_timer *tmr)
{
if (! tmr->timeri)
return -EINVAL;
if (tmr->running)
return -EBUSY;
if (! tmr->initialized) {
- snd_seq_timer_reset(tmr);
+ seq_timer_reset(tmr);
if (initialize_timer(tmr) < 0)
return -EINVAL;
}
return 0;
}
+int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ err = seq_timer_continue(tmr);
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ return err;
+}
+
/* return current 'real' time. use timeofday() to get better granularity. */
snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
{
snd_seq_real_time_t cur_time;
+ unsigned long flags;
+ spin_lock_irqsave(&tmr->lock, flags);
cur_time = tmr->cur_time;
if (tmr->running) {
struct timeval tm;
}
snd_seq_sanity_real_time(&cur_time);
}
-
+ spin_unlock_irqrestore(&tmr->lock, flags);
return cur_time;
}
struct snd_virmidi *vmidi = substream->runtime->private_data;
int count, res;
unsigned char buf[32], *pbuf;
+ unsigned long flags;
if (up) {
vmidi->trigger = 1;
if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
!(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
- snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
- return; /* ignored */
+ while (snd_rawmidi_transmit(substream, buf,
+ sizeof(buf)) > 0) {
+ /* ignored */
+ }
+ return;
}
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
return;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
+ spin_lock_irqsave(&substream->runtime->lock, flags);
while (1) {
- count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+ count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
if (count <= 0)
break;
pbuf = buf;
snd_midi_event_reset_encode(vmidi->parser);
continue;
}
- snd_rawmidi_transmit_ack(substream, res);
+ __snd_rawmidi_transmit_ack(substream, res);
pbuf += res;
count -= res;
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
- return;
+ goto out;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
}
}
+ out:
+ spin_unlock_irqrestore(&substream->runtime->lock, flags);
} else {
vmidi->trigger = 0;
}
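/*
 * Editor's note: draining with repeated snd_rawmidi_transmit() calls,
 * each a locked peek+ack, replaces the old one-shot
 * snd_rawmidi_transmit_ack(buffer_size - avail), whose size argument
 * was computed without the lock and could acknowledge bytes a
 * concurrent writer had not yet made available.
 */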
*/
static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
{
+ struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
struct snd_virmidi *vmidi = substream->runtime->private_data;
- snd_midi_event_free(vmidi->parser);
+
+ write_lock_irq(&rdev->filelist_lock);
list_del(&vmidi->list);
+ write_unlock_irq(&rdev->filelist_lock);
+ snd_midi_event_free(vmidi->parser);
substream->runtime->private_data = NULL;
kfree(vmidi);
return 0;
return 0;
}
-static int _snd_timer_stop(struct snd_timer_instance *timeri,
- int keep_flag, int event);
+static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
/*
* close a timer instance
spin_unlock_irq(&timer->lock);
mutex_lock(®ister_mutex);
list_del(&timeri->open_list);
- if (timer && list_empty(&timer->open_list_head) &&
+ if (list_empty(&timer->open_list_head) &&
timer->hw.close)
timer->hw.close(timer);
/* remove slave links */
spin_lock_irqsave(&timer->lock, flags);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
- ts->ccallback(ti, event + 100, &tstamp, resolution);
+ ts->ccallback(ts, event + 100, &tstamp, resolution);
spin_unlock_irqrestore(&timer->lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
+ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+ spin_unlock_irqrestore(&slave_active_lock, flags);
+ return -EBUSY;
+ }
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master && timeri->timer) {
spin_lock(&timeri->timer->lock);
return -EINVAL;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
result = snd_timer_start_slave(timeri);
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ if (result >= 0)
+ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
return result;
}
timer = timeri->timer;
if (timer->card && timer->card->shutdown)
return -ENODEV;
spin_lock_irqsave(&timer->lock, flags);
+ if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START)) {
+ result = -EBUSY;
+ goto unlock;
+ }
timeri->ticks = timeri->cticks = ticks;
timeri->pticks = 0;
result = snd_timer_start1(timer, timeri, ticks);
+ unlock:
spin_unlock_irqrestore(&timer->lock, flags);
- snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ if (result >= 0)
+ snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
return result;
}
-static int _snd_timer_stop(struct snd_timer_instance * timeri,
- int keep_flag, int event)
+static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
{
struct snd_timer *timer;
unsigned long flags;
return -ENXIO;
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
- if (!keep_flag) {
- spin_lock_irqsave(&slave_active_lock, flags);
- timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
- list_del_init(&timeri->ack_list);
- list_del_init(&timeri->active_list);
+ spin_lock_irqsave(&slave_active_lock, flags);
+ if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
spin_unlock_irqrestore(&slave_active_lock, flags);
+ return -EBUSY;
}
+ if (timeri->timer)
+ spin_lock(&timeri->timer->lock);
+ timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+ list_del_init(&timeri->ack_list);
+ list_del_init(&timeri->active_list);
+ if (timeri->timer)
+ spin_unlock(&timeri->timer->lock);
+ spin_unlock_irqrestore(&slave_active_lock, flags);
goto __end;
}
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
+ if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ SNDRV_TIMER_IFLG_START))) {
+ spin_unlock_irqrestore(&timer->lock, flags);
+ return -EBUSY;
+ }
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
if (timer->card && timer->card->shutdown) {
}
}
}
- if (!keep_flag)
- timeri->flags &=
- ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
spin_unlock_irqrestore(&timer->lock, flags);
__end:
if (event != SNDRV_TIMER_EVENT_RESOLUTION)
unsigned long flags;
int err;
- err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
+ err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
if (err < 0)
return err;
timer = timeri->timer;
if (timer->card && timer->card->shutdown)
return -ENODEV;
spin_lock_irqsave(&timer->lock, flags);
+ if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+ result = -EBUSY;
+ goto unlock;
+ }
if (!timeri->cticks)
timeri->cticks = 1;
timeri->pticks = 0;
result = snd_timer_start1(timer, timeri, timer->sticks);
+ unlock:
spin_unlock_irqrestore(&timer->lock, flags);
snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
return result;
*/
int snd_timer_pause(struct snd_timer_instance * timeri)
{
- return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
+ return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
}
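/*
 * Editor's note: the SNDRV_TIMER_IFLG_RUNNING/START checks added to
 * the start, stop and continue paths return -EBUSY instead of linking
 * an instance that is already on the active/ack lists (or unlinking
 * one that is not there); re-adding a listed entry corrupts the lists,
 * and these paths are reachable from the user-space timer device.
 */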
/*
ti->cticks = ti->ticks;
} else {
ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
- if (--timer->running)
- list_del_init(&ti->active_list);
+ --timer->running;
+ list_del_init(&ti->active_list);
}
if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
(ti->flags & SNDRV_TIMER_IFLG_FAST))
{
struct snd_timer_user *tu;
long result = 0, unit;
+ int qhead;
int err = 0;
tu = file->private_data;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
- break;
+ goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
if (tu->disconnected) {
err = -ENODEV;
- break;
+ goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
- break;
+ goto _error;
}
}
+ qhead = tu->qhead++;
+ tu->qhead %= tu->queue_size;
spin_unlock_irq(&tu->qlock);
- if (err < 0)
- goto _error;
if (tu->tread) {
- if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
- sizeof(struct snd_timer_tread))) {
+ if (copy_to_user(buffer, &tu->tqueue[qhead],
+ sizeof(struct snd_timer_tread)))
err = -EFAULT;
- goto _error;
- }
} else {
- if (copy_to_user(buffer, &tu->queue[tu->qhead++],
- sizeof(struct snd_timer_read))) {
+ if (copy_to_user(buffer, &tu->queue[qhead],
+ sizeof(struct snd_timer_read)))
err = -EFAULT;
- goto _error;
- }
}
- tu->qhead %= tu->queue_size;
-
- result += unit;
- buffer += unit;
-
spin_lock_irq(&tu->qlock);
tu->qused--;
+ if (err < 0)
+ goto _error;
+ result += unit;
+ buffer += unit;
}
- spin_unlock_irq(&tu->qlock);
_error:
+ spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
}
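/*
 * Editor's note: this is the same claim-then-copy pattern as the
 * rawmidi fix above -- qhead is advanced while qlock is held and a
 * local snapshot indexes the queue during the unlocked copy_to_user(),
 * so two concurrent readers can no longer drain the same slot; the
 * error paths were also reworked so every exit drops qlock exactly
 * once.
 */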
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
};
+#define get_dummy_ops(substream) \
+ (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
+
struct dummy_model {
const char *name;
int (*playback_constraints)(struct snd_pcm_runtime *runtime);
int iobox;
struct snd_kcontrol *cd_volume_ctl;
struct snd_kcontrol *cd_switch_ctl;
- const struct dummy_timer_ops *timer_ops;
};
/*
*/
struct dummy_systimer_pcm {
+ /* ops must be the first item */
+ const struct dummy_timer_ops *timer_ops;
spinlock_t lock;
struct timer_list timer;
unsigned long base_time;
*/
struct dummy_hrtimer_pcm {
+ /* ops must be the first item */
+ const struct dummy_timer_ops *timer_ops;
ktime_t base_time;
ktime_t period_time;
atomic_t running;
static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
- return dummy->timer_ops->start(substream);
+ return get_dummy_ops(substream)->start(substream);
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
- return dummy->timer_ops->stop(substream);
+ return get_dummy_ops(substream)->stop(substream);
}
return -EINVAL;
}
static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
- return dummy->timer_ops->prepare(substream);
+ return get_dummy_ops(substream)->prepare(substream);
}
static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
- return dummy->timer_ops->pointer(substream);
+ return get_dummy_ops(substream)->pointer(substream);
}
static struct snd_pcm_hardware dummy_pcm_hardware = {
struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
struct dummy_model *model = dummy->model;
struct snd_pcm_runtime *runtime = substream->runtime;
+ const struct dummy_timer_ops *ops;
int err;
- dummy->timer_ops = &dummy_systimer_ops;
+ ops = &dummy_systimer_ops;
#ifdef CONFIG_HIGH_RES_TIMERS
if (hrtimer)
- dummy->timer_ops = &dummy_hrtimer_ops;
+ ops = &dummy_hrtimer_ops;
#endif
- err = dummy->timer_ops->create(substream);
+ err = ops->create(substream);
if (err < 0)
return err;
+ get_dummy_ops(substream) = ops;
runtime->hw = dummy->pcm_hw;
if (substream->pcm->device & 1) {
err = model->capture_constraints(substream->runtime);
}
if (err < 0) {
- dummy->timer_ops->free(substream);
+ get_dummy_ops(substream)->free(substream);
return err;
}
return 0;
static int dummy_pcm_close(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
- dummy->timer_ops->free(substream);
+ get_dummy_ops(substream)->free(substream);
return 0;
}
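/*
 * Editor's sketch of the cast behind get_dummy_ops(); a hypothetical
 * minimal analogue, not code from the patch.  Both per-substream
 * structs begin with the ops pointer, so private_data can be
 * dereferenced as a pointer to that first member without knowing
 * which struct is actually behind it.
 */
struct ops { int (*start)(void *stream); };
struct impl_sys { const struct ops *timer_ops; int sys_state; };
struct impl_hr { const struct ops *timer_ops; long hr_state; };

static const struct ops *get_ops(void *private_data)
{
	/* valid only while 'timer_ops' stays the first member */
	return *(const struct ops **)private_data;
}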
[6] = 0x07,
};
-static unsigned int
-get_formation_index(unsigned int rate)
+static int
+get_formation_index(unsigned int rate, unsigned int *index)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) {
- if (snd_bebob_rate_table[i] == rate)
- return i;
+ if (snd_bebob_rate_table[i] == rate) {
+ *index = i;
+ return 0;
+ }
}
return -EINVAL;
}
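/*
 * Editor's note: the old get_formation_index() declared an unsigned
 * return type yet returned -EINVAL, so an unsupported rate handed the
 * caller a huge value that was then used as a table index.  Returning
 * an int status with the index in an out-parameter keeps the error
 * channel separate from the value.
 */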
goto end;
/* confirm params for both streams */
- index = get_formation_index(rate);
+ err = get_formation_index(rate, &index);
+ if (err < 0)
+ goto end;
pcm_channels = bebob->tx_stream_formations[index].pcm;
midi_channels = bebob->tx_stream_formations[index].midi;
err = amdtp_am824_set_parameters(&bebob->tx_stream, rate,
config SND_WSS_LIB
tristate
select SND_PCM
+ select SND_TIMER
config SND_SB_COMMON
tristate
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_PCM
+ select SND_TIMER
help
Say Y here to include support for Analog Devices SoundPort
AD1816A or compatible sound chips.
tristate "Gravis UltraSound Classic"
select SND_RAWMIDI
select SND_PCM
+ select SND_TIMER
help
Say Y here to include support for Gravis UltraSound Classic
soundcards.
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_PCM
+ select SND_TIMER
help
Say Y here to include support for Gravis UltraSound Extreme
soundcards.
select SND_PCM
select SND_RAWMIDI
select SND_AC97_CODEC
+ select SND_TIMER
depends on ZONE_DMA
help
Say Y here to include support for Aztech AZF3328 (PCI168)
select SND_HWDEP
select SND_RAWMIDI
select SND_AC97_CODEC
+ select SND_TIMER
depends on ZONE_DMA
help
Say Y to include support for Sound Blaster PCI 512, Live!,
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_AC97_CODEC
+ select SND_TIMER
help
Say Y here to include support for Yamaha PCI audio chips -
YMF724, YMF724F, YMF740, YMF740C, YMF744, YMF754.
unsigned int caps;
unsigned int mask, val;
- if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
- return;
-
caps = query_amp_caps(codec, nid, dir);
val = get_amp_val_to_activate(codec, nid, dir, caps, enable);
mask = get_amp_mask_to_modify(codec, nid, dir, idx_to_check, caps);
update_amp(codec, nid, dir, idx, mask, val);
}
+static void check_and_activate_amp(struct hda_codec *codec, hda_nid_t nid,
+ int dir, int idx, int idx_to_check,
+ bool enable)
+{
+ /* check whether the given amp is still used by others */
+ if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
+ return;
+ activate_amp(codec, nid, dir, idx, idx_to_check, enable);
+}
+
static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
int i, bool enable)
{
hda_nid_t nid = path->path[i];
init_amp(codec, nid, HDA_OUTPUT, 0);
- activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
+ check_and_activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
}
static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
* when aa-mixer is available, we need to enable the path as well
*/
for (n = 0; n < nums; n++) {
- if (n != idx && (!add_aamix || conn[n] != spec->mixer_merge_nid))
- continue;
- activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
+ if (n != idx) {
+ if (conn[n] != spec->mixer_merge_nid)
+ continue;
+ /* when aamix is disabled, force to off */
+ if (!add_aamix) {
+ activate_amp(codec, nid, HDA_INPUT, n, n, false);
+ continue;
+ }
+ }
+ check_and_activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
}
}
return found;
}
+static inline bool has_aamix_out_paths(struct hda_gen_spec *spec)
+{
+ return spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
+ spec->aamix_out_paths[2];
+}
+
/* create a new path including aamix if available, and return its index */
static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
{
}
}
+/* re-initialize the output paths; only called from loopback_mixing_put() */
+static void update_output_paths(struct hda_codec *codec, int num_outs,
+ const int *paths)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ struct nid_path *path;
+ int i;
+
+ for (i = 0; i < num_outs; i++) {
+ path = snd_hda_get_path_from_idx(codec, paths[i]);
+ if (path)
+ snd_hda_activate_path(codec, path, path->active,
+ spec->aamix_mode);
+ }
+}
+
static int loopback_mixing_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hda_gen_spec *spec = codec->spec;
+ const struct auto_pin_cfg *cfg = &spec->autocfg;
unsigned int val = ucontrol->value.enumerated.item[0];
if (val == spec->aamix_mode)
return 0;
spec->aamix_mode = val;
- update_aamix_paths(codec, val, spec->out_paths[0],
- spec->aamix_out_paths[0],
- spec->autocfg.line_out_type);
- update_aamix_paths(codec, val, spec->hp_paths[0],
- spec->aamix_out_paths[1],
- AUTO_PIN_HP_OUT);
- update_aamix_paths(codec, val, spec->speaker_paths[0],
- spec->aamix_out_paths[2],
- AUTO_PIN_SPEAKER_OUT);
+ if (has_aamix_out_paths(spec)) {
+ update_aamix_paths(codec, val, spec->out_paths[0],
+ spec->aamix_out_paths[0],
+ cfg->line_out_type);
+ update_aamix_paths(codec, val, spec->hp_paths[0],
+ spec->aamix_out_paths[1],
+ AUTO_PIN_HP_OUT);
+ update_aamix_paths(codec, val, spec->speaker_paths[0],
+ spec->aamix_out_paths[2],
+ AUTO_PIN_SPEAKER_OUT);
+ } else {
+ update_output_paths(codec, cfg->line_outs, spec->out_paths);
+ if (cfg->line_out_type != AUTO_PIN_HP_OUT)
+ update_output_paths(codec, cfg->hp_outs, spec->hp_paths);
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
+ update_output_paths(codec, cfg->speaker_outs,
+ spec->speaker_paths);
+ }
return 1;
}
if (!spec->mixer_nid)
return 0;
- if (!(spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
- spec->aamix_out_paths[2]))
- return 0;
if (!snd_hda_gen_add_kctl(spec, NULL, &loopback_mixing_enum))
return -ENOMEM;
spec->have_aamix_ctl = 1;
+ /* if no explicit aamix path is present (e.g. for Realtek codecs),
+ * enable aamix as default -- just for compatibility
+ */
+ spec->aamix_mode = !has_aamix_out_paths(spec);
return 0;
}
struct hda_jack_callback *jack,
bool on)
{
- if (jack && jack->tbl->nid)
+ if (jack && jack->nid)
sync_power_state_change(codec,
- set_pin_power_jack(codec, jack->tbl->nid, on));
+ set_pin_power_jack(codec, jack->nid, on));
}
/* callback only doing power up -- called at first */
if (!spec->have_aamix_ctl)
return;
+ if (!has_aamix_out_paths(spec))
+ return;
update_aamix_paths(codec, spec->aamix_mode, spec->out_paths[0],
spec->aamix_out_paths[0],
spec->autocfg.line_out_type);
#define NVIDIA_HDA_ENABLE_COHBIT 0x01
/* Defines for Intel SCH HDA snoop control */
+#define INTEL_HDA_CGCTL 0x48
+#define INTEL_HDA_CGCTL_MISCBDCGE (0x1 << 6)
#define INTEL_SCH_HDA_DEVC 0x78
#define INTEL_SCH_HDA_DEVC_NOSNOOP (0x1<<11)
{
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
+ u32 val;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, true);
+ if (IS_BROXTON(pci)) {
+ pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+ val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
+ pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+ }
azx_init_chip(chip, full_reset);
+ if (IS_BROXTON(pci)) {
+ pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+ val = val | INTEL_HDA_CGCTL_MISCBDCGE;
+ pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+ }
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, false);
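/*
 * Editor's note: on Broxton the MISCBDCGE clock-gating bit is cleared
 * for the duration of the controller reset and restored afterwards;
 * both halves are plain read-modify-write accesses to the CGCTL dword
 * in PCI config space, bracketing azx_init_chip().
 */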
struct hda_intel *hda;
if (card) {
- /* flush the pending probing work */
+ /* cancel the pending probing work */
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
- flush_work(&hda->probe_work);
+ cancel_work_sync(&hda->probe_work);
snd_card_free(card);
}
if (!callback)
return ERR_PTR(-ENOMEM);
callback->func = func;
- callback->tbl = jack;
+ callback->nid = jack->nid;
callback->next = jack->callback;
jack->callback = callback;
}
typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
struct hda_jack_callback {
- struct hda_jack_tbl *tbl;
+ hda_nid_t nid;
hda_jack_callback_fn func;
unsigned int private_data; /* arbitrary data */
struct hda_jack_callback *next;
static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
{
struct ca0132_spec *spec = codec->spec;
+ struct hda_jack_tbl *tbl;
/* Delay enabling the HP amp, to let the mic-detection
* state machine run.
*/
cancel_delayed_work_sync(&spec->unsol_hp_work);
schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
- cb->tbl->block_report = 1;
+ tbl = snd_hda_jack_tbl_get(codec, cb->nid);
+ if (tbl)
+ tbl->block_report = 1;
}
static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
CS4208_MAC_AUTO,
CS4208_MBA6,
CS4208_MBP11,
+ CS4208_MACMINI,
CS4208_GPIO0,
};
{ .id = CS4208_GPIO0, .name = "gpio0" },
{ .id = CS4208_MBA6, .name = "mba6" },
{ .id = CS4208_MBP11, .name = "mbp11" },
+ { .id = CS4208_MACMINI, .name = "macmini" },
{}
};
/* codec SSID matching */
static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+ SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
snd_hda_apply_fixup(codec, action);
}
+/* MacMini 7,1 has inverted jack detection */
+static void cs4208_fixup_macmini(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
+ { 0x21, 0x004be140 }, /* SPDIF: disable detect */
+ { }
+ };
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ /* HP pin (0x10) has an inverted detection */
+ codec->inv_jack_detect = 1;
+ /* disable the bogus Mic and SPDIF jack detections */
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ }
+}
+
static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
.chained = true,
.chain_id = CS4208_GPIO0,
},
+ [CS4208_MACMINI] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cs4208_fixup_macmini,
+ .chained = true,
+ .chain_id = CS4208_GPIO0,
+ },
[CS4208_GPIO0] = {
.type = HDA_FIXUP_FUNC,
.v.func = cs4208_fixup_gpio0,
eld = &per_pin->sink_eld;
mutex_lock(&per_pin->lock);
- if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
+ if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
+ eld->eld_size > ELD_MAX_SIZE) {
mutex_unlock(&per_pin->lock);
snd_BUG();
return -EINVAL;
static void jack_callback(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
- check_presence_and_report(codec, jack->tbl->nid);
+ check_presence_and_report(codec, jack->nid);
}
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
if (!uctl)
return;
- val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
+ val = snd_hda_codec_read(codec, jack->nid, 0,
AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
val &= HDA_AMP_VOLMASK;
uctl->value.integer.value[0] = val;
case 0x10ec0292:
alc_update_coef_idx(codec, 0x4, 1<<15, 0);
break;
+ case 0x10ec0225:
case 0x10ec0233:
case 0x10ec0255:
case 0x10ec0256:
{ 0x10ec0899, 0x1028, 0, "ALC3861" },
{ 0x10ec0298, 0x1028, 0, "ALC3266" },
{ 0x10ec0256, 0x1028, 0, "ALC3246" },
+ { 0x10ec0225, 0x1028, 0, "ALC3253" },
{ 0x10ec0670, 0x1025, 0, "ALC669X" },
{ 0x10ec0676, 0x1025, 0, "ALC679X" },
{ 0x10ec0282, 0x1043, 0, "ALC3229" },
ALC882_FIXUP_NO_PRIMARY_HP,
ALC887_FIXUP_ASUS_BASS,
ALC887_FIXUP_BASS_CHMAP,
- ALC882_FIXUP_DISABLE_AAMIX,
};
static void alc889_fixup_coef(struct hda_codec *codec,
static void alc_fixup_bass_chmap(struct hda_codec *codec,
const struct hda_fixup *fix, int action);
-static void alc_fixup_disable_aamix(struct hda_codec *codec,
- const struct hda_fixup *fix, int action);
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_bass_chmap,
},
- [ALC882_FIXUP_DISABLE_AAMIX] = {
- .type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_disable_aamix,
- },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
/* All Apple entries are in codec SSIDs */
SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
- SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
ALC269_TYPE_ALC298,
ALC269_TYPE_ALC255,
ALC269_TYPE_ALC256,
+ ALC269_TYPE_ALC225,
};
/*
case ALC269_TYPE_ALC298:
case ALC269_TYPE_ALC255:
case ALC269_TYPE_ALC256:
+ case ALC269_TYPE_ALC225:
ssids = alc269_ssids;
break;
default:
WRITE_COEF(0xb7, 0x802b),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x4a, 1<<8, 0),
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
+ UPDATE_COEF(0x63, 3<<14, 3<<14),
+ UPDATE_COEF(0x4a, 3<<4, 2<<4),
+ UPDATE_COEF(0x4a, 3<<10, 3<<10),
+ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+ UPDATE_COEF(0x4a, 3<<10, 0),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0668:
alc_process_coef_fw(codec, coef0668);
break;
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
}
codec_dbg(codec, "Headset jack set to unplugged mode.\n");
}
UPDATE_COEF(0xc3, 0, 1<<12),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14),
+ UPDATE_COEF(0x4a, 3<<4, 2<<4),
+ UPDATE_COEF(0x63, 3<<14, 0),
+ {}
+ };
+
switch (codec->core.vendor_id) {
case 0x10ec0255:
alc_process_coef_fw(codec, coef0688);
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
+ case 0x10ec0225:
+ alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
+ snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+ alc_process_coef_fw(codec, coef0225);
+ snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+ break;
}
codec_dbg(codec, "Headset jack set to mic-in mode.\n");
}
WRITE_COEF(0xc3, 0x0000),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
+ UPDATE_COEF(0x49, 1<<8, 1<<8),
+ UPDATE_COEF(0x4a, 7<<6, 7<<6),
+ UPDATE_COEF(0x4a, 3<<4, 3<<4),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
}
codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n");
}
WRITE_COEF(0xc3, 0x0000),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10),
+ UPDATE_COEF(0x49, 1<<8, 1<<8),
+ UPDATE_COEF(0x4a, 7<<6, 7<<6),
+ UPDATE_COEF(0x4a, 3<<4, 3<<4),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
case 0x10ec0668:
alc_process_coef_fw(codec, coef0688);
break;
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
}
codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
}
WRITE_COEF(0xc3, 0x0c00),
{}
};
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+ UPDATE_COEF(0x49, 1<<8, 1<<8),
+ {}
+ };
switch (codec->core.vendor_id) {
case 0x10ec0255:
val = alc_read_coef_idx(codec, 0xbe);
is_ctia = (val & 0x1c02) == 0x1c02;
break;
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ msleep(800);
+ val = alc_read_coef_idx(codec, 0x46);
+ is_ctia = (val & 0x00f0) == 0x00f0;
+ break;
}
codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n",
{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
{}
};
+#define ALC225_STANDARD_PINS \
+ {0x12, 0xb7a60130}, \
+ {0x21, 0x04211020}
#define ALC256_STANDARD_PINS \
{0x12, 0x90a60140}, \
{0x21, 0x03211020}
static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x14, 0x901701a0}),
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC225_STANDARD_PINS,
+ {0x14, 0x901701b0}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
break;
+ case 0x10ec0225:
+ spec->codec_variant = ALC269_TYPE_ALC225;
+ break;
}
if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
*/
static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
if (!spec->num_pwrs)
return;
- if (jack && jack->tbl->nid) {
- stac_toggle_power_map(codec, jack->tbl->nid,
- snd_hda_jack_detect(codec, jack->tbl->nid),
+ if (jack && jack->nid) {
+ stac_toggle_power_map(codec, jack->nid,
+ snd_hda_jack_detect(codec, jack->nid),
true);
return;
}
/* IN1/IN2 Control */
SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
- RT5645_BST_SFT1, 8, 0, bst_tlv),
+ RT5645_BST_SFT1, 12, 0, bst_tlv),
SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
RT5645_BST_SFT2, 8, 0, bst_tlv),
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
continue;
dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
config SND_SUN_CS4231
tristate "Sun CS4231"
select SND_PCM
+ select SND_TIMER
help
Say Y here to include support for CS4231 sound device on Sun.
else
err = snd_usbmidi_create_endpoints(umidi, endpoints);
if (err < 0) {
- snd_usbmidi_free(umidi);
return err;
}
switch (chip->usb_id) {
case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+ case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
* "Playback Design" products need a 50ms delay after setting the
* USB interface.
*/
- if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
+ switch (le16_to_cpu(dev->descriptor.idVendor)) {
+ case 0x23ba: /* Playback Design */
+ case 0x0644: /* TEAC Corp. */
mdelay(50);
+ break;
+ }
}
void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
(requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
mdelay(20);
+ /*
+ * "TEAC Corp." products need a 20ms delay after each
+ * class compliant request
+ */
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
+ (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ mdelay(20);
+
/* Marantz/Denon devices with USB DAC functionality need a delay
* after each class compliant request
*/
case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
- case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
+ case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
if (fp->altsetting == 2)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
+ case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
if (fp->altsetting == 3)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
sizeof(long) != 8) {
char *p;
- ls = 2;
/* make %l into %ll */
- p = strchr(format, 'l');
- if (p)
+ if (ls == 1 && (p = strchr(format, 'l')))
memmove(p+1, p, strlen(p)+1);
else if (strcmp(format, "%p") == 0)
strcpy(format, "0x%llx");
+ ls = 2;
}
switch (ls) {
case -2:
{
char help[BUFSIZ];
+ if (!e)
+ return;
+
/*
* We get error directly from syscall errno ( > 0),
* or from encoded pointer's error ( < 0).
machine = machines__find(machines, pid);
if (!machine)
- machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
+ machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
return machine;
}
* Check if there was a change in the timer state (should we raise or lower
* the line level to the GIC).
*/
-static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
+static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
* until we call this function from kvm_timer_flush_hwstate.
*/
if (!vgic_initialized(vcpu->kvm))
- return;
+ return -ENODEV;
if (kvm_timer_should_fire(vcpu) != timer->irq.level)
kvm_timer_update_irq(vcpu, !timer->irq.level);
+
+ return 0;
}
/*
bool phys_active;
int ret;
- kvm_timer_update_state(vcpu);
+ if (kvm_timer_update_state(vcpu))
+ return;
/*
* If we enter the guest with the virtual input level to the VGIC