Pull SuperH fixes from Paul Mundt.
* tag 'sh-for-linus' of git://github.com/pmundt/linux-sh:
sh-sci / PM: Avoid deadlocking runtime PM
sh: fix up the ubc clock definition for sh7785.
sh: add parameter for RSPI in clock-sh7757
sh: Fix sh2a vbr table for more than 255 irqs
node's name represents the name of the corresponding LED.
LED sub-node properties:
-- gpios : Should specify the LED's GPIO, see "Specifying GPIO information
- for devices" in Documentation/devicetree/booting-without-of.txt. Active
- low LEDs should be indicated using flags in the GPIO specifier.
+- gpios : Should specify the LED's GPIO, see "gpios property" in
+ Documentation/devicetree/gpio.txt. Active low LEDs should be
+ indicated using flags in the GPIO specifier.
- label : (optional) The label for this LED. If omitted, the label is
taken from the node name (excluding the unit address).
- linux,default-trigger : (optional) This parameter, if present, is a
nintendo Nintendo
nvidia NVIDIA
nxp NXP Semiconductors
+picochip Picochip Ltd
powervr Imagination Technologies
qcom Qualcomm, Inc.
ramtron Ramtron International
Addresses scanned: I2C 0x18 - 0x1f
Datasheets:
http://www.analog.com/static/imported-files/data_sheets/ADT7408.pdf
- * IDT TSE2002B3, TS3000B3
- Prefix: 'tse2002b3', 'ts3000b3'
+ * Atmel AT30TS00
+ Prefix: 'at30ts00'
Addresses scanned: I2C 0x18 - 0x1f
Datasheets:
- http://www.idt.com/products/getdoc.cfm?docid=18715691
- http://www.idt.com/products/getdoc.cfm?docid=18715692
+ http://www.atmel.com/Images/doc8585.pdf
+ * IDT TSE2002B3, TSE2002GB2, TS3000B3, TS3000GB2
+ Prefix: 'tse2002', 'ts3000'
+ Addresses scanned: I2C 0x18 - 0x1f
+ Datasheets:
+ http://www.idt.com/sites/default/files/documents/IDT_TSE2002B3C_DST_20100512_120303152056.pdf
+ http://www.idt.com/sites/default/files/documents/IDT_TSE2002GB2A1_DST_20111107_120303145914.pdf
+ http://www.idt.com/sites/default/files/documents/IDT_TS3000B3A_DST_20101129_120303152013.pdf
+ http://www.idt.com/sites/default/files/documents/IDT_TS3000GB2A1_DST_20111104_120303151012.pdf
* Maxim MAX6604
Prefix: 'max6604'
Addresses scanned: I2C 0x18 - 0x1f
Datasheets:
http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
- * Microchip MCP9805, MCP98242, MCP98243, MCP9843
- Prefixes: 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843'
+ * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP9843
+ Prefixes: 'mcp9804', 'mcp9805', 'mcp98242', 'mcp98243', 'mcp9843'
Addresses scanned: I2C 0x18 - 0x1f
Datasheets:
+ http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
Datasheets:
http://www.st.com/stonline/products/literature/ds/13447/stts424.pdf
http://www.st.com/stonline/products/literature/ds/13448/stts424e02.pdf
+ * ST Microelectronics STTS2002, STTS3000
+ Prefix: 'stts2002', 'stts3000'
+ Addresses scanned: I2C 0x18 - 0x1f
+ Datasheets:
+ http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATASHEET/CD00225278.pdf
+ http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/DATA_BRIEF/CD00270920.pdf
* JEDEC JC 42.4 compliant temperature sensor chips
Prefix: 'jc42'
Addresses scanned: I2C 0x18 - 0x1f
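The driver matches each of the chips above by exact manufacturer ID plus a masked device ID, as the jc42_chips table in the driver fragment further down shows. A minimal sketch of that comparison (the helper is hypothetical; the field names follow the driver):
#include <linux/types.h>
struct jc42_chips {
	u16 manid;
	u16 devid;
	u16 devid_mask;
};
/* Hypothetical helper: the manufacturer ID must match exactly, while the
 * device ID is compared only in the bits covered by devid_mask. */
static bool jc42_chip_match(const struct jc42_chips *chip, u16 manid, u16 devid)
{
	return chip->manid == manid &&
	       (devid & chip->devid_mask) == chip->devid;
}
The mask lets one table entry cover several steppings of a part: MCP9804_DEVID_MASK of 0xfffc, for instance, ignores the two low revision bits.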
All ALPS touchpads should respond to the "E6 report" command sequence:
E8-E6-E6-E6-E9. An ALPS touchpad should respond with either 00-00-0A or
-00-00-64.
+00-00-64 if no buttons are pressed. The bits 0-2 of the first byte will be 1s
+if some buttons are pressed.
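Expressed as code, the detection rule above might look like this sketch (the helper name is hypothetical; param[] holds the three response bytes):
#include <linux/types.h>
/* Accept the E6 response if the only nonzero bits in byte 0 are the
 * button bits 0-2, byte 1 is zero, and byte 2 is 10 (0x0A) or 100 (0x64). */
static bool e6_response_ok(const unsigned char param[3])
{
	return (param[0] & 0xf8) == 0 && param[1] == 0 &&
	       (param[2] == 10 || param[2] == 100);
}
This mirrors the (param[0] & 0xf8) check in the alps.c hunk later in this series.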
If the E6 report is successful, the touchpad model is identified using the "E7
report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
F: drivers/platform/msm/
F: drivers/*/pm8???-*
F: include/linux/mfd/pm8xxx/
-T: git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/davidb/linux-msm.git
S: Maintained
ARM/TOSA MACHINE SUPPORT
VERSION = 3
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
depends on CPU_V7
help
This option enables the workaround for the 743622 Cortex-A9
- (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+ (r2p*) erratum. Under very rare conditions, a faulty
optimisation in the Cortex-A9 Store Buffer may lead to data
corruption. This workaround sets a specific bit in the diagnostic
register of the Cortex-A9 which disables the Store Buffer
xipImage
bootpImage
uImage
+*.dtb
u64 armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow);
+ int idx);
int armpmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
+ vma.vm_flags = VM_EXEC;
vma.vm_mm = mm;
flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow)
+ int idx)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
u64 delta, prev_raw_count, new_raw_count;
new_raw_count) != prev_raw_count)
goto again;
- new_raw_count &= armpmu->max_period;
- prev_raw_count &= armpmu->max_period;
-
- if (overflow)
- delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
- else
- delta = new_raw_count - prev_raw_count;
+ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
}
static void
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
- hwc->sample_period = armpmu->max_period;
+ /*
+ * For non-sampling runs, limit the sample_period to half
+ * of the counter width. That way, the new counter value
+ * is far less likely to overtake the previous one unless
+ * you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
armpmu->type = ARM_PMU_DEVICE_CPU;
}
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (cpu_pmu && cpu_pmu->reset)
+ cpu_pmu->reset(NULL);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+ .notifier_call = pmu_cpu_notify,
+};
+
/*
* CPU PMU identification and registration.
*/
pr_info("enabled with %s PMU driver, %d counters available\n",
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
+ register_cpu_notifier(&pmu_cpu_notifier);
armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static int counter_is_active(unsigned long pmcr, int idx)
-{
- unsigned long mask = 0;
- if (idx == ARMV6_CYCLE_COUNTER)
- mask = ARMV6_PMCR_CCOUNT_IEN;
- else if (idx == ARMV6_COUNTER0)
- mask = ARMV6_PMCR_COUNT0_IEN;
- else if (idx == ARMV6_COUNTER1)
- mask = ARMV6_PMCR_COUNT1_IEN;
-
- if (mask)
- return pmcr & mask;
-
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return 0;
-}
-
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!counter_is_active(pmcr, idx))
+ /* Ignore if we don't have an event. */
+ if (!event)
continue;
/*
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+ isb();
+
return idx;
}
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ if (!event)
+ continue;
+
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+ if (!event)
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- unsigned long flags, ien, evtsel;
+ unsigned long flags, ien, evtsel, of_flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
+ of_flags = XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ of_flags = XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ of_flags = XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ of_flags = XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ of_flags = XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
+ xscale2pmu_write_overflow_flags(of_flags);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
case 0xb944:
omap_revision = AM335X_REV_ES1_0;
*cpu_rev = "1.0";
+ break;
case 0xb8f2:
switch (rev) {
case 0:
platform_driver_unregister(&omap2_mbox_driver);
}
-/* must be ready before omap3isp is probed */
-subsys_initcall(omap2_mbox_init);
+module_init(omap2_mbox_init);
module_exit(omap2_mbox_exit);
MODULE_LICENSE("GPL v2");
platform_device_put(omap_iommu_pdev[i]);
return err;
}
-module_init(omap_iommu_init);
+/* must be ready before omap3isp is probed */
+subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
#include "common.h"
#include "omap4-sar-layout.h"
+#include <linux/export.h>
#ifdef CONFIG_CACHE_L2X0
static void __iomem *l2cache_base;
isb();
}
}
+EXPORT_SYMBOL(omap_bus_sync);
/* Steal one page physical memory for barrier implementation */
int __init omap_barrier_reserve_memblock(void)
.constraints = {
.min_uV = 3300000,
.max_uV = 3300000,
- .apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
/* we'll take a jump through zero as a poor second */
soft_restart(0);
+}
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/sh_clk.h>
+#include <linux/videodev2.h>
#include <video/sh_mobile_lcdc.h>
#include <video/sh_mipi_dsi.h>
#include <sound/sh_fsi.h>
static struct platform_device fsi_ak4643_device = {
.name = "fsi-ak4642-audio",
.dev = {
- .platform_data = &fsi_info,
+ .platform_data = &fsi2_ak4643_info,
},
};
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/smsc911x.h>
+#include <linux/videodev2.h>
#include <mach/common.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.interface_type = RGB24,
.clock_divider = 5,
.flags = 0,
static void __init mackerel_map_io(void)
{
iotable_init(mackerel_io_desc, ARRAY_SIZE(mackerel_io_desc));
+ /* DMA memory at 0xff200000 - 0xffdfffff. The default 2MB size isn't
+ * enough to allocate the frame buffer memory.
+ */
+ init_consistent_dma_size(12 << 20);
/* setup early devices and console here as well */
sh7372_add_early_devices();
default y
select ARM_GIC
select HAS_MTU
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select ARM_ERRATA_754322
select ARM_ERRATA_764369
select ARM_GIC
select ARM_ERRATA_720789
select ARM_ERRATA_751472
- select ARM_ERRATA_753970
+ select PL310_ERRATA_753970
select HAVE_SMP
select MIGHT_HAVE_CACHE_L2X0
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#endif
#ifdef CONFIG_ARM_ERRATA_743622
- teq r6, #0x20 @ present in r2p0
- teqne r6, #0x21 @ present in r2p1
- teqne r6, #0x22 @ present in r2p2
+ teq r5, #0x00200000 @ only present in r2p*
mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register
orreq r10, r10, #1 << 6 @ set bit #6
mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
#define OMAP_GPMC_NR_IRQS 8
#define OMAP_GPMC_IRQ_END (OMAP_GPMC_IRQ_BASE + OMAP_GPMC_NR_IRQS)
+/* PRCM IRQ handler */
+#ifdef CONFIG_ARCH_OMAP2PLUS
+#define OMAP_PRCM_IRQ_BASE (OMAP_GPMC_IRQ_END)
+#define OMAP_PRCM_NR_IRQS 64
+#define OMAP_PRCM_IRQ_END (OMAP_PRCM_IRQ_BASE + OMAP_PRCM_NR_IRQS)
+#else
+#define OMAP_PRCM_IRQ_END OMAP_GPMC_IRQ_END
+#endif
-#define NR_IRQS OMAP_GPMC_IRQ_END
+#define NR_IRQS OMAP_PRCM_IRQ_END
#define OMAP_IRQ_BIT(irq) (1 << ((irq) % 32))
static int clockevent_next_event(unsigned long cycles,
struct clock_event_device *clk_event_dev)
{
- u16 val;
+ u16 val = readw(gpt_base + CR(CLKEVT));
+
+ if (val & CTRL_ENABLE)
+ writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT));
writew(cycles, gpt_base + LOAD(CLKEVT));
- val = readw(gpt_base + CR(CLKEVT));
val |= CTRL_ENABLE | CTRL_INT_ENABLE;
writew(val, gpt_base + CR(CLKEVT));
extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) (task_pt_regs(task)->pc)
-#define KSTK_ESP(tsk) (task_pt_regs(task)->sp)
+#define KSTK_EIP(task) (task_pt_regs(task)->pc)
+#define KSTK_ESP(task) (task_pt_regs(task)->sp)
#define cpu_relax() do { } while (0)
}
/* TSC based delay: */
-static void delay_tsc(unsigned long loops)
+static void delay_tsc(unsigned long __loops)
{
- unsigned long bclock, now;
+ u32 bclock, now, loops = __loops;
int cpu;
preempt_disable();
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+ if (rdev->family < CHIP_CAYMAN)
+ sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
+
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
0x00000000, /* VGT_REUSE_OFF */
0x00000000, /* VGT_VTX_CNT_EN */
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
0x00000000, /* VGT_REUSE_OFF */
0x00000000, /* VGT_VTX_CNT_EN */
+ 0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_INDIRECT_BUFFER_MP 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
+# define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
- if (ASIC_IS_DCE3(rdev)) {
+ if (0) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
.create_handle = radeon_user_framebuffer_create_handle,
};
-void
+int
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
+ int ret;
rfb->obj = obj;
- drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ if (ret) {
+ rfb->obj = NULL;
+ return ret;
+ }
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+ return 0;
}
static struct drm_framebuffer *
{
struct drm_gem_object *obj;
struct radeon_framebuffer *radeon_fb;
+ int ret;
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
if (radeon_fb == NULL)
return ERR_PTR(-ENOMEM);
- radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ if (ret) {
+ kfree(radeon_fb);
+ drm_gem_object_unreference_unlocked(obj);
+ return NULL;
+ }
return &radeon_fb->base;
}
bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
u32 pixel_clock)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE3(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (0 && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (ASIC_IS_DCE3(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (0 && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
sizes->surface_depth);
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon object %d\n", ret);
+ return ret;
+ }
+
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
info->par = rfbdev;
- radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ if (ret) {
+ DRM_ERROR("failed to initalise framebuffer %d\n", ret);
+ goto out_unref;
+ }
fb = &rfbdev->rfb.base;
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-void radeon_framebuffer_init(struct drm_device *dev,
+int radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
If you say yes here, you get support for JEDEC JC42.4 compliant
temperature sensors, which are used on many DDR3 memory modules for
mobile devices and servers. Support will include, but not be limited
- to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
- MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
+ to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
+ MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002,
+ STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
This driver can also be built as a module. If so, the module
will be called jc42.
/* Manufacturer IDs */
#define ADT_MANID 0x11d4 /* Analog Devices */
+#define ATMEL_MANID 0x001f /* Atmel */
#define MAX_MANID 0x004d /* Maxim */
#define IDT_MANID 0x00b3 /* IDT */
#define MCP_MANID 0x0054 /* Microchip */
#define ADT7408_DEVID 0x0801
#define ADT7408_DEVID_MASK 0xffff
+/* Atmel */
+#define AT30TS00_DEVID 0x8201
+#define AT30TS00_DEVID_MASK 0xffff
+
/* IDT */
#define TS3000B3_DEVID 0x2903 /* Also matches TSE2002B3 */
#define TS3000B3_DEVID_MASK 0xffff
+#define TS3000GB2_DEVID 0x2912 /* Also matches TSE2002GB2 */
+#define TS3000GB2_DEVID_MASK 0xffff
+
/* Maxim */
#define MAX6604_DEVID 0x3e00
#define MAX6604_DEVID_MASK 0xffff
/* Microchip */
+#define MCP9804_DEVID 0x0200
+#define MCP9804_DEVID_MASK 0xfffc
+
#define MCP98242_DEVID 0x2000
#define MCP98242_DEVID_MASK 0xfffc
#define STTS424E_DEVID 0x0000
#define STTS424E_DEVID_MASK 0xfffe
+#define STTS2002_DEVID 0x0300
+#define STTS2002_DEVID_MASK 0xffff
+
+#define STTS3000_DEVID 0x0200
+#define STTS3000_DEVID_MASK 0xffff
+
static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 };
struct jc42_chips {
static struct jc42_chips jc42_chips[] = {
{ ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK },
+ { ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK },
{ IDT_MANID, TS3000B3_DEVID, TS3000B3_DEVID_MASK },
+ { IDT_MANID, TS3000GB2_DEVID, TS3000GB2_DEVID_MASK },
{ MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
+ { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
{ MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
{ MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
{ MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
{ NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
{ STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
{ STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
+ { STM_MANID, STTS2002_DEVID, STTS2002_DEVID_MASK },
+ { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK },
};
/* Each client has this additional data */
static const struct i2c_device_id jc42_id[] = {
{ "adt7408", 0 },
+ { "at30ts00", 0 },
{ "cat94ts02", 0 },
{ "cat6095", 0 },
{ "jc42", 0 },
{ "max6604", 0 },
+ { "mcp9804", 0 },
{ "mcp9805", 0 },
{ "mcp98242", 0 },
{ "mcp98243", 0 },
{ "se97b", 0 },
{ "se98", 0 },
{ "stts424", 0 },
- { "tse2002b3", 0 },
- { "ts3000b3", 0 },
+ { "stts2002", 0 },
+ { "stts3000", 0 },
+ { "tse2002", 0 },
+ { "ts3000", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, jc42_id);
lcrit_alarm, crit_alarm */
#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
crit_alarm */
-#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */
+#define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm
+ */
#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
lcrit_alarm, crit_alarm */
struct zl6100_data {
int id;
ktime_t access; /* chip access time */
+ int delay; /* Delay between chip accesses in uS */
struct pmbus_driver_info info;
};
/* Some chips need a delay between accesses */
static inline void zl6100_wait(const struct zl6100_data *data)
{
- if (delay) {
+ if (data->delay) {
s64 delta = ktime_us_delta(ktime_get(), data->access);
- if (delta < delay)
- udelay(delay - delta);
+ if (delta < data->delay)
+ udelay(data->delay - delta);
}
}
* can be cleared later for additional chips if tests show that it
* is not needed (in other words, better be safe than sorry).
*/
+ data->delay = delay;
if (data->id == zl2004 || data->id == zl6105)
- delay = 0;
+ data->delay = 0;
/*
* Since there was a direct I2C device access above, wait before
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
struct input_event event;
- int retval;
+ int retval = 0;
if (count < input_event_size())
return -EINVAL;
}
/*** Module ***/
-#if CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int twl4030_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
vibra_disable_leds();
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
-#endif
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
.driver = {
.name = "twl4030-vibra",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &twl4030_vibra_pm_ops,
-#endif
},
};
module_platform_driver(twl4030_vibra_driver);
/*
* First try "E6 report".
- * ALPS should return 0,0,10 or 0,0,100
+ * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
+ * The bits 0-2 of the first byte will be 1s if some buttons are
+ * pressed.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
param[0], param[1], param[2]);
- if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100))
+ if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
+ (param[2] != 10 && param[2] != 100))
return NULL;
/*
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_ARCH_HAS_HCD
select USB
+ select NEW_LEDS
+ select LEDS_CLASS
help
Say Y here if you want to use the USB version of the Wacom Intuos
or Graphire tablet. Make sure to say Y to "Mouse support"
{
struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
- int count = data[1] & 0x03;
+ int count = data[1] & 0x07;
int i;
if (data[0] != 0x02)
}
/* Programs the physical address of the device table into the IOMMU hardware */
-static void __init iommu_set_device_table(struct amd_iommu *iommu)
+static void iommu_set_device_table(struct amd_iommu *iommu)
{
u64 entry;
* Corrupt successful READs while in down state.
* If flags were specified, only corrupt those that match.
*/
- if (!error && bio_submitted_while_down &&
+ if (fc->corrupt_bio_byte && !error && bio_submitted_while_down &&
(bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc))
corrupt_bio_data(bio, fc);
unsigned offset;
unsigned num_bvecs;
sector_t remaining = where->count;
+ struct request_queue *q = bdev_get_queue(where->bdev);
+ sector_t discard_sectors;
/*
* where->count may be zero if rw holds a flush and we need to
/*
* Allocate a suitably sized-bio.
*/
- num_bvecs = dm_sector_div_up(remaining,
- (PAGE_SIZE >> SECTOR_SHIFT));
- num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
+ if (rw & REQ_DISCARD)
+ num_bvecs = 1;
+ else
+ num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+ dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_destructor = dm_bio_destructor;
store_io_and_region_in_bio(bio, io, region);
- /*
- * Try and add as many pages as possible.
- */
- while (remaining) {
+ if (rw & REQ_DISCARD) {
+ discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+ bio->bi_size = discard_sectors << SECTOR_SHIFT;
+ remaining -= discard_sectors;
+ } else while (remaining) {
+ /*
+ * Try and add as many pages as possible.
+ */
dp->get_page(dp, &page, &len, &offset);
len = min(len, to_bytes(remaining));
if (!bio_add_page(bio, page, len, offset))
if (!argc) {
DMWARN("Empty message received.");
- goto out;
+ goto out_argv;
}
table = dm_get_live_table(md);
return ret;
sb = page_address(rdev->sb_page);
- if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
+
+ /*
+ * Two cases that we want to write new superblocks and rebuild:
+ * 1) New device (no matching magic number)
+ * 2) Device specified for rebuild (!In_sync w/ offset == 0)
+ */
+ if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
+ (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
super_sync(rdev->mddev, rdev);
set_bit(FirstUse, &rdev->flags);
*/
rdev_for_each(r, t, mddev) {
if (!test_bit(In_sync, &r->flags)) {
- if (!test_bit(FirstUse, &r->flags))
- DMERR("Superblock area of "
- "rebuild device %d should have been "
- "cleared.", r->raid_disk);
- set_bit(FirstUse, &r->flags);
+ DMINFO("Device %d specified for rebuild: "
+ "Clearing superblock", r->raid_disk);
rebuilds++;
} else if (test_bit(FirstUse, &r->flags))
new_devs++;
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
+ ti->num_flush_requests = 1;
mutex_lock(&rs->md.reconfig_mutex);
ret = md_run(&rs->md);
data_sm = dm_sm_disk_create(tm, nr_blocks);
if (IS_ERR(data_sm)) {
DMERR("sm_disk_create failed");
+ dm_tm_unlock(tm, sblock);
r = PTR_ERR(data_sm);
goto bad;
}
return 0;
}
+/*
+ * __open_device: Returns @td corresponding to device with id @dev,
+ * creating it if @create is set and incrementing @td->open_count.
+ * On failure, @td is undefined.
+ */
static int __open_device(struct dm_pool_metadata *pmd,
dm_thin_id dev, int create,
struct dm_thin_device **td)
struct disk_device_details details_le;
/*
- * Check the device isn't already open.
+ * If the device is already open, return it.
*/
list_for_each_entry(td2, &pmd->thin_devices, list)
if (td2->id == dev) {
+ /*
+ * May not create an already-open device.
+ */
+ if (create)
+ return -EEXIST;
+
td2->open_count++;
*td = td2;
return 0;
if (r != -ENODATA || !create)
return r;
+ /*
+ * Create new device.
+ */
changed = 1;
details_le.mapped_blocks = 0;
details_le.transaction_id = cpu_to_le64(pmd->trans_id);
r = __open_device(pmd, dev, 1, &td);
if (r) {
- __close_device(td);
dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
dm_btree_del(&pmd->bl_info, dev_root);
return r;
}
- td->changed = 1;
__close_device(td);
return r;
goto bad;
r = __set_snapshot_details(pmd, td, origin, pmd->time);
+ __close_device(td);
+
if (r)
goto bad;
- __close_device(td);
return 0;
bad:
- __close_device(td);
dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
dm_btree_remove(&pmd->details_info, pmd->details_root,
&key, &pmd->details_root);
if (r)
return r;
+ td->mapped_blocks--;
+ td->changed = 1;
pmd->need_commit = 1;
return 0;
dev->netdev_ops = &cfhsi_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
+ dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
dev->tx_queue_len = 0;
dev->destructor = free_netdev;
skb_queue_head_init(&cfhsi->qhead);
"atl1c hardware error (status = 0x%x)\n",
status & ISR_ERROR);
/* reset MAC */
- adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+ set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
schedule_work(&adapter->common_task);
return IRQ_HANDLED;
}
}
}
- netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
tnapi->tx_cons = sw_idx;
}
skb_tx_timestamp(skb);
- netdev_sent_queue(tp->dev, skb->len);
+ netdev_tx_sent_queue(txq, skb->len);
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
dev_kfree_skb_any(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
}
- netdev_reset_queue(tp->dev);
}
/* Initialize tx/rx rings for packet processing.
CH_DEVICE(0x4408, 4),
CH_DEVICE(0x4409, 4),
CH_DEVICE(0x440a, 4),
+ CH_DEVICE(0x440d, 4),
+ CH_DEVICE(0x440e, 4),
{ 0, }
};
CH_DEVICE(0x4808, 0), /* T420-cx */
CH_DEVICE(0x4809, 0), /* T420-bt */
CH_DEVICE(0x480a, 0), /* T404-bt */
+ CH_DEVICE(0x480d, 0), /* T480-cr */
+ CH_DEVICE(0x480e, 0), /* T440-lp-cr */
{ 0, }
};
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
- return &port->stats;
+ stats->multicast = port->stats.multicast;
+ stats->rx_errors = port->stats.rx_errors;
+ return stats;
}
static void ehea_update_stats(struct work_struct *work)
context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
}
- port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
- if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
- context->pri_path.sched_queue = (context->pri_path.sched_queue &
- 0xc3);
-
*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
memcpy(mailbox->buf + 8, context, sizeof *context);
if (vhcr->op_modifier == 0) {
err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
- if (err)
- goto ex_put;
+ goto ex_put;
}
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &pdev->dev;
+
+ pm_runtime_get_sync(d);
rtl8169_net_suspend(dev);
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
}
+
+ pm_runtime_put_noidle(d);
}
static struct pci_driver rtl8169_pci_driver = {
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "hv_netvsc");
+ strcpy(info->driver, KBUILD_MODNAME);
strcpy(info->version, HV_DRV_VERSION);
strcpy(info->fw_version, "N/A");
}
/* The one and only one */
static struct hv_driver netvsc_drv = {
- .name = "netvsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
entry = (struct skb_data *) skb->cb;
urb = entry->urb;
+ spin_unlock_irqrestore(&q->lock, flags);
// during some PM-driven resume scenarios,
// these (async) unlinks complete immediately
retval = usb_unlink_urb (urb);
netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
count++;
+ spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore (&q->lock, flags);
return count;
{
struct ieee80211_sta *sta;
struct carl9170_sta_info *sta_info;
+ struct ieee80211_tx_info *tx_info;
rcu_read_lock();
sta = __carl9170_get_tx_sta(ar, skb);
goto out_rcu;
sta_info = (void *) sta->drv_priv;
- if (unlikely(sta_info->sleeping)) {
- struct ieee80211_tx_info *tx_info;
+ tx_info = IEEE80211_SKB_CB(skb);
+ if (unlikely(sta_info->sleeping) &&
+ !(tx_info->flags & (IEEE80211_TX_CTL_POLL_RESPONSE |
+ IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
rcu_read_unlock();
- tx_info = IEEE80211_SKB_CB(skb);
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_dec(&ar->tx_ampdu_upload);
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ carl9170_release_dev_space(ar, skb);
carl9170_tx_status(ar, skb, false);
return true;
}
.flags = CMD_SYNC,
.data[0] = key_data.rsc_tsc,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .len[0] = sizeof(key_data.rsc_tsc),
+ .len[0] = sizeof(*key_data.rsc_tsc),
};
ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
priv->sec_info.wpa_enabled = false;
priv->sec_info.wpa2_enabled = false;
priv->wep_key_curr_index = 0;
+ priv->sec_info.encryption_mode = 0;
ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
#include <asm/machdep.h>
#endif /* CONFIG_PPC */
-#include <asm/setup.h>
#include <asm/page.h>
char *of_fdt_get_string(struct boot_param_header *blob, u32 offset)
if (!phy_id || sz < sizeof(*phy_id))
return NULL;
- sprintf(bus_id, PHY_ID_FMT, "0", be32_to_cpu(phy_id[0]));
+ sprintf(bus_id, PHY_ID_FMT, "fixed-0", be32_to_cpu(phy_id[0]));
phy = phy_connect(dev, bus_id, hndlr, 0, iface);
return IS_ERR(phy) ? NULL : phy;
tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
- vsel = selector;
+ vsel = selector + 3;
tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
}
return -ENOMEM;
}
-static int __init pl022_dma_probe(struct pl022 *pl022)
+static int __devinit pl022_dma_probe(struct pl022 *pl022)
{
dma_cap_mask_t mask;
call_rcu(&ctx->rcu_head, ctx_rcu_free);
}
-static inline void get_ioctx(struct kioctx *kioctx)
-{
- BUG_ON(atomic_read(&kioctx->users) <= 0);
- atomic_inc(&kioctx->users);
-}
-
static inline int try_get_ioctx(struct kioctx *kioctx)
{
return atomic_inc_not_zero(&kioctx->users);
mm = ctx->mm = current->mm;
atomic_inc(&mm->mm_count);
- atomic_set(&ctx->users, 1);
+ atomic_set(&ctx->users, 2);
spin_lock_init(&ctx->ctx_lock);
spin_lock_init(&ctx->ring_info.ring_lock);
init_waitqueue_head(&ctx->wait);
fput(req->ki_filp);
/* Link the iocb into the context's free list */
+ rcu_read_lock();
spin_lock_irq(&ctx->ctx_lock);
really_put_req(ctx, req);
+ /*
+ * at that point ctx might've been killed, but actual
+ * freeing is RCU'd
+ */
spin_unlock_irq(&ctx->ctx_lock);
+ rcu_read_unlock();
- put_ioctx(ctx);
spin_lock_irq(&fput_lock);
}
spin_unlock_irq(&fput_lock);
* this function will be executed w/out any aio kthread wakeup.
*/
if (unlikely(!fput_atomic(req->ki_filp))) {
- get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
ret = PTR_ERR(ioctx);
if (!IS_ERR(ioctx)) {
ret = put_user(ioctx->user_id, ctxp);
- if (!ret)
+ if (!ret) {
+ put_ioctx(ioctx);
return 0;
-
- get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+ }
io_destroy(ioctx);
}
struct btrfs_path *path;
struct btrfs_key info_key = { 0 };
struct btrfs_delayed_ref_root *delayed_refs = NULL;
- struct btrfs_delayed_ref_head *head = NULL;
+ struct btrfs_delayed_ref_head *head;
int info_level = 0;
int ret;
struct list_head prefs_delayed;
* at a specified point in time
*/
again:
+ head = NULL;
+
ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
if (ret < 0)
goto out;
goto again;
}
ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
- if (ret)
+ if (ret) {
+ spin_unlock(&delayed_refs->lock);
goto out;
+ }
}
spin_unlock(&delayed_refs->lock);
spin_lock(&fs_info->reada_lock);
ret = radix_tree_insert(&dev->reada_zones,
- (unsigned long)zone->end >> PAGE_CACHE_SHIFT,
+ (unsigned long)(zone->end >> PAGE_CACHE_SHIFT),
zone);
spin_unlock(&fs_info->reada_lock);
#ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
#define ASM_ARM_HARDWARE_SERIAL_AMBA_H
+#include <linux/types.h>
+
/* -------------------------------------------------------------------------------
* From AMBA UART (PL010) Block Specification
* -------------------------------------------------------------------------------
return NULL;
}
+static inline struct device_node *of_find_compatible_node(
+ struct device_node *from,
+ const char *type,
+ const char *compat)
+{
+ return NULL;
+}
+
static inline int of_property_read_u32_array(const struct device_node *np,
const char *propname,
u32 *out_values, size_t sz)
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
- int redirect_genid;
unsigned long rate_last;
unsigned long pmtu_expires;
u32 pmtu_orig;
u32 pmtu_learned;
struct inetpeer_addr_base redirect_learned;
+ struct list_head gc_list;
/*
* Once inet_peer is queued for deletion (refcnt == -1), following fields
* are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
extern void inet_putpeer(struct inet_peer *p);
extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+extern void inetpeer_invalidate_tree(int family);
+
/*
* temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
* tcp_ts_stamp if no refcount is taken on inet_peer
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpuset_update_active_cpus();
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
cpuset_update_active_cpus();
return NOTIFY_OK;
return NULL;
if (PageAnon(page)) {
/* we don't move shared anon */
- if (!move_anon() || page_mapcount(page) > 2)
+ if (!move_anon() || page_mapcount(page) > 1)
return NULL;
} else if (!move_file())
/* we ignore mapcount for file pages */
#define brnf_filter_pppoe_tagged 0
#endif
+#define IS_IP(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+
+#define IS_IPV6(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+
+#define IS_ARP(skb) \
+ (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
if (vlan_tx_tag_present(skb))
return NF_DROP;
br = p->br;
- if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb)) {
+ if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
return NF_ACCEPT;
if (!brnf_call_iptables && !br->nf_call_iptables)
return NF_ACCEPT;
- if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
- !IS_PPPOE_IP(skb))
+ if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header_rcsum(skb);
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
struct net_device *in;
- if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
+ if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
in = nf_bridge->physindev;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
return 0;
}
+
/* This is the 'purely bridged' case. For IP, we pass the packet to
* netfilter with indev and outdev set to the bridge device,
* but we are still able to filter on the 'real' indev/outdev
if (!parent)
return NF_DROP;
- if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
- IS_PPPOE_IP(skb))
+ if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
pf = PF_INET;
- else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb))
+ else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
if (!brnf_call_arptables && !br->nf_call_arptables)
return NF_ACCEPT;
- if (skb->protocol != htons(ETH_P_ARP)) {
+ if (!IS_ARP(skb)) {
if (!IS_VLAN_ARP(skb))
return NF_ACCEPT;
nf_bridge_pull_encap_header(skb);
if (!realoutdev)
return NF_DROP;
- if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
- IS_PPPOE_IP(skb))
+ if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
pf = PF_INET;
- else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
- IS_PPPOE_IPV6(skb))
+ else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
pf = PF_INET6;
else
return NF_ACCEPT;
void br_log_state(const struct net_bridge_port *p)
{
- br_info(p->br, "port %u(%s) entering %s state\n",
+ br_info(p->br, "port %u(%s) entered %s state\n",
(unsigned) p->port_no, p->dev->name,
br_port_state_names[p->state]);
}
struct net_bridge *br = p->br;
int wasroot;
- br_log_state(p);
-
wasroot = br_is_root_bridge(br);
br_become_designated_port(p);
p->state = BR_STATE_DISABLED;
p->topology_change_ack = 0;
p->config_pending = 0;
+ br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
del_timer(&p->message_age_timer);
const char *base, char __user *ubase)
{
char __user *hlp = ubase + ((char *)m - base);
- if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+ /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
+ long. Copy 29 bytes and fill remaining bytes with zeroes. */
+ strncpy(name, m->u.match->name, sizeof(name));
+ if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
const char *base, char __user *ubase)
{
char __user *hlp = ubase + ((char *)w - base);
- if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+ strncpy(name, w->u.watcher->name, sizeof(name));
+ if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
int ret;
char __user *hlp;
const struct ebt_entry_target *t;
+ char name[EBT_FUNCTION_MAXNAMELEN] = {};
if (e->bitmask == 0)
return 0;
ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
if (ret != 0)
return ret;
- if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
+ strncpy(name, t->u.target->name, sizeof(name));
+ if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
return -EFAULT;
return 0;
}
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
+#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>
static struct kmem_cache *peer_cachep __read_mostly;
+static LIST_HEAD(gc_list);
+static const int gc_delay = 60 * HZ;
+static struct delayed_work gc_work;
+static DEFINE_SPINLOCK(gc_lock);
+
#define node_height(x) x->avl_height
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
+static void inetpeer_gc_worker(struct work_struct *work)
+{
+ struct inet_peer *p, *n;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&gc_lock);
+ list_replace_init(&gc_list, &list);
+ spin_unlock_bh(&gc_lock);
+
+ if (list_empty(&list))
+ return;
+
+ list_for_each_entry_safe(p, n, &list, gc_list) {
+
+ if(need_resched())
+ cond_resched();
+
+ if (p->avl_left != peer_avl_empty) {
+ list_add_tail(&p->avl_left->gc_list, &list);
+ p->avl_left = peer_avl_empty;
+ }
+
+ if (p->avl_right != peer_avl_empty) {
+ list_add_tail(&p->avl_right->gc_list, &list);
+ p->avl_right = peer_avl_empty;
+ }
+
+ n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
+
+ if (!atomic_read(&p->refcnt)) {
+ list_del(&p->gc_list);
+ kmem_cache_free(peer_cachep, p);
+ }
+ }
+
+ if (list_empty(&list))
+ return;
+
+ spin_lock_bh(&gc_lock);
+ list_splice(&list, &gc_list);
+ spin_unlock_bh(&gc_lock);
+
+ schedule_delayed_work(&gc_work, gc_delay);
+}
/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
+ INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
}
static int addr_compare(const struct inetpeer_addr *a,
p->rate_last = 0;
p->pmtu_expires = 0;
p->pmtu_orig = 0;
- p->redirect_genid = 0;
memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
-
+ INIT_LIST_HEAD(&p->gc_list);
/* Link the node. */
link_to_pool(p, base);
return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
+
+void inetpeer_invalidate_tree(int family)
+{
+ struct inet_peer *old, *new, *prev;
+ struct inet_peer_base *base = family_to_base(family);
+
+ write_seqlock_bh(&base->lock);
+
+ old = base->root;
+ if (old == peer_avl_empty_rcu)
+ goto out;
+
+ new = peer_avl_empty_rcu;
+
+ prev = cmpxchg(&base->root, old, new);
+ if (prev == old) {
+ base->total = 0;
+ spin_lock(&gc_lock);
+ list_add_tail(&prev->gc_list, &gc_list);
+ spin_unlock(&gc_lock);
+ schedule_delayed_work(&gc_work, gc_delay);
+ }
+
+out:
+ write_sequnlock_bh(&base->lock);
+}
+EXPORT_SYMBOL(inetpeer_invalidate_tree);
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
-static int redirect_genid;
static struct delayed_work expires_work;
static unsigned long expires_ljiffies;
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
- redirect_genid++;
+ inetpeer_invalidate_tree(AF_INET);
}
/*
peer = rt->peer;
if (peer) {
- if (peer->redirect_learned.a4 != new_gw ||
- peer->redirect_genid != redirect_genid) {
+ if (peer->redirect_learned.a4 != new_gw) {
peer->redirect_learned.a4 = new_gw;
- peer->redirect_genid = redirect_genid;
atomic_inc(&__rt_peer_genid);
}
check_peer_redir(&rt->dst, peer);
if (peer) {
check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
- peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway)
check_peer_redir(&rt->dst, peer);
dst_init_metrics(&rt->dst, peer->metrics, false);
check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
- peer->redirect_learned.a4 = 0;
+
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
}
}
+ /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
+ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
+ goto fallback;
+
if (!skb_shift(prev, skb, len))
goto fallback;
if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
/* Join all-node multicast group */
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
+ /* Join all-router multicast group if forwarding is set */
+ if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST))
+ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+
return ndev;
}
if (del_timer(&ct->timeout)) {
death_by_timeout((unsigned long)ct);
- dropped = 1;
- NF_CT_STAT_INC_ATOMIC(net, early_drop);
+ /* Check if we indeed killed this entry. Reliable event
+ delivery may have inserted it into the dying list. */
+ if (test_bit(IPS_DYING_BIT, &ct->status)) {
+ dropped = 1;
+ NF_CT_STAT_INC_ATOMIC(net, early_drop);
+ }
}
nf_ct_put(ct);
return dropped;
if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
rcu_read_unlock();
- spin_unlock_bh(&nf_conntrack_lock);
nfnl_unlock();
if (request_module("nf-nat-ipv4") < 0) {
nfnl_lock();
- spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
return -EOPNOTSUPP;
}
nfnl_lock();
- spin_lock_bh(&nf_conntrack_lock);
rcu_read_lock();
if (nfnetlink_parse_nat_setup_hook)
return -EAGAIN;
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
*addr, new_addr, 1);
} else if (nh->protocol == IPPROTO_UDP) {
- if (likely(transport_len >= sizeof(struct udphdr)))
- inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
- *addr, new_addr, 1);
+ if (likely(transport_len >= sizeof(struct udphdr))) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace4(&uh->check, skb,
+ *addr, new_addr, 1);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
}
csum_replace4(&nh->check, *addr, new_addr);
skb->rxhash = 0;
}
-static int set_udp_port(struct sk_buff *skb,
- const struct ovs_key_udp *udp_port_key)
+static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
+{
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+ set_tp_port(skb, port, new_port, &uh->check);
+
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ *port = new_port;
+ skb->rxhash = 0;
+ }
+}
+
+static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
struct udphdr *uh;
int err;
uh = udp_hdr(skb);
if (udp_port_key->udp_src != uh->source)
- set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+ set_udp_port(skb, &uh->source, udp_port_key->udp_src);
if (udp_port_key->udp_dst != uh->dest)
- set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+ set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
return 0;
}
-static int set_tcp_port(struct sk_buff *skb,
- const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
struct tcphdr *th;
int err;
break;
case OVS_KEY_ATTR_TCP:
- err = set_tcp_port(skb, nla_data(nested_attr));
+ err = set_tcp(skb, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_UDP:
- err = set_udp_port(skb, nla_data(nested_attr));
+ err = set_udp(skb, nla_data(nested_attr));
break;
}
vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
if (!vport)
return ERR_PTR(-ENODEV);
+ if (ovs_header->dp_ifindex &&
+ ovs_header->dp_ifindex != get_dpifindex(vport->dp))
+ return ERR_PTR(-ENODEV);
return vport;
} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
*/
static void alc_init_special_input_src(struct hda_codec *codec);
+static int alc269_fill_coef(struct hda_codec *codec);
static int alc_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
unsigned int i;
+ if (codec->vendor_id == 0x10ec0269)
+ alc269_fill_coef(codec);
+
alc_fix_pll(codec);
alc_auto_init_amp(codec, spec->init_amp);
ALC882_FIXUP_PB_M5210,
ALC882_FIXUP_ACER_ASPIRE_7736,
ALC882_FIXUP_ASUS_W90V,
+ ALC889_FIXUP_CD,
ALC889_FIXUP_VAIO_TT,
ALC888_FIXUP_EEE1601,
ALC882_FIXUP_EAPD,
{ }
}
},
+ [ALC889_FIXUP_CD] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x1c, 0x993301f0 }, /* CD */
+ { }
+ }
+ },
[ALC889_FIXUP_VAIO_TT] = {
.type = ALC_FIXUP_PINS,
.v.pins = (const struct alc_pincfg[]) {
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3", ALC889_FIXUP_CD),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
static int alc269_fill_coef(struct hda_codec *codec)
{
+ struct alc_spec *spec = codec->spec;
int val;
+ if (spec->codec_variant != ALC269_TYPE_ALC269VB)
+ return 0;
+
if ((alc_get_coef0(codec) & 0x00ff) < 0x015) {
alc_write_coef_idx(codec, 0xf, 0x960b);
alc_write_coef_idx(codec, 0xe, 0x8817);
hw->ops.open = snd_hdspm_hwdep_dummy_op;
hw->ops.ioctl = snd_hdspm_hwdep_ioctl;
+ hw->ops.ioctl_compat = snd_hdspm_hwdep_ioctl;
hw->ops.release = snd_hdspm_hwdep_dummy_op;
return 0;
.platform_name = "samsung-audio",
.cpu_dai_name = "s3c24xx-iis",
.codec_dai_name = "wm8753-hifi",
- .codec_name = "wm8753-codec.0-001a",
+ .codec_name = "wm8753.0-001a",
.init = neo1973_wm8753_init,
.ops = &neo1973_hifi_ops,
},
.stream_name = "Voice",
.cpu_dai_name = "dfbmcs320-pcm",
.codec_dai_name = "wm8753-voice",
- .codec_name = "wm8753-codec.0-001a",
+ .codec_name = "wm8753.0-001a",
.ops = &neo1973_voice_ops,
},
};