in which case the page will not be stored in the cache this time.
+BULK INODE PAGE UNCACHE
+-----------------------
+
+A convenience routine is provided to perform an uncache on all the pages
+attached to an inode. This assumes that the pages on the inode correspond on a
+1:1 basis with the pages in the cache.
+
+ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+ struct inode *inode);
+
+This takes the netfs cookie that the pages were cached with and the inode that
+the pages are attached to. This function will wait for pages to finish being
+written to the cache and for the cache to finish with the pages generally. No
+error is returned.
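+
+As an example, a netfs might call this from its inode-eviction path just
+before relinquishing the inode's cookie, as the cifs and NFS call sites
+updated elsewhere in this series do. A minimal sketch (my_inode_info, MY_I()
+and its fscache field are hypothetical):
+
+	void my_netfs_evict_inode(struct inode *inode)
+	{
+		struct my_inode_info *mi = MY_I(inode);
+
+		if (mi->fscache) {
+			/* Wait for and uncache any pages fscache still
+			 * holds against this inode... */
+			fscache_uncache_all_inode_pages(mi->fscache, inode);
+			/* ...then discard the cookie itself. */
+			fscache_relinquish_cookie(mi->fscache, 1);
+			mi->fscache = NULL;
+		}
+	}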
+
+
==========================
INDEX AND DATA FILE UPDATE
==========================
return 0;
}
+#define early_pfn_valid(pfn) pfn_valid((pfn))
+
#endif /* CONFIG_DISCONTIGMEM */
#ifdef CONFIG_NEED_MULTIPLE_NODES
pmode_cr4: .long 0 /* Saved %cr4 */
pmode_efer: .quad 0 /* Saved EFER */
pmode_gdt: .quad 0
+pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
+pmode_behavior: .long 0 /* Wakeup behavior flags */
realmode_flags: .long 0
real_magic: .long 0
trampoline_segment: .word 0
/* Call the C code */
calll main
+ /* Restore MISC_ENABLE before entering protected mode, in case
+ BIOS decided to clear XD_DISABLE during S3. */
+ movl pmode_behavior, %eax
+ btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
+ jnc 1f
+
+ movl pmode_misc_en, %eax
+ movl pmode_misc_en + 4, %edx
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ wrmsr
+1:
+
/* Do any other stuff... */
#ifndef CONFIG_64BIT
u32 pmode_efer_low; /* Protected mode EFER */
u32 pmode_efer_high;
u64 pmode_gdt;
+ u32 pmode_misc_en_low; /* Protected mode MISC_ENABLE */
+ u32 pmode_misc_en_high;
+ u32 pmode_behavior; /* Wakeup routine behavior flags */
u32 realmode_flags;
u32 real_magic;
u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
#define WAKEUP_END_SIGNATURE 0x65a22c82
+/* Wakeup behavior bits */
+#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
+
#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
header->pmode_cr0 = read_cr0();
header->pmode_cr4 = read_cr4_safe();
+ header->pmode_behavior = 0;
+ if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
+ &header->pmode_misc_en_low,
+ &header->pmode_misc_en_high))
+ header->pmode_behavior |=
+ (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
header->realmode_flags = acpi_realmode_flags;
header->real_magic = 0x12345678;
DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
},
},
+ { /* Handle reboot issue on Acer Aspire one */
+ .callback = set_bios_reboot,
+ .ident = "Acer Aspire One A110",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+ },
+ },
{ }
};
static int nmi_start(void)
{
get_online_cpus();
- on_each_cpu(nmi_cpu_start, NULL, 1);
ctr_running = 1;
+ /* make ctr_running visible to the nmi handler: */
+ smp_mb();
+ on_each_cpu(nmi_cpu_start, NULL, 1);
put_online_cpus();
return 0;
}
nmi_enabled = 0;
ctr_running = 0;
- barrier();
+ /* make variables visible to the nmi handler: */
+ smp_mb();
err = register_die_notifier(&profile_exceptions_nb);
if (err)
goto fail;
get_online_cpus();
register_cpu_notifier(&oprofile_cpu_nb);
- on_each_cpu(nmi_cpu_setup, NULL, 1);
nmi_enabled = 1;
+ /* make nmi_enabled visible to the nmi handler: */
+ smp_mb();
+ on_each_cpu(nmi_cpu_setup, NULL, 1);
put_online_cpus();
return 0;
nmi_enabled = 0;
ctr_running = 0;
put_online_cpus();
- barrier();
+ /* make variables visible to the nmi handler: */
+ smp_mb();
unregister_die_notifier(&profile_exceptions_nb);
msrs = &get_cpu_var(cpu_msrs);
model->shutdown(msrs);
}
#ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int triggering)
+static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
{
int rc, pirq, irq = -1;
struct physdev_map_pirq map_irq;
int shareable = 0;
char *name;
- bool gsi_override = false;
if (!xen_pv_domain())
return -1;
shareable = 1;
name = "ioapic-level";
}
-
pirq = xen_allocate_pirq_gsi(gsi);
if (pirq < 0)
goto out;
- /* Before we bind the GSI to a Linux IRQ, check whether
- * we need to override it with bus_irq (IRQ) value. Usually for
- * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
- * but there are oddballs where the IRQ != GSI:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
- * which ends up being: gsi_to_irq[9] == 20
- * (which is what acpi_gsi_to_irq ends up calling when starting the
- * the ACPI interpreter and keels over since IRQ 9 has not been
- * setup as we had setup IRQ 20 for it).
- */
- if (gsi == acpi_sci_override_gsi) {
- /* Check whether the GSI != IRQ */
- acpi_gsi_to_irq(gsi, &irq);
- if (irq != gsi)
- /* Bugger, we MUST have that IRQ. */
- gsi_override = true;
- }
- if (gsi_override)
- irq = xen_bind_pirq_gsi_to_irq(irq, pirq, shareable, name);
+ if (gsi_override >= 0)
+ irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
else
irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
if (irq < 0)
return irq;
}
-static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
{
int rc, irq;
struct physdev_setup_gsi setup_gsi;
printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
gsi, triggering, polarity);
- irq = xen_register_pirq(gsi, triggering);
+ irq = xen_register_pirq(gsi, gsi_override, triggering);
setup_gsi.gsi = gsi;
setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
int rc;
int trigger, polarity;
int gsi = acpi_sci_override_gsi;
+ int irq = -1;
+ int gsi_override = -1;
if (!gsi)
return;
printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
"polarity=%d\n", gsi, trigger, polarity);
- gsi = xen_register_gsi(gsi, trigger, polarity);
+ /* Before we bind the GSI to a Linux IRQ, check whether
+ * we need to override it with bus_irq (IRQ) value. Usually for
+ * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
+ * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+ * but there are oddballs where the IRQ != GSI:
+ * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+ * which ends up being: gsi_to_irq[9] == 20
+ * (which is what acpi_gsi_to_irq ends up calling when starting
+ * the ACPI interpreter and keels over since IRQ 9 has not been
+ * set up as we had set up IRQ 20 for it).
+ */
+ /* Check whether the GSI != IRQ */
+ if (acpi_gsi_to_irq(gsi, &irq) == 0) {
+ if (irq >= 0 && irq != gsi)
+ /* Bugger, we MUST have that IRQ. */
+ gsi_override = irq;
+ }
+
+ gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
printk(KERN_INFO "xen: acpi sci %d\n", gsi);
return;
static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
int trigger, int polarity)
{
- return xen_register_gsi(gsi, trigger, polarity);
+ return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
}
static int __init pci_xen_initial_domain(void)
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
continue;
- xen_register_pirq(irq,
+ xen_register_pirq(irq, -1 /* no GSI override */,
trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
}
}
x86_platform.set_wallclock = efi_set_rtc_mmss;
#endif
- /* Setup for EFI runtime service */
- reboot_type = BOOT_EFI;
-
#if EFI_DEBUG
print_efi_memmap();
#endif
smp_wmb();
cic->key = cfqd_dead_key(cfqd);
+ rcu_read_lock();
if (rcu_dereference(ioc->ioc_data) == cic) {
+ rcu_read_unlock();
spin_lock(&ioc->lock);
rcu_assign_pointer(ioc->ioc_data, NULL);
spin_unlock(&ioc->lock);
- }
+ } else
+ rcu_read_unlock();
if (cic->cfqq[BLK_RW_ASYNC]) {
cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
spin_lock_irqsave(&ioc->lock, flags);
- BUG_ON(ioc->ioc_data == cic);
+ BUG_ON(rcu_dereference_check(ioc->ioc_data,
+ lockdep_is_held(&ioc->lock)) == cic);
radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
hlist_del_rcu(&cic->cic_list);
md_io.error = 0;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
- rw |= REQ_FUA;
+ rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1);
struct task_struct *bm_task;
};
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
- unsigned long e, int val, const enum km_type km);
-
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
bio_endio(bio, -EIO);
} else {
submit_bio(rw, bio);
+ /* this should not count as user activity and cause the
+ * resync to throttle -- see drbd_rs_should_slow_down(). */
+ atomic_add(len >> 9, &mdev->rs_sect_ev);
}
}
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
- unsigned long e, int val, const enum km_type km)
+ unsigned long e, int val)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long *p_addr = NULL;
unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
if (page_nr != last_page_nr) {
if (p_addr)
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr, KM_IRQ1);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
changed_total += c;
c = 0;
- p_addr = __bm_map_pidx(b, page_nr, km);
+ p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
last_page_nr = page_nr;
}
if (val)
c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
}
if (p_addr)
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr, KM_IRQ1);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
bm_print_lock_info(mdev);
- c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
+ c = __bm_change_bits_to(mdev, s, e, val);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
{
int i;
int bits;
- unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
+ unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
b->bm_set += BITS_PER_LONG - bits;
}
- kunmap_atomic(paddr, KM_USER0);
+ kunmap_atomic(paddr, KM_IRQ1);
}
-/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
+/* Same thing as drbd_bm_set_bits,
+ * but more efficient for a large bit range.
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
* Do not use memset, because we must account for changes,
 * so we need to loop over the words with hweight() anyway.
*/
+ struct drbd_bitmap *b = mdev->bitmap;
unsigned long sl = ALIGN(s,BITS_PER_LONG);
unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
int first_page;
if (e - s <= 3*BITS_PER_LONG) {
/* don't bother; el and sl may even be wrong. */
- __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
+ spin_lock_irq(&b->bm_lock);
+ __bm_change_bits_to(mdev, s, e, 1);
+ spin_unlock_irq(&b->bm_lock);
return;
}
/* difference is large enough that we can trust sl and el */
+ spin_lock_irq(&b->bm_lock);
+
/* bits filling the current long */
if (sl)
- __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
+ __bm_change_bits_to(mdev, s, sl-1, 1);
first_page = sl >> (3 + PAGE_SHIFT);
last_page = el >> (3 + PAGE_SHIFT);
/* first and full pages, unless first page == last page */
for (page_nr = first_page; page_nr < last_page; page_nr++) {
bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+ spin_unlock_irq(&b->bm_lock);
cond_resched();
first_word = 0;
+ spin_lock_irq(&b->bm_lock);
}
/* last page (respectively only page, for first page == last page) */
* it would trigger an assert in __bm_change_bits_to()
*/
if (el <= e)
- __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
+ __bm_change_bits_to(mdev, el, e, 1);
+ spin_unlock_irq(&b->bm_lock);
}
/* returns bit state
dev_err(DEV, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
+ /* If the data socket received something meanwhile,
+ * that is good enough: peer is still alive. */
+ if (time_after(mdev->last_received,
+ jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ continue;
if (ping_timeout_active) {
dev_err(DEV, "PingAck did not arrive in time.\n");
goto reconnect;
goto reconnect;
}
if (received == expect) {
+ mdev->last_received = jiffies;
D_ASSERT(cmd != NULL);
if (!cmd->process(mdev, h))
goto reconnect;
return 1;
}
- /* starting with drbd 8.3.8, we can handle multi-bio EEs,
- * if it should be necessary */
- max_bio_size =
- mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
- mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
-
+ max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
number = drbd_rs_number_requests(mdev);
if (number == 0)
goto requeue;
return;
}
if (twi_int_status & MCOMP) {
- if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
+ if ((read_MASTER_CTL(iface) & MEN) == 0 &&
+ (iface->cur_mode == TWI_I2C_MODE_REPEAT ||
+ iface->cur_mode == TWI_I2C_MODE_COMBINED)) {
+ iface->result = -1;
+ write_INT_MASK(iface, 0);
+ write_MASTER_CTL(iface, 0);
+ } else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
if (iface->readNum == 0) {
/* set the read number to 1 and ask for manual
* stop in block combine mode
return i2c->msg_ptr >= i2c->msg->len;
}
-/* i2s_s3c_irq_nextbyte
+/* i2c_s3c_irq_nextbyte
*
* process an interrupt and work out what to do
*/
-static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
+static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
unsigned long tmp;
unsigned char byte;
case STATE_IDLE:
dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
goto out;
- break;
case STATE_STOP:
dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
/* pretty much this leaves us with the fact that we've
* transmitted or received whatever byte we last sent */
- i2s_s3c_irq_nextbyte(i2c, status);
+ i2c_s3c_irq_nextbyte(i2c, status);
out:
return IRQ_HANDLED;
#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
#define I2C_STATUS 0x01C
#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NACK (1<<1)
#define I2C_SL_CNFG_NEWSL (1<<2)
#define I2C_SL_ADDR1 0x02c
+#define I2C_SL_ADDR2 0x030
#define I2C_TX_FIFO 0x050
#define I2C_RX_FIFO 0x054
#define I2C_PACKET_TRANSFER_STATUS 0x058
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
- i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG);
+ sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
+ i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG);
+ i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1);
+ i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
+
}
val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
* features
*/
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+ dev->vlan_features = dev->features;
dev->irq = pdev->irq;
struct bna_intr_info *intr_info)
{
int err = 0;
- unsigned long flags;
+ unsigned long irq_flags = 0, flags;
u32 irq;
irq_handler_t irq_handler;
if (bnad->cfg_flags & BNAD_CF_MSIX) {
irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
irq = bnad->msix_table[bnad->msix_num - 1].vector;
- flags = 0;
intr_info->intr_type = BNA_INTR_T_MSIX;
intr_info->idl[0].vector = bnad->msix_num - 1;
} else {
irq_handler = (irq_handler_t)bnad_isr;
irq = bnad->pcidev->irq;
- flags = IRQF_SHARED;
+ irq_flags = IRQF_SHARED;
intr_info->intr_type = BNA_INTR_T_INTX;
/* intr_info->idl.vector = 0 ? */
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
+ flags = irq_flags;
sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
/*
return -EINVAL;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
- GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
- GRETH_REGSAVE(regs->esa_lsb,
- addr->sa_data[2] << 24 | addr->
- sa_data[3] << 16 | addr->sa_data[4] << 8 | addr->sa_data[5]);
return 0;
}
{
struct sixpack *sp;
- write_lock(&disc_data_lock);
+ write_lock_bh(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
- write_unlock(&disc_data_lock);
+ write_unlock_bh(&disc_data_lock);
if (!sp)
return;
{
struct mkiss *ax;
- write_lock(&disc_data_lock);
+ write_lock_bh(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
- write_unlock(&disc_data_lock);
+ write_unlock_bh(&disc_data_lock);
if (!ax)
return;
PCI_DMA_FROMDEVICE);
} else {
pci_unmap_single(np->pci_dev, np->rx_dma[entry],
- buflen, PCI_DMA_FROMDEVICE);
+ buflen + NATSEMI_PADDING,
+ PCI_DMA_FROMDEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.27.00.00-01"
+#define DRV_VERSION "v1.00.00.29.00.00-01"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
QL_LB_LINK_UP = 10,
QL_FRC_COREDUMP = 11,
QL_EEH_FATAL = 12,
+ QL_ASIC_RECOVERY = 14, /* We are in asic recovery. */
};
/* link_status bit definitions */
* thread
*/
clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ /* Set the asic recovery bit to tell the reset process that we
+ * are in fatal error recovery rather than a normal close.
+ */
+ set_bit(QL_ASIC_RECOVERY, &qdev->flags);
queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
return;
case CAM_LOOKUP_ERR_EVENT:
- netif_err(qdev, link, qdev->ndev,
- "Multiple CAM hits lookup occurred.\n");
- netif_err(qdev, drv, qdev->ndev,
- "This event shouldn't occur.\n");
+ netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
+ netdev_err(qdev->ndev, "This event shouldn't occur.\n");
ql_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
- netif_err(qdev, rx_err, qdev->ndev,
- "Soft ECC error detected.\n");
+ netdev_err(qdev->ndev, "Soft ECC error detected.\n");
ql_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
- netif_err(qdev, rx_err, qdev->ndev,
- "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
+ netdev_err(qdev->ndev, "PCI error occurred when reading "
+ "anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
*/
if (var & STS_FE) {
ql_queue_asic_error(qdev);
- netif_err(qdev, intr, qdev->ndev,
- "Got fatal error, STS = %x.\n", var);
+ netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
- netif_err(qdev, intr, qdev->ndev,
- "Resetting chip. Error Status Register = 0x%x\n", var);
+ netdev_err(qdev->ndev, "Resetting chip. "
+ "Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
end_jiffies = jiffies +
max((unsigned long)1, usecs_to_jiffies(30));
- /* Stop management traffic. */
- ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
+ /* If the recovery bit is set, skip the mailbox command and
+ * clear the bit; otherwise we are in the normal reset process.
+ */
+ if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
+ /* Stop management traffic. */
+ ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
- /* Wait for the NIC and MGMNT FIFOs to empty. */
- ql_wait_fifo_empty(qdev);
+ /* Wait for the NIC and MGMNT FIFOs to empty. */
+ ql_wait_fifo_empty(qdev);
+ } else
+ clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
.tpauser = 1,
.hw_swap = 1,
.no_ade = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
};
#define SH_GIGA_ETH_BASE 0xfee00000
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
- sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
- | ECMR_DM, ECMR);
+ sh_eth_write(ndev,
+ (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
}
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
u32 val;
- while (num_allocated < num_to_alloc) {
+ while (num_allocated <= num_to_alloc) {
struct vmxnet3_rx_buf_info *rbi;
union Vmxnet3_GenericDesc *gd;
BUG_ON(rbi->dma_addr == 0);
gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
- gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
+ gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
| val | rbi->len);
+ /* Fill the last buffer but don't mark it ready, or else the
+ * device will think that the queue is full */
+ if (num_allocated == num_to_alloc)
+ break;
+
+ gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
};
u32 num_rxd = 0;
+ bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
&rxComp);
while (rcd->gen == rq->comp_ring.gen) {
struct vmxnet3_rx_buf_info *rbi;
- struct sk_buff *skb;
+ struct sk_buff *skb, *new_skb = NULL;
+ struct page *new_page = NULL;
int num_to_alloc;
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;
-
+ struct vmxnet3_cmd_ring *ring = NULL;
if (num_rxd >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
+ ring = rq->rx_ring + ring_idx;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
goto rcd_done;
}
+ skip_page_frags = false;
ctx->skb = rbi->skb;
- rbi->skb = NULL;
+ new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ /* Skb allocation failed, do not handover this
+ * skb to stack. Reuse it. Drop the existing pkt
+ */
+ rq->stats.rx_buf_alloc_failure++;
+ ctx->skb = NULL;
+ rq->stats.drop_total++;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
skb_put(ctx->skb, rcd->len);
+
+ /* Immediate refill */
+ new_skb->dev = adapter->netdev;
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ rbi->skb = new_skb;
+ rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->skb->data, rbi->len,
+ PCI_DMA_FROMDEVICE);
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
+
} else {
- BUG_ON(ctx->skb == NULL);
+ BUG_ON(ctx->skb == NULL && !skip_page_frags);
+
/* non SOP buffer must be type 1 in most cases */
- if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
- BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
+ BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
+ BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
- if (rcd->len) {
- pci_unmap_page(adapter->pdev,
- rbi->dma_addr, rbi->len,
- PCI_DMA_FROMDEVICE);
+ /* If an sop buffer was dropped, skip all
+ * following non-sop fragments. They will be reused.
+ */
+ if (skip_page_frags)
+ goto rcd_done;
- vmxnet3_append_frag(ctx->skb, rcd, rbi);
- rbi->page = NULL;
- }
- } else {
- /*
- * The only time a non-SOP buffer is type 0 is
- * when it's EOP and error flag is raised, which
- * has already been handled.
+ new_page = alloc_page(GFP_ATOMIC);
+ if (unlikely(new_page == NULL)) {
+ /* Replacement page frag could not be allocated.
+ * Reuse this page. Drop the pkt and free the
+ * skb which contained this page as a frag. Skip
+ * processing all the following non-sop frags.
*/
- BUG_ON(true);
+ rq->stats.rx_buf_alloc_failure++;
+ dev_kfree_skb(ctx->skb);
+ ctx->skb = NULL;
+ skip_page_frags = true;
+ goto rcd_done;
+ }
+
+ if (rcd->len) {
+ pci_unmap_page(adapter->pdev,
+ rbi->dma_addr, rbi->len,
+ PCI_DMA_FROMDEVICE);
+
+ vmxnet3_append_frag(ctx->skb, rcd, rbi);
}
+
+ /* Immediate refill */
+ rbi->page = new_page;
+ rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+ 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rxd->addr = cpu_to_le64(rbi->dma_addr);
+ rxd->len = rbi->len;
}
+
skb = ctx->skb;
if (rcd->eop) {
skb->len += skb->data_len;
}
rcd_done:
- /* device may skip some rx descs */
- rq->rx_ring[ring_idx].next2comp = idx;
- VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
- rq->rx_ring[ring_idx].size);
-
- /* refill rx buffers frequently to avoid starving the h/w */
- num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
- ring_idx);
- if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
- ring_idx, adapter))) {
- vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
- adapter);
-
- /* if needed, update the register */
- if (unlikely(rq->shared->updateRxProd)) {
- VMXNET3_WRITE_BAR0_REG(adapter,
- rxprod_reg[ring_idx] + rq->qid * 8,
- rq->rx_ring[ring_idx].next2fill);
- rq->uncommitted[ring_idx] = 0;
- }
+ /* device may have skipped some rx descs */
+ ring->next2comp = idx;
+ num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
+ ring = rq->rx_ring + ring_idx;
+ while (num_to_alloc) {
+ vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
+ &rxCmdDesc);
+ BUG_ON(!rxd->addr);
+
+ /* Recv desc is ready to be used by the device */
+ rxd->gen = ring->gen;
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+ num_to_alloc--;
+ }
+
+ /* if needed, update the register */
+ if (unlikely(rq->shared->updateRxProd)) {
+ VMXNET3_WRITE_BAR0_REG(adapter,
+ rxprod_reg[ring_idx] + rq->qid * 8,
+ ring->next2fill);
+ rq->uncommitted[ring_idx] = 0;
}
vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
else
#endif
num_rx_queues = 1;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
if (enable_mq)
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
else
num_tx_queues = 1;
+ num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
else
#endif
num_rx_queues = 1;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
cancel_work_sync(&adapter->work);
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
+#include <linux/log2.h>
#include "vmxnet3_defs.h"
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01010900
+#define VMXNET3_DRIVER_VERSION_NUM 0x01011200
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
if (!chinfo[pier].pd_curves)
continue;
- for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
+ for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
struct ath5k_pdgain_info *pd =
&chinfo[pier].pd_curves[pdg];
- if (pd != NULL) {
- kfree(pd->pd_step);
- kfree(pd->pd_pwr);
- }
+ kfree(pd->pd_step);
+ kfree(pd->pd_pwr);
}
kfree(chinfo[pier].pd_curves);
ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+ /* The device has to be moved to FULLSLEEP forcibly.
+ * Otherwise the chip never moves to full sleep
+ * when no interface is up.
+ */
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+
return 0;
}
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#define IWL100_UCODE_API_MIN 5
#define IWL1000_FW_PRE "iwlwifi-1000-"
-#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE #api ".ucode"
+#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
#define IWL100_FW_PRE "iwlwifi-100-"
-#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE #api ".ucode"
+#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
/*
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#define IWL105_UCODE_API_MIN 5
#define IWL2030_FW_PRE "iwlwifi-2030-"
-#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE #api ".ucode"
+#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
#define IWL2000_FW_PRE "iwlwifi-2000-"
-#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE #api ".ucode"
+#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
#define IWL105_FW_PRE "iwlwifi-105-"
-#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE #api ".ucode"
+#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
{
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#define IWL5150_UCODE_API_MIN 1
#define IWL5000_FW_PRE "iwlwifi-5000-"
-#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE #api ".ucode"
+#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
#define IWL5150_FW_PRE "iwlwifi-5150-"
-#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
+#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
/* NIC configuration for 5000 series */
static void iwl5000_nic_config(struct iwl_priv *priv)
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
+#include <linux/stringify.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#define IWL6000G2_UCODE_API_MIN 4
#define IWL6000_FW_PRE "iwlwifi-6000-"
-#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
+#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
#define IWL6050_FW_PRE "iwlwifi-6050-"
-#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
+#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
-#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE #api ".ucode"
+#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
-#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE #api ".ucode"
+#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_context *tmp;
+ enum nl80211_iftype newviftype = newtype;
u32 interface_modes;
int err;
/* success */
iwl_teardown_interface(priv, vif, true);
- vif->type = newtype;
+ vif->type = newviftype;
vif->p2p = newp2p;
err = iwl_setup_interface(priv, ctx);
WARN_ON(err);
}
static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd)
+ struct iwl_tfd *tfd, int dma_dir)
{
struct pci_dev *dev = priv->pci_dev;
int i;
/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
- iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+ iwl_tfd_tb_get_len(tfd, i), dma_dir);
}
/**
struct iwl_tfd *tfd_tmp = txq->tfds;
int index = txq->q.read_ptr;
- iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);
+ iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
+ PCI_DMA_TODEVICE);
/* free SKB */
if (txq->txb) {
i = get_cmd_index(q, q->read_ptr);
if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
+ iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
PCI_DMA_BIDIRECTIONAL);
txq->meta[i].flags = 0;
}
void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id)
{
- int actual_slots = slots_num;
-
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+ memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num);
txq->need_update = 0;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
- cmd->len[i], PCI_DMA_TODEVICE);
+ cmd->len[i], PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
iwlagn_unmap_tfd(priv, out_meta,
- &txq->tfds[q->write_ptr]);
+ &txq->tfds[q->write_ptr],
+ PCI_DMA_BIDIRECTIONAL);
idx = -ENOMEM;
goto out;
}
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
- iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);
+ iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
* any drivers bound to them (a key side effect)
*/
if (dev->actconfig) {
+ /*
+ * FIXME: In order to avoid self-deadlock involving the
+ * bandwidth_mutex, we have to mark all the interfaces
+ * before unregistering any of them.
+ */
+ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
+ dev->actconfig->interface[i]->unregistering = 1;
+
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *interface;
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
- interface->unregistering = 1;
remove_intf_ep_devs(interface);
device_del(&interface->dev);
}
if (cifsi->fscache) {
cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
+ fscache_uncache_all_inode_pages(cifsi->fscache, inode);
fscache_relinquish_cookie(cifsi->fscache, 1);
cifsi->fscache = NULL;
}
pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
+
+/*
+ * Uncache all the pages in an inode that are marked PG_fscache, assuming them
+ * to be associated with the given cookie.
+ */
+void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+ struct inode *inode)
+{
+ struct address_space *mapping = inode->i_mapping;
+ struct pagevec pvec;
+ pgoff_t next;
+ int i;
+
+ _enter("%p,%p", cookie, inode);
+
+ if (!mapping || mapping->nrpages == 0) {
+ _leave(" [no pages]");
+ return;
+ }
+
+ pagevec_init(&pvec, 0);
+ next = 0;
+ while (next <= (loff_t)-1 &&
+ pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
+ ) {
+ for (i = 0; i < pagevec_count(&pvec); i++) {
+ struct page *page = pvec.pages[i];
+ pgoff_t page_index = page->index;
+
+ ASSERTCMP(page_index, >=, next);
+ next = page_index + 1;
+
+ if (PageFsCache(page)) {
+ __fscache_wait_on_page_write(cookie, page);
+ __fscache_uncache_page(cookie, page);
+ }
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
dfprintk(FSCACHE,
"NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
- /* Need to invalidate any mapped pages that were read in before
- * turning off the cache.
+ /* Need to uncache any pages attached to this inode that
+ * fscache knows about before turning off the cache.
*/
- if (inode->i_mapping && inode->i_mapping->nrpages)
- invalidate_inode_pages2(inode->i_mapping);
-
+ fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode);
nfs_fscache_zap_inode_cookie(inode);
}
}
/* drbdsetup XY resize -d Z
* you are free to reduce the device size to nothing, if you want to.
* the upper limit with 64bit kernel, enough ram and flexible meta data
- * is 16 TB, currently. */
+ * is 1 PiB, currently. */
/* DRBD_MAX_SECTORS */
#define DRBD_DISK_SIZE_SECT_MIN 0
-#define DRBD_DISK_SIZE_SECT_MAX (16 * (2LLU << 30))
+#define DRBD_DISK_SIZE_SECT_MAX (1 * (2LLU << 40))
#define DRBD_DISK_SIZE_SECT_DEF 0 /* = disabled = no user size... */
#define DRBD_ON_IO_ERROR_DEF EP_PASS_ON
extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
gfp_t);
+extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
+ struct inode *);
/**
* fscache_register_netfs - Register a filesystem as desiring caching services
return false;
}
+/**
+ * fscache_uncache_all_inode_pages - Uncache all an inode's pages
+ * @cookie: The cookie representing the inode's cache object.
+ * @inode: The inode to uncache pages from.
+ *
+ * Uncache all the pages in an inode that are marked PG_fscache, assuming them
+ * to be associated with the given cookie.
+ *
+ * This function may sleep. It will wait for pages that are being written out
+ * and will wait whilst the PG_fscache mark is removed by the cache.
+ */
+static inline
+void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
+ struct inode *inode)
+{
+ if (fscache_cookie_valid(cookie))
+ __fscache_uncache_all_inode_pages(cookie, inode);
+}
+
#endif /* _LINUX_FSCACHE_H */
* when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
* increased costs.
*/
-#if BITS_PER_LONG > 32
+#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
# define SCHED_LOAD_RESOLUTION 10
# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
* @dev: network device
* @addr: The source MAC address of the frame
* @key_type: The key type that the received frame used
- * @key_id: Key identifier (0..3)
+ * @key_id: Key identifier (0..3). Can be -1 if missing.
* @tsc: The TSC value of the frame that generated the MIC failure (6 octets)
* @gfp: allocation flags
*
#define DST_NOPOLICY 0x0004
#define DST_NOHASH 0x0008
#define DST_NOCACHE 0x0010
+#define DST_NOCOUNT 0x0020
union {
struct dst_entry *next;
struct rtable __rcu *rt_next;
static void jump_label_update(struct jump_label_key *key, int enable)
{
- struct jump_entry *entry = key->entries;
-
- /* if there are no users, entry can be NULL */
- if (entry)
- __jump_label_update(key, entry, __stop___jump_table, enable);
+ struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
#ifdef CONFIG_MODULES
+ struct module *mod = __module_address((jump_label_t)key);
+
__jump_label_mod_update(key, enable);
+
+ if (mod)
+ stop = mod->jump_entries + mod->num_jump_entries;
#endif
+ /* if there are no users, entry can be NULL */
+ if (entry)
+ __jump_label_update(key, entry, stop, enable);
}
#endif
to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
- to_free_normal -= save - alloc_highmem;
+ save -= alloc_highmem;
+ if (to_free_normal > save)
+ to_free_normal -= save;
+ else
+ to_free_normal = 0;
}
memory_bm_position_reset(&copy_bm);
* (The default weight is 1024 - so there's no practical
* limitation from this.)
*/
-#define MIN_SHARES 2
-#define MAX_SHARES (1UL << (18 + SCHED_LOAD_RESOLUTION))
+#define MIN_SHARES (1UL << 1)
+#define MAX_SHARES (1UL << 18)
static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif
if (!tg->se[0])
return -EINVAL;
- if (shares < MIN_SHARES)
- shares = MIN_SHARES;
- else if (shares > MAX_SHARES)
- shares = MAX_SHARES;
+ shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
mutex_lock(&shares_mutex);
if (tg->shares == shares)
* initialized:
*/
if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
- sched = !work_pending(&debug_obj_work);
+ sched = keventd_up() && !work_pending(&debug_obj_work);
hlist_add_head(&obj->node, &obj_pool);
obj_pool_free++;
obj_pool_used--;
static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ u32 old_features = features;
features &= real_dev->features;
features &= real_dev->vlan_features;
+
+ if (old_features & NETIF_F_SOFT_FEATURES)
+ features |= old_features & NETIF_F_SOFT_FEATURES;
+
if (dev_ethtool_get_rx_csum(real_dev))
features |= NETIF_F_RXCSUM;
features |= NETIF_F_LLTX;
skb_pull(skb, ETH_HLEN);
rcu_read_lock();
- if (is_multicast_ether_addr(dest)) {
+ if (is_broadcast_ether_addr(dest))
+ br_flood_deliver(br, skb);
+ else if (is_multicast_ether_addr(dest)) {
if (unlikely(netpoll_tx_running(dev))) {
br_flood_deliver(br, skb);
goto out;
br = p->br;
br_fdb_update(br, p, eth_hdr(skb)->h_source);
- if (is_multicast_ether_addr(dest) &&
+ if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
br_multicast_rcv(br, p, skb))
goto drop;
dst = NULL;
- if (is_multicast_ether_addr(dest)) {
+ if (is_broadcast_ether_addr(dest))
+ skb2 = skb;
+ else if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb);
if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
if ((mdst && mdst->mglist) ||
dst->lastuse = jiffies;
dst->flags = flags;
dst->next = NULL;
- dst_entries_add(ops, 1);
+ if (!(flags & DST_NOCOUNT))
+ dst_entries_add(ops, 1);
return dst;
}
EXPORT_SYMBOL(dst_alloc);
neigh_release(neigh);
}
- dst_entries_add(dst->ops, -1);
+ if (!(dst->flags & DST_NOCOUNT))
+ dst_entries_add(dst->ops, -1);
if (dst->ops->destroy)
dst->ops->destroy(dst);
if (addr_len < sizeof(struct sockaddr_in))
goto out;
- if (addr->sin_family != AF_INET)
+ if (addr->sin_family != AF_INET) {
+ err = -EAFNOSUPPORT;
goto out;
+ }
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
- (rt->dst.dev->features & NETIF_F_UFO)) {
+ (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
mtu, flags);
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
- unsigned long nr_pages, limit;
+ unsigned long limit;
int i, max_share, cnt;
unsigned long jiffy = jiffies;
sysctl_tcp_max_orphans = cnt / 2;
sysctl_max_syn_backlog = max(128, cnt / 256);
- /* Set the pressure threshold to be a fraction of global memory that
- * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
- * memory, with a floor of 128 pages.
- */
- nr_pages = totalram_pages - totalhigh_pages;
- limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
- limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+ limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_tcp_mem[0] = limit / 4 * 3;
sysctl_tcp_mem[1] = limit;
void __init udp_init(void)
{
- unsigned long nr_pages, limit;
+ unsigned long limit;
udp_table_init(&udp_table, "UDP");
- /* Set the pressure threshold up by the same strategy of TCP. It is a
- * fraction of global memory that is up to 1/2 at 256 MB, decreasing
- * toward zero with the amount of memory, with a floor of 128 pages.
- */
- nr_pages = totalram_pages - totalhigh_pages;
- limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
- limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+ limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_udp_mem[0] = limit / 4 * 3;
sysctl_udp_mem[1] = limit;
dst = skb_dst(skb);
mtu = dst_mtu(dst);
if (skb->len > mtu) {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ if (skb->sk)
+ ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
+ inet_sk(skb->sk)->inet_dport, mtu);
+ else
+ icmp_send(skb, ICMP_DEST_UNREACH,
+ ICMP_FRAG_NEEDED, htonl(mtu));
ret = -EMSGSIZE;
}
out:
return -EINVAL;
if (addr->sin6_family != AF_INET6)
- return -EINVAL;
+ return -EAFNOSUPPORT;
addr_type = ipv6_addr_type(&addr->sin6_addr);
if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
/* allocate dst with ip6_dst_ops */
static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
- struct net_device *dev)
+ struct net_device *dev,
+ int flags)
{
- struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, 0);
+ struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
if (unlikely(idev == NULL))
return NULL;
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev);
+ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
if (unlikely(rt == NULL)) {
in6_dev_put(idev);
goto out;
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
rt->dst.output = ip6_output;
-#if 0 /* there's no chance to use these for ndisc */
- rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
- ? DST_HOST
- : 0;
- ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
- rt->rt6i_dst.plen = 128;
-#endif
-
spin_lock_bh(&icmp6_dst_lock);
rt->dst.next = icmp6_dst_gc_list;
icmp6_dst_gc_list = &rt->dst;
goto out;
}
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL);
+ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
if (rt == NULL) {
err = -ENOMEM;
ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
rt->rt6i_dst.plen = cfg->fc_dst_len;
if (rt->rt6i_dst.plen == 128)
- rt->dst.flags = DST_HOST;
+ rt->dst.flags |= DST_HOST;
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
{
struct net *net = dev_net(ort->rt6i_dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- ort->dst.dev);
+ ort->dst.dev, 0);
if (rt) {
rt->dst.input = ort->dst.input;
{
struct net *net = dev_net(idev->dev);
struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- net->loopback_dev);
+ net->loopback_dev, 0);
struct neighbour *neigh;
if (rt == NULL) {
in6_dev_hold(idev);
- rt->dst.flags = DST_HOST;
+ rt->dst.flags |= DST_HOST;
rt->dst.input = ip6_input;
rt->dst.output = ip6_output;
rt->rt6i_idev = idev;
return RX_CONTINUE;
mic_fail:
- mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
+ /*
+ * In some cases the key can be unset - e.g. a multicast packet, in
+ * a driver that supports HW encryption. Send up the key idx only if
+ * the key is set.
+ */
+ mac80211_ev_michael_mic_failure(rx->sdata,
+ rx->key ? rx->key->conf.keyidx : -1,
(void *) skb->data, NULL, GFP_ATOMIC);
return RX_DROP_UNUSABLE;
}
int status = -EINVAL;
unsigned long goal;
unsigned long limit;
- unsigned long nr_pages;
int max_share;
int order;
/* Initialize handle used for association ids. */
idr_init(&sctp_assocs_id);
- /* Set the pressure threshold to be a fraction of global memory that
- * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
- * memory, with a floor of 128 pages.
- * Note this initializes the data in sctpv6_prot too
- * Unabashedly stolen from tcp_init
- */
- nr_pages = totalram_pages - totalhigh_pages;
- limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
- limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+ limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_sctp_mem[0] = limit / 4 * 3;
sysctl_sctp_mem[1] = limit;
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
unsigned int optlen)
{
+ struct sctp_association *asoc;
+ struct sctp_ulpevent *event;
+
if (optlen > sizeof(struct sctp_event_subscribe))
return -EINVAL;
if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
return -EFAULT;
+
+ /*
+ * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+ * if there is no data to be sent or retransmitted, the stack will
+ * immediately send up this notification.
+ */
+ if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
+ &sctp_sk(sk)->subscribe)) {
+ asoc = sctp_id2assoc(sk, 0);
+
+ if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
+ event = sctp_ulpevent_make_sender_dry_event(asoc,
+ GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ sctp_ulpq_tail_event(&asoc->ulpq, event);
+ }
+ }
+
return 0;
}
if (addr)
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
- NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
+ if (key_id != -1)
+ NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
if (tsc)
NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
-static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
static int stale_bundle(struct dst_entry *dst)
{
- return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
+ return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
* still valid.
*/
-static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
+static int xfrm_bundle_ok(struct xfrm_dst *first)
{
struct dst_entry *dst = &first->u.dst;
struct xfrm_dst *last;