-------------------
Module for sound cards based on the Asus AV66/AV100/AV200 chips,
- i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX,
- HDAV1.3 (Deluxe), and HDAV1.3 Slim.
+ i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe),
+ Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim.
This module supports autoprobe and multiple cards.
Procedure for submitting patches to the -stable tree:
+ - If the patch covers files in net/ or drivers/net, please follow the netdev stable
+ submission guidelines as described in
+ Documentation/networking/netdev-FAQ.txt
- Send the patch, after verifying that it follows the above rules, to
stable@vger.kernel.org. You must note the upstream commit ID in the
changelog of your submission, as well as the kernel version you wish
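   As an illustration (the commit hash, name, and address below are
   placeholders, not taken from this release), a backport sent to
   stable@vger.kernel.org typically opens its changelog with the upstream
   commit reference and notes the target kernel version, for example:

       commit 0123456789abcdef0123456789abcdef01234567 upstream.

       <original upstream changelog text>

       Cc: <stable@vger.kernel.org> # 3.10.x
       Signed-off-by: Some Developer <developer@example.com>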
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 53
+SUBLEVEL = 54
EXTRAVERSION =
NAME = TOSSUG Baby Fish
scratchpad_contents.public_restore_ptr =
virt_to_phys(omap3_restore_3630);
else if (omap_rev() != OMAP3430_REV_ES3_0 &&
- omap_rev() != OMAP3430_REV_ES3_1)
+ omap_rev() != OMAP3430_REV_ES3_1 &&
+ omap_rev() != OMAP3430_REV_ES3_1_2)
scratchpad_contents.public_restore_ptr =
virt_to_phys(omap3_restore);
else
oh->mux->pads_dynamic))) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
_reconfigure_io_chain();
+ } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+ _reconfigure_io_chain();
}
_add_initiator_dep(oh, mpu_oh);
if (oh->mux && oh->mux->pads_dynamic) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
_reconfigure_io_chain();
+ } else if (oh->flags & HWMOD_FORCE_MSTANDBY) {
+ _reconfigure_io_chain();
}
oh->_state = _HWMOD_STATE_IDLE;
config EFI_STUB
bool "EFI stub support"
depends on EFI
+ select RELOCATABLE
---help---
This kernel feature allows a bzImage to be loaded directly
by EFI firmware without the use of a bootloader.
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 80
#define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 10
+#define KVM_NR_VAR_MTRR 8
#define ASYNC_PF_PER_VCPU 64
void arch_remove_reservations(struct resource *avail)
{
- /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
+ /*
+ * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
+ * the low 1MB unconditionally, as this area is needed for some ISA
+ * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
+ */
if (avail->flags & IORESOURCE_MEM) {
- if (avail->start < BIOS_END)
- avail->start = BIOS_END;
resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
remove_e820_regions(avail);
if (!show_unhandled_signals)
return;
- pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
- level, current->comm, task_pid_nr(current),
- message, regs->ip, regs->cs,
- regs->sp, regs->ax, regs->si, regs->di);
+ printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+ level, current->comm, task_pid_nr(current),
+ message, regs->ip, regs->cs,
+ regs->sp, regs->ax, regs->si, regs->di);
}
static int addr_to_vsyscall_nr(unsigned long addr)
{
int rc;
unsigned long cs;
+ int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
+ /* Outer-privilege level return is not implemented */
+ if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+ return X86EMUL_UNHANDLEABLE;
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
return rc;
}
vector = kvm_cpu_get_extint(v);
- if (kvm_apic_vid_enabled(v->kvm) || vector != -1)
+ if (vector != -1)
return vector; /* PIC */
return kvm_get_apic_interrupt(v); /* APIC */
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
- apic->irr_pending = false;
+ struct kvm_vcpu *vcpu;
+
+ vcpu = apic->vcpu;
+
apic_clear_vector(vec, apic->regs + APIC_IRR);
- if (apic_search_irr(apic) != -1)
- apic->irr_pending = true;
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ /* try to update RVI */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ else {
+ vec = apic_search_irr(apic);
+ apic->irr_pending = (vec != -1);
+ }
}
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
- /* Note that we never get here with APIC virtualization enabled. */
+ struct kvm_vcpu *vcpu;
+
+ if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+ return;
+
+ vcpu = apic->vcpu;
- if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
- ++apic->isr_count;
- BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
/*
- * ISR (in service register) bit is set when injecting an interrupt.
- * The highest vector is injected. Thus the latest bit set matches
- * the highest bit in ISR.
+ * With APIC virtualization enabled, all caching is disabled
+ * because the processor can modify ISR under the hood. Instead
+ * just set SVI.
*/
- apic->highest_isr_cache = vec;
+ if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+ kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
+ else {
+ ++apic->isr_count;
+ BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+ /*
+ * ISR (in service register) bit is set when injecting an interrupt.
+ * The highest vector is injected. Thus the latest bit set matches
+ * the highest bit in ISR.
+ */
+ apic->highest_isr_cache = vec;
+ }
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
int vector = kvm_apic_has_interrupt(vcpu);
struct kvm_lapic *apic = vcpu->arch.apic;
- /* Note that we never get here with APIC virtualization enabled. */
-
if (vector == -1)
return -1;
+ /*
+ * We get here even with APIC virtualization enabled, if doing
+ * nested virtualization and L1 runs with the "acknowledge interrupt
+ * on exit" mode. Then we cannot inject the interrupt via RVI,
+ * because the process would deliver it through the IDT.
+ */
+
apic_set_isr(vector, apic);
apic_update_ppr(apic);
apic_clear_irr(vector, apic);
return start;
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
+ } else if (res->flags & IORESOURCE_MEM) {
+ /* The low 1MB range is reserved for ISA cards */
+ if (start < BIOS_END)
+ start = BIOS_END;
}
return start;
}
static irqreturn_t cryp_interrupt_handler(int irq, void *param)
{
struct cryp_ctx *ctx;
- int i;
+ int count;
struct cryp_device_data *device_data;
if (param == NULL) {
if (cryp_pending_irq_src(device_data,
CRYP_IRQ_SRC_OUTPUT_FIFO)) {
if (ctx->outlen / ctx->blocksize > 0) {
- for (i = 0; i < ctx->blocksize / 4; i++) {
- *(ctx->outdata) = readl_relaxed(
- &device_data->base->dout);
- ctx->outdata += 4;
- ctx->outlen -= 4;
- }
+ count = ctx->blocksize / 4;
+
+ readsl(&device_data->base->dout, ctx->outdata, count);
+ ctx->outdata += count;
+ ctx->outlen -= count;
if (ctx->outlen == 0) {
cryp_disable_irq_src(device_data,
} else if (cryp_pending_irq_src(device_data,
CRYP_IRQ_SRC_INPUT_FIFO)) {
if (ctx->datalen / ctx->blocksize > 0) {
- for (i = 0 ; i < ctx->blocksize / 4; i++) {
- writel_relaxed(ctx->indata,
- &device_data->base->din);
- ctx->indata += 4;
- ctx->datalen -= 4;
- }
+ count = ctx->blocksize / 4;
+
+ writesl(&device_data->base->din, ctx->indata, count);
+
+ ctx->indata += count;
+ ctx->datalen -= count;
if (ctx->datalen == 0)
cryp_disable_irq_src(device_data,
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
struct page **pages, uint32_t npages, uint32_t roll)
{
- dma_addr_t pat_pa = 0;
+ dma_addr_t pat_pa = 0, data_pa = 0;
uint32_t *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
.lut_id = engine->tcm->lut_id,
};
- data = alloc_dma(txn, 4*i, &pat->data_pa);
+ data = alloc_dma(txn, 4*i, &data_pa);
+ /* FIXME: what if data_pa is wider than 32 bits? */
+ pat->data_pa = data_pa;
while (i--) {
int n = i + roll;
omap_obj->paddr = tiler_ssptr(block);
omap_obj->block = block;
- DBG("got paddr: %08x", omap_obj->paddr);
+ DBG("got paddr: %pad", &omap_obj->paddr);
}
omap_obj->paddr_cnt++;
if (obj->map_list.map)
off = (uint64_t)obj->map_list.hash.key;
- seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
+ seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
- off, omap_obj->paddr, omap_obj->paddr_cnt,
+ off, &omap_obj->paddr, omap_obj->paddr_cnt,
omap_obj->vaddr, omap_obj->roll);
if (omap_obj->flags & OMAP_BO_TILED) {
entry->paddr = tiler_ssptr(block);
entry->block = block;
- DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
- entry->paddr,
+ DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
+ &entry->paddr,
usergart[i].stride_pfn << PAGE_SHIFT);
}
}
DBG("%dx%d -> %dx%d (%d)", info->width, info->height,
info->out_width, info->out_height,
info->screen_width);
- DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
- info->paddr, info->p_uv_addr);
+ DBG("%d,%d %pad %pad", info->pos_x, info->pos_y,
+ &info->paddr, &info->p_uv_addr);
/* TODO: */
ilace = false;
static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+ if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n");
rdesc[11] = rdesc[16] = 0xff;
rdesc[12] = rdesc[17] = 0x03;
* - change the button usage range to 4-7 for the extra
* buttons
*/
- if (*rsize >= 74 &&
+ if (*rsize >= 75 &&
rdesc[61] == 0x05 && rdesc[62] == 0x08 &&
rdesc[63] == 0x19 && rdesc[64] == 0x08 &&
rdesc[65] == 0x29 && rdesc[66] == 0x0f &&
struct usb_device_descriptor *udesc;
__u16 bcdDevice, rev_maj, rev_min;
- if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 &&
+ if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 &&
rdesc[84] == 0x8c && rdesc[85] == 0x02) {
hid_info(hdev,
"fixing up Logitech keyboard report descriptor\n");
rdesc[84] = rdesc[89] = 0x4d;
rdesc[85] = rdesc[90] = 0x10;
}
- if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 &&
+ if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 &&
rdesc[32] == 0x81 && rdesc[33] == 0x06 &&
rdesc[49] == 0x81 && rdesc[50] == 0x06) {
hid_info(hdev,
return;
}
- if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
- (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
- dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n",
- __func__, dj_report->device_index);
- return;
- }
-
if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
/* The device is already known. No need to reallocate it. */
dbg_hid("%s: device is already known\n", __func__);
* device (via hid_input_report() ) and return 1 so hid-core does not do
* anything else with it.
*/
+ if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
+ (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
+ dev_err(&hdev->dev, "%s: invalid device index:%d\n",
+ __func__, dj_report->device_index);
+ return false;
+ }
spin_lock_irqsave(&djrcv_dev->lock, flags);
if (dj_report->report_id == REPORT_ID_DJ_SHORT) {
static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
+ if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) {
hid_info(hdev, "fixing up button/consumer in HID report descriptor\n");
rdesc[30] = 0x0c;
}
static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
+ if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 &&
rdesc[41] == 0x00 && rdesc[59] == 0x26 &&
rdesc[60] == 0xf9 && rdesc[61] == 0x00) {
hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n");
static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
+ if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 &&
rdesc[106] == 0x03) {
hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n");
rdesc[105] = rdesc[110] = 0x03;
}
channel = be32_to_cpup(property);
- if (channel > ADS1015_CHANNELS) {
+ if (channel >= ADS1015_CHANNELS) {
dev_err(&client->dev,
"invalid channel index %d on %s\n",
channel, node->full_name);
dev_err(&client->dev,
"invalid gain on %s\n",
node->full_name);
+ return -EINVAL;
}
}
dev_err(&client->dev,
"invalid data_rate on %s\n",
node->full_name);
+ return -EINVAL;
}
}
u8 pwm_acz[3];
u8 pwm_freq[6];
u8 pwm_rr[2];
- u8 zone_low[3];
- u8 zone_abs[3];
+ s8 zone_low[3];
+ s8 zone_abs[3];
u8 zone_hyst[2];
u32 alarms;
};
return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2));
}
-static inline int IN_TO_REG(int val, int nominal)
+static inline int IN_TO_REG(long val, int nominal)
{
return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255);
}
return (reg * 1000) >> (res - 8);
}
-static inline int TEMP_TO_REG(int val)
+static inline int TEMP_TO_REG(long val)
{
return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127);
}
return TEMP_RANGE[(reg >> 4) & 0x0f];
}
-static int TEMP_RANGE_TO_REG(int val, int reg)
+static int TEMP_RANGE_TO_REG(long val, int reg)
{
int i;
return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000;
}
-static inline int TEMP_HYST_TO_REG(int val, int ix, int reg)
+static inline int TEMP_HYST_TO_REG(long val, int ix, int reg)
{
int hyst = clamp_val((val + 500) / 1000, 0, 15);
return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg;
}
-static inline int FAN_TO_REG(int val, int tpc)
+static inline int FAN_TO_REG(long val, int tpc)
{
if (tpc) {
return clamp_val(val / tpc, 0, 0xffff);
return (edge > 0) ? 1 << (edge - 1) : 0;
}
-static inline int FAN_TYPE_TO_REG(int val, int reg)
+static inline int FAN_TYPE_TO_REG(long val, int reg)
{
int edge = (val == 4) ? 3 : val;
return 1000 + i * 500;
}
-static int FAN_MAX_TO_REG(int val)
+static int FAN_MAX_TO_REG(long val)
{
int i;
return acz[(reg >> 5) & 0x07];
}
-static inline int PWM_ACZ_TO_REG(int val, int reg)
+static inline int PWM_ACZ_TO_REG(long val, int reg)
{
int acz = (val == 4) ? 2 : val - 1;
return PWM_FREQ[reg & 0x0f];
}
-static int PWM_FREQ_TO_REG(int val, int reg)
+static int PWM_FREQ_TO_REG(long val, int reg)
{
int i;
return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0;
}
-static int PWM_RR_TO_REG(int val, int ix, int reg)
+static int PWM_RR_TO_REG(long val, int ix, int reg)
{
int i;
return PWM_RR_FROM_REG(reg, ix) ? 1 : 0;
}
-static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg)
+static inline int PWM_RR_EN_TO_REG(long val, int ix, int reg)
{
int en = (ix == 1) ? 0x80 : 0x08;
const char *buf, size_t count)
{
struct dme1737_data *data = dev_get_drvdata(dev);
- long val;
+ unsigned long val;
int err;
- err = kstrtol(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
+ if (val > 255)
+ return -EINVAL;
+
data->vrm = val;
return count;
}
return -EINVAL;
}
-static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
+static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm)
{
struct gpio_fan_speed *speed = fan_data->speed;
int i;
* TEMP: mC (-128C to +127C)
* REG: 1C/bit, two's complement
*/
-static inline s8 TEMP_TO_REG(int val)
+static inline s8 TEMP_TO_REG(long val)
{
int nval = clamp_val(val, -128000, 127000) ;
return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000;
/* Temperature is reported in .001 degC increments */
#define TEMP_TO_REG(val) \
- clamp_val(SCALE(val, 1000, 1), -127, 127)
+ DIV_ROUND_CLOSEST(clamp_val((val), -127000, 127000), 1000)
#define TEMPEXT_FROM_REG(val, ext) \
SCALE(((val) << 4) + (ext), 16, 1000)
#define TEMP_FROM_REG(val) ((val) * 1000)
13300, 16000, 20000, 26600, 32000, 40000, 53300, 80000
};
-static int RANGE_TO_REG(int range)
+static int RANGE_TO_REG(long range)
{
int i;
11, 15, 22, 29, 35, 44, 59, 88
};
-static int FREQ_TO_REG(const int *map, int freq)
+static int FREQ_TO_REG(const int *map, unsigned long freq)
{
int i;
if (err)
return err;
+ if (val > 255)
+ return -EINVAL;
+
data->vrm = val;
return count;
}
{
return val * 830 + 52120;
}
-static inline s8 TEMP_TO_REG(int val)
+static inline s8 TEMP_TO_REG(long val)
{
int nval = clamp_val(val, -54120, 157530) ;
return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830;
struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
- dev->buf_len, DMA_MEM_TO_DEV);
+ dev->buf_len, DMA_TO_DEVICE);
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}
struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
- dev->buf_len, DMA_DEV_TO_MEM);
+ dev->buf_len, DMA_FROM_DEVICE);
/* The last two bytes have to be read without using dma */
dev->buf += dev->buf_len - 2;
dev_err(&dev->pdev->dev, "failed to disconnect.\n");
goto free;
}
+ cl->timer_count = MEI_CONNECT_TIMEOUT;
mdelay(10); /* Wait for hardware disconnection ready */
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
} else {
cl->timer_count = MEI_CONNECT_TIMEOUT;
list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
} else {
+ cl->state = MEI_FILE_INITIALIZING;
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
ndev = (struct mei_nfc_dev *) cldev->priv_data;
dev = ndev->cl->dev;
+ err = -ENOMEM;
mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL);
if (!mei_buf)
- return -ENOMEM;
+ goto out;
hdr = (struct mei_nfc_hci_hdr *) mei_buf;
hdr->cmd = MEI_NFC_CMD_HCI_SEND;
hdr->data_size = length;
memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length);
-
err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE);
if (err < 0)
- return err;
-
- kfree(mei_buf);
+ goto out;
if (!wait_event_interruptible_timeout(ndev->send_wq,
ndev->recv_req_id == ndev->req_id, HZ)) {
} else {
ndev->req_id++;
}
-
+out:
+ kfree(mei_buf);
return err;
}
}
if (ioc->Request.Type.Direction == XFER_WRITE) {
if (copy_from_user(buff[sg_used], data_ptr, sz)) {
- status = -ENOMEM;
+ status = -EFAULT;
goto cleanup1;
}
} else
#include <linux/device.h> /* for dev_warn */
#include <linux/selection.h>
#include <linux/workqueue.h>
+#include <linux/tty.h>
#include <asm/cmpxchg.h>
#include "speakup.h"
struct tty_struct *tty = xchg(&spw->tty, NULL);
struct vc_data *vc = (struct vc_data *) tty->driver_data;
int pasted = 0, count;
+ struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
+ ld = tty_ldisc_ref_wait(tty);
+
+ /* FIXME: this is completely unsafe */
add_wait_queue(&vc->paste_wait, &wait);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
}
count = sel_buffer_lth - pasted;
count = min_t(int, count, tty->receive_room);
- tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
- NULL, count);
+ ld->ops->receive_buf(tty, sel_buffer + pasted, NULL, count);
pasted += count;
}
remove_wait_queue(&vc->paste_wait, &wait);
current->state = TASK_RUNNING;
+
+ tty_ldisc_deref(ld);
tty_kref_put(tty);
}
/*
* Turn off DTR and RTS early.
*/
+ if (uart_console(uport) && tty)
+ uport->cons->cflag = tty->termios.c_cflag;
+
if (!tty || (tty->termios.c_cflag & HUPCL))
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
* - Change autosuspend delay of hub can avoid unnecessary auto
* suspend timer for hub, also may decrease power consumption
* of USB bus.
+ *
+ * - If the user has disabled autosuspend by passing
+ * usbcore.autosuspend = -1, keep autosuspend disabled.
*/
- pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
+#ifdef CONFIG_PM_RUNTIME
+ if (hdev->dev.power.autosuspend_delay >= 0)
+ pm_runtime_set_autosuspend_delay(&hdev->dev, 0);
+#endif
/*
* Hubs have proper suspend/resume support, except for root hubs
return status;
}
+/*
+ * Some SuperSpeed USB devices take longer to complete link training.
+ * Section 4.19.4 of the xHCI specification says that when link training
+ * succeeds, the port sets the CSC bit to 1, so if software reads the
+ * port status before link training has completed, it will not find the
+ * device present.
+ * USB analyzer logs with such buggy devices show that in some cases the
+ * device switches on its RX termination only a long time after the host
+ * has enabled VBUS. In a few other cases the device fails to negotiate
+ * link training on the first attempt. Devices have been reported to
+ * take as long as 2000 ms to train the link after the host enables VBUS
+ * and termination. The following routine therefore implements a 2000 ms
+ * timeout for link training; if the link trains sooner, the loop exits
+ * earlier.
+ *
+ * FIXME: If a device was connected before suspend but was removed while
+ * the system was asleep, the loop in the following routine will only
+ * exit at the timeout.
+ *
+ * This routine should only be called when persist is enabled for a SS
+ * device.
+ */
+static int wait_for_ss_port_enable(struct usb_device *udev,
+ struct usb_hub *hub, int *port1,
+ u16 *portchange, u16 *portstatus)
+{
+ int status = 0, delay_ms = 0;
+
+ while (delay_ms < 2000) {
+ if (status || *portstatus & USB_PORT_STAT_CONNECTION)
+ break;
+ msleep(20);
+ delay_ms += 20;
+ status = hub_port_status(hub, *port1, portstatus, portchange);
+ }
+ return status;
+}
+
/*
* usb_port_resume - re-activate a suspended usb device's upstream port
* @udev: device to re-activate, not a root hub
clear_bit(port1, hub->busy_bits);
+ if (udev->persist_enabled && hub_is_superspeed(hub->hdev))
+ status = wait_for_ss_port_enable(udev, hub, &port1, &portchange,
+ &portstatus);
+
status = check_port_resume_type(udev,
hub, port1, status, portchange, portstatus);
if (status == 0)
#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
/*-------------------------------------------------------------------------*/
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939
+static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
+{
+ return pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
+}
+
+/*
+ * 0x84 is the offset of in/out threshold register,
+ * and it is the same offset as the register of 'hostpc'.
+ */
+#define intel_quark_x1000_insnreg01 hostpc
+
+/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
+#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD 0x007f007f
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
if (!retval)
ehci_dbg(ehci, "MWI active\n");
+ /* Reset the threshold limit */
+ if (is_intel_quark_x1000(pdev)) {
+ /*
+ * For the Intel QUARK X1000, raise the I/O threshold to the
+ * maximum usable value in order to improve performance.
+ */
+ ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
+ ehci->regs->intel_quark_x1000_insnreg01);
+ }
+
return 0;
}
* - ED_OPER: when there's any request queued, the ED gets rescheduled
* immediately. HC should be working on them.
*
- * - ED_IDLE: when there's no TD queue. there's no reason for the HC
- * to care about this ED; safe to disable the endpoint.
+ * - ED_IDLE: when there's no TD queue or the HC isn't running.
*
* When finish_unlinks() runs later, after SOF interrupt, it will often
* complete one or more URB unlinks before making that state change.
int completed, modified;
__hc32 *prev;
+ /* Is this ED already invisible to the hardware? */
+ if (ed->state == ED_IDLE)
+ goto ed_idle;
+
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
}
}
+ /* ED's now officially unlinked, hc doesn't see */
+ ed->state = ED_IDLE;
+ if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
+ ohci->eds_scheduled--;
+ ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+ ed->hwNextED = 0;
+ wmb();
+ ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
+ed_idle:
+
/* reentrancy: if we drop the schedule lock, someone might
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
- *last = ed->ed_next;
- ed->ed_next = NULL;
modified = 0;
/* unlink urbs as requested, but rescan the list after
if (completed && !list_empty (&ed->td_list))
goto rescan_this;
- /* ED's now officially unlinked, hc doesn't see */
- ed->state = ED_IDLE;
- if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
- ohci->eds_scheduled--;
- ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
- ed->hwNextED = 0;
- wmb ();
- ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE);
-
- /* but if there's work queued, reschedule */
- if (!list_empty (&ed->td_list)) {
- if (ohci->rh_state == OHCI_RH_RUNNING)
- ed_schedule (ohci, ed);
+ /*
+ * If no TDs are queued, take ED off the ed_rm_list.
+ * Otherwise, if the HC is running, reschedule.
+ * If not, leave it on the list for further dequeues.
+ */
+ if (list_empty(&ed->td_list)) {
+ *last = ed->ed_next;
+ ed->ed_next = NULL;
+ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
+ *last = ed->ed_next;
+ ed->ed_next = NULL;
+ ed_schedule(ohci, ed);
+ } else {
+ last = &ed->ed_next;
}
if (modified)
/* AMD PLL quirk */
if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
xhci->quirks |= XHCI_AMD_PLL_FIX;
+
+ if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
* last TRB of the previous TD. The command completion handler
* will take care of the rest.
*/
- if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
+ if (!event_seg && (trb_comp_code == COMP_STOP ||
+ trb_comp_code == COMP_STOP_INVAL)) {
ret = 0;
goto cleanup;
}
{ USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
{ USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
+ /* ekey Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
/* Infineon Devices */
{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
{ }, /* Optional parameter entry */
/* www.candapter.com Ewert Energy Systems CANdapter device */
#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+#define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */
+
/*
* Texas Instruments XDS100v2 JTAG / BeagleBone A3
* http://processors.wiki.ti.com/index.php/XDS100
/*
* Xsens Technologies BV products (http://www.xsens.com).
*/
-#define XSENS_CONVERTER_0_PID 0xD388
-#define XSENS_CONVERTER_1_PID 0xD389
+#define XSENS_VID 0x2639
+#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
+#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
+#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
+#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
#define XSENS_CONVERTER_2_PID 0xD38A
-#define XSENS_CONVERTER_3_PID 0xD38B
-#define XSENS_CONVERTER_4_PID 0xD38C
-#define XSENS_CONVERTER_5_PID 0xD38D
+#define XSENS_CONVERTER_3_PID 0xD38B /* Xsens USB-serial converter */
+#define XSENS_CONVERTER_4_PID 0xD38C /* Xsens Wireless Receiver */
+#define XSENS_CONVERTER_5_PID 0xD38D /* Xsens Awinda Station */
#define XSENS_CONVERTER_6_PID 0xD38E
#define XSENS_CONVERTER_7_PID 0xD38F
#define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */
#define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */
#define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */
+
+/*
+ * ekey biometric systems GmbH (http://ekey.net/)
+ */
+#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */
dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__);
return;
}
+ if (!urb->actual_length) {
+ dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__);
+ return;
+ }
if (status) {
dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status);
if (status != -ENOENT)
/* These are unsolicited reports from the firmware, hence no
waiting command to wakeup */
dev_dbg(&urb->dev->dev, "%s - event received\n", __func__);
- } else if (data[0] == WHITEHEAT_GET_DTR_RTS) {
+ } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) &&
+ (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) {
memcpy(command_info->result_buffer, &data[1],
urb->actual_length - 1);
command_info->command_finished = WHITEHEAT_CMD_COMPLETE;
found_next = 1;
if (ret != 0)
goto insert;
- slot = 0;
+ slot = path->slots[0];
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
int last = first + count - 1;
struct super_block *sb = e4b->bd_sb;
+ if (WARN_ON(count == 0))
+ return;
BUG_ON(last >= (sb->s_blocksize << 3));
assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
mb_check_buddy(e4b);
static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
{
struct ext4_prealloc_space *pa = ac->ac_pa;
+ struct ext4_buddy e4b;
+ int err;
- if (pa && pa->pa_type == MB_INODE_PA)
+ if (pa == NULL) {
+ if (ac->ac_f_ex.fe_len == 0)
+ return;
+ err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+ if (err) {
+ /*
+ * This should never happen since we pin the
+ * pages in the ext4_allocation_context so
+ * ext4_mb_load_buddy() should never fail.
+ */
+ WARN(1, "mb_load_buddy failed (%d)", err);
+ return;
+ }
+ ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+ mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+ ac->ac_f_ex.fe_len);
+ ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+ ext4_mb_unload_buddy(&e4b);
+ return;
+ }
+ if (pa->pa_type == MB_INODE_PA)
pa->pa_free += ac->ac_b_ex.fe_len;
}
return;
}
-static int isofs_read_inode(struct inode *);
+static int isofs_read_inode(struct inode *, int relocated);
static int isofs_statfs (struct dentry *, struct kstatfs *);
static struct kmem_cache *isofs_inode_cachep;
goto out;
}
-static int isofs_read_inode(struct inode *inode)
+static int isofs_read_inode(struct inode *inode, int relocated)
{
struct super_block *sb = inode->i_sb;
struct isofs_sb_info *sbi = ISOFS_SB(sb);
*/
if (!high_sierra) {
- parse_rock_ridge_inode(de, inode);
+ parse_rock_ridge_inode(de, inode, relocated);
/* if we want uid/gid set, override the rock ridge setting */
if (sbi->s_uid_set)
inode->i_uid = sbi->s_uid;
* offset that point to the underlying meta-data for the inode. The
* code below is otherwise similar to the iget() code in
* include/linux/fs.h */
-struct inode *isofs_iget(struct super_block *sb,
- unsigned long block,
- unsigned long offset)
+struct inode *__isofs_iget(struct super_block *sb,
+ unsigned long block,
+ unsigned long offset,
+ int relocated)
{
unsigned long hashval;
struct inode *inode;
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
- ret = isofs_read_inode(inode);
+ ret = isofs_read_inode(inode, relocated);
if (ret < 0) {
iget_failed(inode);
inode = ERR_PTR(ret);
struct inode; /* To make gcc happy */
-extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *);
+extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated);
extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *);
extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *);
extern struct buffer_head *isofs_bread(struct inode *, sector_t);
extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
-extern struct inode *isofs_iget(struct super_block *sb,
- unsigned long block,
- unsigned long offset);
+struct inode *__isofs_iget(struct super_block *sb,
+ unsigned long block,
+ unsigned long offset,
+ int relocated);
+
+static inline struct inode *isofs_iget(struct super_block *sb,
+ unsigned long block,
+ unsigned long offset)
+{
+ return __isofs_iget(sb, block, offset, 0);
+}
+
+static inline struct inode *isofs_iget_reloc(struct super_block *sb,
+ unsigned long block,
+ unsigned long offset)
+{
+ return __isofs_iget(sb, block, offset, 1);
+}
/* Because the inode number is no longer relevant to finding the
* underlying meta-data for an inode, we are free to choose a more
goto out;
}
+#define RR_REGARD_XA 1
+#define RR_RELOC_DE 2
+
static int
parse_rock_ridge_inode_internal(struct iso_directory_record *de,
- struct inode *inode, int regard_xa)
+ struct inode *inode, int flags)
{
int symlink_len = 0;
int cnt, sig;
+ unsigned int reloc_block;
struct inode *reloc;
struct rock_ridge *rr;
int rootflag;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
- if (regard_xa) {
+ if (flags & RR_REGARD_XA) {
rs.chr += 14;
rs.len -= 14;
if (rs.len < 0)
"relocated directory\n");
goto out;
case SIG('C', 'L'):
- ISOFS_I(inode)->i_first_extent =
- isonum_733(rr->u.CL.location);
- reloc =
- isofs_iget(inode->i_sb,
- ISOFS_I(inode)->i_first_extent,
- 0);
+ if (flags & RR_RELOC_DE) {
+ printk(KERN_ERR
+ "ISOFS: Recursive directory relocation "
+ "is not supported\n");
+ goto eio;
+ }
+ reloc_block = isonum_733(rr->u.CL.location);
+ if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
+ ISOFS_I(inode)->i_iget5_offset == 0) {
+ printk(KERN_ERR
+ "ISOFS: Directory relocation points to "
+ "itself\n");
+ goto eio;
+ }
+ ISOFS_I(inode)->i_first_extent = reloc_block;
+ reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc);
goto out;
return rpnt;
}
-int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode)
+int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
+ int relocated)
{
- int result = parse_rock_ridge_inode_internal(de, inode, 0);
+ int flags = relocated ? RR_RELOC_DE : 0;
+ int result = parse_rock_ridge_inode_internal(de, inode, flags);
/*
* if rockridge flag was reset and we didn't look for attributes
*/
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
- result = parse_rock_ridge_inode_internal(de, inode, 14);
+ result = parse_rock_ridge_inode_internal(de, inode,
+ flags | RR_REGARD_XA);
}
return result;
}
int tag_bytes = journal_tag_bytes(journal);
__u32 crc32_sum = ~0; /* Transactional Checksums */
int descr_csum_size = 0;
+ int block_error = 0;
/*
* First thing is to establish what we expect to find in the log
"checksum recovering "
"block %llu in log\n",
blocknr);
- continue;
+ block_error = 1;
+ goto skip_write;
}
/* Find a buffer for the new
success = -EIO;
}
}
-
+ if (block_error && success == 0)
+ success = -EIO;
return success;
failed:
.rpc_argp = &args,
.rpc_resp = &fattr,
};
- int status;
+ int status = 0;
+
+ if (acl == NULL && (!S_ISDIR(inode->i_mode) || dfacl == NULL))
+ goto out;
status = -EOPNOTSUPP;
if (!nfs_server_capable(inode, NFS_CAP_ACLS))
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct inode *inode = calldata->inode;
+ bool is_rdonly, is_wronly, is_rdwr;
int call_close = 0;
dprintk("%s: begin!\n", __func__);
goto out_wait;
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
- calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
spin_lock(&state->owner->so_lock);
+ is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
+ is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
+ is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
+ /* Calculate the current open share mode */
+ calldata->arg.fmode = 0;
+ if (is_rdonly || is_rdwr)
+ calldata->arg.fmode |= FMODE_READ;
+ if (is_wronly || is_rdwr)
+ calldata->arg.fmode |= FMODE_WRITE;
/* Calculate the change in open mode */
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0) {
- call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
- call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ call_close |= is_rdonly || is_rdwr;
calldata->arg.fmode &= ~FMODE_READ;
}
if (state->n_wronly == 0) {
- call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
- call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ call_close |= is_wronly || is_rdwr;
calldata->arg.fmode &= ~FMODE_WRITE;
}
}
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
args.prognumber = clp->cl_cb_session->se_cb_prog;
- args.protocol = XPRT_TRANSPORT_BC_TCP;
+ args.protocol = conn->cb_xprt->xpt_class->xcl_ident |
+ XPRT_TRANSPORT_BC;
args.authflavor = ses->se_cb_sec.flavor;
}
/* Create RPC client */
*/
ret = nfsd_racache_init(2*nrservs);
if (ret)
- return ret;
+ goto dec_users;
+
ret = nfs4_state_start();
if (ret)
goto out_racache;
out_racache:
nfsd_racache_shutdown();
+dec_users:
+ nfsd_users--;
return ret;
}
{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
struct svc_xprt_ops *xcl_ops;
struct list_head xcl_list;
u32 xcl_max_payload;
+ int xcl_ident;
};
/*
.xcl_owner = THIS_MODULE,
.xcl_ops = &svc_udp_ops,
.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
+ .xcl_ident = XPRT_TRANSPORT_UDP,
};
static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
.xcl_owner = THIS_MODULE,
.xcl_ops = &svc_tcp_ops,
.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+ .xcl_ident = XPRT_TRANSPORT_TCP,
};
void svc_init_xprt_sock(void)
}
}
spin_unlock(&xprt_list_lock);
- printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident);
+ dprintk("RPC: transport (%d) not supported\n", args->ident);
return ERR_PTR(-EIO);
found:
.xcl_owner = THIS_MODULE,
.xcl_ops = &svc_rdma_ops,
.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+ .xcl_ident = XPRT_TRANSPORT_RDMA,
};
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
select SND_JACK if INPUT=y || INPUT=SND
help
Say Y here to include support for sound cards based on the
- Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS,
- Essence ST (Deluxe), and Essence STX.
+ Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS, DSX,
+ Essence ST (Deluxe), and Essence STX (II).
Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental;
for the Xense, missing.
return; /* NOP */
#endif
+ if (spec->dsp_state == DSP_DOWNLOAD_FAILED)
+ return; /* don't retry failures */
+
chipio_enable_clocks(codec);
spec->dsp_state = DSP_DOWNLOADING;
if (!ca0132_download_dsp_images(codec))
struct auto_pin_cfg *cfg = &spec->autocfg;
int i;
- spec->dsp_state = DSP_DOWNLOAD_INIT;
+ if (spec->dsp_state != DSP_DOWNLOAD_FAILED)
+ spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
snd_hda_power_up(codec);
codec->spec = spec;
spec->codec = codec;
+ spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->num_mixers = 1;
spec->mixers[0] = ca0132_mixer;
spec->pll_coef_idx);
val = snd_hda_codec_read(codec, spec->pll_nid, 0,
AC_VERB_GET_PROC_COEF, 0);
+ if (val == -1)
+ return;
snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX,
spec->pll_coef_idx);
snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF,
static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
{
int val = alc_read_coef_idx(codec, 0x04);
+ if (val == -1)
+ return;
if (power_up)
val |= 1 << 11;
else
if ((alc_get_coef0(codec) & 0x00ff) == 0x017) {
val = alc_read_coef_idx(codec, 0x04);
/* Power up output pin */
- alc_write_coef_idx(codec, 0x04, val | (1<<11));
+ if (val != -1)
+ alc_write_coef_idx(codec, 0x04, val | (1<<11));
}
if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
val = alc_read_coef_idx(codec, 0xd);
- if ((val & 0x0c00) >> 10 != 0x1) {
+ if (val != -1 && (val & 0x0c00) >> 10 != 0x1) {
/* Capless ramp up clock control */
alc_write_coef_idx(codec, 0xd, val | (1<<10));
}
val = alc_read_coef_idx(codec, 0x17);
- if ((val & 0x01c0) >> 6 != 0x4) {
+ if (val != -1 && (val & 0x01c0) >> 6 != 0x4) {
/* Class D power on reset */
alc_write_coef_idx(codec, 0x17, val | (1<<7));
}
}
val = alc_read_coef_idx(codec, 0xd); /* Class D */
- alc_write_coef_idx(codec, 0xd, val | (1<<14));
+ if (val != -1)
+ alc_write_coef_idx(codec, 0xd, val | (1<<14));
val = alc_read_coef_idx(codec, 0x4); /* HP */
- alc_write_coef_idx(codec, 0x4, val | (1<<11));
+ if (val != -1)
+ alc_write_coef_idx(codec, 0x4, val | (1<<11));
}
/*
STAC_DELL_EQ,
STAC_ALIENWARE_M17X,
STAC_92HD89XX_HP_FRONT_JACK,
+ STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
STAC_92HD73XX_MODELS
};
{}
};
+static const struct hda_pintbl stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs[] = {
+ { 0x0e, 0x400000f0 },
+ {}
+};
+
static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
[STAC_92HD89XX_HP_FRONT_JACK] = {
.type = HDA_FIXUP_PINS,
.v.pins = stac92hd89xx_hp_front_jack_pin_configs,
+ },
+ [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
}
};
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
"Alienware M17x R3", STAC_DELL_EQ),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
+ "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
"unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
{} /* terminator */
{ OXYGEN_PCI_SUBID(0x1043, 0x835e) },
{ OXYGEN_PCI_SUBID(0x1043, 0x838e) },
{ OXYGEN_PCI_SUBID(0x1043, 0x8522) },
+ { OXYGEN_PCI_SUBID(0x1043, 0x85f4) },
{ OXYGEN_PCI_SUBID_BROKEN_EEPROM },
{ }
};
*/
/*
- * Xonar Essence ST (Deluxe)/STX
- * -----------------------------
+ * Xonar Essence ST (Deluxe)/STX (II)
+ * ----------------------------------
*
* CMI8788:
*
chip->model.resume = xonar_stx_resume;
chip->model.set_dac_params = set_pcm1796_params;
break;
+ case 0x85f4:
+ chip->model = model_xonar_st;
+ /* TODO: daughterboard support */
+ chip->model.shortname = "Xonar STX II";
+ chip->model.init = xonar_stx_init;
+ chip->model.resume = xonar_stx_resume;
+ chip->model.set_dac_params = set_pcm1796_params;
+ break;
default:
return -EINVAL;
}
spin_lock(&ioapic->lock);
for (index = 0; index < IOAPIC_NUM_PINS; index++) {
e = &ioapic->redirtbl[index];
- if (!e->fields.mask &&
- (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
- kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
- index) || index == RTC_GSI)) {
+ if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+ kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
+ index == RTC_GSI) {
if (kvm_apic_match_dest(vcpu, NULL, 0,
e->fields.dest_id, e->fields.dest_mode)) {
__set_bit(e->fields.vector,
return pfn;
}
+static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
+{
+ unsigned long i;
+
+ for (i = 0; i < npages; ++i)
+ kvm_release_pfn_clean(pfn + i);
+}
+
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
gfn_t gfn, end_gfn;
if (r) {
printk(KERN_ERR "kvm_iommu_map_address:"
"iommu failed to map pfn=%llx\n", pfn);
+ kvm_unpin_pages(kvm, pfn, page_size);
goto unmap_pages;
}
return 0;
unmap_pages:
- kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
+ kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
return r;
}
return r;
}
-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i)
- kvm_release_pfn_clean(pfn + i);
-}
-
static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t base_gfn, unsigned long npages)
{