unsigned long handler = (unsigned long)ka->sa.sa_handler;
unsigned long retcode;
int thumb = 0;
- unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+ unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+ cpsr |= PSR_ENDSTATE;
/*
* Maybe we need to deliver a 32-bit signal to a 26-bit task.
static inline void setup_syscall_restart(struct pt_regs *regs)
{
+ if (regs->ARM_ORIG_r0 == -ERESTARTNOHAND ||
+ regs->ARM_ORIG_r0 == -ERESTARTSYS ||
+ regs->ARM_ORIG_r0 == -ERESTARTNOINTR ||
+ regs->ARM_ORIG_r0 == -ERESTART_RESTARTBLOCK) {
+ /* the syscall cannot be safely restarted, return -EINTR instead */
+ regs->ARM_r0 = -EINTR;
+ return;
+ }
regs->ARM_r0 = regs->ARM_ORIG_r0;
regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
}
*/
if (syscall) {
if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
+ regs->ARM_r0 = -EAGAIN; /* prevent multiple restarts */
if (thumb_mode(regs)) {
regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
regs->ARM_pc -= 2;
static struct device_type i2c_client_type;
static int i2c_check_addr(struct i2c_adapter *adapter, int addr);
+static int i2c_check_addr_ex(struct i2c_adapter *adapter, int addr);
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver);
/* ------------------------------------------------------------------------- */
-
+#ifdef CONFIG_I2C_DEV_RK29
+extern struct completion i2c_dev_complete;
+extern void i2c_dev_dump_start(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
+extern void i2c_dev_dump_stop(struct i2c_adapter *adap, struct i2c_msg *msgs, int num, int ret);
+#endif
static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
const struct i2c_client *client)
{
client->flags = info->flags;
client->addr = info->addr;
client->irq = info->irq;
+ client->udelay = info->udelay; /* added by kfx */
strlcpy(client->name, info->type, sizeof(client->name));
/* Check for address business */
+ #if 0
status = i2c_check_addr(adap, client->addr);
if (status)
goto out_err;
+ #else
+ /* ddl@rock-chips.com : Devices that share the same i2c address can
+ coexist on the same i2c bus, provided they are never active at the same time. */
+ status = i2c_check_addr_ex(adap, client->addr);
+ if (status != 0)
+ dev_err(&adap->dev, "%d i2c clients have been registered at 0x%02x\n",
+ status, client->addr);
+ #endif
client->dev.parent = &client->adapter->dev;
client->dev.bus = &i2c_bus_type;
client->dev.type = &i2c_client_type;
- dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
- client->addr);
+ /* ddl@rock-chips.com : Devices that share the same i2c address can
+ coexist on the same i2c bus, provided they are never active at the same time. */
+ #if 0
+ dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
+ client->addr);
+ #else
+ if (status == 0)
+ dev_set_name(&client->dev, "%d-%04x", i2c_adapter_id(adap),
+ client->addr);
+ else
+ dev_set_name(&client->dev, "%d-%04x-%01x", i2c_adapter_id(adap),
+ client->addr, status);
+ #endif
+
status = device_register(&client->dev);
if (status)
goto out_err;
}
static int __unregister_client(struct device *dev, void *dummy)
+ {
+ struct i2c_client *client = i2c_verify_client(dev);
+ if (client && strcmp(client->name, "dummy"))
+ i2c_unregister_device(client);
+ return 0;
+ }
+
+ static int __unregister_dummy(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
if (client)
i2c_unregister_device(client);
return 0;
}
/* Detach any active clients. This can't fail, thus we do not
- checking the returned value. */
+ * check the returned value. This is a two-pass process, because
+ * we can't remove the dummy devices during the first pass: they
+ * could have been instantiated by real devices wishing to clean
+ * them up properly, so we give them a chance to do that first. */
res = device_for_each_child(&adap->dev, NULL, __unregister_client);
+ res = device_for_each_child(&adap->dev, NULL, __unregister_dummy);
#ifdef CONFIG_I2C_COMPAT
class_compat_remove_link(i2c_adapter_compat_class, &adap->dev,
{
return device_for_each_child(&adapter->dev, &addr, __i2c_check_addr);
}
+/* ddl@rock-chips.com : Devices that share the same i2c address can
+ coexist on the same i2c bus, provided they are never active at the same time. */
+struct i2c_addr_cnt
+{
+ int addr;
+ int cnt;
+};
+static int __i2c_check_addr_ex(struct device *dev, void *addrp)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct i2c_addr_cnt *addrinfo = (struct i2c_addr_cnt *)addrp;
+ int addr = addrinfo->addr;
+
+ if (client && client->addr == addr) {
+ addrinfo->cnt++;
+ }
+ return 0;
+}
+static int i2c_check_addr_ex(struct i2c_adapter *adapter, int addr)
+{
+ struct i2c_addr_cnt addrinfo;
+
+ addrinfo.addr = addr;
+ addrinfo.cnt = 0;
+ device_for_each_child(&adapter->dev, &addrinfo, __i2c_check_addr_ex);
+ return addrinfo.cnt;
+}
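A minimal usage sketch (editorial, not part of the patch; names illustrative) of how the relaxed address check plays out with the suffixed naming in i2c_new_device() above:

/* Registering two clients at 0x50 on adapter 0: the first finds
 * cnt == 0 and is named "0-0050"; the second finds cnt == 1 and is
 * named "0-0050-1", so both can coexist in sysfs.
 */
struct i2c_board_info info = { I2C_BOARD_INFO("eeprom", 0x50) };
struct i2c_client *a = i2c_new_device(adap, &info);	/* "0-0050"   */
struct i2c_client *b = i2c_new_device(adap, &info);	/* "0-0050-1" */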
/**
* i2c_use_client - increments the reference count of the i2c client structure
retval = i2c_add_driver(&dummy_driver);
if (retval)
goto class_err;
+#ifdef CONFIG_I2C_DEV_RK29
+ init_completion(&i2c_dev_complete);
+#endif
+
return 0;
class_err:
(msgs[ret].flags & I2C_M_RECV_LEN) ? "+" : "");
}
#endif
-
+#if defined (CONFIG_I2C_RK2818) || defined(CONFIG_I2C_RK29)
+ if (!(i2c_suspended(adap)) && (in_atomic() || irqs_disabled())) {
+#else
if (in_atomic() || irqs_disabled()) {
+#endif
+
ret = mutex_trylock(&adap->bus_lock);
if (!ret)
/* I2C activity is ongoing. */
/* Retry automatically on arbitration loss */
orig_jiffies = jiffies;
+#ifdef CONFIG_I2C_DEV_RK29
+ i2c_dev_dump_start(adap, msgs, num);
+#endif
for (ret = 0, try = 0; try <= adap->retries; try++) {
ret = adap->algo->master_xfer(adap, msgs, num);
if (ret != -EAGAIN)
if (time_after(jiffies, orig_jiffies + adap->timeout))
break;
}
+#ifdef CONFIG_I2C_DEV_RK29
+ i2c_dev_dump_stop(adap, msgs, num, ret);
+#endif
mutex_unlock(&adap->bus_lock);
return ret;
}
}
EXPORT_SYMBOL(i2c_transfer);
+#if defined (CONFIG_I2C_RK2818) || defined(CONFIG_I2C_RK29)
+int i2c_master_send(struct i2c_client *client, const char *buf, int count)
+{
+ int ret;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.len = count;
+ msg.buf = (char *)buf;
+ msg.scl_rate = 100 * 1000;
+ msg.udelay = client->udelay;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ return (ret == 1) ? count : ret;
+}
+EXPORT_SYMBOL(i2c_master_send);
+
+int i2c_master_recv(struct i2c_client *client, char *buf, int count)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags | I2C_M_RD;
+ msg.len = count;
+ msg.buf = (char *)buf;
+ msg.scl_rate = 400 * 1000;
+ msg.udelay = client->udelay;
+
+ ret = i2c_transfer(adap, &msg, 1);
+
+ return (ret == 1) ? count : ret;
+}
+EXPORT_SYMBOL(i2c_master_recv);
+
+int i2c_master_normal_send(struct i2c_client *client, const char *buf, int count, int scl_rate)
+{
+ int ret;
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.len = count;
+ msg.buf = (char *)buf;
+ msg.scl_rate = scl_rate;
+ msg.udelay = client->udelay;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ return (ret == 1) ? count : ret;
+}
+EXPORT_SYMBOL(i2c_master_normal_send);
+
+int i2c_master_normal_recv(struct i2c_client *client, char *buf, int count, int scl_rate)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ int ret;
+
+ msg.addr = client->addr;
+ msg.flags = client->flags | I2C_M_RD;
+ msg.len = count;
+ msg.buf = (char *)buf;
+ msg.scl_rate = scl_rate;
+ msg.udelay = client->udelay;
+
+ ret = i2c_transfer(adap, &msg, 1);
+
+ return (ret == 1) ? count : ret;
+}
+EXPORT_SYMBOL(i2c_master_normal_recv);
+
+int i2c_master_reg8_send(struct i2c_client *client, const char reg, const char *buf, int count, int scl_rate)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ int ret;
+ char *tx_buf = kmalloc(count + 1, GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
+ tx_buf[0] = reg;
+ memcpy(tx_buf+1, buf, count);
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.len = count + 1;
+ msg.buf = (char *)tx_buf;
+ msg.scl_rate = scl_rate;
+ msg.udelay = client->udelay;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ kfree(tx_buf);
+ return (ret == 1) ? count : ret;
+
+}
+EXPORT_SYMBOL(i2c_master_reg8_send);
+
+int i2c_master_reg8_recv(struct i2c_client *client, const char reg, char *buf, int count, int scl_rate)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msgs[2];
+ int ret;
+ char reg_buf = reg;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags;
+ msgs[0].len = 1;
+ msgs[0].buf = &reg_buf;
+ msgs[0].scl_rate = scl_rate;
+ msgs[0].udelay = client->udelay;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = client->flags | I2C_M_RD;
+ msgs[1].len = count;
+ msgs[1].buf = (char *)buf;
+ msgs[1].scl_rate = scl_rate;
+ msgs[1].udelay = client->udelay;
+
+ ret = i2c_transfer(adap, msgs, 2);
+
+ return (ret == 2) ? count : ret;
+}
+
+EXPORT_SYMBOL(i2c_master_reg8_recv);
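Usage sketch (editorial, identifiers illustrative): reading a block of registers through the Rockchip reg8 helper at 100 kHz; scl_rate and udelay are the Rockchip-specific i2c_msg fields these helpers fill in:

char val[4];
int ret = i2c_master_reg8_recv(client, 0x00, val, sizeof(val), 100 * 1000);
if (ret != sizeof(val))
	dev_err(&client->dev, "reg8 read failed: %d\n", ret);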
+
+int i2c_master_reg8_direct_send(struct i2c_client *client, const char reg, const char *buf, int count, int scl_rate)
+{
+ return i2c_master_reg8_send(client, reg, buf, count, scl_rate);
+}
+EXPORT_SYMBOL(i2c_master_reg8_direct_send);
+
+int i2c_master_reg8_direct_recv(struct i2c_client *client, const char reg, char *buf, int count, int scl_rate)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ int ret;
+ char tx_buf[count + 1];
+
+ tx_buf[0] = reg;
+ msg.addr = client->addr;
+ msg.flags = client->flags | I2C_M_REG8_DIRECT | I2C_M_RD;
+ msg.len = count + 1;
+ msg.buf = tx_buf;
+ msg.scl_rate = scl_rate;
+ msg.udelay = client->udelay;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ memcpy(buf, tx_buf + 1, count);
+ return (ret == 1) ? count : ret;
+}
+EXPORT_SYMBOL(i2c_master_reg8_direct_recv);
+
+int i2c_master_reg16_send(struct i2c_client *client, const short regs, const short *buf, int count, int scl_rate)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg;
+ int ret;
+ char *tx_buf = kmalloc(2 * (count + 1), GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
+ memcpy(tx_buf, &regs, 2);
+ memcpy(tx_buf+2, (char *)buf, count * 2);
+
+ msg.addr = client->addr;
+ msg.flags = client->flags;
+ msg.len = 2 * (count + 1);
+ msg.buf = (char *)tx_buf;
+ msg.scl_rate = scl_rate;
+ msg.udelay = client->udelay;
+
+ ret = i2c_transfer(adap, &msg, 1);
+ kfree(tx_buf);
+ return (ret == 1) ? count : ret;
+}
+EXPORT_SYMBOL(i2c_master_reg16_send);
+
+int i2c_master_reg16_recv(struct i2c_client *client, const short regs, short *buf, int count, int scl_rate)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msgs[2];
+ int ret;
+ char reg_buf[2];
+
+ memcpy(reg_buf, &regs, 2);
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = client->flags;
+ msgs[0].len = 2;
+ msgs[0].buf = reg_buf;
+ msgs[0].scl_rate = scl_rate;
+ msgs[0].udelay = client->udelay;
+
+ msgs[1].addr = client->addr;
+ msgs[1].flags = client->flags | I2C_M_RD;
+ msgs[1].len = count * 2;
+ msgs[1].buf = (char *)buf;
+ msgs[1].scl_rate = scl_rate;
+ msgs[1].udelay = client->udelay;
+
+ ret = i2c_transfer(adap, msgs, 2);
+
+ return (ret == 2) ? count : ret;
+}
+EXPORT_SYMBOL(i2c_master_reg16_recv);
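Editorial note on the reg16 helpers above: memcpy(tx_buf, &regs, 2) emits the 16-bit register address in host byte order (little-endian on ARM), so a device expecting the high address byte first needs the caller to pre-swap, e.g. (illustrative):

short buf[2];
int ret = i2c_master_reg16_recv(client, (short)cpu_to_be16(0x0102),
				buf, 2, 100 * 1000);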
+#else
/**
* i2c_master_send - issue a single I2C message in master transmit mode
return (ret == 1) ? count : ret;
}
EXPORT_SYMBOL(i2c_master_recv);
-
+#endif
/* ----------------------------------------------------
* the i2c address scanning function
* Will not work for 10-bit addresses!
unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3];
unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2];
int num = read_write == I2C_SMBUS_READ?2:1;
- struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 },
- { addr, flags | I2C_M_RD, 0, msgbuf1 }
+ struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0, 100000, 0, 0 },
+ { addr, flags | I2C_M_RD, 0, msgbuf1, 100000, 0, 0 }
};
int i;
u8 partial_pec = 0;
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
+#include <linux/wakelock.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;
+static struct wake_lock mmc_delayed_work_wake_lock;
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
static int mmc_schedule_delayed_work(struct delayed_work *work,
unsigned long delay)
{
+ wake_lock(&mmc_delayed_work_wake_lock);
return queue_delayed_work(workqueue, work, delay);
}
/* If the host is claimed then we do not want to disable it anymore */
if (!mmc_try_claim_host(host))
- return;
+ goto out;
mmc_host_do_disable(host, 1);
mmc_do_release_host(host);
+
+out:
+ wake_unlock(&mmc_delayed_work_wake_lock);
}
/**
*/
mmc_delay(10);
- if (host->f_min > 400000) {
- pr_warning("%s: Minimum clock frequency too high for "
- "identification mode\n", mmc_hostname(host));
- host->ios.clock = host->f_min;
- } else
- host->ios.clock = 400000;
+ host->ios.clock = host->f_min;
host->ios.power_mode = MMC_POWER_ON;
mmc_set_ios(host);
spin_unlock_irqrestore(&host->lock, flags);
}
+int mmc_resume_bus(struct mmc_host *host)
+{
+ if (!mmc_bus_needs_resume(host))
+ return -EINVAL;
+
+ printk("%s: Starting deferred resume\n", mmc_hostname(host));
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead) {
+ mmc_power_up(host);
+ BUG_ON(!host->bus_ops->resume);
+ host->bus_ops->resume(host);
+ }
+
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->detect)
+ host->bus_ops->detect(host);
+
+ mmc_bus_put(host);
+ printk("%s: Deferred resume completed\n", mmc_hostname(host));
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_resume_bus);
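Usage sketch (editorial; my_* names are hypothetical): a host or block driver opts in to deferred resume with the helper added in mmc.h, then resumes lazily on first use:

static int my_probe(struct mmc_host *host)
{
	mmc_set_bus_resume_policy(host, 1);	/* defer resume to first request */
	return 0;
}

static void my_request(struct mmc_host *host)
{
	if (mmc_bus_needs_resume(host))
		mmc_resume_bus(host);		/* on-demand resume */
	/* ...issue the actual request... */
}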
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
container_of(work, struct mmc_host, detect.work);
u32 ocr;
int err;
+ int extend_wakelock = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->rescan_disable) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
mmc_bus_get(host);
if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
host->bus_ops->detect(host);
+ /* If the card was removed the bus will be marked
+ * as dead - extend the wakelock so userspace
+ * can respond */
+ if (host->bus_dead)
+ extend_wakelock = 1;
+
mmc_bus_put(host);
if (!err) {
if (mmc_attach_sdio(host, ocr))
mmc_power_off(host);
+ extend_wakelock = 1;
goto out;
}
if (!err) {
if (mmc_attach_sd(host, ocr))
mmc_power_off(host);
+ extend_wakelock = 1;
goto out;
}
if (!err) {
if (mmc_attach_mmc(host, ocr))
mmc_power_off(host);
+ extend_wakelock = 1;
goto out;
}
mmc_power_off(host);
out:
+ if (extend_wakelock)
+ wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
+ else
+ wake_unlock(&mmc_delayed_work_wake_lock);
+
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
{
int err = 0;
+ if (mmc_bus_needs_resume(host))
+ return 0;
+
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
if (host->bus_ops && !host->bus_dead) {
if (host->bus_ops->suspend)
err = host->bus_ops->suspend(host);
- if (err == -ENOSYS || !host->bus_ops->resume) {
- /*
- * We simply "remove" the card in this case.
- * It will be redetected on resume.
- */
- if (host->bus_ops->remove)
- host->bus_ops->remove(host);
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_release_host(host);
- err = 0;
- }
}
mmc_bus_put(host);
int err = 0;
mmc_bus_get(host);
+ if (host->bus_resume_flags & MMC_BUSRESUME_MANUAL_RESUME) {
+ host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+ mmc_bus_put(host);
+ return 0;
+ }
+
if (host->bus_ops && !host->bus_dead) {
mmc_power_up(host);
mmc_select_voltage(host, host->ocr);
printk(KERN_WARNING "%s: error %d during resume "
"(card was removed?)\n",
mmc_hostname(host), err);
- if (host->bus_ops->remove)
- host->bus_ops->remove(host);
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_release_host(host);
- /* no need to bother upper layers */
err = 0;
}
}
mmc_bus_put(host);
- /*
- * We add a slight delay here so that resume can progress
- * in parallel.
- */
- mmc_detect_change(host, 1);
-
return err;
}
-
EXPORT_SYMBOL(mmc_resume_host);
+ /* Do the card removal on suspend if the card is assumed removable.
+ * Do it in the pm notifier while userspace isn't yet frozen, so
+ * that we can still sync the card.
+ */
+ int mmc_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+ {
+ struct mmc_host *host = container_of(
+ notify_block, struct mmc_host, pm_notify);
+ unsigned long flags;
+
+
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->rescan_disable = 1;
+ spin_unlock_irqrestore(&host->lock, flags);
+ cancel_delayed_work_sync(&host->detect);
+
+ if (!host->bus_ops || host->bus_ops->suspend)
+ break;
+
+ mmc_claim_host(host);
+
+ if (host->bus_ops->remove)
+ host->bus_ops->remove(host);
+
+ mmc_detach_bus(host);
+ mmc_release_host(host);
+ break;
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->rescan_disable = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_detect_change(host, 0);
+
+ }
+
+ return 0;
+ }
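Wiring sketch (assumed, not shown in this hunk): the notifier above only runs if it is registered with the PM core, typically where the host is allocated and freed:

host->pm_notify.notifier_call = mmc_pm_notify;
register_pm_notifier(&host->pm_notify);		/* e.g. in mmc_alloc_host() */
unregister_pm_notifier(&host->pm_notify);	/* e.g. in mmc_free_host() */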
#endif
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs)
+{
+ host->embedded_sdio_data.cis = cis;
+ host->embedded_sdio_data.cccr = cccr;
+ host->embedded_sdio_data.funcs = funcs;
+ host->embedded_sdio_data.num_funcs = num_funcs;
+}
+
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
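Board-code sketch (editorial; all values illustrative, not a real chip): a platform with a soldered-down SDIO WLAN part supplies static descriptors so the core can skip reading CCCR/CIS over the bus during probe:

static struct sdio_cis wifi_cis = {
	.vendor		= 0x02d0,	/* hypothetical IDs */
	.device		= 0x4329,
	.blksize	= 512,
	.max_dtr	= 25000000,
};
static struct sdio_cccr wifi_cccr = {
	.sdio_vsn	= 2,
	.multi_block	= 1,
	.wide_bus	= 1,
};
static struct sdio_embedded_func wifi_func = {
	.f_class	= SDIO_CLASS_WLAN,
	.f_maxblksize	= 512,
};
/* ...in the board's MMC setup hook: */
mmc_set_embedded_sdio_data(host, &wifi_cis, &wifi_cccr, &wifi_func, 1);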
+
static int __init mmc_init(void)
{
int ret;
+ wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND, "mmc_delayed_work");
+
workqueue = create_singlethread_workqueue("kmmcd");
if (!workqueue)
return -ENOMEM;
mmc_unregister_host_class();
mmc_unregister_bus();
destroy_workqueue(workqueue);
+ wake_lock_destroy(&mmc_delayed_work_wake_lock);
}
subsys_initcall(mmc_init);
#include "sdio_ops.h"
#include "sdio_cis.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
}
card->type = MMC_TYPE_SDIO;
+
+ /*
+ * Call the optional HC's init_card function to handle quirks.
+ */
+ if (host->ops->init_card)
+ host->ops->init_card(host, card);
+
/*
* For native busses: set card RCA and quit open drain mode.
if (err)
goto remove;
+ /*
+ * Update oldcard with the new RCA received from the SDIO
+ * device -- we're doing this so that it's updated in the
+ * "card" struct when oldcard overwrites that later.
+ */
+ if (oldcard)
+ oldcard->rca = card->rca;
+
mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
goto remove;
}
- /*
- * Read the common registers.
- */
- err = sdio_read_cccr(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cccr)
+ memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+ else {
+#endif
+ /*
+ * Read the common registers.
+ */
+ err = sdio_read_cccr(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
- /*
- * Read the common CIS tuples.
- */
- err = sdio_read_common_cis(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cis)
+ memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+ else {
+#endif
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
*/
card->sdio_funcs = funcs = (ocr & 0x70000000) >> 28;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs)
+ card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
/*
* If needed, disconnect card detection pull-up resistor.
*/
* Initialize (but don't add) all present functions.
*/
for (i = 0;i < funcs;i++) {
- err = sdio_init_func(host->card, i + 1);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs) {
+ struct sdio_func *tmp;
+
+ tmp = sdio_alloc_func(host->card);
+ if (IS_ERR(tmp))
+ goto remove;
+ tmp->num = (i + 1);
+ card->sdio_func[i] = tmp;
+ tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+ tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+ tmp->vendor = card->cis.vendor;
+ tmp->device = card->cis.device;
+ } else {
+#endif
+ err = sdio_init_func(host->card, i + 1);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
}
mmc_release_host(host);
return err;
}
+int sdio_reset_comm(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 ocr;
+ int err;
+
+ printk("%s():\n", __func__);
+ mmc_claim_host(host);
+
+ mmc_go_idle(host);
+
+ mmc_set_clock(host, host->f_min);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ goto err;
+
+ host->ocr = mmc_select_voltage(host, ocr);
+ if (!host->ocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ if (err)
+ goto err;
+
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto err;
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err)
+ goto err;
+
+ /*
+ * Change to the card's maximum speed.
+ */
+ if (mmc_card_highspeed(card)) {
+ /*
+ * The SDIO specification doesn't mention how
+ * the CIS transfer speed register relates to
+ * high-speed, but it seems that 50 MHz is
+ * mandatory.
+ */
+ mmc_set_clock(host, 50000000);
+ } else {
+ mmc_set_clock(host, card->cis.max_dtr);
+ }
+
+ err = sdio_enable_wide(card);
+ if (err)
+ goto err;
+ mmc_release_host(host);
+ return 0;
+err:
+ printk("%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host), err);
+ mmc_release_host(host);
+ return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
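Caller sketch (editorial): the typical user is an SDIO function driver, e.g. a WLAN driver re-synchronizing the link after an out-of-band reset or firmware reload:

if (sdio_reset_comm(func->card))
	dev_err(&func->dev, "SDIO link re-init failed\n");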
obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
obj-$(CONFIG_E2100) += e2100.o 8390.o
+obj-$(CONFIG_RK29_VMAC) += rk29_vmac.o
obj-$(CONFIG_ES3210) += es3210.o 8390.o
obj-$(CONFIG_LNE390) += lne390.o 8390.o
obj-$(CONFIG_NE3210) += ne3210.o 8390.o
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_FORCEDETH) += forcedeth.o
- obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+ obj-$(CONFIG_NE_H8300) += ne-h8300.o
obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o
obj-$(CONFIG_LP486E) += lp486e.o
obj-$(CONFIG_ETH16I) += eth16i.o
- obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
+ obj-$(CONFIG_ZORRO8390) += zorro8390.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_DECLANCE) += declance.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_A2065) += a2065.o
- obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+ obj-$(CONFIG_HYDRA) += hydra.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_MACSONIC) += macsonic.o
static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
{
struct usb_device *hdev = hub->hdev;
+ struct usb_hcd *hcd;
+ int ret;
int port1;
int status;
bool need_debounce_delay = false;
atomic_set(&to_usb_interface(hub->intfdev)->
pm_usage_cnt, 1);
return; /* Continues at init2: below */
+ } else if (type == HUB_RESET_RESUME) {
+ /* The internal host controller state for the hub device
+ * may be gone after a host power loss on system resume.
+ * Update the device's info so the HW knows it's a hub.
+ */
+ hcd = bus_to_hcd(hdev->bus);
+ if (hcd->driver->update_hub_device) {
+ ret = hcd->driver->update_hub_device(hcd, hdev,
+ &hub->tt, GFP_NOIO);
+ if (ret < 0) {
+ dev_err(hub->intfdev, "Host not "
+ "accepting hub info "
+ "update.\n");
+ dev_err(hub->intfdev, "LS/FS devices "
+ "and hubs may not work "
+ "under this hub\n.");
+ }
+ }
+ hub_power_on(hub, true);
} else {
hub_power_on(hub, true);
}
kref_put(&hub->kref, hub_release);
}
-
+struct usb_hub *g_root_hub20 = NULL;
static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_host_interface *desc;
dev_dbg (&intf->dev, "couldn't kmalloc hub struct\n");
return -ENOMEM;
}
-
+ if (!g_root_hub20)
+ g_root_hub20 = hub;
kref_init(&hub->kref);
INIT_LIST_HEAD(&hub->event_list);
hub->intfdev = &intf->dev;
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
- device_init_wakeup(&udev->dev,
+ device_set_wakeup_capable(&udev->dev,
(udev->actconfig->desc.bmAttributes
& USB_CONFIG_ATT_WAKEUP));
else
- device_init_wakeup(&udev->dev, 0);
+ device_set_wakeup_capable(&udev->dev, 0);
}
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
spin_unlock_irq(&device_state_lock);
usb_stop_pm(udev);
+
hub_free_dev(udev);
{
int err;
- /* Increment the parent's count of unsuspended children */
- if (udev->parent)
+ if (udev->parent) {
+ /* Increment the parent's count of unsuspended children */
usb_autoresume_device(udev->parent);
+ /* Initialize non-root-hub device wakeup to disabled;
+ * device (un)configuration controls wakeup capable
+ * sysfs power/wakeup controls wakeup enabled/disabled
+ */
+ device_init_wakeup(&udev->dev, 0);
+ }
+
err = usb_enumerate_device(udev); /* Read descriptors */
if (err < 0)
goto fail;
udev->ttport = hdev->ttport;
} else if (udev->speed != USB_SPEED_HIGH
&& hdev->speed == USB_SPEED_HIGH) {
+ if (!hub->tt.hub) {
+ dev_err(&udev->dev, "parent hub has no TT\n");
+ retval = -EINVAL;
+ goto fail;
+ }
udev->tt = &hub->tt;
udev->ttport = port1;
}
} /* end while (1) */
}
+/* yk@rk 20100730
+ * disconnect all devices on root hub
+ */
+void hub_disconnect_device(struct usb_hub *hub)
+{
+ hub_port_connect_change(hub, 1, 0, 0x2);
+}
+
static int hub_thread(void *__unused)
{
/* khubd needs to be freezable to avoid intefering with USB-PERSIST
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
#define ZTE_PRODUCT_AC8710 0xfff1
#define ZTE_PRODUCT_AC2726 0xfff5
+#define ZTE_PRODUCT_AC100 0x0094
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
- #define CINTERION_VENDOR_ID 0x0681
+/* Thinkwill products */
+#define THINKWILL_VENDOR_ID 0x19f5
+#define THINKWILL_PRODUCT_ID 0x9909
+
+ /* Cinterion (formerly Siemens) products */
+ #define SIEMENS_VENDOR_ID 0x0681
+ #define CINTERION_VENDOR_ID 0x1e2d
+ #define CINTERION_PRODUCT_HC25_MDM 0x0047
+ #define CINTERION_PRODUCT_HC25_MDMNET 0x0040
+ #define CINTERION_PRODUCT_HC28_MDM 0x004C
+ #define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_EU3_E 0x0051
+ #define CINTERION_PRODUCT_EU3_P 0x0052
+ #define CINTERION_PRODUCT_PH8 0x0053
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
/* Celot products */
#define CELOT_VENDOR_ID 0x211f
#define CELOT_PRODUCT_CT680M 0x6801
+/* Leadcore LC1808 */
+#define LEADCORE_VENDOR_ID 0x1ab7
+#define LEADCORE_PRODUCT_LC1808 0x2200
+/* Spreadtrum module */
+#define SC8800G_VENDOR_ID 0x067b
+#define SC8800G_PRODUCT_ID 0x2303
+ /* ONDA Communication vendor id */
+ #define ONDA_VENDOR_ID 0x1ee8
+
+ /* ONDA MT825UP HSDPA 14.2 modem */
+ #define ONDA_MT825UP 0x000b
+
+ /* Samsung products */
+ #define SAMSUNG_VENDOR_ID 0x04e8
+ #define SAMSUNG_PRODUCT_GT_B3730 0x6889
+
static struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ { USB_DEVICE(THINKWILL_VENDOR_ID, THINKWILL_PRODUCT_ID) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_QUAD) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
+ { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xFFED, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xFFFE, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xFFEB, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xF006, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_AC100) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
- { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+ { USB_DEVICE(LEADCORE_VENDOR_ID, LEADCORE_PRODUCT_LC1808) }, //zzc
+ { USB_DEVICE(SC8800G_VENDOR_ID, SC8800G_PRODUCT_ID) },
+
+// cmy:
+ { USB_DEVICE(0x0685, 0x6000) },
+ { USB_DEVICE(0x1E89, 0x1E16) },
+ { USB_DEVICE(0x7693, 0x0001) },
+ { USB_DEVICE(0x1D09, 0x4308) },
+ { USB_DEVICE(0x1234, 0x0033) },
+ { USB_DEVICE(0xFEED, 0x0001) },
+ { USB_DEVICE(ALCATEL_VENDOR_ID, 0x0017) },
+
+ /* Cinterion */
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+ { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
+ { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
NULL, &proc_single_file_operations, \
{ .proc_show = show } )
+/* ANDROID is for special files in /proc. */
+#define ANDROID(NAME, MODE, OTYPE) \
+ NOD(NAME, (S_IFREG|(MODE)), \
+ &proc_##OTYPE##_inode_operations, \
+ &proc_##OTYPE##_operations, {})
+
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
* and .. links.
mm = get_task_mm(task);
if (mm && mm != current->mm &&
- !ptrace_may_access(task, PTRACE_MODE_READ)) {
+ !ptrace_may_access(task, PTRACE_MODE_READ) &&
+ !capable(CAP_SYS_RESOURCE)) {
mmput(mm);
mm = NULL;
}
return count;
}
+static int oom_adjust_permission(struct inode *inode, int mask)
+{
+ uid_t uid;
+ struct task_struct *p = get_proc_task(inode);
+ if (p) {
+ uid = task_uid(p);
+ put_task_struct(p);
+ }
+
+ /*
+ * System Server (uid == 1000) is granted access to the oom_adj of
+ * all Android applications (uid >= 10000) and services (uid >= 1000).
+ */
+ if (p && (current_fsuid() == 1000) && (uid >= 1000)) {
+ if (inode->i_mode >> 6 & mask) {
+ return 0;
+ }
+ }
+
+ /* Fall back to default. */
+ return generic_permission(inode, mask, NULL);
+}
+
+static const struct inode_operations proc_oom_adjust_inode_operations = {
+ .permission = oom_adjust_permission,
+};
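/* Worked check (editorial): oom_adj is created 0644 (S_IRUGO|S_IWUSR),
 * so (i_mode >> 6) & 7 == 6 (owner rw-). A system_server write arrives
 * with mask == MAY_WRITE (2), and 6 & 2 != 0, so the override above
 * grants access even though system_server does not own the file.
 */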
+
static const struct file_operations proc_oom_adjust_operations = {
.read = oom_adjust_read,
.write = oom_adjust_write,
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
- REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
+ ANDROID("oom_adj",S_IRUGO|S_IWUSR, oom_adjust),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
REG("sessionid", S_IRUGO, proc_sessionid_operations),
/* for the /proc/ directory itself, after non-process stuff has been done */
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
- unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
- struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
+ unsigned int nr;
+ struct task_struct *reaper;
struct tgid_iter iter;
struct pid_namespace *ns;
+ if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET)
+ goto out_no_task;
+ nr = filp->f_pos - FIRST_PROCESS_ENTRY;
+
+ reaper = get_proc_task(filp->f_path.dentry->d_inode);
if (!reaper)
goto out_no_task;
const char *name = arch_vma_name(vma);
if (!name) {
if (mm) {
- if (vma->vm_start <= mm->start_brk &&
- vma->vm_end >= mm->brk) {
+ if (vma->vm_start <= mm->brk &&
+ vma->vm_end >= mm->start_brk) {
name = "[heap]";
} else if (vma->vm_start <= mm->start_stack &&
vma->vm_end >= mm->start_stack) {
memset(&mss, 0, sizeof mss);
mss.vma = vma;
+ /* mmap_sem is held in m_start */
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
};
struct pagemapread {
- u64 __user *out, *end;
+ int pos, len;
+ u64 *buffer;
};
#define PM_ENTRY_BYTES sizeof(u64)
static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
- if (put_user(pfn, pm->out))
- return -EFAULT;
- pm->out++;
- if (pm->out >= pm->end)
+ pm->buffer[pm->pos++] = pfn;
+ if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
* determine which areas of memory are actually mapped and llseek to
* skip over unmapped regions.
*/
+#define PAGEMAP_WALK_SIZE (PMD_SIZE)
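/* Sizing note (editorial): with 4 KiB pages, PMD_SIZE is 2 MiB, so one
 * chunk covers 512 PTEs and pm.buffer below is 512 * 8 = 4 KiB per
 * iteration -- small enough to kmalloc() with GFP_TEMPORARY.
 */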
static ssize_t pagemap_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
- struct page **pages, *page;
- unsigned long uaddr, uend;
struct mm_struct *mm;
struct pagemapread pm;
- int pagecount;
int ret = -ESRCH;
struct mm_walk pagemap_walk = {};
unsigned long src;
unsigned long svpfn;
unsigned long start_vaddr;
unsigned long end_vaddr;
+ int copied = 0;
if (!task)
goto out;
if (!mm)
goto out_task;
-
- uaddr = (unsigned long)buf & PAGE_MASK;
- uend = (unsigned long)(buf + count);
- pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
- ret = 0;
- if (pagecount == 0)
- goto out_mm;
- pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+ pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+ pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
ret = -ENOMEM;
- if (!pages)
+ if (!pm.buffer)
goto out_mm;
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, uaddr, pagecount,
- 1, 0, pages, NULL);
- up_read(&current->mm->mmap_sem);
-
- if (ret < 0)
- goto out_free;
-
- if (ret != pagecount) {
- pagecount = ret;
- ret = -EFAULT;
- goto out_pages;
- }
-
- pm.out = (u64 __user *)buf;
- pm.end = (u64 __user *)(buf + count);
-
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
pagemap_walk.mm = mm;
* user buffer is tracked in "pm", and the walk
* will stop when we hit the end of the buffer.
*/
- ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
- if (ret == PM_END_OF_BUFFER)
- ret = 0;
- /* don't need mmap_sem for these, but this looks cleaner */
- *ppos += (char __user *)pm.out - buf;
- if (!ret)
- ret = (char __user *)pm.out - buf;
-
-out_pages:
- for (; pagecount; pagecount--) {
- page = pages[pagecount-1];
- if (!PageReserved(page))
- SetPageDirty(page);
- page_cache_release(page);
+ ret = 0;
+ while (count && (start_vaddr < end_vaddr)) {
+ int len;
+ unsigned long end;
+
+ pm.pos = 0;
+ end = start_vaddr + PAGEMAP_WALK_SIZE;
+ /* overflow ? */
+ if (end < start_vaddr || end > end_vaddr)
+ end = end_vaddr;
+ down_read(&mm->mmap_sem);
+ ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+ up_read(&mm->mmap_sem);
+ start_vaddr = end;
+
+ len = min(count, PM_ENTRY_BYTES * pm.pos);
+ if (copy_to_user(buf, pm.buffer, len)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ copied += len;
+ buf += len;
+ count -= len;
}
+ *ppos += copied;
+ if (!ret || ret == PM_END_OF_BUFFER)
+ ret = copied;
+
out_free:
- kfree(pages);
+ kfree(pm.buffer);
out_mm:
mmput(mm);
out_task:
int (*get_cd)(struct mmc_host *host);
void (*enable_sdio_irq)(struct mmc_host *host, int enable);
+
+ /* optional callback for HC quirks */
+ void (*init_card)(struct mmc_host *host, struct mmc_card *card);
};
struct mmc_card;
unsigned int f_min;
unsigned int f_max;
u32 ocr_avail;
+ struct notifier_block pm_notify;
#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */
#define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */
/* Only used with MMC_CAP_DISABLE */
int enabled; /* host is enabled */
+ int rescan_disable; /* disable card detection */
int nesting_cnt; /* "enable" nesting count */
int en_dis_recurs; /* detect recursion */
unsigned int disable_delay; /* disable delay in msecs */
const struct mmc_bus_ops *bus_ops; /* current bus driver */
unsigned int bus_refs; /* reference counter */
+ unsigned int bus_resume_flags;
+#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0)
+#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1)
+
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
atomic_t sdio_irq_thread_abort;
struct dentry *debugfs_root;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ struct {
+ struct sdio_cis *cis;
+ struct sdio_cccr *cccr;
+ struct sdio_embedded_func *funcs;
+ int num_funcs;
+ } embedded_sdio_data;
+#endif
+
unsigned long private[0] ____cacheline_aligned;
};
extern void mmc_remove_host(struct mmc_host *);
extern void mmc_free_host(struct mmc_host *);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs);
+#endif
+
static inline void *mmc_priv(struct mmc_host *host)
{
return (void *)host->private;
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
+#define mmc_bus_needs_resume(host) ((host)->bus_resume_flags & MMC_BUSRESUME_NEEDS_RESUME)
+
+static inline void mmc_set_bus_resume_policy(struct mmc_host *host, int manual)
+{
+ if (manual)
+ host->bus_resume_flags |= MMC_BUSRESUME_MANUAL_RESUME;
+ else
+ host->bus_resume_flags &= ~MMC_BUSRESUME_MANUAL_RESUME;
+}
+
+extern int mmc_resume_bus(struct mmc_host *host);
extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
extern int mmc_resume_host(struct mmc_host *);
int mmc_host_enable(struct mmc_host *host);
int mmc_host_disable(struct mmc_host *host);
int mmc_host_lazy_disable(struct mmc_host *host);
+ int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *);
static inline void mmc_set_disable_delay(struct mmc_host *host,
unsigned int disable_delay)
uid_t uid;
struct user_namespace *user_ns;
- #ifdef CONFIG_USER_SCHED
- struct task_group *tg;
- #ifdef CONFIG_SYSFS
- struct kobject kobj;
- struct delayed_work work;
- #endif
- #endif
-
#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
* single CPU.
*/
unsigned int cpu_power;
+ unsigned int group_weight;
/*
* The CPUs this group covers.
struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p, int on_rq);
+ void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
extern cputime_t task_gtime(struct task_struct *p);
extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern int task_free_register(struct notifier_block *n);
+extern int task_free_unregister(struct notifier_block *n);
+
/*
* Per process flags
*/
- #define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
- /* Not implemented yet, only for 486*/
+ #define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
*/
extern unsigned long long cpu_clock(int cpu);
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ /*
+ * An interface for runtime opt-in to irq time accounting, based off
+ * of sched_clock. The opt-in is explicit to avoid a performance
+ * penalty on platforms with slow sched_clocks.
+ */
+ extern void enable_sched_clock_irqtime(void);
+ extern void disable_sched_clock_irqtime(void);
+ #else
+ static inline void enable_sched_clock_irqtime(void) {}
+ static inline void disable_sched_clock_irqtime(void) {}
+ #endif
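/* Editorial note: a platform opts in once at boot when its
 * sched_clock() is cheap (mainline does this from x86 TSC init once
 * the TSC is known stable); otherwise the hooks above stay no-ops and
 * irq time is simply not subtracted from clock_task.
 */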
+
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
extern int __cond_resched_softirq(void);
- #define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
- __cond_resched_softirq(); \
+ #define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
})
/*
extern void normalize_rt_tasks(void);
- #ifdef CONFIG_GROUP_SCHED
+ #ifdef CONFIG_CGROUP_SCHED
extern struct task_group init_task_group;
- #ifdef CONFIG_USER_SCHED
- extern struct task_group root_task_group;
- extern void set_tg_uid(struct user_struct *user);
- #endif
extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);
/* platform domain */
#define SND_SOC_DAPM_INPUT(wname) \
{ .id = snd_soc_dapm_input, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0}
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_OUTPUT(wname) \
{ .id = snd_soc_dapm_output, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0}
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_MIC(wname, wevent) \
{ .id = snd_soc_dapm_mic, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
#define SND_SOC_DAPM_HP(wname, wevent) \
{ .id = snd_soc_dapm_hp, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_SPK(wname, wevent) \
{ .id = snd_soc_dapm_spk, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_LINE(wname, wevent) \
{ .id = snd_soc_dapm_line, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
/* path domain */
/* events that are pre and post DAPM */
#define SND_SOC_DAPM_PRE(wname, wevent) \
{ .id = snd_soc_dapm_pre, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_POST(wname, wevent) \
{ .id = snd_soc_dapm_post, .name = wname, .kcontrols = NULL, \
- .num_kcontrols = 0, .event = wevent, \
+ .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */
.get = snd_soc_dapm_get_enum_double, \
.put = snd_soc_dapm_put_enum_double, \
.private_value = (unsigned long)&xenum }
+#define SOC_DAPM_ENUM_VIRT(xname, xenum) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_enum_double, \
+ .get = snd_soc_dapm_get_enum_virt, \
+ .put = snd_soc_dapm_put_enum_virt, \
+ .private_value = (unsigned long)&xenum }
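Usage sketch (editorial, names illustrative): a codec driver declaring a register-less mux whose state lives purely in DAPM, using the new virtual-enum control:

static const char *route_texts[] = { "Left", "Right" };
static const struct soc_enum route_enum =
	SOC_ENUM_SINGLE(SND_SOC_NOPM, 0, 2, route_texts);
static const struct snd_kcontrol_new route_mux =
	SOC_DAPM_ENUM_VIRT("Capture Route", route_enum);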
#define SOC_DAPM_VALUE_ENUM(xname, xenum) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_enum_double, \
int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_get_enum_virt(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_dapm_get_value_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
config HAVE_UNSTABLE_SCHED_CLOCK
bool
- config GROUP_SCHED
- bool "Group CPU scheduler"
- depends on EXPERIMENTAL
- default n
- help
- This feature lets CPU scheduler recognize task groups and control CPU
- bandwidth allocation to such task groups.
- In order to create a group from arbitrary set of processes, use
- CONFIG_CGROUPS. (See Control Group support.)
-
- config FAIR_GROUP_SCHED
- bool "Group scheduling for SCHED_OTHER"
- depends on GROUP_SCHED
- default GROUP_SCHED
-
- config RT_GROUP_SCHED
- bool "Group scheduling for SCHED_RR/FIFO"
- depends on EXPERIMENTAL
- depends on GROUP_SCHED
- default n
- help
- This feature lets you explicitly allocate real CPU bandwidth
- to users or control groups (depending on the "Basis for grouping tasks"
- setting below. If enabled, it will also make it impossible to
- schedule realtime tasks for non-root users until you allocate
- realtime bandwidth for them.
- See Documentation/scheduler/sched-rt-group.txt for more information.
-
- choice
- depends on GROUP_SCHED
- prompt "Basis for grouping tasks"
- default USER_SCHED
-
- config USER_SCHED
- bool "user id"
- help
- This option will choose userid as the basis for grouping
- tasks, thus providing equal CPU bandwidth to each user.
-
- config CGROUP_SCHED
- bool "Control groups"
- depends on CGROUPS
- help
- This option allows you to create arbitrary task groups
- using the "cgroup" pseudo filesystem and control
- the cpu bandwidth allocated to each such task group.
- Refer to Documentation/cgroups/cgroups.txt for more
- information on "cgroup" pseudo filesystem.
-
- endchoice
-
menuconfig CGROUPS
boolean "Control Group support"
help
Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
size is 4096bytes, 512k per 1Gbytes of swap.
+ menuconfig CGROUP_SCHED
+ bool "Group CPU scheduler"
+ depends on EXPERIMENTAL && CGROUPS
+ default n
+ help
+ This feature lets CPU scheduler recognize task groups and control CPU
+ bandwidth allocation to such task groups. It uses cgroups to group
+ tasks.
+
+ if CGROUP_SCHED
+ config FAIR_GROUP_SCHED
+ bool "Group scheduling for SCHED_OTHER"
+ depends on CGROUP_SCHED
+ default CGROUP_SCHED
+
+ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
+ to task groups. If enabled, it will also make it impossible to
+ schedule realtime tasks for non-root users until you allocate
+ realtime bandwidth for them.
+ See Documentation/scheduler/sched-rt-group.txt for more information.
+
+ endif #CGROUP_SCHED
+
endif # CGROUPS
config MM_OWNER
config ANON_INODES
bool
+config PANIC_TIMEOUT
+ int "Default panic timeout"
+ default 0
+ help
+ Set default panic timeout.
+
menuconfig EMBEDDED
bool "Configure standard kernel features (for small systems)"
help
option replaces shmem and tmpfs with the much simpler ramfs code,
which may be appropriate on small systems without swap.
+config ASHMEM
+ bool "Enable the Anonymous Shared Memory Subsystem"
+ default n
+ depends on SHMEM || TINY_SHMEM
+ help
+ The ashmem subsystem is a new shared memory allocator, similar to
+ POSIX SHM but with different behavior and sporting a simpler
+ file-based API.
+
config AIO
bool "Enable AIO support" if EMBEDDED
default y
per cpu and per node queues.
config SLUB
+ depends on BROKEN || NUMA || !DISCONTIGMEM
bool "SLUB (Unqueued Allocator)"
help
SLUB is a slab allocator that minimizes cache line usage
int ret;
struct cpuset *cs = cgroup_cs(cont);
+ if ((current != task) && (!capable(CAP_SYS_ADMIN))) {
+ const struct cred *cred = current_cred(), *tcred;
+
+ tcred = __task_cred(task);
+ if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EPERM;
+ }
+
if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
return -ENOSPC;
return -ENODEV;
trialcs = alloc_trial_cpuset(cs);
- if (!trialcs)
- return -ENOMEM;
+ if (!trialcs) {
+ retval = -ENOMEM;
+ goto out;
+ }
switch (cft->private) {
case FILE_CPULIST:
}
free_trial_cpuset(trialcs);
+ out:
cgroup_unlock();
return retval;
}
*/
static DEFINE_MUTEX(sched_domains_mutex);
- #ifdef CONFIG_GROUP_SCHED
+ #ifdef CONFIG_CGROUP_SCHED
#include <linux/cgroup.h>
/* task group related information */
struct task_group {
- #ifdef CONFIG_CGROUP_SCHED
struct cgroup_subsys_state css;
- #endif
-
- #ifdef CONFIG_USER_SCHED
- uid_t uid;
- #endif
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */
struct list_head children;
};
- #ifdef CONFIG_USER_SCHED
-
- /* Helper function to pass uid information to create_sched_user() */
- void set_tg_uid(struct user_struct *user)
- {
- user->tg->uid = user->uid;
- }
-
- /*
- * Root task group.
- * Every UID task group (including init_task_group aka UID-0) will
- * be a child to this group.
- */
- struct task_group root_task_group;
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
- /* Default task group's sched entity on each cpu */
- static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
- /* Default task group's cfs_rq on each cpu */
- static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
- #endif /* CONFIG_FAIR_GROUP_SCHED */
-
- #ifdef CONFIG_RT_GROUP_SCHED
- static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
- static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
- #endif /* CONFIG_RT_GROUP_SCHED */
- #else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group
- #endif /* CONFIG_USER_SCHED */
/* task_group_lock serializes add/remove of task groups and also changes to
* a task group's cpu shares.
}
#endif
- #ifdef CONFIG_USER_SCHED
- # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
- #else /* !CONFIG_USER_SCHED */
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
- #endif /* CONFIG_USER_SCHED */
/*
* A weight of 0 or 1 can cause arithmetics problems.
{
struct task_group *tg;
- #ifdef CONFIG_USER_SCHED
- rcu_read_lock();
- tg = __task_cred(p)->user->tg;
- rcu_read_unlock();
- #elif defined(CONFIG_CGROUP_SCHED)
+ #ifdef CONFIG_CGROUP_SCHED
tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
struct task_group, css);
#else
return NULL;
}
- #endif /* CONFIG_GROUP_SCHED */
+ #endif /* CONFIG_CGROUP_SCHED */
/* CFS-related fields in a runqueue */
struct cfs_rq {
struct mm_struct *prev_mm;
u64 clock;
+ u64 clock_task;
atomic_t nr_iowait;
struct root_domain *rd;
struct sched_domain *sd;
+ unsigned long cpu_power;
+
unsigned char idle_at_tick;
/* For active balancing */
int post_schedule;
u64 avg_idle;
#endif
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
+ #endif
+
/* calc_load related fields */
unsigned long calc_load_update;
long calc_load_active;
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
- static inline
- void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
- {
- rq->curr->sched_class->check_preempt_curr(rq, p, flags);
- }
+ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
static inline int cpu_of(struct rq *rq)
{
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))
+ static u64 irq_time_cpu(int cpu);
+ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time);
+
inline void update_rq_clock(struct rq *rq)
{
+ int cpu = cpu_of(rq);
+ u64 irq_time;
+
rq->clock = sched_clock_cpu(cpu);
+ irq_time = irq_time_cpu(cpu);
+ if (rq->clock - irq_time > rq->clock_task)
+ rq->clock_task = rq->clock - irq_time;
+
+ sched_irq_time_avg_update(rq, irq_time);
}
/*
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
+
+ static void sched_avg_update(struct rq *rq)
+ {
+ }
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
return max(rq->cpu_load[type-1], total);
}
- static struct sched_group *group_of(int cpu)
- {
- struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
- if (!sd)
- return NULL;
-
- return sd->groups;
- }
-
static unsigned long power_of(int cpu)
{
- struct sched_group *group = group_of(cpu);
-
- if (!group)
- return SCHED_LOAD_SCALE;
-
- return group->cpu_power;
+ return cpu_rq(cpu)->cpu_power;
}
static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
#endif
}
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+ /*
+ * There are no locks covering percpu hardirq/softirq time.
+ * They are only modified in account_system_vtime, on corresponding CPU
+ * with interrupts disabled. So, writes are safe.
+ * They are read and saved off onto struct rq in update_rq_clock().
+ * This may result in other CPU reading this CPU's irq time and can
+ * race with irq/account_system_vtime on this CPU. We would either get old
+ * or new value (or semi updated value on 32 bit) with a side effect of
+ * accounting a slice of irq time to wrong task when irq is in progress
+ * while we read rq->clock. That is a worthy compromise in place of having
+ * locks on each irq in account_system_time.
+ */
+ static DEFINE_PER_CPU(u64, cpu_hardirq_time);
+ static DEFINE_PER_CPU(u64, cpu_softirq_time);
+
+ static DEFINE_PER_CPU(u64, irq_start_time);
+ static int sched_clock_irqtime;
+
+ void enable_sched_clock_irqtime(void)
+ {
+ sched_clock_irqtime = 1;
+ }
+
+ void disable_sched_clock_irqtime(void)
+ {
+ sched_clock_irqtime = 0;
+ }
+
+ static u64 irq_time_cpu(int cpu)
+ {
+ if (!sched_clock_irqtime)
+ return 0;
+
+ return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
+ }
+
+ void account_system_vtime(struct task_struct *curr)
+ {
+ unsigned long flags;
+ int cpu;
+ u64 now, delta;
+
+ if (!sched_clock_irqtime)
+ return;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+ now = sched_clock_cpu(cpu);
+ delta = now - per_cpu(irq_start_time, cpu);
+ per_cpu(irq_start_time, cpu) = now;
+ /*
+ * We do not account for softirq time from ksoftirqd here.
+ * We want to continue accounting softirq time to ksoftirqd thread
+ * in that case, so as not to confuse the scheduler with a special
+ * task that does not consume any time but still wants to run.
+ */
+ if (hardirq_count())
+ per_cpu(cpu_hardirq_time, cpu) += delta;
+ else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+ per_cpu(cpu_softirq_time, cpu) += delta;
+
+ local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(account_system_vtime);
+
+ static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time)
+ {
+ if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) {
+ u64 delta_irq = curr_irq_time - rq->prev_irq_time;
+ rq->prev_irq_time = curr_irq_time;
+ sched_rt_avg_update(rq, delta_irq);
+ }
+ }
+
+ #else
+
+ static u64 irq_time_cpu(int cpu)
+ {
+ return 0;
+ }
+
+ static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { }
+
+ #endif
+
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
static void set_load_weight(struct task_struct *p)
{
if (task_has_rt_policy(p)) {
- p->se.load.weight = prio_to_weight[0] * 2;
- p->se.load.inv_weight = prio_to_wmult[0] >> 1;
+ p->se.load.weight = 0;
+ p->se.load.inv_weight = WMULT_CONST;
return;
}
if (p->sched_class != &fair_sched_class)
return 0;
+ if (unlikely(p->policy == SCHED_IDLE))
+ return 0;
+
/*
* Buddy candidates are cache hot:
*/
preempt_enable();
}
+ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+ {
+ const struct sched_class *class;
+
+ if (p->sched_class == rq->curr->sched_class) {
+ rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+ } else {
+ for_each_class(class) {
+ if (class == rq->curr->sched_class)
+ break;
+ if (class == p->sched_class) {
+ resched_task(rq->curr);
+ break;
+ }
+ }
+ }
+ }
+
#ifdef CONFIG_SMP
/*
* ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
this_rq->calc_load_update += LOAD_FREQ;
calc_load_account_active(this_rq);
}
+
+ sched_avg_update(this_rq);
}
#ifdef CONFIG_SMP
* 2) too many balance attempts have failed.
*/
- tsk_cache_hot = task_hot(p, rq->clock, sd);
+ tsk_cache_hot = task_hot(p, rq->clock_task, sd);
if (!tsk_cache_hot ||
sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
unsigned long this_load;
unsigned long this_load_per_task;
unsigned long this_nr_running;
+ unsigned long this_has_capacity;
+ unsigned int this_idle_cpus;
/* Statistics of the busiest group */
+ unsigned int busiest_idle_cpus;
unsigned long max_load;
unsigned long busiest_load_per_task;
unsigned long busiest_nr_running;
unsigned long busiest_group_capacity;
+ unsigned long busiest_has_capacity;
+ unsigned int busiest_group_weight;
int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
unsigned long sum_nr_running; /* Nr tasks running in the group */
unsigned long sum_weighted_load; /* Weighted load of group's tasks */
unsigned long group_capacity;
+ unsigned long idle_cpus;
+ unsigned long group_weight;
int group_imb; /* Is there an imbalance in the group ? */
+ int group_has_capacity; /* Is there extra capacity in the group? */
};
/**
struct rq *rq = cpu_rq(cpu);
u64 total, available;
- sched_avg_update(rq);
-
total = sched_avg_period() + (rq->clock - rq->age_stamp);
- available = total - rq->rt_avg;
+
+ if (unlikely(total < rq->rt_avg)) {
+ /* Ensures that power won't end up being negative */
+ available = 0;
+ } else {
+ available = total - rq->rt_avg;
+ }
if (unlikely((s64)total < SCHED_LOAD_SCALE))
total = SCHED_LOAD_SCALE;
if (!power)
power = 1;
+ cpu_rq(cpu)->cpu_power = power;
sdg->cpu_power = power;
}
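For example, with a base power of SCHED_LOAD_SCALE (1024), a CPU whose rt_avg (which with NONIRQ_POWER also accumulates the irq time recorded earlier) covers a quarter of the averaging period ends up with a cpu_power of about 768, so the load balancer treats it as three quarters of a CPU.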
int local_group, const struct cpumask *cpus,
int *balance, struct sg_lb_stats *sgs)
{
- unsigned long load, max_cpu_load, min_cpu_load;
+ unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
int i;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long avg_load_per_task = 0;
/* Tally up the load of all CPUs in the group */
max_cpu_load = 0;
min_cpu_load = ~0UL;
+ max_nr_running = 0;
for_each_cpu_and(i, sched_group_cpus(group), cpus) {
struct rq *rq = cpu_rq(i);
load = target_load(i, load_idx);
} else {
load = source_load(i, load_idx);
- if (load > max_cpu_load)
+ if (load > max_cpu_load) {
max_cpu_load = load;
+ max_nr_running = rq->nr_running;
+ }
if (min_cpu_load > load)
min_cpu_load = load;
}
sgs->group_load += load;
sgs->sum_nr_running += rq->nr_running;
sgs->sum_weighted_load += weighted_cpuload(i);
-
+ if (idle_cpu(i))
+ sgs->idle_cpus++;
}
/*
if (sgs->sum_nr_running)
avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
+ if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
- sgs->group_capacity =
- DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+ sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
+ sgs->group_weight = group->group_weight;
+
+ if (sgs->group_capacity > sgs->sum_nr_running)
+ sgs->group_has_capacity = 1;
}
/**
/*
* In case the child domain prefers tasks go to siblings
* first, lower the group capacity to one so that we'll try
- * and move all the excess tasks away.
+ * and move all the excess tasks away. We lower the capacity
+ * of a group only if the local group has the capacity to fit
+ * these excess tasks, i.e. nr_running < group_capacity. The
+ * extra check prevents the case where you always pull from the
+ * heaviest group when it is already under-utilized (possible
+ * when a large-weight task outweighs the other tasks on the system).
*/
- if (prefer_sibling)
+ if (prefer_sibling && !local_group && sds->this_has_capacity)
sgs.group_capacity = min(sgs.group_capacity, 1UL);
if (local_group) {
sds->this = group;
sds->this_nr_running = sgs.sum_nr_running;
sds->this_load_per_task = sgs.sum_weighted_load;
+ sds->this_has_capacity = sgs.group_has_capacity;
+ sds->this_idle_cpus = sgs.idle_cpus;
} else if (sgs.avg_load > sds->max_load &&
(sgs.sum_nr_running > sgs.group_capacity ||
sgs.group_imb)) {
sds->max_load = sgs.avg_load;
sds->busiest = group;
sds->busiest_nr_running = sgs.sum_nr_running;
+ sds->busiest_idle_cpus = sgs.idle_cpus;
sds->busiest_group_capacity = sgs.group_capacity;
+ sds->busiest_group_weight = sgs.group_weight;
sds->busiest_load_per_task = sgs.sum_weighted_load;
+ sds->busiest_has_capacity = sgs.group_has_capacity;
sds->group_imb = sgs.group_imb;
}
return fix_small_imbalance(sds, this_cpu, imbalance);
}
+
/******* find_busiest_group() helpers end here *********************/
/**
* 4) This group is busier than the average busyness at this
* sched_domain.
* 5) The imbalance is within the specified limit.
+ *
+ * Note: when doing newidle balance, if the local group has excess
+ * capacity (i.e. nr_running < group_capacity) and the busiest group
+ * does not have any capacity, we force a load balance to pull tasks
+ * to the local group. In this case, we skip past checks 3, 4 and 5.
*/
if (balance && !(*balance))
goto ret;
if (!sds.busiest || sds.busiest_nr_running == 0)
goto out_balanced;
+ /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
+ if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
+ !sds.busiest_has_capacity)
+ goto force_balance;
+
if (sds.this_load >= sds.max_load)
goto out_balanced;
if (sds.this_load >= sds.avg_load)
goto out_balanced;
- if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
- goto out_balanced;
+ /*
+ * In the CPU_NEWLY_IDLE case, use imbalance_pct to be conservative.
+ * To check for busy balance, use !idle_cpu() instead of CPU_NOT_IDLE,
+ * because HT siblings will report CPU_NOT_IDLE even when they are
+ * idle.
+ */
+ if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
+ if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+ goto out_balanced;
+ } else {
+ /*
+ * This cpu is idle. If the busiest group doesn't have
+ * more tasks than the number of available cpus, and there
+ * is no imbalance between this and the busiest group with
+ * respect to idle cpus, it is balanced.
+ */
+ if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
+ sds.busiest_nr_running <= sds.busiest_group_weight)
+ goto out_balanced;
+ }
+ force_balance:
/* Looks like there is an imbalance. Compute it */
calculate_imbalance(&sds, this_cpu, imbalance);
return sds.busiest;
if (!ld_moved) {
schedstat_inc(sd, lb_failed[idle]);
- sd->nr_balance_failed++;
+ /*
+ * Increment the failure counter only on periodic balance.
+ * We do not want newidle balance, which can be very
+ * frequent, to pollute the failure counter and cause
+ * excessive cache_hot migrations and active balances.
+ */
+ if (idle != CPU_NEWLY_IDLE)
+ sd->nr_balance_failed++;
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
if (task_current(rq, p)) {
update_rq_clock(rq);
- ns = rq->clock - p->se.exec_start;
+ ns = rq->clock_task - p->se.exec_start;
if ((s64)ns < 0)
ns = 0;
}
tmp = cputime_to_cputime64(cputime);
if (hardirq_count() - hardirq_offset)
cpustat->irq = cputime64_add(cpustat->irq, tmp);
- else if (softirq_count())
+ else if (in_serving_softirq())
cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
else
cpustat->system = cputime64_add(cpustat->system, tmp);
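Note: in_serving_softirq() is true only while a softirq handler is actually executing, whereas the old softirq_count() test is also non-zero in sections that merely run with bottom halves disabled, so such time was misclassified as softirq time.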
unsigned state;
state = p->state ? __ffs(p->state) + 1 : 0;
- printk(KERN_INFO "%-13.13s %c", p->comm,
+ printk(KERN_INFO "%-15.15s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
if (state == TASK_RUNNING)
idle->se.exec_start = sched_clock();
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+ /*
+ * We have a chicken-and-egg problem: even though we are
+ * holding rq->lock, the cpu isn't yet set to this cpu, so the
+ * lockdep check in task_group() will fail.
+ *
+ * This is a similar case to sched_fork(); alternatively we
+ * could use task_rq_lock() here and obtain the other rq->lock.
+ *
+ * Silence PROVE_RCU.
+ */
+ rcu_read_lock();
__set_task_cpu(idle, cpu);
+ rcu_read_unlock();
rq->curr = rq->idle = idle;
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
* The idle tasks have their own, simple scheduling class:
*/
idle->sched_class = &idle_sched_class;
- ftrace_graph_init_task(idle);
+ ftrace_graph_init_idle_task(idle, cpu);
}
/*
if (cpu != group_first_cpu(sd->groups))
return;
+ sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+
child = sd->child;
sd->groups->cpu_power = 0;
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
- #ifdef CONFIG_USER_SCHED
- alloc_size *= 2;
- #endif
#ifdef CONFIG_CPUMASK_OFFSTACK
alloc_size += num_possible_cpus() * cpumask_size();
#endif
init_task_group.cfs_rq = (struct cfs_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
- #ifdef CONFIG_USER_SCHED
- root_task_group.se = (struct sched_entity **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
-
- root_task_group.cfs_rq = (struct cfs_rq **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
- #endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
init_task_group.rt_se = (struct sched_rt_entity **)ptr;
init_task_group.rt_rq = (struct rt_rq **)ptr;
ptr += nr_cpu_ids * sizeof(void **);
- #ifdef CONFIG_USER_SCHED
- root_task_group.rt_se = (struct sched_rt_entity **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
-
- root_task_group.rt_rq = (struct rt_rq **)ptr;
- ptr += nr_cpu_ids * sizeof(void **);
- #endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
#ifdef CONFIG_CPUMASK_OFFSTACK
for_each_possible_cpu(i) {
#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&init_task_group.rt_bandwidth,
global_rt_period(), global_rt_runtime());
- #ifdef CONFIG_USER_SCHED
- init_rt_bandwidth(&root_task_group.rt_bandwidth,
- global_rt_period(), RUNTIME_INF);
- #endif /* CONFIG_USER_SCHED */
#endif /* CONFIG_RT_GROUP_SCHED */
- #ifdef CONFIG_GROUP_SCHED
+ #ifdef CONFIG_CGROUP_SCHED
list_add(&init_task_group.list, &task_groups);
INIT_LIST_HEAD(&init_task_group.children);
- #ifdef CONFIG_USER_SCHED
- INIT_LIST_HEAD(&root_task_group.children);
- init_task_group.parent = &root_task_group;
- list_add(&init_task_group.siblings, &root_task_group.children);
- #endif /* CONFIG_USER_SCHED */
- #endif /* CONFIG_GROUP_SCHED */
+ #endif /* CONFIG_CGROUP_SCHED */
#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
* directly in rq->cfs (i.e init_task_group->se[] = NULL).
*/
init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
- #elif defined CONFIG_USER_SCHED
- root_task_group.shares = NICE_0_LOAD;
- init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, 0, NULL);
- /*
- * In case of task-groups formed thr' the user id of tasks,
- * init_task_group represents tasks belonging to root user.
- * Hence it forms a sibling of all subsequent groups formed.
- * In this case, init_task_group gets only a fraction of overall
- * system cpu resource, based on the weight assigned to root
- * user's cpu share (INIT_TASK_GROUP_LOAD). This is accomplished
- * by letting tasks of init_task_group sit in a separate cfs_rq
- * (init_tg_cfs_rq) and having one entity represent this group of
- * tasks in rq->cfs (i.e init_task_group->se[] != NULL).
- */
- init_tg_cfs_entry(&init_task_group,
- &per_cpu(init_tg_cfs_rq, i),
- &per_cpu(init_sched_entity, i), i, 1,
- root_task_group.se[i]);
-
#endif
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_SMP
rq->sd = NULL;
rq->rd = NULL;
+ rq->cpu_power = SCHED_LOAD_SCALE;
rq->post_schedule = 0;
rq->active_balance = 0;
rq->next_balance = jiffies;
return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
}
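+/*
+ * __might_sleep() warnings are suppressed until the early initcalls have
+ * run; once __might_sleep_init() has fired they are reported even while
+ * system_state is still SYSTEM_BOOTING.
+ */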
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+ __might_sleep_init_called = 1;
+ return 0;
+}
+early_initcall(__might_sleep_init);
+
void __might_sleep(char *file, int line, int preempt_offset)
{
#ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
- system_state != SYSTEM_RUNNING || oops_in_progress)
+ oops_in_progress)
+ return;
+ if (system_state != SYSTEM_RUNNING &&
+ (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
}
#endif /* CONFIG_RT_GROUP_SCHED */
- #ifdef CONFIG_GROUP_SCHED
+ #ifdef CONFIG_CGROUP_SCHED
static void free_sched_group(struct task_group *tg)
{
free_fair_sched_group(tg);
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
- set_task_rq(tsk, task_cpu(tsk));
-
#ifdef CONFIG_FAIR_GROUP_SCHED
- if (tsk->sched_class->moved_group)
- tsk->sched_class->moved_group(tsk, on_rq);
+ if (tsk->sched_class->task_move_group)
+ tsk->sched_class->task_move_group(tsk, on_rq);
+ else
#endif
+ set_task_rq(tsk, task_cpu(tsk));
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
task_rq_unlock(rq, &flags);
}
- #endif /* CONFIG_GROUP_SCHED */
+ #endif /* CONFIG_CGROUP_SCHED */
#ifdef CONFIG_FAIR_GROUP_SCHED
static void __set_se_shares(struct sched_entity *se, unsigned long shares)
runtime = d->rt_runtime;
}
- #ifdef CONFIG_USER_SCHED
- if (tg == &root_task_group) {
- period = global_rt_period();
- runtime = global_rt_runtime();
- }
- #endif
-
/*
* Cannot have more runtime than the period.
*/
static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
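+ /* Unprivileged callers may only move a task whose uid or saved uid
+ * matches their own effective uid; CAP_SYS_NICE overrides. */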
+ if ((current != tsk) && (!capable(CAP_SYS_NICE))) {
+ const struct cred *cred = current_cred(), *tcred;
+
+ tcred = __task_cred(tsk);
+
+ if (cred->euid != tcred->uid && cred->euid != tcred->suid)
+ return -EPERM;
+ }
+
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
return -EINVAL;
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>
+/* DEBUG */
+#define RESTART_DEBUG
+#ifdef RESTART_DEBUG
+#define restart_dbg(format, arg...) \
+	printk(KERN_DEBUG "RESTART_DEBUG: " format "\n", ## arg)
+#else
+#define restart_dbg(format, arg...) do {} while (0)
+#endif
+
+
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b) (-EINVAL)
*/
void kernel_restart(char *cmd)
{
+	/* debug trace */
+	restart_dbg("%s->%d->cmd=%s", __func__, __LINE__, cmd);
+
kernel_restart_prepare(cmd);
if (!cmd)
- printk(KERN_EMERG "Restarting system.\n");
+ printk( "Restarting system.\n");
else
- printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
+ printk( "Restarting system with command '%s'.\n", cmd);
machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);
lock_kernel();
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
+		/* debug trace */
+		restart_dbg("%s->%d->cmd=%x", __func__, __LINE__, cmd);
+
kernel_restart(NULL);
break;
panic("cannot halt");
case LINUX_REBOOT_CMD_POWER_OFF:
+		/* debug trace */
+		restart_dbg("%s->%d->cmd=%x", __func__, __LINE__, cmd);
+
kernel_power_off();
unlock_kernel();
do_exit(0);
return -EFAULT;
}
buffer[sizeof(buffer) - 1] = '\0';
-
+	/* debug trace */
+	restart_dbg("%s->%d->cmd=%x args=%s", __func__, __LINE__, cmd, buffer);
+
kernel_restart(buffer);
break;
if (!new_user)
return -EAGAIN;
- if (!task_can_switch_user(new_user, current)) {
- free_uid(new_user);
- return -EINVAL;
- }
-
if (atomic_read(&new_user->processes) >=
current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
new_user != INIT_USER) {
}
EXPORT_SYMBOL(timecounter_cyc2time);
+/**
+ * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
+ * @mult: pointer to mult variable
+ * @shift: pointer to shift variable
+ * @from: frequency to convert from
+ * @to: frequency to convert to
+ * @minsec: guaranteed runtime conversion range in seconds
+ *
+ * The function evaluates the shift/mult pair for the scaled math
+ * operations of clocksources and clockevents.
+ *
+ * @to and @from are frequency values in HZ. For clock sources @to is
+ * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
+ * events @to is the counter frequency and @from is NSEC_PER_SEC.
+ *
+ * The @minsec conversion range argument controls the time frame in
+ * seconds which must be covered by the runtime conversion with the
+ * calculated mult and shift factors. This guarantees that no 64bit
+ * overflow happens when the input value of the conversion is
+ * multiplied with the calculated mult factor. Larger ranges may
+ * reduce the conversion accuracy by choosing smaller mult and shift
+ * factors.
+ */
+void
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+{
+ u64 tmp;
+ u32 sft, sftacc = 32;
+
+ /*
+ * Calculate the shift factor which is limiting the conversion
+ * range:
+ */
+ tmp = ((u64)minsec * from) >> 32;
+ while (tmp) {
+ tmp >>= 1;
+ sftacc--;
+ }
+
+ /*
+ * Find the conversion shift/mult pair which has the best
+ * accuracy and fits the minsec conversion range:
+ */
+ for (sft = 32; sft > 0; sft--) {
+ tmp = (u64) to << sft;
+ tmp += from / 2;
+ do_div(tmp, from);
+ if ((tmp >> sftacc) == 0)
+ break;
+ }
+ *mult = tmp;
+ *shift = sft;
+}
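To see the mult/shift pair in action, here is the same math re-implemented as a standalone userspace program (do_div() replaced by plain 64-bit division; the 24 MHz counter and the 600 s range are made-up values). Converting 24000 cycles, i.e. 1 ms, should print roughly 1000000 ns:

#include <stdio.h>
#include <stdint.h>

static void calc_mult_shift(uint32_t *mult, uint32_t *shift,
			    uint32_t from, uint32_t to, uint32_t minsec)
{
	uint64_t tmp;
	uint32_t sft, sftacc = 32;

	/* shift factor limiting the conversion range */
	tmp = ((uint64_t)minsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/* best-accuracy pair that still fits the range */
	for (sft = 32; sft > 0; sft--) {
		tmp = ((uint64_t)to << sft) + from / 2;
		tmp /= from;
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}

int main(void)
{
	uint32_t mult, shift;

	/* 24 MHz counter -> nanoseconds, valid for at least 600 seconds */
	calc_mult_shift(&mult, &shift, 24000000, 1000000000, 600);
	printf("mult=%u shift=%u; 24000 cycles -> %llu ns\n", mult, shift,
	       (unsigned long long)(((uint64_t)24000 * mult) >> shift));
	return 0;
}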
+
/*[Clocksource internal variables]---------
* curr_clocksource:
* currently selected clocksource.
mutex_lock(&clocksource_mutex);
clocksource_enqueue(cs);
- clocksource_select();
clocksource_enqueue_watchdog(cs);
+ clocksource_select();
mutex_unlock(&clocksource_mutex);
return 0;
}
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = file;
+ vma->vm_ops = &shmem_vm_ops;
+}
+
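A sketch of the intended caller pattern for the new helper: a driver's mmap handler backing an anonymous mapping with its own shmem file (the function and object names here are illustrative):

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct file *shm;

	shm = shmem_file_setup("example", vma->vm_end - vma->vm_start,
			       vma->vm_flags);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* drops any old vma->vm_file reference, installs the shmem
	 * file and shmem_vm_ops */
	shmem_set_file(vma, shm);
	return 0;
}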
/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
file = shmem_file_setup("dev/zero", size, vma->vm_flags);
if (IS_ERR(file))
return PTR_ERR(file);
	shmem_set_file(vma, file);
+	vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
+ __u16 pkt_type = sco_pi(sk)->pkt_type;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
- else
+ else {
type = SCO_LINK;
+ pkt_type &= SCO_ESCO_MASK;
+ }
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, pkt_type, dst,
+ BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (!hcon)
goto done;
return 0;
}
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
- struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+ struct sockaddr_sco sa;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa->sco_bdaddr;
- int err = 0;
+ bdaddr_t *src = &sa.sco_bdaddr;
+ int len, err = 0;
- BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
+ BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
+ memset(&sa, 0, sizeof(sa));
+ len = min_t(unsigned int, sizeof(sa), alen);
+ memcpy(&sa, addr, len);
+
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+ bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
+ sco_pi(sk)->pkt_type = sa.sco_pkt_type;
sk->sk_state = BT_BOUND;
}
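With sockaddr_sco extended to carry sco_pkt_type, userspace can request specific (e)SCO packet types when binding or connecting. A hypothetical caller, assuming the matching header change that adds the sco_pkt_type field (the bitmask follows the HCI packet-type encoding):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

static int sco_connect_pkt_type(const bdaddr_t *dst, uint16_t pkt_type)
{
	struct sockaddr_sco addr;
	int sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);

	if (sk < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sco_family = AF_BLUETOOTH;
	bacpy(&addr.sco_bdaddr, dst);
	addr.sco_pkt_type = pkt_type;	/* HCI packet-type bits */

	if (connect(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("sco connect");
		return -1;
	}
	return sk;
}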
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
- struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- int err = 0;
-
+ struct sockaddr_sco sa;
+ int len, err = 0;
BT_DBG("sk %p", sk);
- if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_sco))
+ if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
- if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
- return -EBADFD;
-
- if (sk->sk_type != SOCK_SEQPACKET)
- return -EINVAL;
+ memset(&sa, 0, sizeof(sa));
+ len = min_t(unsigned int, sizeof(sa), alen);
+ memcpy(&sa, addr, len);
lock_sock(sk);
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+ err = -EBADFD;
+ goto done;
+ }
+
/* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+ bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
+ sco_pi(sk)->pkt_type = sa.sco_pkt_type;
if ((err = sco_connect(sk)))
goto done;
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+ sa->sco_pkt_type = sco_pi(sk)->pkt_type;
return 0;
}
break;
}
+ memset(&cinfo, 0, sizeof(cinfo));
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
#include <net/arp.h>
#include <net/ip.h>
+#include <net/tcp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
case SIOCSIFBRDADDR: /* Set the broadcast address */
case SIOCSIFDSTADDR: /* Set the destination address */
case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ case SIOCKILLADDR: /* Nuke all sockets on this address */
ret = -EACCES;
if (!capable(CAP_NET_ADMIN))
goto out;
}
ret = -EADDRNOTAVAIL;
- if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
+ if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
+ && cmd != SIOCKILLADDR)
goto done;
switch (cmd) {
inet_insert_ifa(ifa);
}
break;
+ case SIOCKILLADDR: /* Nuke all connections on this address */
+ ret = 0;
+ tcp_v4_nuke_addr(sin->sin_addr.s_addr);
+ break;
}
done:
rtnl_unlock();
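A hypothetical userspace invocation of the new ioctl, which requires CAP_NET_ADMIN; the SIOCKILLADDR number must come from the paired sockios.h change (0x8939 is assumed here):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>

#ifndef SIOCKILLADDR
#define SIOCKILLADDR 0x8939	/* assumed value from the paired header change */
#endif

/* Reset all TCP connections bound to the given local IPv4 address. */
static int kill_addr(const char *ifname, const char *ip)
{
	struct ifreq ifr;
	struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	sin->sin_family = AF_INET;
	inet_aton(ip, &sin->sin_addr);

	ret = ioctl(fd, SIOCKILLADDR, &ifr);
	close(fd);
	return ret;
}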
return mtu >= 68;
}
+ static void inetdev_send_gratuitous_arp(struct net_device *dev,
+ struct in_device *in_dev)
+ {
+ struct in_ifaddr *ifa = in_dev->ifa_list;
+
+ if (!ifa)
+ return;
+
+ arp_send(ARPOP_REQUEST, ETH_P_ARP,
+ ifa->ifa_address, dev,
+ ifa->ifa_address, NULL,
+ dev->dev_addr, NULL);
+ }
+
/* Called only under RTNL semaphore */
static int inetdev_event(struct notifier_block *this, unsigned long event,
}
ip_mc_up(in_dev);
/* fall through */
- case NETDEV_NOTIFY_PEERS:
case NETDEV_CHANGEADDR:
+ if (!IN_DEV_ARP_NOTIFY(in_dev))
+ break;
+ /* fall through */
+ case NETDEV_NOTIFY_PEERS:
/* Send gratuitous ARP to notify of link change */
- if (IN_DEV_ARP_NOTIFY(in_dev)) {
- struct in_ifaddr *ifa = in_dev->ifa_list;
-
- if (ifa)
- arp_send(ARPOP_REQUEST, ETH_P_ARP,
- ifa->ifa_address, dev,
- ifa->ifa_address, NULL,
- dev->dev_addr, NULL);
- }
+ inetdev_send_gratuitous_arp(dev, in_dev);
break;
case NETDEV_DOWN:
ip_mc_down(in_dev);
static const struct soc_enum speaker_mode =
SOC_ENUM_SINGLE(WM8993_SPKMIXR_ATTENUATION, 8, 2, speaker_mode_text);
-static void wait_for_dc_servo(struct snd_soc_codec *codec)
+static void wait_for_dc_servo(struct snd_soc_codec *codec, unsigned int op)
{
unsigned int reg;
int count = 0;
+ unsigned int val;
+
+ val = op | WM8993_DCS_ENA_CHAN_0 | WM8993_DCS_ENA_CHAN_1;
+
+ /* Trigger the command */
+ snd_soc_write(codec, WM8993_DC_SERVO_0, val);
+
- dev_dbg(codec->dev, "Waiting for DC servo...\n");
+ dev_vdbg(codec->dev, "Waiting for DC servo...\n");
do {
count++;
- msleep(1);
- reg = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_0);
- dev_dbg(codec->dev, "DC servo status: %x\n", reg);
- } while ((reg & WM8993_DCS_CAL_COMPLETE_MASK)
- != WM8993_DCS_CAL_COMPLETE_MASK && count < 1000);
-
- if ((reg & WM8993_DCS_CAL_COMPLETE_MASK)
- != WM8993_DCS_CAL_COMPLETE_MASK)
+ msleep(10);
+ reg = snd_soc_read(codec, WM8993_DC_SERVO_0);
+ dev_vdbg(codec->dev, "DC servo: %x\n", reg);
+ } while (reg & op && count < 400);
+
+ if (reg & op)
dev_err(codec->dev, "Timed out waiting for DC Servo\n");
}
+/*
+ * Startup calibration of the DC servo
+ */
+static void calibrate_dc_servo(struct snd_soc_codec *codec)
+{
+ struct wm_hubs_data *hubs = codec->private_data;
+ u16 reg, reg_l, reg_r, dcs_cfg;
+
+ /* Set for 32 series updates */
+ snd_soc_update_bits(codec, WM8993_DC_SERVO_1,
+ WM8993_DCS_SERIES_NO_01_MASK,
+ 32 << WM8993_DCS_SERIES_NO_01_SHIFT);
+ wait_for_dc_servo(codec,
+ WM8993_DCS_TRIG_SERIES_0 | WM8993_DCS_TRIG_SERIES_1);
+
+ /* Apply correction to DC servo result */
+ if (hubs->dcs_codes) {
+ dev_dbg(codec->dev, "Applying %d code DC servo correction\n",
+ hubs->dcs_codes);
+
+ /* Different chips in the family support different
+ * readback methods.
+ */
+ switch (hubs->dcs_readback_mode) {
+ case 0:
+ reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1)
+ & WM8993_DCS_INTEG_CHAN_0_MASK;
+ reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2)
+ & WM8993_DCS_INTEG_CHAN_1_MASK;
+ break;
+ case 1:
+ reg = snd_soc_read(codec, WM8993_DC_SERVO_3);
+ reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK)
+ >> WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+ reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
+ break;
+ default:
+ WARN(1, "Unknown DCS readback method\n");
+ /* reg_l/reg_r were never read; don't apply a bogus correction */
+ return;
+ }
+
+ /* HPOUT1L */
+ if (reg_l + hubs->dcs_codes > 0 &&
+ reg_l + hubs->dcs_codes < 0xff)
+ reg_l += hubs->dcs_codes;
+ dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+
+ /* HPOUT1R */
+ if (reg_r + hubs->dcs_codes > 0 &&
+ reg_r + hubs->dcs_codes < 0xff)
+ reg_r += hubs->dcs_codes;
+ dcs_cfg |= reg_r;
+
+ /* Do it */
+ snd_soc_write(codec, WM8993_DC_SERVO_3, dcs_cfg);
+ wait_for_dc_servo(codec,
+ WM8993_DCS_TRIG_DAC_WR_0 |
+ WM8993_DCS_TRIG_DAC_WR_1);
+ }
+}
+
/*
* Update the DC servo calibration on gain changes
*/
static int wm8993_put_dc_servo(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm_hubs_data *hubs = codec->private_data;
int ret;
ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
+ /* If we're applying an offset correction then updating the
+ * calibration is likely to introduce further offsets. */
+ if (hubs->dcs_codes)
+ return ret;
+
/* Only need to do this if the outputs are active */
if (snd_soc_read(codec, WM8993_POWER_MANAGEMENT_1)
& (WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA))
line_tlv),
};
+static int hp_supply_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm_hubs_data *hubs = codec->private_data;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ switch (hubs->hp_startup_mode) {
+ case 0:
+ break;
+ case 1:
+ /* Enable the headphone amp */
+ snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
+ WM8993_HPOUT1L_ENA |
+ WM8993_HPOUT1R_ENA,
+ WM8993_HPOUT1L_ENA |
+ WM8993_HPOUT1R_ENA);
+
+ /* Enable the second stage */
+ snd_soc_update_bits(codec, WM8993_ANALOGUE_HP_0,
+ WM8993_HPOUT1L_DLY |
+ WM8993_HPOUT1R_DLY,
+ WM8993_HPOUT1L_DLY |
+ WM8993_HPOUT1R_DLY);
+ break;
+ default:
+ dev_err(codec->dev, "Unknown HP startup mode %d\n",
+ hubs->hp_startup_mode);
+ break;
+ }
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
+ WM8993_CP_ENA, 0);
+ break;
+ }
+
+ return 0;
+}
+
static int hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA,
WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA);
reg |= WM8993_HPOUT1L_DLY | WM8993_HPOUT1R_DLY;
snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg);
- /* Start the DC servo */
- snd_soc_update_bits(codec, WM8993_DC_SERVO_0,
- 0xFFFF,
- WM8993_DCS_ENA_CHAN_0 |
- WM8993_DCS_ENA_CHAN_1 |
- WM8993_DCS_TRIG_STARTUP_1 |
- WM8993_DCS_TRIG_STARTUP_0);
- wait_for_dc_servo(codec);
+ /* Smallest supported update interval */
+ snd_soc_update_bits(codec, WM8993_DC_SERVO_1,
+ WM8993_DCS_TIMER_PERIOD_01_MASK, 1);
+
+ calibrate_dc_servo(codec);
reg |= WM8993_HPOUT1R_OUTP | WM8993_HPOUT1R_RMV_SHORT |
WM8993_HPOUT1L_OUTP | WM8993_HPOUT1L_RMV_SHORT;
break;
case SND_SOC_DAPM_PRE_PMD:
- reg &= ~(WM8993_HPOUT1L_RMV_SHORT |
- WM8993_HPOUT1L_DLY |
- WM8993_HPOUT1L_OUTP |
- WM8993_HPOUT1R_RMV_SHORT |
- WM8993_HPOUT1R_DLY |
- WM8993_HPOUT1R_OUTP);
+ snd_soc_update_bits(codec, WM8993_ANALOGUE_HP_0,
+ WM8993_HPOUT1L_DLY |
+ WM8993_HPOUT1R_DLY |
+ WM8993_HPOUT1L_RMV_SHORT |
+ WM8993_HPOUT1R_RMV_SHORT, 0);
- snd_soc_update_bits(codec, WM8993_DC_SERVO_0,
- 0xffff, 0);
+ snd_soc_update_bits(codec, WM8993_ANALOGUE_HP_0,
+ WM8993_HPOUT1L_OUTP |
+ WM8993_HPOUT1R_OUTP, 0);
- snd_soc_write(codec, WM8993_ANALOGUE_HP_0, reg);
snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_1,
WM8993_HPOUT1L_ENA | WM8993_HPOUT1R_ENA,
0);
-
- snd_soc_update_bits(codec, WM8993_CHARGE_PUMP_1,
- WM8993_CP_ENA, 0);
break;
}
SND_SOC_DAPM_INPUT("IN1LN"),
SND_SOC_DAPM_INPUT("IN1LP"),
SND_SOC_DAPM_INPUT("IN2LN"),
-SND_SOC_DAPM_INPUT("IN2LP/VXRN"),
+SND_SOC_DAPM_INPUT("IN2LP:VXRN"),
SND_SOC_DAPM_INPUT("IN1RN"),
SND_SOC_DAPM_INPUT("IN1RP"),
SND_SOC_DAPM_INPUT("IN2RN"),
-SND_SOC_DAPM_INPUT("IN2RP/VXRP"),
+SND_SOC_DAPM_INPUT("IN2RP:VXRP"),
SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0),
SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0),
SND_SOC_DAPM_PGA("Left Output PGA", WM8993_POWER_MANAGEMENT_3, 7, 0, NULL, 0),
SND_SOC_DAPM_PGA("Right Output PGA", WM8993_POWER_MANAGEMENT_3, 6, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("Headphone Supply", SND_SOC_NOPM, 0, 0, hp_supply_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0,
NULL, 0,
hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
};
static const struct snd_soc_dapm_route analogue_routes[] = {
+ { "IN1L PGA", NULL , "MICBIAS2" },
+ { "IN1R PGA", NULL , "MICBIAS1" },
+ { "MICBIAS2", NULL , "IN1LP"},
+ { "MICBIAS2", NULL , "IN1LN"},
+ { "MICBIAS1", NULL , "IN1RP"},
+ { "MICBIAS1", NULL , "IN1RN"},
+
{ "IN1L PGA", "IN1LP Switch", "IN1LP" },
{ "IN1L PGA", "IN1LN Switch", "IN1LN" },
{ "IN1R PGA", "IN1RP Switch", "IN1RP" },
{ "IN1R PGA", "IN1RN Switch", "IN1RN" },
- { "IN2L PGA", "IN2LP Switch", "IN2LP/VXRN" },
+ { "IN2L PGA", "IN2LP Switch", "IN2LP:VXRN" },
{ "IN2L PGA", "IN2LN Switch", "IN2LN" },
- { "IN2R PGA", "IN2RP Switch", "IN2RP/VXRP" },
+ { "IN2R PGA", "IN2RP Switch", "IN2RP:VXRP" },
{ "IN2R PGA", "IN2RN Switch", "IN2RN" },
- { "Direct Voice", NULL, "IN2LP/VXRN" },
- { "Direct Voice", NULL, "IN2RP/VXRP" },
+ { "Direct Voice", NULL, "IN2LP:VXRN" },
+ { "Direct Voice", NULL, "IN2RP:VXRP" },
{ "MIXINL", "IN1L Switch", "IN1L PGA" },
{ "MIXINL", "IN2L Switch", "IN2L PGA" },
{ "Left Output Mixer", "Right Input Switch", "MIXINR" },
{ "Left Output Mixer", "IN2RN Switch", "IN2RN" },
{ "Left Output Mixer", "IN2LN Switch", "IN2LN" },
- { "Left Output Mixer", "IN2LP Switch", "IN2LP/VXRN" },
+ { "Left Output Mixer", "IN2LP Switch", "IN2LP:VXRN" },
{ "Left Output Mixer", "IN1L Switch", "IN1L PGA" },
{ "Left Output Mixer", "IN1R Switch", "IN1R PGA" },
{ "Right Output Mixer", "Right Input Switch", "MIXINR" },
{ "Right Output Mixer", "IN2LN Switch", "IN2LN" },
{ "Right Output Mixer", "IN2RN Switch", "IN2RN" },
- { "Right Output Mixer", "IN2RP Switch", "IN2RP/VXRP" },
+ { "Right Output Mixer", "IN2RP Switch", "IN2RP:VXRP" },
{ "Right Output Mixer", "IN1L Switch", "IN1L PGA" },
{ "Right Output Mixer", "IN1R Switch", "IN1R PGA" },
{ "SPKL", "Input Switch", "MIXINL" },
{ "SPKL", "IN1LP Switch", "IN1LP" },
- { "SPKL", "Output Switch", "Left Output Mixer" },
+ { "SPKL", "Output Switch", "Left Output PGA" },
{ "SPKL", NULL, "TOCLK" },
{ "SPKR", "Input Switch", "MIXINR" },
{ "SPKR", "IN1RP Switch", "IN1RP" },
- { "SPKR", "Output Switch", "Right Output Mixer" },
+ { "SPKR", "Output Switch", "Right Output PGA" },
{ "SPKR", NULL, "TOCLK" },
{ "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
{ "SPKOUTRP", NULL, "SPKR Driver" },
{ "SPKOUTRN", NULL, "SPKR Driver" },
- { "Left Headphone Mux", "Mixer", "Left Output Mixer" },
- { "Right Headphone Mux", "Mixer", "Right Output Mixer" },
+ { "Left Headphone Mux", "Mixer", "Left Output PGA" },
+ { "Right Headphone Mux", "Mixer", "Right Output PGA" },
{ "Headphone PGA", NULL, "Left Headphone Mux" },
{ "Headphone PGA", NULL, "Right Headphone Mux" },
{ "Headphone PGA", NULL, "CLK_SYS" },
+ { "Headphone PGA", NULL, "Headphone Supply" },
{ "HPOUT1L", NULL, "Headphone PGA" },
{ "HPOUT1R", NULL, "Headphone PGA" },
}
EXPORT_SYMBOL_GPL(wm_hubs_add_analogue_routes);
+int wm_hubs_handle_analogue_pdata(struct snd_soc_codec *codec,
+ int lineout1_diff, int lineout2_diff,
+ int lineout1fb, int lineout2fb,
+ int jd_scthr, int jd_thr, int micbias1_lvl,
+ int micbias2_lvl)
+{
+ if (!lineout1_diff)
+ snd_soc_update_bits(codec, WM8993_LINE_MIXER1,
+ WM8993_LINEOUT1_MODE,
+ WM8993_LINEOUT1_MODE);
+ if (!lineout2_diff)
+ snd_soc_update_bits(codec, WM8993_LINE_MIXER2,
+ WM8993_LINEOUT2_MODE,
+ WM8993_LINEOUT2_MODE);
+
+	/* If the line outputs are differential then we aren't presenting
+	 * VMID as an output and could disable it; left enabled here.
+	 *
+	 * if (lineout1_diff && lineout2_diff)
+	 *	codec->idle_bias_off = 1;
+	 */
+
+ if (lineout1fb)
+ snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
+ WM8993_LINEOUT1_FB, WM8993_LINEOUT1_FB);
+
+ if (lineout2fb)
+ snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
+ WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
+
+ snd_soc_update_bits(codec, WM8993_MICBIAS,
+ WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
+ WM8993_MICB1_LVL | WM8993_MICB2_LVL,
+ jd_scthr << WM8993_JD_SCTHR_SHIFT |
+ jd_thr << WM8993_JD_THR_SHIFT |
+ micbias1_lvl |
+ micbias2_lvl << WM8993_MICB2_LVL_SHIFT);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wm_hubs_handle_analogue_pdata);
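A sketch of how a codec driver's probe could forward its platform data into this helper (the wm8993_platform_data field names mirror the parameter list and are assumptions here):

static void example_apply_pdata(struct snd_soc_codec *codec,
				struct wm8993_platform_data *pdata)
{
	wm_hubs_handle_analogue_pdata(codec,
				      pdata->lineout1_diff,
				      pdata->lineout2_diff,
				      pdata->lineout1fb,
				      pdata->lineout2fb,
				      pdata->jd_scthr,
				      pdata->jd_thr,
				      pdata->micbias1_lvl,
				      pdata->micbias2_lvl);
}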
+
MODULE_DESCRIPTION("Shared support for Wolfson hubs products");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");