Accounting groups can be created by first mounting the cgroup filesystem.
- # mkdir /cgroups
- # mount -t cgroup -ocpuacct none /cgroups
-
- With the above step, the initial or the parent accounting group
- becomes visible at /cgroups. At bootup, this group includes all the
- tasks in the system. /cgroups/tasks lists the tasks in this cgroup.
- /cgroups/cpuacct.usage gives the CPU time (in nanoseconds) obtained by
- this group which is essentially the CPU time obtained by all the tasks
+ # mount -t cgroup -ocpuacct none /sys/fs/cgroup
+
+ With the above step, the initial or the parent accounting group becomes
+ visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in
+ the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup.
+ /sys/fs/cgroup/cpuacct.usage gives the CPU time (in nanoseconds) obtained
+ by this group, which is essentially the CPU time obtained by all the tasks
in the system.
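For example, the system-wide figure can be read back directly (the value
shown is illustrative):

# cat /sys/fs/cgroup/cpuacct.usage
13180799540144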
- New accounting groups can be created under the parent group /cgroups.
+ New accounting groups can be created under the parent group /sys/fs/cgroup.
- # cd /cgroups
+ # cd /sys/fs/cgroup
# mkdir g1
 # echo $$ > g1/tasks
The above steps create a new group g1 and move the current shell
process (bash) into it. CPU time consumed by this bash and its children
can be obtained from g1/cpuacct.usage and the same is accumulated in
- /cgroups/cpuacct.usage also.
+ /sys/fs/cgroup/cpuacct.usage also.
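For example (values are illustrative; the parent's counter includes g1's
contribution):

# cat g1/cpuacct.usage
1927823284
# cat /sys/fs/cgroup/cpuacct.usage
13180799540144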
cpuacct.stat file lists a few statistics which further divide the
CPU time obtained by the cgroup into user and system times. Currently,
user and system times are reported in USER_HZ units.
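For example (illustrative values; divide by USER_HZ, typically 100 and
reported by getconf CLK_TCK, to convert to seconds):

# cat /sys/fs/cgroup/cpuacct.stat
user 102208
system 43361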
+cpuacct.cpufreq file gives CPU time (in nanoseconds) spent at each CPU
+frequency. Platform hooks must be implemented in order to properly track
+time at each CPU frequency.
+
+cpuacct.power file gives the CPU power consumed (in milliWatt seconds). The
+platform must provide and implement power callback functions.
+
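With the hooks in place, both files read like any other cgroup statistic; a
plausible reading, assuming cpuacct.cpufreq prints one frequency/time pair
per line (all values illustrative):

# cat /sys/fs/cgroup/cpuacct.cpufreq
245760 68650216327
368640 9220750368
# cat /sys/fs/cgroup/cpuacct.power
472998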
cpuacct controller uses percpu_counter interface to collect user and
system times. This has two side effects:
sub pc, lr, r0, lsr #32 @ properly flush pipeline
#endif
+ #define PROC_ENTRY_SIZE (4*5)
+
/*
* Here follow the relocatable cache support functions for the
* various processors. This is a generic hook for locating an
ARM( addeq pc, r12, r3 ) @ call cache function
THUMB( addeq r12, r3 )
THUMB( moveq pc, r12 ) @ call cache function
- add r12, r12, #4*5
+ add r12, r12, #PROC_ENTRY_SIZE
b 1b
/*
@ b __arm6_mmu_cache_off
@ b __armv3_mmu_cache_flush
+#if !defined(CONFIG_CPU_V7)
+ /* This collides with some V7 IDs, preventing correct detection */
.word 0x00000000 @ old ARM ID
.word 0x0000f000
mov pc, lr
THUMB( nop )
mov pc, lr
THUMB( nop )
+#endif
.word 0x41007000 @ ARM7/710
.word 0xfff8fe00
.word 0x41069260 @ ARM926EJ-S (v5TEJ)
.word 0xff0ffff0
- b __arm926ejs_mmu_cache_on
- b __armv4_mmu_cache_off
- b __armv5tej_mmu_cache_flush
+ W(b) __arm926ejs_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000
.size proc_types, . - proc_types
+ /*
+ * If you get a "non-constant expression in ".if" statement"
+ * error from the assembler on this line, check that you have
+ * not accidentally written a "b" instruction where you should
+ * have written W(b).
+ */
+ .if (. - proc_types) % PROC_ENTRY_SIZE != 0
+ .error "The size of one or more proc_types entries is wrong."
+ .endif
+
/*
* Turn off the Cache and MMU. ARMv3 does not support
* reading the control register, but ARMv4 does.
usr_entry
kuser_cmpxchg_check
+ #ifdef CONFIG_IRQSOFF_TRACER
+ bl trace_hardirqs_off
+ #endif
+
get_thread_info tsk
#ifdef CONFIG_PREEMPT
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
#endif
mov why, #0
- b ret_to_user
+ b ret_to_user_from_irq
UNWIND(.fnend )
ENDPROC(__irq_usr)
blo __und_usr_unknown
3: ldrht r0, [r4]
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
- orr r0, r0, r5, lsl #16
+ orr r0, r0, r5, lsl #16
#else
b __und_usr_unknown
#endif
fs = get_fs();
set_fs(KERNEL_DS);
- for (i = -4; i < 1; i++) {
+ for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
if (end > vma->vm_end)
end = vma->vm_end;
- flush_cache_user_range(vma, start, end);
+ up_read(&mm->mmap_sem);
+ flush_cache_user_range(start, end);
+ return;
}
up_read(&mm->mmap_sem);
}
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
- if (!pte_present(*pte) || !pte_dirty(*pte)) {
+ if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
free_part_info(&disk->part0);
kfree(disk);
}
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int cnt = 0;
+
+ disk_part_iter_init(&piter, disk, 0);
+	while ((part = disk_part_iter_next(&piter)))
+ cnt++;
+ disk_part_iter_exit(&piter);
+ add_uevent_var(env, "NPARTS=%u", cnt);
+ return 0;
+}
+
struct class block_class = {
.name = "block",
};
.groups = disk_attr_groups,
.release = disk_release,
.devnode = block_devnode,
+ .uevent = disk_uevent,
};
#ifdef CONFIG_PROC_FS
struct gendisk *disk; /* the associated disk */
spinlock_t lock;
+ struct mutex block_mutex; /* protects blocking */
int block; /* event blocking depth */
unsigned int pending; /* events already sent out */
unsigned int clearing; /* events being cleared */
return msecs_to_jiffies(intv_msecs);
}
- static void __disk_block_events(struct gendisk *disk, bool sync)
+ /**
+ * disk_block_events - block and flush disk event checking
+ * @disk: disk to block events for
+ *
+ * On return from this function, it is guaranteed that event checking
+ * isn't in progress and won't happen until unblocked by
+ * disk_unblock_events(). Events blocking is counted and the actual
+ * unblocking happens after the matching number of unblocks are done.
+ *
+ * Note that this intentionally does not block event checking from
+ * disk_clear_events().
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+ void disk_block_events(struct gendisk *disk)
{
struct disk_events *ev = disk->ev;
unsigned long flags;
bool cancel;
+ if (!ev)
+ return;
+
+ /*
+ * Outer mutex ensures that the first blocker completes canceling
+ * the event work before further blockers are allowed to finish.
+ */
+ mutex_lock(&ev->block_mutex);
+
spin_lock_irqsave(&ev->lock, flags);
cancel = !ev->block++;
spin_unlock_irqrestore(&ev->lock, flags);
- if (cancel) {
- if (sync)
- cancel_delayed_work_sync(&disk->ev->dwork);
- else
- cancel_delayed_work(&disk->ev->dwork);
- }
+ if (cancel)
+ cancel_delayed_work_sync(&disk->ev->dwork);
+
+ mutex_unlock(&ev->block_mutex);
}
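
A typical caller brackets updates with this block/unblock pair; a minimal
sketch, mirroring the sysfs poll-interval store further down in this file
(disk and intv are supplied by the caller; error handling omitted):

	disk_block_events(disk);
	disk->ev->poll_msecs = intv;	/* event checking is quiesced here */
	disk_unblock_events(disk);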
static void __disk_unblock_events(struct gendisk *disk, bool check_now)
spin_unlock_irqrestore(&ev->lock, flags);
}
- /**
- * disk_block_events - block and flush disk event checking
- * @disk: disk to block events for
- *
- * On return from this function, it is guaranteed that event checking
- * isn't in progress and won't happen until unblocked by
- * disk_unblock_events(). Events blocking is counted and the actual
- * unblocking happens after the matching number of unblocks are done.
- *
- * Note that this intentionally does not block event checking from
- * disk_clear_events().
- *
- * CONTEXT:
- * Might sleep.
- */
- void disk_block_events(struct gendisk *disk)
- {
- if (disk->ev)
- __disk_block_events(disk, true);
- }
-
/**
* disk_unblock_events - unblock disk event checking
* @disk: disk to unblock events for
*/
void disk_check_events(struct gendisk *disk)
{
- if (disk->ev) {
- __disk_block_events(disk, false);
- __disk_unblock_events(disk, true);
+ struct disk_events *ev = disk->ev;
+ unsigned long flags;
+
+ if (!ev)
+ return;
+
+ spin_lock_irqsave(&ev->lock, flags);
+ if (!ev->block) {
+ cancel_delayed_work(&ev->dwork);
+ queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
}
+ spin_unlock_irqrestore(&ev->lock, flags);
}
EXPORT_SYMBOL_GPL(disk_check_events);
spin_unlock_irq(&ev->lock);
/* unconditionally schedule event check and wait for it to finish */
- __disk_block_events(disk, true);
+ disk_block_events(disk);
queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
flush_delayed_work(&ev->dwork);
__disk_unblock_events(disk, false);
if (intv < 0 && intv != -1)
return -EINVAL;
- __disk_block_events(disk, true);
+ disk_block_events(disk);
disk->ev->poll_msecs = intv;
__disk_unblock_events(disk, true);
INIT_LIST_HEAD(&ev->node);
ev->disk = disk;
spin_lock_init(&ev->lock);
+ mutex_init(&ev->block_mutex);
ev->block = 1;
ev->poll_msecs = -1;
INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
if (!disk->ev)
return;
- __disk_block_events(disk, true);
+ disk_block_events(disk);
mutex_lock(&disk_events_mutex);
list_del_init(&disk->ev->node);
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
+#include <linux/timer.h>
#include "../base.h"
#include "power.h"
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
+static void dpm_drv_timeout(unsigned long data);
+struct dpm_drv_wd_data {
+ struct device *dev;
+ struct task_struct *tsk;
+};
+
static int async_error;
/**
*/
void device_pm_init(struct device *dev)
{
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
+ dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
- if (dev->parent && dev->parent->power.in_suspend)
+ if (dev->parent && dev->parent->power.is_prepared)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
dpm_wait(dev->parent, async);
device_lock(dev);
- dev->power.in_suspend = false;
+ /*
+ * This is a fib. But we'll allow new children to be added below
+ * a resumed device, even if the device hasn't been completed yet.
+ */
+ dev->power.is_prepared = false;
+
+ if (!dev->power.is_suspended)
+ goto Unlock;
if (dev->pwr_domain) {
pm_dev_dbg(dev, state, "power domain ");
}
End:
+ dev->power.is_suspended = false;
+
+ Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
&& !pm_trace_is_enabled();
}
+/**
+ * dpm_drv_timeout - Driver suspend / resume watchdog handler
+ * @data: struct device which timed out
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so
+ * BUG() out for a crash-dump.
+ *
+ */
+static void dpm_drv_timeout(unsigned long data)
+{
+ struct dpm_drv_wd_data *wd_data = (void *)data;
+ struct device *dev = wd_data->dev;
+ struct task_struct *tsk = wd_data->tsk;
+
+ printk(KERN_EMERG "**** DPM device timeout: %s (%s)\n", dev_name(dev),
+ (dev->driver ? dev->driver->name : "no driver"));
+
+ printk(KERN_EMERG "dpm suspend stack:\n");
+ show_stack(tsk, NULL);
+
+ BUG();
+}
+
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
struct device *dev = to_device(dpm_prepared_list.prev);
get_device(dev);
- dev->power.in_suspend = false;
+ dev->power.is_prepared = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
+ struct timer_list timer;
+ struct dpm_drv_wd_data data;
dpm_wait_for_children(dev, async);
+
+ data.dev = dev;
+ data.tsk = get_current();
+ init_timer_on_stack(&timer);
+ timer.expires = jiffies + HZ * 12;
+ timer.function = dpm_drv_timeout;
+ timer.data = (unsigned long)&data;
+ add_timer(&timer);
+
device_lock(dev);
if (async_error)
- goto End;
+ goto Unlock;
if (pm_wakeup_pending()) {
async_error = -EBUSY;
- goto End;
+ goto Unlock;
}
if (dev->pwr_domain) {
}
End:
+ dev->power.is_suspended = !error;
+
+ Unlock:
device_unlock(dev);
+
+ del_timer_sync(&timer);
+ destroy_timer_on_stack(&timer);
+
complete_all(&dev->power.completion);
if (error)
put_device(dev);
break;
}
- dev->power.in_suspend = true;
+ dev->power.is_prepared = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
old_index = stat->last_index;
new_index = freq_table_get_index(stat, freq->new);
- cpufreq_stats_update(freq->cpu);
- if (old_index == new_index)
+	/* We can't do stat->time_in_state[-1] = .. */
+ if (old_index == -1 || new_index == -1)
return 0;
- if (old_index == -1 || new_index == -1)
+ cpufreq_stats_update(freq->cpu);
+
+ if (old_index == new_index)
return 0;
spin_lock(&cpufreq_stats_lock);
return 0;
}
+static int cpufreq_stats_create_table_cpu(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *table;
+ int ret = -ENODEV;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return -ENODEV;
+
+ table = cpufreq_frequency_get_table(cpu);
+ if (!table)
+ goto out;
+
+ ret = cpufreq_stats_create_table(policy, table);
+
+out:
+ cpufreq_cpu_put(policy);
+ return ret;
+}
+
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ cpufreq_stats_create_table_cpu(cpu);
+ break;
}
return NOTIFY_OK;
}
unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu) {
cpufreq_stats_free_table(cpu);
+ cpufreq_stats_free_sysfs(cpu);
}
}
#include <linux/input.h>
#include <linux/major.h>
#include <linux/device.h>
+#include <linux/wakelock.h>
#include "input-compat.h"
struct evdev {
unsigned int tail;
unsigned int packet_head; /* [future] position of the first element of next packet */
spinlock_t buffer_lock; /* protects access to buffer, head and tail */
+ struct wake_lock wake_lock;
+ char name[28];
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
/* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
+ wake_lock_timeout(&client->wake_lock, 5 * HZ);
client->buffer[client->head++] = *event;
client->head &= client->bufsize - 1;
struct evdev *evdev = handle->private;
struct evdev_client *client;
struct input_event event;
+ struct timespec ts;
- do_gettimeofday(&event.time);
+ ktime_get_ts(&ts);
+ event.time.tv_sec = ts.tv_sec;
+ event.time.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
event.type = type;
event.code = code;
event.value = value;
rcu_read_unlock();
- wake_up_interruptible(&evdev->wait);
+ if (type == EV_SYN && code == SYN_REPORT)
+ wake_up_interruptible(&evdev->wait);
}
static int evdev_fasync(int fd, struct file *file, int on)
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
+ wake_lock_destroy(&client->wake_lock);
kfree(client);
evdev_close_device(evdev);
client->bufsize = bufsize;
spin_lock_init(&client->buffer_lock);
+ snprintf(client->name, sizeof(client->name), "%s-%d",
+ dev_name(&evdev->dev), task_tgid_vnr(current));
+ wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
client->evdev = evdev;
evdev_attach_client(evdev, client);
err_free_client:
evdev_detach_client(evdev, client);
+ wake_lock_destroy(&client->wake_lock);
kfree(client);
err_put_evdev:
put_device(&evdev->dev);
if (have_event) {
*event = client->buffer[client->tail++];
client->tail &= client->bufsize - 1;
+ if (client->head == client->tail)
+ wake_unlock(&client->wake_lock);
}
spin_unlock_irq(&client->buffer_lock);
+ config LEDS_GPIO_REGISTER
+ bool
+ help
+ This option provides the function gpio_led_register_device.
+ As this function is used by arch code it must not be compiled as a
+ module.
+
menuconfig NEW_LEDS
bool "LED Support"
help
This is not related to standard keyboard LEDs which are controlled
via the input system.
+ if NEW_LEDS
+
config LEDS_CLASS
bool "LED Class Support"
- depends on NEW_LEDS
help
This option enables the led sysfs class in /sys/class/leds. You'll
need this to do anything useful with LEDs. If unsure, say N.
- config LEDS_GPIO_REGISTER
- bool
- help
- This option provides the function gpio_led_register_device.
- As this function is used by arch code it must not be compiled as a
- module.
-
- if NEW_LEDS
-
comment "LED drivers"
config LEDS_88PM860X
config LEDS_ASIC3
bool "LED support for the HTC ASIC3"
+ depends on LEDS_CLASS
depends on MFD_ASIC3
default y
help
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
+config LEDS_TRIGGER_SLEEP
+ tristate "LED Sleep Mode Trigger"
+ depends on LEDS_TRIGGERS && HAS_EARLYSUSPEND
+ help
+	  This turns LEDs on when the screen is off but the CPU is still running.
+
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
static inline int mmc_get_devidx(struct gendisk *disk)
{
- int devmaj = MAJOR(disk_devt(disk));
- int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = disk->first_minor / perdev_minors;
+ int devidx = disk->first_minor / perdev_minors;
return devidx;
}
continue;
}
status = get_card_status(card, req);
+ } else if (disable_multi == 1) {
+ disable_multi = 0;
}
if (brq.sbc.error) {
return 0;
}
+static int
+mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
+
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host)) {
+ mmc_resume_bus(card->host);
+ mmc_blk_set_blksize(md, card);
+ }
+#endif
+
mmc_claim_host(card->host);
ret = mmc_blk_part_switch(card, md);
if (ret) {
INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock);
+ ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
if (ret)
goto err_putdisk;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
+ md->disk->flags = GENHD_FL_EXT_DEVT;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
mmc_set_drvdata(card, md);
mmc_fixup_device(card, blk_fixups);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
if (mmc_add_disk(md))
goto out;
struct mmc_blk_data *md = mmc_get_drvdata(card);
mmc_blk_remove_parts(card, md);
+ mmc_claim_host(card->host);
+ mmc_blk_part_switch(card, md);
+ mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
#ifdef CONFIG_PM
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
+#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
mmc_blk_set_blksize(md, card);
+#endif
/*
* Resume involves the card going into idle state,
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/wakelock.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;
+static struct wake_lock mmc_delayed_work_wake_lock;
/*
* Enabling software CRCs on the data blocks can be a significant (30%)
static int mmc_schedule_delayed_work(struct delayed_work *work,
unsigned long delay)
{
+ wake_lock(&mmc_delayed_work_wake_lock);
return queue_delayed_work(workqueue, work, delay);
}
/* If the host is claimed then we do not want to disable it anymore */
if (!mmc_try_claim_host(host))
- return;
+ goto out;
mmc_host_do_disable(host, 1);
mmc_do_release_host(host);
+
+out:
+ wake_unlock(&mmc_delayed_work_wake_lock);
}
/**
spin_unlock_irqrestore(&host->lock, flags);
}
+int mmc_resume_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ if (!mmc_bus_needs_resume(host))
+ return -EINVAL;
+
+	printk(KERN_INFO "%s: Starting deferred resume\n", mmc_hostname(host));
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+ host->rescan_disable = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead) {
+ mmc_power_up(host);
+ BUG_ON(!host->bus_ops->resume);
+ host->bus_ops->resume(host);
+ }
+
+	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead)
+ host->bus_ops->detect(host);
+
+ mmc_bus_put(host);
+	printk(KERN_INFO "%s: Deferred resume completed\n", mmc_hostname(host));
+ return 0;
+}
+EXPORT_SYMBOL(mmc_resume_bus);
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
*/
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
- (card->host->ios.clock / 1000);
+ (mmc_host_clk_rate(card->host) / 1000);
erase_timeout = timeout_us / 1000;
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
+ bool extend_wakelock = false;
if (host->rescan_disable)
return;
&& !(host->caps & MMC_CAP_NONREMOVABLE))
host->bus_ops->detect(host);
+	/*
+	 * If the card was removed the bus will be marked as dead;
+	 * extend the wakelock so userspace can respond.
+	 */
+	if (host->bus_dead)
+		extend_wakelock = true;
+
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
* the card is no longer present.
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
+ extend_wakelock = true;
break;
+ }
if (freqs[i] <= host->f_min)
break;
}
mmc_release_host(host);
out:
+ if (extend_wakelock)
+ wake_lock_timeout(&mmc_delayed_work_wake_lock, HZ / 2);
+ else
+ wake_unlock(&mmc_delayed_work_wake_lock);
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
{
int err = 0;
+ if (mmc_bus_needs_resume(host))
+ return 0;
+
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
int err = 0;
mmc_bus_get(host);
+ if (mmc_bus_manual_resume(host)) {
+ host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+ mmc_bus_put(host);
+ return 0;
+ }
+
if (host->bus_ops && !host->bus_dead) {
if (!mmc_card_keep_power(host)) {
mmc_power_up(host);
case PM_SUSPEND_PREPARE:
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_needs_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
cancel_delayed_work_sync(&host->detect);
case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_manual_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
mmc_detect_change(host, 0);
}
#endif
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs)
+{
+ host->embedded_sdio_data.cis = cis;
+ host->embedded_sdio_data.cccr = cccr;
+ host->embedded_sdio_data.funcs = funcs;
+ host->embedded_sdio_data.num_funcs = num_funcs;
+}
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
static int __init mmc_init(void)
{
int ret;
if (!workqueue)
return -ENOMEM;
+ wake_lock_init(&mmc_delayed_work_wake_lock, WAKE_LOCK_SUSPEND,
+ "mmc_delayed_work");
+
ret = mmc_register_bus();
if (ret)
goto destroy_workqueue;
mmc_unregister_bus();
destroy_workqueue:
destroy_workqueue(workqueue);
+ wake_lock_destroy(&mmc_delayed_work_wake_lock);
return ret;
}
mmc_unregister_host_class();
mmc_unregister_bus();
destroy_workqueue(workqueue);
+ wake_lock_destroy(&mmc_delayed_work_wake_lock);
}
subsys_initcall(mmc_init);
#include "sdio_ops.h"
#include "sdio_cis.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
goto finish;
}
- /*
- * Read the common registers.
- */
- err = sdio_read_cccr(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cccr)
+ memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+ else {
+#endif
+ /*
+ * Read the common registers.
+ */
+ err = sdio_read_cccr(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
- /*
- * Read the common CIS tuples.
- */
- err = sdio_read_common_cis(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cis)
+ memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+ else {
+#endif
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
static int mmc_sdio_power_restore(struct mmc_host *host)
{
int ret;
+ u32 ocr;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
+
+ /*
+ * Reset the card by performing the same steps that are taken by
+ * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe.
+ *
+ * sdio_reset() is technically not needed. Having just powered up the
+ * hardware, it should already be in reset state. However, some
+ * platforms (such as SD8686 on OLPC) do not instantly cut power,
+ * meaning that a reset is required when restoring power soon after
+ * powering off. It is harmless in other cases.
+ *
+ * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec,
+ * is not necessary for non-removable cards. However, it is required
+ * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and
+ * harmless in other situations.
+ *
+ * With these steps taken, mmc_select_voltage() is also required to
+ * restore the correct voltage setting of the card.
+ */
+ sdio_reset(host);
+ mmc_go_idle(host);
+ mmc_send_if_cond(host, host->ocr_avail);
+
+ ret = mmc_send_io_op_cond(host, 0, &ocr);
+ if (ret)
+ goto out;
+
+ if (host->ocr_avail_sdio)
+ host->ocr_avail = host->ocr_avail_sdio;
+
+ host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
+ if (!host->ocr) {
+ ret = -EINVAL;
+ goto out;
+ }
+
ret = mmc_sdio_init_card(host, host->ocr, host->card,
mmc_card_keep_power(host));
if (!ret && host->sdio_irqs)
mmc_signal_sdio_irq(host);
+
+ out:
mmc_release_host(host);
return ret;
funcs = (ocr & 0x70000000) >> 28;
card->sdio_funcs = 0;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs)
+ card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
/*
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
- err = sdio_init_func(host->card, i + 1);
- if (err)
- goto remove;
-
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs) {
+ struct sdio_func *tmp;
+
+ tmp = sdio_alloc_func(host->card);
+ if (IS_ERR(tmp))
+ goto remove;
+ tmp->num = (i + 1);
+ card->sdio_func[i] = tmp;
+ tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+ tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+ tmp->vendor = card->cis.vendor;
+ tmp->device = card->cis.device;
+ } else {
+#endif
+ err = sdio_init_func(host->card, i + 1);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
/*
* Enable Runtime PM for this func (if supported)
*/
return err;
}
+int sdio_reset_comm(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 ocr;
+ int err;
+
+	printk(KERN_DEBUG "%s():\n", __func__);
+ mmc_claim_host(host);
+
+ mmc_go_idle(host);
+
+ mmc_set_clock(host, host->f_min);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ goto err;
+
+ host->ocr = mmc_select_voltage(host, ocr);
+ if (!host->ocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ if (err)
+ goto err;
+
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto err;
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto err;
+
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ else if (err)
+ goto err;
+
+ mmc_release_host(host);
+ return 0;
+err:
+	printk(KERN_ERR "%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host), err);
+ mmc_release_host(host);
+ return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
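
As a usage sketch (hypothetical caller; func is the driver's struct
sdio_func), an SDIO function driver whose chip was reset out-of-band could
re-establish communication with:

	err = sdio_reset_comm(func->card);
	if (err)
		dev_err(&func->dev, "SDIO reset failed: %d\n", err);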
#include "sdio_cis.h"
#include "sdio_bus.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
/* show configuration fields */
#define sdio_config_attr(field, format_string) \
static ssize_t \
/* Then undo the runtime PM settings in sdio_bus_probe() */
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_sync(dev);
out:
return ret;
{
struct sdio_func *func = dev_to_sdio_func(dev);
- sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ /*
+ * If this device is embedded then we never allocated
+ * cis tables for this func
+ */
+ if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+ sdio_free_func_cis(func);
if (func->info)
kfree(func->info);
NULL, &proc_single_file_operations, \
{ .proc_show = show } )
+/* ANDROID is for special files in /proc. */
+#define ANDROID(NAME, MODE, OTYPE) \
+ NOD(NAME, (S_IFREG|(MODE)), \
+ &proc_##OTYPE##_inode_operations, \
+ &proc_##OTYPE##_operations, {})
+
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
* and .. links.
mm = get_task_mm(task);
if (mm && mm != current->mm &&
- !ptrace_may_access(task, PTRACE_MODE_READ)) {
+ !ptrace_may_access(task, PTRACE_MODE_READ) &&
+ !capable(CAP_SYS_RESOURCE)) {
mmput(mm);
mm = ERR_PTR(-EACCES);
}
return err < 0 ? err : count;
}
+static int oom_adjust_permission(struct inode *inode, int mask,
+ unsigned int flags)
+{
+ uid_t uid;
+ struct task_struct *p;
+
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+
+ p = get_proc_task(inode);
+	if (p) {
+ uid = task_uid(p);
+ put_task_struct(p);
+ }
+
+ /*
+ * System Server (uid == 1000) is granted access to oom_adj of all
+	 * android applications (uid > 10000) and services (uid >= 1000).
+ */
+ if (p && (current_fsuid() == 1000) && (uid >= 1000)) {
+ if (inode->i_mode >> 6 & mask) {
+ return 0;
+ }
+ }
+
+ /* Fall back to default. */
+ return generic_permission(inode, mask, flags, NULL);
+}
+
+static const struct inode_operations proc_oom_adjust_inode_operations = {
+ .permission = oom_adjust_permission,
+};
+
static const struct file_operations proc_oom_adjust_operations = {
.read = oom_adjust_read,
.write = oom_adjust_write,
*/
static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
{
- int rv;
-
- if (flags & IPERM_FLAG_RCU)
- return -ECHILD;
- rv = generic_permission(inode, mask, flags, NULL);
+ int rv = generic_permission(inode, mask, flags, NULL);
if (rv == 0)
return 0;
if (task_pid(current) == proc_pid(inode))
REG("cgroup", S_IRUGO, proc_cgroup_operations),
#endif
INF("oom_score", S_IRUGO, proc_oom_score),
- REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adjust_operations),
+	ANDROID("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
config CONSTRUCTORS
bool
depends on !UML
- default y
config HAVE_IRQ_WORK
bool
endchoice
+ config DEFAULT_HOSTNAME
+ string "Default hostname"
+ default "(none)"
+ help
+	  This option determines the default system hostname before userspace
+	  calls sethostname(2). The kernel traditionally uses "(none)" here,
+	  but you may wish to set a different default to make a minimal
+	  system more usable with less configuration.
+
config SWAP
bool "Support for paging of anonymous memory (swap)"
depends on MMU && BLOCK
config ANON_INODES
bool
+config PANIC_TIMEOUT
+ int "Default panic timeout"
+ default 0
+ help
+	  Set the default panic timeout: the number of seconds the kernel
+	  waits before rebooting after a panic. The traditional default of 0
+	  means wait forever (never reboot automatically).
+
menuconfig EXPERT
bool "Configure standard kernel features (expert users)"
help
option replaces shmem and tmpfs with the much simpler ramfs code,
which may be appropriate on small systems without swap.
+config ASHMEM
+ bool "Enable the Anonymous Shared Memory Subsystem"
+ default n
+ depends on SHMEM || TINY_SHMEM
+ help
+ The ashmem subsystem is a new shared memory allocator, similar to
+ POSIX SHM but with different behavior and sporting a simpler
+ file-based API.
+
config AIO
bool "Enable AIO support" if EXPERT
default y
} while (next);
}
- static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+ void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
struct shmem_inode_info *info = SHMEM_I(inode);
unsigned long idx;
spinlock_t *punch_lock;
unsigned long upper_limit;
+ truncate_inode_pages_range(inode->i_mapping, start, end);
+
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (idx >= info->next_index)
* lowered next_index. Also, though shmem_getpage checks
* i_size before adding to cache, no recheck after: so fix the
* narrow window there too.
- *
- * Recalling truncate_inode_pages_range and unmap_mapping_range
- * every time for punch_hole (which never got a chance to clear
- * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
- * yet hardly ever necessary: try to optimize them out later.
*/
truncate_inode_pages_range(inode->i_mapping, start, end);
- if (punch_hole)
- unmap_mapping_range(inode->i_mapping, start,
- end - start, 1);
}
spin_lock(&info->lock);
shmem_free_pages(pages_to_free.next);
}
}
+ EXPORT_SYMBOL_GPL(shmem_truncate_range);
- static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
+ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
- loff_t newsize = attr->ia_size;
int error;
error = inode_change_ok(inode, attr);
if (error)
return error;
- if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
- && newsize != inode->i_size) {
+ if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
+ loff_t oldsize = inode->i_size;
+ loff_t newsize = attr->ia_size;
struct page *page = NULL;
- if (newsize < inode->i_size) {
+ if (newsize < oldsize) {
/*
* If truncating down to a partial page, then
* if that page is already allocated, hold it
spin_unlock(&info->lock);
}
}
-
- /* XXX(truncate): truncate_setsize should be called last */
- truncate_setsize(inode, newsize);
+ if (newsize != oldsize) {
+ i_size_write(inode, newsize);
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+ }
+ if (newsize < oldsize) {
+ loff_t holebegin = round_up(newsize, PAGE_SIZE);
+ unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+ shmem_truncate_range(inode, newsize, (loff_t)-1);
+ /* unmap again to remove racily COWed private pages */
+ unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
+ }
if (page)
page_cache_release(page);
- shmem_truncate_range(inode, newsize, (loff_t)-1);
}
setattr_copy(inode, attr);
struct shmem_xattr *xattr, *nxattr;
if (inode->i_mapping->a_ops == &shmem_aops) {
- truncate_inode_pages(inode->i_mapping, 0);
shmem_unacct_size(info->flags, inode->i_size);
inode->i_size = 0;
shmem_truncate_range(inode, 0, (loff_t)-1);
};
static const struct inode_operations shmem_inode_operations = {
- .setattr = shmem_notify_change,
+ .setattr = shmem_setattr,
.truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.removexattr = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
- .setattr = shmem_notify_change,
+ .setattr = shmem_setattr,
.check_acl = generic_check_acl,
#endif
};
.removexattr = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
- .setattr = shmem_notify_change,
+ .setattr = shmem_setattr,
.check_acl = generic_check_acl,
#endif
};
return 0;
}
+ void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
+ {
+ truncate_inode_pages_range(inode->i_mapping, start, end);
+ }
+ EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
* mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = file;
+ vma->vm_ops = &shmem_vm_ops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+}
+
/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
if (IS_ERR(file))
return PTR_ERR(file);
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = file;
- vma->vm_ops = &shmem_vm_ops;
- vma->vm_flags |= VM_CAN_NONLINEAR;
+ shmem_set_file(vma, file);
return 0;
}
+
+ /**
+ * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
+ * @mapping: the page's address_space
+ * @index: the page index
+ * @gfp: the page allocator flags to use if allocating
+ *
+ * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
+ * with any new page allocations done using the specified allocation flags.
+ * But read_cache_page_gfp() uses the ->readpage() method: which does not
+ * suit tmpfs, since it may have pages in swapcache, and needs to find those
+ * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
+ *
+ * Provide a stub for those callers to start using now, then later
+ * flesh it out to call shmem_getpage() with additional gfp mask, when
+ * shmem_file_splice_read() is added and shmem_readpage() is removed.
+ */
+ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp)
+ {
+ return read_cache_page_gfp(mapping, index, gfp);
+ }
+ EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
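
A sketch of how a caller such as a drm/ttm driver might use the stub, with
mapping and index assumed to come from the caller's shmem-backed object:

	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index, GFP_HIGHUSER);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... access the page contents ... */
	page_cache_release(page);	/* drop the reference when done */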
* command otherwise */
u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
- /* Events for 1.2 and newer controllers */
- if (hdev->lmp_ver > 1) {
- events[4] |= 0x01; /* Flow Specification Complete */
- events[4] |= 0x02; /* Inquiry Result with RSSI */
- events[4] |= 0x04; /* Read Remote Extended Features Complete */
- events[5] |= 0x08; /* Synchronous Connection Complete */
- events[5] |= 0x10; /* Synchronous Connection Changed */
- }
+	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
+	 * any event mask for pre-1.2 devices */
+ if (hdev->lmp_ver <= 1)
+ return;
+
+ events[4] |= 0x01; /* Flow Specification Complete */
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
if (hdev->features[3] & LMP_RSSI_INQ)
events[4] |= 0x04; /* Inquiry Result with RSSI */
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
+ conn = hci_conn_add(hdev, ACL_LINK, 0, &cp->bdaddr);
if (conn) {
conn->out = 1;
conn->link_mode |= HCI_LM_MASTER;
}
} else {
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
+ conn = hci_conn_add(hdev, LE_LINK, 0, &cp->peer_addr);
if (conn)
conn->out = 1;
else
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
- conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
+ /* pkt_type not yet used for incoming connections */
+ conn = hci_conn_add(hdev, ev->link_type, 0, &ev->bdaddr);
if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
hci_conn_add_sysfs(conn);
break;
+ case 0x10: /* Connection Accept Timeout */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
if (!conn) {
- conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
+ conn = hci_conn_add(hdev, LE_LINK, 0, &ev->bdaddr);
if (!conn) {
BT_ERR("No memory for new connection");
hci_dev_unlock(hdev);
{
bdaddr_t *src = &bt_sk(sk)->src;
bdaddr_t *dst = &bt_sk(sk)->dst;
+ __u16 pkt_type = sco_pi(sk)->pkt_type;
struct sco_conn *conn;
struct hci_conn *hcon;
struct hci_dev *hdev;
if (lmp_esco_capable(hdev) && !disable_esco)
type = ESCO_LINK;
- else
+ else {
type = SCO_LINK;
+ pkt_type &= SCO_ESCO_MASK;
+ }
- hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
+ hcon = hci_connect(hdev, type, pkt_type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
case BT_CONNECTED:
case BT_CONFIG:
+ if (sco_pi(sk)->conn) {
+ sk->sk_state = BT_DISCONN;
+ sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
+ hci_conn_put(sco_pi(sk)->conn->hcon);
+ sco_pi(sk)->conn->hcon = NULL;
+ } else
+ sco_chan_del(sk, ECONNRESET);
+ break;
+
case BT_CONNECT:
case BT_DISCONN:
sco_chan_del(sk, ECONNRESET);
return 0;
}
-static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
- struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+ struct sockaddr_sco sa;
struct sock *sk = sock->sk;
- bdaddr_t *src = &sa->sco_bdaddr;
- int err = 0;
+ bdaddr_t *src = &sa.sco_bdaddr;
+ int len, err = 0;
- BT_DBG("sk %p %s", sk, batostr(&sa->sco_bdaddr));
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
+	memset(&sa, 0, sizeof(sa));
+	len = min_t(unsigned int, sizeof(sa), alen);
+	memcpy(&sa, addr, len);
+
+	BT_DBG("sk %p %s", sk, batostr(&sa.sco_bdaddr));
+
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EADDRINUSE;
} else {
/* Save source address */
- bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+ bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
+ sco_pi(sk)->pkt_type = sa.sco_pkt_type;
sk->sk_state = BT_BOUND;
}
static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
- struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- int err = 0;
-
+ struct sockaddr_sco sa;
+ int len, err = 0;
BT_DBG("sk %p", sk);
- if (alen < sizeof(struct sockaddr_sco) ||
- addr->sa_family != AF_BLUETOOTH)
+ if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
- if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
- return -EBADFD;
-
- if (sk->sk_type != SOCK_SEQPACKET)
- return -EINVAL;
+ memset(&sa, 0, sizeof(sa));
+ len = min_t(unsigned int, sizeof(sa), alen);
+ memcpy(&sa, addr, len);
lock_sock(sk);
+ if (sk->sk_type != SOCK_SEQPACKET) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+ err = -EBADFD;
+ goto done;
+ }
+
/* Set destination address and psm */
- bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+ bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
+ sco_pi(sk)->pkt_type = sa.sco_pkt_type;
err = sco_connect(sk);
if (err)
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
else
bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+ sa->sco_pkt_type = sco_pi(sk)->pkt_type;
return 0;
}
conn->sk = NULL;
sco_pi(sk)->conn = NULL;
sco_conn_unlock(conn);
- hci_conn_put(conn->hcon);
+
+ if (conn->hcon)
+ hci_conn_put(conn->hcon);
}
sk->sk_state = BT_CLOSED;
#include <linux/mroute.h>
#endif
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+ return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+ return 1;
+}
+#endif
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
return ipprot->netns_ok;
}
+
/*
* Create an inet socket.
*/
int try_loading_module = 0;
int err;
+ if (!current_has_network())
+ return -EACCES;
+
if (unlikely(!inet_ehash_secret))
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
build_ehash_secret();
lock_sock(sk2);
+ sock_rps_record_flow(sk2);
WARN_ON(!((1 << sk2->sk_state) &
(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
case SIOCSIFPFLAGS:
case SIOCGIFPFLAGS:
case SIOCSIFFLAGS:
+ case SIOCKILLADDR:
err = devinet_ioctl(net, cmd, (void __user *)arg);
break;
default: