Merge tag 'v3.10.23' into develop-3.10
author 黄涛 <huangtao@rock-chips.com>
Tue, 10 Dec 2013 04:23:21 +0000 (12:23 +0800)
committer 黄涛 <huangtao@rock-chips.com>
Tue, 10 Dec 2013 04:23:21 +0000 (12:23 +0800)
This is the 3.10.23 stable release

Conflicts:
drivers/input/evdev.c

17 files changed:
Makefile
block/blk-core.c
drivers/hid/hid-multitouch.c
drivers/input/evdev.c
drivers/mmc/card/block.c
drivers/net/tun.c
kernel/cgroup.c
kernel/time/alarmtimer.c
kernel/workqueue.c
mm/mmap.c
mm/mprotect.c
mm/vmstat.c
net/bluetooth/af_bluetooth.c
net/bluetooth/sco.c
net/ipv4/tcp.c
net/unix/af_unix.c
net/wireless/scan.c

diff --combined Makefile
index a1594bbbe9d58956f5f14f0f85142ecd8b5f0641,c6d2ec5e958092d352a32800d4e729d6ac3287c2..b46dca925f4f72506a49c4b2a86f44833d50dd48
+++ b/Makefile
@@@ -1,6 -1,6 +1,6 @@@
  VERSION = 3
  PATCHLEVEL = 10
- SUBLEVEL = 21
+ SUBLEVEL = 23
  EXTRAVERSION =
  NAME = TOSSUG Baby Fish
  
@@@ -192,11 -192,7 +192,11 @@@ SUBARCH := $(shell uname -m | sed -e s/
  # "make" in the configured kernel build directory always uses that.
  # Default value for CROSS_COMPILE is not to prefix executables
  # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
 +ARCH          ?= arm
  ARCH          ?= $(SUBARCH)
 +ifneq ($(wildcard ../prebuilts/gcc/linux-x86/arm/arm-eabi-4.6),)
 +CROSS_COMPILE ?= ../prebuilts/gcc/linux-x86/arm/arm-eabi-4.6/bin/arm-eabi-
 +endif
  CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%)
  
  # Architecture as present in compile.h
@@@ -400,7 -396,7 +400,7 @@@ export KBUILD_CFLAGS CFLAGS_KERNEL CFLA
  export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
  export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
  export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
 -export KBUILD_ARFLAGS
 +export KBUILD_ARFLAGS OBJCOPY_OUTPUT_FORMAT
  
  # When compiling out-of-tree modules, put MODVERDIR in the module
  # tree rather than in the kernel tree. The kernel tree might
@@@ -686,10 -682,6 +686,10 @@@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y
  LDFLAGS_vmlinux       += $(call ld-option, -X,)
  endif
  
 +ifeq ($(CONFIG_PIE),y)
 +LDFLAGS_vmlinux += --just-symbols=pie/pie.syms
 +endif
 +
  # Default kernel image to build when no specific target is given.
  # KBUILD_IMAGE may be overruled on the command line or
  # set in the environment
@@@ -745,15 -737,13 +745,15 @@@ core-y          += kernel/ mm/ fs/ ipc/ securit
  
  vmlinux-dirs  := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
                     $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
 -                   $(net-y) $(net-m) $(libs-y) $(libs-m)))
 +                   $(net-y) $(net-m) $(libs-y) $(libs-m) $(libpie-y)))
  
  vmlinux-alldirs       := $(sort $(vmlinux-dirs) $(patsubst %/,%,$(filter %/, \
                     $(init-n) $(init-) \
                     $(core-n) $(core-) $(drivers-n) $(drivers-) \
                     $(net-n)  $(net-)  $(libs-n)    $(libs-))))
  
 +pie-$(CONFIG_PIE) := pie/
 +
  init-y                := $(patsubst %/, %/built-in.o, $(init-y))
  core-y                := $(patsubst %/, %/built-in.o, $(core-y))
  drivers-y     := $(patsubst %/, %/built-in.o, $(drivers-y))
@@@ -761,21 -751,16 +761,21 @@@ net-y           := $(patsubst %/, %/built-in.o, 
  libs-y1               := $(patsubst %/, %/lib.a, $(libs-y))
  libs-y2               := $(patsubst %/, %/built-in.o, $(libs-y))
  libs-y                := $(libs-y1) $(libs-y2)
 +pie-y         := $(patsubst %/, %/built-in.o, $(pie-y))
 +libpie-y      := $(patsubst %/, %/built-in.o, $(libpie-y))
  
  # Externally visible symbols (used by link-vmlinux.sh)
  export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
  export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y)
 +export KBUILD_VMLINUX_PIE  := $(pie-y)
 +export KBUILD_LIBPIE       := $(libpie-y)
 +export KBUILD_PIE_LDS      := $(PIE_LDS)
  export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
  export LDFLAGS_vmlinux
  # used by scripts/package/Makefile
  export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools virt)
  
 -vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
 +vmlinux-deps := $(KBUILD_LDS) $(KBUILD_PIE_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_PIE)
  
  # Final link of vmlinux
        cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
diff --combined block/blk-core.c
index f6fcc9709347fd0e6ee9ebe4612da631d1b55c50,2c66daba44ddd28c6c6c55b1bacd1254a4d7555b..3d37cdec5c61d639ef3c6b57d417d86f680dc5d0
@@@ -645,10 -645,12 +645,12 @@@ struct request_queue *blk_alloc_queue_n
        __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
  
        if (blkcg_init_queue(q))
-               goto fail_id;
+               goto fail_bdi;
  
        return q;
  
+ fail_bdi:
+       bdi_destroy(&q->backing_dev_info);
  fail_id:
        ida_simple_remove(&blk_queue_ida, q->id);
  fail_q:
@@@ -739,9 -741,17 +741,17 @@@ blk_init_allocated_queue(struct request
  
        q->sg_reserved_size = INT_MAX;
  
+       /* Protect q->elevator from elevator_change */
+       mutex_lock(&q->sysfs_lock);
        /* init elevator */
-       if (elevator_init(q, NULL))
+       if (elevator_init(q, NULL)) {
+               mutex_unlock(&q->sysfs_lock);
                return NULL;
+       }
+       mutex_unlock(&q->sysfs_lock);
        return q;
  }
  EXPORT_SYMBOL(blk_init_allocated_queue);
@@@ -3181,8 -3191,7 +3191,8 @@@ int __init blk_dev_init(void
  
        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
 -                                          WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 +                                          WQ_MEM_RECLAIM | WQ_HIGHPRI |
 +                                          WQ_POWER_EFFICIENT, 0);
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
  
diff --combined drivers/hid/hid-multitouch.c
index bb6fe3ee003093d791195896317db4ab1ed48b9f,3d8e58ac7499e4334c0ac0931afb1bfaf675cdeb..fb9ac1266719ea0ef22669e49ebd762c6dace115
@@@ -244,12 -244,12 +244,12 @@@ static struct mt_class mt_classes[] = 
        { .name = MT_CLS_GENERALTOUCH_TWOFINGERS,
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
                        MT_QUIRK_VALID_IS_INRANGE |
-                       MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+                       MT_QUIRK_SLOT_IS_CONTACTID,
                .maxcontacts = 2
        },
        { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
-                       MT_QUIRK_SLOT_IS_CONTACTNUMBER
+                       MT_QUIRK_SLOT_IS_CONTACTID
        },
  
        { .name = MT_CLS_FLATFROG,
@@@ -443,16 -443,6 +443,16 @@@ static int mt_touch_input_mapping(struc
            (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON)
                td->mt_flags |= INPUT_MT_POINTER;
  
 +      /* Only map fields from TouchScreen or TouchPad collections.
 +         * We need to ignore fields that belong to other collections
 +         * such as Mouse that might have the same GenericDesktop usages. */
 +      if (field->application == HID_DG_TOUCHSCREEN)
 +              set_bit(INPUT_PROP_DIRECT, hi->input->propbit);
 +      else if (field->application == HID_DG_TOUCHPAD)
 +              set_bit(INPUT_PROP_POINTER, hi->input->propbit);
 +      else
 +              return 0;
 +
        if (usage->usage_index)
                prev_usage = &field->usage[usage->usage_index - 1];
  
@@@ -782,13 -772,12 +782,13 @@@ static void mt_touch_report(struct hid_
                mt_sync_frame(td, report->field[0]->hidinput->input);
  }
  
 -static void mt_touch_input_configured(struct hid_device *hdev,
 +static int mt_touch_input_configured(struct hid_device *hdev,
                                        struct hid_input *hi)
  {
        struct mt_device *td = hid_get_drvdata(hdev);
        struct mt_class *cls = &td->mtclass;
        struct input_dev *input = hi->input;
 +      int ret;
  
        if (!td->maxcontacts)
                td->maxcontacts = MT_DEFAULT_MAXCONTACT;
        if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
                td->mt_flags |= INPUT_MT_DROP_UNUSED;
  
 -      input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
 +      ret = input_mt_init_slots(input, td->maxcontacts, td->mt_flags);
 +      if (ret)
 +              return ret;
  
        td->mt_flags = 0;
 +      return 0;
  }
  
  static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@@ -941,21 -927,19 +941,21 @@@ static void mt_post_parse(struct mt_dev
                cls->quirks &= ~MT_QUIRK_CONTACT_CNT_ACCURATE;
  }
  
 -static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
 +static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
  {
        struct mt_device *td = hid_get_drvdata(hdev);
        char *name = kstrdup(hdev->name, GFP_KERNEL);
 +      int ret = 0;
  
        if (name)
                hi->input->name = name;
  
        if (hi->report->id == td->mt_report_id)
 -              mt_touch_input_configured(hdev, hi);
 +              ret = mt_touch_input_configured(hdev, hi);
  
        if (hi->report->id == td->pen_report_id)
                mt_pen_input_configured(hdev, hi);
 +      return ret;
  }
  
  static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@@ -1207,6 -1191,21 +1207,21 @@@ static const struct hid_device_id mt_de
        { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
                MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
                        USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
+       { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) },
  
        /* Gametel game controller */
        { .driver_data = MT_CLS_NSMU,
                MT_USB_DEVICE(USB_VENDOR_ID_QUANTA,
                        USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
  
+       /* SiS panels */
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+               USB_DEVICE_ID_SIS9200_TOUCH) },
+       { .driver_data = MT_CLS_DEFAULT,
+               HID_USB_DEVICE(USB_VENDOR_ID_SIS2_TOUCH,
+               USB_DEVICE_ID_SIS817_TOUCH) },
        /* Stantum panels */
        { .driver_data = MT_CLS_CONFIDENCE,
                MT_USB_DEVICE(USB_VENDOR_ID_STANTUM,
        { .driver_data = MT_CLS_NSMU,
                MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
                        USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
+       /* Wistron panels */
+       { .driver_data = MT_CLS_NSMU,
+               MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
+                       USB_DEVICE_ID_WISTRON_OPTICAL_TOUCH) },
        /* XAT */
        { .driver_data = MT_CLS_NSMU,
                MT_USB_DEVICE(USB_VENDOR_ID_XAT,
diff --combined drivers/input/evdev.c
index 23425d7922ca0240f0527306a49e23540f99c9c9,c122dd2adc22bd16985cfea673132a4bff5b5c4c..f4897c8c15005ed943686e12ca3f022f24396933
  #include <linux/poll.h>
  #include <linux/sched.h>
  #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/mm.h>
  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/input/mt.h>
  #include <linux/major.h>
  #include <linux/device.h>
  #include <linux/cdev.h>
 +#include <linux/wakelock.h>
  #include "input-compat.h"
  
  struct evdev {
@@@ -45,9 -46,6 +47,9 @@@ struct evdev_client 
        unsigned int tail;
        unsigned int packet_head; /* [future] position of the first element of next packet */
        spinlock_t buffer_lock; /* protects access to buffer, head and tail */
 +      struct wake_lock wake_lock;
 +      bool use_wake_lock;
 +      char name[28];
        struct fasync_struct *fasync;
        struct evdev *evdev;
        struct list_head node;
@@@ -75,14 -73,10 +77,14 @@@ static void __pass_event(struct evdev_c
                client->buffer[client->tail].value = 0;
  
                client->packet_head = client->tail;
 +              if (client->use_wake_lock)
 +                      wake_unlock(&client->wake_lock);
        }
  
        if (event->type == EV_SYN && event->code == SYN_REPORT) {
                client->packet_head = client->head;
 +              if (client->use_wake_lock)
 +                      wake_lock(&client->wake_lock);
                kill_fasync(&client->fasync, SIGIO, POLL_IN);
        }
  }
@@@ -297,9 -291,11 +299,13 @@@ static int evdev_release(struct inode *
        mutex_unlock(&evdev->mutex);
  
        evdev_detach_client(evdev, client);
-       kfree(client);
 +      if (client->use_wake_lock)
 +              wake_lock_destroy(&client->wake_lock);
+       if (is_vmalloc_addr(client))
+               vfree(client);
+       else
+               kfree(client);
  
        evdev_close_device(evdev);
  
@@@ -319,19 -315,19 +325,21 @@@ static int evdev_open(struct inode *ino
  {
        struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
        unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
+       unsigned int size = sizeof(struct evdev_client) +
+                                       bufsize * sizeof(struct input_event);
        struct evdev_client *client;
        int error;
  
-       client = kzalloc(sizeof(struct evdev_client) +
-                               bufsize * sizeof(struct input_event),
-                        GFP_KERNEL);
+       client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+       if (!client)
+               client = vzalloc(size);
        if (!client)
                return -ENOMEM;
  
        client->bufsize = bufsize;
        spin_lock_init(&client->buffer_lock);
 +      snprintf(client->name, sizeof(client->name), "%s-%d",
 +                      dev_name(&evdev->dev), task_tgid_vnr(current));
        client->evdev = evdev;
        evdev_attach_client(evdev, client);
  
@@@ -398,9 -394,6 +406,9 @@@ static int evdev_fetch_next_event(struc
        if (have_event) {
                *event = client->buffer[client->tail++];
                client->tail &= client->bufsize - 1;
 +              if (client->use_wake_lock &&
 +                  client->packet_head == client->tail)
 +                      wake_unlock(&client->wake_lock);
        }
  
        spin_unlock_irq(&client->buffer_lock);
@@@ -689,35 -682,6 +697,35 @@@ static int evdev_handle_mt_request(stru
        return 0;
  }
  
 +static int evdev_enable_suspend_block(struct evdev *evdev,
 +                                    struct evdev_client *client)
 +{
 +      if (client->use_wake_lock)
 +              return 0;
 +
 +      spin_lock_irq(&client->buffer_lock);
 +      wake_lock_init(&client->wake_lock, WAKE_LOCK_SUSPEND, client->name);
 +      client->use_wake_lock = true;
 +      if (client->packet_head != client->tail)
 +              wake_lock(&client->wake_lock);
 +      spin_unlock_irq(&client->buffer_lock);
 +      return 0;
 +}
 +
 +static int evdev_disable_suspend_block(struct evdev *evdev,
 +                                     struct evdev_client *client)
 +{
 +      if (!client->use_wake_lock)
 +              return 0;
 +
 +      spin_lock_irq(&client->buffer_lock);
 +      client->use_wake_lock = false;
 +      wake_lock_destroy(&client->wake_lock);
 +      spin_unlock_irq(&client->buffer_lock);
 +
 +      return 0;
 +}
 +
  static long evdev_do_ioctl(struct file *file, unsigned int cmd,
                           void __user *p, int compat_mode)
  {
  
        case EVIOCSKEYCODE_V2:
                return evdev_handle_set_keycode_v2(dev, p);
 +
 +      case EVIOCGSUSPENDBLOCK:
 +              return put_user(client->use_wake_lock, ip);
 +
 +      case EVIOCSSUSPENDBLOCK:
 +              if (p)
 +                      return evdev_enable_suspend_block(evdev, client);
 +              else
 +                      return evdev_disable_suspend_block(evdev, client);
        }
  
        size = _IOC_SIZE(cmd);
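
The evdev hunks above are where the merge conflict was resolved: the Android per-client wakelock support (a client that opts in holds a wakelock from each SYN_REPORT until its queue is drained) is kept alongside the stable kernel's vmalloc fallback for large client buffers, and the EVIOCGSUSPENDBLOCK/EVIOCSSUSPENDBLOCK ioctls toggle the behaviour per client. A minimal user-space sketch, assuming the two ioctl definitions come from the patched <linux/input.h>; the device node is a placeholder:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/input.h>        /* patched header providing EVIOC*SUSPENDBLOCK */

    int main(void)
    {
            int enabled = 0;
            int fd = open("/dev/input/event0", O_RDONLY);   /* placeholder node */

            if (fd < 0)
                    return 1;
            if (ioctl(fd, EVIOCSSUSPENDBLOCK, 1) < 0)       /* 1 = enable, 0 = disable */
                    return 1;
            if (ioctl(fd, EVIOCGSUSPENDBLOCK, &enabled) < 0)
                    return 1;
            printf("suspend block enabled: %d\n", enabled);
            return 0;
    }

Disabling is symmetric: passing 0 to EVIOCSSUSPENDBLOCK destroys the client's wakelock via evdev_disable_suspend_block().
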
diff --combined drivers/mmc/card/block.c
index 366d3528b321ec74602fa8100cc332c9697d74f0,76a3d3a752d810e579d9b59580eef68d162df2e4..4f98f8b3839552899434261a8284e3fa7c88826e
@@@ -35,9 -35,6 +35,9 @@@
  #include <linux/capability.h>
  #include <linux/compat.h>
  
 +#define CREATE_TRACE_POINTS
 +#include <trace/events/mmc.h>
 +
  #include <linux/mmc/ioctl.h>
  #include <linux/mmc/card.h>
  #include <linux/mmc/host.h>
@@@ -166,7 -163,11 +166,7 @@@ static struct mmc_blk_data *mmc_blk_get
  
  static inline int mmc_get_devidx(struct gendisk *disk)
  {
 -      int devmaj = MAJOR(disk_devt(disk));
 -      int devidx = MINOR(disk_devt(disk)) / perdev_minors;
 -
 -      if (!devmaj)
 -              devidx = disk->first_minor / perdev_minors;
 +      int devidx = disk->first_minor / perdev_minors;
        return devidx;
  }
  
@@@ -727,22 -728,18 +727,22 @@@ static int mmc_blk_cmd_error(struct req
                        req->rq_disk->disk_name, "timed out", name, status);
  
                /* If the status cmd initially failed, retry the r/w cmd */
 -              if (!status_valid)
 +              if (!status_valid) {
 +                      pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
                        return ERR_RETRY;
 -
 +              }
                /*
                 * If it was a r/w cmd crc error, or illegal command
                 * (eg, issued in wrong state) then retry - we should
                 * have corrected the state problem above.
                 */
 -              if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
 +              if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
 +                      pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
                        return ERR_RETRY;
 +              }
  
                /* Otherwise abort the command */
 +              pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
                return ERR_ABORT;
  
        default:
   * Otherwise we don't understand what happened, so abort.
   */
  static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
-       struct mmc_blk_request *brq, int *ecc_err)
+       struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
  {
        bool prev_cmd_status_valid = true;
        u32 status, stop_status = 0;
            (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
                *ecc_err = 1;
  
+       /* Flag General errors */
+       if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+               if ((status & R1_ERROR) ||
+                       (brq->stop.resp[0] & R1_ERROR)) {
+                       pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+                              req->rq_disk->disk_name, __func__,
+                              brq->stop.resp[0], status);
+                       *gen_err = 1;
+               }
        /*
         * Check the current card state.  If it is in some data transfer
         * mode, tell it to stop (and hopefully transition back to TRAN.)
                        return ERR_ABORT;
                if (stop_status & R1_CARD_ECC_FAILED)
                        *ecc_err = 1;
+               if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+                       if (stop_status & R1_ERROR) {
+                               pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+                                      req->rq_disk->disk_name, __func__,
+                                      stop_status);
+                               *gen_err = 1;
+                       }
        }
  
        /* Check for set block count errors */
@@@ -1005,12 -1019,9 +1022,12 @@@ retry
                        goto out;
        }
  
 -      if (mmc_can_sanitize(card))
 +      if (mmc_can_sanitize(card)) {
 +              trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_SANITIZE_START, 1, 0);
 +              trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
 +      }
  out_retry:
        if (err && !mmc_blk_reset(md, card->host, type))
                goto retry;
@@@ -1075,7 -1086,7 +1092,7 @@@ static int mmc_blk_err_check(struct mmc
                                                    mmc_active);
        struct mmc_blk_request *brq = &mq_mrq->brq;
        struct request *req = mq_mrq->req;
-       int ecc_err = 0;
+       int ecc_err = 0, gen_err = 0;
  
        /*
         * sbc.error indicates a problem with the set block count
         */
        if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
            brq->data.error) {
-               switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+               switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
                case ERR_RETRY:
                        return MMC_BLK_RETRY;
                case ERR_ABORT:
                u32 status;
                unsigned long timeout;
  
+               /* Check stop command response */
+               if (brq->stop.resp[0] & R1_ERROR) {
+                       pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+                              req->rq_disk->disk_name, __func__,
+                              brq->stop.resp[0]);
+                       gen_err = 1;
+               }
                timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
                do {
                        int err = get_card_status(card, &status, 5);
                                return MMC_BLK_CMD_ERR;
                        }
  
+                       if (status & R1_ERROR) {
+                               pr_err("%s: %s: general error sending status command, card status %#x\n",
+                                      req->rq_disk->disk_name, __func__,
+                                      status);
+                               gen_err = 1;
+                       }
                        /* Timeout if the device never becomes ready for data
                         * and never leaves the program state.
                         */
                         (R1_CURRENT_STATE(status) == R1_STATE_PRG));
        }
  
+       /* if general error occurs, retry the write operation. */
+       if (gen_err) {
+               pr_warn("%s: retrying write for general error\n",
+                               req->rq_disk->disk_name);
+               return MMC_BLK_RETRY;
+       }
        if (brq->data.error) {
                pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
                       req->rq_disk->disk_name, brq->data.error,
@@@ -1899,11 -1932,6 +1938,11 @@@ static int mmc_blk_issue_rq(struct mmc_
        struct mmc_host *host = card->host;
        unsigned long flags;
  
 +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 +      if (mmc_bus_needs_resume(card->host))
 +              mmc_resume_bus(card->host);
 +#endif
 +
        if (req && !mq->mqrq_prev->req)
                /* claim host only for the first request */
                mmc_claim_host(card->host);
@@@ -2026,7 -2054,6 +2065,7 @@@ static struct mmc_blk_data *mmc_blk_all
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = parent;
        set_disk_ro(md->disk, md->read_only || default_ro);
 +      md->disk->flags = GENHD_FL_EXT_DEVT;
        if (area_type & MMC_BLK_DATA_AREA_RPMB)
                md->disk->flags |= GENHD_FL_NO_PART_SCAN;
  
@@@ -2341,9 -2368,6 +2380,9 @@@ static int mmc_blk_probe(struct mmc_car
        mmc_set_drvdata(card, md);
        mmc_fixup_device(card, blk_fixups);
  
 +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 +      mmc_set_bus_resume_policy(card->host, 1);
 +#endif
        if (mmc_add_disk(md))
                goto out;
  
@@@ -2369,9 -2393,6 +2408,9 @@@ static void mmc_blk_remove(struct mmc_c
        mmc_release_host(card->host);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
 +#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
 +      mmc_set_bus_resume_policy(card->host, 0);
 +#endif
  }
  
  #ifdef CONFIG_PM
diff --combined drivers/net/tun.c
index a4f35b0b7c81396fff9b116105658e173984278a,9ef85fea1d1ef6d035ca75050b5b85daa8247742..0129e824db5b319505f81d116b1750bb451156a9
@@@ -1069,6 -1069,7 +1069,7 @@@ static ssize_t tun_get_user(struct tun_
        struct sk_buff *skb;
        size_t len = total_len, align = NET_SKB_PAD, linear;
        struct virtio_net_hdr gso = { 0 };
+       int good_linear;
        int offset = 0;
        int copylen;
        bool zerocopy = false;
                        return -EINVAL;
        }
  
+       good_linear = SKB_MAX_HEAD(align);
        if (msg_control) {
                /* There are 256 bytes to be copied in skb, so there is
                 * enough room for skb expand head in case it is used.
                 * The rest of the buffer is mapped from userspace.
                 */
                copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
+               if (copylen > good_linear)
+                       copylen = good_linear;
                linear = copylen;
                if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
                        zerocopy = true;
  
        if (!zerocopy) {
                copylen = len;
-               linear = gso.hdr_len;
+               if (gso.hdr_len > good_linear)
+                       linear = good_linear;
+               else
+                       linear = gso.hdr_len;
        }
  
        skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
@@@ -1888,12 -1896,6 +1896,12 @@@ static long __tun_chr_ioctl(struct fil
        int vnet_hdr_sz;
        int ret;
  
 +#ifdef CONFIG_ANDROID_PARANOID_NETWORK
 +      if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
 +              return -EPERM;
 +      }
 +#endif
 +
        if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
                if (copy_from_user(&ifr, argp, ifreq_len))
                        return -EFAULT;
diff --combined kernel/cgroup.c
index 90b9aaa4b289512426d510e35b4e2b508791a802,d0def7fc2848dd899281b2bb2527f05fa79ec7a9..cd1c303214f32c20672538283d1e053323abb65c
@@@ -91,6 -91,14 +91,14 @@@ static DEFINE_MUTEX(cgroup_mutex)
  
  static DEFINE_MUTEX(cgroup_root_mutex);
  
+ /*
+  * cgroup destruction makes heavy use of work items and there can be a lot
+  * of concurrent destructions.  Use a separate workqueue so that cgroup
+  * destruction work items don't end up filling up max_active of system_wq
+  * which may lead to deadlock.
+  */
+ static struct workqueue_struct *cgroup_destroy_wq;
  /*
   * Generate an array of cgroup subsystem pointers. At boot time, this is
   * populated with the built in subsystems, and modular subsystems are
@@@ -873,7 -881,7 +881,7 @@@ static void cgroup_free_rcu(struct rcu_
  {
        struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
  
-       schedule_work(&cgrp->free_work);
+       queue_work(cgroup_destroy_wq, &cgrp->free_work);
  }
  
  static void cgroup_diput(struct dentry *dentry, struct inode *inode)
@@@ -2098,24 -2106,6 +2106,24 @@@ out_free_group_list
        return retval;
  }
  
 +static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 +{
 +      struct cgroup_subsys *ss;
 +      int ret;
 +
 +      for_each_subsys(cgrp->root, ss) {
 +              if (ss->allow_attach) {
 +                      ret = ss->allow_attach(cgrp, tset);
 +                      if (ret)
 +                              return ret;
 +              } else {
 +                      return -EACCES;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
  /*
   * Find the task_struct of the task to attach by vpid and pass it along to the
   * function to attach either it or all tasks in its threadgroup. Will lock
@@@ -2147,18 -2137,9 +2155,18 @@@ retry_find_task
                if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
                    !uid_eq(cred->euid, tcred->uid) &&
                    !uid_eq(cred->euid, tcred->suid)) {
 -                      rcu_read_unlock();
 -                      ret = -EACCES;
 -                      goto out_unlock_cgroup;
 +                      /*
 +                       * if the default permission check fails, give each
 +                       * cgroup a chance to extend the permission check
 +                       */
 +                      struct cgroup_taskset tset = { };
 +                      tset.single.task = tsk;
 +                      tset.single.cgrp = cgrp;
 +                      ret = cgroup_allow_attach(cgrp, &tset);
 +                      if (ret) {
 +                              rcu_read_unlock();
 +                              goto out_unlock_cgroup;
 +                      }
                }
        } else
                tsk = current;
        return err;
  }
  
+ static int __init cgroup_wq_init(void)
+ {
+       /*
+        * There isn't much point in executing destruction path in
+        * parallel.  Good chunk is serialized with cgroup_mutex anyway.
+        * Use 1 for @max_active.
+        *
+        * We would prefer to do this in cgroup_init() above, but that
+        * is called before init_workqueues(): so leave this until after.
+        */
+       cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+       BUG_ON(!cgroup_destroy_wq);
+       return 0;
+ }
+ core_initcall(cgroup_wq_init);
  /*
   * proc_cgroup_show()
   *  - Print task's cgroup paths into seq_file, one line for each hierarchy
@@@ -5023,7 -5020,7 +5047,7 @@@ void __css_put(struct cgroup_subsys_sta
  
        v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
        if (v == 0)
-               schedule_work(&css->dput_work);
+               queue_work(cgroup_destroy_wq, &css->dput_work);
  }
  EXPORT_SYMBOL_GPL(__css_put);
  
diff --combined kernel/time/alarmtimer.c
index 3e5cba274475df2ab70edb04d9695dfe28a945aa,a8f5084dcde7d38a7a7d392655debbb9bd0a264f..d41fcb46a40311f09e68a73f97491e4ba2b462b2
@@@ -199,12 -199,6 +199,12 @@@ static enum hrtimer_restart alarmtimer_
  
  }
  
 +ktime_t alarm_expires_remaining(const struct alarm *alarm)
 +{
 +      struct alarm_base *base = &alarm_bases[alarm->type];
 +      return ktime_sub(alarm->node.expires, base->gettime());
 +}
 +
  #ifdef CONFIG_RTC_CLASS
  /**
   * alarmtimer_suspend - Suspend time callback
@@@ -311,7 -305,7 +311,7 @@@ void alarm_init(struct alarm *alarm, en
  }
  
  /**
 - * alarm_start - Sets an alarm to fire
 + * alarm_start - Sets an absolute alarm to fire
   * @alarm: ptr to alarm to set
   * @start: time to run the alarm
   */
@@@ -330,31 -324,6 +330,31 @@@ int alarm_start(struct alarm *alarm, kt
        return ret;
  }
  
 +/**
 + * alarm_start_relative - Sets a relative alarm to fire
 + * @alarm: ptr to alarm to set
 + * @start: time relative to now to run the alarm
 + */
 +int alarm_start_relative(struct alarm *alarm, ktime_t start)
 +{
 +      struct alarm_base *base = &alarm_bases[alarm->type];
 +
 +      start = ktime_add(start, base->gettime());
 +      return alarm_start(alarm, start);
 +}
 +
 +void alarm_restart(struct alarm *alarm)
 +{
 +      struct alarm_base *base = &alarm_bases[alarm->type];
 +      unsigned long flags;
 +
 +      spin_lock_irqsave(&base->lock, flags);
 +      hrtimer_set_expires(&alarm->timer, alarm->node.expires);
 +      hrtimer_restart(&alarm->timer);
 +      alarmtimer_enqueue(base, alarm);
 +      spin_unlock_irqrestore(&base->lock, flags);
 +}
 +
  /**
   * alarm_try_to_cancel - Tries to cancel an alarm timer
   * @alarm: ptr to alarm to be canceled
@@@ -425,12 -394,6 +425,12 @@@ u64 alarm_forward(struct alarm *alarm, 
        return overrun;
  }
  
 +u64 alarm_forward_now(struct alarm *alarm, ktime_t interval)
 +{
 +      struct alarm_base *base = &alarm_bases[alarm->type];
 +
 +      return alarm_forward(alarm, base->gettime(), interval);
 +}
  
  
  
@@@ -482,7 -445,7 +482,7 @@@ static int alarm_clock_getres(const clo
        clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
  
        if (!alarmtimer_get_rtcdev())
-               return -ENOTSUPP;
+               return -EINVAL;
  
        return hrtimer_get_res(baseid, tp);
  }
@@@ -499,7 -462,7 +499,7 @@@ static int alarm_clock_get(clockid_t wh
        struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
  
        if (!alarmtimer_get_rtcdev())
-               return -ENOTSUPP;
+               return -EINVAL;
  
        *tp = ktime_to_timespec(base->gettime());
        return 0;
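
The alarmtimer hunks add relative helpers (alarm_expires_remaining(), alarm_start_relative(), alarm_restart(), alarm_forward_now()) on top of the absolute alarm_start(), and switch the clock_getres/clock_get paths to -EINVAL when no RTC device is available. A small in-kernel sketch of the relative form; the names are placeholders:

    #include <linux/alarmtimer.h>
    #include <linux/ktime.h>

    static struct alarm example_alarm;

    static enum alarmtimer_restart example_alarm_fn(struct alarm *alarm,
                                                    ktime_t now)
    {
            /* fires roughly 5s after example_arm(); ALARM_BOOTTIME can wake from suspend */
            return ALARMTIMER_NORESTART;
    }

    static void example_arm(void)
    {
            alarm_init(&example_alarm, ALARM_BOOTTIME, example_alarm_fn);
            alarm_start_relative(&example_alarm, ktime_set(5, 0));
    }
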
diff --combined kernel/workqueue.c
index 36108647386ce0c2d5536236ea18c052295d56f9,68086a34b8efb76c5de8bdd6260b7d223f1c6e0a..917fbdea97f737153d1496ba2389ae4e6a206616
@@@ -272,15 -272,6 +272,15 @@@ static cpumask_var_t *wq_numa_possible_
  static bool wq_disable_numa;
  module_param_named(disable_numa, wq_disable_numa, bool, 0444);
  
 +/* see the comment above the definition of WQ_POWER_EFFICIENT */
 +#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
 +static bool wq_power_efficient = true;
 +#else
 +static bool wq_power_efficient;
 +#endif
 +
 +module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 +
  static bool wq_numa_enabled;          /* unbound NUMA affinity enabled */
  
  /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
@@@ -304,6 -295,9 +304,9 @@@ static DEFINE_HASHTABLE(unbound_pool_ha
  /* I: attributes used when instantiating standard unbound pools on demand */
  static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
  
+ /* I: attributes used when instantiating ordered pools on demand */
+ static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
  struct workqueue_struct *system_wq __read_mostly;
  EXPORT_SYMBOL(system_wq);
  struct workqueue_struct *system_highpri_wq __read_mostly;
@@@ -314,10 -308,6 +317,10 @@@ struct workqueue_struct *system_unbound
  EXPORT_SYMBOL_GPL(system_unbound_wq);
  struct workqueue_struct *system_freezable_wq __read_mostly;
  EXPORT_SYMBOL_GPL(system_freezable_wq);
 +struct workqueue_struct *system_power_efficient_wq __read_mostly;
 +EXPORT_SYMBOL_GPL(system_power_efficient_wq);
 +struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 +EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
  
  static int worker_thread(void *__worker);
  static void copy_workqueue_attrs(struct workqueue_attrs *to,
@@@ -4072,7 -4062,7 +4075,7 @@@ out_unlock
  static int alloc_and_link_pwqs(struct workqueue_struct *wq)
  {
        bool highpri = wq->flags & WQ_HIGHPRI;
-       int cpu;
+       int cpu, ret;
  
        if (!(wq->flags & WQ_UNBOUND)) {
                wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
                        mutex_unlock(&wq->mutex);
                }
                return 0;
+       } else if (wq->flags & __WQ_ORDERED) {
+               ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
+               /* there should only be single pwq for ordering guarantee */
+               WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
+                             wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+                    "ordering guarantee broken for workqueue %s\n", wq->name);
+               return ret;
        } else {
                return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
        }
@@@ -4120,10 -4117,6 +4130,10 @@@ struct workqueue_struct *__alloc_workqu
        struct workqueue_struct *wq;
        struct pool_workqueue *pwq;
  
 +      /* see the comment above the definition of WQ_POWER_EFFICIENT */
 +      if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
 +              flags |= WQ_UNBOUND;
 +
        /* allocate wq and format name */
        if (flags & WQ_UNBOUND)
                tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
@@@ -5007,13 -5000,23 +5017,23 @@@ static int __init init_workqueues(void
                }
        }
  
-       /* create default unbound wq attrs */
+       /* create default unbound and ordered wq attrs */
        for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
                struct workqueue_attrs *attrs;
  
                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
                attrs->nice = std_nice[i];
                unbound_std_wq_attrs[i] = attrs;
+               /*
+                * An ordered wq should have only one pwq as ordering is
+                * guaranteed by max_active which is enforced by pwqs.
+                * Turn off NUMA so that dfl_pwq is used for all nodes.
+                */
+               BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+               attrs->nice = std_nice[i];
+               attrs->no_numa = true;
+               ordered_wq_attrs[i] = attrs;
        }
  
        system_wq = alloc_workqueue("events", 0, 0);
                                            WQ_UNBOUND_MAX_ACTIVE);
        system_freezable_wq = alloc_workqueue("events_freezable",
                                              WQ_FREEZABLE, 0);
 +      system_power_efficient_wq = alloc_workqueue("events_power_efficient",
 +                                            WQ_POWER_EFFICIENT, 0);
 +      system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
 +                                            WQ_FREEZABLE | WQ_POWER_EFFICIENT,
 +                                            0);
        BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
 -             !system_unbound_wq || !system_freezable_wq);
 +             !system_unbound_wq || !system_freezable_wq ||
 +             !system_power_efficient_wq ||
 +             !system_freezable_power_efficient_wq);
        return 0;
  }
  early_initcall(init_workqueues);
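
The workqueue hunks carry two changes: WQ_POWER_EFFICIENT queues (including the new system_power_efficient_wq and system_freezable_power_efficient_wq) become unbound when CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y or workqueue.power_efficient is set, trading cache locality for letting idle CPUs stay asleep, and __WQ_ORDERED queues now use dedicated single-pwq ordered attrs. A minimal sketch of a driver opting into the power-efficient flag; all names are placeholders:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct workqueue_struct *example_wq;

    static void example_fn(struct work_struct *work)
    {
            /* deferred work that does not care which CPU it runs on */
    }
    static DECLARE_DELAYED_WORK(example_work, example_fn);

    static int example_setup(void)
    {
            /* unbound when power-efficient mode is on, per-cpu otherwise */
            example_wq = alloc_workqueue("example", WQ_POWER_EFFICIENT, 0);
            if (!example_wq)
                    return -ENOMEM;
            queue_delayed_work(example_wq, &example_work, HZ);
            return 0;
    }

Drivers that do not need a private queue can queue work on system_power_efficient_wq instead; blk_dev_init() above takes the private-queue route by adding the flag to kblockd.
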
diff --combined mm/mmap.c
index e90411b9ebe58342d2c7e50ca08473a6b7f6ed6e,8f87b14c796839a99851af83de6ee15f48f0b16d..9aa554b7e620e1c0596a45035fbf0ebc3372dd0e
+++ b/mm/mmap.c
@@@ -893,8 -893,7 +893,8 @@@ again:                     remove_next = 1 + (end > next-
   * per-vma resources, so we don't attempt to merge those.
   */
  static inline int is_mergeable_vma(struct vm_area_struct *vma,
 -                      struct file *file, unsigned long vm_flags)
 +                      struct file *file, unsigned long vm_flags,
 +                      const char __user *anon_name)
  {
        if (vma->vm_flags ^ vm_flags)
                return 0;
                return 0;
        if (vma->vm_ops && vma->vm_ops->close)
                return 0;
 +      if (vma_get_anon_name(vma) != anon_name)
 +              return 0;
        return 1;
  }
  
@@@ -934,10 -931,9 +934,10 @@@ static inline int is_mergeable_anon_vma
   */
  static int
  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 -      struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 +      struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
 +      const char __user *anon_name)
  {
 -      if (is_mergeable_vma(vma, file, vm_flags) &&
 +      if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                if (vma->vm_pgoff == vm_pgoff)
                        return 1;
   */
  static int
  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 -      struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
 +      struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
 +      const char __user *anon_name)
  {
 -      if (is_mergeable_vma(vma, file, vm_flags) &&
 +      if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
            is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
                pgoff_t vm_pglen;
                vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
  }
  
  /*
 - * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 - * whether that can be merged with its predecessor or its successor.
 - * Or both (it neatly fills a hole).
 + * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
 + * figure out whether that can be merged with its predecessor or its
 + * successor.  Or both (it neatly fills a hole).
   *
   * In most cases - when called for mmap, brk or mremap - [addr,end) is
   * certain not to be mapped by the time vma_merge is called; but when
@@@ -1000,8 -995,7 +1000,8 @@@ struct vm_area_struct *vma_merge(struc
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
                        struct anon_vma *anon_vma, struct file *file,
 -                      pgoff_t pgoff, struct mempolicy *policy)
 +                      pgoff_t pgoff, struct mempolicy *policy,
 +                      const char __user *anon_name)
  {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
        struct vm_area_struct *area, *next;
         */
        if (prev && prev->vm_end == addr &&
                        mpol_equal(vma_policy(prev), policy) &&
 -                      can_vma_merge_after(prev, vm_flags,
 -                                              anon_vma, file, pgoff)) {
 +                      can_vma_merge_after(prev, vm_flags, anon_vma,
 +                                              file, pgoff, anon_name)) {
                /*
                 * OK, it can.  Can we now merge in the successor as well?
                 */
                if (next && end == next->vm_start &&
                                mpol_equal(policy, vma_policy(next)) &&
 -                              can_vma_merge_before(next, vm_flags,
 -                                      anon_vma, file, pgoff+pglen) &&
 +                              can_vma_merge_before(next, vm_flags, anon_vma,
 +                                              file, pgoff+pglen, anon_name) &&
                                is_mergeable_anon_vma(prev->anon_vma,
                                                      next->anon_vma, NULL)) {
                                                        /* cases 1, 6 */
         */
        if (next && end == next->vm_start &&
                        mpol_equal(policy, vma_policy(next)) &&
 -                      can_vma_merge_before(next, vm_flags,
 -                                      anon_vma, file, pgoff+pglen)) {
 +                      can_vma_merge_before(next, vm_flags, anon_vma,
 +                                      file, pgoff+pglen, anon_name)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
                        err = vma_adjust(prev, prev->vm_start,
                                addr, prev->vm_pgoff, NULL);
@@@ -1525,8 -1519,7 +1525,8 @@@ munmap_back
        /*
         * Can we just expand an old mapping?
         */
 -      vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
 +      vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
 +                      NULL, NULL);
        if (vma)
                goto out;
  
@@@ -1860,7 -1853,7 +1860,7 @@@ arch_get_unmapped_area(struct file *fil
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
  
-       if (len > TASK_SIZE)
+       if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;
  
        if (flags & MAP_FIXED)
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
+               if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
@@@ -1908,7 -1901,7 +1908,7 @@@ arch_get_unmapped_area_topdown(struct f
        struct vm_unmapped_area_info info;
  
        /* requested length too big for entire address space */
-       if (len > TASK_SIZE)
+       if (len > TASK_SIZE - mmap_min_addr)
                return -ENOMEM;
  
        if (flags & MAP_FIXED)
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
+               if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
                                (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
  
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
-       info.low_limit = PAGE_SIZE;
+       info.low_limit = max(PAGE_SIZE, mmap_min_addr);
        info.high_limit = mm->mmap_base;
        info.align_mask = 0;
        addr = vm_unmapped_area(&info);
@@@ -2670,7 -2663,7 +2670,7 @@@ static unsigned long do_brk(unsigned lo
  
        /* Can we just expand an old private anonymous mapping? */
        vma = vma_merge(mm, prev, addr, addr + len, flags,
 -                                      NULL, NULL, pgoff, NULL);
 +                                      NULL, NULL, pgoff, NULL, NULL);
        if (vma)
                goto out;
  
@@@ -2828,8 -2821,7 +2828,8 @@@ struct vm_area_struct *copy_vma(struct 
        if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
                return NULL;    /* should never get here */
        new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
 -                      vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
 +                      vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
 +                      vma_get_anon_name(vma));
        if (new_vma) {
                /*
                 * Source vma may have been merged into new_vma
diff --combined mm/mprotect.c
index 5d80adfabec8770df951aa749641ff8718d19bbc,d4d5399c7aba23434664fa5b107409658781f13b..ea4a0f32f8e18261cd8cf5cb53543bdaad1da4ff
@@@ -135,6 -135,7 +135,7 @@@ static inline unsigned long change_pmd_
        pmd_t *pmd;
        unsigned long next;
        unsigned long pages = 0;
+       unsigned long nr_huge_updates = 0;
        bool all_same_node;
  
        pmd = pmd_offset(pud, addr);
                                split_huge_page_pmd(vma, addr, pmd);
                        else if (change_huge_pmd(vma, pmd, addr, newprot,
                                                 prot_numa)) {
-                               pages++;
+                               pages += HPAGE_PMD_NR;
+                               nr_huge_updates++;
                                continue;
                        }
                        /* fall through */
                        change_pmd_protnuma(vma->vm_mm, addr, pmd);
        } while (pmd++, addr = next, addr != end);
  
+       if (nr_huge_updates)
+               count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
        return pages;
  }
  
@@@ -271,8 -276,7 +276,8 @@@ mprotect_fixup(struct vm_area_struct *v
         */
        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *pprev = vma_merge(mm, *pprev, start, end, newflags,
 -                      vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
 +                      vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
 +                      vma_get_anon_name(vma));
        if (*pprev) {
                vma = *pprev;
                goto success;
diff --combined mm/vmstat.c
index b916a43a6b37e17e065655bdfb4a854e850bfd58,10bbb5427a6d8dc8e99fab6f43f1184c6cee7703..6d9bace4e5893c39e1998154b94bc1acc4b7c085
@@@ -14,7 -14,6 +14,7 @@@
  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/cpu.h>
 +#include <linux/cpumask.h>
  #include <linux/vmstat.h>
  #include <linux/sched.h>
  #include <linux/math64.h>
@@@ -433,12 -432,11 +433,12 @@@ EXPORT_SYMBOL(dec_zone_page_state)
   * with the global counters. These could cause remote node cache line
   * bouncing and will have to be only done when necessary.
   */
 -void refresh_cpu_vm_stats(int cpu)
 +bool refresh_cpu_vm_stats(int cpu)
  {
        struct zone *zone;
        int i;
        int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
 +      bool vm_activity = false;
  
        for_each_populated_zone(zone) {
                struct per_cpu_pageset *p;
                if (p->expire)
                        continue;
  
 -              if (p->pcp.count)
 +              if (p->pcp.count) {
 +                      vm_activity = true;
                        drain_zone_pages(zone, &p->pcp);
 +              }
  #endif
        }
  
        for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 -              if (global_diff[i])
 +              if (global_diff[i]) {
                        atomic_long_add(global_diff[i], &vm_stat[i]);
 +                      vm_activity = true;
 +              }
 +
 +      return vm_activity;
 +
  }
  
  /*
@@@ -788,6 -779,7 +788,7 @@@ const char * const vmstat_text[] = 
  
  #ifdef CONFIG_NUMA_BALANCING
        "numa_pte_updates",
+       "numa_huge_pte_updates",
        "numa_hint_faults",
        "numa_hint_faults_local",
        "numa_pages_migrated",
@@@ -1183,72 -1175,22 +1184,72 @@@ static const struct file_operations pro
  #ifdef CONFIG_SMP
  static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
  int sysctl_stat_interval __read_mostly = HZ;
 +static struct cpumask vmstat_off_cpus;
 +struct delayed_work vmstat_monitor_work;
  
 -static void vmstat_update(struct work_struct *w)
 +static inline bool need_vmstat(int cpu)
  {
 -      refresh_cpu_vm_stats(smp_processor_id());
 -      schedule_delayed_work(&__get_cpu_var(vmstat_work),
 -              round_jiffies_relative(sysctl_stat_interval));
 +      struct zone *zone;
 +      int i;
 +
 +      for_each_populated_zone(zone) {
 +              struct per_cpu_pageset *p;
 +
 +              p = per_cpu_ptr(zone->pageset, cpu);
 +
 +              for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 +                      if (p->vm_stat_diff[i])
 +                              return true;
 +
 +              if (zone_to_nid(zone) != numa_node_id() && p->pcp.count)
 +                      return true;
 +      }
 +
 +      return false;
  }
  
 -static void __cpuinit start_cpu_timer(int cpu)
 +static void vmstat_update(struct work_struct *w);
 +
 +static void start_cpu_timer(int cpu)
  {
        struct delayed_work *work = &per_cpu(vmstat_work, cpu);
  
 -      INIT_DEFERRABLE_WORK(work, vmstat_update);
 +      cpumask_clear_cpu(cpu, &vmstat_off_cpus);
        schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
  }
  
 +static void __cpuinit setup_cpu_timer(int cpu)
 +{
 +      struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 +
 +      INIT_DEFERRABLE_WORK(work, vmstat_update);
 +      start_cpu_timer(cpu);
 +}
 +
 +static void vmstat_update_monitor(struct work_struct *w)
 +{
 +      int cpu;
 +
 +      for_each_cpu_and(cpu, &vmstat_off_cpus, cpu_online_mask)
 +              if (need_vmstat(cpu))
 +                      start_cpu_timer(cpu);
 +
 +      queue_delayed_work(system_unbound_wq, &vmstat_monitor_work,
 +              round_jiffies_relative(sysctl_stat_interval));
 +}
 +
 +
 +static void vmstat_update(struct work_struct *w)
 +{
 +      int cpu = smp_processor_id();
 +
 +      if (likely(refresh_cpu_vm_stats(cpu)))
 +              schedule_delayed_work(&__get_cpu_var(vmstat_work),
 +                              round_jiffies_relative(sysctl_stat_interval));
 +      else
 +              cpumask_set_cpu(cpu, &vmstat_off_cpus);
 +}
 +
  /*
   * Use the cpu notifier to insure that the thresholds are recalculated
   * when necessary.
@@@ -1263,19 -1205,17 +1264,19 @@@ static int __cpuinit vmstat_cpuup_callb
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                refresh_zone_stat_thresholds();
 -              start_cpu_timer(cpu);
 +              setup_cpu_timer(cpu);
                node_set_state(cpu_to_node(cpu), N_CPU);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
 -              cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
 -              per_cpu(vmstat_work, cpu).work.func = NULL;
 +              if (!cpumask_test_cpu(cpu, &vmstat_off_cpus)) {
 +                      cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
 +                      per_cpu(vmstat_work, cpu).work.func = NULL;
 +              }
                break;
        case CPU_DOWN_FAILED:
        case CPU_DOWN_FAILED_FROZEN:
 -              start_cpu_timer(cpu);
 +              setup_cpu_timer(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
@@@ -1298,14 -1238,8 +1299,14 @@@ static int __init setup_vmstat(void
  
        register_cpu_notifier(&vmstat_notifier);
  
 +      INIT_DEFERRABLE_WORK(&vmstat_monitor_work,
 +                              vmstat_update_monitor);
 +      queue_delayed_work(system_unbound_wq,
 +                              &vmstat_monitor_work,
 +                              round_jiffies_relative(HZ));
 +
        for_each_online_cpu(cpu)
 -              start_cpu_timer(cpu);
 +              setup_cpu_timer(cpu);
  #endif
  #ifdef CONFIG_PROC_FS
        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
diff --combined net/bluetooth/af_bluetooth.c
index a44c12c2de4a22346fcac21b7362773e029d175a,6629cdc134dc1a439a044027d6826bd6dd4b56f3..f7c36826f3f4d70129375c2935f624110e26d2d0
  #include <net/bluetooth/bluetooth.h>
  #include <linux/proc_fs.h>
  
 +#ifndef CONFIG_BT_SOCK_DEBUG
 +#undef  BT_DBG
 +#define BT_DBG(D...)
 +#endif
 +
  #define VERSION "2.16"
  
  /* Bluetooth sockets */
@@@ -108,40 -103,11 +108,40 @@@ void bt_sock_unregister(int proto
  }
  EXPORT_SYMBOL(bt_sock_unregister);
  
 +#ifdef CONFIG_PARANOID_NETWORK
 +static inline int current_has_bt_admin(void)
 +{
 +      return !current_euid();
 +}
 +
 +static inline int current_has_bt(void)
 +{
 +      return current_has_bt_admin();
 +}
 +# else
 +static inline int current_has_bt_admin(void)
 +{
 +      return 1;
 +}
 +
 +static inline int current_has_bt(void)
 +{
 +      return 1;
 +}
 +#endif
 +
  static int bt_sock_create(struct net *net, struct socket *sock, int proto,
                          int kern)
  {
        int err;
  
 +      if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
 +                      proto == BTPROTO_L2CAP) {
 +              if (!current_has_bt())
 +                      return -EPERM;
 +      } else if (!current_has_bt_admin())
 +              return -EPERM;
 +
        if (net != &init_net)
                return -EAFNOSUPPORT;
  
@@@ -255,8 -221,6 +255,6 @@@ int bt_sock_recvmsg(struct kiocb *iocb
        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;
  
-       msg->msg_namelen = 0;
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
                if (sk->sk_shutdown & RCV_SHUTDOWN)
@@@ -321,8 -285,6 +319,6 @@@ int bt_sock_stream_recvmsg(struct kioc
        if (flags & MSG_OOB)
                return -EOPNOTSUPP;
  
-       msg->msg_namelen = 0;
        BT_DBG("sk %p size %zu", sk, size);
  
        lock_sock(sk);
diff --combined net/bluetooth/sco.c
index 91c184f9528e2f7f421c8c29b0aa8453add7ddc6,2bb1d3a5e76b3dcc17722813e3fa1f587a8ffae1..3178c7b4a17148f30fa811d80053afb8309bdb3a
@@@ -158,7 -158,6 +158,7 @@@ static int sco_connect(struct sock *sk
  {
        bdaddr_t *src = &bt_sk(sk)->src;
        bdaddr_t *dst = &bt_sk(sk)->dst;
 +      __u16 pkt_type = sco_pi(sk)->pkt_type;
        struct sco_conn *conn;
        struct hci_conn *hcon;
        struct hci_dev  *hdev;
  
        if (lmp_esco_capable(hdev) && !disable_esco)
                type = ESCO_LINK;
 -      else
 +      else {
                type = SCO_LINK;
 +              pkt_type &= SCO_ESCO_MASK;
 +      }
  
 -      hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
 -                         HCI_AT_NO_BONDING);
 +      hcon = hci_connect(hdev, type, pkt_type, dst, BDADDR_BREDR,
 +                         BT_SECURITY_LOW, HCI_AT_NO_BONDING);
        if (IS_ERR(hcon)) {
                err = PTR_ERR(hcon);
                goto done;
@@@ -448,21 -445,17 +448,21 @@@ static int sco_sock_create(struct net *
        return 0;
  }
  
 -static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 +static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
  {
 -      struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
 +      struct sockaddr_sco sa;
        struct sock *sk = sock->sk;
 -      int err = 0;
 +      int len, err = 0;
  
 -      BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
 +      BT_DBG("sk %p %pMR", sk, &sa.sco_bdaddr);
  
        if (!addr || addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
  
 +      memset(&sa, 0, sizeof(sa));
 +      len = min_t(unsigned int, sizeof(sa), alen);
 +      memcpy(&sa, addr, len);
 +
        lock_sock(sk);
  
        if (sk->sk_state != BT_OPEN) {
                goto done;
        }
  
 -      bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
 +      bacpy(&bt_sk(sk)->src, &sa.sco_bdaddr);
 +      sco_pi(sk)->pkt_type = sa.sco_pkt_type;
  
        sk->sk_state = BT_BOUND;
  
@@@ -487,34 -479,26 +487,34 @@@ done
  
  static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
  {
 -      struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
        struct sock *sk = sock->sk;
 -      int err;
 +      struct sockaddr_sco sa;
 +      int len, err;
  
        BT_DBG("sk %p", sk);
  
 -      if (alen < sizeof(struct sockaddr_sco) ||
 -          addr->sa_family != AF_BLUETOOTH)
 +      if (!addr || addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
  
 -      if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
 -              return -EBADFD;
 -
 -      if (sk->sk_type != SOCK_SEQPACKET)
 -              return -EINVAL;
 +      memset(&sa, 0, sizeof(sa));
 +      len = min_t(unsigned int, sizeof(sa), alen);
 +      memcpy(&sa, addr, len);
  
        lock_sock(sk);
  
 +      if (sk->sk_type != SOCK_SEQPACKET) {
 +              err = -EINVAL;
 +              goto done;
 +      }
 +
 +      if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
 +              err = -EBADFD;
 +              goto done;
 +      }
 +
        /* Set destination address and psm */
 -      bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
 +      bacpy(&bt_sk(sk)->dst, &sa.sco_bdaddr);
 +      sco_pi(sk)->pkt_type = sa.sco_pkt_type;
  
        err = sco_connect(sk);
        if (err)
@@@ -638,7 -622,6 +638,7 @@@ static int sco_sock_getname(struct sock
                bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
        else
                bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
 +      sa->sco_pkt_type = sco_pi(sk)->pkt_type;
  
        return 0;
  }
@@@ -717,7 -700,6 +717,6 @@@ static int sco_sock_recvmsg(struct kioc
            test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
                sco_conn_defer_accept(pi->conn->hcon, 0);
                sk->sk_state = BT_CONFIG;
-               msg->msg_namelen = 0;
  
                release_sock(sk);
                return 0;
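
[Editor's note] Both sco_sock_bind() and sco_sock_connect() above now copy the user-supplied sockaddr into a zeroed local struct, bounded by min_t(), instead of dereferencing the caller's buffer in place. A minimal sketch of that defensive-copy idiom; the struct and helper below are stand-ins, not the sco code.

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <linux/socket.h>

	struct example_addr {			/* stand-in for sockaddr_sco */
		sa_family_t	family;
		unsigned char	payload[8];
	};

	static void copy_addr_safely(struct example_addr *dst,
				     const struct sockaddr *src, int alen)
	{
		int len = min_t(unsigned int, sizeof(*dst), alen);

		memset(dst, 0, sizeof(*dst));	/* unsupplied tail stays zeroed */
		memcpy(dst, src, len);		/* never read past a short addr */
	}
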
diff --combined net/ipv4/tcp.c
index ce657df38cba4f805e035cfdb036a645b17e649e,1a2e249cef4913be893cc66903f6a1cb01c21821..78411dad59efd58d0cdc96612b585c387150f179
  #include <linux/crypto.h>
  #include <linux/time.h>
  #include <linux/slab.h>
 +#include <linux/uid_stat.h>
  
  #include <net/icmp.h>
  #include <net/inet_common.h>
  #include <net/tcp.h>
  #include <net/xfrm.h>
  #include <net/ip.h>
 +#include <net/ip6_route.h>
 +#include <net/ipv6.h>
 +#include <net/transp_v6.h>
  #include <net/netdma.h>
  #include <net/sock.h>
  
@@@ -811,12 -807,6 +811,6 @@@ static unsigned int tcp_xmit_size_goal(
                xmit_size_goal = min_t(u32, gso_size,
                                       sk->sk_gso_max_size - 1 - hlen);
  
-               /* TSQ : try to have at least two segments in flight
-                * (one in NIC TX ring, another in Qdisc)
-                */
-               xmit_size_goal = min_t(u32, xmit_size_goal,
-                                      sysctl_tcp_limit_output_bytes >> 1);
                xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
  
                /* We try hard to avoid divides here */
@@@ -1247,9 -1237,6 +1241,9 @@@ out
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle);
        release_sock(sk);
 +
 +      if (copied + copied_syn)
 +              uid_stat_tcp_snd(current_uid(), copied + copied_syn);
        return copied + copied_syn;
  
  do_fault:
@@@ -1554,7 -1541,6 +1548,7 @@@ int tcp_read_sock(struct sock *sk, read
        if (copied > 0) {
                tcp_recv_skb(sk, seq, &offset);
                tcp_cleanup_rbuf(sk, copied);
 +              uid_stat_tcp_rcv(current_uid(), copied);
        }
        return copied;
  }
@@@ -1959,9 -1945,6 +1953,9 @@@ skip_copy
        tcp_cleanup_rbuf(sk, copied);
  
        release_sock(sk);
 +
 +      if (copied > 0)
 +              uid_stat_tcp_rcv(current_uid(), copied);
        return copied;
  
  out:
  
  recv_urg:
        err = tcp_recv_urg(sk, msg, len, flags);
 +      if (err > 0)
 +              uid_stat_tcp_rcv(current_uid(), err);
        goto out;
  
  recv_sndq:
@@@ -2918,6 -2899,7 +2912,7 @@@ struct sk_buff *tcp_tso_segment(struct 
        netdev_features_t features)
  {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
+       unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
-                       /* {tcp|sock}_wfree() use exact truesize accounting :
-                        * sum(skb->truesize) MUST be exactly be gso_skb->truesize
-                        * So we account mss bytes of 'true size' for each segment.
-                        * The last segment will contain the remaining.
-                        */
-                       skb->truesize = mss;
-                       gso_skb->truesize -= mss;
+                       sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);
        if (copy_destructor) {
                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
-               swap(gso_skb->truesize, skb->truesize);
+               sum_truesize += skb->truesize;
+               atomic_add(sum_truesize - gso_skb->truesize,
+                          &skb->sk->sk_wmem_alloc);
        }
  
        delta = htonl(oldlen + (skb->tail - skb->transport_header) +
@@@ -3487,107 -3465,3 +3478,107 @@@ void __init tcp_init(void
  
        tcp_tasklet_init();
  }
 +
 +static int tcp_is_local(struct net *net, __be32 addr) {
 +      struct rtable *rt;
 +      struct flowi4 fl4 = { .daddr = addr };
 +      rt = ip_route_output_key(net, &fl4);
 +      if (IS_ERR_OR_NULL(rt))
 +              return 0;
 +      return rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK);
 +}
 +
 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 +static int tcp_is_local6(struct net *net, struct in6_addr *addr) {
 +      struct rt6_info *rt6 = rt6_lookup(net, addr, addr, 0, 0);
 +      return rt6 && rt6->dst.dev && (rt6->dst.dev->flags & IFF_LOOPBACK);
 +}
 +#endif
 +
 +/*
 + * tcp_nuke_addr - destroy all sockets bound to the given local address.
 + * If the local address is the unspecified address (0.0.0.0 or ::), destroy
 + * all sockets whose local addresses are not configured.
 + */
 +int tcp_nuke_addr(struct net *net, struct sockaddr *addr)
 +{
 +      int family = addr->sa_family;
 +      unsigned int bucket;
 +
 +      struct in_addr *in;
 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 +      struct in6_addr *in6;
 +#endif
 +      if (family == AF_INET) {
 +              in = &((struct sockaddr_in *)addr)->sin_addr;
 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 +      } else if (family == AF_INET6) {
 +              in6 = &((struct sockaddr_in6 *)addr)->sin6_addr;
 +#endif
 +      } else {
 +              return -EAFNOSUPPORT;
 +      }
 +
 +      for (bucket = 0; bucket < tcp_hashinfo.ehash_mask; bucket++) {
 +              struct hlist_nulls_node *node;
 +              struct sock *sk;
 +              spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, bucket);
 +
 +restart:
 +              spin_lock_bh(lock);
 +              sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[bucket].chain) {
 +                      struct inet_sock *inet = inet_sk(sk);
 +
 +                      if (sysctl_ip_dynaddr && sk->sk_state == TCP_SYN_SENT)
 +                              continue;
 +                      if (sock_flag(sk, SOCK_DEAD))
 +                              continue;
 +
 +                      if (family == AF_INET) {
 +                              __be32 s4 = inet->inet_rcv_saddr;
 +                              if (s4 == LOOPBACK4_IPV6)
 +                                      continue;
 +
 +                              if (in->s_addr != s4 &&
 +                                  !(in->s_addr == INADDR_ANY &&
 +                                    !tcp_is_local(net, s4)))
 +                                      continue;
 +                      }
 +
 +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 +                      if (family == AF_INET6) {
 +                              struct in6_addr *s6;
 +                              if (!inet->pinet6)
 +                                      continue;
 +
 +                              s6 = &inet->pinet6->rcv_saddr;
 +                              if (ipv6_addr_type(s6) == IPV6_ADDR_MAPPED)
 +                                      continue;
 +
 +                              if (!ipv6_addr_equal(in6, s6) &&
 +                                  !(ipv6_addr_equal(in6, &in6addr_any) &&
 +                                    !tcp_is_local6(net, s6)))
 +                              continue;
 +                      }
 +#endif
 +
 +                      sock_hold(sk);
 +                      spin_unlock_bh(lock);
 +
 +                      local_bh_disable();
 +                      bh_lock_sock(sk);
 +                      sk->sk_err = ETIMEDOUT;
 +                      sk->sk_error_report(sk);
 +
 +                      tcp_done(sk);
 +                      bh_unlock_sock(sk);
 +                      local_bh_enable();
 +                      sock_put(sk);
 +
 +                      goto restart;
 +              }
 +              spin_unlock_bh(lock);
 +      }
 +
 +      return 0;
 +}
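
[Editor's note] tcp_nuke_addr() above walks the established hash, marks matching sockets with ETIMEDOUT and tears them down with tcp_done(). A hedged usage sketch for the IPv4 case follows; the wrapper and the direct call are illustrative only (in Android trees the function is normally reached through an ioctl path, not called like this).

	#include <linux/in.h>
	#include <linux/socket.h>
	#include <net/net_namespace.h>

	extern int tcp_nuke_addr(struct net *net, struct sockaddr *addr);

	static int nuke_unconfigured_v4_sockets(void)
	{
		struct sockaddr_in sin = {
			.sin_family	= AF_INET,
			.sin_addr	= { .s_addr = htonl(INADDR_ANY) },
		};

		/* 0.0.0.0 means: reset every socket bound to an address that
		 * is not configured on any interface. */
		return tcp_nuke_addr(&init_net, (struct sockaddr *)&sin);
	}
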
diff --combined net/unix/af_unix.c
index e64bbcf5fb2cac509150c4081f70ec75c7652db9,8664ad0d5797328fd96f2a76f4ebd221ec36f4b8..6c66e8d4c7156b1db45e74b27624f9a4b0550690
  #include <linux/mount.h>
  #include <net/checksum.h>
  #include <linux/security.h>
 +#include <linux/freezer.h>
  
  struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
  EXPORT_SYMBOL_GPL(unix_socket_table);
@@@ -1762,7 -1761,6 +1762,6 @@@ static void unix_copy_addr(struct msghd
  {
        struct unix_sock *u = unix_sk(sk);
  
-       msg->msg_namelen = 0;
        if (u->addr) {
                msg->msg_namelen = u->addr->len;
                memcpy(msg->msg_name, u->addr->name, u->addr->len);
@@@ -1786,8 -1784,6 +1785,6 @@@ static int unix_dgram_recvmsg(struct ki
        if (flags&MSG_OOB)
                goto out;
  
-       msg->msg_namelen = 0;
        err = mutex_lock_interruptible(&u->readlock);
        if (err) {
                err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
@@@ -1890,7 -1886,7 +1887,7 @@@ static long unix_stream_data_wait(struc
  
                set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
                unix_state_unlock(sk);
 -              timeo = schedule_timeout(timeo);
 +              timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
@@@ -1927,8 -1923,6 +1924,6 @@@ static int unix_stream_recvmsg(struct k
        target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
        timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
  
-       msg->msg_namelen = 0;
        /* Lock the socket to prevent queue disordering
         * while sleeps in memcpy_tomsg
         */
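
[Editor's note] unix_stream_data_wait() above swaps schedule_timeout() for freezable_schedule_timeout(), so a reader blocked on stream data no longer stalls suspend. A minimal sketch of a freezer-friendly timed wait, assuming the freezer helpers carried by this tree; names and the wait-queue argument are illustrative.

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <linux/freezer.h>

	static long wait_for_data(wait_queue_head_t *wq, long timeo)
	{
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		/* the freezable variant lets the freezer make progress while
		 * this task sleeps, instead of counting it as unfreezable */
		timeo = freezable_schedule_timeout(timeo);
		finish_wait(wq, &wait);
		return timeo;
	}
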
diff --combined net/wireless/scan.c
index 41b0f96a933f0e385edc2660c7e33c0d5b630bc4,81019ee3ddc8c8a71955322d193238962edc05b4..4db2177a69ea1f684d537b54256b8bf17b8a1707
@@@ -55,7 -55,7 +55,7 @@@
   * also linked into the probe response struct.
   */
  
 -#define IEEE80211_SCAN_RESULT_EXPIRE  (30 * HZ)
 +#define IEEE80211_SCAN_RESULT_EXPIRE  (3 * HZ)
  
  static void bss_free(struct cfg80211_internal_bss *bss)
  {
@@@ -253,10 -253,10 +253,10 @@@ void __cfg80211_sched_scan_results(stru
        rdev = container_of(wk, struct cfg80211_registered_device,
                            sched_scan_results_wk);
  
-       request = rdev->sched_scan_req;
        mutex_lock(&rdev->sched_scan_mtx);
  
+       request = rdev->sched_scan_req;
        /* we don't have sched_scan_req anymore if the scan is stopping */
        if (request) {
                if (request->flags & NL80211_SCAN_FLAG_FLUSH) {