Merge branch 'x86/trampoline' into x86/urgent
author: H. Peter Anvin <hpa@zytor.com>
Wed, 30 May 2012 19:11:26 +0000 (12:11 -0700)
committer: H. Peter Anvin <hpa@zytor.com>
Wed, 30 May 2012 19:11:32 +0000 (12:11 -0700)
x86/trampoline contains an urgent commit which is necessarily on a
newer baseline.

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
1  2 
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/mm/pat.c

index aaa056f31693a921457a7bf0918de99572192041,b772dd6ad45016e5e943ec37cafdef5fac03a152..b4180f425fb87f8a04367a937f38b518427001e3
@@@ -437,6 -437,14 +437,14 @@@ static inline void mce_gather_info(stru
                if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
                        m->ip = regs->ip;
                        m->cs = regs->cs;
+                       /*
+                        * When in VM86 mode make the cs look like ring 3
+                        * always. This is a lie, but it's better than passing
+                        * the additional vm86 bit around everywhere.
+                        */
+                       if (v8086_mode(regs))
+                               m->cs |= 3;
                }
                /* Use accurate RIP reporting if available. */
                if (rip_msr)
@@@ -641,16 -649,18 +649,18 @@@ EXPORT_SYMBOL_GPL(machine_check_poll)
   * Do a quick check if any of the events requires a panic.
   * This decides if we keep the events around or clear them.
   */
- static int mce_no_way_out(struct mce *m, char **msg)
+ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
  {
-       int i;
+       int i, ret = 0;
  
        for (i = 0; i < banks; i++) {
                m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
+               if (m->status & MCI_STATUS_VAL)
+                       __set_bit(i, validp);
                if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
-                       return 1;
+                       ret = 1;
        }
-       return 0;
+       return ret;
  }
  
  /*
@@@ -1013,6 -1023,7 +1023,7 @@@ void do_machine_check(struct pt_regs *r
         */
        int kill_it = 0;
        DECLARE_BITMAP(toclear, MAX_NR_BANKS);
+       DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
        char *msg = "Unknown";
  
        atomic_inc(&mce_entry);
        final = &__get_cpu_var(mces_seen);
        *final = m;
  
-       no_way_out = mce_no_way_out(&m, &msg);
+       memset(valid_banks, 0, sizeof(valid_banks));
+       no_way_out = mce_no_way_out(&m, &msg, valid_banks);
  
        barrier();
  
        order = mce_start(&no_way_out);
        for (i = 0; i < banks; i++) {
                __clear_bit(i, toclear);
+               if (!test_bit(i, valid_banks))
+                       continue;
                if (!mce_banks[i].ctl)
                        continue;
  
@@@ -1458,9 -1472,9 +1472,9 @@@ static int __cpuinit __mcheck_cpu_apply
                                 rdmsrl(msrs[i], val);
  
                                 /* CntP bit set? */
 -                               if (val & BIT(62)) {
 -                                       val &= ~BIT(62);
 -                                       wrmsrl(msrs[i], val);
 +                               if (val & BIT_64(62)) {
 +                                      val &= ~BIT_64(62);
 +                                      wrmsrl(msrs[i], val);
                                 }
                         }
  
diff --combined arch/x86/mm/pat.c
index bea6e573e02b1cdc2b35ee5db62daa9fd7929bd9,f11729fd019c6899e61e2989ed17a1619a46239d..3d68ef6d2266cb66b3d07c578191b80c5348e0e2
@@@ -158,47 -158,31 +158,47 @@@ static unsigned long pat_x_mtrr_type(u6
        return req_type;
  }
  
 +struct pagerange_state {
 +      unsigned long           cur_pfn;
 +      int                     ram;
 +      int                     not_ram;
 +};
 +
 +static int
 +pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
 +{
 +      struct pagerange_state *state = arg;
 +
 +      state->not_ram  |= initial_pfn > state->cur_pfn;
 +      state->ram      |= total_nr_pages > 0;
 +      state->cur_pfn   = initial_pfn + total_nr_pages;
 +
 +      return state->ram && state->not_ram;
 +}
 +
  static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
  {
 -      int ram_page = 0, not_rampage = 0;
 -      unsigned long page_nr;
 -
 -      for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
 -           ++page_nr) {
 -              /*
 -               * For legacy reasons, physical address range in the legacy ISA
 -               * region is tracked as non-RAM. This will allow users of
 -               * /dev/mem to map portions of legacy ISA region, even when
 -               * some of those portions are listed(or not even listed) with
 -               * different e820 types(RAM/reserved/..)
 -               */
 -              if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
 -                  page_is_ram(page_nr))
 -                      ram_page = 1;
 -              else
 -                      not_rampage = 1;
 -
 -              if (ram_page == not_rampage)
 -                      return -1;
 +      int ret = 0;
 +      unsigned long start_pfn = start >> PAGE_SHIFT;
 +      unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
 +      struct pagerange_state state = {start_pfn, 0, 0};
 +
 +      /*
 +       * For legacy reasons, physical address range in the legacy ISA
 +       * region is tracked as non-RAM. This will allow users of
 +       * /dev/mem to map portions of legacy ISA region, even when
 +       * some of those portions are listed(or not even listed) with
 +       * different e820 types(RAM/reserved/..)
 +       */
 +      if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
 +              start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
 +
 +      if (start_pfn < end_pfn) {
 +              ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
 +                              &state, pagerange_is_ram_callback);
        }
  
 -      return ram_page;
 +      return (ret > 0) ? -1 : (state.ram ? 1 : 0);
  }
  
  /*
@@@ -225,9 -209,8 +225,8 @@@ static int reserve_ram_pages_type(u64 s
                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
-                       printk(KERN_INFO "reserve_ram_pages_type failed "
-                               "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
-                               start, end, type, req_type);
+                       printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
+                               start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;
  
@@@ -330,9 -313,9 +329,9 @@@ int reserve_memtype(u64 start, u64 end
  
        err = rbt_memtype_check_insert(new, new_type);
        if (err) {
-               printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
-                      "track %s, req %s\n",
-                      start, end, cattr_name(new->type), cattr_name(req_type));
+               printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+                      start, end - 1,
+                      cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);
  
  
        spin_unlock(&memtype_lock);
  
-       dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
-               start, end, cattr_name(new->type), cattr_name(req_type),
+       dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
+               start, end - 1, cattr_name(new->type), cattr_name(req_type),
                new_type ? cattr_name(*new_type) : "-");
  
        return err;
@@@ -376,14 -359,14 +375,14 @@@ int free_memtype(u64 start, u64 end
        spin_unlock(&memtype_lock);
  
        if (!entry) {
-               printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
-                       current->comm, current->pid, start, end);
+               printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+                      current->comm, current->pid, start, end - 1);
                return -EINVAL;
        }
  
        kfree(entry);
  
-       dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
+       dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
  
        return 0;
  }
@@@ -507,9 -490,8 +506,8 @@@ static inline int range_is_allowed(unsi
  
        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
-                       printk(KERN_INFO
-               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
-                               current->comm, from, to);
+                       printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
+                               current->comm, from, to - 1);
                        return 0;
                }
                cursor += PAGE_SIZE;
@@@ -570,12 -552,11 +568,11 @@@ int kernel_map_sync_memtype(u64 base, u
                                size;
  
        if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
-               printk(KERN_INFO
-                       "%s:%d ioremap_change_attr failed %s "
-                       "for %Lx-%Lx\n",
+               printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
+                       "for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
                        cattr_name(flags),
-                       base, (unsigned long long)(base + size));
+                       base, (unsigned long long)(base + size-1));
                return -EINVAL;
        }
        return 0;
@@@ -607,12 -588,11 +604,11 @@@ static int reserve_pfn_range(u64 paddr
  
                flags = lookup_memtype(paddr);
                if (want_flags != flags) {
-                       printk(KERN_WARNING
-                       "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+                       printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
-                               (unsigned long long)(paddr + size),
+                               (unsigned long long)(paddr + size - 1),
                                cattr_name(flags));
                        *vma_prot = __pgprot((pgprot_val(*vma_prot) &
                                              (~_PAGE_CACHE_MASK)) |
                    !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
                        free_memtype(paddr, paddr + size);
                        printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
-                               " for %Lx-%Lx, got %s\n",
+                               " for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_flags),
                                (unsigned long long)paddr,
-                               (unsigned long long)(paddr + size),
+                               (unsigned long long)(paddr + size - 1),
                                cattr_name(flags));
                        return -EINVAL;
                }