Merge branch 'linus' into x86/urgent
author Ingo Molnar <mingo@elte.hu>
Wed, 17 Jun 2009 06:59:01 +0000 (08:59 +0200)
committer Ingo Molnar <mingo@elte.hu>
Wed, 17 Jun 2009 06:59:10 +0000 (08:59 +0200)
Merge reason: pull in latest to fix a bug in it.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
12 files changed:
arch/x86/boot/bioscall.S
arch/x86/include/asm/amd_iommu.h
arch/x86/include/asm/atomic_32.h
arch/x86/kernel/amd_iommu.c
arch/x86/kernel/amd_iommu_init.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/crash.c
arch/x86/kernel/efi.c
arch/x86/kernel/hpet.c
arch/x86/kernel/pci-dma.c
arch/x86/mm/fault.c
arch/x86/mm/init_64.c

index 507793739ea58f0e26104bd36d6f734cc60f91e1..1dfbf64e52a2b013d19c4551837db384e890d80a 100644 (file)
@@ -13,7 +13,7 @@
  * touching registers they shouldn't be.
  */
 
-       .code16
+       .code16gcc
        .text
        .globl  intcall
        .type   intcall, @function
index 262e02820049aa8cc92dca18e83afdcbb829a71c..bdf96f119f069a8615067b33dc7e0a28fc133df2 100644 (file)
@@ -29,9 +29,11 @@ extern void amd_iommu_detect(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_flush_all_domains(void);
 extern void amd_iommu_flush_all_devices(void);
+extern void amd_iommu_shutdown(void);
 #else
 static inline int amd_iommu_init(void) { return -ENODEV; }
 static inline void amd_iommu_detect(void) { }
+static inline void amd_iommu_shutdown(void) { }
 #endif
 
 #endif /* _ASM_X86_AMD_IOMMU_H */
index 8cb9c814e1203a0b7434018392e2ee01ef3ca199..2503d4e64c2a79a2aab7618e9c325aadb428e74d 100644 (file)
@@ -257,7 +257,7 @@ typedef struct {
 
 /**
  * atomic64_read - read atomic64 variable
- * @v: pointer of type atomic64_t
+ * @ptr: pointer of type atomic64_t
  *
  * Atomically reads the value of @v.
  * Doesn't imply a read memory barrier.
@@ -294,7 +294,6 @@ atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
  * atomic64_xchg - xchg atomic64 variable
  * @ptr:      pointer to type atomic64_t
  * @new_val:  value to assign
- * @old_val:  old value that was there
  *
  * Atomically xchgs the value of @ptr to @new_val and returns
  * the old value.
index 1c60554537c358ef37fa9352e5bfd624dadf3e1a..9372f0406ad4a51d654ce2a1dae5c41f7b3bf48e 100644 (file)
@@ -434,6 +434,16 @@ static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
        iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
 }
 
+/* Flush the whole IO/TLB for a given protection domain - including PDE */
+static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
+{
+       u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+       INC_STATS_COUNTER(domain_flush_single);
+
+       iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
+}
+
 /*
  * This function is used to flush the IO/TLB for a given protection domain
  * on every IOMMU in the system
@@ -1078,7 +1088,13 @@ static void attach_device(struct amd_iommu *iommu,
        amd_iommu_pd_table[devid] = domain;
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
+       /*
+        * We might boot into a crash-kernel here. The crashed kernel
+        * left the caches in the IOMMU dirty. So we have to flush
+        * here to evict all dirty stuff.
+        */
        iommu_queue_inv_dev_entry(iommu, devid);
+       iommu_flush_tlb_pde(iommu, domain->id);
 }
 
 /*
index 238989ec077df9e0b669c2f2a65d8b820a833510..10b2accd12ea5983d917b0646cf445f9c8fe8b93 100644 (file)
@@ -260,6 +260,14 @@ static void iommu_enable(struct amd_iommu *iommu)
 
 static void iommu_disable(struct amd_iommu *iommu)
 {
+       /* Disable command buffer */
+       iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+       /* Disable event logging and event interrupts */
+       iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
+       iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+
+       /* Disable IOMMU hardware itself */
        iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
 }
 
@@ -478,6 +486,10 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
        memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
                    &entry, sizeof(entry));
 
+       /* set head and tail to zero manually */
+       writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
+       writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
+
        iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
 }
 
@@ -1042,6 +1054,7 @@ static void enable_iommus(void)
        struct amd_iommu *iommu;
 
        for_each_iommu(iommu) {
+               iommu_disable(iommu);
                iommu_set_device_table(iommu);
                iommu_enable_command_buffer(iommu);
                iommu_enable_event_buffer(iommu);
@@ -1066,12 +1079,6 @@ static void disable_iommus(void)
 
 static int amd_iommu_resume(struct sys_device *dev)
 {
-       /*
-        * Disable IOMMUs before reprogramming the hardware registers.
-        * IOMMU is still enabled from the resume kernel.
-        */
-       disable_iommus();
-
        /* re-load the hardware */
        enable_iommus();
 
@@ -1079,8 +1086,8 @@ static int amd_iommu_resume(struct sys_device *dev)
         * we have to flush after the IOMMUs are enabled because a
         * disabled IOMMU will never execute the commands we send
         */
-       amd_iommu_flush_all_domains();
        amd_iommu_flush_all_devices();
+       amd_iommu_flush_all_domains();
 
        return 0;
 }
@@ -1273,6 +1280,11 @@ free:
        goto out;
 }
 
+void amd_iommu_shutdown(void)
+{
+       disable_iommus();
+}
+
 /****************************************************************************
  *
  * Early detect code. This code runs at IOMMU detection time in the DMA
index 3ffdcfa9abdf07accfa466518b8ac1adf16a9c05..5b9cb8839cae7c800c990e5c03e0d4b03e0ccc1b 100644 (file)
@@ -853,6 +853,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
        numa_add_cpu(smp_processor_id());
 #endif
+
+       /* Cap the iomem address space to what is addressable on all CPUs */
+       iomem_resource.end &= (1ULL << c->x86_phys_bits) - 1;
 }
 
 #ifdef CONFIG_X86_64
index ff958248e61d7d48965c7b61c3cfdf32bc551f23..5e409dc298a479d3f72e9a65990edb5fedc514da 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
+#include <asm/iommu.h>
 
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
@@ -103,5 +104,10 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 #ifdef CONFIG_HPET_TIMER
        hpet_disable();
 #endif
+
+#ifdef CONFIG_X86_64
+       pci_iommu_shutdown();
+#endif
+
        crash_save_cpu(regs, safe_smp_processor_id());
 }
index 1736acc4d7aa6cfc13bc8ebd0db0e2727a8ebf5e..96f7ac0bbf01e567ebf4082ec15b73b9de7a9d2d 100644 (file)
@@ -240,10 +240,35 @@ static void __init do_add_efi_memmap(void)
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
                int e820_type;
 
-               if (md->attribute & EFI_MEMORY_WB)
-                       e820_type = E820_RAM;
-               else
+               switch (md->type) {
+               case EFI_LOADER_CODE:
+               case EFI_LOADER_DATA:
+               case EFI_BOOT_SERVICES_CODE:
+               case EFI_BOOT_SERVICES_DATA:
+               case EFI_CONVENTIONAL_MEMORY:
+                       if (md->attribute & EFI_MEMORY_WB)
+                               e820_type = E820_RAM;
+                       else
+                               e820_type = E820_RESERVED;
+                       break;
+               case EFI_ACPI_RECLAIM_MEMORY:
+                       e820_type = E820_ACPI;
+                       break;
+               case EFI_ACPI_MEMORY_NVS:
+                       e820_type = E820_NVS;
+                       break;
+               case EFI_UNUSABLE_MEMORY:
+                       e820_type = E820_UNUSABLE;
+                       break;
+               default:
+                       /*
+                        * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
+                        * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
+                        * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
+                        */
                        e820_type = E820_RESERVED;
+                       break;
+               }
                e820_add_region(start, size, e820_type);
        }
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
index 81408b93f887116c7abf33f1ca79cf692ba97f62..dedc2bddf7a5bda84bee20adecb2e9f7d3fe615a 100644 (file)
@@ -510,7 +510,8 @@ static int hpet_setup_irq(struct hpet_dev *dev)
 {
 
        if (request_irq(dev->irq, hpet_interrupt_handler,
-                       IRQF_DISABLED|IRQF_NOBALANCING, dev->name, dev))
+                       IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
+                       dev->name, dev))
                return -1;
 
        disable_irq(dev->irq);
index 745579bc825687cdb5855b9d5e7a7a6cbd042239..328592fb6044c6a5be8b607d56f6ff317f72bff2 100644 (file)
@@ -290,6 +290,8 @@ static int __init pci_iommu_init(void)
 void pci_iommu_shutdown(void)
 {
        gart_iommu_shutdown();
+
+       amd_iommu_shutdown();
 }
 /* Must execute after PCI subsystem */
 fs_initcall(pci_iommu_init);
index c6acc632637417c193394da4881fa19112ace761..0482fa649738718ba5fa6903d45b1862510527e0 100644 (file)
@@ -951,11 +951,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
        tsk = current;
        mm = tsk->mm;
 
-       prefetchw(&mm->mmap_sem);
-
        /* Get the faulting address: */
        address = read_cr2();
 
+       prefetchw(&mm->mmap_sem);
+
        if (unlikely(kmmio_fault(regs, address)))
                return;
 
index 52bb9519bb86b4ec778d613939ea658adb1052a6..52e1bff6bfd0165f7d8fa4ad829d6a62cbf85f3d 100644 (file)
@@ -527,7 +527,7 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
        return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-unsigned long __init
+unsigned long __meminit
 kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)