/*
 * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/mmu_notifier.h>
#include <linux/amd-iommu.h>
#include <linux/mm_types.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include "amd_iommu_types.h"
#include "amd_iommu_proto.h"
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");

#define MAX_DEVICES		0x10000
#define PRI_QUEUE_SIZE		512
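/*
 * Per-PASID state: one instance exists for every PASID bound to a
 * process address space.  Lifetime is managed by "count"; waiters on
 * "wq" are woken when the last reference is dropped.
 */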
struct pasid_state {
	struct list_head list;			/* For global state-list */
	atomic_t count;				/* Reference count */
	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
	struct mm_struct *mm;			/* mm_struct for the faults */
	struct mmu_notifier mn;			/* mmu_notifier handle */
	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
	struct device_state *device_state;	/* Link to our device_state */
	int pasid;				/* PASID index */
	bool invalid;				/* Used during setup and
						   teardown of the pasid */
	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifier_count */
	wait_queue_head_t wq;			/* To wait for count == 0 */
};
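/*
 * Per-device state, looked up by PCI device ID.  The "states" pointer
 * is the root of a shallow radix tree mapping a PASID to its
 * pasid_state.
 */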
struct device_state {
	struct list_head list;
	u16 devid;
	atomic_t count;
	struct pci_dev *pdev;
	struct pasid_state **states;
	struct iommu_domain *domain;
	int pasid_levels;
	int max_pasids;
	amd_iommu_invalid_ppr_cb inv_ppr_cb;
	amd_iommu_invalidate_ctx inv_ctx_cb;
	spinlock_t lock;
	wait_queue_head_t wq;
};
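/*
 * One queued unit of work per peripheral page request (PPR): carries
 * everything do_fault() needs to resolve the faulting address.
 */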
struct fault {
	struct work_struct work;
	struct device_state *dev_state;
	struct pasid_state *state;
	u64 address;
	u16 pasid;
	u16 tag;
	u16 finish;
	u16 flags;
};
static LIST_HEAD(state_list);
static spinlock_t state_lock;

static struct workqueue_struct *iommu_wq;
/*
 * Empty page table - Used between
 * mmu_notifier_invalidate_range_start and
 * mmu_notifier_invalidate_range_end
 */
static u64 *empty_page_table;
static void free_pasid_states(struct device_state *dev_state);
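/*
 * Build the 16-bit PCI device ID: bus number in the high byte, devfn
 * in the low byte.
 */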
static u16 device_id(struct pci_dev *pdev)
{
	u16 devid;

	devid = pdev->bus->number;
	devid = (devid << 8) | pdev->devfn;

	return devid;
}
static struct device_state *__get_device_state(u16 devid)
{
	struct device_state *dev_state;

	list_for_each_entry(dev_state, &state_list, list) {
		if (dev_state->devid == devid)
			return dev_state;
	}

	return NULL;
}
static struct device_state *get_device_state(u16 devid)
{
	struct device_state *dev_state;
	unsigned long flags;

	spin_lock_irqsave(&state_lock, flags);
	dev_state = __get_device_state(devid);
	if (dev_state != NULL)
		atomic_inc(&dev_state->count);
	spin_unlock_irqrestore(&state_lock, flags);

	return dev_state;
}
static void free_device_state(struct device_state *dev_state)
{
	/*
	 * First detach device from domain - No more PRI requests will arrive
	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);

	/* Everything is down now, free the IOMMUv2 domain */
	iommu_domain_free(dev_state->domain);

	/* Finally get rid of the device-state */
	kfree(dev_state);
}
static void put_device_state(struct device_state *dev_state)
{
	if (atomic_dec_and_test(&dev_state->count))
		wake_up(&dev_state->wq);
}
static void put_device_state_wait(struct device_state *dev_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
	if (!atomic_dec_and_test(&dev_state->count))
		schedule();
	finish_wait(&dev_state->wq, &wait);

	free_device_state(dev_state);
}
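/*
 * PASID states live in a shallow radix tree: each level resolves nine
 * bits of the PASID, and dev_state->pasid_levels gives the number of
 * intermediate levels (0, 1 or 2) above the leaf table.
 */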
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
						  int pasid, bool alloc)
{
	struct pasid_state **root, **ptr;
	int level, index;

	level = dev_state->pasid_levels;
	root  = dev_state->states;

	while (true) {

		index = (pasid >> (9 * level)) & 0x1ff;
		ptr   = &root[index];

		if (level == 0)
			break;

		if (*ptr == NULL) {
			if (!alloc)
				return NULL;

			*ptr = (void *)get_zeroed_page(GFP_ATOMIC);
			if (*ptr == NULL)
				return NULL;
		}

		root   = (struct pasid_state **)*ptr;
		level -= 1;
	}

	return ptr;
}
static int set_pasid_state(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	ret = -ENOMEM;
	if (ptr == NULL)
		goto out_unlock;

	ret = -EBUSY;
	if (*ptr != NULL)
		goto out_unlock;

	*ptr = pasid_state;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
static void clear_pasid_state(struct device_state *dev_state, int pasid)
{
	struct pasid_state **ptr;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, true);

	if (ptr == NULL)
		goto out_unlock;

	*ptr = NULL;

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);
}
static struct pasid_state *get_pasid_state(struct device_state *dev_state,
					   int pasid)
{
	struct pasid_state **ptr, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev_state->lock, flags);
	ptr = __get_pasid_state_ptr(dev_state, pasid, false);

	if (ptr == NULL)
		goto out_unlock;

	ret = *ptr;
	if (ret)
		atomic_inc(&ret->count);

out_unlock:
	spin_unlock_irqrestore(&dev_state->lock, flags);

	return ret;
}
static void free_pasid_state(struct pasid_state *pasid_state)
{
	kfree(pasid_state);
}
static void put_pasid_state(struct pasid_state *pasid_state)
{
	if (atomic_dec_and_test(&pasid_state->count))
		wake_up(&pasid_state->wq);
}
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

	if (!atomic_dec_and_test(&pasid_state->count))
		schedule();

	finish_wait(&pasid_state->wq, &wait);
	free_pasid_state(pasid_state);
}
static void unbind_pasid(struct pasid_state *pasid_state)
{
	struct iommu_domain *domain;

	domain = pasid_state->device_state->domain;

	/*
	 * Mark pasid_state as invalid; no more faults will be added to the
	 * work queue after this is visible everywhere.
	 */
	pasid_state->invalid = true;

	/* Make sure this is visible */
	smp_wmb();

	/* After this the device/pasid can't access the mm anymore */
	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);

	/* Make sure no more pending faults are in the queue */
	flush_workqueue(iommu_wq);
}
static void free_pasid_states_level1(struct pasid_state **tbl)
{
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		free_page((unsigned long)tbl[i]);
	}
}
static void free_pasid_states_level2(struct pasid_state **tbl)
{
	struct pasid_state **ptr;
	int i;

	for (i = 0; i < 512; ++i) {
		if (tbl[i] == NULL)
			continue;

		ptr = (struct pasid_state **)tbl[i];
		free_pasid_states_level1(ptr);
	}
}
static void free_pasid_states(struct device_state *dev_state)
{
	struct pasid_state *pasid_state;
	int i;

	for (i = 0; i < dev_state->max_pasids; ++i) {
		pasid_state = get_pasid_state(dev_state, i);
		if (pasid_state == NULL)
			continue;

		put_pasid_state(pasid_state);

		/*
		 * This will call the mn_release function and
		 * unbind the PASID
		 */
		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

		put_pasid_state_wait(pasid_state); /* Reference taken in
						      amd_iommu_bind_pasid */

		/* Drop reference taken in amd_iommu_bind_pasid */
		put_device_state(dev_state);
	}

	if (dev_state->pasid_levels == 2)
		free_pasid_states_level2(dev_state->states);
	else if (dev_state->pasid_levels == 1)
		free_pasid_states_level1(dev_state->states);
	else if (dev_state->pasid_levels != 0)
		BUG();

	free_page((unsigned long)dev_state->states);
}
static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
{
	return container_of(mn, struct pasid_state, mn);
}
static void __mn_flush_page(struct mmu_notifier *mn,
			    unsigned long address)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
}
static int mn_clear_flush_young(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start,
				unsigned long end)
{
	for (; start < end; start += PAGE_SIZE)
		__mn_flush_page(mn, start);

	return 0;
}
static void mn_invalidate_page(struct mmu_notifier *mn,
			       struct mm_struct *mm,
			       unsigned long address)
{
	__mn_flush_page(mn, address);
}
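/*
 * While an invalidation is in flight, the PASID's GCR3 table is
 * pointed at empty_page_table so device accesses fault instead of
 * racing with the unmap.  The nesting counter restores the real page
 * tables only when the outermost invalidate_range_end arrives.
 */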
static void mn_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(empty_page_table));
	}
	pasid_state->mmu_notifier_count += 1;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void mn_invalidate_range_end(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    unsigned long start, unsigned long end)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;

	pasid_state = mn_to_state(mn);
	dev_state   = pasid_state->device_state;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->mmu_notifier_count -= 1;
	if (pasid_state->mmu_notifier_count == 0) {
		amd_iommu_domain_set_gcr3(dev_state->domain,
					  pasid_state->pasid,
					  __pa(pasid_state->mm->pgd));
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	bool run_inv_ctx_cb;

	might_sleep();

	pasid_state    = mn_to_state(mn);
	dev_state      = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;

	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

	unbind_pasid(pasid_state);
}
static struct mmu_notifier_ops iommu_mn = {
	.release		= mn_release,
	.clear_flush_young	= mn_clear_flush_young,
	.invalidate_page	= mn_invalidate_page,
	.invalidate_range_start	= mn_invalidate_range_start,
	.invalidate_range_end	= mn_invalidate_range_end,
};
static void set_pri_tag_status(struct pasid_state *pasid_state,
			       u16 tag, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	pasid_state->pri[tag].status = status;
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
static void finish_pri_tag(struct device_state *dev_state,
			   struct pasid_state *pasid_state,
			   u16 tag)
{
	unsigned long flags;

	spin_lock_irqsave(&pasid_state->lock, flags);
	if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
	    pasid_state->pri[tag].finish) {
		amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
				       pasid_state->pri[tag].status, tag);
		pasid_state->pri[tag].finish = false;
		pasid_state->pri[tag].status = PPR_SUCCESS;
	}
	spin_unlock_irqrestore(&pasid_state->lock, flags);
}
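/*
 * A fault could not be resolved by handle_mm_fault().  Let the device
 * driver decide on the PRI response via its inv_ppr_cb; without a
 * callback the response defaults to INVALID.
 */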
static void handle_fault_error(struct fault *fault)
{
	int status;

	if (!fault->dev_state->inv_ppr_cb) {
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		return;
	}

	status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
					      fault->pasid,
					      fault->address,
					      fault->flags);
	switch (status) {
	case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
		set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
		break;
	case AMD_IOMMU_INV_PRI_RSP_INVALID:
		set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
		break;
	case AMD_IOMMU_INV_PRI_RSP_FAIL:
		set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
		break;
	default:
		BUG();
	}
}
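/*
 * Workqueue handler: resolve one peripheral page request by faulting
 * the page into the bound mm, then answer the PRI tag.
 */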
static void do_fault(struct work_struct *work)
{
	struct fault *fault = container_of(work, struct fault, work);
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	u64 address;
	int ret, write;

	write = !!(fault->flags & PPR_FAULT_WRITE);

	mm      = fault->state->mm;
	address = fault->address;

	down_read(&mm->mmap_sem);
	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start) {
		/* failed to get a vma in the right range */
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;
	}

	ret = handle_mm_fault(mm, vma, address, write);
	if (ret & VM_FAULT_ERROR) {
		/* failed to service fault */
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;
	}

	up_read(&mm->mmap_sem);

out:
	finish_pri_tag(fault->dev_state, fault->state, fault->tag);

	put_pasid_state(fault->state);

	kfree(fault);
}
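/*
 * Notifier-chain entry point for peripheral page requests: validate
 * the device and PASID, mark the PRI tag in flight, and queue the
 * fault to iommu_wq so it can be resolved in process context.
 */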
static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
{
	struct amd_iommu_fault *iommu_fault;
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	unsigned long flags;
	struct fault *fault;
	bool finish;
	u16 tag;
	int ret;

	iommu_fault = data;
	tag         = iommu_fault->tag & 0x1ff;
	finish      = (iommu_fault->tag >> 9) & 1;

	ret = NOTIFY_DONE;
	dev_state = get_device_state(iommu_fault->device_id);
	if (dev_state == NULL)
		goto out;

	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
	if (pasid_state == NULL || pasid_state->invalid) {
		/* We know the device but not the PASID -> send INVALID */
		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
				       PPR_INVALID, tag);
		goto out_drop_state;
	}

	spin_lock_irqsave(&pasid_state->lock, flags);
	atomic_inc(&pasid_state->pri[tag].inflight);
	if (finish)
		pasid_state->pri[tag].finish = true;
	spin_unlock_irqrestore(&pasid_state->lock, flags);

	fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
	if (fault == NULL) {
		/* We are OOM - send success and let the device re-fault */
		finish_pri_tag(dev_state, pasid_state, tag);
		goto out_drop_state;
	}

	fault->dev_state = dev_state;
	fault->address   = iommu_fault->address;
	fault->state     = pasid_state;
	fault->tag       = tag;
	fault->finish    = finish;
	fault->pasid     = iommu_fault->pasid;
	fault->flags     = iommu_fault->flags;
	INIT_WORK(&fault->work, do_fault);

	queue_work(iommu_wq, &fault->work);

	ret = NOTIFY_OK;

out_drop_state:
	if (ret != NOTIFY_OK && pasid_state)
		put_pasid_state(pasid_state);

	put_device_state(dev_state);

out:
	return ret;
}
static struct notifier_block ppr_nb = {
	.notifier_call = ppr_notifier,
};
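/*
 * Bind a PASID on the given PCI device to the address space of "task".
 * On success the device can resolve its page faults against that mm
 * until amd_iommu_unbind_pasid() is called or the mm goes away.
 */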
int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
			 struct task_struct *task)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	struct mm_struct *mm;
	u16 devid;
	int ret;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid     = device_id(pdev);
	dev_state = get_device_state(devid);

	if (dev_state == NULL)
		return -EINVAL;

	ret = -EINVAL;
	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	ret = -ENOMEM;
	pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
	if (pasid_state == NULL)
		goto out;

	atomic_set(&pasid_state->count, 1);
	init_waitqueue_head(&pasid_state->wq);
	spin_lock_init(&pasid_state->lock);

	mm                        = get_task_mm(task);
	pasid_state->mm           = mm;
	pasid_state->device_state = dev_state;
	pasid_state->pasid        = pasid;
	pasid_state->invalid      = true; /* Mark as valid only if we are
					     done with setting up the pasid */
	pasid_state->mn.ops       = &iommu_mn;

	if (pasid_state->mm == NULL)
		goto out_free;

	mmu_notifier_register(&pasid_state->mn, mm);

	ret = set_pasid_state(dev_state, pasid_state, pasid);
	if (ret)
		goto out_unregister;

	ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
					__pa(pasid_state->mm->pgd));
	if (ret)
		goto out_clear_state;

	/* Now we are ready to handle faults */
	pasid_state->invalid = false;

	/*
	 * Drop the reference to the mm_struct here. We rely on the
	 * mmu_notifier release call-back to inform us when the mm
	 * is going away.
	 */
	mmput(mm);

	return 0;

out_clear_state:
	clear_pasid_state(dev_state, pasid);

out_unregister:
	mmu_notifier_unregister(&pasid_state->mn, mm);

out_free:
	mmput(mm);
	free_pasid_state(pasid_state);

out:
	put_device_state(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_bind_pasid);
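/*
 * Undo amd_iommu_bind_pasid(): tear down the PASID so it can be
 * re-used and drop the references taken at bind time.
 */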
void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
{
	struct pasid_state *pasid_state;
	struct device_state *dev_state;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);
	dev_state = get_device_state(devid);
	if (dev_state == NULL)
		return;

	if (pasid < 0 || pasid >= dev_state->max_pasids)
		goto out;

	pasid_state = get_pasid_state(dev_state, pasid);
	if (pasid_state == NULL)
		goto out;

	/*
	 * Drop reference taken here. We are safe because we still hold
	 * the reference taken in the amd_iommu_bind_pasid function.
	 */
	put_pasid_state(pasid_state);

	/* Clear the pasid state so that the pasid can be re-used */
	clear_pasid_state(dev_state, pasid_state->pasid);

	/*
	 * Call mmu_notifier_unregister to drop our reference
	 * to pasid_state->mm
	 */
	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

	put_pasid_state_wait(pasid_state); /* Reference taken in
					      amd_iommu_bind_pasid */
out:
	/* Drop reference taken in this function */
	put_device_state(dev_state);

	/* Drop reference taken in amd_iommu_bind_pasid */
	put_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_unbind_pasid);
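/*
 * Per-device setup: allocate the device_state, size the PASID table,
 * set up a direct-mapped IOMMUv2 domain for the device and publish
 * the state on the global list.
 */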
int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
{
	struct device_state *dev_state;
	unsigned long flags;
	int ret, tmp;
	u16 devid;

	might_sleep();

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	if (pasids <= 0 || pasids > (PASID_MASK + 1))
		return -EINVAL;

	devid = device_id(pdev);

	dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
	if (dev_state == NULL)
		return -ENOMEM;

	spin_lock_init(&dev_state->lock);
	init_waitqueue_head(&dev_state->wq);
	dev_state->pdev  = pdev;
	dev_state->devid = devid;

	tmp = pasids;
	for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
		dev_state->pasid_levels += 1;

	atomic_set(&dev_state->count, 1);
	dev_state->max_pasids = pasids;

	ret = -ENOMEM;
	dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
	if (dev_state->states == NULL)
		goto out_free_dev_state;

	dev_state->domain = iommu_domain_alloc(&pci_bus_type);
	if (dev_state->domain == NULL)
		goto out_free_states;

	amd_iommu_domain_direct_map(dev_state->domain);

	ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
	if (ret)
		goto out_free_domain;

	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
	if (ret != 0)
		goto out_free_domain;

	spin_lock_irqsave(&state_lock, flags);

	if (__get_device_state(devid) != NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		ret = -EBUSY;
		goto out_free_domain;
	}

	list_add_tail(&dev_state->list, &state_list);

	spin_unlock_irqrestore(&state_lock, flags);

	return 0;

out_free_domain:
	iommu_domain_free(dev_state->domain);

out_free_states:
	free_page((unsigned long)dev_state->states);

out_free_dev_state:
	kfree(dev_state);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_init_device);
void amd_iommu_free_device(struct pci_dev *pdev)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;

	if (!amd_iommu_v2_supported())
		return;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	dev_state = __get_device_state(devid);
	if (dev_state == NULL) {
		spin_unlock_irqrestore(&state_lock, flags);
		return;
	}

	list_del(&dev_state->list);

	spin_unlock_irqrestore(&state_lock, flags);

	/* Get rid of any remaining pasid states */
	free_pasid_states(dev_state);

	put_device_state_wait(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
				 amd_iommu_invalid_ppr_cb cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ppr_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
				    amd_iommu_invalidate_ctx cb)
{
	struct device_state *dev_state;
	unsigned long flags;
	u16 devid;
	int ret;

	if (!amd_iommu_v2_supported())
		return -ENODEV;

	devid = device_id(pdev);

	spin_lock_irqsave(&state_lock, flags);

	ret = -EINVAL;
	dev_state = __get_device_state(devid);
	if (dev_state == NULL)
		goto out_unlock;

	dev_state->inv_ctx_cb = cb;

	ret = 0;

out_unlock:
	spin_unlock_irqrestore(&state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
static int __init amd_iommu_v2_init(void)
{
	int ret;

	pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");

	if (!amd_iommu_v2_supported()) {
		pr_info("AMD IOMMUv2 functionality not available on this system\n");
		/*
		 * Load anyway to provide the symbols to other modules
		 * which may use AMD IOMMUv2 optionally.
		 */
		return 0;
	}

	spin_lock_init(&state_lock);

	ret = -ENOMEM;
	iommu_wq = create_workqueue("amd_iommu_v2");
	if (iommu_wq == NULL)
		goto out;

	ret = -ENOMEM;
	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
	if (empty_page_table == NULL)
		goto out_destroy_wq;

	amd_iommu_register_ppr_notifier(&ppr_nb);

	return 0;

out_destroy_wq:
	destroy_workqueue(iommu_wq);

out:
	return ret;
}
static void __exit amd_iommu_v2_exit(void)
{
	struct device_state *dev_state;
	int i;

	if (!amd_iommu_v2_supported())
		return;

	amd_iommu_unregister_ppr_notifier(&ppr_nb);

	flush_workqueue(iommu_wq);

	/*
	 * The loop below might call flush_workqueue(), so call
	 * destroy_workqueue() after it
	 */
	for (i = 0; i < MAX_DEVICES; ++i) {
		dev_state = get_device_state(i);

		if (dev_state == NULL)
			continue;

		WARN_ON_ONCE(1);

		put_device_state(dev_state);
		amd_iommu_free_device(dev_state->pdev);
	}

	destroy_workqueue(iommu_wq);

	free_page((unsigned long)empty_page_table);
}
module_init(amd_iommu_v2_init);
module_exit(amd_iommu_v2_exit);