/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitmap.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/exception.h>

#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING		(1 << 0)

#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING	(1 << 0)

/*
 * Collection structure - just an ID, and a redistributor address to
 * ping. We use one per CPU as a bag of interrupts assigned to this
 * CPU.
 */
struct its_collection {
	u64			target_address;
	u16			col_id;
};

/*
 * The ITS structure - contains most of the infrastructure, with the
 * msi_controller, the command queue, the collections, and the list of
 * devices writing to it.
 */
struct its_node {
	raw_spinlock_t		lock;
	struct list_head	entry;
	struct msi_controller	msi_chip;
	struct irq_domain	*domain;
	void __iomem		*base;
	unsigned long		phys_base;
	struct its_cmd_block	*cmd_base;
	struct its_cmd_block	*cmd_write;
	void			*tables[GITS_BASER_NR_REGS];
	struct its_collection	*collections;
	struct list_head	its_device_list;
	u64			flags;
	u32			ite_size;
};

#define ITS_ITT_ALIGN		SZ_256

struct event_lpi_map {
	unsigned long		*lpi_map;
	u16			*col_map;
	irq_hw_number_t		lpi_base;
	int			nr_lpis;
};

/*
 * The ITS view of a device - belongs to an ITS, a collection, owns an
 * interrupt translation table, and a list of interrupts.
 */
struct its_device {
	struct list_head	entry;
	struct its_node		*its;
	struct event_lpi_map	event_map;
	void			*itt;
	u32			nr_ites;
	u32			device_id;
};

static LIST_HEAD(its_nodes);
static DEFINE_SPINLOCK(its_lock);
static struct device_node *gic_root_node;
static struct rdists *gic_rdists;

#define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)

static struct its_collection *dev_event_to_col(struct its_device *its_dev,
					       u32 event)
{
	struct its_node *its = its_dev->its;

	return its->collections + its_dev->event_map.col_map[event];
}

/*
 * ITS command descriptors - parameters to be encoded in a command
 * block.
 */
struct its_cmd_desc {
	union {
		struct {
			struct its_device *dev;
			u32 event_id;
		} its_inv_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_int_cmd;

		struct {
			struct its_device *dev;
			int valid;
		} its_mapd_cmd;

		struct {
			struct its_collection *col;
			int valid;
		} its_mapc_cmd;

		struct {
			struct its_device *dev;
			u32 phys_id;
			u32 event_id;
		} its_mapvi_cmd;

		struct {
			struct its_device *dev;
			struct its_collection *col;
			u32 event_id;
		} its_movi_cmd;

		struct {
			struct its_device *dev;
			u32 event_id;
		} its_discard_cmd;

		struct {
			struct its_collection *col;
		} its_invall_cmd;
	};
};

/*
 * The ITS command block, which is what the ITS actually parses.
 */
struct its_cmd_block {
	u64	raw_cmd[4];
};

#define ITS_CMD_QUEUE_SZ		SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES	(ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))

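/*
 * Sizing note: each its_cmd_block is four u64s (32 bytes), so the
 * 64kB queue above holds 65536 / 32 = 2048 command slots.
 */
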
typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
						    struct its_cmd_desc *);

static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	cmd->raw_cmd[0] &= ~0xffUL;
	cmd->raw_cmd[0] |= cmd_nr;
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
	cmd->raw_cmd[0] |= ((u64)devid) << 32;
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	cmd->raw_cmd[1] &= ~0xffffffffUL;
	cmd->raw_cmd[1] |= id;
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	cmd->raw_cmd[1] &= 0xffffffffUL;
	cmd->raw_cmd[1] |= ((u64)phys_id) << 32;
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	cmd->raw_cmd[1] &= ~0x1fUL;
	cmd->raw_cmd[1] |= size & 0x1f;
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	cmd->raw_cmd[2] &= ~0xffffffffffffUL;
	cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL;
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	cmd->raw_cmd[2] &= ~(1UL << 63);
	cmd->raw_cmd[2] |= ((u64)!!valid) << 63;
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	cmd->raw_cmd[2] &= ~(0xffffffffUL << 16);
	cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16));
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	cmd->raw_cmd[2] &= ~0xffffUL;
	cmd->raw_cmd[2] |= col;
}

static inline void its_fixup_cmd(struct its_cmd_block *cmd)
{
	/* Let's fixup BE commands: the ITS always parses them as LE */
	cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
	cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
	cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
	cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}

static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	unsigned long itt_addr;
	u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);

	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);

	its_fixup_cmd(cmd);

	return NULL;
}

static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_MAPC);
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);

	its_fixup_cmd(cmd);

	return desc->its_mapc_cmd.col;
}

static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
						  struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_mapvi_cmd.dev,
			       desc->its_mapvi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPVI);
	its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
						 struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
						    struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
						struct its_cmd_desc *desc)
{
	struct its_collection *col;

	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return col;
}

static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
						   struct its_cmd_desc *desc)
{
	its_encode_cmd(cmd, GITS_CMD_INVALL);
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);

	its_fixup_cmd(cmd);

	return NULL;
}

static u64 its_cmd_ptr_to_offset(struct its_node *its,
				 struct its_cmd_block *ptr)
{
	return (ptr - its->cmd_base) * sizeof(*ptr);
}

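/*
 * GITS_CREADR and GITS_CWRITER expose queue positions as byte offsets
 * from the base of the command queue, hence the conversion from a
 * command slot pointer to a byte offset above.
 */
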
static int its_queue_full(struct its_node *its)
{
	int widx;
	int ridx;

	widx = its->cmd_write - its->cmd_base;
	ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);

	/* This is incredibly unlikely to happen, unless the ITS locks up. */
	if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
		return 1;

	return 0;
}

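/*
 * The check above follows the usual ring-buffer convention of keeping
 * one slot unused, so that a full queue (write + 1 == read) can be
 * distinguished from an empty one (write == read).
 */
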
static struct its_cmd_block *its_allocate_entry(struct its_node *its)
{
	struct its_cmd_block *cmd;
	u32 count = 1000000;	/* 1s! */

	while (its_queue_full(its)) {
		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue not draining\n");
			return NULL;
		}
		cpu_relax();
		udelay(1);
	}

	cmd = its->cmd_write++;

	/* Handle queue wrapping */
	if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
		its->cmd_write = its->cmd_base;

	return cmd;
}

static struct its_cmd_block *its_post_commands(struct its_node *its)
{
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);

	writel_relaxed(wr, its->base + GITS_CWRITER);

	return its->cmd_write;
}

static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
{
	/*
	 * Make sure the commands written to memory are observable by
	 * the ITS.
	 */
	if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
		__flush_dcache_area(cmd, sizeof(*cmd));
	else
		dsb(ishst);
}

static void its_wait_for_range_completion(struct its_node *its,
					  struct its_cmd_block *from,
					  struct its_cmd_block *to)
{
	u64 rd_idx, from_idx, to_idx;
	u32 count = 1000000;	/* 1s! */

	from_idx = its_cmd_ptr_to_offset(its, from);
	to_idx = its_cmd_ptr_to_offset(its, to);

	while (1) {
		rd_idx = readl_relaxed(its->base + GITS_CREADR);
		if (rd_idx >= to_idx || rd_idx < from_idx)
			break;

		count--;
		if (!count) {
			pr_err_ratelimited("ITS queue timeout\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

static void its_send_single_command(struct its_node *its,
				    its_cmd_builder_t builder,
				    struct its_cmd_desc *desc)
{
	struct its_cmd_block *cmd, *sync_cmd, *next_cmd;
	struct its_collection *sync_col;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	cmd = its_allocate_entry(its);
	if (!cmd) {		/* We're soooooo screwed... */
		pr_err_ratelimited("ITS can't allocate, dropping command\n");
		raw_spin_unlock_irqrestore(&its->lock, flags);
		return;
	}
	sync_col = builder(cmd, desc);
	its_flush_cmd(its, cmd);

	if (sync_col) {
		sync_cmd = its_allocate_entry(its);
		if (!sync_cmd) {
			pr_err_ratelimited("ITS can't SYNC, skipping\n");
			goto post;
		}
		its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
		its_encode_target(sync_cmd, sync_col->target_address);
		its_fixup_cmd(sync_cmd);
		its_flush_cmd(its, sync_cmd);
	}

post:
	next_cmd = its_post_commands(its);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	its_wait_for_range_completion(its, cmd, next_cmd);
}

static void its_send_inv(struct its_device *dev, u32 event_id)
{
	struct its_cmd_desc desc;

	desc.its_inv_cmd.dev = dev;
	desc.its_inv_cmd.event_id = event_id;

	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
}

static void its_send_mapd(struct its_device *dev, int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapd_cmd.dev = dev;
	desc.its_mapd_cmd.valid = !!valid;

	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
}

static void its_send_mapc(struct its_node *its, struct its_collection *col,
			  int valid)
{
	struct its_cmd_desc desc;

	desc.its_mapc_cmd.col = col;
	desc.its_mapc_cmd.valid = !!valid;

	its_send_single_command(its, its_build_mapc_cmd, &desc);
}

static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_mapvi_cmd.dev = dev;
	desc.its_mapvi_cmd.phys_id = irq_id;
	desc.its_mapvi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_mapvi_cmd, &desc);
}

static void its_send_movi(struct its_device *dev,
			  struct its_collection *col, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_movi_cmd.dev = dev;
	desc.its_movi_cmd.col = col;
	desc.its_movi_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
}

static void its_send_discard(struct its_device *dev, u32 id)
{
	struct its_cmd_desc desc;

	desc.its_discard_cmd.dev = dev;
	desc.its_discard_cmd.event_id = id;

	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
}

static void its_send_invall(struct its_node *its, struct its_collection *col)
{
	struct its_cmd_desc desc;

	desc.its_invall_cmd.col = col;

	its_send_single_command(its, its_build_invall_cmd, &desc);
}

/*
 * irqchip functions - assumes MSI, mostly.
 */
static inline u32 its_get_event_id(struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	return d->hwirq - its_dev->event_map.lpi_base;
}

static void lpi_set_config(struct irq_data *d, bool enable)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = d->hwirq;
	u32 id = its_get_event_id(d);
	u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192;

	if (enable)
		*cfg |= LPI_PROP_ENABLED;
	else
		*cfg &= ~LPI_PROP_ENABLED;

	/*
	 * Make the above write visible to the redistributors.
	 * And yes, we're flushing exactly: One. Single. Byte.
	 */
	if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
		__flush_dcache_area(cfg, sizeof(*cfg));
	else
		dsb(ishst);
	its_send_inv(its_dev, id);
}

static void its_mask_irq(struct irq_data *d)
{
	lpi_set_config(d, false);
}

static void its_unmask_irq(struct irq_data *d)
{
	lpi_set_config(d, true);
}

static void its_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(d->hwirq);
}

static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_collection *target_col;
	u32 id = its_get_event_id(d);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	target_col = &its_dev->its->collections[cpu];
	its_send_movi(its_dev, target_col, id);
	its_dev->event_map.col_map[id] = cpu;

	return IRQ_SET_MASK_OK_DONE;
}

static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	struct its_node *its;
	u64 addr;

	its = its_dev->its;
	addr = its->phys_base + GITS_TRANSLATER;

	msg->address_lo		= addr & ((1UL << 32) - 1);
	msg->address_hi		= addr >> 32;
	msg->data		= its_get_event_id(d);
}

static struct irq_chip its_irq_chip = {
	.name			= "ITS",
	.irq_mask		= its_mask_irq,
	.irq_unmask		= its_unmask_irq,
	.irq_eoi		= its_eoi_irq,
	.irq_set_affinity	= its_set_affinity,
	.irq_compose_msi_msg	= its_irq_compose_msi_msg,
};

static void its_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void its_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip its_msi_irq_chip = {
	.name			= "ITS-MSI",
	.irq_unmask		= its_unmask_msi_irq,
	.irq_mask		= its_mask_msi_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_write_msi_msg	= pci_msi_domain_write_msg,
};

/*
 * How we allocate LPIs:
 *
 * The GIC has id_bits bits for interrupt identifiers. From there, we
 * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
 * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
 * bits.
 *
 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
 */

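/*
 * Worked example: with id_bits = 16, that is (65536 - 8192) >> 5 =
 * 1792 possible chunk allocations of 32 LPIs each.
 */
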
#define IRQS_PER_CHUNK_SHIFT	5
#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)

static unsigned long *lpi_bitmap;
static u32 lpi_chunks;
static DEFINE_SPINLOCK(lpi_lock);

static int its_lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int its_chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

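/*
 * Round-trip example: LPI 8224 lives in chunk (8224 - 8192) >> 5 = 1,
 * and chunk 1 maps back to base LPI (1 << 5) + 8192 = 8224.
 */
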
static int its_lpi_init(u32 id_bits)
{
	lpi_chunks = its_lpi_to_chunk(1UL << id_bits);

	lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long),
			     GFP_KERNEL);
	if (!lpi_bitmap) {
		lpi_chunks = 0;
		return -ENOMEM;
	}

	pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
	return 0;
}

static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
{
	unsigned long *bitmap = NULL;
	int chunk_id;
	int nr_chunks;
	int i;

	nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);

	spin_lock(&lpi_lock);

	do {
		chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
						      0, nr_chunks, 0);
		if (chunk_id < lpi_chunks)
			break;

		nr_chunks--;
	} while (nr_chunks > 0);

	if (!nr_chunks)
		goto out;

	bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof(long),
			 GFP_ATOMIC);
	if (!bitmap)
		goto out;

	for (i = 0; i < nr_chunks; i++)
		set_bit(chunk_id + i, lpi_bitmap);

	*base = its_chunk_to_lpi(chunk_id);
	*nr_ids = nr_chunks * IRQS_PER_CHUNK;

out:
	spin_unlock(&lpi_lock);

	return bitmap;
}

static void its_lpi_free(struct event_lpi_map *map)
{
	int base = map->lpi_base;
	int nr_ids = map->nr_lpis;
	int lpi;

	spin_lock(&lpi_lock);

	for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
		int chunk = its_lpi_to_chunk(lpi);

		BUG_ON(chunk > lpi_chunks);
		if (test_bit(chunk, lpi_bitmap)) {
			clear_bit(chunk, lpi_bitmap);
		} else {
			pr_err("Bad LPI chunk %d\n", chunk);
		}
	}

	spin_unlock(&lpi_lock);

	kfree(map->lpi_map);
	kfree(map->col_map);
}

/*
 * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to
 * deal with (one configuration byte per interrupt). PENDBASE has to
 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
 */
#define LPI_PROPBASE_SZ		SZ_64K
#define LPI_PENDBASE_SZ		(LPI_PROPBASE_SZ / 8 + SZ_1K)

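/*
 * Sizing check: one pending bit for each of the 64K LPIs is
 * 65536 / 8 = 8kB, plus SZ_1K to cover the 8192 bits for interrupt
 * IDs 0-8191, i.e. LPI_PENDBASE_SZ works out to 9kB.
 */
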
/*
 * This is how many bits of ID we need, including the useless ones.
 */
#define LPI_NRBITS		ilog2(LPI_PROPBASE_SZ + SZ_8K)

#define LPI_PROP_DEFAULT_PRIO	0xa0

static int __init its_alloc_lpi_tables(void)
{
	phys_addr_t paddr;

	gic_rdists->prop_page = alloc_pages(GFP_NOWAIT,
					    get_order(LPI_PROPBASE_SZ));
	if (!gic_rdists->prop_page) {
		pr_err("Failed to allocate PROPBASE\n");
		return -ENOMEM;
	}

	paddr = page_to_phys(gic_rdists->prop_page);
	pr_info("GIC: using LPI property table @%pa\n", &paddr);

	/* Priority 0xa0, Group-1, disabled */
	memset(page_address(gic_rdists->prop_page),
	       LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
	       LPI_PROPBASE_SZ);

	/* Make sure the GIC will observe the written configuration */
	__flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ);

	return 0;
}

static const char *its_base_type_string[] = {
	[GITS_BASER_TYPE_DEVICE]	= "Devices",
	[GITS_BASER_TYPE_VCPU]		= "Virtual CPUs",
	[GITS_BASER_TYPE_CPU]		= "Physical CPUs",
	[GITS_BASER_TYPE_COLLECTION]	= "Interrupt Collections",
	[GITS_BASER_TYPE_RESERVED5]	= "Reserved (5)",
	[GITS_BASER_TYPE_RESERVED6]	= "Reserved (6)",
	[GITS_BASER_TYPE_RESERVED7]	= "Reserved (7)",
};

static void its_free_tables(struct its_node *its)
{
	int i;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		if (its->tables[i]) {
			free_page((unsigned long)its->tables[i]);
			its->tables[i] = NULL;
		}
	}
}

static int its_alloc_tables(struct its_node *its)
{
	int err;
	int i;
	int psz = SZ_64K;
	u64 shr = GITS_BASER_InnerShareable;
	u64 cache = GITS_BASER_WaWb;

	for (i = 0; i < GITS_BASER_NR_REGS; i++) {
		u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
		u64 type = GITS_BASER_TYPE(val);
		u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
		int order = get_order(psz);
		int alloc_size;
		u64 tmp;
		void *base;

		if (type == GITS_BASER_TYPE_NONE)
			continue;

		/*
		 * Allocate as many entries as required to fit the
		 * range of device IDs that the ITS can grok... The ID
		 * space being incredibly sparse, this results in a
		 * massive waste of memory.
		 *
		 * For other tables, only allocate a single page.
		 */
		if (type == GITS_BASER_TYPE_DEVICE) {
			u64 typer = readq_relaxed(its->base + GITS_TYPER);
			u32 ids = GITS_TYPER_DEVBITS(typer);

			/*
			 * 'order' was initialized earlier to the default page
			 * granule of the ITS. We can't have an allocation
			 * smaller than that. If the requested allocation
			 * is smaller, round up to the default page granule.
			 */
			order = max(get_order((1UL << ids) * entry_size),
				    order);
			if (order >= MAX_ORDER) {
				order = MAX_ORDER - 1;
				pr_warn("%s: Device Table too large, reduce its page order to %u\n",
					its->msi_chip.of_node->full_name, order);
			}
		}

		alloc_size = (1 << order) * PAGE_SIZE;
		base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (!base) {
			err = -ENOMEM;
			goto out_free;
		}

		its->tables[i] = base;

retry_baser:
		val = (virt_to_phys(base)				 |
		       (type << GITS_BASER_TYPE_SHIFT)			 |
		       ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
		       cache						 |
		       shr						 |
		       GITS_BASER_VALID);

		switch (psz) {
		case SZ_4K:
			val |= GITS_BASER_PAGE_SIZE_4K;
			break;
		case SZ_16K:
			val |= GITS_BASER_PAGE_SIZE_16K;
			break;
		case SZ_64K:
			val |= GITS_BASER_PAGE_SIZE_64K;
			break;
		}

		val |= (alloc_size / psz) - 1;

		writeq_relaxed(val, its->base + GITS_BASER + i * 8);
		tmp = readq_relaxed(its->base + GITS_BASER + i * 8);

		if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
			/*
			 * Shareability didn't stick. Just use
			 * whatever the read reported, which is likely
			 * to be the only thing this redistributor
			 * supports. If that's zero, make it
			 * non-cacheable as well.
			 */
			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
			if (!shr)
				cache = GITS_BASER_nC;
			goto retry_baser;
		}

		if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
			/*
			 * Page size didn't stick. Let's try a smaller
			 * size and retry. If we reach 4K, then
			 * something is horribly wrong...
			 */
			switch (psz) {
			case SZ_16K:
				psz = SZ_4K;
				goto retry_baser;
			case SZ_64K:
				psz = SZ_16K;
				goto retry_baser;
			}
		}

		if (val != tmp) {
			pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n",
			       its->msi_chip.of_node->full_name, i,
			       (unsigned long) val, (unsigned long) tmp);
			err = -ENXIO;
			goto out_free;
		}

		pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
			(int)(alloc_size / entry_size),
			its_base_type_string[type],
			(unsigned long)virt_to_phys(base),
			psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
	}

	return 0;

out_free:
	its_free_tables(its);

	return err;
}

static int its_alloc_collections(struct its_node *its)
{
	its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections),
				   GFP_KERNEL);
	if (!its->collections)
		return -ENOMEM;

	return 0;
}

static void its_cpu_init_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	struct page *pend_page;
	u64 val, tmp;

	/* If we didn't allocate the pending table yet, do it now */
	pend_page = gic_data_rdist()->pend_page;
	if (!pend_page) {
		phys_addr_t paddr;
		/*
		 * The pending pages have to be at least 64kB aligned,
		 * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
		 */
		pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO,
					get_order(max(LPI_PENDBASE_SZ, SZ_64K)));
		if (!pend_page) {
			pr_err("Failed to allocate PENDBASE for CPU%d\n",
			       smp_processor_id());
			return;
		}

		/* Make sure the GIC will observe the zero-ed page */
		__flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ);

		paddr = page_to_phys(pend_page);
		pr_info("CPU%d: using LPI pending table @%pa\n",
			smp_processor_id(), &paddr);
		gic_data_rdist()->pend_page = pend_page;
	}

	/* Disable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/*
	 * Make sure any change to the table is observable by the GIC.
	 */
	dsb(sy);

	/* set PROPBASE */
	val = (page_to_phys(gic_rdists->prop_page) |
	       GICR_PROPBASER_InnerShareable |
	       GICR_PROPBASER_WaWb |
	       ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));

	writeq_relaxed(val, rbase + GICR_PROPBASER);
	tmp = readq_relaxed(rbase + GICR_PROPBASER);

	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
		if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
				 GICR_PROPBASER_CACHEABILITY_MASK);
			val |= GICR_PROPBASER_nC;
			writeq_relaxed(val, rbase + GICR_PROPBASER);
		}
		pr_info_once("GIC: using cache flushing for LPI property table\n");
		gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
	}

	/* set PENDBASE */
	val = (page_to_phys(pend_page) |
	       GICR_PENDBASER_InnerShareable |
	       GICR_PENDBASER_WaWb);

	writeq_relaxed(val, rbase + GICR_PENDBASER);
	tmp = readq_relaxed(rbase + GICR_PENDBASER);

	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
		/*
		 * The HW reports non-shareable, we must remove the
		 * cacheability attributes as well.
		 */
		val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
			 GICR_PENDBASER_CACHEABILITY_MASK);
		val |= GICR_PENDBASER_nC;
		writeq_relaxed(val, rbase + GICR_PENDBASER);
	}

	/* Enable LPIs */
	val = readl_relaxed(rbase + GICR_CTLR);
	val |= GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure the GIC has seen the above */
	dsb(sy);
}

static void its_cpu_init_collection(void)
{
	struct its_node *its;
	int cpu;

	spin_lock(&its_lock);
	cpu = smp_processor_id();

	list_for_each_entry(its, &its_nodes, entry) {
		u64 target;

		/*
		 * We now have to bind each collection to its target
		 * redistributor.
		 */
		if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
			/*
			 * This ITS wants the physical address of the
			 * redistributor.
			 */
			target = gic_data_rdist()->phys_base;
		} else {
			/*
			 * This ITS wants a linear CPU number.
			 */
			target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
			target = GICR_TYPER_CPU_NUMBER(target) << 16;
		}

		/* Perform collection mapping */
		its->collections[cpu].target_address = target;
		its->collections[cpu].col_id = cpu;

		its_send_mapc(its, &its->collections[cpu], 1);
		its_send_invall(its, &its->collections[cpu]);
	}

	spin_unlock(&its_lock);
}

static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
{
	struct its_device *its_dev = NULL, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&its->lock, flags);

	list_for_each_entry(tmp, &its->its_device_list, entry) {
		if (tmp->device_id == dev_id) {
			its_dev = tmp;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&its->lock, flags);

	return its_dev;
}

static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
					    int nvecs)
{
	struct its_device *dev;
	unsigned long *lpi_map;
	unsigned long flags;
	u16 *col_map = NULL;
	void *itt;
	int lpi_base;
	int nr_lpis;
	int nr_ites;
	int sz;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	/*
	 * At least one bit of EventID is being used, hence a minimum
	 * of two entries. No, the architecture doesn't let you
	 * express an ITT with a single entry.
	 */
	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
	sz = nr_ites * its->ite_size;
	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
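	/*
	 * Example sizing (assuming an 8-byte ITE; ite_size is
	 * discovered from GITS_TYPER): nvecs = 5 rounds up to
	 * nr_ites = 8, giving sz = 64, then max(64, 256) + 255 = 511
	 * bytes, leaving enough slack for a 256-byte aligned ITT
	 * inside the allocation.
	 */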
	itt = kzalloc(sz, GFP_KERNEL);
	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
	if (lpi_map)
		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);

	if (!dev || !itt || !lpi_map || !col_map) {
		kfree(dev);
		kfree(itt);
		kfree(lpi_map);
		kfree(col_map);
		return NULL;
	}

	dev->its = its;
	dev->itt = itt;
	dev->nr_ites = nr_ites;
	dev->event_map.lpi_map = lpi_map;
	dev->event_map.col_map = col_map;
	dev->event_map.lpi_base = lpi_base;
	dev->event_map.nr_lpis = nr_lpis;
	dev->device_id = dev_id;
	INIT_LIST_HEAD(&dev->entry);

	raw_spin_lock_irqsave(&its->lock, flags);
	list_add(&dev->entry, &its->its_device_list);
	raw_spin_unlock_irqrestore(&its->lock, flags);

	/* Map device to its ITT */
	its_send_mapd(dev, 1);

	return dev;
}

static void its_free_device(struct its_device *its_dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&its_dev->its->lock, flags);
	list_del(&its_dev->entry);
	raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
	kfree(its_dev->itt);
	kfree(its_dev);
}

static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
{
	int idx;

	idx = find_first_zero_bit(dev->event_map.lpi_map,
				  dev->event_map.nr_lpis);
	if (idx == dev->event_map.nr_lpis)
		return -ENOSPC;

	*hwirq = dev->event_map.lpi_base + idx;
	set_bit(idx, dev->event_map.lpi_map);

	return 0;
}

struct its_pci_alias {
	struct pci_dev	*pdev;
	u32		dev_id;
	u32		count;
};

static int its_pci_msi_vec_count(struct pci_dev *pdev)
{
	int msi, msix;

	msi = max(pci_msi_vec_count(pdev), 0);
	msix = max(pci_msix_vec_count(pdev), 0);

	return max(msi, msix);
}

static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
{
	struct its_pci_alias *dev_alias = data;

	dev_alias->dev_id = alias;
	if (pdev != dev_alias->pdev)
		dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);

	return 0;
}

static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *info)
{
	struct pci_dev *pdev;
	struct its_node *its;
	struct its_device *its_dev;
	struct its_pci_alias dev_alias;

	if (!dev_is_pci(dev))
		return -EINVAL;

	pdev = to_pci_dev(dev);
	dev_alias.pdev = pdev;
	dev_alias.count = nvec;

	pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias);
	its = domain->parent->host_data;

	its_dev = its_find_device(its, dev_alias.dev_id);
	if (its_dev) {
		/*
		 * We already have seen this ID, probably through
		 * another alias (PCI bridge of some sort). No need to
		 * create the device.
		 */
		dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id);
		goto out;
	}

	its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count);
	if (!its_dev)
		return -ENOMEM;

	dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n",
		dev_alias.count, ilog2(dev_alias.count));
out:
	info->scratchpad[0].ptr = its_dev;
	info->scratchpad[1].ptr = dev;
	return 0;
}

static struct msi_domain_ops its_pci_msi_ops = {
	.msi_prepare	= its_msi_prepare,
};

static struct msi_domain_info its_pci_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
	.ops	= &its_pci_msi_ops,
	.chip	= &its_msi_irq_chip,
};

static int its_irq_gic_domain_alloc(struct irq_domain *domain,
				    unsigned int virq,
				    irq_hw_number_t hwirq)
{
	struct of_phandle_args args;

	args.np = domain->parent->of_node;
	args.args_count = 3;
	args.args[0] = GIC_IRQ_TYPE_LPI;
	args.args[1] = hwirq;
	args.args[2] = IRQ_TYPE_EDGE_RISING;

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &args);
}

static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *args)
{
	msi_alloc_info_t *info = args;
	struct its_device *its_dev = info->scratchpad[0].ptr;
	irq_hw_number_t hwirq;
	int err;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		err = its_alloc_device_irq(its_dev, &hwirq);
		if (err)
			return err;

		err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq, &its_irq_chip, its_dev);
		dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
			(int)(hwirq - its_dev->event_map.lpi_base),
			(int)hwirq, virq + i);
	}

	return 0;
}

static void its_irq_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Bind the LPI to the first possible CPU */
	its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);

	/* Map the GIC IRQ and event to the device */
	its_send_mapvi(its_dev, d->hwirq, event);
}

static void its_irq_domain_deactivate(struct irq_domain *domain,
				      struct irq_data *d)
{
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	u32 event = its_get_event_id(d);

	/* Stop the delivery of interrupts */
	its_send_discard(its_dev, event);
}

static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_domain_get_irq_data(domain,
								virq + i);
		u32 event = its_get_event_id(data);

		/* Mark interrupt index as unused */
		clear_bit(event, its_dev->event_map.lpi_map);

		/* Nuke the entry in the domain */
		irq_domain_reset_irq_data(data);
	}

	/* If all interrupts have been freed, start mopping the floor */
	if (bitmap_empty(its_dev->event_map.lpi_map,
			 its_dev->event_map.nr_lpis)) {
		its_lpi_free(&its_dev->event_map);

		/* Unmap device/itt */
		its_send_mapd(its_dev, 0);
		its_free_device(its_dev);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops its_domain_ops = {
	.alloc			= its_irq_domain_alloc,
	.free			= its_irq_domain_free,
	.activate		= its_irq_domain_activate,
	.deactivate		= its_irq_domain_deactivate,
};

static int its_force_quiescent(void __iomem *base)
{
	u32 count = 1000000;	/* 1s */
	u32 val;

	val = readl_relaxed(base + GITS_CTLR);
	if (val & GITS_CTLR_QUIESCENT)
		return 0;

	/* Disable the generation of all interrupts to this ITS */
	val &= ~GITS_CTLR_ENABLE;
	writel_relaxed(val, base + GITS_CTLR);

	/* Poll GITS_CTLR and wait until ITS becomes quiescent */
	while (1) {
		val = readl_relaxed(base + GITS_CTLR);
		if (val & GITS_CTLR_QUIESCENT)
			return 0;

		count--;
		if (!count)
			return -EBUSY;

		cpu_relax();
		udelay(1);
	}
}

static int its_probe(struct device_node *node, struct irq_domain *parent)
{
	struct resource res;
	struct its_node *its;
	void __iomem *its_base;
	u32 val;
	u64 baser, tmp;
	int err;

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		pr_warn("%s: no regs?\n", node->full_name);
		return -ENXIO;
	}

	its_base = ioremap(res.start, resource_size(&res));
	if (!its_base) {
		pr_warn("%s: unable to map registers\n", node->full_name);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("%s: no ITS detected, giving up\n", node->full_name);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("%s: failed to quiesce, giving up\n",
			node->full_name);
		goto out_unmap;
	}

	pr_info("ITS: %s\n", node->full_name);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	its->base = its_base;
	its->phys_base = res.start;
	its->msi_chip.of_node = node;
	its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;

	its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
	if (!its->cmd_base) {
		err = -ENOMEM;
		goto out_free_its;
	}
	its->cmd_write = its->cmd_base;

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;

	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_WaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	writeq_relaxed(baser, its->base + GITS_CBASER);
	tmp = readq_relaxed(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			writeq_relaxed(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	writeq_relaxed(0, its->base + GITS_CWRITER);
	writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);

	if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
		its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
		if (!its->domain) {
			err = -ENOMEM;
			goto out_free_tables;
		}

		its->domain->parent = parent;

		its->msi_chip.domain = pci_msi_create_irq_domain(node,
								 &its_pci_msi_domain_info,
								 its->domain);
		if (!its->msi_chip.domain) {
			err = -ENOMEM;
			goto out_free_domains;
		}

		err = of_pci_msi_chip_add(&its->msi_chip);
		if (err)
			goto out_free_domains;
	}

	spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	spin_unlock(&its_lock);

	return 0;

out_free_domains:
	if (its->msi_chip.domain)
		irq_domain_remove(its->msi_chip.domain);
	if (its->domain)
		irq_domain_remove(its->domain);
out_free_tables:
	its_free_tables(its);
out_free_cmd:
	kfree(its->cmd_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS: failed probing %s (%d)\n", node->full_name, err);
	return err;
}

static bool gic_rdists_supports_plpis(void)
{
	return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		if (!gic_rdists_supports_plpis()) {
			pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
			return -ENXIO;
		}
		its_cpu_init_lpis();
		its_cpu_init_collection();
	}

	return 0;
}

static struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

int its_init(struct device_node *node, struct rdists *rdists,
	     struct irq_domain *parent_domain)
{
	struct device_node *np;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		its_probe(np, parent_domain);
	}

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	gic_rdists = rdists;
	gic_root_node = node;

	its_alloc_lpi_tables();
	its_lpi_init(rdists->id_bits);

	return 0;
}