/*
 * Unisys ES7000 sub-architecture APIC definitions.
 * NOTE(review): this extract is missing lines; the stray leading numbers
 * are source-line artifacts from the extraction, not code.
 */
1 #ifndef __ASM_ES7000_APIC_H
2 #define __ASM_ES7000_APIC_H
/* A CPU's logical APIC id is its BIOS-reported physical APIC id. */
4 #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
/* APIC error-status-register handling is disabled on this platform. */
5 #define esr_disable (1)
/*
 * Whether this CPU's APIC id is registered.  Only the signature is
 * visible in this extract; the body (presumably a constant return on
 * ES7000) is missing - confirm against the complete source.
 */
7 static inline int apic_id_registered(void)
/*
 * Target-cpu mask used in clustered-APIC mode.  Signature only in this
 * extract; the body is missing.
 */
12 static inline const cpumask_t *target_cpus_cluster(void)
17 static inline const cpumask_t *target_cpus(void)
19 return &cpumask_of_cpu(smp_processor_id());
/*
 * Clustered-APIC mode: cluster-format DFR, lowest-priority delivery to
 * logical destinations, IRQ balancing disabled.
 */
22 #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
23 #define INT_DELIVERY_MODE_CLUSTER (dest_LowestPrio)
24 #define INT_DEST_MODE_CLUSTER (1) /* logical delivery broadcast to all procs */
25 #define NO_BALANCE_IRQ_CLUSTER (1)
/*
 * Default (non-cluster) mode: flat DFR, fixed delivery to physical
 * destinations, IRQ balancing allowed.
 */
27 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
28 #define INT_DELIVERY_MODE (dest_Fixed)
29 #define INT_DEST_MODE (0) /* phys delivery to target procs */
30 #define NO_BALANCE_IRQ (0)
/* Override the generic value: this mode never uses logical addressing. */
31 #undef APIC_DEST_LOGICAL
32 #define APIC_DEST_LOGICAL 0x0
/* Signature only in this extract; the body is missing. */
34 static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
38 static inline unsigned long check_apicid_present(int bit)
40 return physid_isset(bit, phys_cpu_present_map);
/*
 * The upper nibble of a logical APIC id selects its ES7000 cluster.
 * The argument is parenthesized so the macro stays correct when passed
 * a compound expression (e.g. a ternary or bitwise-or).
 */
#define apicid_cluster(apicid)	((apicid) & 0xF0)
/*
 * Logical Destination Register value for 'cpu': its logical APIC id
 * (equal to its BIOS physical APIC id, see xapic_phys_to_log_apicid)
 * shifted into LDR position by SET_APIC_LOGICAL_ID.
 * (Braces and the local declaration restored; lost in extraction.)
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long id;

	id = xapic_phys_to_log_apicid(cpu);
	return (SET_APIC_LOGICAL_ID(id));
}
53 * Set up the logical destination ID.
55 * Intel recommends to set DFR, LdR and TPR before enabling
56 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
57 * document number 292116). So here it goes...
59 static inline void init_apic_ldr_cluster(void)
62 int cpu = smp_processor_id();
64 apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
65 val = calculate_ldr(cpu);
66 apic_write(APIC_LDR, val);
69 static inline void init_apic_ldr(void)
72 int cpu = smp_processor_id();
74 apic_write(APIC_DFR, APIC_DFR_VALUE);
75 val = calculate_ldr(cpu);
76 apic_write(APIC_LDR, val);
/* APIC version per physical APIC id; defined elsewhere in the kernel. */
79 extern int apic_version [MAX_APICS];
/*
 * Announce which APIC routing mode is in use.  APIC version 0x14 is
 * reported as "Physical Cluster", anything else as "Logical Cluster".
 * Braces and any additional body lines are missing from this extract.
 * NOTE(review): printk lacks a KERN_* level prefix - confirm intent.
 */
80 static inline void setup_apic_routing(void)
82 int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
83 printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n",
84 (apic_version[apic] == 0x14) ?
85 "Physical Cluster" : "Logical Cluster",
86 nr_ioapics, cpus_addr(*target_cpus())[0]);
/* Signature only in this extract; the body is missing. */
89 static inline int multi_timer_check(int apic, int irq)
/* NUMA node for a logical APIC id; body missing from this extract. */
94 static inline int apicid_to_node(int logical_apicid)
/*
 * MPS cpu number -> physical APIC id.  The opening brace, the leading
 * 'if' condition and the final else/return are missing from this
 * extract; the visible branches return the boot CPU's APIC id or the
 * per-cpu BIOS-reported APIC id for in-range cpu numbers.
 */
100 static inline int cpu_present_to_apicid(int mps_cpu)
103 return boot_cpu_physical_apicid;
104 else if (mps_cpu < nr_cpu_ids)
105 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
/*
 * Build a physid mask for a present CPU.  Declarations of 'mask' and
 * 'id' plus the return are missing from this extract.  NOTE(review):
 * the visible line uses 'id', not the phys_apicid parameter - confirm
 * against the complete source.
 */
110 static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
114 mask = physid_mask_of_physid(id);
/* Mapping from cpu number to logical apicid; table defined elsewhere. */
119 extern u8 cpu_2_logical_apicid[];
120 /* Mapping from cpu number to logical apicid */
/*
 * cpu number -> logical APIC id.  Lines are missing from this extract
 * (presumably an SMP/UP preprocessor split): one visible path
 * bound-checks 'cpu' and indexes cpu_2_logical_apicid[]; the other
 * reads the running processor's logical id.  The out-of-range return
 * value is not visible here.
 */
121 static inline int cpu_to_logical_apicid(int cpu)
124 if (cpu >= nr_cpu_ids)
126 return (int)cpu_2_logical_apicid[cpu];
128 return logical_smp_processor_id();
132 static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
134 /* For clustered we don't have a good way to do this yet - hack */
135 return physids_promote(0xff);
/* Signature only in this extract; presumably a no-op on ES7000 - confirm. */
139 static inline void setup_portio_remap(void)
143 extern unsigned int boot_cpu_physical_apicid;
/*
 * Refreshes boot_cpu_physical_apicid from the hardware APIC id
 * register.  Braces and the return value are missing from this
 * extract; the cpu_physical_apicid argument is unused by the visible
 * line.
 */
144 static inline int check_phys_apicid_present(int cpu_physical_apicid)
146 boot_cpu_physical_apicid = read_apic_id();
/*
 * Clustered-mode cpumask -> logical APIC id for interrupt delivery.
 * All CPUs in the mask must share one APIC cluster (the 0xF0 bits of
 * the logical id, see apicid_cluster()); the loop compares each
 * member's cluster against the first CPU's and complains on mismatch.
 * Missing from this extract: local declarations, the "return id to
 * all" value, the error-path return after the printk, the apicid
 * accumulation, the loop advance and the final return.
 */
150 static inline unsigned int
151 cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
158 num_bits_set = cpumask_weight(cpumask);
159 /* Return id to all */
160 if (num_bits_set == NR_CPUS)
163 * The cpus in the mask must all be on the apic cluster. If are not
164 * on the same apicid cluster return default value of TARGET_CPUS.
166 cpu = cpumask_first(cpumask);
167 apicid = cpu_to_logical_apicid(cpu);
168 while (cpus_found < num_bits_set) {
169 if (cpumask_test_cpu(cpu, cpumask)) {
170 int new_apicid = cpu_to_logical_apicid(cpu);
171 if (apicid_cluster(apicid) !=
172 apicid_cluster(new_apicid)){
173 printk ("%s: Not a valid mask!\n", __func__);
/*
 * Legacy cpumask_t variant of the cluster-checked mask -> logical
 * APIC id mapping, using cpus_weight/first_cpu/cpu_isset.  On a
 * cluster mismatch it falls back to CPU 0's logical apicid.  Missing
 * from this extract: local declarations, the apicid accumulation,
 * the loop advance and the final return.
 */
184 static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
191 num_bits_set = cpus_weight(*cpumask);
192 /* Return id to all */
193 if (num_bits_set == NR_CPUS)
194 return cpu_to_logical_apicid(0);
196 * The cpus in the mask must all be on the apic cluster. If are not
197 * on the same apicid cluster return default value of TARGET_CPUS.
199 cpu = first_cpu(*cpumask);
200 apicid = cpu_to_logical_apicid(cpu);
201 while (cpus_found < num_bits_set) {
202 if (cpu_isset(cpu, *cpumask)) {
203 int new_apicid = cpu_to_logical_apicid(cpu);
204 if (apicid_cluster(apicid) !=
205 apicid_cluster(new_apicid)){
206 printk ("%s: Not a valid mask!\n", __func__);
207 return cpu_to_logical_apicid(0);
/*
 * AND 'inmask' with 'andmask' and cpu_online_mask, then map the result
 * to a logical APIC id with the same single-cluster restriction as the
 * other cpu_mask_to_apicid* helpers.  A temporary cpumask is allocated
 * with GFP_ATOMIC.  Missing from this extract: the allocation-failure
 * return, the "return id to all" value, the apicid accumulation, the
 * loop advance, and the value returned after free_cpumask_var().
 * NOTE(review): the visible early return on cluster mismatch exits
 * without reaching free_cpumask_var() - possible leak of 'cpumask';
 * confirm against the complete source.
 */
218 static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
219 const struct cpumask *andmask)
224 int apicid = cpu_to_logical_apicid(0);
225 cpumask_var_t cpumask;
227 if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
230 cpumask_and(cpumask, inmask, andmask);
231 cpumask_and(cpumask, cpumask, cpu_online_mask);
233 num_bits_set = cpumask_weight(cpumask);
234 /* Return id to all */
235 if (num_bits_set == NR_CPUS)
238 * The cpus in the mask must all be on the apic cluster. If are not
239 * on the same apicid cluster return default value of TARGET_CPUS.
241 cpu = cpumask_first(cpumask);
242 apicid = cpu_to_logical_apicid(cpu);
243 while (cpus_found < num_bits_set) {
244 if (cpumask_test_cpu(cpu, cpumask)) {
245 int new_apicid = cpu_to_logical_apicid(cpu);
246 if (apicid_cluster(apicid) !=
247 apicid_cluster(new_apicid)){
248 printk ("%s: Not a valid mask!\n", __func__);
249 return cpu_to_logical_apicid(0);
257 free_cpumask_var(cpumask);
261 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
263 return cpuid_apic >> index_msb;
266 #endif /* __ASM_ES7000_APIC_H */