{
	int rc, irq, trigger, polarity;
+	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+		*irqp = gsi;
+		return 0;
+	}
+
	rc = acpi_get_override_irq(gsi, &trigger, &polarity);
	if (rc == 0) {
		trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
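With the legacy-PIC interrupt model there is no I/O APIC routing to consult: GSI numbers correspond one-to-one to the 8259A IRQ lines, so the new early return simply hands the GSI back through *irqp. A small hedged usage sketch follows (the handler, device name, and error handling are illustrative assumptions, not taken from the patch):

#include <linux/acpi.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

/* Translate an ACPI GSI to a Linux IRQ number before requesting it; with
 * the PIC model, acpi_gsi_to_irq() now just echoes the GSI back. */
static int demo_request_gsi(u32 gsi)
{
	unsigned int irq;

	if (acpi_gsi_to_irq(gsi, &irq))
		return -ENOENT;		/* no mapping for this GSI */

	return request_irq(irq, demo_handler, 0, "demo-gsi", NULL);
}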
static int __init acpi_parse_sbf(struct acpi_table_header *table)
{
-	struct acpi_table_boot *sb;
-
-	sb = (struct acpi_table_boot *)table;
-	if (!sb) {
-		printk(KERN_WARNING PREFIX "Unable to map SBF\n");
-		return -ENODEV;
-	}
+	struct acpi_table_boot *sb = (struct acpi_table_boot *)table;
	sbf_port = sb->cmos_index; /* Save CMOS port */
static int __init acpi_parse_hpet(struct acpi_table_header *table)
{
-	struct acpi_table_hpet *hpet_tbl;
-
-	hpet_tbl = (struct acpi_table_hpet *)table;
-	if (!hpet_tbl) {
-		printk(KERN_WARNING PREFIX "Unable to map HPET\n");
-		return -ENODEV;
-	}
+	struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
	if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
		printk(KERN_WARNING PREFIX "HPET timers must be located in "
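Both handlers can drop the NULL check for the same reason: they are only invoked through acpi_table_parse(), which calls the handler after the named table has already been looked up successfully, so the table pointer can never be NULL at this point. A simplified sketch of that calling contract (an assumption modeled on the generic table-parsing path, not the verbatim code):

#include <linux/acpi.h>

/* Sketch only: the handler runs solely for a successfully retrieved,
 * non-NULL table header, which is why the per-handler checks above are
 * dead code. */
static int __init table_parse_sketch(char *id,
				     int (*handler)(struct acpi_table_header *))
{
	struct acpi_table_header *table = NULL;

	if (ACPI_FAILURE(acpi_get_table(id, 0, &table)) || !table)
		return -ENODEV;	/* table absent: handler is never called */

	return handler(table);	/* table is guaranteed non-NULL here */
}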
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. */
-static char __initdata nocb_buf[NR_CPUS * 5];
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
/*
	special = t->rcu_read_unlock_special;
	if (special.b.need_qs) {
		rcu_preempt_qs();
+		t->rcu_read_unlock_special.b.need_qs = false;
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
			return;
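The added line clears need_qs right after rcu_preempt_qs() records the quiescent state, so the following test of the whole union (.s) can take the early-exit path when no other special-case bit is still set. A self-contained userspace illustration of that union pattern (the layout below is an assumption for illustration, not the kernel's actual union rcu_special):

#include <stdbool.h>
#include <stdio.h>

/* Illustration only (assumed layout): individual reason bits are visible
 * through .b, while .s views the whole word so one zero test decides
 * whether any reason remains. */
union unlock_special_sketch {
	struct {
		bool blocked;
		bool need_qs;
	} b;			/* set and cleared one reason at a time */
	unsigned short s;	/* all reasons at once */
};

int main(void)
{
	union unlock_special_sketch sp = { .b = { .need_qs = true } };

	sp.b.need_qs = false;	/* quiescent state has been reported */
	printf("anything else pending? %s\n", sp.s ? "yes" : "no");
	return 0;
}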
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
-	cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
-	pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
+	pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
+		cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");