2 * OPAL asynchronous Memory error handling support in PowerNV.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 * Copyright 2013 IBM Corporation
19 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
24 #include <linux/kernel.h>
25 #include <linux/init.h>
28 #include <linux/slab.h>
31 #include <asm/cputable.h>
33 static int opal_mem_err_nb_init;
34 static LIST_HEAD(opal_memory_err_list);
35 static DEFINE_SPINLOCK(opal_mem_err_lock);
38 struct list_head list;
42 static void handle_memory_error_event(struct OpalMemoryErrorData *merr_evt)
44 uint64_t paddr_start, paddr_end;
46 pr_debug("%s: Retrived memory error event, type: 0x%x\n",
47 __func__, merr_evt->type);
48 switch (merr_evt->type) {
49 case OPAL_MEM_ERR_TYPE_RESILIENCE:
50 paddr_start = merr_evt->u.resilience.physical_address_start;
51 paddr_end = merr_evt->u.resilience.physical_address_end;
53 case OPAL_MEM_ERR_TYPE_DYN_DALLOC:
54 paddr_start = merr_evt->u.dyn_dealloc.physical_address_start;
55 paddr_end = merr_evt->u.dyn_dealloc.physical_address_end;
61 for (; paddr_start < paddr_end; paddr_start += PAGE_SIZE) {
62 memory_failure(paddr_start >> PAGE_SHIFT, 0, 0);
66 static void handle_memory_error(void)
69 struct OpalMemoryErrorData *merr_evt;
70 struct OpalMsgNode *msg_node;
72 spin_lock_irqsave(&opal_mem_err_lock, flags);
73 while (!list_empty(&opal_memory_err_list)) {
74 msg_node = list_entry(opal_memory_err_list.next,
75 struct OpalMsgNode, list);
76 list_del(&msg_node->list);
77 spin_unlock_irqrestore(&opal_mem_err_lock, flags);
79 merr_evt = (struct OpalMemoryErrorData *)
80 &msg_node->msg.params[0];
81 handle_memory_error_event(merr_evt);
83 spin_lock_irqsave(&opal_mem_err_lock, flags);
85 spin_unlock_irqrestore(&opal_mem_err_lock, flags);
88 static void mem_error_handler(struct work_struct *work)
90 handle_memory_error();
93 static DECLARE_WORK(mem_error_work, mem_error_handler);
96 * opal_memory_err_event - notifier handler that queues up the opal message
97 * to be preocessed later.
99 static int opal_memory_err_event(struct notifier_block *nb,
100 unsigned long msg_type, void *msg)
103 struct OpalMsgNode *msg_node;
105 if (msg_type != OPAL_MSG_MEM_ERR)
108 msg_node = kzalloc(sizeof(*msg_node), GFP_ATOMIC);
110 pr_err("MEMORY_ERROR: out of memory, Opal message event not"
114 memcpy(&msg_node->msg, msg, sizeof(struct opal_msg));
116 spin_lock_irqsave(&opal_mem_err_lock, flags);
117 list_add(&msg_node->list, &opal_memory_err_list);
118 spin_unlock_irqrestore(&opal_mem_err_lock, flags);
120 schedule_work(&mem_error_work);
124 static struct notifier_block opal_mem_err_nb = {
125 .notifier_call = opal_memory_err_event,
130 static int __init opal_mem_err_init(void)
134 if (!opal_mem_err_nb_init) {
135 ret = opal_message_notifier_register(
136 OPAL_MSG_MEM_ERR, &opal_mem_err_nb);
138 pr_err("%s: Can't register OPAL event notifier (%d)\n",
142 opal_mem_err_nb_init = 1;
146 subsys_initcall(opal_mem_err_init);