s390/etr,stp: fix possible deadlock on machine check
arch/s390/kernel/nmi.c
/*
 *   Machine check handler
 *
 *    Copyright IBM Corp. 2000, 2009
 *    Author(s): Ingo Adlung <adlung@de.ibm.com>,
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *               Cornelia Huck <cornelia.huck@de.ibm.com>,
 *               Heiko Carstens <heiko.carstens@de.ibm.com>,
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/time.h>
#include <linux/module.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
#include <asm/etr.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include <asm/switch_to.h>
#include <asm/fpu-internal.h>
#include <asm/ctl_reg.h>

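/*
 * Per-CPU record of the machine check conditions observed by
 * s390_do_machine_check(); evaluated and cleared again by
 * s390_handle_mcck() once normal context is reached.
 */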
struct mcck_struct {
        int kill_task;
        int channel_report;
        int warning;
        unsigned int etr_queue : 1;
        unsigned int stp_queue : 1;
        unsigned long long mcck_code;
};

static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);

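/*
 * Unrecoverable damage: stop all other CPUs and put this CPU into
 * disabled wait, with the caller's return address encoded in the
 * wait PSW for later inspection.
 */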
static void s390_handle_damage(char *msg)
{
        smp_send_stop();
        disabled_wait((unsigned long) __builtin_return_address(0));
        while (1);
}

/*
 * Main machine check handler function. Will be called with interrupts enabled
 * or disabled and machine checks enabled or disabled.
 */
void s390_handle_mcck(void)
{
        unsigned long flags;
        struct mcck_struct mcck;

        /*
         * Disable machine checks and get the current state of accumulated
         * machine checks. Afterwards delete the old state and enable machine
         * checks again.
         */
        local_irq_save(flags);
        local_mcck_disable();
        mcck = *this_cpu_ptr(&cpu_mcck);
        memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
        clear_cpu_flag(CIF_MCCK_PENDING);
        local_mcck_enable();
        local_irq_restore(flags);

        if (mcck.channel_report)
                crw_handle_channel_report();
        /*
         * A warning may remain for a prolonged period on the bare iron
         * (actually until the machine is powered off or the problem goes
         * away), so we simply stop listening for the WARNING MCH to avoid
         * being interrupted continuously.  One caveat, however, is that we
         * must do this per processor and cannot use the smp version of
         * ctl_clear_bit().  On VM we get only one interrupt per virtually
         * presented machine check; even though one would suffice, we may
         * get one interrupt per (virtual) cpu.
         */
        if (mcck.warning) {     /* WARNING pending ? */
                static int mchchk_wng_posted = 0;

                /* Use single cpu clear, as we cannot handle smp here. */
                __ctl_clear_bit(14, 24);        /* Disable WARNING MCH */
                if (xchg(&mchchk_wng_posted, 1) == 0)
                        kill_cad_pid(SIGPWR, 1);
        }
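        /*
         * The ETR/STP synchronization work is queued here, outside of
         * machine check context, rather than from s390_do_machine_check()
         * itself; queueing work directly from machine check context could
         * deadlock (see the commit subject above).
         */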
        if (mcck.etr_queue)
                etr_queue_work();
        if (mcck.stp_queue)
                stp_queue_work();
        if (mcck.kill_task) {
                local_irq_enable();
                printk(KERN_EMERG "mcck: Terminating task because of machine "
                       "malfunction (code 0x%016llx).\n", mcck.mcck_code);
                printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
                       current->comm, current->pid);
                do_exit(SIGSEGV);
        }
}
EXPORT_SYMBOL_GPL(s390_handle_mcck);

/*
 * Returns 0 if all register contents could be revalidated,
 * returns 1 otherwise, i.e. some register contents could not be restored.
 */
static int notrace s390_revalidate_registers(struct mci *mci)
{
        int kill_task;
        u64 zero;
        void *fpt_save_area, *fpt_creg_save_area;

        kill_task = 0;
        zero = 0;

        if (!mci->gr) {
                /*
                 * General purpose registers couldn't be restored and have
                 * unknown contents. Process needs to be terminated.
                 */
                kill_task = 1;
        }
        if (!mci->fp) {
                /*
                 * Floating point registers can't be restored and
                 * therefore the process needs to be terminated.
                 */
                kill_task = 1;
        }
        fpt_save_area = &S390_lowcore.floating_pt_save_area;
        fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
        if (!mci->fc) {
                /*
                 * Floating point control register can't be restored.
                 * Task will be terminated.
                 */
                asm volatile("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
                kill_task = 1;
        } else
                asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));

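        /*
         * Without the vector facility only the 16 floating point registers
         * are reloaded from the lowcore FP save area; with the facility the
         * combined FP/vector register contents come from the machine check
         * vector save area instead.
         */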
        if (!MACHINE_HAS_VX) {
                /* Revalidate floating point registers */
                asm volatile(
                        "       ld      0,0(%0)\n"
                        "       ld      1,8(%0)\n"
                        "       ld      2,16(%0)\n"
                        "       ld      3,24(%0)\n"
                        "       ld      4,32(%0)\n"
                        "       ld      5,40(%0)\n"
                        "       ld      6,48(%0)\n"
                        "       ld      7,56(%0)\n"
                        "       ld      8,64(%0)\n"
                        "       ld      9,72(%0)\n"
                        "       ld      10,80(%0)\n"
                        "       ld      11,88(%0)\n"
                        "       ld      12,96(%0)\n"
                        "       ld      13,104(%0)\n"
                        "       ld      14,112(%0)\n"
                        "       ld      15,120(%0)\n"
                        : : "a" (fpt_save_area));
        } else {
                /* Revalidate vector registers */
                union ctlreg0 cr0;

                if (!mci->vr) {
                        /*
                         * Vector registers can't be restored and therefore
                         * the process needs to be terminated.
                         */
                        kill_task = 1;
                }
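                /*
                 * Temporarily set the AFP-register and vector-enablement
                 * controls in CR0 so the vlm instructions below do not
                 * trigger a data exception, then restore the saved CR0.
                 */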
                cr0.val = S390_lowcore.cregs_save_area[0];
                cr0.afp = cr0.vx = 1;
                __ctl_load(cr0.val, 0, 0);
                asm volatile(
                        "       la      1,%0\n"
                        "       .word   0xe70f,0x1000,0x0036\n" /* vlm 0,15,0(1) */
                        "       .word   0xe70f,0x1100,0x0c36\n" /* vlm 16,31,256(1) */
                        : : "Q" (*(struct vx_array *)
                                 &S390_lowcore.vector_save_area) : "1");
                __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
        }
        /* Revalidate access registers */
        asm volatile(
                "       lam     0,15,0(%0)"
                : : "a" (&S390_lowcore.access_regs_save_area));
        if (!mci->ar) {
                /*
                 * Access registers have unknown contents.
                 * Terminating task.
                 */
                kill_task = 1;
        }
        /* Revalidate control registers */
        if (!mci->cr) {
                /*
                 * Control registers have unknown contents.
                 * Can't recover and therefore stopping machine.
                 */
                s390_handle_damage("invalid control registers.");
        } else {
                asm volatile(
                        "       lctlg   0,15,0(%0)"
                        : : "a" (&S390_lowcore.cregs_save_area));
        }
        /*
         * We don't even try to revalidate the TOD register, since we simply
         * can't write something sensible into that register.
         */
        /*
         * See if we can revalidate the TOD programmable register with its
         * old contents (should be zero), otherwise set it to zero.
         */
        if (!mci->pr)
                asm volatile(
                        "       sr      0,0\n"
                        "       sckpf"
                        : : : "0", "cc");
        else
                asm volatile(
                        "       l       0,0(%0)\n"
                        "       sckpf"
                        : : "a" (&S390_lowcore.tod_progreg_save_area)
                        : "0", "cc");
        /* Revalidate clock comparator register */
        set_clock_comparator(S390_lowcore.clock_comparator);
        /* Check if old PSW is valid */
        if (!mci->wp)
                /*
                 * Can't tell if we come from user or kernel mode
                 * -> stopping machine.
                 */
                s390_handle_damage("old psw invalid.");

        if (!mci->ms || !mci->pm || !mci->ia)
                kill_task = 1;

        return kill_task;
}

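/*
 * Instruction-processing-damage machine checks are retried (the condition
 * is nullifying, so the failing instruction can be re-executed), but at
 * most MAX_IPD_COUNT times within a MAX_IPD_TIME window; beyond that the
 * machine is considered damaged.
 */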
#define MAX_IPD_COUNT   29
#define MAX_IPD_TIME    (5 * 60 * USEC_PER_SEC) /* 5 minutes */

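/* Bit positions tested against S390_lowcore.external_damage_code below. */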
#define ED_STP_ISLAND   6       /* External damage STP island check */
#define ED_STP_SYNC     7       /* External damage STP sync check */
#define ED_ETR_SYNC     12      /* External damage ETR sync check */
#define ED_ETR_SWITCH   13      /* External damage ETR switch to local */

/*
 * Machine check handler.
 */
void notrace s390_do_machine_check(struct pt_regs *regs)
{
        static int ipd_count;
        static DEFINE_SPINLOCK(ipd_lock);
        static unsigned long long last_ipd;
        struct mcck_struct *mcck;
        unsigned long long tmp;
        struct mci *mci;
        int umode;

        nmi_enter();
        inc_irq_stat(NMI_NMI);
        mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
        mcck = this_cpu_ptr(&cpu_mcck);
        umode = user_mode(regs);

        if (mci->sd) {
                /* System damage -> stopping machine */
                s390_handle_damage("received system damage machine check.");
        }
        if (mci->pd) {
                if (mci->b) {
                        /* Processing backup -> verify if we can survive this */
                        u64 z_mcic, o_mcic, t_mcic;
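                        /*
                         * z_mcic: MCIC bits that must be zero, o_mcic: bits
                         * that must be one for the machine check to be
                         * survivable; any other combination is fatal.
                         */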
                        z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
                        o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
                                  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
                                  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
                                  1ULL<<16);
                        t_mcic = *(u64 *)mci;

                        if (((t_mcic & z_mcic) != 0) ||
                            ((t_mcic & o_mcic) != o_mcic)) {
                                s390_handle_damage("processing backup machine "
                                                   "check with damage.");
                        }

                        /*
                         * Nullifying exigent condition, therefore we might
                         * retry this instruction.
                         */
                        spin_lock(&ipd_lock);
                        tmp = get_tod_clock();
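                        /*
                         * TOD clock value >> 12 yields microseconds (bit 51
                         * of the TOD clock ticks once per microsecond).
                         */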
                        if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
                                ipd_count++;
                        else
                                ipd_count = 1;
                        last_ipd = tmp;
                        if (ipd_count == MAX_IPD_COUNT)
                                s390_handle_damage("too many ipd retries.");
                        spin_unlock(&ipd_lock);
                } else {
                        /* Processing damage -> stopping machine */
                        s390_handle_damage("received instruction processing "
                                           "damage machine check.");
                }
        }
        if (s390_revalidate_registers(mci)) {
                if (umode) {
                        /*
                         * Couldn't restore all register contents while in
                         * user mode -> mark task for termination.
                         */
                        mcck->kill_task = 1;
                        mcck->mcck_code = *(unsigned long long *) mci;
                        set_cpu_flag(CIF_MCCK_PENDING);
                } else {
                        /*
                         * Couldn't restore all register contents while in
                         * kernel mode -> stopping machine.
                         */
                        s390_handle_damage("unable to revalidate registers.");
                }
        }
        if (mci->cd) {
                /* Timing facility damage */
                s390_handle_damage("TOD clock damaged");
        }
        if (mci->ed && mci->ec) {
                /* External damage */
                if (S390_lowcore.external_damage_code & (1U << ED_ETR_SYNC))
                        mcck->etr_queue |= etr_sync_check();
                if (S390_lowcore.external_damage_code & (1U << ED_ETR_SWITCH))
                        mcck->etr_queue |= etr_switch_to_local();
                if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
                        mcck->stp_queue |= stp_sync_check();
                if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
                        mcck->stp_queue |= stp_island_check();
                if (mcck->etr_queue || mcck->stp_queue)
                        set_cpu_flag(CIF_MCCK_PENDING);
        }
        if (mci->se)
                /* Storage error uncorrected */
                s390_handle_damage("received storage error uncorrected "
                                   "machine check.");
        if (mci->ke)
                /* Storage key-error uncorrected */
                s390_handle_damage("received storage key-error uncorrected "
                                   "machine check.");
        if (mci->ds && mci->fa)
                /* Storage degradation */
                s390_handle_damage("received storage degradation machine "
                                   "check.");
        if (mci->cp) {
                /* Channel report word pending */
                mcck->channel_report = 1;
                set_cpu_flag(CIF_MCCK_PENDING);
        }
        if (mci->w) {
                /* Warning pending */
                mcck->warning = 1;
                set_cpu_flag(CIF_MCCK_PENDING);
        }
        nmi_exit();
}

static int __init machine_check_init(void)
{
        ctl_set_bit(14, 25);    /* enable external damage MCH */
        ctl_set_bit(14, 27);    /* enable system recovery MCH */
        ctl_set_bit(14, 24);    /* enable warning MCH */
        return 0;
}
early_initcall(machine_check_init);