drivers/char/ipmi/ipmi_si_intf.c
1 /*
2  * ipmi_si.c
3  *
4  * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5  * BT).
6  *
7  * Author: MontaVista Software, Inc.
8  *         Corey Minyard <minyard@mvista.com>
9  *         source@mvista.com
10  *
11  * Copyright 2002 MontaVista Software Inc.
12  * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
13  *
14  *  This program is free software; you can redistribute it and/or modify it
15  *  under the terms of the GNU General Public License as published by the
16  *  Free Software Foundation; either version 2 of the License, or (at your
17  *  option) any later version.
18  *
19  *
20  *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21  *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22  *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  *  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25  *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26  *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27  *  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28  *  TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29  *  USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  *  You should have received a copy of the GNU General Public License along
32  *  with this program; if not, write to the Free Software Foundation, Inc.,
33  *  675 Mass Ave, Cambridge, MA 02139, USA.
34  */
35
36 /*
37  * This file holds the "policy" for the interface to the SMI state
38  * machine.  It does the configuration, handles timers and interrupts,
39  * and drives the real SMI state machine.
40  */
41
42 #include <linux/module.h>
43 #include <linux/moduleparam.h>
44 #include <linux/sched.h>
45 #include <linux/seq_file.h>
46 #include <linux/timer.h>
47 #include <linux/errno.h>
48 #include <linux/spinlock.h>
49 #include <linux/slab.h>
50 #include <linux/delay.h>
51 #include <linux/list.h>
52 #include <linux/pci.h>
53 #include <linux/ioport.h>
54 #include <linux/notifier.h>
55 #include <linux/mutex.h>
56 #include <linux/kthread.h>
57 #include <asm/irq.h>
58 #include <linux/interrupt.h>
59 #include <linux/rcupdate.h>
60 #include <linux/ipmi.h>
61 #include <linux/ipmi_smi.h>
62 #include <asm/io.h>
63 #include "ipmi_si_sm.h"
64 #include <linux/init.h>
65 #include <linux/dmi.h>
66 #include <linux/string.h>
67 #include <linux/ctype.h>
68 #include <linux/pnp.h>
69 #include <linux/of_device.h>
70 #include <linux/of_platform.h>
71 #include <linux/of_address.h>
72 #include <linux/of_irq.h>
73
74 #define PFX "ipmi_si: "
75
76 /* Measure times between events in the driver. */
77 #undef DEBUG_TIMING
78
79 /* Call every 10 ms. */
80 #define SI_TIMEOUT_TIME_USEC    10000
81 #define SI_USEC_PER_JIFFY       (1000000/HZ)
82 #define SI_TIMEOUT_JIFFIES      (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
83 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM requests a
84                                       short timeout */
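/*
 * Worked example (illustrative): with HZ=1000 a jiffy is 1000 usec, so
 * SI_TIMEOUT_JIFFIES is 10000/1000 = 10 jiffies and the timer fires every
 * 10 ms; with HZ=250 the integer division rounds this down to 2 jiffies (8 ms).
 */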
85
86 enum si_intf_state {
87         SI_NORMAL,
88         SI_GETTING_FLAGS,
89         SI_GETTING_EVENTS,
90         SI_CLEARING_FLAGS,
91         SI_CLEARING_FLAGS_THEN_SET_IRQ,
92         SI_GETTING_MESSAGES,
93         SI_ENABLE_INTERRUPTS1,
94         SI_ENABLE_INTERRUPTS2,
95         SI_DISABLE_INTERRUPTS1,
96         SI_DISABLE_INTERRUPTS2
97         /* FIXME - add watchdog stuff. */
98 };
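/*
 * Rough state flow, as implemented in handle_flags() and
 * handle_transaction_done() below: an ATTN or interrupt takes the driver
 * from SI_NORMAL to SI_GETTING_FLAGS; depending on the returned flags it
 * moves to SI_CLEARING_FLAGS, SI_GETTING_MESSAGES or SI_GETTING_EVENTS,
 * and it returns to SI_NORMAL once all flags have been handled.
 */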
99
100 /* Some BT-specific defines we need here. */
101 #define IPMI_BT_INTMASK_REG             2
102 #define IPMI_BT_INTMASK_CLEAR_IRQ_BIT   2
103 #define IPMI_BT_INTMASK_ENABLE_IRQ_BIT  1
104
105 enum si_type {
106     SI_KCS, SI_SMIC, SI_BT
107 };
108 static char *si_to_str[] = { "kcs", "smic", "bt" };
109
110 static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
111                                         "ACPI", "SMBIOS", "PCI",
112                                         "device-tree", "default" };
113
114 #define DEVICE_NAME "ipmi_si"
115
116 static struct platform_driver ipmi_driver;
117
118 /*
119  * Indexes into stats[] in smi_info below.
120  */
121 enum si_stat_indexes {
122         /*
123          * Number of times the driver requested a timer while an operation
124          * was in progress.
125          */
126         SI_STAT_short_timeouts = 0,
127
128         /*
129          * Number of times the driver requested a timer while nothing was in
130          * progress.
131          */
132         SI_STAT_long_timeouts,
133
134         /* Number of times the interface was idle while being polled. */
135         SI_STAT_idles,
136
137         /* Number of interrupts the driver handled. */
138         SI_STAT_interrupts,
139
140         /* Number of times the driver got an ATTN from the hardware. */
141         SI_STAT_attentions,
142
143         /* Number of times the driver requested flags from the hardware. */
144         SI_STAT_flag_fetches,
145
146         /* Number of times the hardware didn't follow the state machine. */
147         SI_STAT_hosed_count,
148
149         /* Number of completed messages. */
150         SI_STAT_complete_transactions,
151
152         /* Number of IPMI events received from the hardware. */
153         SI_STAT_events,
154
155         /* Number of watchdog pretimeouts. */
156         SI_STAT_watchdog_pretimeouts,
157
158         /* Number of asynchronous messages received. */
159         SI_STAT_incoming_messages,
160
161
162         /* This *must* remain last, add new values above this. */
163         SI_NUM_STATS
164 };
165
166 struct smi_info {
167         int                    intf_num;
168         ipmi_smi_t             intf;
169         struct si_sm_data      *si_sm;
170         struct si_sm_handlers  *handlers;
171         enum si_type           si_type;
172         spinlock_t             si_lock;
173         struct list_head       xmit_msgs;
174         struct list_head       hp_xmit_msgs;
175         struct ipmi_smi_msg    *curr_msg;
176         enum si_intf_state     si_state;
177
178         /*
179          * Used to handle the various types of I/O that can occur with
180          * IPMI
181          */
182         struct si_sm_io io;
183         int (*io_setup)(struct smi_info *info);
184         void (*io_cleanup)(struct smi_info *info);
185         int (*irq_setup)(struct smi_info *info);
186         void (*irq_cleanup)(struct smi_info *info);
187         unsigned int io_size;
188         enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
189         void (*addr_source_cleanup)(struct smi_info *info);
190         void *addr_source_data;
191
192         /*
193          * Per-OEM handler, called from handle_flags().  Returns 1
194          * when handle_flags() needs to be re-run or 0 indicating it
195          * set si_state itself.
196          */
197         int (*oem_data_avail_handler)(struct smi_info *smi_info);
198
199         /*
200          * Flags from the last GET_MSG_FLAGS command, used when an ATTN
201          * is set to hold the flags until we are done handling everything
202          * from the flags.
203          */
204 #define RECEIVE_MSG_AVAIL       0x01
205 #define EVENT_MSG_BUFFER_FULL   0x02
206 #define WDT_PRE_TIMEOUT_INT     0x08
207 #define OEM0_DATA_AVAIL     0x20
208 #define OEM1_DATA_AVAIL     0x40
209 #define OEM2_DATA_AVAIL     0x80
210 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
211                              OEM1_DATA_AVAIL | \
212                              OEM2_DATA_AVAIL)
213         unsigned char       msg_flags;
214
215         /* Does the BMC have an event buffer? */
216         char                has_event_buffer;
217
218         /*
219          * If set to true, this will request events the next time the
220          * state machine is idle.
221          */
222         atomic_t            req_events;
223
224         /*
225          * If true, run the state machine to completion on every send
226          * call.  Generally used after a panic to make sure stuff goes
227          * out.
228          */
229         int                 run_to_completion;
230
231         /* The I/O port of an SI interface. */
232         int                 port;
233
234         /*
235          * The space between start addresses of the two ports.  For
236          * instance, if the first port is 0xca2 and the spacing is 4, then
237          * the second port is 0xca6.
238          */
239         unsigned int        spacing;
240
241         /* Zero if no irq. */
242         int                 irq;
243
244         /* The timer for this si. */
245         struct timer_list   si_timer;
246
247         /* Set if the timer is running (timer_pending() isn't enough). */
248         bool                timer_running;
249
250         /* The time (in jiffies) the last timeout occurred at. */
251         unsigned long       last_timeout_jiffies;
252
253         /* Used to gracefully stop the timer without race conditions. */
254         atomic_t            stop_operation;
255
256         /*
257          * The driver will disable interrupts when it gets into a
258          * situation where it cannot handle messages due to lack of
259          * memory.  Once that situation clears up, it will re-enable
260          * interrupts.
261          */
262         int interrupt_disabled;
263
264         /* From the get device id response... */
265         struct ipmi_device_id device_id;
266
267         /* Driver model stuff. */
268         struct device *dev;
269         struct platform_device *pdev;
270
271         /*
272          * True if we allocated the device, false if it came from
273          * someplace else (like PCI).
274          */
275         int dev_registered;
276
277         /* Slave address, could be reported from DMI. */
278         unsigned char slave_addr;
279
280         /* Counters and things for the proc filesystem. */
281         atomic_t stats[SI_NUM_STATS];
282
283         struct task_struct *thread;
284
285         struct list_head link;
286         union ipmi_smi_info_union addr_info;
287 };
288
289 #define smi_inc_stat(smi, stat) \
290         atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
291 #define smi_get_stat(smi, stat) \
292         ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
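/*
 * For example, smi_inc_stat(smi, attentions) expands via token pasting to
 * atomic_inc(&smi->stats[SI_STAT_attentions]).
 */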
293
294 #define SI_MAX_PARMS 4
295
296 static int force_kipmid[SI_MAX_PARMS];
297 static int num_force_kipmid;
298 #ifdef CONFIG_PCI
299 static int pci_registered;
300 #endif
301 #ifdef CONFIG_ACPI
302 static int pnp_registered;
303 #endif
304
305 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
306 static int num_max_busy_us;
307
308 static int unload_when_empty = 1;
309
310 static int add_smi(struct smi_info *smi);
311 static int try_smi_init(struct smi_info *smi);
312 static void cleanup_one_si(struct smi_info *to_clean);
313 static void cleanup_ipmi_si(void);
314
315 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
316 static int register_xaction_notifier(struct notifier_block *nb)
317 {
318         return atomic_notifier_chain_register(&xaction_notifier_list, nb);
319 }
320
321 static void deliver_recv_msg(struct smi_info *smi_info,
322                              struct ipmi_smi_msg *msg)
323 {
324         /* Deliver the message to the upper layer. */
325         ipmi_smi_msg_received(smi_info->intf, msg);
326 }
327
328 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
329 {
330         struct ipmi_smi_msg *msg = smi_info->curr_msg;
331
332         if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
333                 cCode = IPMI_ERR_UNSPECIFIED;
334         /* else use it as is */
335
336         /* Make it a response */
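        /*
         * data[0] is (netfn << 2) | lun; OR-ing in 4 adds one to the netfn,
         * turning the request netfn into the corresponding response netfn.
         */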
337         msg->rsp[0] = msg->data[0] | 4;
338         msg->rsp[1] = msg->data[1];
339         msg->rsp[2] = cCode;
340         msg->rsp_size = 3;
341
342         smi_info->curr_msg = NULL;
343         deliver_recv_msg(smi_info, msg);
344 }
345
346 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
347 {
348         int              rv;
349         struct list_head *entry = NULL;
350 #ifdef DEBUG_TIMING
351         struct timeval t;
352 #endif
353
354         /* Pick the high priority queue first. */
355         if (!list_empty(&(smi_info->hp_xmit_msgs))) {
356                 entry = smi_info->hp_xmit_msgs.next;
357         } else if (!list_empty(&(smi_info->xmit_msgs))) {
358                 entry = smi_info->xmit_msgs.next;
359         }
360
361         if (!entry) {
362                 smi_info->curr_msg = NULL;
363                 rv = SI_SM_IDLE;
364         } else {
365                 int err;
366
367                 list_del(entry);
368                 smi_info->curr_msg = list_entry(entry,
369                                                 struct ipmi_smi_msg,
370                                                 link);
371 #ifdef DEBUG_TIMING
372                 do_gettimeofday(&t);
373                 printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
374 #endif
375                 err = atomic_notifier_call_chain(&xaction_notifier_list,
376                                 0, smi_info);
377                 if (err & NOTIFY_STOP_MASK) {
378                         rv = SI_SM_CALL_WITHOUT_DELAY;
379                         goto out;
380                 }
381                 err = smi_info->handlers->start_transaction(
382                         smi_info->si_sm,
383                         smi_info->curr_msg->data,
384                         smi_info->curr_msg->data_size);
385                 if (err)
386                         return_hosed_msg(smi_info, err);
387
388                 rv = SI_SM_CALL_WITHOUT_DELAY;
389         }
390  out:
391         return rv;
392 }
393
394 static void start_enable_irq(struct smi_info *smi_info)
395 {
396         unsigned char msg[2];
397
398         /*
399          * If we are enabling interrupts, we have to tell the
400          * BMC to use them.
401          */
402         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
403         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
404
405         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
406         smi_info->si_state = SI_ENABLE_INTERRUPTS1;
407 }
408
409 static void start_disable_irq(struct smi_info *smi_info)
410 {
411         unsigned char msg[2];
412
413         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
414         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
415
416         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
417         smi_info->si_state = SI_DISABLE_INTERRUPTS1;
418 }
419
420 static void start_clear_flags(struct smi_info *smi_info)
421 {
422         unsigned char msg[3];
423
424         /* Make sure the watchdog pre-timeout flag is not set at startup. */
425         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
426         msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
427         msg[2] = WDT_PRE_TIMEOUT_INT;
428
429         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
430         smi_info->si_state = SI_CLEARING_FLAGS;
431 }
432
433 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
434 {
435         smi_info->last_timeout_jiffies = jiffies;
436         mod_timer(&smi_info->si_timer, new_val);
437         smi_info->timer_running = true;
438 }
439
440 /*
441  * When we have a situation where we run out of memory and cannot
442  * allocate messages, we just leave them in the BMC and run the system
443  * polled until we can allocate some memory.  Once we have some
444  * memory, we will re-enable the interrupt.
445  */
446 static inline void disable_si_irq(struct smi_info *smi_info)
447 {
448         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
449                 start_disable_irq(smi_info);
450                 smi_info->interrupt_disabled = 1;
451                 if (!atomic_read(&smi_info->stop_operation))
452                         smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
453         }
454 }
455
456 static inline void enable_si_irq(struct smi_info *smi_info)
457 {
458         if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
459                 start_enable_irq(smi_info);
460                 smi_info->interrupt_disabled = 0;
461         }
462 }
463
464 static void handle_flags(struct smi_info *smi_info)
465 {
466  retry:
467         if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
468                 /* Watchdog pre-timeout */
469                 smi_inc_stat(smi_info, watchdog_pretimeouts);
470
471                 start_clear_flags(smi_info);
472                 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
473                 ipmi_smi_watchdog_pretimeout(smi_info->intf);
474         } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
475                 /* Messages available. */
476                 smi_info->curr_msg = ipmi_alloc_smi_msg();
477                 if (!smi_info->curr_msg) {
478                         disable_si_irq(smi_info);
479                         smi_info->si_state = SI_NORMAL;
480                         return;
481                 }
482                 enable_si_irq(smi_info);
483
484                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
485                 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
486                 smi_info->curr_msg->data_size = 2;
487
488                 smi_info->handlers->start_transaction(
489                         smi_info->si_sm,
490                         smi_info->curr_msg->data,
491                         smi_info->curr_msg->data_size);
492                 smi_info->si_state = SI_GETTING_MESSAGES;
493         } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
494                 /* Events available. */
495                 smi_info->curr_msg = ipmi_alloc_smi_msg();
496                 if (!smi_info->curr_msg) {
497                         disable_si_irq(smi_info);
498                         smi_info->si_state = SI_NORMAL;
499                         return;
500                 }
501                 enable_si_irq(smi_info);
502
503                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
504                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
505                 smi_info->curr_msg->data_size = 2;
506
507                 smi_info->handlers->start_transaction(
508                         smi_info->si_sm,
509                         smi_info->curr_msg->data,
510                         smi_info->curr_msg->data_size);
511                 smi_info->si_state = SI_GETTING_EVENTS;
512         } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
513                    smi_info->oem_data_avail_handler) {
514                 if (smi_info->oem_data_avail_handler(smi_info))
515                         goto retry;
516         } else
517                 smi_info->si_state = SI_NORMAL;
518 }
519
520 static void handle_transaction_done(struct smi_info *smi_info)
521 {
522         struct ipmi_smi_msg *msg;
523 #ifdef DEBUG_TIMING
524         struct timeval t;
525
526         do_gettimeofday(&t);
527         printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
528 #endif
529         switch (smi_info->si_state) {
530         case SI_NORMAL:
531                 if (!smi_info->curr_msg)
532                         break;
533
534                 smi_info->curr_msg->rsp_size
535                         = smi_info->handlers->get_result(
536                                 smi_info->si_sm,
537                                 smi_info->curr_msg->rsp,
538                                 IPMI_MAX_MSG_LENGTH);
539
540                 /*
541                  * Do this here because deliver_recv_msg() releases the
542                  * lock, and a new message can be put in during the
543                  * time the lock is released.
544                  */
545                 msg = smi_info->curr_msg;
546                 smi_info->curr_msg = NULL;
547                 deliver_recv_msg(smi_info, msg);
548                 break;
549
550         case SI_GETTING_FLAGS:
551         {
552                 unsigned char msg[4];
553                 unsigned int  len;
554
555                 /* We got the flags from the SMI, now handle them. */
556                 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
557                 if (msg[2] != 0) {
558                         /* Error fetching flags, just give up for now. */
559                         smi_info->si_state = SI_NORMAL;
560                 } else if (len < 4) {
561                         /*
562                          * Hmm, no flags.  That's technically illegal, but
563                          * don't use uninitialized data.
564                          */
565                         smi_info->si_state = SI_NORMAL;
566                 } else {
567                         smi_info->msg_flags = msg[3];
568                         handle_flags(smi_info);
569                 }
570                 break;
571         }
572
573         case SI_CLEARING_FLAGS:
574         case SI_CLEARING_FLAGS_THEN_SET_IRQ:
575         {
576                 unsigned char msg[3];
577
578                 /* We cleared the flags. */
579                 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
580                 if (msg[2] != 0) {
581                         /* Error clearing flags */
582                         dev_warn(smi_info->dev,
583                                  "Error clearing flags: %2.2x\n", msg[2]);
584                 }
585                 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
586                         start_enable_irq(smi_info);
587                 else
588                         smi_info->si_state = SI_NORMAL;
589                 break;
590         }
591
592         case SI_GETTING_EVENTS:
593         {
594                 smi_info->curr_msg->rsp_size
595                         = smi_info->handlers->get_result(
596                                 smi_info->si_sm,
597                                 smi_info->curr_msg->rsp,
598                                 IPMI_MAX_MSG_LENGTH);
599
600                 /*
601                  * Do this here because deliver_recv_msg() releases the
602                  * lock, and a new message can be put in during the
603                  * time the lock is released.
604                  */
605                 msg = smi_info->curr_msg;
606                 smi_info->curr_msg = NULL;
607                 if (msg->rsp[2] != 0) {
608                         /* Error getting event, probably done. */
609                         msg->done(msg);
610
611                         /* Take off the event flag. */
612                         smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
613                         handle_flags(smi_info);
614                 } else {
615                         smi_inc_stat(smi_info, events);
616
617                         /*
618                          * Do this before we deliver the message
619                          * because delivering the message releases the
620                          * lock and something else can mess with the
621                          * state.
622                          */
623                         handle_flags(smi_info);
624
625                         deliver_recv_msg(smi_info, msg);
626                 }
627                 break;
628         }
629
630         case SI_GETTING_MESSAGES:
631         {
632                 smi_info->curr_msg->rsp_size
633                         = smi_info->handlers->get_result(
634                                 smi_info->si_sm,
635                                 smi_info->curr_msg->rsp,
636                                 IPMI_MAX_MSG_LENGTH);
637
638                 /*
639                  * Do this here because deliver_recv_msg() releases the
640                  * lock, and a new message can be put in during the
641                  * time the lock is released.
642                  */
643                 msg = smi_info->curr_msg;
644                 smi_info->curr_msg = NULL;
645                 if (msg->rsp[2] != 0) {
646                         /* Error getting a message, probably done. */
647                         msg->done(msg);
648
649                         /* Take off the msg flag. */
650                         smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
651                         handle_flags(smi_info);
652                 } else {
653                         smi_inc_stat(smi_info, incoming_messages);
654
655                         /*
656                          * Do this before we deliver the message
657                          * because delivering the message releases the
658                          * lock and something else can mess with the
659                          * state.
660                          */
661                         handle_flags(smi_info);
662
663                         deliver_recv_msg(smi_info, msg);
664                 }
665                 break;
666         }
667
668         case SI_ENABLE_INTERRUPTS1:
669         {
670                 unsigned char msg[4];
671
672                 /* We got the BMC's global enables; now set the irq bits. */
673                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
674                 if (msg[2] != 0) {
675                         dev_warn(smi_info->dev,
676                                  "Couldn't get irq info: %x.\n", msg[2]);
677                         dev_warn(smi_info->dev,
678                                  "Maybe ok, but ipmi might run very slowly.\n");
679                         smi_info->si_state = SI_NORMAL;
680                 } else {
681                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
682                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
683                         msg[2] = (msg[3] |
684                                   IPMI_BMC_RCV_MSG_INTR |
685                                   IPMI_BMC_EVT_MSG_INTR);
686                         smi_info->handlers->start_transaction(
687                                 smi_info->si_sm, msg, 3);
688                         smi_info->si_state = SI_ENABLE_INTERRUPTS2;
689                 }
690                 break;
691         }
692
693         case SI_ENABLE_INTERRUPTS2:
694         {
695                 unsigned char msg[4];
696
697                 /* The response to our set of the global enables. */
698                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
699                 if (msg[2] != 0) {
700                         dev_warn(smi_info->dev,
701                                  "Couldn't set irq info: %x.\n", msg[2]);
702                         dev_warn(smi_info->dev,
703                                  "Maybe ok, but ipmi might run very slowly.\n");
704                 } else
705                         smi_info->interrupt_disabled = 0;
706                 smi_info->si_state = SI_NORMAL;
707                 break;
708         }
709
710         case SI_DISABLE_INTERRUPTS1:
711         {
712                 unsigned char msg[4];
713
714                 /* We got the BMC's global enables; now clear the irq bits. */
715                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
716                 if (msg[2] != 0) {
717                         dev_warn(smi_info->dev, "Could not disable interrupts"
718                                  ", failed get.\n");
719                         smi_info->si_state = SI_NORMAL;
720                 } else {
721                         msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
722                         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
723                         msg[2] = (msg[3] &
724                                   ~(IPMI_BMC_RCV_MSG_INTR |
725                                     IPMI_BMC_EVT_MSG_INTR));
726                         smi_info->handlers->start_transaction(
727                                 smi_info->si_sm, msg, 3);
728                         smi_info->si_state = SI_DISABLE_INTERRUPTS2;
729                 }
730                 break;
731         }
732
733         case SI_DISABLE_INTERRUPTS2:
734         {
735                 unsigned char msg[4];
736
737                 /* The response to our clear of the irq enable bits. */
738                 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
739                 if (msg[2] != 0) {
740                         dev_warn(smi_info->dev, "Could not disable interrupts"
741                                  ", failed set.\n");
742                 }
743                 smi_info->si_state = SI_NORMAL;
744                 break;
745         }
746         }
747 }
748
749 /*
750  * Called on timeouts and events.  Timeouts should pass the elapsed
751  * time, interrupts should pass in zero.  Must be called with
752  * si_lock held and interrupts disabled.
753  */
754 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
755                                            int time)
756 {
757         enum si_sm_result si_sm_result;
758
759  restart:
760         /*
761          * There used to be a loop here that waited a little while
762          * (around 25us) before giving up.  That turned out to be
763  * pointless; the minimum delays I was seeing were in the 300us
764          * range, which is far too long to wait in an interrupt.  So
765          * we just run until the state machine tells us something
766          * happened or it needs a delay.
767          */
768         si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
769         time = 0;
770         while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
771                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
772
773         if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
774                 smi_inc_stat(smi_info, complete_transactions);
775
776                 handle_transaction_done(smi_info);
777                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
778         } else if (si_sm_result == SI_SM_HOSED) {
779                 smi_inc_stat(smi_info, hosed_count);
780
781                 /*
782                  * Do this before return_hosed_msg(), because that
783                  * releases the lock.
784                  */
785                 smi_info->si_state = SI_NORMAL;
786                 if (smi_info->curr_msg != NULL) {
787                         /*
788                          * If we were handling a user message, format
789                          * a response to send to the upper layer to
790                          * tell it about the error.
791                          */
792                         return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
793                 }
794                 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
795         }
796
797         /*
798          * We prefer handling attn over new messages.  But don't do
799          * this if there is not yet an upper layer to handle anything.
800          */
801         if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
802                 unsigned char msg[2];
803
804                 smi_inc_stat(smi_info, attentions);
805
806                 /*
807                  * Got an attn, send down a get message flags to see
808                  * what's causing it.  It would be better to handle
809                  * this in the upper layer, but due to the way
810                  * interrupts work with the SMI, that's not really
811                  * possible.
812                  */
813                 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
814                 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
815
816                 smi_info->handlers->start_transaction(
817                         smi_info->si_sm, msg, 2);
818                 smi_info->si_state = SI_GETTING_FLAGS;
819                 goto restart;
820         }
821
822         /* If we are currently idle, try to start the next message. */
823         if (si_sm_result == SI_SM_IDLE) {
824                 smi_inc_stat(smi_info, idles);
825
826                 si_sm_result = start_next_msg(smi_info);
827                 if (si_sm_result != SI_SM_IDLE)
828                         goto restart;
829         }
830
831         if ((si_sm_result == SI_SM_IDLE)
832             && (atomic_read(&smi_info->req_events))) {
833                 /*
834                  * We are idle and the upper layer requested that I fetch
835                  * events, so do so.
836                  */
837                 atomic_set(&smi_info->req_events, 0);
838
839                 smi_info->curr_msg = ipmi_alloc_smi_msg();
840                 if (!smi_info->curr_msg)
841                         goto out;
842
843                 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
844                 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
845                 smi_info->curr_msg->data_size = 2;
846
847                 smi_info->handlers->start_transaction(
848                         smi_info->si_sm,
849                         smi_info->curr_msg->data,
850                         smi_info->curr_msg->data_size);
851                 smi_info->si_state = SI_GETTING_EVENTS;
852                 goto restart;
853         }
854  out:
855         return si_sm_result;
856 }
857
858 static void sender(void                *send_info,
859                    struct ipmi_smi_msg *msg,
860                    int                 priority)
861 {
862         struct smi_info   *smi_info = send_info;
863         enum si_sm_result result;
864         unsigned long     flags;
865 #ifdef DEBUG_TIMING
866         struct timeval    t;
867 #endif
868
869         if (atomic_read(&smi_info->stop_operation)) {
870                 msg->rsp[0] = msg->data[0] | 4;
871                 msg->rsp[1] = msg->data[1];
872                 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
873                 msg->rsp_size = 3;
874                 deliver_recv_msg(smi_info, msg);
875                 return;
876         }
877
878 #ifdef DEBUG_TIMING
879         do_gettimeofday(&t);
880         printk(KERN_DEBUG "**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
881 #endif
882
883         if (smi_info->run_to_completion) {
884                 /*
885                  * If we are running to completion, then throw it in
886                  * the list and run transactions until everything is
887                  * clear.  Priority doesn't matter here.
888                  */
889
890                 /*
891                  * Run to completion means we are single-threaded, no
892                  * need for locks.
893                  */
894                 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
895
896                 result = smi_event_handler(smi_info, 0);
897                 while (result != SI_SM_IDLE) {
898                         udelay(SI_SHORT_TIMEOUT_USEC);
899                         result = smi_event_handler(smi_info,
900                                                    SI_SHORT_TIMEOUT_USEC);
901                 }
902                 return;
903         }
904
905         spin_lock_irqsave(&smi_info->si_lock, flags);
906         if (priority > 0)
907                 list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
908         else
909                 list_add_tail(&msg->link, &smi_info->xmit_msgs);
910
911         if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
912                 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
913
914                 if (smi_info->thread)
915                         wake_up_process(smi_info->thread);
916
917                 start_next_msg(smi_info);
918                 smi_event_handler(smi_info, 0);
919         }
920         spin_unlock_irqrestore(&smi_info->si_lock, flags);
921 }
922
923 static void set_run_to_completion(void *send_info, int i_run_to_completion)
924 {
925         struct smi_info   *smi_info = send_info;
926         enum si_sm_result result;
927
928         smi_info->run_to_completion = i_run_to_completion;
929         if (i_run_to_completion) {
930                 result = smi_event_handler(smi_info, 0);
931                 while (result != SI_SM_IDLE) {
932                         udelay(SI_SHORT_TIMEOUT_USEC);
933                         result = smi_event_handler(smi_info,
934                                                    SI_SHORT_TIMEOUT_USEC);
935                 }
936         }
937 }
938
939 /*
940  * Use -1 in the nsec value of the busy-waiting timespec to indicate that
941  * we are spinning in kipmid looking for something and not delaying
942  * between checks.
943  */
944 static inline void ipmi_si_set_not_busy(struct timespec *ts)
945 {
946         ts->tv_nsec = -1;
947 }
948 static inline int ipmi_si_is_busy(struct timespec *ts)
949 {
950         return ts->tv_nsec != -1;
951 }
952
953 static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
954                                  const struct smi_info *smi_info,
955                                  struct timespec *busy_until)
956 {
957         unsigned int max_busy_us = 0;
958
959         if (smi_info->intf_num < num_max_busy_us)
960                 max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
961         if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
962                 ipmi_si_set_not_busy(busy_until);
963         else if (!ipmi_si_is_busy(busy_until)) {
964                 getnstimeofday(busy_until);
965                 timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
966         } else {
967                 struct timespec now;
968                 getnstimeofday(&now);
969                 if (unlikely(timespec_compare(&now, busy_until) > 0)) {
970                         ipmi_si_set_not_busy(busy_until);
971                         return 0;
972                 }
973         }
974         return 1;
975 }
976
977
978 /*
979  * A busy-waiting loop for speeding up IPMI operation.
980  *
981  * Lousy hardware makes this hard.  This is only enabled for systems
982  * that are not BT and do not have interrupts.  It keeps spinning
983  * until an operation is complete or until max_busy tells it to stop
984  * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
985  * Documentation/IPMI.txt for details.
986  */
987 static int ipmi_thread(void *data)
988 {
989         struct smi_info *smi_info = data;
990         unsigned long flags;
991         enum si_sm_result smi_result;
992         struct timespec busy_until;
993
994         ipmi_si_set_not_busy(&busy_until);
995         set_user_nice(current, 19);
996         while (!kthread_should_stop()) {
997                 int busy_wait;
998
999                 spin_lock_irqsave(&(smi_info->si_lock), flags);
1000                 smi_result = smi_event_handler(smi_info, 0);
1001
1002                 /*
1003                  * If the driver is doing something, there is a possible
1004          * race with the timer.  If the timer handler sees idle,
1005                  * and the thread here sees something else, the timer
1006                  * handler won't restart the timer even though it is
1007                  * required.  So start it here if necessary.
1008                  */
1009                 if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
1010                         smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
1011
1012                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1013                 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
1014                                                   &busy_until);
1015                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
1016                         ; /* do nothing */
1017                 else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
1018                         schedule();
1019                 else if (smi_result == SI_SM_IDLE)
1020                         schedule_timeout_interruptible(100);
1021                 else
1022                         schedule_timeout_interruptible(1);
1023         }
1024         return 0;
1025 }
1026
1027
1028 static void poll(void *send_info)
1029 {
1030         struct smi_info *smi_info = send_info;
1031         unsigned long flags = 0;
1032         int run_to_completion = smi_info->run_to_completion;
1033
1034         /*
1035          * Make sure there is some delay in the poll loop so we can
1036          * drive time forward and timeout things.
1037          */
1038         udelay(10);
1039         if (!run_to_completion)
1040                 spin_lock_irqsave(&smi_info->si_lock, flags);
1041         smi_event_handler(smi_info, 10);
1042         if (!run_to_completion)
1043                 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1044 }
1045
1046 static void request_events(void *send_info)
1047 {
1048         struct smi_info *smi_info = send_info;
1049
1050         if (atomic_read(&smi_info->stop_operation) ||
1051                                 !smi_info->has_event_buffer)
1052                 return;
1053
1054         atomic_set(&smi_info->req_events, 1);
1055 }
1056
1057 static int initialized;
1058
1059 static void smi_timeout(unsigned long data)
1060 {
1061         struct smi_info   *smi_info = (struct smi_info *) data;
1062         enum si_sm_result smi_result;
1063         unsigned long     flags;
1064         unsigned long     jiffies_now;
1065         long              time_diff;
1066         long              timeout;
1067 #ifdef DEBUG_TIMING
1068         struct timeval    t;
1069 #endif
1070
1071         spin_lock_irqsave(&(smi_info->si_lock), flags);
1072 #ifdef DEBUG_TIMING
1073         do_gettimeofday(&t);
1074         printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1075 #endif
1076         jiffies_now = jiffies;
1077         time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1078                      * SI_USEC_PER_JIFFY);
1079         smi_result = smi_event_handler(smi_info, time_diff);
1080
1081         if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1082                 /* Running with interrupts, only do long timeouts. */
1083                 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1084                 smi_inc_stat(smi_info, long_timeouts);
1085                 goto do_mod_timer;
1086         }
1087
1088         /*
1089          * If the state machine asks for a short delay, then shorten
1090          * the timer timeout.
1091          */
1092         if (smi_result == SI_SM_CALL_WITH_DELAY) {
1093                 smi_inc_stat(smi_info, short_timeouts);
1094                 timeout = jiffies + 1;
1095         } else {
1096                 smi_inc_stat(smi_info, long_timeouts);
1097                 timeout = jiffies + SI_TIMEOUT_JIFFIES;
1098         }
1099
1100  do_mod_timer:
1101         if (smi_result != SI_SM_IDLE)
1102                 smi_mod_timer(smi_info, timeout);
1103         else
1104                 smi_info->timer_running = false;
1105         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1106 }
1107
1108 static irqreturn_t si_irq_handler(int irq, void *data)
1109 {
1110         struct smi_info *smi_info = data;
1111         unsigned long   flags;
1112 #ifdef DEBUG_TIMING
1113         struct timeval  t;
1114 #endif
1115
1116         spin_lock_irqsave(&(smi_info->si_lock), flags);
1117
1118         smi_inc_stat(smi_info, interrupts);
1119
1120 #ifdef DEBUG_TIMING
1121         do_gettimeofday(&t);
1122         printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1123 #endif
1124         smi_event_handler(smi_info, 0);
1125         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1126         return IRQ_HANDLED;
1127 }
1128
1129 static irqreturn_t si_bt_irq_handler(int irq, void *data)
1130 {
1131         struct smi_info *smi_info = data;
1132         /* We need to clear the IRQ flag for the BT interface. */
1133         smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
1134                              IPMI_BT_INTMASK_CLEAR_IRQ_BIT
1135                              | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1136         return si_irq_handler(irq, data);
1137 }
1138
1139 static int smi_start_processing(void       *send_info,
1140                                 ipmi_smi_t intf)
1141 {
1142         struct smi_info *new_smi = send_info;
1143         int             enable = 0;
1144
1145         new_smi->intf = intf;
1146
1147         /* Try to claim any interrupts. */
1148         if (new_smi->irq_setup)
1149                 new_smi->irq_setup(new_smi);
1150
1151         /* Set up the timer that drives the interface. */
1152         setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1153         smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
1154
1155         /*
1156          * Check if the user forcefully enabled the daemon.
1157          */
1158         if (new_smi->intf_num < num_force_kipmid)
1159                 enable = force_kipmid[new_smi->intf_num];
1160         /*
1161          * The BT interface is efficient enough to not need a thread,
1162          * and there is no need for a thread if we have interrupts.
1163          */
1164         else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1165                 enable = 1;
1166
1167         if (enable) {
1168                 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1169                                               "kipmi%d", new_smi->intf_num);
1170                 if (IS_ERR(new_smi->thread)) {
1171                         dev_notice(new_smi->dev, "Could not start"
1172                                    " kernel thread due to error %ld, only using"
1173                                    " timers to drive the interface\n",
1174                                    PTR_ERR(new_smi->thread));
1175                         new_smi->thread = NULL;
1176                 }
1177         }
1178
1179         return 0;
1180 }
1181
1182 static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
1183 {
1184         struct smi_info *smi = send_info;
1185
1186         data->addr_src = smi->addr_source;
1187         data->dev = smi->dev;
1188         data->addr_info = smi->addr_info;
1189         get_device(smi->dev);
1190
1191         return 0;
1192 }
1193
1194 static void set_maintenance_mode(void *send_info, int enable)
1195 {
1196         struct smi_info   *smi_info = send_info;
1197
1198         if (!enable)
1199                 atomic_set(&smi_info->req_events, 0);
1200 }
1201
1202 static struct ipmi_smi_handlers handlers = {
1203         .owner                  = THIS_MODULE,
1204         .start_processing       = smi_start_processing,
1205         .get_smi_info           = get_smi_info,
1206         .sender                 = sender,
1207         .request_events         = request_events,
1208         .set_maintenance_mode   = set_maintenance_mode,
1209         .set_run_to_completion  = set_run_to_completion,
1210         .poll                   = poll,
1211 };
1212
1213 /*
1214  * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1215  * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS.
1216  */
1217
1218 static LIST_HEAD(smi_infos);
1219 static DEFINE_MUTEX(smi_infos_lock);
1220 static int smi_num; /* Used to sequence the SMIs */
1221
1222 #define DEFAULT_REGSPACING      1
1223 #define DEFAULT_REGSIZE         1
1224
1225 #ifdef CONFIG_ACPI
1226 static bool          si_tryacpi = 1;
1227 #endif
1228 #ifdef CONFIG_DMI
1229 static bool          si_trydmi = 1;
1230 #endif
1231 static bool          si_tryplatform = 1;
1232 #ifdef CONFIG_PCI
1233 static bool          si_trypci = 1;
1234 #endif
1235 static bool          si_trydefaults = 1;
1236 static char          *si_type[SI_MAX_PARMS];
1237 #define MAX_SI_TYPE_STR 30
1238 static char          si_type_str[MAX_SI_TYPE_STR];
1239 static unsigned long addrs[SI_MAX_PARMS];
1240 static unsigned int num_addrs;
1241 static unsigned int  ports[SI_MAX_PARMS];
1242 static unsigned int num_ports;
1243 static int           irqs[SI_MAX_PARMS];
1244 static unsigned int num_irqs;
1245 static int           regspacings[SI_MAX_PARMS];
1246 static unsigned int num_regspacings;
1247 static int           regsizes[SI_MAX_PARMS];
1248 static unsigned int num_regsizes;
1249 static int           regshifts[SI_MAX_PARMS];
1250 static unsigned int num_regshifts;
1251 static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
1252 static unsigned int num_slave_addrs;
1253
1254 #define IPMI_IO_ADDR_SPACE  0
1255 #define IPMI_MEM_ADDR_SPACE 1
1256 static char *addr_space_to_str[] = { "i/o", "mem" };
1257
1258 static int hotmod_handler(const char *val, struct kernel_param *kp);
1259
1260 module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1261 MODULE_PARM_DESC(hotmod, "Add and remove interfaces.  See"
1262                  " Documentation/IPMI.txt in the kernel sources for the"
1263                  " gory details.");
1264
1265 #ifdef CONFIG_ACPI
1266 module_param_named(tryacpi, si_tryacpi, bool, 0);
1267 MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
1268                  " default scan of the interfaces identified via ACPI");
1269 #endif
1270 #ifdef CONFIG_DMI
1271 module_param_named(trydmi, si_trydmi, bool, 0);
1272 MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
1273                  " default scan of the interfaces identified via DMI");
1274 #endif
1275 module_param_named(tryplatform, si_tryplatform, bool, 0);
1276 MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
1277                  " default scan of the interfaces identified via platform"
1278                  " interfaces like openfirmware");
1279 #ifdef CONFIG_PCI
1280 module_param_named(trypci, si_trypci, bool, 0);
1281 MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
1282                  " default scan of the interfaces identified via pci");
1283 #endif
1284 module_param_named(trydefaults, si_trydefaults, bool, 0);
1285 MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1286                  " default scan of the KCS and SMIC interface at the standard"
1287                  " address");
1288 module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1289 MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1290                  " interface separated by commas.  The types are 'kcs',"
1291                  " 'smic', and 'bt'.  For example si_type=kcs,bt will set"
1292                  " the first interface to kcs and the second to bt");
1293 module_param_array(addrs, ulong, &num_addrs, 0);
1294 MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1295                  " addresses separated by commas.  Only use if an interface"
1296                  " is in memory.  Otherwise, set it to zero or leave"
1297                  " it blank.");
1298 module_param_array(ports, uint, &num_ports, 0);
1299 MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1300                  " addresses separated by commas.  Only use if an interface"
1301                  " is a port.  Otherwise, set it to zero or leave"
1302                  " it blank.");
1303 module_param_array(irqs, int, &num_irqs, 0);
1304 MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1305                  " addresses separated by commas.  Only use if an interface"
1306                  " has an interrupt.  Otherwise, set it to zero or leave"
1307                  " it blank.");
1308 module_param_array(regspacings, int, &num_regspacings, 0);
1309 MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1310                  " and each successive register used by the interface.  For"
1311                  " instance, if the start address is 0xca2 and the spacing"
1312                  " is 2, then the second address is at 0xca4.  Defaults"
1313                  " to 1.");
1314 module_param_array(regsizes, int, &num_regsizes, 0);
1315 MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1316                  " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1317                  " 16-bit, 32-bit, or 64-bit register.  Use this if"
1318                  " the 8-bit IPMI register has to be read from a larger"
1319                  " register.");
1320 module_param_array(regshifts, int, &num_regshifts, 0);
1321 MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1322                  " IPMI register, in bits.  For instance, if the data"
1323                  " is read from a 32-bit word and the IPMI data is in"
1324                  " bits 8-15, then the shift would be 8.");
1325 module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1326 MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1327                  " the controller.  Normally this is 0x20, but can be"
1328                  " overridden by this parm.  This is an array indexed"
1329                  " by interface number.");
1330 module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1331 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1332                  " disabled(0).  Normally the IPMI driver auto-detects"
1333                  " this, but the value may be overridden by this parm.");
1334 module_param(unload_when_empty, int, 0);
1335 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1336                  " specified or found, default is 1.  Setting to 0"
1337                  " is useful for hot add of devices using hotmod.");
1338 module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
1339 MODULE_PARM_DESC(kipmid_max_busy_us,
1340                  "Max time (in microseconds) to busy-wait for IPMI data before"
1341                  " sleeping. 0 (default) means to wait forever. Set to 100-500"
1342                  " if kipmid is using up a lot of CPU time.");
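
/*
 * Illustrative usage (not part of this file): a single KCS interface at a
 * legacy I/O address with no interrupt could be configured with module
 * parameters along the lines of:
 *
 *   modprobe ipmi_si type=kcs ports=0xca2
 *
 * Values are given per interface, separated by commas, as described in the
 * parameter descriptions above.
 */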
1343
1344
1345 static void std_irq_cleanup(struct smi_info *info)
1346 {
1347         if (info->si_type == SI_BT)
1348                 /* Disable the interrupt in the BT interface. */
1349                 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1350         free_irq(info->irq, info);
1351 }
1352
1353 static int std_irq_setup(struct smi_info *info)
1354 {
1355         int rv;
1356
1357         if (!info->irq)
1358                 return 0;
1359
1360         if (info->si_type == SI_BT) {
1361                 rv = request_irq(info->irq,
1362                                  si_bt_irq_handler,
1363                                  IRQF_SHARED | IRQF_DISABLED,
1364                                  DEVICE_NAME,
1365                                  info);
1366                 if (!rv)
1367                         /* Enable the interrupt in the BT interface. */
1368                         info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1369                                          IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1370         } else
1371                 rv = request_irq(info->irq,
1372                                  si_irq_handler,
1373                                  IRQF_SHARED | IRQF_DISABLED,
1374                                  DEVICE_NAME,
1375                                  info);
1376         if (rv) {
1377                 dev_warn(info->dev, "%s unable to claim interrupt %d,"
1378                          " running polled\n",
1379                          DEVICE_NAME, info->irq);
1380                 info->irq = 0;
1381         } else {
1382                 info->irq_cleanup = std_irq_cleanup;
1383                 dev_info(info->dev, "Using irq %d\n", info->irq);
1384         }
1385
1386         return rv;
1387 }
1388
1389 static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1390 {
1391         unsigned int addr = io->addr_data;
1392
1393         return inb(addr + (offset * io->regspacing));
1394 }
1395
1396 static void port_outb(struct si_sm_io *io, unsigned int offset,
1397                       unsigned char b)
1398 {
1399         unsigned int addr = io->addr_data;
1400
1401         outb(b, addr + (offset * io->regspacing));
1402 }
1403
1404 static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1405 {
1406         unsigned int addr = io->addr_data;
1407
1408         return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1409 }
1410
1411 static void port_outw(struct si_sm_io *io, unsigned int offset,
1412                       unsigned char b)
1413 {
1414         unsigned int addr = io->addr_data;
1415
1416         outw(b << io->regshift, addr + (offset * io->regspacing));
1417 }
1418
1419 static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1420 {
1421         unsigned int addr = io->addr_data;
1422
1423         return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1424 }
1425
1426 static void port_outl(struct si_sm_io *io, unsigned int offset,
1427                       unsigned char b)
1428 {
1429         unsigned int addr = io->addr_data;
1430
1431         outl(b << io->regshift, addr+(offset * io->regspacing));
1432 }
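/*
 * Note on the port accessors above: regspacing is the byte stride between
 * successive IPMI registers, regsize selects the access width (inb/inw/inl
 * and outb/outw/outl), and regshift positions the 8-bit IPMI register
 * within the wider access.  Illustrative example: with regsize = 2 and
 * regshift = 8 the IPMI byte sits in the upper byte of each 16-bit port,
 * so port_inw() shifts the value right by 8 before masking it with 0xff.
 */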
1433
1434 static void port_cleanup(struct smi_info *info)
1435 {
1436         unsigned int addr = info->io.addr_data;
1437         int          idx;
1438
1439         if (addr) {
1440                 for (idx = 0; idx < info->io_size; idx++)
1441                         release_region(addr + idx * info->io.regspacing,
1442                                        info->io.regsize);
1443         }
1444 }
1445
1446 static int port_setup(struct smi_info *info)
1447 {
1448         unsigned int addr = info->io.addr_data;
1449         int          idx;
1450
1451         if (!addr)
1452                 return -ENODEV;
1453
1454         info->io_cleanup = port_cleanup;
1455
1456         /*
1457          * Figure out the actual inb/inw/inl/etc routine to use based
1458          * upon the register size.
1459          */
1460         switch (info->io.regsize) {
1461         case 1:
1462                 info->io.inputb = port_inb;
1463                 info->io.outputb = port_outb;
1464                 break;
1465         case 2:
1466                 info->io.inputb = port_inw;
1467                 info->io.outputb = port_outw;
1468                 break;
1469         case 4:
1470                 info->io.inputb = port_inl;
1471                 info->io.outputb = port_outl;
1472                 break;
1473         default:
1474                 dev_warn(info->dev, "Invalid register size: %d\n",
1475                          info->io.regsize);
1476                 return -EINVAL;
1477         }
1478
1479         /*
1480          * Some BIOSes reserve disjoint I/O regions in their ACPI
1481          * tables.  This causes problems when trying to register the
1482          * entire I/O region.  Therefore we must register each I/O
1483          * port separately.
1484          */
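             /*
              * Illustrative example: with addr = 0xca2, io_size = 2,
              * regspacing = 4 and regsize = 1, the loop below claims ports
              * 0xca2 and 0xca6 individually rather than a single contiguous
              * region covering 0xca2-0xca6.
              */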
1485         for (idx = 0; idx < info->io_size; idx++) {
1486                 if (request_region(addr + idx * info->io.regspacing,
1487                                    info->io.regsize, DEVICE_NAME) == NULL) {
1488                         /* Undo allocations */
1489                         while (idx--) {
1490                                 release_region(addr + idx * info->io.regspacing,
1491                                                info->io.regsize);
1492                         }
1493                         return -EIO;
1494                 }
1495         }
1496         return 0;
1497 }
1498
1499 static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1500 {
1501         return readb((io->addr)+(offset * io->regspacing));
1502 }
1503
1504 static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1505                      unsigned char b)
1506 {
1507         writeb(b, (io->addr)+(offset * io->regspacing));
1508 }
1509
1510 static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1511 {
1512         return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
1513                 & 0xff;
1514 }
1515
1516 static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1517                      unsigned char b)
1518 {
1519         writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1520 }
1521
1522 static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1523 {
1524         return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
1525                 & 0xff;
1526 }
1527
1528 static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1529                      unsigned char b)
1530 {
1531         writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1532 }
1533
1534 #ifdef readq
1535 static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1536 {
1537         return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
1538                 & 0xff;
1539 }
1540
1541 static void mem_outq(struct si_sm_io *io, unsigned int offset,
1542                      unsigned char b)
1543 {
1544         writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1545 }
1546 #endif
1547
1548 static void mem_cleanup(struct smi_info *info)
1549 {
1550         unsigned long addr = info->io.addr_data;
1551         int           mapsize;
1552
1553         if (info->io.addr) {
1554                 iounmap(info->io.addr);
1555
1556                 mapsize = ((info->io_size * info->io.regspacing)
1557                            - (info->io.regspacing - info->io.regsize));
1558
1559                 release_mem_region(addr, mapsize);
1560         }
1561 }
1562
1563 static int mem_setup(struct smi_info *info)
1564 {
1565         unsigned long addr = info->io.addr_data;
1566         int           mapsize;
1567
1568         if (!addr)
1569                 return -ENODEV;
1570
1571         info->io_cleanup = mem_cleanup;
1572
1573         /*
1574          * Figure out the actual readb/readw/readl/etc routine to use based
1575          * upon the register size.
1576          */
1577         switch (info->io.regsize) {
1578         case 1:
1579                 info->io.inputb = intf_mem_inb;
1580                 info->io.outputb = intf_mem_outb;
1581                 break;
1582         case 2:
1583                 info->io.inputb = intf_mem_inw;
1584                 info->io.outputb = intf_mem_outw;
1585                 break;
1586         case 4:
1587                 info->io.inputb = intf_mem_inl;
1588                 info->io.outputb = intf_mem_outl;
1589                 break;
1590 #ifdef readq
1591         case 8:
1592                 info->io.inputb = mem_inq;
1593                 info->io.outputb = mem_outq;
1594                 break;
1595 #endif
1596         default:
1597                 dev_warn(info->dev, "Invalid register size: %d\n",
1598                          info->io.regsize);
1599                 return -EINVAL;
1600         }
1601
1602         /*
1603          * Calculate the total amount of memory to claim.  This is an
1604          * unusual looking calculation, but it avoids claiming any
1605          * more memory than it has to.  It will claim everything
1606          * between the first address to the end of the last full
1607          * register.
1608          */
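             /*
              * Illustrative example: with io_size = 3, regspacing = 4 and
              * regsize = 1, mapsize = 3 * 4 - (4 - 1) = 9 bytes, covering
              * the first register through the last byte of the third
              * register without claiming the trailing padding.
              */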
1609         mapsize = ((info->io_size * info->io.regspacing)
1610                    - (info->io.regspacing - info->io.regsize));
1611
1612         if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1613                 return -EIO;
1614
1615         info->io.addr = ioremap(addr, mapsize);
1616         if (info->io.addr == NULL) {
1617                 release_mem_region(addr, mapsize);
1618                 return -EIO;
1619         }
1620         return 0;
1621 }
1622
1623 /*
1624  * Parms come in as <op1>[:op2[:op3...]].  ops are:
1625  *   add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1626  * Options are:
1627  *   rsp=<regspacing>
1628  *   rsi=<regsize>
1629  *   rsh=<regshift>
1630  *   irq=<irq>
1631  *   ipmb=<ipmb addr>
1632  */
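     /*
      * Illustrative example (values are arbitrary): a typical hotmod add,
      * written to /sys/module/ipmi_si/parameters/hotmod as described in
      * Documentation/IPMI.txt, could look like
      *   add,kcs,i/o,0xca2,rsp=1,irq=0
      * and the matching remove only needs the type, address space and
      * address:
      *   remove,kcs,i/o,0xca2
      */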
1633 enum hotmod_op { HM_ADD, HM_REMOVE };
1634 struct hotmod_vals {
1635         char *name;
1636         int  val;
1637 };
1638 static struct hotmod_vals hotmod_ops[] = {
1639         { "add",        HM_ADD },
1640         { "remove",     HM_REMOVE },
1641         { NULL }
1642 };
1643 static struct hotmod_vals hotmod_si[] = {
1644         { "kcs",        SI_KCS },
1645         { "smic",       SI_SMIC },
1646         { "bt",         SI_BT },
1647         { NULL }
1648 };
1649 static struct hotmod_vals hotmod_as[] = {
1650         { "mem",        IPMI_MEM_ADDR_SPACE },
1651         { "i/o",        IPMI_IO_ADDR_SPACE },
1652         { NULL }
1653 };
1654
1655 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1656 {
1657         char *s;
1658         int  i;
1659
1660         s = strchr(*curr, ',');
1661         if (!s) {
1662                 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1663                 return -EINVAL;
1664         }
1665         *s = '\0';
1666         s++;
1667         for (i = 0; v[i].name; i++) {
1668                 if (strcmp(*curr, v[i].name) == 0) {
1669                         *val = v[i].val;
1670                         *curr = s;
1671                         return 0;
1672                 }
1673         }
1674
1675         printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1676         return -EINVAL;
1677 }
1678
1679 static int check_hotmod_int_op(const char *curr, const char *option,
1680                                const char *name, int *val)
1681 {
1682         char *n;
1683
1684         if (strcmp(curr, name) == 0) {
1685                 if (!option) {
1686                         printk(KERN_WARNING PFX
1687                                "No option given for '%s'\n",
1688                                curr);
1689                         return -EINVAL;
1690                 }
1691                 *val = simple_strtoul(option, &n, 0);
1692                 if ((*n != '\0') || (*option == '\0')) {
1693                         printk(KERN_WARNING PFX
1694                                "Bad option given for '%s'\n",
1695                                curr);
1696                         return -EINVAL;
1697                 }
1698                 return 1;
1699         }
1700         return 0;
1701 }
1702
1703 static struct smi_info *smi_info_alloc(void)
1704 {
1705         struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
1706
1707         if (info)
1708                 spin_lock_init(&info->si_lock);
1709         return info;
1710 }
1711
1712 static int hotmod_handler(const char *val, struct kernel_param *kp)
1713 {
1714         char *str = kstrdup(val, GFP_KERNEL);
1715         int  rv;
1716         char *next, *curr, *s, *n, *o;
1717         enum hotmod_op op;
1718         enum si_type si_type;
1719         int  addr_space;
1720         unsigned long addr;
1721         int regspacing;
1722         int regsize;
1723         int regshift;
1724         int irq;
1725         int ipmb;
1726         int ival;
1727         int len;
1728         struct smi_info *info;
1729
1730         if (!str)
1731                 return -ENOMEM;
1732
1733         /* Kill any trailing spaces, as we can get a "\n" from echo. */
1734         len = strlen(str);
1735         ival = len - 1;
1736         while ((ival >= 0) && isspace(str[ival])) {
1737                 str[ival] = '\0';
1738                 ival--;
1739         }
1740
1741         for (curr = str; curr; curr = next) {
1742                 regspacing = 1;
1743                 regsize = 1;
1744                 regshift = 0;
1745                 irq = 0;
1746                 ipmb = 0; /* Choose the default if not specified */
1747
1748                 next = strchr(curr, ':');
1749                 if (next) {
1750                         *next = '\0';
1751                         next++;
1752                 }
1753
1754                 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1755                 if (rv)
1756                         break;
1757                 op = ival;
1758
1759                 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1760                 if (rv)
1761                         break;
1762                 si_type = ival;
1763
1764                 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1765                 if (rv)
1766                         break;
1767
1768                 s = strchr(curr, ',');
1769                 if (s) {
1770                         *s = '\0';
1771                         s++;
1772                 }
1773                 addr = simple_strtoul(curr, &n, 0);
1774                 if ((*n != '\0') || (*curr == '\0')) {
1775                         printk(KERN_WARNING PFX "Invalid hotmod address"
1776                                " '%s'\n", curr);
1777                         break;
1778                 }
1779
1780                 while (s) {
1781                         curr = s;
1782                         s = strchr(curr, ',');
1783                         if (s) {
1784                                 *s = '\0';
1785                                 s++;
1786                         }
1787                         o = strchr(curr, '=');
1788                         if (o) {
1789                                 *o = '\0';
1790                                 o++;
1791                         }
1792                         rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1793                         if (rv < 0)
1794                                 goto out;
1795                         else if (rv)
1796                                 continue;
1797                         rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1798                         if (rv < 0)
1799                                 goto out;
1800                         else if (rv)
1801                                 continue;
1802                         rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1803                         if (rv < 0)
1804                                 goto out;
1805                         else if (rv)
1806                                 continue;
1807                         rv = check_hotmod_int_op(curr, o, "irq", &irq);
1808                         if (rv < 0)
1809                                 goto out;
1810                         else if (rv)
1811                                 continue;
1812                         rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1813                         if (rv < 0)
1814                                 goto out;
1815                         else if (rv)
1816                                 continue;
1817
1818                         rv = -EINVAL;
1819                         printk(KERN_WARNING PFX
1820                                "Invalid hotmod option '%s'\n",
1821                                curr);
1822                         goto out;
1823                 }
1824
1825                 if (op == HM_ADD) {
1826                         info = smi_info_alloc();
1827                         if (!info) {
1828                                 rv = -ENOMEM;
1829                                 goto out;
1830                         }
1831
1832                         info->addr_source = SI_HOTMOD;
1833                         info->si_type = si_type;
1834                         info->io.addr_data = addr;
1835                         info->io.addr_type = addr_space;
1836                         if (addr_space == IPMI_MEM_ADDR_SPACE)
1837                                 info->io_setup = mem_setup;
1838                         else
1839                                 info->io_setup = port_setup;
1840
1841                         info->io.addr = NULL;
1842                         info->io.regspacing = regspacing;
1843                         if (!info->io.regspacing)
1844                                 info->io.regspacing = DEFAULT_REGSPACING;
1845                         info->io.regsize = regsize;
1846                         if (!info->io.regsize)
1847                                 info->io.regsize = DEFAULT_REGSPACING;
1848                         info->io.regshift = regshift;
1849                         info->irq = irq;
1850                         if (info->irq)
1851                                 info->irq_setup = std_irq_setup;
1852                         info->slave_addr = ipmb;
1853
1854                         if (!add_smi(info)) {
1855                                 if (try_smi_init(info))
1856                                         cleanup_one_si(info);
1857                         } else {
1858                                 kfree(info);
1859                         }
1860                 } else {
1861                         /* remove */
1862                         struct smi_info *e, *tmp_e;
1863
1864                         mutex_lock(&smi_infos_lock);
1865                         list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1866                                 if (e->io.addr_type != addr_space)
1867                                         continue;
1868                                 if (e->si_type != si_type)
1869                                         continue;
1870                                 if (e->io.addr_data == addr)
1871                                         cleanup_one_si(e);
1872                         }
1873                         mutex_unlock(&smi_infos_lock);
1874                 }
1875         }
1876         rv = len;
1877  out:
1878         kfree(str);
1879         return rv;
1880 }
1881
1882 static int hardcode_find_bmc(void)
1883 {
1884         int ret = -ENODEV;
1885         int             i;
1886         struct smi_info *info;
1887
1888         for (i = 0; i < SI_MAX_PARMS; i++) {
1889                 if (!ports[i] && !addrs[i])
1890                         continue;
1891
1892                 info = smi_info_alloc();
1893                 if (!info)
1894                         return -ENOMEM;
1895
1896                 info->addr_source = SI_HARDCODED;
1897                 printk(KERN_INFO PFX "probing via hardcoded address\n");
1898
1899                 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
1900                         info->si_type = SI_KCS;
1901                 } else if (strcmp(si_type[i], "smic") == 0) {
1902                         info->si_type = SI_SMIC;
1903                 } else if (strcmp(si_type[i], "bt") == 0) {
1904                         info->si_type = SI_BT;
1905                 } else {
1906                         printk(KERN_WARNING PFX "Interface type specified "
1907                                "for interface %d was invalid: %s\n",
1908                                i, si_type[i]);
1909                         kfree(info);
1910                         continue;
1911                 }
1912
1913                 if (ports[i]) {
1914                         /* An I/O port */
1915                         info->io_setup = port_setup;
1916                         info->io.addr_data = ports[i];
1917                         info->io.addr_type = IPMI_IO_ADDR_SPACE;
1918                 } else if (addrs[i]) {
1919                         /* A memory port */
1920                         info->io_setup = mem_setup;
1921                         info->io.addr_data = addrs[i];
1922                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1923                 } else {
1924                         printk(KERN_WARNING PFX "Interface type specified "
1925                                "for interface %d, but port and address were "
1926                                "not set or set to zero.\n", i);
1927                         kfree(info);
1928                         continue;
1929                 }
1930
1931                 info->io.addr = NULL;
1932                 info->io.regspacing = regspacings[i];
1933                 if (!info->io.regspacing)
1934                         info->io.regspacing = DEFAULT_REGSPACING;
1935                 info->io.regsize = regsizes[i];
1936                 if (!info->io.regsize)
1937                         info->io.regsize = DEFAULT_REGSPACING;
1938                 info->io.regshift = regshifts[i];
1939                 info->irq = irqs[i];
1940                 if (info->irq)
1941                         info->irq_setup = std_irq_setup;
1942                 info->slave_addr = slave_addrs[i];
1943
1944                 if (!add_smi(info)) {
1945                         if (try_smi_init(info))
1946                                 cleanup_one_si(info);
1947                         ret = 0;
1948                 } else {
1949                         kfree(info);
1950                 }
1951         }
1952         return ret;
1953 }
1954
1955 #ifdef CONFIG_ACPI
1956
1957 #include <linux/acpi.h>
1958
1959 /*
1960  * Once we get an ACPI failure, we don't try any more, because we go
1961  * through the tables sequentially.  Once we don't find a table, there
1962  * are no more.
1963  */
1964 static int acpi_failure;
1965
1966 /* For GPE-type interrupts. */
1967 static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
1968         u32 gpe_number, void *context)
1969 {
1970         struct smi_info *smi_info = context;
1971         unsigned long   flags;
1972 #ifdef DEBUG_TIMING
1973         struct timeval t;
1974 #endif
1975
1976         spin_lock_irqsave(&(smi_info->si_lock), flags);
1977
1978         smi_inc_stat(smi_info, interrupts);
1979
1980 #ifdef DEBUG_TIMING
1981         do_gettimeofday(&t);
1982         printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1983 #endif
1984         smi_event_handler(smi_info, 0);
1985         spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1986
1987         return ACPI_INTERRUPT_HANDLED;
1988 }
1989
1990 static void acpi_gpe_irq_cleanup(struct smi_info *info)
1991 {
1992         if (!info->irq)
1993                 return;
1994
1995         acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1996 }
1997
1998 static int acpi_gpe_irq_setup(struct smi_info *info)
1999 {
2000         acpi_status status;
2001
2002         if (!info->irq)
2003                 return 0;
2004
2005         /* FIXME - is level triggered right? */
2006         status = acpi_install_gpe_handler(NULL,
2007                                           info->irq,
2008                                           ACPI_GPE_LEVEL_TRIGGERED,
2009                                           &ipmi_acpi_gpe,
2010                                           info);
2011         if (status != AE_OK) {
2012                 dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
2013                          " running polled\n", DEVICE_NAME, info->irq);
2014                 info->irq = 0;
2015                 return -EINVAL;
2016         } else {
2017                 info->irq_cleanup = acpi_gpe_irq_cleanup;
2018                 dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
2019                 return 0;
2020         }
2021 }
2022
2023 /*
2024  * Defined at
2025  * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
2026  */
2027 struct SPMITable {
2028         s8      Signature[4];
2029         u32     Length;
2030         u8      Revision;
2031         u8      Checksum;
2032         s8      OEMID[6];
2033         s8      OEMTableID[8];
2034         s8      OEMRevision[4];
2035         s8      CreatorID[4];
2036         s8      CreatorRevision[4];
2037         u8      InterfaceType;
2038         u8      IPMIlegacy;
2039         s16     SpecificationRevision;
2040
2041         /*
2042          * Bit 0 - SCI interrupt supported
2043          * Bit 1 - I/O APIC/SAPIC
2044          */
2045         u8      InterruptType;
2046
2047         /*
2048          * If bit 0 of InterruptType is set, then this is the SCI
2049          * interrupt in the GPEx_STS register.
2050          */
2051         u8      GPE;
2052
2053         s16     Reserved;
2054
2055         /*
2056          * If bit 1 of InterruptType is set, then this is the I/O
2057          * APIC/SAPIC interrupt.
2058          */
2059         u32     GlobalSystemInterrupt;
2060
2061         /* The actual register address. */
2062         struct acpi_generic_address addr;
2063
2064         u8      UID[4];
2065
2066         s8      spmi_id[1]; /* A '\0' terminated array starts here. */
2067 };
2068
2069 static int try_init_spmi(struct SPMITable *spmi)
2070 {
2071         struct smi_info  *info;
2072
2073         if (spmi->IPMIlegacy != 1) {
2074                 printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
2075                 return -ENODEV;
2076         }
2077
2078         info = smi_info_alloc();
2079         if (!info) {
2080                 printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
2081                 return -ENOMEM;
2082         }
2083
2084         info->addr_source = SI_SPMI;
2085         printk(KERN_INFO PFX "probing via SPMI\n");
2086
2087         /* Figure out the interface type. */
2088         switch (spmi->InterfaceType) {
2089         case 1: /* KCS */
2090                 info->si_type = SI_KCS;
2091                 break;
2092         case 2: /* SMIC */
2093                 info->si_type = SI_SMIC;
2094                 break;
2095         case 3: /* BT */
2096                 info->si_type = SI_BT;
2097                 break;
2098         default:
2099                 printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
2100                        spmi->InterfaceType);
2101                 kfree(info);
2102                 return -EIO;
2103         }
2104
2105         if (spmi->InterruptType & 1) {
2106                 /* We've got a GPE interrupt. */
2107                 info->irq = spmi->GPE;
2108                 info->irq_setup = acpi_gpe_irq_setup;
2109         } else if (spmi->InterruptType & 2) {
2110                 /* We've got an APIC/SAPIC interrupt. */
2111                 info->irq = spmi->GlobalSystemInterrupt;
2112                 info->irq_setup = std_irq_setup;
2113         } else {
2114                 /* Use the default interrupt setting. */
2115                 info->irq = 0;
2116                 info->irq_setup = NULL;
2117         }
2118
2119         if (spmi->addr.bit_width) {
2120                 /* A (hopefully) properly formed register bit width. */
2121                 info->io.regspacing = spmi->addr.bit_width / 8;
2122         } else {
2123                 info->io.regspacing = DEFAULT_REGSPACING;
2124         }
2125         info->io.regsize = info->io.regspacing;
2126         info->io.regshift = spmi->addr.bit_offset;
2127
2128         if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
2129                 info->io_setup = mem_setup;
2130                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2131         } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
2132                 info->io_setup = port_setup;
2133                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2134         } else {
2135                 kfree(info);
2136                 printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
2137                 return -EIO;
2138         }
2139         info->io.addr_data = spmi->addr.address;
2140
2141         pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
2142                  (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
2143                  info->io.addr_data, info->io.regsize, info->io.regspacing,
2144                  info->irq);
2145
2146         if (add_smi(info))
2147                 kfree(info);
2148
2149         return 0;
2150 }
2151
2152 static void spmi_find_bmc(void)
2153 {
2154         acpi_status      status;
2155         struct SPMITable *spmi;
2156         int              i;
2157
2158         if (acpi_disabled)
2159                 return;
2160
2161         if (acpi_failure)
2162                 return;
2163
2164         for (i = 0; ; i++) {
2165                 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
2166                                         (struct acpi_table_header **)&spmi);
2167                 if (status != AE_OK)
2168                         return;
2169
2170                 try_init_spmi(spmi);
2171         }
2172 }
2173
2174 static int ipmi_pnp_probe(struct pnp_dev *dev,
2175                                     const struct pnp_device_id *dev_id)
2176 {
2177         struct acpi_device *acpi_dev;
2178         struct smi_info *info;
2179         struct resource *res, *res_second;
2180         acpi_handle handle;
2181         acpi_status status;
2182         unsigned long long tmp;
2183
2184         acpi_dev = pnp_acpi_device(dev);
2185         if (!acpi_dev)
2186                 return -ENODEV;
2187
2188         info = smi_info_alloc();
2189         if (!info)
2190                 return -ENOMEM;
2191
2192         info->addr_source = SI_ACPI;
2193         printk(KERN_INFO PFX "probing via ACPI\n");
2194
2195         handle = acpi_dev->handle;
2196         info->addr_info.acpi_info.acpi_handle = handle;
2197
2198         /* _IFT tells us the interface type: KCS, BT, etc */
2199         status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
2200         if (ACPI_FAILURE(status))
2201                 goto err_free;
2202
2203         switch (tmp) {
2204         case 1:
2205                 info->si_type = SI_KCS;
2206                 break;
2207         case 2:
2208                 info->si_type = SI_SMIC;
2209                 break;
2210         case 3:
2211                 info->si_type = SI_BT;
2212                 break;
2213         default:
2214                 dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
2215                 goto err_free;
2216         }
2217
2218         res = pnp_get_resource(dev, IORESOURCE_IO, 0);
2219         if (res) {
2220                 info->io_setup = port_setup;
2221                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2222         } else {
2223                 res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
2224                 if (res) {
2225                         info->io_setup = mem_setup;
2226                         info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2227                 }
2228         }
2229         if (!res) {
2230                 dev_err(&dev->dev, "no I/O or memory address\n");
2231                 goto err_free;
2232         }
2233         info->io.addr_data = res->start;
2234
2235         info->io.regspacing = DEFAULT_REGSPACING;
2236         res_second = pnp_get_resource(dev,
2237                                (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
2238                                         IORESOURCE_IO : IORESOURCE_MEM,
2239                                1);
2240         if (res_second) {
2241                 if (res_second->start > info->io.addr_data)
2242                         info->io.regspacing = res_second->start - info->io.addr_data;
2243         }
2244         info->io.regsize = DEFAULT_REGSPACING;
2245         info->io.regshift = 0;
2246
2247         /* If _GPE exists, use it; otherwise use standard interrupts */
2248         status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
2249         if (ACPI_SUCCESS(status)) {
2250                 info->irq = tmp;
2251                 info->irq_setup = acpi_gpe_irq_setup;
2252         } else if (pnp_irq_valid(dev, 0)) {
2253                 info->irq = pnp_irq(dev, 0);
2254                 info->irq_setup = std_irq_setup;
2255         }
2256
2257         info->dev = &dev->dev;
2258         pnp_set_drvdata(dev, info);
2259
2260         dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
2261                  res, info->io.regsize, info->io.regspacing,
2262                  info->irq);
2263
2264         if (add_smi(info))
2265                 goto err_free;
2266
2267         return 0;
2268
2269 err_free:
2270         kfree(info);
2271         return -EINVAL;
2272 }
2273
2274 static void ipmi_pnp_remove(struct pnp_dev *dev)
2275 {
2276         struct smi_info *info = pnp_get_drvdata(dev);
2277
2278         cleanup_one_si(info);
2279 }
2280
2281 static const struct pnp_device_id pnp_dev_table[] = {
2282         {"IPI0001", 0},
2283         {"", 0},
2284 };
2285
2286 static struct pnp_driver ipmi_pnp_driver = {
2287         .name           = DEVICE_NAME,
2288         .probe          = ipmi_pnp_probe,
2289         .remove         = ipmi_pnp_remove,
2290         .id_table       = pnp_dev_table,
2291 };
2292 #endif
2293
2294 #ifdef CONFIG_DMI
2295 struct dmi_ipmi_data {
2296         u8              type;
2297         u8              addr_space;
2298         unsigned long   base_addr;
2299         u8              irq;
2300         u8              offset;
2301         u8              slave_addr;
2302 };
2303
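     /*
      * The byte offsets read below follow the SMBIOS "IPMI Device
      * Information" (type 38) record: byte 4 is the interface type, byte 6
      * the BMC slave address, bytes 8-15 the base address, byte 0x10 the
      * base address modifier/interrupt info and byte 0x11 the interrupt
      * number.
      */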
2304 static int decode_dmi(const struct dmi_header *dm,
2305                                 struct dmi_ipmi_data *dmi)
2306 {
2307         const u8        *data = (const u8 *)dm;
2308         unsigned long   base_addr;
2309         u8              reg_spacing;
2310         u8              len = dm->length;
2311
2312         dmi->type = data[4];
2313
2314         memcpy(&base_addr, data+8, sizeof(unsigned long));
2315         if (len >= 0x11) {
2316                 if (base_addr & 1) {
2317                         /* I/O */
2318                         base_addr &= 0xFFFE;
2319                         dmi->addr_space = IPMI_IO_ADDR_SPACE;
2320                 } else
2321                         /* Memory */
2322                         dmi->addr_space = IPMI_MEM_ADDR_SPACE;
2323
2324                 /* If bit 4 of byte 0x10 is set, then the lsb for the address
2325                    is odd. */
2326                 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
2327
2328                 dmi->irq = data[0x11];
2329
2330                 /* The top two bits of byte 0x10 hold the register spacing. */
2331                 reg_spacing = (data[0x10] & 0xC0) >> 6;
2332                 switch (reg_spacing) {
2333                 case 0x00: /* Byte boundaries */
2334                     dmi->offset = 1;
2335                     break;
2336                 case 0x01: /* 32-bit boundaries */
2337                     dmi->offset = 4;
2338                     break;
2339                 case 0x02: /* 16-byte boundaries */
2340                     dmi->offset = 16;
2341                     break;
2342                 default:
2343                     /* Some other interface, just ignore it. */
2344                     return -EIO;
2345                 }
2346         } else {
2347                 /* Old DMI spec. */
2348                 /*
2349                  * Note that technically, the lower bit of the base
2350                  * address should be 1 if the address is I/O and 0 if
2351                  * the address is in memory.  So many systems get that
2352                  * wrong (and all that I have seen are I/O) so we just
2353                  * ignore that bit and assume I/O.  Systems that use
2354                  * memory should use the newer spec, anyway.
2355                  */
2356                 dmi->base_addr = base_addr & 0xfffe;
2357                 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2358                 dmi->offset = 1;
2359         }
2360
2361         dmi->slave_addr = data[6];
2362
2363         return 0;
2364 }
2365
2366 static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
2367 {
2368         struct smi_info *info;
2369
2370         info = smi_info_alloc();
2371         if (!info) {
2372                 printk(KERN_ERR PFX "Could not allocate SI data\n");
2373                 return;
2374         }
2375
2376         info->addr_source = SI_SMBIOS;
2377         printk(KERN_INFO PFX "probing via SMBIOS\n");
2378
2379         switch (ipmi_data->type) {
2380         case 0x01: /* KCS */
2381                 info->si_type = SI_KCS;
2382                 break;
2383         case 0x02: /* SMIC */
2384                 info->si_type = SI_SMIC;
2385                 break;
2386         case 0x03: /* BT */
2387                 info->si_type = SI_BT;
2388                 break;
2389         default:
2390                 kfree(info);
2391                 return;
2392         }
2393
2394         switch (ipmi_data->addr_space) {
2395         case IPMI_MEM_ADDR_SPACE:
2396                 info->io_setup = mem_setup;
2397                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2398                 break;
2399
2400         case IPMI_IO_ADDR_SPACE:
2401                 info->io_setup = port_setup;
2402                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2403                 break;
2404
2405         default:
2406                 kfree(info);
2407                 printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
2408                        ipmi_data->addr_space);
2409                 return;
2410         }
2411         info->io.addr_data = ipmi_data->base_addr;
2412
2413         info->io.regspacing = ipmi_data->offset;
2414         if (!info->io.regspacing)
2415                 info->io.regspacing = DEFAULT_REGSPACING;
2416         info->io.regsize = DEFAULT_REGSPACING;
2417         info->io.regshift = 0;
2418
2419         info->slave_addr = ipmi_data->slave_addr;
2420
2421         info->irq = ipmi_data->irq;
2422         if (info->irq)
2423                 info->irq_setup = std_irq_setup;
2424
2425         pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
2426                  (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
2427                  info->io.addr_data, info->io.regsize, info->io.regspacing,
2428                  info->irq);
2429
2430         if (add_smi(info))
2431                 kfree(info);
2432 }
2433
2434 static void dmi_find_bmc(void)
2435 {
2436         const struct dmi_device *dev = NULL;
2437         struct dmi_ipmi_data data;
2438         int                  rv;
2439
2440         while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
2441                 memset(&data, 0, sizeof(data));
2442                 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2443                                 &data);
2444                 if (!rv)
2445                         try_init_dmi(&data);
2446         }
2447 }
2448 #endif /* CONFIG_DMI */
2449
2450 #ifdef CONFIG_PCI
2451
2452 #define PCI_ERMC_CLASSCODE              0x0C0700
2453 #define PCI_ERMC_CLASSCODE_MASK         0xffffff00
2454 #define PCI_ERMC_CLASSCODE_TYPE_MASK    0xff
2455 #define PCI_ERMC_CLASSCODE_TYPE_SMIC    0x00
2456 #define PCI_ERMC_CLASSCODE_TYPE_KCS     0x01
2457 #define PCI_ERMC_CLASSCODE_TYPE_BT      0x02
2458
2459 #define PCI_HP_VENDOR_ID    0x103C
2460 #define PCI_MMC_DEVICE_ID   0x121A
2461 #define PCI_MMC_ADDR_CW     0x10
2462
2463 static void ipmi_pci_cleanup(struct smi_info *info)
2464 {
2465         struct pci_dev *pdev = info->addr_source_data;
2466
2467         pci_disable_device(pdev);
2468 }
2469
2470 static int ipmi_pci_probe_regspacing(struct smi_info *info)
2471 {
2472         if (info->si_type == SI_KCS) {
2473                 unsigned char   status;
2474                 int             regspacing;
2475
2476                 info->io.regsize = DEFAULT_REGSIZE;
2477                 info->io.regshift = 0;
2478                 info->io_size = 2;
2479                 info->handlers = &kcs_smi_handlers;
2480
2481                 /* Detect 1-, 4- or 16-byte register spacing. */
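                     /*
                      * Write an invalid command at the candidate spacing and
                      * read the status register back; a non-zero status is
                      * taken to mean a live KCS status register was reached
                      * at that stride, otherwise the next candidate (x4) is
                      * tried.
                      */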
2482                 for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
2483                         info->io.regspacing = regspacing;
2484                         if (info->io_setup(info)) {
2485                                 dev_err(info->dev,
2486                                         "Could not setup I/O space\n");
2487                                 return DEFAULT_REGSPACING;
2488                         }
2489                         /* write invalid cmd */
2490                         info->io.outputb(&info->io, 1, 0x10);
2491                         /* read status back */
2492                         status = info->io.inputb(&info->io, 1);
2493                         info->io_cleanup(info);
2494                         if (status)
2495                                 return regspacing;
2496                         regspacing *= 4;
2497                 }
2498         }
2499         return DEFAULT_REGSPACING;
2500 }
2501
2502 static int ipmi_pci_probe(struct pci_dev *pdev,
2503                                     const struct pci_device_id *ent)
2504 {
2505         int rv;
2506         int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2507         struct smi_info *info;
2508
2509         info = smi_info_alloc();
2510         if (!info)
2511                 return -ENOMEM;
2512
2513         info->addr_source = SI_PCI;
2514         dev_info(&pdev->dev, "probing via PCI\n");
2515
2516         switch (class_type) {
2517         case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2518                 info->si_type = SI_SMIC;
2519                 break;
2520
2521         case PCI_ERMC_CLASSCODE_TYPE_KCS:
2522                 info->si_type = SI_KCS;
2523                 break;
2524
2525         case PCI_ERMC_CLASSCODE_TYPE_BT:
2526                 info->si_type = SI_BT;
2527                 break;
2528
2529         default:
2530                 kfree(info);
2531                 dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
2532                 return -ENOMEM;
2533         }
2534
2535         rv = pci_enable_device(pdev);
2536         if (rv) {
2537                 dev_err(&pdev->dev, "couldn't enable PCI device\n");
2538                 kfree(info);
2539                 return rv;
2540         }
2541
2542         info->addr_source_cleanup = ipmi_pci_cleanup;
2543         info->addr_source_data = pdev;
2544
2545         if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2546                 info->io_setup = port_setup;
2547                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2548         } else {
2549                 info->io_setup = mem_setup;
2550                 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2551         }
2552         info->io.addr_data = pci_resource_start(pdev, 0);
2553
2554         info->io.regspacing = ipmi_pci_probe_regspacing(info);
2555         info->io.regsize = DEFAULT_REGSIZE;
2556         info->io.regshift = 0;
2557
2558         info->irq = pdev->irq;
2559         if (info->irq)
2560                 info->irq_setup = std_irq_setup;
2561
2562         info->dev = &pdev->dev;
2563         pci_set_drvdata(pdev, info);
2564
2565         dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
2566                 &pdev->resource[0], info->io.regsize, info->io.regspacing,
2567                 info->irq);
2568
2569         if (add_smi(info))
2570                 kfree(info);
2571
2572         return 0;
2573 }
2574
2575 static void ipmi_pci_remove(struct pci_dev *pdev)
2576 {
2577         struct smi_info *info = pci_get_drvdata(pdev);
2578         cleanup_one_si(info);
2579 }
2580
2581 static struct pci_device_id ipmi_pci_devices[] = {
2582         { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
2583         { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2584         { 0, }
2585 };
2586 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2587
2588 static struct pci_driver ipmi_pci_driver = {
2589         .name =         DEVICE_NAME,
2590         .id_table =     ipmi_pci_devices,
2591         .probe =        ipmi_pci_probe,
2592         .remove =       ipmi_pci_remove,
2593 };
2594 #endif /* CONFIG_PCI */
2595
2596 static struct of_device_id ipmi_match[];
2597 static int ipmi_probe(struct platform_device *dev)
2598 {
2599 #ifdef CONFIG_OF
2600         const struct of_device_id *match;
2601         struct smi_info *info;
2602         struct resource resource;
2603         const __be32 *regsize, *regspacing, *regshift;
2604         struct device_node *np = dev->dev.of_node;
2605         int ret;
2606         int proplen;
2607
2608         dev_info(&dev->dev, "probing via device tree\n");
2609
2610         match = of_match_device(ipmi_match, &dev->dev);
2611         if (!match)
2612                 return -EINVAL;
2613
2614         ret = of_address_to_resource(np, 0, &resource);
2615         if (ret) {
2616                 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2617                 return ret;
2618         }
2619
2620         regsize = of_get_property(np, "reg-size", &proplen);
2621         if (regsize && proplen != 4) {
2622                 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2623                 return -EINVAL;
2624         }
2625
2626         regspacing = of_get_property(np, "reg-spacing", &proplen);
2627         if (regspacing && proplen != 4) {
2628                 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2629                 return -EINVAL;
2630         }
2631
2632         regshift = of_get_property(np, "reg-shift", &proplen);
2633         if (regshift && proplen != 4) {
2634                 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2635                 return -EINVAL;
2636         }
2637
2638         info = smi_info_alloc();
2639
2640         if (!info) {
2641                 dev_err(&dev->dev,
2642                         "could not allocate memory for OF probe\n");
2643                 return -ENOMEM;
2644         }
2645
2646         info->si_type           = (enum si_type) match->data;
2647         info->addr_source       = SI_DEVICETREE;
2648         info->irq_setup         = std_irq_setup;
2649
2650         if (resource.flags & IORESOURCE_IO) {
2651                 info->io_setup          = port_setup;
2652                 info->io.addr_type      = IPMI_IO_ADDR_SPACE;
2653         } else {
2654                 info->io_setup          = mem_setup;
2655                 info->io.addr_type      = IPMI_MEM_ADDR_SPACE;
2656         }
2657
2658         info->io.addr_data      = resource.start;
2659
2660         info->io.regsize        = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
2661         info->io.regspacing     = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
2662         info->io.regshift       = regshift ? be32_to_cpup(regshift) : 0;
2663
2664         info->irq               = irq_of_parse_and_map(dev->dev.of_node, 0);
2665         info->dev               = &dev->dev;
2666
2667         dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
2668                 info->io.addr_data, info->io.regsize, info->io.regspacing,
2669                 info->irq);
2670
2671         dev_set_drvdata(&dev->dev, info);
2672
2673         if (add_smi(info)) {
2674                 kfree(info);
2675                 return -EBUSY;
2676         }
2677 #endif
2678         return 0;
2679 }
2680
2681 static int ipmi_remove(struct platform_device *dev)
2682 {
2683 #ifdef CONFIG_OF
2684         cleanup_one_si(dev_get_drvdata(&dev->dev));
2685 #endif
2686         return 0;
2687 }
2688
2689 static struct of_device_id ipmi_match[] =
2690 {
2691         { .type = "ipmi", .compatible = "ipmi-kcs",
2692           .data = (void *)(unsigned long) SI_KCS },
2693         { .type = "ipmi", .compatible = "ipmi-smic",
2694           .data = (void *)(unsigned long) SI_SMIC },
2695         { .type = "ipmi", .compatible = "ipmi-bt",
2696           .data = (void *)(unsigned long) SI_BT },
2697         {},
2698 };
2699
2700 static struct platform_driver ipmi_driver = {
2701         .driver = {
2702                 .name = DEVICE_NAME,
2703                 .owner = THIS_MODULE,
2704                 .of_match_table = ipmi_match,
2705         },
2706         .probe          = ipmi_probe,
2707         .remove         = ipmi_remove,
2708 };
2709
2710 static int wait_for_msg_done(struct smi_info *smi_info)
2711 {
2712         enum si_sm_result     smi_result;
2713
2714         smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2715         for (;;) {
2716                 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2717                     smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
2718                         schedule_timeout_uninterruptible(1);
2719                         smi_result = smi_info->handlers->event(
2720                                 smi_info->si_sm, 100);
2721                 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
2722                         smi_result = smi_info->handlers->event(
2723                                 smi_info->si_sm, 0);
2724                 } else
2725                         break;
2726         }
2727         if (smi_result == SI_SM_HOSED)
2728                 /*
2729                  * We couldn't get the state machine to run, so whatever's at
2730                  * the port is probably not an IPMI SMI interface.
2731                  */
2732                 return -ENODEV;
2733
2734         return 0;
2735 }
2736
2737 static int try_get_dev_id(struct smi_info *smi_info)
2738 {
2739         unsigned char         msg[2];
2740         unsigned char         *resp;
2741         unsigned long         resp_len;
2742         int                   rv = 0;
2743
2744         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2745         if (!resp)
2746                 return -ENOMEM;
2747
2748         /*
2749          * Do a Get Device ID command, since it comes back with some
2750          * useful info.
2751          */
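             /*
              * Byte 0 of the request is the NetFN (App, 0x06) shifted into
              * bits 7:2, byte 1 is the command; the response is decoded by
              * ipmi_demangle_device_id() below.
              */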
2752         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2753         msg[1] = IPMI_GET_DEVICE_ID_CMD;
2754         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2755
2756         rv = wait_for_msg_done(smi_info);
2757         if (rv)
2758                 goto out;
2759
2760         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2761                                                   resp, IPMI_MAX_MSG_LENGTH);
2762
2763         /* Check and record info from the get device id, in case we need it. */
2764         rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
2765
2766  out:
2767         kfree(resp);
2768         return rv;
2769 }
2770
2771 static int try_enable_event_buffer(struct smi_info *smi_info)
2772 {
2773         unsigned char         msg[3];
2774         unsigned char         *resp;
2775         unsigned long         resp_len;
2776         int                   rv = 0;
2777
2778         resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
2779         if (!resp)
2780                 return -ENOMEM;
2781
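             /*
              * Fetch the current BMC global enables first so that only the
              * event message buffer bit is added below and the remaining
              * enable bits are preserved.
              */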
2782         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2783         msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
2784         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2785
2786         rv = wait_for_msg_done(smi_info);
2787         if (rv) {
2788                 printk(KERN_WARNING PFX "Error getting response from get"
2789                        " global enables command, the event buffer is not"
2790                        " enabled.\n");
2791                 goto out;
2792         }
2793
2794         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2795                                                   resp, IPMI_MAX_MSG_LENGTH);
2796
2797         if (resp_len < 4 ||
2798                         resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2799                         resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD   ||
2800                         resp[2] != 0) {
2801                 printk(KERN_WARNING PFX "Invalid return from get global"
2802                        " enables command, cannot enable the event buffer.\n");
2803                 rv = -EINVAL;
2804                 goto out;
2805         }
2806
2807         if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
2808                 /* buffer is already enabled, nothing to do. */
2809                 goto out;
2810
2811         msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2812         msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
2813         msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
2814         smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
2815
2816         rv = wait_for_msg_done(smi_info);
2817         if (rv) {
2818                 printk(KERN_WARNING PFX "Error getting response from set"
2819                        " global enables command, the event buffer is not"
2820                        " enabled.\n");
2821                 goto out;
2822         }
2823
2824         resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2825                                                   resp, IPMI_MAX_MSG_LENGTH);
2826
2827         if (resp_len < 3 ||
2828                         resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
2829                         resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
2830                 printk(KERN_WARNING PFX "Invalid return from set global"
2831                        " enables command, cannot enable the event buffer.\n");
2832                 rv = -EINVAL;
2833                 goto out;
2834         }
2835
2836         if (resp[2] != 0)
2837                 /*
2838                  * An error when setting the event buffer bit means
2839                  * that the event buffer is not supported.
2840                  */
2841                 rv = -ENOENT;
2842  out:
2843         kfree(resp);
2844         return rv;
2845 }
2846
2847 static int smi_type_proc_show(struct seq_file *m, void *v)
2848 {
2849         struct smi_info *smi = m->private;
2850
2851         return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
2852 }
2853
2854 static int smi_type_proc_open(struct inode *inode, struct file *file)
2855 {
2856         return single_open(file, smi_type_proc_show, PDE_DATA(inode));
2857 }
2858
2859 static const struct file_operations smi_type_proc_ops = {
2860         .open           = smi_type_proc_open,
2861         .read           = seq_read,
2862         .llseek         = seq_lseek,
2863         .release        = single_release,
2864 };
2865
2866 static int smi_si_stats_proc_show(struct seq_file *m, void *v)
2867 {
2868         struct smi_info *smi = m->private;
2869
2870         seq_printf(m, "interrupts_enabled:    %d\n",
2871                        smi->irq && !smi->interrupt_disabled);
2872         seq_printf(m, "short_timeouts:        %u\n",
2873                        smi_get_stat(smi, short_timeouts));
2874         seq_printf(m, "long_timeouts:         %u\n",
2875                        smi_get_stat(smi, long_timeouts));
2876         seq_printf(m, "idles:                 %u\n",
2877                        smi_get_stat(smi, idles));
2878         seq_printf(m, "interrupts:            %u\n",
2879                        smi_get_stat(smi, interrupts));
2880         seq_printf(m, "attentions:            %u\n",
2881                        smi_get_stat(smi, attentions));
2882         seq_printf(m, "flag_fetches:          %u\n",
2883                        smi_get_stat(smi, flag_fetches));
2884         seq_printf(m, "hosed_count:           %u\n",
2885                        smi_get_stat(smi, hosed_count));
2886         seq_printf(m, "complete_transactions: %u\n",
2887                        smi_get_stat(smi, complete_transactions));
2888         seq_printf(m, "events:                %u\n",
2889                        smi_get_stat(smi, events));
2890         seq_printf(m, "watchdog_pretimeouts:  %u\n",
2891                        smi_get_stat(smi, watchdog_pretimeouts));
2892         seq_printf(m, "incoming_messages:     %u\n",
2893                        smi_get_stat(smi, incoming_messages));
2894         return 0;
2895 }
2896
2897 static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
2898 {
2899         return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
2900 }
2901
2902 static const struct file_operations smi_si_stats_proc_ops = {
2903         .open           = smi_si_stats_proc_open,
2904         .read           = seq_read,
2905         .llseek         = seq_lseek,
2906         .release        = single_release,
2907 };
2908
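     /*
      * Example of the resulting params line (illustrative values only):
      *   kcs,i/o,0xca2,rsp=1,rsi=1,rsh=0,irq=0,ipmb=0
      */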
2909 static int smi_params_proc_show(struct seq_file *m, void *v)
2910 {
2911         struct smi_info *smi = m->private;
2912
2913         return seq_printf(m,
2914                        "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2915                        si_to_str[smi->si_type],
2916                        addr_space_to_str[smi->io.addr_type],
2917                        smi->io.addr_data,
2918                        smi->io.regspacing,
2919                        smi->io.regsize,
2920                        smi->io.regshift,
2921                        smi->irq,
2922                        smi->slave_addr);
2923 }
2924
2925 static int smi_params_proc_open(struct inode *inode, struct file *file)
2926 {
2927         return single_open(file, smi_params_proc_show, PDE_DATA(inode));
2928 }
2929
2930 static const struct file_operations smi_params_proc_ops = {
2931         .open           = smi_params_proc_open,
2932         .read           = seq_read,
2933         .llseek         = seq_lseek,
2934         .release        = single_release,
2935 };
2936
2937 /*
2938  * oem_data_avail_to_receive_msg_avail
2939  * @info - smi_info structure with msg_flags set
2940  *
2941  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2942  * Returns 1 indicating need to re-run handle_flags().
2943  */
2944 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2945 {
2946         smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2947                                RECEIVE_MSG_AVAIL);
2948         return 1;
2949 }
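/*
 * Worked example, using the message-flag values defined earlier in this
 * file (RECEIVE_MSG_AVAIL = 0x01, OEM0/1/2_DATA_AVAIL = 0x20/0x40/0x80):
 * if only OEM0_DATA_AVAIL was set, msg_flags goes from 0x20 to
 * (0x20 & ~0xe0) | 0x01 = 0x01, so the re-run of handle_flags() now sees
 * a pending receive message rather than an OEM flag.
 */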
2950
2951 /*
2952  * setup_dell_poweredge_oem_data_handler
2953  * @info - smi_info.device_id must be populated
2954  *
2955  * Systems that match but have firmware version < 1.40 may assert
2956  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2957  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
2958  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2959  * as RECEIVE_MSG_AVAIL instead.
2960  *
2961  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2962  * asserts the OEM[012] bits, and since the driver would have to
2963  * change to handle that properly anyway, we don't actually check
2964  * the firmware version.
2965  * Device ID = 0x20                BMC on PowerEdge 8G servers
2966  * Device Revision = 0x80
2967  * Firmware Revision1 = 0x01       BMC version 1.40
2968  * Firmware Revision2 = 0x40       BCD encoded
2969  * IPMI Version = 0x51             IPMI 1.5
2970  * Manufacturer ID = A2 02 00      Dell IANA
2971  *
2972  * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2973  * OEM0_DATA_AVAIL, which likewise needs to be treated as RECEIVE_MSG_AVAIL.
2974  *
2975  */
2976 #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
2977 #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2978 #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
2979 #define DELL_IANA_MFR_ID 0x0002a2
2980 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2981 {
2982         struct ipmi_device_id *id = &smi_info->device_id;
2983         if (id->manufacturer_id == DELL_IANA_MFR_ID) {
2984                 if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID  &&
2985                     id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
2986                     id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
2987                         smi_info->oem_data_avail_handler =
2988                                 oem_data_avail_to_receive_msg_avail;
2989                 } else if (ipmi_version_major(id) < 1 ||
2990                            (ipmi_version_major(id) == 1 &&
2991                             ipmi_version_minor(id) < 5)) {
2992                         smi_info->oem_data_avail_handler =
2993                                 oem_data_avail_to_receive_msg_avail;
2994                 }
2995         }
2996 }
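/*
 * For reference: the Get Device ID "IPMI Version" byte is BCD encoded with
 * the major revision in the low nibble, so the ipmi_version_major()/
 * ipmi_version_minor() helpers used above decode the 0x51 matched by
 * DELL_POWEREDGE_8G_BMC_IPMI_VERSION as IPMI 1.5, and the else branch
 * catches anything that decodes to a version below 1.5.
 */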
2997
2998 #define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2999 static void return_hosed_msg_badsize(struct smi_info *smi_info)
3000 {
3001         struct ipmi_smi_msg *msg = smi_info->curr_msg;
3002
3003         /* Make it a response */
3004         msg->rsp[0] = msg->data[0] | 4;
3005         msg->rsp[1] = msg->data[1];
3006         msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
3007         msg->rsp_size = 3;
3008         smi_info->curr_msg = NULL;
3009         deliver_recv_msg(smi_info, msg);
3010 }
3011
3012 /*
3013  * dell_poweredge_bt_xaction_handler
3014  * @info - smi_info.device_id must be populated
3015  *
3016  * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
3017  * not respond to a Get SDR command if the length of the data
3018  * requested is exactly 0x3A, which leads to command timeouts and no
3019  * data returned.  This intercepts such commands, and causes userspace
3020  * callers to try again with a different-sized buffer, which succeeds.
3021  */
3022
3023 #define STORAGE_NETFN 0x0A
3024 #define STORAGE_CMD_GET_SDR 0x23
3025 static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
3026                                              unsigned long unused,
3027                                              void *in)
3028 {
3029         struct smi_info *smi_info = in;
3030         unsigned char *data = smi_info->curr_msg->data;
3031         unsigned int size   = smi_info->curr_msg->data_size;
3032         if (size >= 8 &&
3033             (data[0]>>2) == STORAGE_NETFN &&
3034             data[1] == STORAGE_CMD_GET_SDR &&
3035             data[7] == 0x3A) {
3036                 return_hosed_msg_badsize(smi_info);
3037                 return NOTIFY_STOP;
3038         }
3039         return NOTIFY_DONE;
3040 }
3041
3042 static struct notifier_block dell_poweredge_bt_xaction_notifier = {
3043         .notifier_call  = dell_poweredge_bt_xaction_handler,
3044 };
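/*
 * For reference, the bytes checked in the handler above follow the usual
 * SMI request layout: data[0] carries the netfn in its upper six bits
 * (hence the >> 2), data[1] is the command, and for Get SDR data[2..3]
 * are the reservation ID, data[4..5] the record ID, data[6] the offset
 * and data[7] the bytes-to-read count.  The notifier therefore trips only
 * on Get SDR requests asking for exactly 0x3a bytes, and
 * return_hosed_msg_badsize() answers them with completion code 0xca so
 * the caller retries with a different length.
 */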
3045
3046 /*
3047  * setup_dell_poweredge_bt_xaction_handler
3048  * @info - smi_info.device_id must be filled in already
3049  *
3050  * Registers the Dell PowerEdge BT transaction notifier when we
3051  * know this BMC needs it.
3052  */
3053 static void
3054 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
3055 {
3056         struct ipmi_device_id *id = &smi_info->device_id;
3057         if (id->manufacturer_id == DELL_IANA_MFR_ID &&
3058             smi_info->si_type == SI_BT)
3059                 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
3060 }
3061
3062 /*
3063  * setup_oem_data_handler
3064  * @info - smi_info.device_id must be filled in already
3065  *
3066  * Fills in smi_info.oem_data_avail_handler
3067  * when we know what function to use there.
3068  */
3069
3070 static void setup_oem_data_handler(struct smi_info *smi_info)
3071 {
3072         setup_dell_poweredge_oem_data_handler(smi_info);
3073 }
3074
3075 static void setup_xaction_handlers(struct smi_info *smi_info)
3076 {
3077         setup_dell_poweredge_bt_xaction_handler(smi_info);
3078 }
3079
3080 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
3081 {
3082         if (smi_info->intf) {
3083                 /*
3084                  * The timer and thread are only running if the
3085                  * interface has been started up and registered.
3086                  */
3087                 if (smi_info->thread != NULL)
3088                         kthread_stop(smi_info->thread);
3089                 del_timer_sync(&smi_info->si_timer);
3090         }
3091 }
3092
3093 static struct ipmi_default_vals
3094 {
3095         int type;
3096         int port;
3097 } ipmi_defaults[] =
3098 {
3099         { .type = SI_KCS, .port = 0xca2 },
3100         { .type = SI_SMIC, .port = 0xca9 },
3101         { .type = SI_BT, .port = 0xe4 },
3102         { .port = 0 }
3103 };
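/*
 * These defaults are only probed when no interface was found by the other
 * methods and si_trydefaults is set (see init_ipmi_si() below).  A minimal
 * sketch of overriding them from the command line, assuming the usual
 * "type" and "ports" module parameters this driver declares:
 *
 *   modprobe ipmi_si type=kcs ports=0xca2
 */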
3104
3105 static void default_find_bmc(void)
3106 {
3107         struct smi_info *info;
3108         int             i;
3109
3110         for (i = 0; ; i++) {
3111                 if (!ipmi_defaults[i].port)
3112                         break;
3113 #ifdef CONFIG_PPC
3114                 if (check_legacy_ioport(ipmi_defaults[i].port))
3115                         continue;
3116 #endif
3117                 info = smi_info_alloc();
3118                 if (!info)
3119                         return;
3120
3121                 info->addr_source = SI_DEFAULT;
3122
3123                 info->si_type = ipmi_defaults[i].type;
3124                 info->io_setup = port_setup;
3125                 info->io.addr_data = ipmi_defaults[i].port;
3126                 info->io.addr_type = IPMI_IO_ADDR_SPACE;
3127
3128                 info->io.addr = NULL;
3129                 info->io.regspacing = DEFAULT_REGSPACING;
3130                 info->io.regsize = DEFAULT_REGSIZE;
3131                 info->io.regshift = 0;
3132
3133                 if (add_smi(info) == 0) {
3134                         if ((try_smi_init(info)) == 0) {
3135                                 /* Found one... */
3136                                 printk(KERN_INFO PFX "Found default %s"
3137                                 " state machine at %s address 0x%lx\n",
3138                                 si_to_str[info->si_type],
3139                                 addr_space_to_str[info->io.addr_type],
3140                                 info->io.addr_data);
3141                         } else
3142                                 cleanup_one_si(info);
3143                 } else {
3144                         kfree(info);
3145                 }
3146         }
3147 }
3148
3149 static int is_new_interface(struct smi_info *info)
3150 {
3151         struct smi_info *e;
3152
3153         list_for_each_entry(e, &smi_infos, link) {
3154                 if (e->io.addr_type != info->io.addr_type)
3155                         continue;
3156                 if (e->io.addr_data == info->io.addr_data)
3157                         return 0;
3158         }
3159
3160         return 1;
3161 }
3162
3163 static int add_smi(struct smi_info *new_smi)
3164 {
3165         int rv = 0;
3166
3167         printk(KERN_INFO PFX "Adding %s-specified %s state machine",
3168                         ipmi_addr_src_to_str[new_smi->addr_source],
3169                         si_to_str[new_smi->si_type]);
3170         mutex_lock(&smi_infos_lock);
3171         if (!is_new_interface(new_smi)) {
3172                 printk(KERN_CONT " duplicate interface\n");
3173                 rv = -EBUSY;
3174                 goto out_err;
3175         }
3176
3177         printk(KERN_CONT "\n");
3178
3179         /* So we know not to free it unless we have allocated one. */
3180         new_smi->intf = NULL;
3181         new_smi->si_sm = NULL;
3182         new_smi->handlers = NULL;
3183
3184         list_add_tail(&new_smi->link, &smi_infos);
3185
3186 out_err:
3187         mutex_unlock(&smi_infos_lock);
3188         return rv;
3189 }
3190
3191 static int try_smi_init(struct smi_info *new_smi)
3192 {
3193         int rv = 0;
3194         int i;
3195
3196         printk(KERN_INFO PFX "Trying %s-specified %s state"
3197                " machine at %s address 0x%lx, slave address 0x%x,"
3198                " irq %d\n",
3199                ipmi_addr_src_to_str[new_smi->addr_source],
3200                si_to_str[new_smi->si_type],
3201                addr_space_to_str[new_smi->io.addr_type],
3202                new_smi->io.addr_data,
3203                new_smi->slave_addr, new_smi->irq);
3204
3205         switch (new_smi->si_type) {
3206         case SI_KCS:
3207                 new_smi->handlers = &kcs_smi_handlers;
3208                 break;
3209
3210         case SI_SMIC:
3211                 new_smi->handlers = &smic_smi_handlers;
3212                 break;
3213
3214         case SI_BT:
3215                 new_smi->handlers = &bt_smi_handlers;
3216                 break;
3217
3218         default:
3219                 /* No support for anything else yet. */
3220                 rv = -EIO;
3221                 goto out_err;
3222         }
3223
3224         /* Allocate the state machine's data and initialize it. */
3225         new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
3226         if (!new_smi->si_sm) {
3227                 printk(KERN_ERR PFX
3228                        "Could not allocate state machine memory\n");
3229                 rv = -ENOMEM;
3230                 goto out_err;
3231         }
3232         new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
3233                                                         &new_smi->io);
3234
3235         /* Now that we know the I/O size, we can set up the I/O. */
3236         rv = new_smi->io_setup(new_smi);
3237         if (rv) {
3238                 printk(KERN_ERR PFX "Could not set up I/O space\n");
3239                 goto out_err;
3240         }
3241
3242         /* Do low-level detection first. */
3243         if (new_smi->handlers->detect(new_smi->si_sm)) {
3244                 if (new_smi->addr_source)
3245                         printk(KERN_INFO PFX "Interface detection failed\n");
3246                 rv = -ENODEV;
3247                 goto out_err;
3248         }
3249
3250         /*
3251          * Attempt a get device id command.  If it fails, we probably
3252          * don't have a BMC here.
3253          */
3254         rv = try_get_dev_id(new_smi);
3255         if (rv) {
3256                 if (new_smi->addr_source)
3257                         printk(KERN_INFO PFX "There appears to be no BMC"
3258                                " at this location\n");
3259                 goto out_err;
3260         }
3261
3262         setup_oem_data_handler(new_smi);
3263         setup_xaction_handlers(new_smi);
3264
3265         INIT_LIST_HEAD(&(new_smi->xmit_msgs));
3266         INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
3267         new_smi->curr_msg = NULL;
3268         atomic_set(&new_smi->req_events, 0);
3269         new_smi->run_to_completion = 0;
3270         for (i = 0; i < SI_NUM_STATS; i++)
3271                 atomic_set(&new_smi->stats[i], 0);
3272
3273         new_smi->interrupt_disabled = 1;
3274         atomic_set(&new_smi->stop_operation, 0);
3275         new_smi->intf_num = smi_num;
3276         smi_num++;
3277
3278         rv = try_enable_event_buffer(new_smi);
3279         if (rv == 0)
3280                 new_smi->has_event_buffer = 1;
3281
3282         /*
3283          * Start clearing the flags before we enable interrupts or the
3284          * timer to avoid racing with the timer.
3285          */
3286         start_clear_flags(new_smi);
3287         /* IRQ is defined to be set when non-zero. */
3288         if (new_smi->irq)
3289                 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
3290
3291         if (!new_smi->dev) {
3292                 /*
3293                  * If we don't already have a device from something
3294                  * else (like PCI), then register a new one.
3295                  */
3296                 new_smi->pdev = platform_device_alloc("ipmi_si",
3297                                                       new_smi->intf_num);
3298                 if (!new_smi->pdev) {
3299                         printk(KERN_ERR PFX
3300                                "Unable to allocate platform device\n");
3301                         goto out_err;
3302                 }
3303                 new_smi->dev = &new_smi->pdev->dev;
3304                 new_smi->dev->driver = &ipmi_driver.driver;
3305
3306                 rv = platform_device_add(new_smi->pdev);
3307                 if (rv) {
3308                         printk(KERN_ERR PFX
3309                                "Unable to register system interface device:"
3310                                " %d\n",
3311                                rv);
3312                         goto out_err;
3313                 }
3314                 new_smi->dev_registered = 1;
3315         }
3316
3317         rv = ipmi_register_smi(&handlers,
3318                                new_smi,
3319                                &new_smi->device_id,
3320                                new_smi->dev,
3321                                "bmc",
3322                                new_smi->slave_addr);
3323         if (rv) {
3324                 dev_err(new_smi->dev, "Unable to register device: error %d\n",
3325                         rv);
3326                 goto out_err_stop_timer;
3327         }
3328
3329         rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
3330                                      &smi_type_proc_ops,
3331                                      new_smi);
3332         if (rv) {
3333                 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3334                 goto out_err_stop_timer;
3335         }
3336
3337         rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
3338                                      &smi_si_stats_proc_ops,
3339                                      new_smi);
3340         if (rv) {
3341                 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3342                 goto out_err_stop_timer;
3343         }
3344
3345         rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
3346                                      &smi_params_proc_ops,
3347                                      new_smi);
3348         if (rv) {
3349                 dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
3350                 goto out_err_stop_timer;
3351         }
3352
3353         dev_info(new_smi->dev, "IPMI %s interface initialized\n",
3354                  si_to_str[new_smi->si_type]);
3355
3356         return 0;
3357
3358  out_err_stop_timer:
3359         atomic_inc(&new_smi->stop_operation);
3360         wait_for_timer_and_thread(new_smi);
3361
3362  out_err:
3363         new_smi->interrupt_disabled = 1;
3364
3365         if (new_smi->intf) {
3366                 ipmi_unregister_smi(new_smi->intf);
3367                 new_smi->intf = NULL;
3368         }
3369
3370         if (new_smi->irq_cleanup) {
3371                 new_smi->irq_cleanup(new_smi);
3372                 new_smi->irq_cleanup = NULL;
3373         }
3374
3375         /*
3376          * Wait until we know that we are out of any interrupt
3377          * handlers that might have been running before we freed
3378          * the interrupt.
3379          */
3380         synchronize_sched();
3381
3382         if (new_smi->si_sm) {
3383                 if (new_smi->handlers)
3384                         new_smi->handlers->cleanup(new_smi->si_sm);
3385                 kfree(new_smi->si_sm);
3386                 new_smi->si_sm = NULL;
3387         }
3388         if (new_smi->addr_source_cleanup) {
3389                 new_smi->addr_source_cleanup(new_smi);
3390                 new_smi->addr_source_cleanup = NULL;
3391         }
3392         if (new_smi->io_cleanup) {
3393                 new_smi->io_cleanup(new_smi);
3394                 new_smi->io_cleanup = NULL;
3395         }
3396
3397         if (new_smi->dev_registered) {
3398                 platform_device_unregister(new_smi->pdev);
3399                 new_smi->dev_registered = 0;
3400         }
3401
3402         return rv;
3403 }
3404
3405 static int init_ipmi_si(void)
3406 {
3407         int  i;
3408         char *str;
3409         int  rv;
3410         struct smi_info *e;
3411         enum ipmi_addr_src type = SI_INVALID;
3412
3413         if (initialized)
3414                 return 0;
3415         initialized = 1;
3416
3417         if (si_tryplatform) {
3418                 rv = platform_driver_register(&ipmi_driver);
3419                 if (rv) {
3420                         printk(KERN_ERR PFX "Unable to register "
3421                                "driver: %d\n", rv);
3422                         return rv;
3423                 }
3424         }
3425
3426         /* Parse out the si_type string into its components. */
3427         str = si_type_str;
3428         if (*str != '\0') {
3429                 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
3430                         si_type[i] = str;
3431                         str = strchr(str, ',');
3432                         if (str) {
3433                                 *str = '\0';
3434                                 str++;
3435                         } else {
3436                                 break;
3437                         }
3438                 }
3439         }
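        /*
         * Worked example: a hypothetical type="kcs,bt" is split in place by
         * the loop above into si_type[0] = "kcs" and si_type[1] = "bt"; each
         * ',' is overwritten with '\0' so every entry is its own string.
         */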
3440
3441         printk(KERN_INFO "IPMI System Interface driver.\n");
3442
3443         /* If the user gave us a device, they presumably want us to use it */
3444         if (!hardcode_find_bmc())
3445                 return 0;
3446
3447 #ifdef CONFIG_PCI
3448         if (si_trypci) {
3449                 rv = pci_register_driver(&ipmi_pci_driver);
3450                 if (rv)
3451                         printk(KERN_ERR PFX "Unable to register "
3452                                "PCI driver: %d\n", rv);
3453                 else
3454                         pci_registered = 1;
3455         }
3456 #endif
3457
3458 #ifdef CONFIG_ACPI
3459         if (si_tryacpi) {
3460                 pnp_register_driver(&ipmi_pnp_driver);
3461                 pnp_registered = 1;
3462         }
3463 #endif
3464
3465 #ifdef CONFIG_DMI
3466         if (si_trydmi)
3467                 dmi_find_bmc();
3468 #endif
3469
3470 #ifdef CONFIG_ACPI
3471         if (si_tryacpi)
3472                 spmi_find_bmc();
3473 #endif
3474
3475         /* We prefer devices with interrupts, but in the case of a machine
3476            with multiple BMCs we assume that there will be several instances
3477            of a given type, so if we succeed in registering a type, then also
3478            try to register everything else of the same type. */
3479
3480         mutex_lock(&smi_infos_lock);
3481         list_for_each_entry(e, &smi_infos, link) {
3482                 /* Try to register a device if it has an IRQ and we either
3483                    haven't successfully registered a device yet or this
3484                    device has the same type as one we successfully registered */
3485                 if (e->irq && (!type || e->addr_source == type)) {
3486                         if (!try_smi_init(e)) {
3487                                 type = e->addr_source;
3488                         }
3489                 }
3490         }
3491
3492         /* type will only have been set if we successfully registered an si */
3493         if (type) {
3494                 mutex_unlock(&smi_infos_lock);
3495                 return 0;
3496         }
3497
3498         /* Fall back to the preferred device */
3499
3500         list_for_each_entry(e, &smi_infos, link) {
3501                 if (!e->irq && (!type || e->addr_source == type)) {
3502                         if (!try_smi_init(e)) {
3503                                 type = e->addr_source;
3504                         }
3505                 }
3506         }
3507         mutex_unlock(&smi_infos_lock);
3508
3509         if (type)
3510                 return 0;
3511
3512         if (si_trydefaults) {
3513                 mutex_lock(&smi_infos_lock);
3514                 if (list_empty(&smi_infos)) {
3515                         /* No BMC was found, try defaults. */
3516                         mutex_unlock(&smi_infos_lock);
3517                         default_find_bmc();
3518                 } else
3519                         mutex_unlock(&smi_infos_lock);
3520         }
3521
3522         mutex_lock(&smi_infos_lock);
3523         if (unload_when_empty && list_empty(&smi_infos)) {
3524                 mutex_unlock(&smi_infos_lock);
3525                 cleanup_ipmi_si();
3526                 printk(KERN_WARNING PFX
3527                        "Unable to find any System Interface(s)\n");
3528                 return -ENODEV;
3529         } else {
3530                 mutex_unlock(&smi_infos_lock);
3531                 return 0;
3532         }
3533 }
3534 module_init(init_ipmi_si);
3535
3536 static void cleanup_one_si(struct smi_info *to_clean)
3537 {
3538         int           rv = 0;
3539         unsigned long flags;
3540
3541         if (!to_clean)
3542                 return;
3543
3544         list_del(&to_clean->link);
3545
3546         /* Tell the driver that we are shutting down. */
3547         atomic_inc(&to_clean->stop_operation);
3548
3549         /*
3550          * Make sure the timer and thread are stopped and will not run
3551          * again.
3552          */
3553         wait_for_timer_and_thread(to_clean);
3554
3555         /*
3556          * Timeouts are stopped; now make sure the interrupts are off
3557          * for the device.  A little tricky with locks to make sure
3558          * there are no races.
3559          */
3560         spin_lock_irqsave(&to_clean->si_lock, flags);
3561         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3562                 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3563                 poll(to_clean);
3564                 schedule_timeout_uninterruptible(1);
3565                 spin_lock_irqsave(&to_clean->si_lock, flags);
3566         }
3567         disable_si_irq(to_clean);
3568         spin_unlock_irqrestore(&to_clean->si_lock, flags);
3569         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3570                 poll(to_clean);
3571                 schedule_timeout_uninterruptible(1);
3572         }
3573
3574         /* Clean up interrupts and make sure that everything is done. */
3575         if (to_clean->irq_cleanup)
3576                 to_clean->irq_cleanup(to_clean);
3577         while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3578                 poll(to_clean);
3579                 schedule_timeout_uninterruptible(1);
3580         }
3581
3582         if (to_clean->intf)
3583                 rv = ipmi_unregister_smi(to_clean->intf);
3584
3585         if (rv) {
3586                 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3587                        rv);
3588         }
3589
3590         if (to_clean->handlers)
3591                 to_clean->handlers->cleanup(to_clean->si_sm);
3592
3593         kfree(to_clean->si_sm);
3594
3595         if (to_clean->addr_source_cleanup)
3596                 to_clean->addr_source_cleanup(to_clean);
3597         if (to_clean->io_cleanup)
3598                 to_clean->io_cleanup(to_clean);
3599
3600         if (to_clean->dev_registered)
3601                 platform_device_unregister(to_clean->pdev);
3602
3603         kfree(to_clean);
3604 }
3605
3606 static void cleanup_ipmi_si(void)
3607 {
3608         struct smi_info *e, *tmp_e;
3609
3610         if (!initialized)
3611                 return;
3612
3613 #ifdef CONFIG_PCI
3614         if (pci_registered)
3615                 pci_unregister_driver(&ipmi_pci_driver);
3616 #endif
3617 #ifdef CONFIG_ACPI
3618         if (pnp_registered)
3619                 pnp_unregister_driver(&ipmi_pnp_driver);
3620 #endif
3621
3622         platform_driver_unregister(&ipmi_driver);
3623
3624         mutex_lock(&smi_infos_lock);
3625         list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3626                 cleanup_one_si(e);
3627         mutex_unlock(&smi_infos_lock);
3628 }
3629 module_exit(cleanup_ipmi_si);
3630
3631 MODULE_LICENSE("GPL");
3632 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3633 MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
3634                    " system interfaces.");