bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/char/ipmi/ipmi_si_intf.c
ipmi: hold ATTN until upper layer ready
1da177e4
LT
1/*
2 * ipmi_si.c
3 *
4 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
5 * BT).
6 *
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com
10 *
11 * Copyright 2002 MontaVista Software Inc.
dba9b4f6 12 * Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
1da177e4
LT
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the
16 * Free Software Foundation; either version 2 of the License, or (at your
17 * option) any later version.
18 *
19 *
20 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
21 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
22 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
28 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
29 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 *
31 * You should have received a copy of the GNU General Public License along
32 * with this program; if not, write to the Free Software Foundation, Inc.,
33 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 */
35
36/*
37 * This file holds the "policy" for the interface to the SMI state
38 * machine. It does the configuration, handles timers and interrupts,
39 * and drives the real SMI state machine.
40 */
41
1da177e4
LT
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44#include <asm/system.h>
45#include <linux/sched.h>
46#include <linux/timer.h>
47#include <linux/errno.h>
48#include <linux/spinlock.h>
49#include <linux/slab.h>
50#include <linux/delay.h>
51#include <linux/list.h>
52#include <linux/pci.h>
53#include <linux/ioport.h>
ea94027b 54#include <linux/notifier.h>
b0defcdb 55#include <linux/mutex.h>
e9a705a0 56#include <linux/kthread.h>
1da177e4 57#include <asm/irq.h>
1da177e4
LT
58#include <linux/interrupt.h>
59#include <linux/rcupdate.h>
60#include <linux/ipmi_smi.h>
61#include <asm/io.h>
62#include "ipmi_si_sm.h"
63#include <linux/init.h>
b224cd3a 64#include <linux/dmi.h>
b361e27b
CM
65#include <linux/string.h>
66#include <linux/ctype.h>
67
dba9b4f6
CM
68#ifdef CONFIG_PPC_OF
69#include <asm/of_device.h>
70#include <asm/of_platform.h>
71#endif
72
b361e27b 73#define PFX "ipmi_si: "
1da177e4
LT
74
75/* Measure times between events in the driver. */
76#undef DEBUG_TIMING
77
78/* Call every 10 ms. */
79#define SI_TIMEOUT_TIME_USEC 10000
80#define SI_USEC_PER_JIFFY (1000000/HZ)
81#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
 82#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
83 short timeout */
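/*
 * Worked example (illustrative; the result depends on CONFIG_HZ): with
 * HZ=1000, SI_USEC_PER_JIFFY = 1000000/1000 = 1000 and
 * SI_TIMEOUT_JIFFIES = 10000/1000 = 10 jiffies (10 ms).  With HZ=250
 * the integer division gives SI_USEC_PER_JIFFY = 4000 and
 * SI_TIMEOUT_JIFFIES = 2 (8 ms), slightly under the nominal period.
 */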
84
ee6cd5f8
CM
85/* Bit for BMC global enables. */
86#define IPMI_BMC_RCV_MSG_INTR 0x01
87#define IPMI_BMC_EVT_MSG_INTR 0x02
88#define IPMI_BMC_EVT_MSG_BUFF 0x04
89#define IPMI_BMC_SYS_LOG 0x08
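/*
 * Illustrative use (this mirrors the SI_ENABLE_INTERRUPTS1 handling
 * further down): the interrupt bits are OR'ed into the value returned
 * by Get BMC Global Enables so the other enable bits are preserved:
 *
 *	enables = old_enables | IPMI_BMC_RCV_MSG_INTR | IPMI_BMC_EVT_MSG_INTR;
 */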
90
1da177e4
LT
91enum si_intf_state {
92 SI_NORMAL,
93 SI_GETTING_FLAGS,
94 SI_GETTING_EVENTS,
95 SI_CLEARING_FLAGS,
96 SI_CLEARING_FLAGS_THEN_SET_IRQ,
97 SI_GETTING_MESSAGES,
98 SI_ENABLE_INTERRUPTS1,
ee6cd5f8
CM
99 SI_ENABLE_INTERRUPTS2,
100 SI_DISABLE_INTERRUPTS1,
101 SI_DISABLE_INTERRUPTS2
1da177e4
LT
102 /* FIXME - add watchdog stuff. */
103};
104
9dbf68f9
CM
105/* Some BT-specific defines we need here. */
106#define IPMI_BT_INTMASK_REG 2
107#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT 2
108#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT 1
109
1da177e4
LT
110enum si_type {
111 SI_KCS, SI_SMIC, SI_BT
112};
b361e27b 113static char *si_to_str[] = { "kcs", "smic", "bt" };
1da177e4 114
50c812b2
CM
115#define DEVICE_NAME "ipmi_si"
116
117static struct device_driver ipmi_driver =
118{
119 .name = DEVICE_NAME,
120 .bus = &platform_bus_type
121};
3ae0e0f9 122
1da177e4
LT
123struct smi_info
124{
a9a2c44f 125 int intf_num;
1da177e4
LT
126 ipmi_smi_t intf;
127 struct si_sm_data *si_sm;
128 struct si_sm_handlers *handlers;
129 enum si_type si_type;
130 spinlock_t si_lock;
131 spinlock_t msg_lock;
132 struct list_head xmit_msgs;
133 struct list_head hp_xmit_msgs;
134 struct ipmi_smi_msg *curr_msg;
135 enum si_intf_state si_state;
136
137 /* Used to handle the various types of I/O that can occur with
138 IPMI */
139 struct si_sm_io io;
140 int (*io_setup)(struct smi_info *info);
141 void (*io_cleanup)(struct smi_info *info);
142 int (*irq_setup)(struct smi_info *info);
143 void (*irq_cleanup)(struct smi_info *info);
144 unsigned int io_size;
b0defcdb
CM
145 char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
146 void (*addr_source_cleanup)(struct smi_info *info);
147 void *addr_source_data;
1da177e4 148
3ae0e0f9
CM
149 /* Per-OEM handler, called from handle_flags().
150 Returns 1 when handle_flags() needs to be re-run
151 or 0 indicating it set si_state itself.
152 */
153 int (*oem_data_avail_handler)(struct smi_info *smi_info);
154
1da177e4
LT
155 /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
156 is set to hold the flags until we are done handling everything
157 from the flags. */
158#define RECEIVE_MSG_AVAIL 0x01
159#define EVENT_MSG_BUFFER_FULL 0x02
160#define WDT_PRE_TIMEOUT_INT 0x08
3ae0e0f9
CM
161#define OEM0_DATA_AVAIL 0x20
162#define OEM1_DATA_AVAIL 0x40
163#define OEM2_DATA_AVAIL 0x80
164#define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \
165 OEM1_DATA_AVAIL | \
166 OEM2_DATA_AVAIL)
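/*
 * Example (illustrative): a GET_MSG_FLAGS response byte of 0x09 is
 * (RECEIVE_MSG_AVAIL | WDT_PRE_TIMEOUT_INT); handle_flags() below
 * reports the watchdog pre-timeout first and then starts a GET_MSG
 * transaction for the pending message, working through the set bits
 * one at a time.
 */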
1da177e4
LT
167 unsigned char msg_flags;
168
169 /* If set to true, this will request events the next time the
170 state machine is idle. */
171 atomic_t req_events;
172
173 /* If true, run the state machine to completion on every send
174 call. Generally used after a panic to make sure stuff goes
175 out. */
176 int run_to_completion;
177
178 /* The I/O port of an SI interface. */
179 int port;
180
181 /* The space between start addresses of the two ports. For
182 instance, if the first port is 0xca2 and the spacing is 4, then
183 the second port is 0xca6. */
184 unsigned int spacing;
185
186 /* zero if no irq; */
187 int irq;
188
189 /* The timer for this si. */
190 struct timer_list si_timer;
191
192 /* The time (in jiffies) the last timeout occurred at. */
193 unsigned long last_timeout_jiffies;
194
195 /* Used to gracefully stop the timer without race conditions. */
a9a2c44f 196 atomic_t stop_operation;
1da177e4
LT
197
198 /* The driver will disable interrupts when it gets into a
199 situation where it cannot handle messages due to lack of
200 memory. Once that situation clears up, it will re-enable
201 interrupts. */
202 int interrupt_disabled;
203
50c812b2 204 /* From the get device id response... */
3ae0e0f9 205 struct ipmi_device_id device_id;
1da177e4 206
50c812b2
CM
207 /* Driver model stuff. */
208 struct device *dev;
209 struct platform_device *pdev;
210
211 /* True if we allocated the device, false if it came from
212 * someplace else (like PCI). */
213 int dev_registered;
214
1da177e4
LT
215 /* Slave address, could be reported from DMI. */
216 unsigned char slave_addr;
217
218 /* Counters and things for the proc filesystem. */
219 spinlock_t count_lock;
220 unsigned long short_timeouts;
221 unsigned long long_timeouts;
222 unsigned long timeout_restarts;
223 unsigned long idles;
224 unsigned long interrupts;
225 unsigned long attentions;
226 unsigned long flag_fetches;
227 unsigned long hosed_count;
228 unsigned long complete_transactions;
229 unsigned long events;
230 unsigned long watchdog_pretimeouts;
231 unsigned long incoming_messages;
a9a2c44f 232
e9a705a0 233 struct task_struct *thread;
b0defcdb
CM
234
235 struct list_head link;
1da177e4
LT
236};
237
a51f4a81
CM
238#define SI_MAX_PARMS 4
239
240static int force_kipmid[SI_MAX_PARMS];
241static int num_force_kipmid;
242
b361e27b
CM
243static int unload_when_empty = 1;
244
b0defcdb 245static int try_smi_init(struct smi_info *smi);
b361e27b 246static void cleanup_one_si(struct smi_info *to_clean);
b0defcdb 247
e041c683 248static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
ea94027b
CM
249static int register_xaction_notifier(struct notifier_block * nb)
250{
e041c683 251 return atomic_notifier_chain_register(&xaction_notifier_list, nb);
ea94027b
CM
252}
253
1da177e4
LT
254static void deliver_recv_msg(struct smi_info *smi_info,
255 struct ipmi_smi_msg *msg)
256{
257 /* Deliver the message to the upper layer with the lock
258 released. */
259 spin_unlock(&(smi_info->si_lock));
260 ipmi_smi_msg_received(smi_info->intf, msg);
261 spin_lock(&(smi_info->si_lock));
262}
263
4d7cbac7 264static void return_hosed_msg(struct smi_info *smi_info, int cCode)
1da177e4
LT
265{
266 struct ipmi_smi_msg *msg = smi_info->curr_msg;
267
4d7cbac7
CM
268 if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
269 cCode = IPMI_ERR_UNSPECIFIED;
270 /* else use it as is */
271
1da177e4
LT
 272	/* Make it a response */
273 msg->rsp[0] = msg->data[0] | 4;
274 msg->rsp[1] = msg->data[1];
4d7cbac7 275 msg->rsp[2] = cCode;
1da177e4
LT
276 msg->rsp_size = 3;
277
278 smi_info->curr_msg = NULL;
279 deliver_recv_msg(smi_info, msg);
280}
281
282static enum si_sm_result start_next_msg(struct smi_info *smi_info)
283{
284 int rv;
285 struct list_head *entry = NULL;
286#ifdef DEBUG_TIMING
287 struct timeval t;
288#endif
289
 290	/* No need to save flags, we already have interrupts off and we
291 already hold the SMI lock. */
292 spin_lock(&(smi_info->msg_lock));
293
294 /* Pick the high priority queue first. */
b0defcdb 295 if (!list_empty(&(smi_info->hp_xmit_msgs))) {
1da177e4 296 entry = smi_info->hp_xmit_msgs.next;
b0defcdb 297 } else if (!list_empty(&(smi_info->xmit_msgs))) {
1da177e4
LT
298 entry = smi_info->xmit_msgs.next;
299 }
300
b0defcdb 301 if (!entry) {
1da177e4
LT
302 smi_info->curr_msg = NULL;
303 rv = SI_SM_IDLE;
304 } else {
305 int err;
306
307 list_del(entry);
308 smi_info->curr_msg = list_entry(entry,
309 struct ipmi_smi_msg,
310 link);
311#ifdef DEBUG_TIMING
312 do_gettimeofday(&t);
313 printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
314#endif
e041c683
AS
315 err = atomic_notifier_call_chain(&xaction_notifier_list,
316 0, smi_info);
ea94027b
CM
317 if (err & NOTIFY_STOP_MASK) {
318 rv = SI_SM_CALL_WITHOUT_DELAY;
319 goto out;
320 }
1da177e4
LT
321 err = smi_info->handlers->start_transaction(
322 smi_info->si_sm,
323 smi_info->curr_msg->data,
324 smi_info->curr_msg->data_size);
325 if (err) {
4d7cbac7 326 return_hosed_msg(smi_info, err);
1da177e4
LT
327 }
328
329 rv = SI_SM_CALL_WITHOUT_DELAY;
330 }
ea94027b 331 out:
1da177e4
LT
332 spin_unlock(&(smi_info->msg_lock));
333
334 return rv;
335}
336
337static void start_enable_irq(struct smi_info *smi_info)
338{
339 unsigned char msg[2];
340
341 /* If we are enabling interrupts, we have to tell the
342 BMC to use them. */
343 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
344 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
345
346 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
347 smi_info->si_state = SI_ENABLE_INTERRUPTS1;
348}
349
ee6cd5f8
CM
350static void start_disable_irq(struct smi_info *smi_info)
351{
352 unsigned char msg[2];
353
354 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
355 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
356
357 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
358 smi_info->si_state = SI_DISABLE_INTERRUPTS1;
359}
360
1da177e4
LT
361static void start_clear_flags(struct smi_info *smi_info)
362{
363 unsigned char msg[3];
364
365 /* Make sure the watchdog pre-timeout flag is not set at startup. */
366 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
367 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
368 msg[2] = WDT_PRE_TIMEOUT_INT;
369
370 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
371 smi_info->si_state = SI_CLEARING_FLAGS;
372}
373
 374/* When we have a situation where we run out of memory and cannot
375 allocate messages, we just leave them in the BMC and run the system
376 polled until we can allocate some memory. Once we have some
377 memory, we will re-enable the interrupt. */
378static inline void disable_si_irq(struct smi_info *smi_info)
379{
b0defcdb 380 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
ee6cd5f8 381 start_disable_irq(smi_info);
1da177e4
LT
382 smi_info->interrupt_disabled = 1;
383 }
384}
385
386static inline void enable_si_irq(struct smi_info *smi_info)
387{
388 if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
ee6cd5f8 389 start_enable_irq(smi_info);
1da177e4
LT
390 smi_info->interrupt_disabled = 0;
391 }
392}
393
394static void handle_flags(struct smi_info *smi_info)
395{
3ae0e0f9 396 retry:
1da177e4
LT
397 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
398 /* Watchdog pre-timeout */
399 spin_lock(&smi_info->count_lock);
400 smi_info->watchdog_pretimeouts++;
401 spin_unlock(&smi_info->count_lock);
402
403 start_clear_flags(smi_info);
404 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
405 spin_unlock(&(smi_info->si_lock));
406 ipmi_smi_watchdog_pretimeout(smi_info->intf);
407 spin_lock(&(smi_info->si_lock));
408 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
409 /* Messages available. */
410 smi_info->curr_msg = ipmi_alloc_smi_msg();
b0defcdb 411 if (!smi_info->curr_msg) {
1da177e4
LT
412 disable_si_irq(smi_info);
413 smi_info->si_state = SI_NORMAL;
414 return;
415 }
416 enable_si_irq(smi_info);
417
418 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
419 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
420 smi_info->curr_msg->data_size = 2;
421
422 smi_info->handlers->start_transaction(
423 smi_info->si_sm,
424 smi_info->curr_msg->data,
425 smi_info->curr_msg->data_size);
426 smi_info->si_state = SI_GETTING_MESSAGES;
427 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
428 /* Events available. */
429 smi_info->curr_msg = ipmi_alloc_smi_msg();
b0defcdb 430 if (!smi_info->curr_msg) {
1da177e4
LT
431 disable_si_irq(smi_info);
432 smi_info->si_state = SI_NORMAL;
433 return;
434 }
435 enable_si_irq(smi_info);
436
437 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
438 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
439 smi_info->curr_msg->data_size = 2;
440
441 smi_info->handlers->start_transaction(
442 smi_info->si_sm,
443 smi_info->curr_msg->data,
444 smi_info->curr_msg->data_size);
445 smi_info->si_state = SI_GETTING_EVENTS;
4064d5ef
CM
446 } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
447 smi_info->oem_data_avail_handler) {
448 if (smi_info->oem_data_avail_handler(smi_info))
449 goto retry;
1da177e4
LT
450 } else {
451 smi_info->si_state = SI_NORMAL;
452 }
453}
454
455static void handle_transaction_done(struct smi_info *smi_info)
456{
457 struct ipmi_smi_msg *msg;
458#ifdef DEBUG_TIMING
459 struct timeval t;
460
461 do_gettimeofday(&t);
462 printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
463#endif
464 switch (smi_info->si_state) {
465 case SI_NORMAL:
b0defcdb 466 if (!smi_info->curr_msg)
1da177e4
LT
467 break;
468
469 smi_info->curr_msg->rsp_size
470 = smi_info->handlers->get_result(
471 smi_info->si_sm,
472 smi_info->curr_msg->rsp,
473 IPMI_MAX_MSG_LENGTH);
474
 475		/* Do this here because deliver_recv_msg() releases the
476 lock, and a new message can be put in during the
477 time the lock is released. */
478 msg = smi_info->curr_msg;
479 smi_info->curr_msg = NULL;
480 deliver_recv_msg(smi_info, msg);
481 break;
482
483 case SI_GETTING_FLAGS:
484 {
485 unsigned char msg[4];
486 unsigned int len;
487
488 /* We got the flags from the SMI, now handle them. */
489 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
490 if (msg[2] != 0) {
491 /* Error fetching flags, just give up for
492 now. */
493 smi_info->si_state = SI_NORMAL;
494 } else if (len < 4) {
495 /* Hmm, no flags. That's technically illegal, but
496 don't use uninitialized data. */
497 smi_info->si_state = SI_NORMAL;
498 } else {
499 smi_info->msg_flags = msg[3];
500 handle_flags(smi_info);
501 }
502 break;
503 }
504
505 case SI_CLEARING_FLAGS:
506 case SI_CLEARING_FLAGS_THEN_SET_IRQ:
507 {
508 unsigned char msg[3];
509
510 /* We cleared the flags. */
511 smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
512 if (msg[2] != 0) {
513 /* Error clearing flags */
514 printk(KERN_WARNING
515 "ipmi_si: Error clearing flags: %2.2x\n",
516 msg[2]);
517 }
518 if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
519 start_enable_irq(smi_info);
520 else
521 smi_info->si_state = SI_NORMAL;
522 break;
523 }
524
525 case SI_GETTING_EVENTS:
526 {
527 smi_info->curr_msg->rsp_size
528 = smi_info->handlers->get_result(
529 smi_info->si_sm,
530 smi_info->curr_msg->rsp,
531 IPMI_MAX_MSG_LENGTH);
532
 533		/* Do this here because deliver_recv_msg() releases the
534 lock, and a new message can be put in during the
535 time the lock is released. */
536 msg = smi_info->curr_msg;
537 smi_info->curr_msg = NULL;
538 if (msg->rsp[2] != 0) {
539 /* Error getting event, probably done. */
540 msg->done(msg);
541
542 /* Take off the event flag. */
543 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
544 handle_flags(smi_info);
545 } else {
546 spin_lock(&smi_info->count_lock);
547 smi_info->events++;
548 spin_unlock(&smi_info->count_lock);
549
550 /* Do this before we deliver the message
551 because delivering the message releases the
552 lock and something else can mess with the
553 state. */
554 handle_flags(smi_info);
555
556 deliver_recv_msg(smi_info, msg);
557 }
558 break;
559 }
560
561 case SI_GETTING_MESSAGES:
562 {
563 smi_info->curr_msg->rsp_size
564 = smi_info->handlers->get_result(
565 smi_info->si_sm,
566 smi_info->curr_msg->rsp,
567 IPMI_MAX_MSG_LENGTH);
568
 569		/* Do this here because deliver_recv_msg() releases the
570 lock, and a new message can be put in during the
571 time the lock is released. */
572 msg = smi_info->curr_msg;
573 smi_info->curr_msg = NULL;
574 if (msg->rsp[2] != 0) {
575 /* Error getting event, probably done. */
576 msg->done(msg);
577
578 /* Take off the msg flag. */
579 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
580 handle_flags(smi_info);
581 } else {
582 spin_lock(&smi_info->count_lock);
583 smi_info->incoming_messages++;
584 spin_unlock(&smi_info->count_lock);
585
586 /* Do this before we deliver the message
587 because delivering the message releases the
588 lock and something else can mess with the
589 state. */
590 handle_flags(smi_info);
591
592 deliver_recv_msg(smi_info, msg);
593 }
594 break;
595 }
596
597 case SI_ENABLE_INTERRUPTS1:
598 {
599 unsigned char msg[4];
600
601 /* We got the flags from the SMI, now handle them. */
602 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
603 if (msg[2] != 0) {
604 printk(KERN_WARNING
605 "ipmi_si: Could not enable interrupts"
606 ", failed get, using polled mode.\n");
607 smi_info->si_state = SI_NORMAL;
608 } else {
609 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
610 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
ee6cd5f8
CM
611 msg[2] = (msg[3] |
612 IPMI_BMC_RCV_MSG_INTR |
613 IPMI_BMC_EVT_MSG_INTR);
1da177e4
LT
614 smi_info->handlers->start_transaction(
615 smi_info->si_sm, msg, 3);
616 smi_info->si_state = SI_ENABLE_INTERRUPTS2;
617 }
618 break;
619 }
620
621 case SI_ENABLE_INTERRUPTS2:
622 {
623 unsigned char msg[4];
624
625 /* We got the flags from the SMI, now handle them. */
626 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
627 if (msg[2] != 0) {
628 printk(KERN_WARNING
629 "ipmi_si: Could not enable interrupts"
630 ", failed set, using polled mode.\n");
631 }
632 smi_info->si_state = SI_NORMAL;
633 break;
634 }
ee6cd5f8
CM
635
636 case SI_DISABLE_INTERRUPTS1:
637 {
638 unsigned char msg[4];
639
640 /* We got the flags from the SMI, now handle them. */
641 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
642 if (msg[2] != 0) {
643 printk(KERN_WARNING
644 "ipmi_si: Could not disable interrupts"
645 ", failed get.\n");
646 smi_info->si_state = SI_NORMAL;
647 } else {
648 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
649 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
650 msg[2] = (msg[3] &
651 ~(IPMI_BMC_RCV_MSG_INTR |
652 IPMI_BMC_EVT_MSG_INTR));
653 smi_info->handlers->start_transaction(
654 smi_info->si_sm, msg, 3);
655 smi_info->si_state = SI_DISABLE_INTERRUPTS2;
656 }
657 break;
658 }
659
660 case SI_DISABLE_INTERRUPTS2:
661 {
662 unsigned char msg[4];
663
664 /* We got the flags from the SMI, now handle them. */
665 smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
666 if (msg[2] != 0) {
667 printk(KERN_WARNING
668 "ipmi_si: Could not disable interrupts"
669 ", failed set.\n");
670 }
671 smi_info->si_state = SI_NORMAL;
672 break;
673 }
1da177e4
LT
674 }
675}
676
677/* Called on timeouts and events. Timeouts should pass the elapsed
fcfa4724
CM
678 time, interrupts should pass in zero. Must be called with
679 si_lock held and interrupts disabled. */
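/*
 * Typical caller sketch (illustrative; this matches smi_timeout() and
 * the interrupt handlers below):
 *
 *	spin_lock_irqsave(&smi_info->si_lock, flags);
 *	smi_event_handler(smi_info, usec_since_last_call);
 *	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 */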
1da177e4
LT
680static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
681 int time)
682{
683 enum si_sm_result si_sm_result;
684
685 restart:
686 /* There used to be a loop here that waited a little while
687 (around 25us) before giving up. That turned out to be
 688	   pointless; the minimum delays I was seeing were in the 300us
689 range, which is far too long to wait in an interrupt. So
690 we just run until the state machine tells us something
691 happened or it needs a delay. */
692 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
693 time = 0;
694 while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
695 {
696 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
697 }
698
699 if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
700 {
701 spin_lock(&smi_info->count_lock);
702 smi_info->complete_transactions++;
703 spin_unlock(&smi_info->count_lock);
704
705 handle_transaction_done(smi_info);
706 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
707 }
708 else if (si_sm_result == SI_SM_HOSED)
709 {
710 spin_lock(&smi_info->count_lock);
711 smi_info->hosed_count++;
712 spin_unlock(&smi_info->count_lock);
713
 714		/* Do this before return_hosed_msg, because that
715 releases the lock. */
716 smi_info->si_state = SI_NORMAL;
717 if (smi_info->curr_msg != NULL) {
718 /* If we were handling a user message, format
719 a response to send to the upper layer to
720 tell it about the error. */
4d7cbac7 721 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
1da177e4
LT
722 }
723 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
724 }
725
4ea18425
CM
726 /*
727 * We prefer handling attn over new messages. But don't do
728 * this if there is not yet an upper layer to handle anything.
729 */
730 if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN)
1da177e4
LT
731 {
732 unsigned char msg[2];
733
734 spin_lock(&smi_info->count_lock);
735 smi_info->attentions++;
736 spin_unlock(&smi_info->count_lock);
737
 738		/* Got an attn, send down a get message flags to see
739 what's causing it. It would be better to handle
740 this in the upper layer, but due to the way
741 interrupts work with the SMI, that's not really
742 possible. */
743 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
744 msg[1] = IPMI_GET_MSG_FLAGS_CMD;
745
746 smi_info->handlers->start_transaction(
747 smi_info->si_sm, msg, 2);
748 smi_info->si_state = SI_GETTING_FLAGS;
749 goto restart;
750 }
751
752 /* If we are currently idle, try to start the next message. */
753 if (si_sm_result == SI_SM_IDLE) {
754 spin_lock(&smi_info->count_lock);
755 smi_info->idles++;
756 spin_unlock(&smi_info->count_lock);
757
758 si_sm_result = start_next_msg(smi_info);
759 if (si_sm_result != SI_SM_IDLE)
760 goto restart;
761 }
762
763 if ((si_sm_result == SI_SM_IDLE)
764 && (atomic_read(&smi_info->req_events)))
765 {
766 /* We are idle and the upper layer requested that I fetch
767 events, so do so. */
55162fb1 768 atomic_set(&smi_info->req_events, 0);
1da177e4 769
55162fb1
CM
770 smi_info->curr_msg = ipmi_alloc_smi_msg();
771 if (!smi_info->curr_msg)
772 goto out;
1da177e4 773
55162fb1
CM
774 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
775 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
776 smi_info->curr_msg->data_size = 2;
1da177e4
LT
777
778 smi_info->handlers->start_transaction(
55162fb1
CM
779 smi_info->si_sm,
780 smi_info->curr_msg->data,
781 smi_info->curr_msg->data_size);
782 smi_info->si_state = SI_GETTING_EVENTS;
1da177e4
LT
783 goto restart;
784 }
55162fb1 785 out:
1da177e4
LT
786 return si_sm_result;
787}
788
789static void sender(void *send_info,
790 struct ipmi_smi_msg *msg,
791 int priority)
792{
793 struct smi_info *smi_info = send_info;
794 enum si_sm_result result;
795 unsigned long flags;
796#ifdef DEBUG_TIMING
797 struct timeval t;
798#endif
799
b361e27b
CM
800 if (atomic_read(&smi_info->stop_operation)) {
801 msg->rsp[0] = msg->data[0] | 4;
802 msg->rsp[1] = msg->data[1];
803 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
804 msg->rsp_size = 3;
805 deliver_recv_msg(smi_info, msg);
806 return;
807 }
808
1da177e4
LT
809 spin_lock_irqsave(&(smi_info->msg_lock), flags);
810#ifdef DEBUG_TIMING
811 do_gettimeofday(&t);
812 printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
813#endif
814
815 if (smi_info->run_to_completion) {
816 /* If we are running to completion, then throw it in
817 the list and run transactions until everything is
818 clear. Priority doesn't matter here. */
819 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
820
821 /* We have to release the msg lock and claim the smi
822 lock in this case, because of race conditions. */
823 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
824
825 spin_lock_irqsave(&(smi_info->si_lock), flags);
826 result = smi_event_handler(smi_info, 0);
827 while (result != SI_SM_IDLE) {
828 udelay(SI_SHORT_TIMEOUT_USEC);
829 result = smi_event_handler(smi_info,
830 SI_SHORT_TIMEOUT_USEC);
831 }
832 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
833 return;
834 } else {
835 if (priority > 0) {
836 list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
837 } else {
838 list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
839 }
840 }
841 spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
842
843 spin_lock_irqsave(&(smi_info->si_lock), flags);
844 if ((smi_info->si_state == SI_NORMAL)
845 && (smi_info->curr_msg == NULL))
846 {
847 start_next_msg(smi_info);
1da177e4
LT
848 }
849 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
850}
851
852static void set_run_to_completion(void *send_info, int i_run_to_completion)
853{
854 struct smi_info *smi_info = send_info;
855 enum si_sm_result result;
856 unsigned long flags;
857
858 spin_lock_irqsave(&(smi_info->si_lock), flags);
859
860 smi_info->run_to_completion = i_run_to_completion;
861 if (i_run_to_completion) {
862 result = smi_event_handler(smi_info, 0);
863 while (result != SI_SM_IDLE) {
864 udelay(SI_SHORT_TIMEOUT_USEC);
865 result = smi_event_handler(smi_info,
866 SI_SHORT_TIMEOUT_USEC);
867 }
868 }
869
870 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
871}
872
a9a2c44f
CM
873static int ipmi_thread(void *data)
874{
875 struct smi_info *smi_info = data;
e9a705a0 876 unsigned long flags;
a9a2c44f
CM
877 enum si_sm_result smi_result;
878
a9a2c44f 879 set_user_nice(current, 19);
e9a705a0 880 while (!kthread_should_stop()) {
a9a2c44f 881 spin_lock_irqsave(&(smi_info->si_lock), flags);
8a3628d5 882 smi_result = smi_event_handler(smi_info, 0);
a9a2c44f 883 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
e9a705a0
MD
884 if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
885 /* do nothing */
a9a2c44f 886 }
e9a705a0 887 else if (smi_result == SI_SM_CALL_WITH_DELAY)
33979734 888 schedule();
e9a705a0
MD
889 else
890 schedule_timeout_interruptible(1);
a9a2c44f 891 }
a9a2c44f
CM
892 return 0;
893}
894
895
1da177e4
LT
896static void poll(void *send_info)
897{
898 struct smi_info *smi_info = send_info;
fcfa4724 899 unsigned long flags;
1da177e4 900
15c62e10
CM
901 /*
902 * Make sure there is some delay in the poll loop so we can
903 * drive time forward and timeout things.
904 */
905 udelay(10);
fcfa4724 906 spin_lock_irqsave(&smi_info->si_lock, flags);
15c62e10 907 smi_event_handler(smi_info, 10);
fcfa4724 908 spin_unlock_irqrestore(&smi_info->si_lock, flags);
1da177e4
LT
909}
910
911static void request_events(void *send_info)
912{
913 struct smi_info *smi_info = send_info;
914
b361e27b
CM
915 if (atomic_read(&smi_info->stop_operation))
916 return;
917
1da177e4
LT
918 atomic_set(&smi_info->req_events, 1);
919}
920
0c8204b3 921static int initialized;
1da177e4 922
1da177e4
LT
923static void smi_timeout(unsigned long data)
924{
925 struct smi_info *smi_info = (struct smi_info *) data;
926 enum si_sm_result smi_result;
927 unsigned long flags;
928 unsigned long jiffies_now;
c4edff1c 929 long time_diff;
1da177e4
LT
930#ifdef DEBUG_TIMING
931 struct timeval t;
932#endif
933
1da177e4
LT
934 spin_lock_irqsave(&(smi_info->si_lock), flags);
935#ifdef DEBUG_TIMING
936 do_gettimeofday(&t);
937 printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
938#endif
939 jiffies_now = jiffies;
c4edff1c 940 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
1da177e4
LT
941 * SI_USEC_PER_JIFFY);
942 smi_result = smi_event_handler(smi_info, time_diff);
943
944 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
945
946 smi_info->last_timeout_jiffies = jiffies_now;
947
b0defcdb 948 if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
1da177e4
LT
949 /* Running with interrupts, only do long timeouts. */
950 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
951 spin_lock_irqsave(&smi_info->count_lock, flags);
952 smi_info->long_timeouts++;
953 spin_unlock_irqrestore(&smi_info->count_lock, flags);
954 goto do_add_timer;
955 }
956
957 /* If the state machine asks for a short delay, then shorten
958 the timer timeout. */
959 if (smi_result == SI_SM_CALL_WITH_DELAY) {
960 spin_lock_irqsave(&smi_info->count_lock, flags);
961 smi_info->short_timeouts++;
962 spin_unlock_irqrestore(&smi_info->count_lock, flags);
1da177e4 963 smi_info->si_timer.expires = jiffies + 1;
1da177e4
LT
964 } else {
965 spin_lock_irqsave(&smi_info->count_lock, flags);
966 smi_info->long_timeouts++;
967 spin_unlock_irqrestore(&smi_info->count_lock, flags);
968 smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
1da177e4
LT
969 }
970
971 do_add_timer:
972 add_timer(&(smi_info->si_timer));
973}
974
7d12e780 975static irqreturn_t si_irq_handler(int irq, void *data)
1da177e4
LT
976{
977 struct smi_info *smi_info = data;
978 unsigned long flags;
979#ifdef DEBUG_TIMING
980 struct timeval t;
981#endif
982
983 spin_lock_irqsave(&(smi_info->si_lock), flags);
984
985 spin_lock(&smi_info->count_lock);
986 smi_info->interrupts++;
987 spin_unlock(&smi_info->count_lock);
988
1da177e4
LT
989#ifdef DEBUG_TIMING
990 do_gettimeofday(&t);
991 printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
992#endif
993 smi_event_handler(smi_info, 0);
1da177e4
LT
994 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
995 return IRQ_HANDLED;
996}
997
7d12e780 998static irqreturn_t si_bt_irq_handler(int irq, void *data)
9dbf68f9
CM
999{
1000 struct smi_info *smi_info = data;
1001 /* We need to clear the IRQ flag for the BT interface. */
1002 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
1003 IPMI_BT_INTMASK_CLEAR_IRQ_BIT
1004 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
7d12e780 1005 return si_irq_handler(irq, data);
9dbf68f9
CM
1006}
1007
453823ba
CM
1008static int smi_start_processing(void *send_info,
1009 ipmi_smi_t intf)
1010{
1011 struct smi_info *new_smi = send_info;
a51f4a81 1012 int enable = 0;
453823ba
CM
1013
1014 new_smi->intf = intf;
1015
c45adc39
CM
1016 /* Try to claim any interrupts. */
1017 if (new_smi->irq_setup)
1018 new_smi->irq_setup(new_smi);
1019
453823ba
CM
1020 /* Set up the timer that drives the interface. */
1021 setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
1022 new_smi->last_timeout_jiffies = jiffies;
1023 mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
1024
a51f4a81
CM
1025 /*
1026 * Check if the user forcefully enabled the daemon.
1027 */
1028 if (new_smi->intf_num < num_force_kipmid)
1029 enable = force_kipmid[new_smi->intf_num];
df3fe8de
CM
1030 /*
1031 * The BT interface is efficient enough to not need a thread,
1032 * and there is no need for a thread if we have interrupts.
1033 */
a51f4a81
CM
1034 else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
1035 enable = 1;
1036
1037 if (enable) {
453823ba
CM
1038 new_smi->thread = kthread_run(ipmi_thread, new_smi,
1039 "kipmi%d", new_smi->intf_num);
1040 if (IS_ERR(new_smi->thread)) {
1041 printk(KERN_NOTICE "ipmi_si_intf: Could not start"
1042 " kernel thread due to error %ld, only using"
1043 " timers to drive the interface\n",
1044 PTR_ERR(new_smi->thread));
1045 new_smi->thread = NULL;
1046 }
1047 }
1048
1049 return 0;
1050}
9dbf68f9 1051
b9675136
CM
1052static void set_maintenance_mode(void *send_info, int enable)
1053{
1054 struct smi_info *smi_info = send_info;
1055
1056 if (!enable)
1057 atomic_set(&smi_info->req_events, 0);
1058}
1059
1da177e4
LT
1060static struct ipmi_smi_handlers handlers =
1061{
1062 .owner = THIS_MODULE,
453823ba 1063 .start_processing = smi_start_processing,
1da177e4
LT
1064 .sender = sender,
1065 .request_events = request_events,
b9675136 1066 .set_maintenance_mode = set_maintenance_mode,
1da177e4
LT
1067 .set_run_to_completion = set_run_to_completion,
1068 .poll = poll,
1069};
1070
1071/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
1072 a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */
1073
b0defcdb 1074static LIST_HEAD(smi_infos);
d6dfd131 1075static DEFINE_MUTEX(smi_infos_lock);
b0defcdb 1076static int smi_num; /* Used to sequence the SMIs */
1da177e4 1077
1da177e4 1078#define DEFAULT_REGSPACING 1
dba9b4f6 1079#define DEFAULT_REGSIZE 1
1da177e4
LT
1080
1081static int si_trydefaults = 1;
1082static char *si_type[SI_MAX_PARMS];
1083#define MAX_SI_TYPE_STR 30
1084static char si_type_str[MAX_SI_TYPE_STR];
1085static unsigned long addrs[SI_MAX_PARMS];
64a6f950 1086static unsigned int num_addrs;
1da177e4 1087static unsigned int ports[SI_MAX_PARMS];
64a6f950 1088static unsigned int num_ports;
1da177e4 1089static int irqs[SI_MAX_PARMS];
64a6f950 1090static unsigned int num_irqs;
1da177e4 1091static int regspacings[SI_MAX_PARMS];
64a6f950 1092static unsigned int num_regspacings;
1da177e4 1093static int regsizes[SI_MAX_PARMS];
64a6f950 1094static unsigned int num_regsizes;
1da177e4 1095static int regshifts[SI_MAX_PARMS];
64a6f950 1096static unsigned int num_regshifts;
1da177e4 1097static int slave_addrs[SI_MAX_PARMS];
64a6f950 1098static unsigned int num_slave_addrs;
1da177e4 1099
b361e27b
CM
1100#define IPMI_IO_ADDR_SPACE 0
1101#define IPMI_MEM_ADDR_SPACE 1
1d5636cc 1102static char *addr_space_to_str[] = { "i/o", "mem" };
b361e27b
CM
1103
1104static int hotmod_handler(const char *val, struct kernel_param *kp);
1105
1106module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
1107MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
1108 " Documentation/IPMI.txt in the kernel sources for the"
1109 " gory details.");
1da177e4
LT
1110
1111module_param_named(trydefaults, si_trydefaults, bool, 0);
1112MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
1113 " default scan of the KCS and SMIC interface at the standard"
1114 " address");
1115module_param_string(type, si_type_str, MAX_SI_TYPE_STR, 0);
1116MODULE_PARM_DESC(type, "Defines the type of each interface, each"
1117 " interface separated by commas. The types are 'kcs',"
1118 " 'smic', and 'bt'. For example si_type=kcs,bt will set"
1119 " the first interface to kcs and the second to bt");
64a6f950 1120module_param_array(addrs, ulong, &num_addrs, 0);
1da177e4
LT
1121MODULE_PARM_DESC(addrs, "Sets the memory address of each interface, the"
1122 " addresses separated by commas. Only use if an interface"
1123 " is in memory. Otherwise, set it to zero or leave"
1124 " it blank.");
64a6f950 1125module_param_array(ports, uint, &num_ports, 0);
1da177e4
LT
1126MODULE_PARM_DESC(ports, "Sets the port address of each interface, the"
1127 " addresses separated by commas. Only use if an interface"
1128 " is a port. Otherwise, set it to zero or leave"
1129 " it blank.");
1130module_param_array(irqs, int, &num_irqs, 0);
1131MODULE_PARM_DESC(irqs, "Sets the interrupt of each interface, the"
1132 " addresses separated by commas. Only use if an interface"
1133 " has an interrupt. Otherwise, set it to zero or leave"
1134 " it blank.");
1135module_param_array(regspacings, int, &num_regspacings, 0);
1136MODULE_PARM_DESC(regspacings, "The number of bytes between the start address"
1137 " and each successive register used by the interface. For"
1138 " instance, if the start address is 0xca2 and the spacing"
1139 " is 2, then the second address is at 0xca4. Defaults"
1140 " to 1.");
1141module_param_array(regsizes, int, &num_regsizes, 0);
1142MODULE_PARM_DESC(regsizes, "The size of the specific IPMI register in bytes."
1143 " This should generally be 1, 2, 4, or 8 for an 8-bit,"
1144		 " 16-bit, 32-bit, or 64-bit register.  Use this if"
1145 " the 8-bit IPMI register has to be read from a larger"
1146 " register.");
1147module_param_array(regshifts, int, &num_regshifts, 0);
1148MODULE_PARM_DESC(regshifts, "The amount to shift the data read from the"
1149 " IPMI register, in bits. For instance, if the data"
1150 " is read from a 32-bit word and the IPMI data is in"
1151 " bit 8-15, then the shift would be 8");
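/*
 * Illustrative example: with regsizes=4 and regshifts=8 the read path
 * behaves like (readl(addr) >> 8) & 0xff, i.e. the 8-bit IPMI register
 * is taken from bits 8-15 of a 32-bit word.
 */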
1152module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1153MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for"
1154 " the controller. Normally this is 0x20, but can be"
1155 " overridden by this parm. This is an array indexed"
1156 " by interface number.");
a51f4a81
CM
1157module_param_array(force_kipmid, int, &num_force_kipmid, 0);
1158MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
1159 " disabled(0). Normally the IPMI driver auto-detects"
1160 " this, but the value may be overridden by this parm.");
b361e27b
CM
1161module_param(unload_when_empty, int, 0);
1162MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
1163 " specified or found, default is 1. Setting to 0"
1164 " is useful for hot add of devices using hotmod.");
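/*
 * Usage sketch (illustrative values): a polled KCS interface at I/O
 * port 0xca2 plus a BT interface at 0xe4 on IRQ 9 could be requested
 * with something like:
 *
 *	modprobe ipmi_si type=kcs,bt ports=0xca2,0xe4 irqs=0,9
 *
 * (when built in, the same parameters take an "ipmi_si." prefix on the
 * kernel command line).
 */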
1da177e4
LT
1165
1166
b0defcdb 1167static void std_irq_cleanup(struct smi_info *info)
1da177e4 1168{
b0defcdb
CM
1169 if (info->si_type == SI_BT)
1170 /* Disable the interrupt in the BT interface. */
1171 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0);
1172 free_irq(info->irq, info);
1da177e4 1173}
1da177e4
LT
1174
1175static int std_irq_setup(struct smi_info *info)
1176{
1177 int rv;
1178
b0defcdb 1179 if (!info->irq)
1da177e4
LT
1180 return 0;
1181
9dbf68f9
CM
1182 if (info->si_type == SI_BT) {
1183 rv = request_irq(info->irq,
1184 si_bt_irq_handler,
ee6cd5f8 1185 IRQF_SHARED | IRQF_DISABLED,
9dbf68f9
CM
1186 DEVICE_NAME,
1187 info);
b0defcdb 1188 if (!rv)
9dbf68f9
CM
1189 /* Enable the interrupt in the BT interface. */
1190 info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
1191 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
1192 } else
1193 rv = request_irq(info->irq,
1194 si_irq_handler,
ee6cd5f8 1195 IRQF_SHARED | IRQF_DISABLED,
9dbf68f9
CM
1196 DEVICE_NAME,
1197 info);
1da177e4
LT
1198 if (rv) {
1199 printk(KERN_WARNING
1200 "ipmi_si: %s unable to claim interrupt %d,"
1201 " running polled\n",
1202 DEVICE_NAME, info->irq);
1203 info->irq = 0;
1204 } else {
b0defcdb 1205 info->irq_cleanup = std_irq_cleanup;
1da177e4
LT
1206 printk(" Using irq %d\n", info->irq);
1207 }
1208
1209 return rv;
1210}
1211
1da177e4
LT
1212static unsigned char port_inb(struct si_sm_io *io, unsigned int offset)
1213{
b0defcdb 1214 unsigned int addr = io->addr_data;
1da177e4 1215
b0defcdb 1216 return inb(addr + (offset * io->regspacing));
1da177e4
LT
1217}
1218
1219static void port_outb(struct si_sm_io *io, unsigned int offset,
1220 unsigned char b)
1221{
b0defcdb 1222 unsigned int addr = io->addr_data;
1da177e4 1223
b0defcdb 1224 outb(b, addr + (offset * io->regspacing));
1da177e4
LT
1225}
1226
1227static unsigned char port_inw(struct si_sm_io *io, unsigned int offset)
1228{
b0defcdb 1229 unsigned int addr = io->addr_data;
1da177e4 1230
b0defcdb 1231 return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1da177e4
LT
1232}
1233
1234static void port_outw(struct si_sm_io *io, unsigned int offset,
1235 unsigned char b)
1236{
b0defcdb 1237 unsigned int addr = io->addr_data;
1da177e4 1238
b0defcdb 1239 outw(b << io->regshift, addr + (offset * io->regspacing));
1da177e4
LT
1240}
1241
1242static unsigned char port_inl(struct si_sm_io *io, unsigned int offset)
1243{
b0defcdb 1244 unsigned int addr = io->addr_data;
1da177e4 1245
b0defcdb 1246 return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff;
1da177e4
LT
1247}
1248
1249static void port_outl(struct si_sm_io *io, unsigned int offset,
1250 unsigned char b)
1251{
b0defcdb 1252 unsigned int addr = io->addr_data;
1da177e4 1253
b0defcdb 1254 outl(b << io->regshift, addr+(offset * io->regspacing));
1da177e4
LT
1255}
1256
1257static void port_cleanup(struct smi_info *info)
1258{
b0defcdb 1259 unsigned int addr = info->io.addr_data;
d61a3ead 1260 int idx;
1da177e4 1261
b0defcdb 1262 if (addr) {
d61a3ead
CM
1263 for (idx = 0; idx < info->io_size; idx++) {
1264 release_region(addr + idx * info->io.regspacing,
1265 info->io.regsize);
1266 }
1da177e4 1267 }
1da177e4
LT
1268}
1269
1270static int port_setup(struct smi_info *info)
1271{
b0defcdb 1272 unsigned int addr = info->io.addr_data;
d61a3ead 1273 int idx;
1da177e4 1274
b0defcdb 1275 if (!addr)
1da177e4
LT
1276 return -ENODEV;
1277
1278 info->io_cleanup = port_cleanup;
1279
1280 /* Figure out the actual inb/inw/inl/etc routine to use based
1281 upon the register size. */
1282 switch (info->io.regsize) {
1283 case 1:
1284 info->io.inputb = port_inb;
1285 info->io.outputb = port_outb;
1286 break;
1287 case 2:
1288 info->io.inputb = port_inw;
1289 info->io.outputb = port_outw;
1290 break;
1291 case 4:
1292 info->io.inputb = port_inl;
1293 info->io.outputb = port_outl;
1294 break;
1295 default:
1296 printk("ipmi_si: Invalid register size: %d\n",
1297 info->io.regsize);
1298 return -EINVAL;
1299 }
1300
d61a3ead
CM
1301 /* Some BIOSes reserve disjoint I/O regions in their ACPI
1302 * tables. This causes problems when trying to register the
1303 * entire I/O region. Therefore we must register each I/O
1304 * port separately.
1305 */
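	/* Example (illustrative): an interface at 0xca2 with io_size = 2
	 * and regspacing = 4 requests 0xca2 and 0xca6 as two separate
	 * regions of io.regsize bytes each rather than one span covering
	 * 0xca2-0xca6. */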
1306 for (idx = 0; idx < info->io_size; idx++) {
1307 if (request_region(addr + idx * info->io.regspacing,
1308 info->io.regsize, DEVICE_NAME) == NULL) {
1309 /* Undo allocations */
1310 while (idx--) {
1311 release_region(addr + idx * info->io.regspacing,
1312 info->io.regsize);
1313 }
1314 return -EIO;
1315 }
1316 }
1da177e4
LT
1317 return 0;
1318}
1319
546cfdf4 1320static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1321{
1322 return readb((io->addr)+(offset * io->regspacing));
1323}
1324
546cfdf4 1325static void intf_mem_outb(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1326 unsigned char b)
1327{
1328 writeb(b, (io->addr)+(offset * io->regspacing));
1329}
1330
546cfdf4 1331static unsigned char intf_mem_inw(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1332{
1333 return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift)
64d9fe69 1334 & 0xff;
1da177e4
LT
1335}
1336
546cfdf4 1337static void intf_mem_outw(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1338 unsigned char b)
1339{
1340 writeb(b << io->regshift, (io->addr)+(offset * io->regspacing));
1341}
1342
546cfdf4 1343static unsigned char intf_mem_inl(struct si_sm_io *io, unsigned int offset)
1da177e4
LT
1344{
1345 return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift)
64d9fe69 1346 & 0xff;
1da177e4
LT
1347}
1348
546cfdf4 1349static void intf_mem_outl(struct si_sm_io *io, unsigned int offset,
1da177e4
LT
1350 unsigned char b)
1351{
1352 writel(b << io->regshift, (io->addr)+(offset * io->regspacing));
1353}
1354
1355#ifdef readq
1356static unsigned char mem_inq(struct si_sm_io *io, unsigned int offset)
1357{
1358 return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift)
64d9fe69 1359 & 0xff;
1da177e4
LT
1360}
1361
1362static void mem_outq(struct si_sm_io *io, unsigned int offset,
1363 unsigned char b)
1364{
1365 writeq(b << io->regshift, (io->addr)+(offset * io->regspacing));
1366}
1367#endif
1368
1369static void mem_cleanup(struct smi_info *info)
1370{
b0defcdb 1371 unsigned long addr = info->io.addr_data;
1da177e4
LT
1372 int mapsize;
1373
1374 if (info->io.addr) {
1375 iounmap(info->io.addr);
1376
1377 mapsize = ((info->io_size * info->io.regspacing)
1378 - (info->io.regspacing - info->io.regsize));
1379
b0defcdb 1380 release_mem_region(addr, mapsize);
1da177e4 1381 }
1da177e4
LT
1382}
1383
1384static int mem_setup(struct smi_info *info)
1385{
b0defcdb 1386 unsigned long addr = info->io.addr_data;
1da177e4
LT
1387 int mapsize;
1388
b0defcdb 1389 if (!addr)
1da177e4
LT
1390 return -ENODEV;
1391
1392 info->io_cleanup = mem_cleanup;
1393
1394 /* Figure out the actual readb/readw/readl/etc routine to use based
1395 upon the register size. */
1396 switch (info->io.regsize) {
1397 case 1:
546cfdf4
AD
1398 info->io.inputb = intf_mem_inb;
1399 info->io.outputb = intf_mem_outb;
1da177e4
LT
1400 break;
1401 case 2:
546cfdf4
AD
1402 info->io.inputb = intf_mem_inw;
1403 info->io.outputb = intf_mem_outw;
1da177e4
LT
1404 break;
1405 case 4:
546cfdf4
AD
1406 info->io.inputb = intf_mem_inl;
1407 info->io.outputb = intf_mem_outl;
1da177e4
LT
1408 break;
1409#ifdef readq
1410 case 8:
1411 info->io.inputb = mem_inq;
1412 info->io.outputb = mem_outq;
1413 break;
1414#endif
1415 default:
1416 printk("ipmi_si: Invalid register size: %d\n",
1417 info->io.regsize);
1418 return -EINVAL;
1419 }
1420
1421 /* Calculate the total amount of memory to claim. This is an
1422 * unusual looking calculation, but it avoids claiming any
1423 * more memory than it has to. It will claim everything
1424 * between the first address to the end of the last full
1425 * register. */
1426 mapsize = ((info->io_size * info->io.regspacing)
1427 - (info->io.regspacing - info->io.regsize));
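	/* Worked example (illustrative): io_size = 2, regspacing = 4 and
	 * regsize = 1 gives mapsize = 2*4 - (4 - 1) = 5 bytes, i.e. just
	 * up to the end of the last (one byte wide) register. */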
1428
b0defcdb 1429 if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL)
1da177e4
LT
1430 return -EIO;
1431
b0defcdb 1432 info->io.addr = ioremap(addr, mapsize);
1da177e4 1433 if (info->io.addr == NULL) {
b0defcdb 1434 release_mem_region(addr, mapsize);
1da177e4
LT
1435 return -EIO;
1436 }
1437 return 0;
1438}
1439
b361e27b
CM
1440/*
1441 * Parms come in as <op1>[:op2[:op3...]]. ops are:
1442 * add|remove,kcs|bt|smic,mem|i/o,<address>[,<opt1>[,<opt2>[,...]]]
1443 * Options are:
1444 * rsp=<regspacing>
1445 * rsi=<regsize>
1446 * rsh=<regshift>
1447 * irq=<irq>
1448 * ipmb=<ipmb addr>
1449 */
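/*
 * Usage sketch (illustrative values; see Documentation/IPMI.txt):
 *
 *	echo "add,kcs,i/o,0xca2,rsp=2,irq=9" \
 *		> /sys/module/ipmi_si/parameters/hotmod
 *	echo "remove,kcs,i/o,0xca2" > /sys/module/ipmi_si/parameters/hotmod
 */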
1450enum hotmod_op { HM_ADD, HM_REMOVE };
1451struct hotmod_vals {
1452 char *name;
1453 int val;
1454};
1455static struct hotmod_vals hotmod_ops[] = {
1456 { "add", HM_ADD },
1457 { "remove", HM_REMOVE },
1458 { NULL }
1459};
1460static struct hotmod_vals hotmod_si[] = {
1461 { "kcs", SI_KCS },
1462 { "smic", SI_SMIC },
1463 { "bt", SI_BT },
1464 { NULL }
1465};
1466static struct hotmod_vals hotmod_as[] = {
1467 { "mem", IPMI_MEM_ADDR_SPACE },
1468 { "i/o", IPMI_IO_ADDR_SPACE },
1469 { NULL }
1470};
1d5636cc 1471
b361e27b
CM
1472static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
1473{
1474 char *s;
1475 int i;
1476
1477 s = strchr(*curr, ',');
1478 if (!s) {
1479 printk(KERN_WARNING PFX "No hotmod %s given.\n", name);
1480 return -EINVAL;
1481 }
1482 *s = '\0';
1483 s++;
1484	for (i = 0; v[i].name; i++) {
1d5636cc 1485 if (strcmp(*curr, v[i].name) == 0) {
b361e27b
CM
1486 *val = v[i].val;
1487 *curr = s;
1488 return 0;
1489 }
1490 }
1491
1492 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr);
1493 return -EINVAL;
1494}
1495
1d5636cc
CM
1496static int check_hotmod_int_op(const char *curr, const char *option,
1497 const char *name, int *val)
1498{
1499 char *n;
1500
1501 if (strcmp(curr, name) == 0) {
1502 if (!option) {
1503 printk(KERN_WARNING PFX
1504 "No option given for '%s'\n",
1505 curr);
1506 return -EINVAL;
1507 }
1508 *val = simple_strtoul(option, &n, 0);
1509 if ((*n != '\0') || (*option == '\0')) {
1510 printk(KERN_WARNING PFX
1511 "Bad option given for '%s'\n",
1512 curr);
1513 return -EINVAL;
1514 }
1515 return 1;
1516 }
1517 return 0;
1518}
1519
b361e27b
CM
1520static int hotmod_handler(const char *val, struct kernel_param *kp)
1521{
1522 char *str = kstrdup(val, GFP_KERNEL);
1d5636cc 1523 int rv;
b361e27b
CM
1524 char *next, *curr, *s, *n, *o;
1525 enum hotmod_op op;
1526 enum si_type si_type;
1527 int addr_space;
1528 unsigned long addr;
1529 int regspacing;
1530 int regsize;
1531 int regshift;
1532 int irq;
1533 int ipmb;
1534 int ival;
1d5636cc 1535 int len;
b361e27b
CM
1536 struct smi_info *info;
1537
1538 if (!str)
1539 return -ENOMEM;
1540
1541 /* Kill any trailing spaces, as we can get a "\n" from echo. */
1d5636cc
CM
1542 len = strlen(str);
1543 ival = len - 1;
b361e27b
CM
1544 while ((ival >= 0) && isspace(str[ival])) {
1545 str[ival] = '\0';
1546 ival--;
1547 }
1548
1549 for (curr = str; curr; curr = next) {
1550 regspacing = 1;
1551 regsize = 1;
1552 regshift = 0;
1553 irq = 0;
1554 ipmb = 0x20;
1555
1556 next = strchr(curr, ':');
1557 if (next) {
1558 *next = '\0';
1559 next++;
1560 }
1561
1562 rv = parse_str(hotmod_ops, &ival, "operation", &curr);
1563 if (rv)
1564 break;
1565 op = ival;
1566
1567 rv = parse_str(hotmod_si, &ival, "interface type", &curr);
1568 if (rv)
1569 break;
1570 si_type = ival;
1571
1572 rv = parse_str(hotmod_as, &addr_space, "address space", &curr);
1573 if (rv)
1574 break;
1575
1576 s = strchr(curr, ',');
1577 if (s) {
1578 *s = '\0';
1579 s++;
1580 }
1581 addr = simple_strtoul(curr, &n, 0);
1582 if ((*n != '\0') || (*curr == '\0')) {
1583 printk(KERN_WARNING PFX "Invalid hotmod address"
1584 " '%s'\n", curr);
1585 break;
1586 }
1587
1588 while (s) {
1589 curr = s;
1590 s = strchr(curr, ',');
1591 if (s) {
1592 *s = '\0';
1593 s++;
1594 }
1595 o = strchr(curr, '=');
1596 if (o) {
1597 *o = '\0';
1598 o++;
1599 }
1d5636cc
CM
1600 rv = check_hotmod_int_op(curr, o, "rsp", &regspacing);
1601 if (rv < 0)
b361e27b 1602 goto out;
1d5636cc
CM
1603 else if (rv)
1604 continue;
1605 rv = check_hotmod_int_op(curr, o, "rsi", &regsize);
1606 if (rv < 0)
1607 goto out;
1608 else if (rv)
1609 continue;
1610 rv = check_hotmod_int_op(curr, o, "rsh", &regshift);
1611 if (rv < 0)
1612 goto out;
1613 else if (rv)
1614 continue;
1615 rv = check_hotmod_int_op(curr, o, "irq", &irq);
1616 if (rv < 0)
1617 goto out;
1618 else if (rv)
1619 continue;
1620 rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb);
1621 if (rv < 0)
1622 goto out;
1623 else if (rv)
1624 continue;
1625
1626 rv = -EINVAL;
1627 printk(KERN_WARNING PFX
1628 "Invalid hotmod option '%s'\n",
1629 curr);
1630 goto out;
b361e27b
CM
1631 }
1632
1633 if (op == HM_ADD) {
1634 info = kzalloc(sizeof(*info), GFP_KERNEL);
1635 if (!info) {
1636 rv = -ENOMEM;
1637 goto out;
1638 }
1639
1640 info->addr_source = "hotmod";
1641 info->si_type = si_type;
1642 info->io.addr_data = addr;
1643 info->io.addr_type = addr_space;
1644 if (addr_space == IPMI_MEM_ADDR_SPACE)
1645 info->io_setup = mem_setup;
1646 else
1647 info->io_setup = port_setup;
1648
1649 info->io.addr = NULL;
1650 info->io.regspacing = regspacing;
1651 if (!info->io.regspacing)
1652 info->io.regspacing = DEFAULT_REGSPACING;
1653 info->io.regsize = regsize;
1654 if (!info->io.regsize)
1655 info->io.regsize = DEFAULT_REGSPACING;
1656 info->io.regshift = regshift;
1657 info->irq = irq;
1658 if (info->irq)
1659 info->irq_setup = std_irq_setup;
1660 info->slave_addr = ipmb;
1661
1662 try_smi_init(info);
1663 } else {
1664 /* remove */
1665 struct smi_info *e, *tmp_e;
1666
1667 mutex_lock(&smi_infos_lock);
1668 list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
1669 if (e->io.addr_type != addr_space)
1670 continue;
1671 if (e->si_type != si_type)
1672 continue;
1673 if (e->io.addr_data == addr)
1674 cleanup_one_si(e);
1675 }
1676 mutex_unlock(&smi_infos_lock);
1677 }
1678 }
1d5636cc 1679 rv = len;
b361e27b
CM
1680 out:
1681 kfree(str);
1682 return rv;
1683}
b0defcdb
CM
1684
1685static __devinit void hardcode_find_bmc(void)
1da177e4 1686{
b0defcdb 1687 int i;
1da177e4
LT
1688 struct smi_info *info;
1689
b0defcdb
CM
1690 for (i = 0; i < SI_MAX_PARMS; i++) {
1691 if (!ports[i] && !addrs[i])
1692 continue;
1da177e4 1693
b0defcdb
CM
1694 info = kzalloc(sizeof(*info), GFP_KERNEL);
1695 if (!info)
1696 return;
1da177e4 1697
b0defcdb 1698 info->addr_source = "hardcoded";
1da177e4 1699
1d5636cc 1700 if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
b0defcdb 1701 info->si_type = SI_KCS;
1d5636cc 1702 } else if (strcmp(si_type[i], "smic") == 0) {
b0defcdb 1703 info->si_type = SI_SMIC;
1d5636cc 1704 } else if (strcmp(si_type[i], "bt") == 0) {
b0defcdb
CM
1705 info->si_type = SI_BT;
1706 } else {
1707 printk(KERN_WARNING
1708 "ipmi_si: Interface type specified "
1709			       "for interface %d was invalid: %s\n",
1710 i, si_type[i]);
1711 kfree(info);
1712 continue;
1713 }
1da177e4 1714
b0defcdb
CM
1715 if (ports[i]) {
1716 /* An I/O port */
1717 info->io_setup = port_setup;
1718 info->io.addr_data = ports[i];
1719 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1720 } else if (addrs[i]) {
1721 /* A memory port */
1722 info->io_setup = mem_setup;
1723 info->io.addr_data = addrs[i];
1724 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1725 } else {
1726 printk(KERN_WARNING
1727 "ipmi_si: Interface type specified "
1728 "for interface %d, "
1729 "but port and address were not set or "
1730 "set to zero.\n", i);
1731 kfree(info);
1732 continue;
1733 }
1da177e4 1734
b0defcdb
CM
1735 info->io.addr = NULL;
1736 info->io.regspacing = regspacings[i];
1737 if (!info->io.regspacing)
1738 info->io.regspacing = DEFAULT_REGSPACING;
1739 info->io.regsize = regsizes[i];
1740 if (!info->io.regsize)
1741 info->io.regsize = DEFAULT_REGSPACING;
1742 info->io.regshift = regshifts[i];
1743 info->irq = irqs[i];
1744 if (info->irq)
1745 info->irq_setup = std_irq_setup;
1da177e4 1746
b0defcdb
CM
1747 try_smi_init(info);
1748 }
1749}
1da177e4 1750
8466361a 1751#ifdef CONFIG_ACPI
1da177e4
LT
1752
1753#include <linux/acpi.h>
1754
1755/* Once we get an ACPI failure, we don't try any more, because we go
1756 through the tables sequentially. Once we don't find a table, there
1757 are no more. */
0c8204b3 1758static int acpi_failure;
1da177e4
LT
1759
1760/* For GPE-type interrupts. */
1761static u32 ipmi_acpi_gpe(void *context)
1762{
1763 struct smi_info *smi_info = context;
1764 unsigned long flags;
1765#ifdef DEBUG_TIMING
1766 struct timeval t;
1767#endif
1768
1769 spin_lock_irqsave(&(smi_info->si_lock), flags);
1770
1771 spin_lock(&smi_info->count_lock);
1772 smi_info->interrupts++;
1773 spin_unlock(&smi_info->count_lock);
1774
1da177e4
LT
1775#ifdef DEBUG_TIMING
1776 do_gettimeofday(&t);
1777 printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
1778#endif
1779 smi_event_handler(smi_info, 0);
1da177e4
LT
1780 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
1781
1782 return ACPI_INTERRUPT_HANDLED;
1783}
1784
b0defcdb
CM
1785static void acpi_gpe_irq_cleanup(struct smi_info *info)
1786{
1787 if (!info->irq)
1788 return;
1789
1790 acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
1791}
1792
1da177e4
LT
1793static int acpi_gpe_irq_setup(struct smi_info *info)
1794{
1795 acpi_status status;
1796
b0defcdb 1797 if (!info->irq)
1da177e4
LT
1798 return 0;
1799
1800 /* FIXME - is level triggered right? */
1801 status = acpi_install_gpe_handler(NULL,
1802 info->irq,
1803 ACPI_GPE_LEVEL_TRIGGERED,
1804 &ipmi_acpi_gpe,
1805 info);
1806 if (status != AE_OK) {
1807 printk(KERN_WARNING
1808 "ipmi_si: %s unable to claim ACPI GPE %d,"
1809 " running polled\n",
1810 DEVICE_NAME, info->irq);
1811 info->irq = 0;
1812 return -EINVAL;
1813 } else {
b0defcdb 1814 info->irq_cleanup = acpi_gpe_irq_cleanup;
1da177e4
LT
1815 printk(" Using ACPI GPE %d\n", info->irq);
1816 return 0;
1817 }
1818}
1819
1da177e4
LT
1820/*
1821 * Defined at
1822 * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
1823 */
1824struct SPMITable {
1825 s8 Signature[4];
1826 u32 Length;
1827 u8 Revision;
1828 u8 Checksum;
1829 s8 OEMID[6];
1830 s8 OEMTableID[8];
1831 s8 OEMRevision[4];
1832 s8 CreatorID[4];
1833 s8 CreatorRevision[4];
1834 u8 InterfaceType;
1835 u8 IPMIlegacy;
1836 s16 SpecificationRevision;
1837
1838 /*
1839 * Bit 0 - SCI interrupt supported
1840 * Bit 1 - I/O APIC/SAPIC
1841 */
1842 u8 InterruptType;
1843
1844 /* If bit 0 of InterruptType is set, then this is the SCI
1845 interrupt in the GPEx_STS register. */
1846 u8 GPE;
1847
1848 s16 Reserved;
1849
1850 /* If bit 1 of InterruptType is set, then this is the I/O
1851 APIC/SAPIC interrupt. */
1852 u32 GlobalSystemInterrupt;
1853
1854 /* The actual register address. */
1855 struct acpi_generic_address addr;
1856
1857 u8 UID[4];
1858
1859 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
1860};
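/*
 * Worked example (values are hypothetical, not from a real table): an
 * SPMI entry with InterfaceType = 1, InterruptType = 0x01, GPE = 0x0b
 * and addr.space_id = ACPI_ADR_SPACE_SYSTEM_IO describes a KCS
 * interface in I/O space whose attention signal is routed through ACPI
 * GPE 0x0b; try_init_acpi() below turns that into an smi_info using
 * port_setup and acpi_gpe_irq_setup.
 */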
1861
b0defcdb 1862static __devinit int try_init_acpi(struct SPMITable *spmi)
1da177e4
LT
1863{
1864 struct smi_info *info;
1da177e4
LT
1865 u8 addr_space;
1866
1da177e4
LT
1867 if (spmi->IPMIlegacy != 1) {
1868 printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
1869 return -ENODEV;
1870 }
1871
15a58ed1 1872 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
1da177e4
LT
1873 addr_space = IPMI_MEM_ADDR_SPACE;
1874 else
1875 addr_space = IPMI_IO_ADDR_SPACE;
b0defcdb
CM
1876
1877 info = kzalloc(sizeof(*info), GFP_KERNEL);
1878 if (!info) {
1879 printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
1880 return -ENOMEM;
1881 }
1882
1883 info->addr_source = "ACPI";
1da177e4 1884
1da177e4
LT
1885 /* Figure out the interface type. */
1886 switch (spmi->InterfaceType) {
1888 case 1: /* KCS */
b0defcdb 1889 info->si_type = SI_KCS;
1da177e4 1890 break;
1da177e4 1891 case 2: /* SMIC */
b0defcdb 1892 info->si_type = SI_SMIC;
1da177e4 1893 break;
1da177e4 1894 case 3: /* BT */
b0defcdb 1895 info->si_type = SI_BT;
1da177e4 1896 break;
1da177e4
LT
1897 default:
1898 printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
1899 spmi->InterfaceType);
b0defcdb 1900 kfree(info);
1da177e4
LT
1901 return -EIO;
1902 }
1903
1da177e4
LT
1904 if (spmi->InterruptType & 1) {
1905 /* We've got a GPE interrupt. */
1906 info->irq = spmi->GPE;
1907 info->irq_setup = acpi_gpe_irq_setup;
1da177e4
LT
1908 } else if (spmi->InterruptType & 2) {
1909 /* We've got an APIC/SAPIC interrupt. */
1910 info->irq = spmi->GlobalSystemInterrupt;
1911 info->irq_setup = std_irq_setup;
1da177e4
LT
1912 } else {
1913 /* Use the default interrupt setting. */
1914 info->irq = 0;
1915 info->irq_setup = NULL;
1916 }
1917
15a58ed1 1918 if (spmi->addr.bit_width) {
35bc37a0 1919 /* A (hopefully) properly formed register bit width. */
15a58ed1 1920 info->io.regspacing = spmi->addr.bit_width / 8;
35bc37a0 1921 } else {
35bc37a0
CM
1922 info->io.regspacing = DEFAULT_REGSPACING;
1923 }
b0defcdb 1924 info->io.regsize = info->io.regspacing;
15a58ed1 1925 info->io.regshift = spmi->addr.bit_offset;
1da177e4 1926
15a58ed1 1927 if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
1da177e4 1928 info->io_setup = mem_setup;
8fe1425a 1929 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
15a58ed1 1930 } else if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
1da177e4 1931 info->io_setup = port_setup;
8fe1425a 1932 info->io.addr_type = IPMI_IO_ADDR_SPACE;
1da177e4
LT
1933 } else {
1934 kfree(info);
1935 printk(KERN_WARNING "ipmi_si: Unknown ACPI I/O Address type\n");
1936 return -EIO;
1937 }
b0defcdb 1938 info->io.addr_data = spmi->addr.address;
1da177e4 1939
b0defcdb 1940 try_smi_init(info);
1da177e4 1941
1da177e4
LT
1942 return 0;
1943}
b0defcdb
CM
1944
1945static __devinit void acpi_find_bmc(void)
1946{
1947 acpi_status status;
1948 struct SPMITable *spmi;
1949 int i;
1950
1951 if (acpi_disabled)
1952 return;
1953
1954 if (acpi_failure)
1955 return;
1956
1957 for (i = 0; ; i++) {
15a58ed1
AS
1958 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1959 (struct acpi_table_header **)&spmi);
b0defcdb
CM
1960 if (status != AE_OK)
1961 return;
1962
1963 try_init_acpi(spmi);
1964 }
1965}
1da177e4
LT
1966#endif
1967
a9fad4cc 1968#ifdef CONFIG_DMI
b0defcdb 1969struct dmi_ipmi_data
1da177e4
LT
1970{
1971 u8 type;
1972 u8 addr_space;
1973 unsigned long base_addr;
1974 u8 irq;
1975 u8 offset;
1976 u8 slave_addr;
b0defcdb 1977};
1da177e4 1978
1855256c 1979static int __devinit decode_dmi(const struct dmi_header *dm,
b0defcdb 1980 struct dmi_ipmi_data *dmi)
1da177e4 1981{
1855256c 1982 const u8 *data = (const u8 *)dm;
1da177e4
LT
1983 unsigned long base_addr;
1984 u8 reg_spacing;
b224cd3a 1985 u8 len = dm->length;
1da177e4 1986
b0defcdb 1987 dmi->type = data[4];
1da177e4
LT
1988
1989 memcpy(&base_addr, data+8, sizeof(unsigned long));
1990 if (len >= 0x11) {
1991 if (base_addr & 1) {
1992 /* I/O */
1993 base_addr &= 0xFFFE;
b0defcdb 1994 dmi->addr_space = IPMI_IO_ADDR_SPACE;
1da177e4
LT
1995 } else {
1997 /* Memory */
b0defcdb 1998 dmi->addr_space = IPMI_MEM_ADDR_SPACE;
1da177e4
LT
1999 }
2000 /* If bit 4 of byte 0x10 is set, then the least-significant
2001 bit of the address is 1. */
b0defcdb 2002 dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
1da177e4 2003
b0defcdb 2004 dmi->irq = data[0x11];
1da177e4
LT
2005
2006 /* The top two bits of byte 0x10 hold the register spacing. */
b224cd3a 2007 reg_spacing = (data[0x10] & 0xC0) >> 6;
1da177e4
LT
2008 switch (reg_spacing) {
2009 case 0x00: /* Byte boundaries */
b0defcdb 2010 dmi->offset = 1;
1da177e4
LT
2011 break;
2012 case 0x01: /* 32-bit boundaries */
b0defcdb 2013 dmi->offset = 4;
1da177e4
LT
2014 break;
2015 case 0x02: /* 16-byte boundaries */
b0defcdb 2016 dmi->offset = 16;
1da177e4
LT
2017 break;
2018 default:
2019 /* Some other interface, just ignore it. */
2020 return -EIO;
2021 }
2022 } else {
2023 /* Old DMI spec. */
92068801
CM
2024 /* Note that technically, the lower bit of the base
2025 * address should be 1 if the address is I/O and 0 if
2026 * the address is in memory. So many systems get that
2027 * wrong (and all that I have seen are I/O) so we just
2028 * ignore that bit and assume I/O. Systems that use
2029 * memory should use the newer spec, anyway. */
b0defcdb
CM
2030 dmi->base_addr = base_addr & 0xfffe;
2031 dmi->addr_space = IPMI_IO_ADDR_SPACE;
2032 dmi->offset = 1;
1da177e4
LT
2033 }
2034
b0defcdb 2035 dmi->slave_addr = data[6];
1da177e4 2036
b0defcdb 2037 return 0;
1da177e4
LT
2038}
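/*
 * Worked example for decode_dmi() above (hypothetical record): a
 * base-address field of 0x0CA3 has its low bit set, so the interface
 * is in I/O space and the address is masked to 0x0CA2; with bit 4 of
 * byte 0x10 clear the final base_addr stays 0x0CA2, and register
 * spacing bits 00 in the top of byte 0x10 give an offset of 1 (byte
 * boundaries).
 */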
2039
b0defcdb 2040static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
1da177e4 2041{
b0defcdb 2042 struct smi_info *info;
1da177e4 2043
b0defcdb
CM
2044 info = kzalloc(sizeof(*info), GFP_KERNEL);
2045 if (!info) {
2046 printk(KERN_ERR
2047 "ipmi_si: Could not allocate SI data\n");
2048 return;
1da177e4 2049 }
1da177e4 2050
b0defcdb 2051 info->addr_source = "SMBIOS";
1da177e4 2052
e8b33617 2053 switch (ipmi_data->type) {
b0defcdb
CM
2054 case 0x01: /* KCS */
2055 info->si_type = SI_KCS;
2056 break;
2057 case 0x02: /* SMIC */
2058 info->si_type = SI_SMIC;
2059 break;
2060 case 0x03: /* BT */
2061 info->si_type = SI_BT;
2062 break;
2063 default:
80cd6920 2064 kfree(info);
b0defcdb 2065 return;
1da177e4 2066 }
1da177e4 2067
b0defcdb
CM
2068 switch (ipmi_data->addr_space) {
2069 case IPMI_MEM_ADDR_SPACE:
1da177e4 2070 info->io_setup = mem_setup;
b0defcdb
CM
2071 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2072 break;
2073
2074 case IPMI_IO_ADDR_SPACE:
1da177e4 2075 info->io_setup = port_setup;
b0defcdb
CM
2076 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2077 break;
2078
2079 default:
1da177e4 2080 kfree(info);
b0defcdb
CM
2081 printk(KERN_WARNING
2082 "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
2083 ipmi_data->addr_space);
2084 return;
1da177e4 2085 }
b0defcdb 2086 info->io.addr_data = ipmi_data->base_addr;
1da177e4 2087
b0defcdb
CM
2088 info->io.regspacing = ipmi_data->offset;
2089 if (!info->io.regspacing)
1da177e4
LT
2090 info->io.regspacing = DEFAULT_REGSPACING;
2091 info->io.regsize = DEFAULT_REGSPACING;
b0defcdb 2092 info->io.regshift = 0;
1da177e4
LT
2093
2094 info->slave_addr = ipmi_data->slave_addr;
2095
b0defcdb
CM
2096 info->irq = ipmi_data->irq;
2097 if (info->irq)
2098 info->irq_setup = std_irq_setup;
1da177e4 2099
b0defcdb
CM
2100 try_smi_init(info);
2101}
1da177e4 2102
b0defcdb
CM
2103static void __devinit dmi_find_bmc(void)
2104{
1855256c 2105 const struct dmi_device *dev = NULL;
b0defcdb
CM
2106 struct dmi_ipmi_data data;
2107 int rv;
2108
2109 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
397f4ebf 2110 memset(&data, 0, sizeof(data));
1855256c
JG
2111 rv = decode_dmi((const struct dmi_header *) dev->device_data,
2112 &data);
b0defcdb
CM
2113 if (!rv)
2114 try_init_dmi(&data);
2115 }
1da177e4 2116}
a9fad4cc 2117#endif /* CONFIG_DMI */
1da177e4
LT
2118
2119#ifdef CONFIG_PCI
2120
b0defcdb
CM
2121#define PCI_ERMC_CLASSCODE 0x0C0700
2122#define PCI_ERMC_CLASSCODE_MASK 0xffffff00
2123#define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff
2124#define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00
2125#define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01
2126#define PCI_ERMC_CLASSCODE_TYPE_BT 0x02
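/*
 * PCI_ERMC_CLASSCODE is base class 0x0C (serial bus controllers),
 * sub-class 0x07 (IPMI); the low prog-if byte selects SMIC/KCS/BT,
 * which is what ipmi_pci_probe() below reads out of pdev->class.
 */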
2127
1da177e4
LT
2128#define PCI_HP_VENDOR_ID 0x103C
2129#define PCI_MMC_DEVICE_ID 0x121A
2130#define PCI_MMC_ADDR_CW 0x10
2131
b0defcdb
CM
2132static void ipmi_pci_cleanup(struct smi_info *info)
2133{
2134 struct pci_dev *pdev = info->addr_source_data;
2135
2136 pci_disable_device(pdev);
2137}
1da177e4 2138
b0defcdb
CM
2139static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
2140 const struct pci_device_id *ent)
1da177e4 2141{
b0defcdb
CM
2142 int rv;
2143 int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
2144 struct smi_info *info;
2145 int first_reg_offset = 0;
1da177e4 2146
b0defcdb
CM
2147 info = kzalloc(sizeof(*info), GFP_KERNEL);
2148 if (!info)
1cd441f9 2149 return -ENOMEM;
1da177e4 2150
b0defcdb 2151 info->addr_source = "PCI";
1da177e4 2152
b0defcdb
CM
2153 switch (class_type) {
2154 case PCI_ERMC_CLASSCODE_TYPE_SMIC:
2155 info->si_type = SI_SMIC;
2156 break;
1da177e4 2157
b0defcdb
CM
2158 case PCI_ERMC_CLASSCODE_TYPE_KCS:
2159 info->si_type = SI_KCS;
2160 break;
2161
2162 case PCI_ERMC_CLASSCODE_TYPE_BT:
2163 info->si_type = SI_BT;
2164 break;
2165
2166 default:
2167 kfree(info);
2168 printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
2169 pci_name(pdev), class_type);
1cd441f9 2170 return -ENODEV;
1da177e4
LT
2171 }
2172
b0defcdb
CM
2173 rv = pci_enable_device(pdev);
2174 if (rv) {
2175 printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
2176 pci_name(pdev));
2177 kfree(info);
2178 return rv;
1da177e4
LT
2179 }
2180
b0defcdb
CM
2181 info->addr_source_cleanup = ipmi_pci_cleanup;
2182 info->addr_source_data = pdev;
1da177e4 2183
b0defcdb
CM
2184 if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
2185 first_reg_offset = 1;
1da177e4 2186
b0defcdb
CM
2187 if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
2188 info->io_setup = port_setup;
2189 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2190 } else {
2191 info->io_setup = mem_setup;
2192 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
1da177e4 2193 }
b0defcdb 2194 info->io.addr_data = pci_resource_start(pdev, 0);
1da177e4 2195
b0defcdb 2196 info->io.regspacing = DEFAULT_REGSPACING;
1da177e4 2197 info->io.regsize = DEFAULT_REGSPACING;
b0defcdb 2198 info->io.regshift = 0;
1da177e4 2199
b0defcdb
CM
2200 info->irq = pdev->irq;
2201 if (info->irq)
2202 info->irq_setup = std_irq_setup;
1da177e4 2203
50c812b2 2204 info->dev = &pdev->dev;
fca3b747 2205 pci_set_drvdata(pdev, info);
50c812b2 2206
b0defcdb
CM
2207 return try_smi_init(info);
2208}
1da177e4 2209
b0defcdb
CM
2210static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
2211{
fca3b747
CM
2212 struct smi_info *info = pci_get_drvdata(pdev);
2213 cleanup_one_si(info);
b0defcdb 2214}
1da177e4 2215
b0defcdb
CM
2216#ifdef CONFIG_PM
2217static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2218{
1da177e4
LT
2219 return 0;
2220}
1da177e4 2221
b0defcdb 2222static int ipmi_pci_resume(struct pci_dev *pdev)
1da177e4 2223{
b0defcdb
CM
2224 return 0;
2225}
1da177e4 2226#endif
1da177e4 2227
b0defcdb
CM
2228static struct pci_device_id ipmi_pci_devices[] = {
2229 { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
248bdd5e
KC
2230 { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
2231 { 0, }
b0defcdb
CM
2232};
2233MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
2234
2235static struct pci_driver ipmi_pci_driver = {
2236 .name = DEVICE_NAME,
2237 .id_table = ipmi_pci_devices,
2238 .probe = ipmi_pci_probe,
2239 .remove = __devexit_p(ipmi_pci_remove),
2240#ifdef CONFIG_PM
2241 .suspend = ipmi_pci_suspend,
2242 .resume = ipmi_pci_resume,
2243#endif
2244};
2245#endif /* CONFIG_PCI */
1da177e4
LT
2246
2247
dba9b4f6
CM
2248#ifdef CONFIG_PPC_OF
2249static int __devinit ipmi_of_probe(struct of_device *dev,
2250 const struct of_device_id *match)
2251{
2252 struct smi_info *info;
2253 struct resource resource;
2254 const int *regsize, *regspacing, *regshift;
2255 struct device_node *np = dev->node;
2256 int ret;
2257 int proplen;
2258
2259 dev_info(&dev->dev, PFX "probing via device tree\n");
2260
2261 ret = of_address_to_resource(np, 0, &resource);
2262 if (ret) {
2263 dev_warn(&dev->dev, PFX "invalid address from OF\n");
2264 return ret;
2265 }
2266
9c25099d 2267 regsize = of_get_property(np, "reg-size", &proplen);
dba9b4f6
CM
2268 if (regsize && proplen != 4) {
2269 dev_warn(&dev->dev, PFX "invalid regsize from OF\n");
2270 return -EINVAL;
2271 }
2272
9c25099d 2273 regspacing = of_get_property(np, "reg-spacing", &proplen);
dba9b4f6
CM
2274 if (regspacing && proplen != 4) {
2275 dev_warn(&dev->dev, PFX "invalid regspacing from OF\n");
2276 return -EINVAL;
2277 }
2278
9c25099d 2279 regshift = of_get_property(np, "reg-shift", &proplen);
dba9b4f6
CM
2280 if (regshift && proplen != 4) {
2281 dev_warn(&dev->dev, PFX "invalid regshift from OF\n");
2282 return -EINVAL;
2283 }
2284
2285 info = kzalloc(sizeof(*info), GFP_KERNEL);
2286
2287 if (!info) {
2288 dev_err(&dev->dev,
2289 PFX "could not allocate memory for OF probe\n");
2290 return -ENOMEM;
2291 }
2292
2293 info->si_type = (enum si_type) (unsigned long) match->data;
2294 info->addr_source = "device-tree";
2295 info->io_setup = mem_setup;
2296 info->irq_setup = std_irq_setup;
2297
2298 info->io.addr_type = IPMI_MEM_ADDR_SPACE;
2299 info->io.addr_data = resource.start;
2300
2301 info->io.regsize = regsize ? *regsize : DEFAULT_REGSIZE;
2302 info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
2303 info->io.regshift = regshift ? *regshift : 0;
2304
2305 info->irq = irq_of_parse_and_map(dev->node, 0);
2306 info->dev = &dev->dev;
2307
32d21985 2308 dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
dba9b4f6
CM
2309 info->io.addr_data, info->io.regsize, info->io.regspacing,
2310 info->irq);
2311
2312 dev->dev.driver_data = (void*) info;
2313
2314 return try_smi_init(info);
2315}
2316
2317static int __devexit ipmi_of_remove(struct of_device *dev)
2318{
2319 cleanup_one_si(dev->dev.driver_data);
2320 return 0;
2321}
2322
2323static struct of_device_id ipmi_match[] =
2324{
2325 { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS },
2326 { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
2327 { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT },
2328 {},
2329};
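/*
 * Illustrative device-tree node matched by the table above (property
 * values are examples; the "reg" layout depends on the parent bus, and
 * reg-size/reg-spacing/reg-shift are optional).  An "interrupts"
 * property, if present, is mapped via irq_of_parse_and_map() in
 * ipmi_of_probe().
 *
 *   bmc@f0000000 {
 *           device_type = "ipmi";
 *           compatible = "ipmi-kcs";
 *           reg = <0xf0000000 0x2>;
 *           reg-size = <1>;
 *           reg-spacing = <4>;
 *   };
 */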
2330
2331static struct of_platform_driver ipmi_of_platform_driver =
2332{
2333 .name = "ipmi",
2334 .match_table = ipmi_match,
2335 .probe = ipmi_of_probe,
2336 .remove = __devexit_p(ipmi_of_remove),
2337};
2338#endif /* CONFIG_PPC_OF */
2339
2340
1da177e4
LT
2341static int try_get_dev_id(struct smi_info *smi_info)
2342{
50c812b2
CM
2343 unsigned char msg[2];
2344 unsigned char *resp;
2345 unsigned long resp_len;
2346 enum si_sm_result smi_result;
2347 int rv = 0;
1da177e4
LT
2348
2349 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
b0defcdb 2350 if (!resp)
1da177e4
LT
2351 return -ENOMEM;
2352
2353 /* Do a Get Device ID command, since it comes back with some
2354 useful info. */
2355 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
2356 msg[1] = IPMI_GET_DEVICE_ID_CMD;
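 /* (Request framing: byte 0 carries the NetFn in its upper six
 bits with LUN 0 in the low two, byte 1 is the command code.) */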
2357 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
2358
2359 smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
2360 for (;;) {
c3e7e791
CM
2362 if (smi_result == SI_SM_CALL_WITH_DELAY ||
2363 smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
da4cd8df 2364 schedule_timeout_uninterruptible(1);
1da177e4
LT
2365 smi_result = smi_info->handlers->event(
2366 smi_info->si_sm, 100);
2367 } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
2370 smi_result = smi_info->handlers->event(
2371 smi_info->si_sm, 0);
2372 } else
2374 break;
2375 }
2376 if (smi_result == SI_SM_HOSED) {
2377 /* We couldn't get the state machine to run, so whatever's at
2378 the port is probably not an IPMI SMI interface. */
2379 rv = -ENODEV;
2380 goto out;
2381 }
2382
2383 /* Otherwise, we got some data. */
2384 resp_len = smi_info->handlers->get_result(smi_info->si_sm,
2385 resp, IPMI_MAX_MSG_LENGTH);
1da177e4 2386
d8c98618
CM
2387 /* Check and record info from the get device id, in case we need it. */
2388 rv = ipmi_demangle_device_id(resp, resp_len, &smi_info->device_id);
1da177e4
LT
2389
2390 out:
2391 kfree(resp);
2392 return rv;
2393}
2394
2395static int type_file_read_proc(char *page, char **start, off_t off,
2396 int count, int *eof, void *data)
2397{
1da177e4
LT
2398 struct smi_info *smi = data;
2399
b361e27b 2400 return sprintf(page, "%s\n", si_to_str[smi->si_type]);
1da177e4
LT
2401}
2402
2403static int stat_file_read_proc(char *page, char **start, off_t off,
2404 int count, int *eof, void *data)
2405{
2406 char *out = (char *) page;
2407 struct smi_info *smi = data;
2408
2409 out += sprintf(out, "interrupts_enabled: %d\n",
b0defcdb 2410 smi->irq && !smi->interrupt_disabled);
1da177e4
LT
2411 out += sprintf(out, "short_timeouts: %ld\n",
2412 smi->short_timeouts);
2413 out += sprintf(out, "long_timeouts: %ld\n",
2414 smi->long_timeouts);
2415 out += sprintf(out, "timeout_restarts: %ld\n",
2416 smi->timeout_restarts);
2417 out += sprintf(out, "idles: %ld\n",
2418 smi->idles);
2419 out += sprintf(out, "interrupts: %ld\n",
2420 smi->interrupts);
2421 out += sprintf(out, "attentions: %ld\n",
2422 smi->attentions);
2423 out += sprintf(out, "flag_fetches: %ld\n",
2424 smi->flag_fetches);
2425 out += sprintf(out, "hosed_count: %ld\n",
2426 smi->hosed_count);
2427 out += sprintf(out, "complete_transactions: %ld\n",
2428 smi->complete_transactions);
2429 out += sprintf(out, "events: %ld\n",
2430 smi->events);
2431 out += sprintf(out, "watchdog_pretimeouts: %ld\n",
2432 smi->watchdog_pretimeouts);
2433 out += sprintf(out, "incoming_messages: %ld\n",
2434 smi->incoming_messages);
2435
b361e27b
CM
2436 return out - page;
2437}
2438
2439static int param_read_proc(char *page, char **start, off_t off,
2440 int count, int *eof, void *data)
2441{
2442 struct smi_info *smi = data;
2443
2444 return sprintf(page,
2445 "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
2446 si_to_str[smi->si_type],
2447 addr_space_to_str[smi->io.addr_type],
2448 smi->io.addr_data,
2449 smi->io.regspacing,
2450 smi->io.regsize,
2451 smi->io.regshift,
2452 smi->irq,
2453 smi->slave_addr);
1da177e4
LT
2454}
2455
3ae0e0f9
CM
2456/*
2457 * oem_data_avail_to_receive_msg_avail
2458 * @info - smi_info structure with msg_flags set
2459 *
2460 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
2461 * Returns 1 to indicate that handle_flags() needs to be re-run.
2462 */
2463static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
2464{
e8b33617
CM
2465 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
2466 RECEIVE_MSG_AVAIL);
3ae0e0f9
CM
2467 return 1;
2468}
2469
2470/*
2471 * setup_dell_poweredge_oem_data_handler
2472 * @info - smi_info.device_id must be populated
2473 *
2474 * Systems that match, but have firmware version < 1.40 may assert
2475 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
2476 * it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
2477 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
2478 * as RECEIVE_MSG_AVAIL instead.
2479 *
2480 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
2481 * asserts the OEM[012] bits, and if it did, the driver would have to
2482 * change to handle that properly, we don't actually check for the
2483 * firmware version.
2484 * Device ID = 0x20 BMC on PowerEdge 8G servers
2485 * Device Revision = 0x80
2486 * Firmware Revision1 = 0x01 BMC version 1.40
2487 * Firmware Revision2 = 0x40 BCD encoded
2488 * IPMI Version = 0x51 IPMI 1.5
2489 * Manufacturer ID = A2 02 00 Dell IANA
2490 *
d5a2b89a
CM
2491 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
2492 * OEM0_DATA_AVAIL and need to be treated as RECEIVE_MSG_AVAIL.
2493 *
3ae0e0f9
CM
2494 */
2495#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
2496#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
2497#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
50c812b2 2498#define DELL_IANA_MFR_ID 0x0002a2
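/* 0x0002a2 is 674, Dell's IANA-assigned enterprise number. */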
3ae0e0f9
CM
2499static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
2500{
2501 struct ipmi_device_id *id = &smi_info->device_id;
50c812b2 2502 if (id->manufacturer_id == DELL_IANA_MFR_ID) {
d5a2b89a
CM
2503 if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
2504 id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
50c812b2 2505 id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
d5a2b89a
CM
2506 smi_info->oem_data_avail_handler =
2507 oem_data_avail_to_receive_msg_avail;
2508 } else if (ipmi_version_major(id) < 1 ||
2510 (ipmi_version_major(id) == 1 &&
2511 ipmi_version_minor(id) < 5)) {
2512 smi_info->oem_data_avail_handler =
2513 oem_data_avail_to_receive_msg_avail;
2514 }
3ae0e0f9
CM
2515 }
2516}
2517
ea94027b
CM
2518#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
2519static void return_hosed_msg_badsize(struct smi_info *smi_info)
2520{
2521 struct ipmi_smi_msg *msg = smi_info->curr_msg;
2522
2523 /* Make it a response */
2524 msg->rsp[0] = msg->data[0] | 4;
2525 msg->rsp[1] = msg->data[1];
2526 msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
2527 msg->rsp_size = 3;
2528 smi_info->curr_msg = NULL;
2529 deliver_recv_msg(smi_info, msg);
2530}
2531
2532/*
2533 * dell_poweredge_bt_xaction_handler
2534 * @info - smi_info.device_id must be populated
2535 *
2536 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
2537 * not respond to a Get SDR command if the length of the data
2538 * requested is exactly 0x3A, which leads to command timeouts and no
2539 * data returned. This intercepts such commands, and causes userspace
2540 * callers to try again with a different-sized buffer, which succeeds.
2541 */
2542
2543#define STORAGE_NETFN 0x0A
2544#define STORAGE_CMD_GET_SDR 0x23
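/*
 * Get SDR request layout, for reference: byte 0 NetFn/LUN
 * (STORAGE_NETFN << 2 == 0x28), byte 1 command (0x23), bytes 2-3
 * reservation ID, bytes 4-5 record ID, byte 6 offset into the record,
 * byte 7 bytes-to-read.  The handler below only rewrites requests
 * whose bytes-to-read field is exactly 0x3A.
 */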
2545static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
2546 unsigned long unused,
2547 void *in)
2548{
2549 struct smi_info *smi_info = in;
2550 unsigned char *data = smi_info->curr_msg->data;
2551 unsigned int size = smi_info->curr_msg->data_size;
2552 if (size >= 8 &&
2553 (data[0]>>2) == STORAGE_NETFN &&
2554 data[1] == STORAGE_CMD_GET_SDR &&
2555 data[7] == 0x3A) {
2556 return_hosed_msg_badsize(smi_info);
2557 return NOTIFY_STOP;
2558 }
2559 return NOTIFY_DONE;
2560}
2561
2562static struct notifier_block dell_poweredge_bt_xaction_notifier = {
2563 .notifier_call = dell_poweredge_bt_xaction_handler,
2564};
2565
2566/*
2567 * setup_dell_poweredge_bt_xaction_handler
2568 * @info - smi_info.device_id must be filled in already
2569 *
2570 * Fills in smi_info.device_id.start_transaction_pre_hook
2571 * when we know what function to use there.
2572 */
2573static void
2574setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
2575{
2576 struct ipmi_device_id *id = &smi_info->device_id;
50c812b2 2577 if (id->manufacturer_id == DELL_IANA_MFR_ID &&
ea94027b
CM
2578 smi_info->si_type == SI_BT)
2579 register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
2580}
2581
3ae0e0f9
CM
2582/*
2583 * setup_oem_data_handler
2584 * @info - smi_info.device_id must be filled in already
2585 *
2586 * Fills in smi_info.device_id.oem_data_available_handler
2587 * when we know what function to use there.
2588 */
2589
2590static void setup_oem_data_handler(struct smi_info *smi_info)
2591{
2592 setup_dell_poweredge_oem_data_handler(smi_info);
2593}
2594
ea94027b
CM
2595static void setup_xaction_handlers(struct smi_info *smi_info)
2596{
2597 setup_dell_poweredge_bt_xaction_handler(smi_info);
2598}
2599
a9a2c44f
CM
2600static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
2601{
453823ba
CM
2602 if (smi_info->intf) {
2603 /* The timer and thread are only running if the
2604 interface has been started up and registered. */
2605 if (smi_info->thread != NULL)
2606 kthread_stop(smi_info->thread);
2607 del_timer_sync(&smi_info->si_timer);
2608 }
a9a2c44f
CM
2609}
2610
7420884c 2611static __devinitdata struct ipmi_default_vals
b0defcdb
CM
2612{
2613 int type;
2614 int port;
7420884c 2615} ipmi_defaults[] =
b0defcdb
CM
2616{
2617 { .type = SI_KCS, .port = 0xca2 },
2618 { .type = SI_SMIC, .port = 0xca9 },
2619 { .type = SI_BT, .port = 0xe4 },
2620 { .port = 0 }
2621};
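/*
 * These are the legacy "well known" SMS addresses from the IPMI
 * specification: KCS at I/O port 0xca2, SMIC at 0xca9 and BT at 0xe4.
 * default_find_bmc() below probes them only when "trydefaults" is set
 * and no other interface has been registered.
 */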
2622
2623static __devinit void default_find_bmc(void)
2624{
2625 struct smi_info *info;
2626 int i;
2627
2628 for (i = 0; ; i++) {
2629 if (!ipmi_defaults[i].port)
2630 break;
2631
2632 info = kzalloc(sizeof(*info), GFP_KERNEL);
2633 if (!info)
2634 return;
2635
4ff31d77
CK
2636#ifdef CONFIG_PPC_MERGE
2637 if (check_legacy_ioport(ipmi_defaults[i].port))
2638 continue;
2639#endif
2640
b0defcdb
CM
2641 info->addr_source = NULL;
2642
2643 info->si_type = ipmi_defaults[i].type;
2644 info->io_setup = port_setup;
2645 info->io.addr_data = ipmi_defaults[i].port;
2646 info->io.addr_type = IPMI_IO_ADDR_SPACE;
2647
2648 info->io.addr = NULL;
2649 info->io.regspacing = DEFAULT_REGSPACING;
2650 info->io.regsize = DEFAULT_REGSPACING;
2651 info->io.regshift = 0;
2652
2653 if (try_smi_init(info) == 0) {
2654 /* Found one... */
2655 printk(KERN_INFO "ipmi_si: Found default %s state"
2656 " machine at %s address 0x%lx\n",
2657 si_to_str[info->si_type],
2658 addr_space_to_str[info->io.addr_type],
2659 info->io.addr_data);
2660 return;
2661 }
2662 }
2663}
2664
2665static int is_new_interface(struct smi_info *info)
1da177e4 2666{
b0defcdb 2667 struct smi_info *e;
1da177e4 2668
b0defcdb
CM
2669 list_for_each_entry(e, &smi_infos, link) {
2670 if (e->io.addr_type != info->io.addr_type)
2671 continue;
2672 if (e->io.addr_data == info->io.addr_data)
2673 return 0;
2674 }
1da177e4 2675
b0defcdb
CM
2676 return 1;
2677}
1da177e4 2678
b0defcdb
CM
2679static int try_smi_init(struct smi_info *new_smi)
2680{
2681 int rv;
2682
2683 if (new_smi->addr_source) {
2684 printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
2685 " machine at %s address 0x%lx, slave address 0x%x,"
2686 " irq %d\n",
2687 new_smi->addr_source,
2688 si_to_str[new_smi->si_type],
2689 addr_space_to_str[new_smi->io.addr_type],
2690 new_smi->io.addr_data,
2691 new_smi->slave_addr, new_smi->irq);
2692 }
2693
d6dfd131 2694 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2695 if (!is_new_interface(new_smi)) {
2696 printk(KERN_WARNING "ipmi_si: duplicate interface\n");
2697 rv = -EBUSY;
2698 goto out_err;
2699 }
1da177e4
LT
2700
2701 /* So we know not to free it unless we have allocated one. */
2702 new_smi->intf = NULL;
2703 new_smi->si_sm = NULL;
2704 new_smi->handlers = NULL;
2705
b0defcdb
CM
2706 switch (new_smi->si_type) {
2707 case SI_KCS:
1da177e4 2708 new_smi->handlers = &kcs_smi_handlers;
b0defcdb
CM
2709 break;
2710
2711 case SI_SMIC:
1da177e4 2712 new_smi->handlers = &smic_smi_handlers;
b0defcdb
CM
2713 break;
2714
2715 case SI_BT:
1da177e4 2716 new_smi->handlers = &bt_smi_handlers;
b0defcdb
CM
2717 break;
2718
2719 default:
1da177e4
LT
2720 /* No support for anything else yet. */
2721 rv = -EIO;
2722 goto out_err;
2723 }
2724
2725 /* Allocate the state machine's data and initialize it. */
2726 new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
b0defcdb 2727 if (!new_smi->si_sm) {
1da177e4
LT
2728 printk(" Could not allocate state machine memory\n");
2729 rv = -ENOMEM;
2730 goto out_err;
2731 }
2732 new_smi->io_size = new_smi->handlers->init_data(new_smi->si_sm,
2733 &new_smi->io);
2734
2735 /* Now that we know the I/O size, we can set up the I/O. */
2736 rv = new_smi->io_setup(new_smi);
2737 if (rv) {
2738 printk(" Could not set up I/O space\n");
2739 goto out_err;
2740 }
2741
2742 spin_lock_init(&(new_smi->si_lock));
2743 spin_lock_init(&(new_smi->msg_lock));
2744 spin_lock_init(&(new_smi->count_lock));
2745
2746 /* Do low-level detection first. */
2747 if (new_smi->handlers->detect(new_smi->si_sm)) {
b0defcdb
CM
2748 if (new_smi->addr_source)
2749 printk(KERN_INFO "ipmi_si: Interface detection"
2750 " failed\n");
1da177e4
LT
2751 rv = -ENODEV;
2752 goto out_err;
2753 }
2754
2755 /* Attempt a get device id command. If it fails, we probably
b0defcdb 2756 don't have a BMC here. */
1da177e4 2757 rv = try_get_dev_id(new_smi);
b0defcdb
CM
2758 if (rv) {
2759 if (new_smi->addr_source)
2760 printk(KERN_INFO "ipmi_si: There appears to be no BMC"
2761 " at this location\n");
1da177e4 2762 goto out_err;
b0defcdb 2763 }
1da177e4 2764
3ae0e0f9 2765 setup_oem_data_handler(new_smi);
ea94027b 2766 setup_xaction_handlers(new_smi);
3ae0e0f9 2767
1da177e4
LT
2768 INIT_LIST_HEAD(&(new_smi->xmit_msgs));
2769 INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
2770 new_smi->curr_msg = NULL;
2771 atomic_set(&new_smi->req_events, 0);
2772 new_smi->run_to_completion = 0;
2773
2774 new_smi->interrupt_disabled = 0;
a9a2c44f 2775 atomic_set(&new_smi->stop_operation, 0);
b0defcdb
CM
2776 new_smi->intf_num = smi_num;
2777 smi_num++;
1da177e4
LT
2778
2779 /* Start clearing the flags before we enable interrupts or the
2780 timer to avoid racing with the timer. */
2781 start_clear_flags(new_smi);
2782 /* IRQ is defined to be set when non-zero. */
2783 if (new_smi->irq)
2784 new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
2785
50c812b2
CM
2786 if (!new_smi->dev) {
2787 /* If we don't already have a device from something
2788 * else (like PCI), then register a new one. */
2789 new_smi->pdev = platform_device_alloc("ipmi_si",
2790 new_smi->intf_num);
2791 if (!new_smi->pdev) {
 rv = -ENOMEM;
2792 printk(KERN_ERR
2793 "ipmi_si_intf:"
2794 " Unable to allocate platform device\n");
453823ba 2795 goto out_err;
50c812b2
CM
2796 }
2797 new_smi->dev = &new_smi->pdev->dev;
2798 new_smi->dev->driver = &ipmi_driver;
2799
b48f5457 2800 rv = platform_device_add(new_smi->pdev);
50c812b2
CM
2801 if (rv) {
2802 printk(KERN_ERR
2803 "ipmi_si_intf:"
2804 " Unable to register system interface device:"
2805 " %d\n",
2806 rv);
453823ba 2807 goto out_err;
50c812b2
CM
2808 }
2809 new_smi->dev_registered = 1;
2810 }
2811
1da177e4
LT
2812 rv = ipmi_register_smi(&handlers,
2813 new_smi,
50c812b2
CM
2814 &new_smi->device_id,
2815 new_smi->dev,
759643b8 2816 "bmc",
453823ba 2817 new_smi->slave_addr);
1da177e4
LT
2818 if (rv) {
2819 printk(KERN_ERR
2820 "ipmi_si: Unable to register device: error %d\n",
2821 rv);
2822 goto out_err_stop_timer;
2823 }
2824
2825 rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
2826 type_file_read_proc, NULL,
2827 new_smi, THIS_MODULE);
2828 if (rv) {
2829 printk(KERN_ERR
2830 "ipmi_si: Unable to create proc entry: %d\n",
2831 rv);
2832 goto out_err_stop_timer;
2833 }
2834
2835 rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
2836 stat_file_read_proc, NULL,
2837 new_smi, THIS_MODULE);
2838 if (rv) {
2839 printk(KERN_ERR
2840 "ipmi_si: Unable to create proc entry: %d\n",
2841 rv);
2842 goto out_err_stop_timer;
2843 }
2844
b361e27b
CM
2845 rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
2846 param_read_proc, NULL,
2847 new_smi, THIS_MODULE);
2848 if (rv) {
2849 printk(KERN_ERR
2850 "ipmi_si: Unable to create proc entry: %d\n",
2851 rv);
2852 goto out_err_stop_timer;
2853 }
2854
b0defcdb
CM
2855 list_add_tail(&new_smi->link, &smi_infos);
2856
d6dfd131 2857 mutex_unlock(&smi_infos_lock);
1da177e4 2858
8f14137e 2859 printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
1da177e4
LT
2860
2861 return 0;
2862
2863 out_err_stop_timer:
a9a2c44f
CM
2864 atomic_inc(&new_smi->stop_operation);
2865 wait_for_timer_and_thread(new_smi);
1da177e4
LT
2866
2867 out_err:
2868 if (new_smi->intf)
2869 ipmi_unregister_smi(new_smi->intf);
2870
b0defcdb
CM
2871 if (new_smi->irq_cleanup)
2872 new_smi->irq_cleanup(new_smi);
1da177e4
LT
2873
2874 /* Wait until we know that we are out of any interrupt
2875 handlers might have been running before we freed the
2876 interrupt. */
fbd568a3 2877 synchronize_sched();
1da177e4
LT
2878
2879 if (new_smi->si_sm) {
2880 if (new_smi->handlers)
2881 new_smi->handlers->cleanup(new_smi->si_sm);
2882 kfree(new_smi->si_sm);
2883 }
b0defcdb
CM
2884 if (new_smi->addr_source_cleanup)
2885 new_smi->addr_source_cleanup(new_smi);
7767e126
PG
2886 if (new_smi->io_cleanup)
2887 new_smi->io_cleanup(new_smi);
1da177e4 2888
50c812b2
CM
2889 if (new_smi->dev_registered)
2890 platform_device_unregister(new_smi->pdev);
2891
2892 kfree(new_smi);
2893
d6dfd131 2894 mutex_unlock(&smi_infos_lock);
b0defcdb 2895
1da177e4
LT
2896 return rv;
2897}
2898
b0defcdb 2899static __devinit int init_ipmi_si(void)
1da177e4 2900{
1da177e4
LT
2901 int i;
2902 char *str;
50c812b2 2903 int rv;
1da177e4
LT
2904
2905 if (initialized)
2906 return 0;
2907 initialized = 1;
2908
50c812b2
CM
2909 /* Register the device drivers. */
2910 rv = driver_register(&ipmi_driver);
2911 if (rv) {
2912 printk(KERN_ERR
2913 "init_ipmi_si: Unable to register driver: %d\n",
2914 rv);
2915 return rv;
2916 }
2917
2918
1da177e4
LT
2919 /* Parse out the si_type string into its components. */
2920 str = si_type_str;
2921 if (*str != '\0') {
e8b33617 2922 for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
1da177e4
LT
2923 si_type[i] = str;
2924 str = strchr(str, ',');
2925 if (str) {
2926 *str = '\0';
2927 str++;
2928 } else {
2929 break;
2930 }
2931 }
2932 }
2933
1fdd75bd 2934 printk(KERN_INFO "IPMI System Interface driver.\n");
1da177e4 2935
b0defcdb
CM
2936 hardcode_find_bmc();
2937
a9fad4cc 2938#ifdef CONFIG_DMI
b224cd3a 2939 dmi_find_bmc();
1da177e4
LT
2940#endif
2941
b0defcdb 2942#ifdef CONFIG_ACPI
1d5636cc 2943 acpi_find_bmc();
b0defcdb 2944#endif
1da177e4 2945
b0defcdb 2946#ifdef CONFIG_PCI
168b35a7
CM
2947 rv = pci_register_driver(&ipmi_pci_driver);
2948 if (rv) {
2949 printk(KERN_ERR
2950 "init_ipmi_si: Unable to register PCI driver: %d\n",
2951 rv);
2952 }
b0defcdb
CM
2953#endif
2954
dba9b4f6
CM
2955#ifdef CONFIG_PPC_OF
2956 of_register_platform_driver(&ipmi_of_platform_driver);
2957#endif
2958
b0defcdb 2959 if (si_trydefaults) {
d6dfd131 2960 mutex_lock(&smi_infos_lock);
b0defcdb
CM
2961 if (list_empty(&smi_infos)) {
2962 /* No BMC was found, try defaults. */
d6dfd131 2963 mutex_unlock(&smi_infos_lock);
b0defcdb
CM
2964 default_find_bmc();
2965 } else {
d6dfd131 2966 mutex_unlock(&smi_infos_lock);
b0defcdb 2967 }
1da177e4
LT
2968 }
2969
d6dfd131 2970 mutex_lock(&smi_infos_lock);
b361e27b 2971 if (unload_when_empty && list_empty(&smi_infos)) {
d6dfd131 2972 mutex_unlock(&smi_infos_lock);
b0defcdb
CM
2973#ifdef CONFIG_PCI
2974 pci_unregister_driver(&ipmi_pci_driver);
2975#endif
10fb62e5
CK
2976
2977#ifdef CONFIG_PPC_OF
2978 of_unregister_platform_driver(&ipmi_of_platform_driver);
2979#endif
55ebcc38 2980 driver_unregister(&ipmi_driver);
1da177e4
LT
2981 printk(KERN_WARNING "ipmi_si: Unable to find any System Interface(s)\n");
2982 return -ENODEV;
b0defcdb 2983 } else {
d6dfd131 2984 mutex_unlock(&smi_infos_lock);
b0defcdb 2985 return 0;
1da177e4 2986 }
1da177e4
LT
2987}
2988module_init(init_ipmi_si);
2989
b361e27b 2990static void cleanup_one_si(struct smi_info *to_clean)
1da177e4
LT
2991{
2992 int rv;
2993 unsigned long flags;
2994
b0defcdb 2995 if (!to_clean)
1da177e4
LT
2996 return;
2997
b0defcdb
CM
2998 list_del(&to_clean->link);
2999
ee6cd5f8 3000 /* Tell the driver that we are shutting down. */
a9a2c44f 3001 atomic_inc(&to_clean->stop_operation);
b0defcdb 3002
ee6cd5f8
CM
3003 /* Make sure the timer and thread are stopped and will not run
3004 again. */
a9a2c44f 3005 wait_for_timer_and_thread(to_clean);
1da177e4 3006
ee6cd5f8
CM
3007 /* Timeouts are stopped, now make sure the interrupts are off
3008 for the device. A little tricky with locks to make sure
3009 there are no races. */
3010 spin_lock_irqsave(&to_clean->si_lock, flags);
3011 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3012 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3013 poll(to_clean);
3014 schedule_timeout_uninterruptible(1);
3015 spin_lock_irqsave(&to_clean->si_lock, flags);
3016 }
3017 disable_si_irq(to_clean);
3018 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3019 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3020 poll(to_clean);
3021 schedule_timeout_uninterruptible(1);
3022 }
3023
3024 /* Clean up interrupts and make sure that everything is done. */
3025 if (to_clean->irq_cleanup)
3026 to_clean->irq_cleanup(to_clean);
e8b33617 3027 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
1da177e4 3028 poll(to_clean);
da4cd8df 3029 schedule_timeout_uninterruptible(1);
1da177e4
LT
3030 }
3031
3032 rv = ipmi_unregister_smi(to_clean->intf);
3033 if (rv) {
3034 printk(KERN_ERR
3035 "ipmi_si: Unable to unregister device: errno=%d\n",
3036 rv);
3037 }
3038
3039 to_clean->handlers->cleanup(to_clean->si_sm);
3040
3041 kfree(to_clean->si_sm);
3042
b0defcdb
CM
3043 if (to_clean->addr_source_cleanup)
3044 to_clean->addr_source_cleanup(to_clean);
7767e126
PG
3045 if (to_clean->io_cleanup)
3046 to_clean->io_cleanup(to_clean);
50c812b2
CM
3047
3048 if (to_clean->dev_registered)
3049 platform_device_unregister(to_clean->pdev);
3050
3051 kfree(to_clean);
1da177e4
LT
3052}
3053
3054static __exit void cleanup_ipmi_si(void)
3055{
b0defcdb 3056 struct smi_info *e, *tmp_e;
1da177e4 3057
b0defcdb 3058 if (!initialized)
1da177e4
LT
3059 return;
3060
b0defcdb
CM
3061#ifdef CONFIG_PCI
3062 pci_unregister_driver(&ipmi_pci_driver);
3063#endif
3064
dba9b4f6
CM
3065#ifdef CONFIG_PPC_OF
3066 of_unregister_platform_driver(&ipmi_of_platform_driver);
3067#endif
3068
d6dfd131 3069 mutex_lock(&smi_infos_lock);
b0defcdb
CM
3070 list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
3071 cleanup_one_si(e);
d6dfd131 3072 mutex_unlock(&smi_infos_lock);
50c812b2
CM
3073
3074 driver_unregister(&ipmi_driver);
1da177e4
LT
3075}
3076module_exit(cleanup_ipmi_si);
3077
3078MODULE_LICENSE("GPL");
1fdd75bd
CM
3079MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
3080MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");