4 * Incoming and outgoing message routing for an IPMI interface.
6 * Author: MontaVista Software, Inc.
7 * Corey Minyard <minyard@mvista.com>
10 * Copyright 2002 MontaVista Software Inc.
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
18 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
19 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
23 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
24 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
26 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
27 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 675 Mass Ave, Cambridge, MA 02139, USA.
34 #include <linux/module.h>
35 #include <linux/errno.h>
36 #include <asm/system.h>
37 #include <linux/poll.h>
38 #include <linux/spinlock.h>
39 #include <linux/mutex.h>
40 #include <linux/slab.h>
41 #include <linux/ipmi.h>
42 #include <linux/ipmi_smi.h>
43 #include <linux/notifier.h>
44 #include <linux/init.h>
45 #include <linux/proc_fs.h>
46 #include <linux/rcupdate.h>
/* Prefix for all kernel log messages emitted by this file. */
#define PFX "IPMI message handler: "

/* Version string reported via the proc interface / module info. */
#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);

/* Non-zero once ipmi_init_msghandler() has successfully run. */
static int initialized;

/* NOTE(review): the matching "#ifdef CONFIG_PROC_FS" line was lost in this
   extraction; the #endif below closes it. */
static struct proc_dir_entry *proc_ipmi_root;
#endif /* CONFIG_PROC_FS */

/* Remain in auto-maintenance mode for this amount of time (in ms). */
#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000

/* Cap on queued events per interface before new events are dropped. */
#define MAX_EVENTS_IN_QUEUE 25

/* Don't let a message sit in a queue forever, always time it with at lest
   the max message timer.  This is in milliseconds. */
#define MAX_MSG_TIMEOUT 60000
72 * The main "user" data structure.
76 struct list_head link;
78 /* Set to "0" when the user is destroyed. */
83 /* The upper layer that handles receive messages. */
84 struct ipmi_user_hndl *handler;
87 /* The interface this user is bound to. */
90 /* Does this interface receive IPMI events? */
96 struct list_head link;
104 * This is used to form a linked lised during mass deletion.
105 * Since this is in an RCU list, we cannot use the link above
106 * or change any data until the RCU period completes. So we
107 * use this next variable during mass deletion so we can have
108 * a list and don't have to wait and restart the search on
109 * every individual deletion of a command. */
110 struct cmd_rcvr *next;
115 unsigned int inuse : 1;
116 unsigned int broadcast : 1;
118 unsigned long timeout;
119 unsigned long orig_timeout;
120 unsigned int retries_left;
122 /* To verify on an incoming send message response that this is
123 the message that the response is for, we keep a sequence id
124 and increment it every time we send a message. */
127 /* This is held so we can properly respond to the message on a
128 timeout, and it is used to hold the temporary data for
129 retransmission, too. */
130 struct ipmi_recv_msg *recv_msg;
/* Store the information in a msgid (long) to allow us to find a
   sequence table entry from the msgid.  The sequence number lives in
   bits 26+ (6 bits, IPMI_IPMB_NUM_SEQ == 64) and the per-slot seqid in
   the low 26 bits.  The masks below were previously inconsistent
   (store used 0xff/0x3ffffff while get used 0x3f/0x3fffff), so a seqid
   in the range 0x400000..0x3ffffff would not round-trip; they now all
   agree on 6 bits of seq and 26 bits of seqid. */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)                          \
	do {                                                           \
		seq = (((msgid) >> 26) & 0x3f);                        \
		seqid = ((msgid) & 0x3ffffff);                         \
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
147 unsigned char medium;
148 unsigned char protocol;
150 /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
151 but may be changed by the user. */
152 unsigned char address;
154 /* My LUN. This should generally stay the SMS LUN, but just in
159 #ifdef CONFIG_PROC_FS
160 struct ipmi_proc_entry
163 struct ipmi_proc_entry *next;
169 struct platform_device *dev;
170 struct ipmi_device_id id;
171 unsigned char guid[16];
174 struct kref refcount;
176 /* bmc device attributes */
177 struct device_attribute device_id_attr;
178 struct device_attribute provides_dev_sdrs_attr;
179 struct device_attribute revision_attr;
180 struct device_attribute firmware_rev_attr;
181 struct device_attribute version_attr;
182 struct device_attribute add_dev_support_attr;
183 struct device_attribute manufacturer_id_attr;
184 struct device_attribute product_id_attr;
185 struct device_attribute guid_attr;
186 struct device_attribute aux_firmware_rev_attr;
189 #define IPMI_IPMB_NUM_SEQ 64
190 #define IPMI_MAX_CHANNELS 16
193 /* What interface number are we? */
196 struct kref refcount;
198 /* Used for a list of interfaces. */
199 struct list_head link;
201 /* The list of upper layers that are using me. seq_lock
203 struct list_head users;
205 /* Information to supply to users. */
206 unsigned char ipmi_version_major;
207 unsigned char ipmi_version_minor;
209 /* Used for wake ups at startup. */
210 wait_queue_head_t waitq;
212 struct bmc_device *bmc;
216 /* This is the lower-layer's sender routine. Note that you
217 * must either be holding the ipmi_interfaces_mutex or be in
218 * an umpreemptible region to use this. You must fetch the
219 * value into a local variable and make sure it is not NULL. */
220 struct ipmi_smi_handlers *handlers;
223 #ifdef CONFIG_PROC_FS
224 /* A list of proc entries for this interface. */
225 struct mutex proc_entry_lock;
226 struct ipmi_proc_entry *proc_entries;
229 /* Driver-model device for the system interface. */
230 struct device *si_dev;
232 /* A table of sequence numbers for this interface. We use the
233 sequence numbers for IPMB messages that go out of the
234 interface to match them up with their responses. A routine
235 is called periodically to time the items in this list. */
237 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
240 /* Messages that were delayed for some reason (out of memory,
241 for instance), will go in here to be processed later in a
242 periodic timer interrupt. */
243 spinlock_t waiting_msgs_lock;
244 struct list_head waiting_msgs;
246 /* The list of command receivers that are registered for commands
247 on this interface. */
248 struct mutex cmd_rcvrs_mutex;
249 struct list_head cmd_rcvrs;
251 /* Events that were queues because no one was there to receive
253 spinlock_t events_lock; /* For dealing with event stuff. */
254 struct list_head waiting_events;
255 unsigned int waiting_events_count; /* How many events in queue? */
256 char delivering_events;
257 char event_msg_printed;
259 /* The event receiver for my BMC, only really used at panic
260 shutdown as a place to store this. */
261 unsigned char event_receiver;
262 unsigned char event_receiver_lun;
263 unsigned char local_sel_device;
264 unsigned char local_event_generator;
266 /* For handling of maintenance mode. */
267 int maintenance_mode;
268 int maintenance_mode_enable;
269 int auto_maintenance_timeout;
270 spinlock_t maintenance_mode_lock; /* Used in a timer... */
272 /* A cheap hack, if this is non-null and a message to an
273 interface comes in with a NULL user, call this routine with
274 it. Note that the message will still be freed by the
275 caller. This only works on the system interface. */
276 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
278 /* When we are scanning the channels for an SMI, this will
279 tell which channel we are scanning. */
282 /* Channel information */
283 struct ipmi_channel channels[IPMI_MAX_CHANNELS];
286 struct proc_dir_entry *proc_dir;
287 char proc_dir_name[10];
289 spinlock_t counter_lock; /* For making counters atomic. */
291 /* Commands we got that were invalid. */
292 unsigned int sent_invalid_commands;
294 /* Commands we sent to the MC. */
295 unsigned int sent_local_commands;
296 /* Responses from the MC that were delivered to a user. */
297 unsigned int handled_local_responses;
298 /* Responses from the MC that were not delivered to a user. */
299 unsigned int unhandled_local_responses;
301 /* Commands we sent out to the IPMB bus. */
302 unsigned int sent_ipmb_commands;
303 /* Commands sent on the IPMB that had errors on the SEND CMD */
304 unsigned int sent_ipmb_command_errs;
305 /* Each retransmit increments this count. */
306 unsigned int retransmitted_ipmb_commands;
307 /* When a message times out (runs out of retransmits) this is
309 unsigned int timed_out_ipmb_commands;
311 /* This is like above, but for broadcasts. Broadcasts are
312 *not* included in the above count (they are expected to
314 unsigned int timed_out_ipmb_broadcasts;
316 /* Responses I have sent to the IPMB bus. */
317 unsigned int sent_ipmb_responses;
319 /* The response was delivered to the user. */
320 unsigned int handled_ipmb_responses;
321 /* The response had invalid data in it. */
322 unsigned int invalid_ipmb_responses;
323 /* The response didn't have anyone waiting for it. */
324 unsigned int unhandled_ipmb_responses;
326 /* Commands we sent out to the IPMB bus. */
327 unsigned int sent_lan_commands;
328 /* Commands sent on the IPMB that had errors on the SEND CMD */
329 unsigned int sent_lan_command_errs;
330 /* Each retransmit increments this count. */
331 unsigned int retransmitted_lan_commands;
332 /* When a message times out (runs out of retransmits) this is
334 unsigned int timed_out_lan_commands;
336 /* Responses I have sent to the IPMB bus. */
337 unsigned int sent_lan_responses;
339 /* The response was delivered to the user. */
340 unsigned int handled_lan_responses;
341 /* The response had invalid data in it. */
342 unsigned int invalid_lan_responses;
343 /* The response didn't have anyone waiting for it. */
344 unsigned int unhandled_lan_responses;
346 /* The command was delivered to the user. */
347 unsigned int handled_commands;
348 /* The command had invalid data in it. */
349 unsigned int invalid_commands;
350 /* The command didn't have anyone waiting for it. */
351 unsigned int unhandled_commands;
353 /* Invalid data in an event. */
354 unsigned int invalid_events;
356 /* Events that were received with the proper format. */
360 * run_to_completion duplicate of smb_info, smi_info
361 * and ipmi_serial_info structures. Used to decrease numbers of
362 * parameters passed by "low" level IPMI code.
364 int run_to_completion;
366 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
/*
 * The driver model view of the IPMI messaging driver.
 */
static struct device_driver ipmidriver = {
	/* NOTE(review): the .name initializer line was lost in this
	   extraction; only the bus assignment survives here. */
	.bus = &platform_bus_type
static DEFINE_MUTEX(ipmidriver_mutex);

/* All registered interfaces, protected by ipmi_interfaces_mutex. */
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/* List of watchers that want to know when smi's are added and
   removed; protected by smi_watchers_mutex. */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
386 static void free_recv_msg_list(struct list_head *q)
388 struct ipmi_recv_msg *msg, *msg2;
390 list_for_each_entry_safe(msg, msg2, q, link) {
391 list_del(&msg->link);
392 ipmi_free_recv_msg(msg);
396 static void free_smi_msg_list(struct list_head *q)
398 struct ipmi_smi_msg *msg, *msg2;
400 list_for_each_entry_safe(msg, msg2, q, link) {
401 list_del(&msg->link);
402 ipmi_free_smi_msg(msg);
406 static void clean_up_interface_data(ipmi_smi_t intf)
409 struct cmd_rcvr *rcvr, *rcvr2;
410 struct list_head list;
412 free_smi_msg_list(&intf->waiting_msgs);
413 free_recv_msg_list(&intf->waiting_events);
416 * Wholesale remove all the entries from the list in the
417 * interface and wait for RCU to know that none are in use.
419 mutex_lock(&intf->cmd_rcvrs_mutex);
420 INIT_LIST_HEAD(&list);
421 list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
422 mutex_unlock(&intf->cmd_rcvrs_mutex);
424 list_for_each_entry_safe(rcvr, rcvr2, &list, link)
427 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
428 if ((intf->seq_table[i].inuse)
429 && (intf->seq_table[i].recv_msg))
431 ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
436 static void intf_free(struct kref *ref)
438 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
440 clean_up_interface_data(intf);
/* Per-interface record built while delivering existing interfaces to a
   newly registered watcher.
   NOTE(review): interior lines (the intf_num / interface fields and the
   closing brace) were lost in this extraction. */
struct watcher_entry {
	struct list_head link;

/* Register a watcher that is told about every interface, including
   ones that already exist at registration time. */
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
	LIST_HEAD(to_deliver);
	struct watcher_entry *e, *e2;

	mutex_lock(&smi_watchers_mutex);

	mutex_lock(&ipmi_interfaces_mutex);

	/* Build a list of things to deliver. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		/* -1 means not yet fully initialized; skip it. */
		if (intf->intf_num == -1)
		e = kmalloc(sizeof(*e), GFP_KERNEL);
		/* Each entry pins its interface until delivery is done. */
		kref_get(&intf->refcount);
		e->intf_num = intf->intf_num;
		list_add_tail(&e->link, &to_deliver);

	/* We will succeed, so add it to the list. */
	list_add(&watcher->link, &smi_watchers);

	mutex_unlock(&ipmi_interfaces_mutex);

	/* Deliver outside ipmi_interfaces_mutex, dropping each pin. */
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		watcher->new_smi(e->intf_num, e->intf->si_dev);
		kref_put(&e->intf->refcount, intf_free);

	mutex_unlock(&smi_watchers_mutex);

	/* NOTE(review): error-unwind path — the kmalloc-failure label and
	   cleanup lines were lost in this extraction. */
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		kref_put(&e->intf->refcount, intf_free);
500 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
502 mutex_lock(&smi_watchers_mutex);
503 list_del(&(watcher->link));
504 mutex_unlock(&smi_watchers_mutex);
509 * Must be called with smi_watchers_mutex held.
512 call_smi_watchers(int i, struct device *dev)
514 struct ipmi_smi_watcher *w;
516 list_for_each_entry(w, &smi_watchers, link) {
517 if (try_module_get(w->owner)) {
519 module_put(w->owner);
525 ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
527 if (addr1->addr_type != addr2->addr_type)
530 if (addr1->channel != addr2->channel)
533 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
534 struct ipmi_system_interface_addr *smi_addr1
535 = (struct ipmi_system_interface_addr *) addr1;
536 struct ipmi_system_interface_addr *smi_addr2
537 = (struct ipmi_system_interface_addr *) addr2;
538 return (smi_addr1->lun == smi_addr2->lun);
541 if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
542 || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
544 struct ipmi_ipmb_addr *ipmb_addr1
545 = (struct ipmi_ipmb_addr *) addr1;
546 struct ipmi_ipmb_addr *ipmb_addr2
547 = (struct ipmi_ipmb_addr *) addr2;
549 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
550 && (ipmb_addr1->lun == ipmb_addr2->lun));
553 if (addr1->addr_type == IPMI_LAN_ADDR_TYPE) {
554 struct ipmi_lan_addr *lan_addr1
555 = (struct ipmi_lan_addr *) addr1;
556 struct ipmi_lan_addr *lan_addr2
557 = (struct ipmi_lan_addr *) addr2;
559 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
560 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
561 && (lan_addr1->session_handle
562 == lan_addr2->session_handle)
563 && (lan_addr1->lun == lan_addr2->lun));
569 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
571 if (len < sizeof(struct ipmi_system_interface_addr)) {
575 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
576 if (addr->channel != IPMI_BMC_CHANNEL)
581 if ((addr->channel == IPMI_BMC_CHANNEL)
582 || (addr->channel >= IPMI_MAX_CHANNELS)
583 || (addr->channel < 0))
586 if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
587 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
589 if (len < sizeof(struct ipmi_ipmb_addr)) {
595 if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
596 if (len < sizeof(struct ipmi_lan_addr)) {
605 unsigned int ipmi_addr_length(int addr_type)
607 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
608 return sizeof(struct ipmi_system_interface_addr);
610 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
611 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
613 return sizeof(struct ipmi_ipmb_addr);
616 if (addr_type == IPMI_LAN_ADDR_TYPE)
617 return sizeof(struct ipmi_lan_addr);
622 static void deliver_response(struct ipmi_recv_msg *msg)
625 ipmi_smi_t intf = msg->user_msg_data;
628 /* Special handling for NULL users. */
629 if (intf->null_user_handler) {
630 intf->null_user_handler(intf, msg);
631 spin_lock_irqsave(&intf->counter_lock, flags);
632 intf->handled_local_responses++;
633 spin_unlock_irqrestore(&intf->counter_lock, flags);
635 /* No handler, so give up. */
636 spin_lock_irqsave(&intf->counter_lock, flags);
637 intf->unhandled_local_responses++;
638 spin_unlock_irqrestore(&intf->counter_lock, flags);
640 ipmi_free_recv_msg(msg);
642 ipmi_user_t user = msg->user;
643 user->handler->ipmi_recv_hndl(msg, user->handler_data);
648 deliver_err_response(struct ipmi_recv_msg *msg, int err)
650 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
651 msg->msg_data[0] = err;
652 msg->msg.netfn |= 1; /* Convert to a response. */
653 msg->msg.data_len = 1;
654 msg->msg.data = msg->msg_data;
655 deliver_response(msg);
/* Find the next sequence number not being used and add the given
   message with the given timeout to the sequence table.  This must be
   called with the interface's seq_lock held.
   NOTE(review): the retries/broadcast/seq/seqid parameters, the local
   declarations, and the no-free-slot error path were lost in this
   extraction. */
static int intf_next_seq(ipmi_smi_t intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
	/* Scan at most one full lap of the table starting after the last
	   slot handed out. */
	for (i = intf->curr_seq;
	     (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ)
		if (!intf->seq_table[i].inuse)
	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/* Start with the maximum timeout, when the send response
		   comes in we will start the real timer. */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		/* A fresh per-slot seqid guards against stale responses. */
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
/* Return the receive message for the given sequence number and
   release the sequence number so it can be reused.  Some other data
   is passed in to be sure the message matches up correctly (to help
   guard against message coming in after their timeout and the
   sequence number being reused).
   NOTE(review): the seq/channel/cmd/netfn parameters, local
   declarations, and the lines that copy out *recv_msg and set the
   return value were lost in this extraction. */
static int intf_find_seq(ipmi_smi_t intf,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
	if (seq >= IPMI_IPMB_NUM_SEQ)
	spin_lock_irqsave(&(intf->seq_lock), flags);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		/* Only hand the slot back if every identifying field of
		   the stored message matches the response. */
		if ((msg->addr.channel == channel)
		    && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &(msg->addr))))
			intf->seq_table[seq].inuse = 0;
	spin_unlock_irqrestore(&(intf->seq_lock), flags);
/* Start the timer for a specific sequence table entry.
   NOTE(review): the msgid parameter, local declarations, and the
   return statement were lost in this extraction. */
static int intf_start_seq_timer(ipmi_smi_t intf,
	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&(intf->seq_lock), flags);
	/* We do this verification because the user can be deleted
	   while a message is outstanding. */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid))
		struct seq_table *ent = &(intf->seq_table[seq]);
		/* Switch from the maximum hold timeout to the real one. */
		ent->timeout = ent->orig_timeout;
	spin_unlock_irqrestore(&(intf->seq_lock), flags);
/* Got an error for the send message for a specific sequence number.
   Release the slot and deliver an error response to the waiting user.
   NOTE(review): the msgid/err parameters, local declarations, the slot
   release, and the return statement were lost in this extraction. */
static int intf_err_seq(ipmi_smi_t intf,
	struct ipmi_recv_msg *msg = NULL;

	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	spin_lock_irqsave(&(intf->seq_lock), flags);
	/* We do this verification because the user can be deleted
	   while a message is outstanding. */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid))
		struct seq_table *ent = &(intf->seq_table[seq]);
	spin_unlock_irqrestore(&(intf->seq_lock), flags);

	/* Deliver outside the lock; msg is only non-NULL on a match. */
	deliver_err_response(msg, err);
/* Create a new user of interface @if_num, delivering received
   messages through @handler.
   NOTE(review): several parameter lines, error labels, and the
   out-path cleanup were lost in this extraction. */
int ipmi_create_user(unsigned int if_num,
		     struct ipmi_user_hndl *handler,
	ipmi_user_t new_user;

	/* There is no module usecount here, because it's not
	   required.  Since this can only be used by and called from
	   other modules, they will implicitly use this module, and
	   thus this can't be removed unless the other modules are

	/* Make sure the driver is actually initialized, this handles
	   problems with initialization order. */
	rv = ipmi_init_msghandler();

	/* The init code doesn't return an error if it was turned
	   off, but it won't initialize.  Check that. */

	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
	/* Not found, return an error */

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = 0;

	if (!try_module_get(intf->handlers->owner)) {

	if (intf->handlers->inc_usecount) {
		rv = intf->handlers->inc_usecount(intf->send_info);
			/* Undo the module pin taken just above. */
			module_put(intf->handlers->owner);

	/* Hold the lock so intf->handlers is guaranteed to be good */
	mutex_unlock(&ipmi_interfaces_mutex);

	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/* Error path: drop the interface pin taken earlier. */
	kref_put(&intf->refcount, intf_free);
	mutex_unlock(&ipmi_interfaces_mutex);
889 static void free_user(struct kref *ref)
891 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
/* Tear down a user: unhook it from the interface's sequence table and
   command-receiver list, drop the lower layer's usecount, and release
   the interface and user references.
   NOTE(review): local declarations, the rcvrs chaining lines, the
   synchronize_rcu()/free loop, and the return were lost in this
   extraction. */
int ipmi_destroy_user(ipmi_user_t user)
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user))
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	mutex_lock(&ipmi_interfaces_mutex);
	if (intf->handlers) {
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	mutex_unlock(&ipmi_interfaces_mutex);

	/* Drop the ref this user held on its interface, then the
	   user's own ref. */
	kref_put(&intf->refcount, intf_free);

	kref_put(&user->refcount, free_user);
956 void ipmi_get_version(ipmi_user_t user,
957 unsigned char *major,
958 unsigned char *minor)
960 *major = user->intf->ipmi_version_major;
961 *minor = user->intf->ipmi_version_minor;
964 int ipmi_set_my_address(ipmi_user_t user,
965 unsigned int channel,
966 unsigned char address)
968 if (channel >= IPMI_MAX_CHANNELS)
970 user->intf->channels[channel].address = address;
974 int ipmi_get_my_address(ipmi_user_t user,
975 unsigned int channel,
976 unsigned char *address)
978 if (channel >= IPMI_MAX_CHANNELS)
980 *address = user->intf->channels[channel].address;
984 int ipmi_set_my_LUN(ipmi_user_t user,
985 unsigned int channel,
988 if (channel >= IPMI_MAX_CHANNELS)
990 user->intf->channels[channel].lun = LUN & 0x3;
994 int ipmi_get_my_LUN(ipmi_user_t user,
995 unsigned int channel,
996 unsigned char *address)
998 if (channel >= IPMI_MAX_CHANNELS)
1000 *address = user->intf->channels[channel].lun;
1004 int ipmi_get_maintenance_mode(ipmi_user_t user)
1007 unsigned long flags;
1009 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1010 mode = user->intf->maintenance_mode;
1011 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1015 EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1017 static void maintenance_mode_update(ipmi_smi_t intf)
1019 if (intf->handlers->set_maintenance_mode)
1020 intf->handlers->set_maintenance_mode(
1021 intf->send_info, intf->maintenance_mode_enable);
/* Change the interface's maintenance mode (OFF / ON / AUTO).
   NOTE(review): the switch statement's opening, break statements,
   default (-EINVAL) case, and the return were lost in this
   extraction. */
int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
	unsigned long flags;
	ipmi_smi_t intf = user->intf;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			intf->maintenance_mode = mode;
			/* In AUTO, enablement tracks the running timer. */
			intf->maintenance_mode_enable
				= (intf->auto_maintenance_timeout > 0);

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode = mode;
			intf->maintenance_mode_enable = 0;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode = mode;
			intf->maintenance_mode_enable = 1;

		/* Tell the lower layer about the new state. */
		maintenance_mode_update(intf);
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
/* Turn event delivery for @user on or off.  When turning it on, any
   events queued on the interface are drained to the user here.
   NOTE(review): the out label, msg->user assignment, and the return
   were lost in this extraction. */
int ipmi_set_gets_events(ipmi_user_t user, int val)
	unsigned long        flags;
	ipmi_smi_t           intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head     msgs;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	user->gets_events = val;

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			printk(KERN_WARNING PFX "Event queue no longer"
			intf->event_msg_printed = 0;

		/* Flag so concurrent callers back off (see check above),
		   then drop the lock for the actual delivery. */
		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			/* Each delivered message carries a user ref. */
			kref_get(&user->refcount);
			deliver_response(msg);

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;

	spin_unlock_irqrestore(&intf->events_lock, flags);
1112 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1113 unsigned char netfn,
1117 struct cmd_rcvr *rcvr;
1119 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1120 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1121 && (rcvr->chans & (1 << chan)))
1127 static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1128 unsigned char netfn,
1132 struct cmd_rcvr *rcvr;
1134 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1135 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1136 && (rcvr->chans & chans))
1142 int ipmi_register_for_cmd(ipmi_user_t user,
1143 unsigned char netfn,
1147 ipmi_smi_t intf = user->intf;
1148 struct cmd_rcvr *rcvr;
1152 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1156 rcvr->netfn = netfn;
1157 rcvr->chans = chans;
1160 mutex_lock(&intf->cmd_rcvrs_mutex);
1161 /* Make sure the command/netfn is not already registered. */
1162 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1167 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1170 mutex_unlock(&intf->cmd_rcvrs_mutex);
/* Drop @user's registration for (netfn, cmd) on the channels in
   @chans.  Receivers left with no channels are unlinked and, after an
   RCU grace period, freed via the rcvr->next chain.
   NOTE(review): the continue statements, the rcvrs chaining lines, the
   synchronize_rcu()/free loop, and the return were lost in this
   extraction. */
int ipmi_unregister_for_cmd(ipmi_user_t   user,
			    unsigned char netfn,
	ipmi_smi_t      intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		/* Only look at channels the caller asked about. */
		if (((1 << i) & chans) == 0)
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr->user == user) {
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				/* No channels left: unlink; actual free
				   must wait for the RCU grace period. */
				list_del_rcu(&rcvr->link);
	mutex_unlock(&intf->cmd_rcvrs_mutex);
/* Compute the IPMB 2's-complement checksum of @size bytes at @data:
   the value such that the sum of all bytes plus the checksum is 0
   (mod 256), per the IPMI spec. */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
/* Build a Send Message request carrying an IPMB-framed command in
   smi_msg->data.  The offset variable i shifts the frame when a
   broadcast byte is prepended.
   NOTE(review): the msgid/broadcast parameters, the declaration of i,
   and the broadcast conditional around data[3] were lost in this
   extraction. */
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   unsigned char ipmb_seq,
				   unsigned char source_address,
				   unsigned char source_lun)
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	/* Broadcast prefix byte (only written on broadcasts). */
	smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	/* Header checksum covers rsAddr and netFn/rsLUN. */
	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[i+9]), msg->data,
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[i+6]),
				smi_msg->data_size-6);

	/* Add on the checksum size and the offset from the
	   broadcast byte. */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
/* Build a Send Message request carrying a LAN-framed command in
   smi_msg->data.  Same framing as format_ipmb_msg() but with session
   handle and SWIDs instead of slave addresses, and no broadcast
   offset.
   NOTE(review): the msgid parameter line was lost in this
   extraction. */
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	/* Header checksum covers remote SWID and netFn/LUN. */
	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[10]), msg->data,
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[7]),
				smi_msg->data_size-7);

	/* Add on the checksum size and the offset from the
	   broadcast byte. */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
1304 /* Separate from ipmi_request so that the user does not have to be
1305 supplied in certain circumstances (mainly at panic time). If
1306 messages are supplied, they will be freed, even if an error
1308 static int i_ipmi_request(ipmi_user_t user,
1310 struct ipmi_addr *addr,
1312 struct kernel_ipmi_msg *msg,
1313 void *user_msg_data,
1315 struct ipmi_recv_msg *supplied_recv,
1317 unsigned char source_address,
1318 unsigned char source_lun,
1320 unsigned int retry_time_ms)
1323 struct ipmi_smi_msg *smi_msg;
1324 struct ipmi_recv_msg *recv_msg;
1325 unsigned long flags;
1326 struct ipmi_smi_handlers *handlers;
1329 if (supplied_recv) {
1330 recv_msg = supplied_recv;
1332 recv_msg = ipmi_alloc_recv_msg();
1333 if (recv_msg == NULL) {
1337 recv_msg->user_msg_data = user_msg_data;
1340 smi_msg = (struct ipmi_smi_msg *) supplied_smi;
1342 smi_msg = ipmi_alloc_smi_msg();
1343 if (smi_msg == NULL) {
1344 ipmi_free_recv_msg(recv_msg);
1350 handlers = intf->handlers;
1356 recv_msg->user = user;
1358 kref_get(&user->refcount);
1359 recv_msg->msgid = msgid;
1360 /* Store the message to send in the receive message so timeout
1361 responses can get the proper response data. */
1362 recv_msg->msg = *msg;
1364 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
1365 struct ipmi_system_interface_addr *smi_addr;
1367 if (msg->netfn & 1) {
1368 /* Responses are not allowed to the SMI. */
1373 smi_addr = (struct ipmi_system_interface_addr *) addr;
1374 if (smi_addr->lun > 3) {
1375 spin_lock_irqsave(&intf->counter_lock, flags);
1376 intf->sent_invalid_commands++;
1377 spin_unlock_irqrestore(&intf->counter_lock, flags);
1382 memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));
1384 if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
1385 && ((msg->cmd == IPMI_SEND_MSG_CMD)
1386 || (msg->cmd == IPMI_GET_MSG_CMD)
1387 || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
1389 /* We don't let the user do these, since we manage
1390 the sequence numbers. */
1391 spin_lock_irqsave(&intf->counter_lock, flags);
1392 intf->sent_invalid_commands++;
1393 spin_unlock_irqrestore(&intf->counter_lock, flags);
1398 if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
1399 && ((msg->cmd == IPMI_COLD_RESET_CMD)
1400 || (msg->cmd == IPMI_WARM_RESET_CMD)))
1401 || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
1403 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1404 intf->auto_maintenance_timeout
1405 = IPMI_MAINTENANCE_MODE_TIMEOUT;
1406 if (!intf->maintenance_mode
1407 && !intf->maintenance_mode_enable)
1409 intf->maintenance_mode_enable = 1;
1410 maintenance_mode_update(intf);
1412 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
1416 if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
1417 spin_lock_irqsave(&intf->counter_lock, flags);
1418 intf->sent_invalid_commands++;
1419 spin_unlock_irqrestore(&intf->counter_lock, flags);
1424 smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
1425 smi_msg->data[1] = msg->cmd;
1426 smi_msg->msgid = msgid;
1427 smi_msg->user_data = recv_msg;
1428 if (msg->data_len > 0)
1429 memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
1430 smi_msg->data_size = msg->data_len + 2;
1431 spin_lock_irqsave(&intf->counter_lock, flags);
1432 intf->sent_local_commands++;
1433 spin_unlock_irqrestore(&intf->counter_lock, flags);
1434 } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
1435 || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
1437 struct ipmi_ipmb_addr *ipmb_addr;
1438 unsigned char ipmb_seq;
1442 if (addr->channel >= IPMI_MAX_CHANNELS) {
1443 spin_lock_irqsave(&intf->counter_lock, flags);
1444 intf->sent_invalid_commands++;
1445 spin_unlock_irqrestore(&intf->counter_lock, flags);
1450 if (intf->channels[addr->channel].medium
1451 != IPMI_CHANNEL_MEDIUM_IPMB)
1453 spin_lock_irqsave(&intf->counter_lock, flags);
1454 intf->sent_invalid_commands++;
1455 spin_unlock_irqrestore(&intf->counter_lock, flags);
1461 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
1462 retries = 0; /* Don't retry broadcasts. */
1466 if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
1467 /* Broadcasts add a zero at the beginning of the
1468 message, but otherwise is the same as an IPMB
1470 addr->addr_type = IPMI_IPMB_ADDR_TYPE;
1475 /* Default to 1 second retries. */
1476 if (retry_time_ms == 0)
1477 retry_time_ms = 1000;
1479 /* 9 for the header and 1 for the checksum, plus
1480 possibly one for the broadcast. */
1481 if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
1482 spin_lock_irqsave(&intf->counter_lock, flags);
1483 intf->sent_invalid_commands++;
1484 spin_unlock_irqrestore(&intf->counter_lock, flags);
1489 ipmb_addr = (struct ipmi_ipmb_addr *) addr;
1490 if (ipmb_addr->lun > 3) {
1491 spin_lock_irqsave(&intf->counter_lock, flags);
1492 intf->sent_invalid_commands++;
1493 spin_unlock_irqrestore(&intf->counter_lock, flags);
1498 memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
1500 if (recv_msg->msg.netfn & 0x1) {
1501 /* It's a response, so use the user's sequence
1503 spin_lock_irqsave(&intf->counter_lock, flags);
1504 intf->sent_ipmb_responses++;
1505 spin_unlock_irqrestore(&intf->counter_lock, flags);
1506 format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
1508 source_address, source_lun);
1510 /* Save the receive message so we can use it
1511 to deliver the response. */
1512 smi_msg->user_data = recv_msg;
1514 /* It's a command, so get a sequence for it. */
1516 spin_lock_irqsave(&(intf->seq_lock), flags);
1518 spin_lock(&intf->counter_lock);
1519 intf->sent_ipmb_commands++;
1520 spin_unlock(&intf->counter_lock);
1522 /* Create a sequence number with a 1 second
1523 timeout and 4 retries. */
1524 rv = intf_next_seq(intf,
1532 /* We have used up all the sequence numbers,
1533 probably, so abort. */
1534 spin_unlock_irqrestore(&(intf->seq_lock),
1539 /* Store the sequence number in the message,
1540 so that when the send message response
1541 comes back we can start the timer. */
1542 format_ipmb_msg(smi_msg, msg, ipmb_addr,
1543 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1544 ipmb_seq, broadcast,
1545 source_address, source_lun);
1547 /* Copy the message into the recv message data, so we
1548 can retransmit it later if necessary. */
1549 memcpy(recv_msg->msg_data, smi_msg->data,
1550 smi_msg->data_size);
1551 recv_msg->msg.data = recv_msg->msg_data;
1552 recv_msg->msg.data_len = smi_msg->data_size;
1554 /* We don't unlock until here, because we need
1555 to copy the completed message into the
1556 recv_msg before we release the lock.
1557 Otherwise, race conditions may bite us. I
1558 know that's pretty paranoid, but I prefer
1560 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1562 } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
1563 struct ipmi_lan_addr *lan_addr;
1564 unsigned char ipmb_seq;
1567 if (addr->channel >= IPMI_MAX_CHANNELS) {
1568 spin_lock_irqsave(&intf->counter_lock, flags);
1569 intf->sent_invalid_commands++;
1570 spin_unlock_irqrestore(&intf->counter_lock, flags);
1575 if ((intf->channels[addr->channel].medium
1576 != IPMI_CHANNEL_MEDIUM_8023LAN)
1577 && (intf->channels[addr->channel].medium
1578 != IPMI_CHANNEL_MEDIUM_ASYNC))
1580 spin_lock_irqsave(&intf->counter_lock, flags);
1581 intf->sent_invalid_commands++;
1582 spin_unlock_irqrestore(&intf->counter_lock, flags);
1589 /* Default to 1 second retries. */
1590 if (retry_time_ms == 0)
1591 retry_time_ms = 1000;
1593 /* 11 for the header and 1 for the checksum. */
1594 if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
1595 spin_lock_irqsave(&intf->counter_lock, flags);
1596 intf->sent_invalid_commands++;
1597 spin_unlock_irqrestore(&intf->counter_lock, flags);
1602 lan_addr = (struct ipmi_lan_addr *) addr;
1603 if (lan_addr->lun > 3) {
1604 spin_lock_irqsave(&intf->counter_lock, flags);
1605 intf->sent_invalid_commands++;
1606 spin_unlock_irqrestore(&intf->counter_lock, flags);
1611 memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
1613 if (recv_msg->msg.netfn & 0x1) {
1614 /* It's a response, so use the user's sequence
1616 spin_lock_irqsave(&intf->counter_lock, flags);
1617 intf->sent_lan_responses++;
1618 spin_unlock_irqrestore(&intf->counter_lock, flags);
1619 format_lan_msg(smi_msg, msg, lan_addr, msgid,
1622 /* Save the receive message so we can use it
1623 to deliver the response. */
1624 smi_msg->user_data = recv_msg;
1626 /* It's a command, so get a sequence for it. */
1628 spin_lock_irqsave(&(intf->seq_lock), flags);
1630 spin_lock(&intf->counter_lock);
1631 intf->sent_lan_commands++;
1632 spin_unlock(&intf->counter_lock);
1634 /* Create a sequence number with a 1 second
1635 timeout and 4 retries. */
1636 rv = intf_next_seq(intf,
1644 /* We have used up all the sequence numbers,
1645 probably, so abort. */
1646 spin_unlock_irqrestore(&(intf->seq_lock),
1651 /* Store the sequence number in the message,
1652 so that when the send message response
1653 comes back we can start the timer. */
1654 format_lan_msg(smi_msg, msg, lan_addr,
1655 STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
1656 ipmb_seq, source_lun);
1658 /* Copy the message into the recv message data, so we
1659 can retransmit it later if necessary. */
1660 memcpy(recv_msg->msg_data, smi_msg->data,
1661 smi_msg->data_size);
1662 recv_msg->msg.data = recv_msg->msg_data;
1663 recv_msg->msg.data_len = smi_msg->data_size;
1665 /* We don't unlock until here, because we need
1666 to copy the completed message into the
1667 recv_msg before we release the lock.
1668 Otherwise, race conditions may bite us. I
1669 know that's pretty paranoid, but I prefer
1671 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1674 /* Unknown address type. */
1675 spin_lock_irqsave(&intf->counter_lock, flags);
1676 intf->sent_invalid_commands++;
1677 spin_unlock_irqrestore(&intf->counter_lock, flags);
1685 for (m = 0; m < smi_msg->data_size; m++)
1686 printk(" %2.2x", smi_msg->data[m]);
1691 handlers->sender(intf->send_info, smi_msg, priority);
1698 ipmi_free_smi_msg(smi_msg);
1699 ipmi_free_recv_msg(recv_msg);
1703 static int check_addr(ipmi_smi_t intf,
1704 struct ipmi_addr *addr,
1705 unsigned char *saddr,
1708 if (addr->channel >= IPMI_MAX_CHANNELS)
1710 *lun = intf->channels[addr->channel].lun;
1711 *saddr = intf->channels[addr->channel].address;
1715 int ipmi_request_settime(ipmi_user_t user,
1716 struct ipmi_addr *addr,
1718 struct kernel_ipmi_msg *msg,
1719 void *user_msg_data,
1722 unsigned int retry_time_ms)
1724 unsigned char saddr, lun;
1729 rv = check_addr(user->intf, addr, &saddr, &lun);
1732 return i_ipmi_request(user,
1746 int ipmi_request_supply_msgs(ipmi_user_t user,
1747 struct ipmi_addr *addr,
1749 struct kernel_ipmi_msg *msg,
1750 void *user_msg_data,
1752 struct ipmi_recv_msg *supplied_recv,
1755 unsigned char saddr, lun;
1760 rv = check_addr(user->intf, addr, &saddr, &lun);
1763 return i_ipmi_request(user,
1777 #ifdef CONFIG_PROC_FS
1778 static int ipmb_file_read_proc(char *page, char **start, off_t off,
1779 int count, int *eof, void *data)
1781 char *out = (char *) page;
1782 ipmi_smi_t intf = data;
1786 for (i = 0; i < IPMI_MAX_CHANNELS; i++)
1787 rv += sprintf(out+rv, "%x ", intf->channels[i].address);
1788 out[rv-1] = '\n'; /* Replace the final space with a newline */
1794 static int version_file_read_proc(char *page, char **start, off_t off,
1795 int count, int *eof, void *data)
1797 char *out = (char *) page;
1798 ipmi_smi_t intf = data;
1800 return sprintf(out, "%d.%d\n",
1801 ipmi_version_major(&intf->bmc->id),
1802 ipmi_version_minor(&intf->bmc->id));
1805 static int stat_file_read_proc(char *page, char **start, off_t off,
1806 int count, int *eof, void *data)
1808 char *out = (char *) page;
1809 ipmi_smi_t intf = data;
1811 out += sprintf(out, "sent_invalid_commands: %d\n",
1812 intf->sent_invalid_commands);
1813 out += sprintf(out, "sent_local_commands: %d\n",
1814 intf->sent_local_commands);
1815 out += sprintf(out, "handled_local_responses: %d\n",
1816 intf->handled_local_responses);
1817 out += sprintf(out, "unhandled_local_responses: %d\n",
1818 intf->unhandled_local_responses);
1819 out += sprintf(out, "sent_ipmb_commands: %d\n",
1820 intf->sent_ipmb_commands);
1821 out += sprintf(out, "sent_ipmb_command_errs: %d\n",
1822 intf->sent_ipmb_command_errs);
1823 out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
1824 intf->retransmitted_ipmb_commands);
1825 out += sprintf(out, "timed_out_ipmb_commands: %d\n",
1826 intf->timed_out_ipmb_commands);
1827 out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n",
1828 intf->timed_out_ipmb_broadcasts);
1829 out += sprintf(out, "sent_ipmb_responses: %d\n",
1830 intf->sent_ipmb_responses);
1831 out += sprintf(out, "handled_ipmb_responses: %d\n",
1832 intf->handled_ipmb_responses);
1833 out += sprintf(out, "invalid_ipmb_responses: %d\n",
1834 intf->invalid_ipmb_responses);
1835 out += sprintf(out, "unhandled_ipmb_responses: %d\n",
1836 intf->unhandled_ipmb_responses);
1837 out += sprintf(out, "sent_lan_commands: %d\n",
1838 intf->sent_lan_commands);
1839 out += sprintf(out, "sent_lan_command_errs: %d\n",
1840 intf->sent_lan_command_errs);
1841 out += sprintf(out, "retransmitted_lan_commands: %d\n",
1842 intf->retransmitted_lan_commands);
1843 out += sprintf(out, "timed_out_lan_commands: %d\n",
1844 intf->timed_out_lan_commands);
1845 out += sprintf(out, "sent_lan_responses: %d\n",
1846 intf->sent_lan_responses);
1847 out += sprintf(out, "handled_lan_responses: %d\n",
1848 intf->handled_lan_responses);
1849 out += sprintf(out, "invalid_lan_responses: %d\n",
1850 intf->invalid_lan_responses);
1851 out += sprintf(out, "unhandled_lan_responses: %d\n",
1852 intf->unhandled_lan_responses);
1853 out += sprintf(out, "handled_commands: %d\n",
1854 intf->handled_commands);
1855 out += sprintf(out, "invalid_commands: %d\n",
1856 intf->invalid_commands);
1857 out += sprintf(out, "unhandled_commands: %d\n",
1858 intf->unhandled_commands);
1859 out += sprintf(out, "invalid_events: %d\n",
1860 intf->invalid_events);
1861 out += sprintf(out, "events: %d\n",
1864 return (out - ((char *) page));
1866 #endif /* CONFIG_PROC_FS */
1868 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
1869 read_proc_t *read_proc, write_proc_t *write_proc,
1870 void *data, struct module *owner)
1873 #ifdef CONFIG_PROC_FS
1874 struct proc_dir_entry *file;
1875 struct ipmi_proc_entry *entry;
1877 /* Create a list element. */
1878 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1881 entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
1886 strcpy(entry->name, name);
1888 file = create_proc_entry(name, 0, smi->proc_dir);
1895 file->read_proc = read_proc;
1896 file->write_proc = write_proc;
1897 file->owner = owner;
1899 mutex_lock(&smi->proc_entry_lock);
1900 /* Stick it on the list. */
1901 entry->next = smi->proc_entries;
1902 smi->proc_entries = entry;
1903 mutex_unlock(&smi->proc_entry_lock);
1905 #endif /* CONFIG_PROC_FS */
1910 static int add_proc_entries(ipmi_smi_t smi, int num)
1914 #ifdef CONFIG_PROC_FS
1915 sprintf(smi->proc_dir_name, "%d", num);
1916 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
1920 smi->proc_dir->owner = THIS_MODULE;
1924 rv = ipmi_smi_add_proc_entry(smi, "stats",
1925 stat_file_read_proc, NULL,
1929 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
1930 ipmb_file_read_proc, NULL,
1934 rv = ipmi_smi_add_proc_entry(smi, "version",
1935 version_file_read_proc, NULL,
1937 #endif /* CONFIG_PROC_FS */
1942 static void remove_proc_entries(ipmi_smi_t smi)
1944 #ifdef CONFIG_PROC_FS
1945 struct ipmi_proc_entry *entry;
1947 mutex_lock(&smi->proc_entry_lock);
1948 while (smi->proc_entries) {
1949 entry = smi->proc_entries;
1950 smi->proc_entries = entry->next;
1952 remove_proc_entry(entry->name, smi->proc_dir);
1956 mutex_unlock(&smi->proc_entry_lock);
1957 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
1958 #endif /* CONFIG_PROC_FS */
1961 static int __find_bmc_guid(struct device *dev, void *data)
1963 unsigned char *id = data;
1964 struct bmc_device *bmc = dev_get_drvdata(dev);
1965 return memcmp(bmc->guid, id, 16) == 0;
1968 static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
1969 unsigned char *guid)
1973 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
1975 return dev_get_drvdata(dev);
1980 struct prod_dev_id {
1981 unsigned int product_id;
1982 unsigned char device_id;
1985 static int __find_bmc_prod_dev_id(struct device *dev, void *data)
1987 struct prod_dev_id *id = data;
1988 struct bmc_device *bmc = dev_get_drvdata(dev);
1990 return (bmc->id.product_id == id->product_id
1991 && bmc->id.device_id == id->device_id);
1994 static struct bmc_device *ipmi_find_bmc_prod_dev_id(
1995 struct device_driver *drv,
1996 unsigned int product_id, unsigned char device_id)
1998 struct prod_dev_id id = {
1999 .product_id = product_id,
2000 .device_id = device_id,
2004 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2006 return dev_get_drvdata(dev);
2011 static ssize_t device_id_show(struct device *dev,
2012 struct device_attribute *attr,
2015 struct bmc_device *bmc = dev_get_drvdata(dev);
2017 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2020 static ssize_t provides_dev_sdrs_show(struct device *dev,
2021 struct device_attribute *attr,
2024 struct bmc_device *bmc = dev_get_drvdata(dev);
2026 return snprintf(buf, 10, "%u\n",
2027 (bmc->id.device_revision & 0x80) >> 7);
2030 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2033 struct bmc_device *bmc = dev_get_drvdata(dev);
2035 return snprintf(buf, 20, "%u\n",
2036 bmc->id.device_revision & 0x0F);
2039 static ssize_t firmware_rev_show(struct device *dev,
2040 struct device_attribute *attr,
2043 struct bmc_device *bmc = dev_get_drvdata(dev);
2045 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2046 bmc->id.firmware_revision_2);
2049 static ssize_t ipmi_version_show(struct device *dev,
2050 struct device_attribute *attr,
2053 struct bmc_device *bmc = dev_get_drvdata(dev);
2055 return snprintf(buf, 20, "%u.%u\n",
2056 ipmi_version_major(&bmc->id),
2057 ipmi_version_minor(&bmc->id));
2060 static ssize_t add_dev_support_show(struct device *dev,
2061 struct device_attribute *attr,
2064 struct bmc_device *bmc = dev_get_drvdata(dev);
2066 return snprintf(buf, 10, "0x%02x\n",
2067 bmc->id.additional_device_support);
2070 static ssize_t manufacturer_id_show(struct device *dev,
2071 struct device_attribute *attr,
2074 struct bmc_device *bmc = dev_get_drvdata(dev);
2076 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2079 static ssize_t product_id_show(struct device *dev,
2080 struct device_attribute *attr,
2083 struct bmc_device *bmc = dev_get_drvdata(dev);
2085 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2088 static ssize_t aux_firmware_rev_show(struct device *dev,
2089 struct device_attribute *attr,
2092 struct bmc_device *bmc = dev_get_drvdata(dev);
2094 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2095 bmc->id.aux_firmware_revision[3],
2096 bmc->id.aux_firmware_revision[2],
2097 bmc->id.aux_firmware_revision[1],
2098 bmc->id.aux_firmware_revision[0]);
2101 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2104 struct bmc_device *bmc = dev_get_drvdata(dev);
2106 return snprintf(buf, 100, "%Lx%Lx\n",
2107 (long long) bmc->guid[0],
2108 (long long) bmc->guid[8]);
2111 static void remove_files(struct bmc_device *bmc)
2116 device_remove_file(&bmc->dev->dev,
2117 &bmc->device_id_attr);
2118 device_remove_file(&bmc->dev->dev,
2119 &bmc->provides_dev_sdrs_attr);
2120 device_remove_file(&bmc->dev->dev,
2121 &bmc->revision_attr);
2122 device_remove_file(&bmc->dev->dev,
2123 &bmc->firmware_rev_attr);
2124 device_remove_file(&bmc->dev->dev,
2125 &bmc->version_attr);
2126 device_remove_file(&bmc->dev->dev,
2127 &bmc->add_dev_support_attr);
2128 device_remove_file(&bmc->dev->dev,
2129 &bmc->manufacturer_id_attr);
2130 device_remove_file(&bmc->dev->dev,
2131 &bmc->product_id_attr);
2133 if (bmc->id.aux_firmware_revision_set)
2134 device_remove_file(&bmc->dev->dev,
2135 &bmc->aux_firmware_rev_attr);
2137 device_remove_file(&bmc->dev->dev,
2142 cleanup_bmc_device(struct kref *ref)
2144 struct bmc_device *bmc;
2146 bmc = container_of(ref, struct bmc_device, refcount);
2149 platform_device_unregister(bmc->dev);
2153 static void ipmi_bmc_unregister(ipmi_smi_t intf)
2155 struct bmc_device *bmc = intf->bmc;
2157 if (intf->sysfs_name) {
2158 sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2159 kfree(intf->sysfs_name);
2160 intf->sysfs_name = NULL;
2162 if (intf->my_dev_name) {
2163 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name);
2164 kfree(intf->my_dev_name);
2165 intf->my_dev_name = NULL;
2168 mutex_lock(&ipmidriver_mutex);
2169 kref_put(&bmc->refcount, cleanup_bmc_device);
2171 mutex_unlock(&ipmidriver_mutex);
2174 static int create_files(struct bmc_device *bmc)
2178 bmc->device_id_attr.attr.name = "device_id";
2179 bmc->device_id_attr.attr.mode = S_IRUGO;
2180 bmc->device_id_attr.show = device_id_show;
2182 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2183 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2184 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2186 bmc->revision_attr.attr.name = "revision";
2187 bmc->revision_attr.attr.mode = S_IRUGO;
2188 bmc->revision_attr.show = revision_show;
2190 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2191 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2192 bmc->firmware_rev_attr.show = firmware_rev_show;
2194 bmc->version_attr.attr.name = "ipmi_version";
2195 bmc->version_attr.attr.mode = S_IRUGO;
2196 bmc->version_attr.show = ipmi_version_show;
2198 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2199 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2200 bmc->add_dev_support_attr.show = add_dev_support_show;
2202 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2203 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2204 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2206 bmc->product_id_attr.attr.name = "product_id";
2207 bmc->product_id_attr.attr.mode = S_IRUGO;
2208 bmc->product_id_attr.show = product_id_show;
2210 bmc->guid_attr.attr.name = "guid";
2211 bmc->guid_attr.attr.mode = S_IRUGO;
2212 bmc->guid_attr.show = guid_show;
2214 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2215 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2216 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2218 err = device_create_file(&bmc->dev->dev,
2219 &bmc->device_id_attr);
2221 err = device_create_file(&bmc->dev->dev,
2222 &bmc->provides_dev_sdrs_attr);
2223 if (err) goto out_devid;
2224 err = device_create_file(&bmc->dev->dev,
2225 &bmc->revision_attr);
2226 if (err) goto out_sdrs;
2227 err = device_create_file(&bmc->dev->dev,
2228 &bmc->firmware_rev_attr);
2229 if (err) goto out_rev;
2230 err = device_create_file(&bmc->dev->dev,
2231 &bmc->version_attr);
2232 if (err) goto out_firm;
2233 err = device_create_file(&bmc->dev->dev,
2234 &bmc->add_dev_support_attr);
2235 if (err) goto out_version;
2236 err = device_create_file(&bmc->dev->dev,
2237 &bmc->manufacturer_id_attr);
2238 if (err) goto out_add_dev;
2239 err = device_create_file(&bmc->dev->dev,
2240 &bmc->product_id_attr);
2241 if (err) goto out_manu;
2242 if (bmc->id.aux_firmware_revision_set) {
2243 err = device_create_file(&bmc->dev->dev,
2244 &bmc->aux_firmware_rev_attr);
2245 if (err) goto out_prod_id;
2247 if (bmc->guid_set) {
2248 err = device_create_file(&bmc->dev->dev,
2250 if (err) goto out_aux_firm;
2256 if (bmc->id.aux_firmware_revision_set)
2257 device_remove_file(&bmc->dev->dev,
2258 &bmc->aux_firmware_rev_attr);
2260 device_remove_file(&bmc->dev->dev,
2261 &bmc->product_id_attr);
2263 device_remove_file(&bmc->dev->dev,
2264 &bmc->manufacturer_id_attr);
2266 device_remove_file(&bmc->dev->dev,
2267 &bmc->add_dev_support_attr);
2269 device_remove_file(&bmc->dev->dev,
2270 &bmc->version_attr);
2272 device_remove_file(&bmc->dev->dev,
2273 &bmc->firmware_rev_attr);
2275 device_remove_file(&bmc->dev->dev,
2276 &bmc->revision_attr);
2278 device_remove_file(&bmc->dev->dev,
2279 &bmc->provides_dev_sdrs_attr);
2281 device_remove_file(&bmc->dev->dev,
2282 &bmc->device_id_attr);
2287 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2288 const char *sysfs_name)
2291 struct bmc_device *bmc = intf->bmc;
2292 struct bmc_device *old_bmc;
2296 mutex_lock(&ipmidriver_mutex);
2299 * Try to find if there is an bmc_device struct
2300 * representing the interfaced BMC already
2303 old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid);
2305 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver,
2310 * If there is already an bmc_device, free the new one,
2311 * otherwise register the new BMC device
2315 intf->bmc = old_bmc;
2318 kref_get(&bmc->refcount);
2319 mutex_unlock(&ipmidriver_mutex);
2322 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2323 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2324 bmc->id.manufacturer_id,
2329 unsigned char orig_dev_id = bmc->id.device_id;
2330 int warn_printed = 0;
2332 snprintf(name, sizeof(name),
2333 "ipmi_bmc.%4.4x", bmc->id.product_id);
2335 while (ipmi_find_bmc_prod_dev_id(&ipmidriver,
2337 bmc->id.device_id)) {
2338 if (!warn_printed) {
2339 printk(KERN_WARNING PFX
2340 "This machine has two different BMCs"
2341 " with the same product id and device"
2342 " id. This is an error in the"
2343 " firmware, but incrementing the"
2344 " device id to work around the problem."
2345 " Prod ID = 0x%x, Dev ID = 0x%x\n",
2346 bmc->id.product_id, bmc->id.device_id);
2349 bmc->id.device_id++; /* Wraps at 255 */
2350 if (bmc->id.device_id == orig_dev_id) {
2352 "Out of device ids!\n");
2357 bmc->dev = platform_device_alloc(name, bmc->id.device_id);
2359 mutex_unlock(&ipmidriver_mutex);
2362 " Unable to allocate platform device\n");
2365 bmc->dev->dev.driver = &ipmidriver;
2366 dev_set_drvdata(&bmc->dev->dev, bmc);
2367 kref_init(&bmc->refcount);
2369 rv = platform_device_add(bmc->dev);
2370 mutex_unlock(&ipmidriver_mutex);
2372 platform_device_put(bmc->dev);
2376 " Unable to register bmc device: %d\n",
2378 /* Don't go to out_err, you can only do that if
2379 the device is registered already. */
2383 rv = create_files(bmc);
2385 mutex_lock(&ipmidriver_mutex);
2386 platform_device_unregister(bmc->dev);
2387 mutex_unlock(&ipmidriver_mutex);
2393 "ipmi: Found new BMC (man_id: 0x%6.6x, "
2394 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2395 bmc->id.manufacturer_id,
2401 * create symlink from system interface device to bmc device
2404 intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
2405 if (!intf->sysfs_name) {
2408 "ipmi_msghandler: allocate link to BMC: %d\n",
2413 rv = sysfs_create_link(&intf->si_dev->kobj,
2414 &bmc->dev->dev.kobj, intf->sysfs_name);
2416 kfree(intf->sysfs_name);
2417 intf->sysfs_name = NULL;
2419 "ipmi_msghandler: Unable to create bmc symlink: %d\n",
2424 size = snprintf(dummy, 0, "ipmi%d", ifnum);
2425 intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
2426 if (!intf->my_dev_name) {
2427 kfree(intf->sysfs_name);
2428 intf->sysfs_name = NULL;
2431 "ipmi_msghandler: allocate link from BMC: %d\n",
2435 snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
2437 rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
2440 kfree(intf->sysfs_name);
2441 intf->sysfs_name = NULL;
2442 kfree(intf->my_dev_name);
2443 intf->my_dev_name = NULL;
2446 " Unable to create symlink to bmc: %d\n",
2454 ipmi_bmc_unregister(intf);
2459 send_guid_cmd(ipmi_smi_t intf, int chan)
2461 struct kernel_ipmi_msg msg;
2462 struct ipmi_system_interface_addr si;
2464 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2465 si.channel = IPMI_BMC_CHANNEL;
2468 msg.netfn = IPMI_NETFN_APP_REQUEST;
2469 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
2472 return i_ipmi_request(NULL,
2474 (struct ipmi_addr *) &si,
2481 intf->channels[0].address,
2482 intf->channels[0].lun,
2487 guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2489 if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2490 || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
2491 || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
2495 if (msg->msg.data[0] != 0) {
2496 /* Error from getting the GUID, the BMC doesn't have one. */
2497 intf->bmc->guid_set = 0;
2501 if (msg->msg.data_len < 17) {
2502 intf->bmc->guid_set = 0;
2503 printk(KERN_WARNING PFX
2504 "guid_handler: The GUID response from the BMC was too"
2505 " short, it was %d but should have been 17. Assuming"
2506 " GUID is not available.\n",
2511 memcpy(intf->bmc->guid, msg->msg.data, 16);
2512 intf->bmc->guid_set = 1;
2514 wake_up(&intf->waitq);
2518 get_guid(ipmi_smi_t intf)
2522 intf->bmc->guid_set = 0x2;
2523 intf->null_user_handler = guid_handler;
2524 rv = send_guid_cmd(intf, 0);
2526 /* Send failed, no GUID available. */
2527 intf->bmc->guid_set = 0;
2528 wait_event(intf->waitq, intf->bmc->guid_set != 2);
2529 intf->null_user_handler = NULL;
2533 send_channel_info_cmd(ipmi_smi_t intf, int chan)
2535 struct kernel_ipmi_msg msg;
2536 unsigned char data[1];
2537 struct ipmi_system_interface_addr si;
2539 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2540 si.channel = IPMI_BMC_CHANNEL;
2543 msg.netfn = IPMI_NETFN_APP_REQUEST;
2544 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
2548 return i_ipmi_request(NULL,
2550 (struct ipmi_addr *) &si,
2557 intf->channels[0].address,
2558 intf->channels[0].lun,
2563 channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
2568 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
2569 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
2570 && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
2572 /* It's the one we want */
2573 if (msg->msg.data[0] != 0) {
2574 /* Got an error from the channel, just go on. */
2576 if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
2577 /* If the MC does not support this
2578 command, that is legal. We just
2579 assume it has one IPMB at channel
2581 intf->channels[0].medium
2582 = IPMI_CHANNEL_MEDIUM_IPMB;
2583 intf->channels[0].protocol
2584 = IPMI_CHANNEL_PROTOCOL_IPMB;
2587 intf->curr_channel = IPMI_MAX_CHANNELS;
2588 wake_up(&intf->waitq);
2593 if (msg->msg.data_len < 4) {
2594 /* Message not big enough, just go on. */
2597 chan = intf->curr_channel;
2598 intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
2599 intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
2602 intf->curr_channel++;
2603 if (intf->curr_channel >= IPMI_MAX_CHANNELS)
2604 wake_up(&intf->waitq);
2606 rv = send_channel_info_cmd(intf, intf->curr_channel);
2609 /* Got an error somehow, just give up. */
2610 intf->curr_channel = IPMI_MAX_CHANNELS;
2611 wake_up(&intf->waitq);
2613 printk(KERN_WARNING PFX
2614 "Error sending channel information: %d\n",
2622 void ipmi_poll_interface(ipmi_user_t user)
2624 ipmi_smi_t intf = user->intf;
2626 if (intf->handlers->poll)
2627 intf->handlers->poll(intf->send_info);
2630 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
2632 struct ipmi_device_id *device_id,
2633 struct device *si_dev,
2634 const char *sysfs_name,
2635 unsigned char slave_addr)
2641 struct list_head *link;
2643 /* Make sure the driver is actually initialized, this handles
2644 problems with initialization order. */
2646 rv = ipmi_init_msghandler();
2649 /* The init code doesn't return an error if it was turned
2650 off, but it won't initialize. Check that. */
2655 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
2659 intf->ipmi_version_major = ipmi_version_major(device_id);
2660 intf->ipmi_version_minor = ipmi_version_minor(device_id);
2662 intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL);
2667 intf->intf_num = -1; /* Mark it invalid for now. */
2668 kref_init(&intf->refcount);
2669 intf->bmc->id = *device_id;
2670 intf->si_dev = si_dev;
2671 for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
2672 intf->channels[j].address = IPMI_BMC_SLAVE_ADDR;
2673 intf->channels[j].lun = 2;
2675 if (slave_addr != 0)
2676 intf->channels[0].address = slave_addr;
2677 INIT_LIST_HEAD(&intf->users);
2678 intf->handlers = handlers;
2679 intf->send_info = send_info;
2680 spin_lock_init(&intf->seq_lock);
2681 for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
2682 intf->seq_table[j].inuse = 0;
2683 intf->seq_table[j].seqid = 0;
2686 #ifdef CONFIG_PROC_FS
2687 mutex_init(&intf->proc_entry_lock);
2689 spin_lock_init(&intf->waiting_msgs_lock);
2690 INIT_LIST_HEAD(&intf->waiting_msgs);
2691 spin_lock_init(&intf->events_lock);
2692 INIT_LIST_HEAD(&intf->waiting_events);
2693 intf->waiting_events_count = 0;
2694 mutex_init(&intf->cmd_rcvrs_mutex);
2695 spin_lock_init(&intf->maintenance_mode_lock);
2696 INIT_LIST_HEAD(&intf->cmd_rcvrs);
2697 init_waitqueue_head(&intf->waitq);
2699 spin_lock_init(&intf->counter_lock);
2700 intf->proc_dir = NULL;
2702 mutex_lock(&smi_watchers_mutex);
2703 mutex_lock(&ipmi_interfaces_mutex);
2704 /* Look for a hole in the numbers. */
2706 link = &ipmi_interfaces;
2707 list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
2708 if (tintf->intf_num != i) {
2709 link = &tintf->link;
2714 /* Add the new interface in numeric order. */
2716 list_add_rcu(&intf->link, &ipmi_interfaces);
2718 list_add_tail_rcu(&intf->link, link);
2720 rv = handlers->start_processing(send_info, intf);
2726 if ((intf->ipmi_version_major > 1)
2727 || ((intf->ipmi_version_major == 1)
2728 && (intf->ipmi_version_minor >= 5)))
2730 /* Start scanning the channels to see what is
2732 intf->null_user_handler = channel_handler;
2733 intf->curr_channel = 0;
2734 rv = send_channel_info_cmd(intf, 0);
2738 /* Wait for the channel info to be read. */
2739 wait_event(intf->waitq,
2740 intf->curr_channel >= IPMI_MAX_CHANNELS);
2741 intf->null_user_handler = NULL;
2743 /* Assume a single IPMB channel at zero. */
2744 intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
2745 intf->channels[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
2749 rv = add_proc_entries(intf, i);
2751 rv = ipmi_bmc_register(intf, i, sysfs_name);
2756 remove_proc_entries(intf);
2757 intf->handlers = NULL;
2758 list_del_rcu(&intf->link);
2759 mutex_unlock(&ipmi_interfaces_mutex);
2760 mutex_unlock(&smi_watchers_mutex);
2762 kref_put(&intf->refcount, intf_free);
2765 * Keep memory order straight for RCU readers. Make
2766 * sure everything else is committed to memory before
2767 * setting intf_num to mark the interface valid.
2771 mutex_unlock(&ipmi_interfaces_mutex);
2772 /* After this point the interface is legal to use. */
2773 call_smi_watchers(i, intf->si_dev);
2774 mutex_unlock(&smi_watchers_mutex);
/*
 * Fail every request still outstanding in the interface's sequence
 * table by delivering an IPMI_ERR_UNSPECIFIED error response to its
 * owner.  Only called once the interface is down, so the table can be
 * walked without taking seq_lock.
 * NOTE(review): extraction dropped lines here (e.g. the per-entry
 * inuse check around original line 2788) -- comments limited to what
 * is visible.
 */
2780 static void cleanup_smi_msgs(ipmi_smi_t intf)
2783 struct seq_table *ent;
2785 /* No need for locks, the interface is down. */
2786 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
2787 ent = &(intf->seq_table[i]);
2790 deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
/*
 * Tear down a registered SMI interface: unregister the BMC device,
 * mark the interface invalid and unlink it from the interface list
 * (under both mutexes so watchers see a consistent state), fail any
 * pending messages, remove the proc entries, tell every watcher the
 * interface is gone, and drop the registration reference.
 */
2794 int ipmi_unregister_smi(ipmi_smi_t intf)
2796 struct ipmi_smi_watcher *w;
2797 int intf_num = intf->intf_num;
2799 ipmi_bmc_unregister(intf);
2801 mutex_lock(&smi_watchers_mutex);
2802 mutex_lock(&ipmi_interfaces_mutex);
/* intf_num == -1 marks the interface invalid to the rest of the code. */
2803 intf->intf_num = -1;
2804 intf->handlers = NULL;
2805 list_del_rcu(&intf->link);
2806 mutex_unlock(&ipmi_interfaces_mutex);
2809 cleanup_smi_msgs(intf);
2811 remove_proc_entries(intf);
2813 /* Call all the watcher interfaces to tell them that
2814 an interface is gone. */
2815 list_for_each_entry(w, &smi_watchers, link)
2816 w->smi_gone(intf_num);
2817 mutex_unlock(&smi_watchers_mutex);
2819 kref_put(&intf->refcount, intf_free);
/*
 * Handle a Get Message response carrying an IPMB response from a
 * remote entity.  Rebuilds the IPMB source address from the raw
 * response bytes, finds the matching sequence-table entry, copies the
 * payload into the waiting recv_msg and delivers it.  Responses that
 * are too short, carry an error completion code, or match no pending
 * sequence number are counted and dropped.
 */
2823 static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
2824 struct ipmi_smi_msg *msg)
2826 struct ipmi_ipmb_addr ipmb_addr;
2827 struct ipmi_recv_msg *recv_msg;
2828 unsigned long flags;
2831 /* This is 11, not 10, because the response must contain a
2832 * completion code. */
2833 if (msg->rsp_size < 11) {
2834 /* Message not big enough, just ignore it. */
2835 spin_lock_irqsave(&intf->counter_lock, flags);
2836 intf->invalid_ipmb_responses++;
2837 spin_unlock_irqrestore(&intf->counter_lock, flags);
2841 if (msg->rsp[2] != 0) {
2842 /* An error getting the response, just ignore it. */
/* Decode the remote IPMB source address from the response bytes. */
2846 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
2847 ipmb_addr.slave_addr = msg->rsp[6];
2848 ipmb_addr.channel = msg->rsp[3] & 0x0f;
2849 ipmb_addr.lun = msg->rsp[7] & 3;
2851 /* It's a response from a remote entity. Look up the sequence
2852 number and handle the response. */
2853 if (intf_find_seq(intf,
/* &(~1) clears the low (response) bit of the netfn for matching. */
2857 (msg->rsp[4] >> 2) & (~1),
2858 (struct ipmi_addr *) &(ipmb_addr),
2861 /* We were unable to find the sequence number,
2862 so just nuke the message. */
2863 spin_lock_irqsave(&intf->counter_lock, flags);
2864 intf->unhandled_ipmb_responses++;
2865 spin_unlock_irqrestore(&intf->counter_lock, flags);
2869 memcpy(recv_msg->msg_data,
2872 /* The other fields matched, so no need to set them, except
2873 for netfn, which needs to be the response that was
2874 returned, not the request value. */
2875 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2876 recv_msg->msg.data = recv_msg->msg_data;
/* -10: strips the 9 header bytes plus the trailing checksum. */
2877 recv_msg->msg.data_len = msg->rsp_size - 10;
2878 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
2879 spin_lock_irqsave(&intf->counter_lock, flags);
2880 intf->handled_ipmb_responses++;
2881 spin_unlock_irqrestore(&intf->counter_lock, flags);
2882 deliver_response(recv_msg);
/*
 * Handle an incoming IPMB command addressed to us (the SMS).  If a
 * receiver is registered for the (netfn, cmd, chan) triple, allocate a
 * recv_msg, fill in the IPMB source address and deliver it to that
 * user.  If no receiver exists, synthesize an "invalid command"
 * response and push it back to the requester through a Send Message
 * command.  Return value signals the caller whether the smi_msg must
 * be requeued/kept (see handle_new_recv_msg's contract).
 */
2887 static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
2888 struct ipmi_smi_msg *msg)
2890 struct cmd_rcvr *rcvr;
2892 unsigned char netfn;
2895 ipmi_user_t user = NULL;
2896 struct ipmi_ipmb_addr *ipmb_addr;
2897 struct ipmi_recv_msg *recv_msg;
2898 unsigned long flags;
2899 struct ipmi_smi_handlers *handlers;
2901 if (msg->rsp_size < 10) {
2902 /* Message not big enough, just ignore it. */
2903 spin_lock_irqsave(&intf->counter_lock, flags);
2904 intf->invalid_commands++;
2905 spin_unlock_irqrestore(&intf->counter_lock, flags);
2909 if (msg->rsp[2] != 0) {
2910 /* An error getting the response, just ignore it. */
2914 netfn = msg->rsp[4] >> 2;
2916 chan = msg->rsp[3] & 0xf;
2919 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
/* Hold a reference so the user cannot vanish while we deliver. */
2922 kref_get(&user->refcount);
2928 /* We didn't find a user, deliver an error response. */
2929 spin_lock_irqsave(&intf->counter_lock, flags);
2930 intf->unhandled_commands++;
2931 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Build an IPMB "invalid command" reply and ship it back via a
   Send Message command wrapped around the IPMB frame. */
2933 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
2934 msg->data[1] = IPMI_SEND_MSG_CMD;
2935 msg->data[2] = msg->rsp[3];
2936 msg->data[3] = msg->rsp[6];
/* netfn+1: turn the request netfn into the response netfn. */
2937 msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
2938 msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
2939 msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
2941 msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
2942 msg->data[8] = msg->rsp[8]; /* cmd */
2943 msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
2944 msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
2945 msg->data_size = 11;
2950 printk("Invalid command:");
2951 for (m = 0; m < msg->data_size; m++)
2952 printk(" %2.2x", msg->data[m]);
2957 handlers = intf->handlers;
2959 handlers->sender(intf->send_info, msg, 0);
2960 /* We used the message, so return the value
2961 that causes it to not be freed or
2967 /* Deliver the message to the user. */
2968 spin_lock_irqsave(&intf->counter_lock, flags);
2969 intf->handled_commands++;
2970 spin_unlock_irqrestore(&intf->counter_lock, flags);
2972 recv_msg = ipmi_alloc_recv_msg();
2974 /* We couldn't allocate memory for the
2975 message, so requeue it for handling
/* Drop the reference taken when the receiver was found. */
2978 kref_put(&user->refcount, free_user);
2980 /* Extract the source address from the data. */
2981 ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
2982 ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
2983 ipmb_addr->slave_addr = msg->rsp[6];
2984 ipmb_addr->lun = msg->rsp[7] & 3;
2985 ipmb_addr->channel = msg->rsp[3] & 0xf;
2987 /* Extract the rest of the message information
2988 from the IPMB header.*/
2989 recv_msg->user = user;
2990 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
2991 recv_msg->msgid = msg->rsp[7] >> 2;
2992 recv_msg->msg.netfn = msg->rsp[4] >> 2;
2993 recv_msg->msg.cmd = msg->rsp[8];
2994 recv_msg->msg.data = recv_msg->msg_data;
2996 /* We chop off 10, not 9 bytes because the checksum
2997 at the end also needs to be removed. */
2998 recv_msg->msg.data_len = msg->rsp_size - 10;
2999 memcpy(recv_msg->msg_data,
3001 msg->rsp_size - 10);
3002 deliver_response(recv_msg);
/*
 * Handle a Get Message response carrying a LAN-routed response from a
 * remote entity.  Mirrors handle_ipmb_get_msg_rsp but decodes the
 * larger LAN addressing header (session handle, remote/local SWID,
 * privilege) before the sequence lookup and delivery.
 */
3009 static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
3010 struct ipmi_smi_msg *msg)
3012 struct ipmi_lan_addr lan_addr;
3013 struct ipmi_recv_msg *recv_msg;
3014 unsigned long flags;
3017 /* This is 13, not 12, because the response must contain a
3018 * completion code. */
3019 if (msg->rsp_size < 13) {
3020 /* Message not big enough, just ignore it. */
3021 spin_lock_irqsave(&intf->counter_lock, flags);
3022 intf->invalid_lan_responses++;
3023 spin_unlock_irqrestore(&intf->counter_lock, flags);
3027 if (msg->rsp[2] != 0) {
3028 /* An error getting the response, just ignore it. */
/* Decode the LAN source address from the response bytes. */
3032 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3033 lan_addr.session_handle = msg->rsp[4];
3034 lan_addr.remote_SWID = msg->rsp[8];
3035 lan_addr.local_SWID = msg->rsp[5];
3036 lan_addr.channel = msg->rsp[3] & 0x0f;
3037 lan_addr.privilege = msg->rsp[3] >> 4;
3038 lan_addr.lun = msg->rsp[9] & 3;
3040 /* It's a response from a remote entity. Look up the sequence
3041 number and handle the response. */
3042 if (intf_find_seq(intf,
/* &(~1) clears the low (response) bit of the netfn for matching. */
3046 (msg->rsp[6] >> 2) & (~1),
3047 (struct ipmi_addr *) &(lan_addr),
3050 /* We were unable to find the sequence number,
3051 so just nuke the message. */
3052 spin_lock_irqsave(&intf->counter_lock, flags);
3053 intf->unhandled_lan_responses++;
3054 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* NOTE(review): the copy length below is rsp_size - 11 while
   data_len is set to rsp_size - 12; the extra byte copied looks
   like the trailing checksum -- confirm this is intentional. */
3058 memcpy(recv_msg->msg_data,
3060 msg->rsp_size - 11);
3061 /* The other fields matched, so no need to set them, except
3062 for netfn, which needs to be the response that was
3063 returned, not the request value. */
3064 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3065 recv_msg->msg.data = recv_msg->msg_data;
3066 recv_msg->msg.data_len = msg->rsp_size - 12;
3067 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3068 spin_lock_irqsave(&intf->counter_lock, flags);
3069 intf->handled_lan_responses++;
3070 spin_unlock_irqrestore(&intf->counter_lock, flags);
3071 deliver_response(recv_msg);
/*
 * Handle an incoming LAN-routed command addressed to us (the SMS).
 * Mirrors handle_ipmb_get_msg_cmd, except that commands with no
 * registered receiver are simply dropped (rv = 0) rather than answered
 * with an error response.
 */
3076 static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
3077 struct ipmi_smi_msg *msg)
3079 struct cmd_rcvr *rcvr;
3081 unsigned char netfn;
3084 ipmi_user_t user = NULL;
3085 struct ipmi_lan_addr *lan_addr;
3086 struct ipmi_recv_msg *recv_msg;
3087 unsigned long flags;
3089 if (msg->rsp_size < 12) {
3090 /* Message not big enough, just ignore it. */
3091 spin_lock_irqsave(&intf->counter_lock, flags);
3092 intf->invalid_commands++;
3093 spin_unlock_irqrestore(&intf->counter_lock, flags);
3097 if (msg->rsp[2] != 0) {
3098 /* An error getting the response, just ignore it. */
3102 netfn = msg->rsp[6] >> 2;
3104 chan = msg->rsp[3] & 0xf;
3107 rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
/* Hold a reference so the user cannot vanish while we deliver. */
3110 kref_get(&user->refcount);
3116 /* We didn't find a user, just give up. */
3117 spin_lock_irqsave(&intf->counter_lock, flags);
3118 intf->unhandled_commands++;
3119 spin_unlock_irqrestore(&intf->counter_lock, flags);
3121 rv = 0; /* Don't do anything with these messages, just
3122 allow them to be freed. */
3124 /* Deliver the message to the user. */
3125 spin_lock_irqsave(&intf->counter_lock, flags);
3126 intf->handled_commands++;
3127 spin_unlock_irqrestore(&intf->counter_lock, flags);
3129 recv_msg = ipmi_alloc_recv_msg();
3131 /* We couldn't allocate memory for the
3132 message, so requeue it for handling
/* Drop the reference taken when the receiver was found. */
3135 kref_put(&user->refcount, free_user);
3137 /* Extract the source address from the data. */
3138 lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
3139 lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
3140 lan_addr->session_handle = msg->rsp[4];
3141 lan_addr->remote_SWID = msg->rsp[8];
3142 lan_addr->local_SWID = msg->rsp[5];
3143 lan_addr->lun = msg->rsp[9] & 3;
3144 lan_addr->channel = msg->rsp[3] & 0xf;
3145 lan_addr->privilege = msg->rsp[3] >> 4;
3147 /* Extract the rest of the message information
3148 from the IPMB header.*/
3149 recv_msg->user = user;
3150 recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
3151 recv_msg->msgid = msg->rsp[9] >> 2;
3152 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3153 recv_msg->msg.cmd = msg->rsp[10];
3154 recv_msg->msg.data = recv_msg->msg_data;
3156 /* We chop off 12, not 11 bytes because the checksum
3157 at the end also needs to be removed. */
3158 recv_msg->msg.data_len = msg->rsp_size - 12;
3159 memcpy(recv_msg->msg_data,
3161 msg->rsp_size - 12);
3162 deliver_response(recv_msg);
/*
 * Fill a recv_msg with the contents of an asynchronous event read from
 * the BMC's event message buffer.  The source address is the system
 * interface itself (events arrive from the BMC channel), and the first
 * three response bytes (netfn/lun, cmd, completion code) are peeled
 * off to leave only the event payload.
 */
3169 static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
3170 struct ipmi_smi_msg *msg)
3172 struct ipmi_system_interface_addr *smi_addr;
3174 recv_msg->msgid = 0;
3175 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
3176 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3177 smi_addr->channel = IPMI_BMC_CHANNEL;
3178 smi_addr->lun = msg->rsp[0] & 3;
3179 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
3180 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3181 recv_msg->msg.cmd = msg->rsp[1];
3182 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
3183 recv_msg->msg.data = recv_msg->msg_data;
3184 recv_msg->msg.data_len = msg->rsp_size - 3;
/*
 * Handle the response to a Read Event Message Buffer command, i.e. an
 * asynchronous event from the BMC.  The event is duplicated into one
 * recv_msg per user that has gets_events set and delivered to all of
 * them.  If nobody is listening, the event is parked on the
 * interface's waiting_events queue (bounded by MAX_EVENTS_IN_QUEUE);
 * once the queue is full, events are discarded and a warning is
 * printed a single time.
 */
3187 static int handle_read_event_rsp(ipmi_smi_t intf,
3188 struct ipmi_smi_msg *msg)
3190 struct ipmi_recv_msg *recv_msg, *recv_msg2;
3191 struct list_head msgs;
3194 int deliver_count = 0;
3195 unsigned long flags;
3197 if (msg->rsp_size < 19) {
3198 /* Message is too small to be an IPMB event. */
3199 spin_lock_irqsave(&intf->counter_lock, flags);
3200 intf->invalid_events++;
3201 spin_unlock_irqrestore(&intf->counter_lock, flags);
3205 if (msg->rsp[2] != 0) {
3206 /* An error getting the event, just ignore it. */
3210 INIT_LIST_HEAD(&msgs);
/* events_lock serializes with users toggling gets_events. */
3212 spin_lock_irqsave(&intf->events_lock, flags);
3214 spin_lock(&intf->counter_lock);
3216 spin_unlock(&intf->counter_lock);
3218 /* Allocate and fill in one message for every user that is getting
3221 list_for_each_entry_rcu(user, &intf->users, link) {
3222 if (!user->gets_events)
3225 recv_msg = ipmi_alloc_recv_msg();
/* Allocation failed: undo all copies queued so far and requeue. */
3228 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
3230 list_del(&recv_msg->link);
3231 ipmi_free_recv_msg(recv_msg);
3233 /* We couldn't allocate memory for the
3234 message, so requeue it for handling
3242 copy_event_into_recv_msg(recv_msg, msg);
3243 recv_msg->user = user;
3244 kref_get(&user->refcount);
3245 list_add_tail(&(recv_msg->link), &msgs);
3249 if (deliver_count) {
3250 /* Now deliver all the messages. */
3251 list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
3252 list_del(&recv_msg->link);
3253 deliver_response(recv_msg);
3255 } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
3256 /* No one to receive the message, put it in queue if there's
3257 not already too many things in the queue. */
3258 recv_msg = ipmi_alloc_recv_msg();
3260 /* We couldn't allocate memory for the
3261 message, so requeue it for handling
3267 copy_event_into_recv_msg(recv_msg, msg);
3268 list_add_tail(&(recv_msg->link), &(intf->waiting_events));
3269 intf->waiting_events_count++;
3270 } else if (!intf->event_msg_printed) {
3271 /* There's too many things in the queue, discard this
3273 printk(KERN_WARNING PFX "Event queue full, discarding"
3274 " incoming events\n");
3275 intf->event_msg_printed = 1;
3279 spin_unlock_irqrestore(&(intf->events_lock), flags);
/*
 * Handle a response from the local BMC (i.e. not routed from another
 * channel).  The original request's recv_msg travels in msg->user_data;
 * fill it in with the response bytes and deliver it, unless its owning
 * user has since gone away, in which case it is counted and freed.
 */
3284 static int handle_bmc_rsp(ipmi_smi_t intf,
3285 struct ipmi_smi_msg *msg)
3287 struct ipmi_recv_msg *recv_msg;
3288 unsigned long flags;
3289 struct ipmi_user *user;
3291 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
3292 if (recv_msg == NULL)
/* NOTE(review): "vender" should be "vendor" -- user-visible string
   typo, deliberately left untouched in this documentation-only pass. */
3294 printk(KERN_WARNING"IPMI message received with no owner. This\n"
3295 "could be because of a malformed message, or\n"
3296 "because of a hardware error. Contact your\n"
3297 "hardware vender for assistance\n");
3301 user = recv_msg->user;
3302 /* Make sure the user still exists. */
3303 if (user && !user->valid) {
3304 /* The user for the message went away, so give up. */
3305 spin_lock_irqsave(&intf->counter_lock, flags);
3306 intf->unhandled_local_responses++;
3307 spin_unlock_irqrestore(&intf->counter_lock, flags);
3308 ipmi_free_recv_msg(recv_msg);
3310 struct ipmi_system_interface_addr *smi_addr;
3312 spin_lock_irqsave(&intf->counter_lock, flags);
3313 intf->handled_local_responses++;
3314 spin_unlock_irqrestore(&intf->counter_lock, flags);
3315 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3316 recv_msg->msgid = msg->msgid;
3317 smi_addr = ((struct ipmi_system_interface_addr *)
3319 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3320 smi_addr->channel = IPMI_BMC_CHANNEL;
3321 smi_addr->lun = msg->rsp[0] & 3;
3322 recv_msg->msg.netfn = msg->rsp[0] >> 2;
3323 recv_msg->msg.cmd = msg->rsp[1];
3324 memcpy(recv_msg->msg_data,
3327 recv_msg->msg.data = recv_msg->msg_data;
/* -2: strip the netfn/lun and cmd bytes, keep completion code on. */
3328 recv_msg->msg.data_len = msg->rsp_size - 2;
3329 deliver_response(recv_msg);
3335 /* Handle a new message. Return 1 if the message should be requeued,
3336 0 if the message should be freed, or -1 if the message should not
3337 be freed or requeued. */
/*
 * Central dispatcher for messages coming up from the SMI.  Sanity-
 * checks the response against the request it answers (netfn/cmd echo),
 * fabricating an IPMI_ERR_UNSPECIFIED response in place when the BMC
 * returned garbage, then routes by command: Send Message responses go
 * back to their originating user, Get Message responses are dispatched
 * by channel medium to the IPMB or LAN handlers, Read Event Message
 * Buffer responses become async events, everything else is a local
 * BMC response.
 */
3338 static int handle_new_recv_msg(ipmi_smi_t intf,
3339 struct ipmi_smi_msg *msg)
3347 for (m = 0; m < msg->rsp_size; m++)
3348 printk(" %2.2x", msg->rsp[m]);
3351 if (msg->rsp_size < 2) {
3352 /* Message is too small to be correct. */
/* NOTE(review): "to small" should be "too small" -- user-visible
   string typo, deliberately left untouched in this doc-only pass. */
3353 printk(KERN_WARNING PFX "BMC returned to small a message"
3354 " for netfn %x cmd %x, got %d bytes\n",
3355 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
3357 /* Generate an error response for the message. */
3358 msg->rsp[0] = msg->data[0] | (1 << 2);
3359 msg->rsp[1] = msg->data[1];
3360 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3362 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
3363 || (msg->rsp[1] != msg->data[1])) /* Command */
3365 /* The response is not even marginally correct. */
3366 printk(KERN_WARNING PFX "BMC returned incorrect response,"
3367 " expected netfn %x cmd %x, got netfn %x cmd %x\n",
3368 (msg->data[0] >> 2) | 1, msg->data[1],
3369 msg->rsp[0] >> 2, msg->rsp[1]);
3371 /* Generate an error response for the message. */
3372 msg->rsp[0] = msg->data[0] | (1 << 2);
3373 msg->rsp[1] = msg->data[1];
3374 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
3378 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3379 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
3380 && (msg->user_data != NULL))
3382 /* It's a response to a response we sent. For this we
3383 deliver a send message response to the user. */
3384 struct ipmi_recv_msg *recv_msg = msg->user_data;
3387 if (msg->rsp_size < 2)
3388 /* Message is too small to be correct. */
3391 chan = msg->data[2] & 0x0f;
3392 if (chan >= IPMI_MAX_CHANNELS)
3393 /* Invalid channel number */
3399 /* Make sure the user still exists. */
3400 if (!recv_msg->user || !recv_msg->user->valid)
3403 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
3404 recv_msg->msg.data = recv_msg->msg_data;
3405 recv_msg->msg.data_len = 1;
/* The single data byte is the Send Message completion code. */
3406 recv_msg->msg_data[0] = msg->rsp[2];
3407 deliver_response(recv_msg);
3408 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3409 && (msg->rsp[1] == IPMI_GET_MSG_CMD))
3411 /* It's from the receive queue. */
3412 chan = msg->rsp[3] & 0xf;
3413 if (chan >= IPMI_MAX_CHANNELS) {
3414 /* Invalid channel number */
3419 switch (intf->channels[chan].medium) {
3420 case IPMI_CHANNEL_MEDIUM_IPMB:
/* Bit 2 of the netfn byte distinguishes response from command. */
3421 if (msg->rsp[4] & 0x04) {
3422 /* It's a response, so find the
3423 requesting message and send it up. */
3424 requeue = handle_ipmb_get_msg_rsp(intf, msg);
3426 /* It's a command to the SMS from some other
3427 entity. Handle that. */
3428 requeue = handle_ipmb_get_msg_cmd(intf, msg);
3432 case IPMI_CHANNEL_MEDIUM_8023LAN:
3433 case IPMI_CHANNEL_MEDIUM_ASYNC:
3434 if (msg->rsp[6] & 0x04) {
3435 /* It's a response, so find the
3436 requesting message and send it up. */
3437 requeue = handle_lan_get_msg_rsp(intf, msg);
3439 /* It's a command to the SMS from some other
3440 entity. Handle that. */
3441 requeue = handle_lan_get_msg_cmd(intf, msg);
3446 /* We don't handle the channel type, so just
3447 * free the message. */
3451 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
3452 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
3454 /* It's an asynchronous event. */
3455 requeue = handle_read_event_rsp(intf, msg);
3457 /* It's a response from the local BMC. */
3458 requeue = handle_bmc_rsp(intf, msg);
3465 /* Handle a new message from the lower layer. */
/*
 * Entry point the SMI driver calls for every message coming up.  Local
 * responses to Send Message commands are consumed here: retryable bus
 * errors restart the sequence timer, hard errors fail the sequence
 * immediately.  All other messages are dispatched through
 * handle_new_recv_msg(), preserving order by deferring to the
 * waiting_msgs list when it is non-empty or the handler asks for a
 * requeue.  The waiting_msgs_lock is skipped entirely when the
 * interface runs in run_to_completion (panic) mode.
 */
3466 void ipmi_smi_msg_received(ipmi_smi_t intf,
3467 struct ipmi_smi_msg *msg)
3469 unsigned long flags = 0; /* keep us warning-free. */
3471 int run_to_completion;
3474 if ((msg->data_size >= 2)
3475 && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
3476 && (msg->data[1] == IPMI_SEND_MSG_CMD)
3477 && (msg->user_data == NULL))
3479 /* This is the local response to a command send, start
3480 the timer for these. The user_data will not be
3481 NULL if this is a response send, and we will let
3482 response sends just go through. */
3484 /* Check for errors, if we get certain errors (ones
3485 that mean basically we can try again later), we
3486 ignore them and start the timer. Otherwise we
3487 report the error immediately. */
3488 if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
3489 && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
3490 && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
3491 && (msg->rsp[2] != IPMI_BUS_ERR)
3492 && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
3494 int chan = msg->rsp[3] & 0xf;
3496 /* Got an error sending the message, handle it. */
3497 spin_lock_irqsave(&intf->counter_lock, flags);
3498 if (chan >= IPMI_MAX_CHANNELS)
3499 ; /* This shouldn't happen */
3500 else if ((intf->channels[chan].medium
3501 == IPMI_CHANNEL_MEDIUM_8023LAN)
3502 || (intf->channels[chan].medium
3503 == IPMI_CHANNEL_MEDIUM_ASYNC))
3504 intf->sent_lan_command_errs++;
3506 intf->sent_ipmb_command_errs++;
3507 spin_unlock_irqrestore(&intf->counter_lock, flags);
/* Fail the pending sequence with the BMC's completion code. */
3508 intf_err_seq(intf, msg->msgid, msg->rsp[2]);
3510 /* The message was sent, start the timer. */
3511 intf_start_seq_timer(intf, msg->msgid);
3514 ipmi_free_smi_msg(msg);
3518 /* To preserve message order, if the list is not empty, we
3519 tack this message onto the end of the list. */
3520 run_to_completion = intf->run_to_completion;
3521 if (!run_to_completion)
3522 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3523 if (!list_empty(&intf->waiting_msgs)) {
3524 list_add_tail(&msg->link, &intf->waiting_msgs);
3525 if (!run_to_completion)
3526 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3529 if (!run_to_completion)
3530 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3532 rv = handle_new_recv_msg(intf, msg);
3534 /* Could not handle the message now, just add it to a
3535 list to handle later. */
3536 run_to_completion = intf->run_to_completion;
3537 if (!run_to_completion)
3538 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3539 list_add_tail(&msg->link, &intf->waiting_msgs);
3540 if (!run_to_completion)
3541 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3542 } else if (rv == 0) {
3543 ipmi_free_smi_msg(msg);
/*
 * Broadcast a watchdog pre-timeout notification to every registered
 * user on this interface that installed an ipmi_watchdog_pretimeout
 * callback.
 */
3550 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
3555 list_for_each_entry_rcu(user, &intf->users, link) {
3556 if (!user->handler->ipmi_watchdog_pretimeout)
3559 user->handler->ipmi_watchdog_pretimeout(user->handler_data);
/*
 * Build a fresh smi_msg from a recv_msg so a timed-out request can be
 * retransmitted.  The sequence number and seqid are re-encoded into
 * the msgid.  Returns NULL on allocation failure, which is tolerable
 * because the retry machinery will simply try again on a later tick.
 */
3565 static struct ipmi_smi_msg *
3566 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
3567 unsigned char seq, long seqid)
3569 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
3571 /* If we can't allocate the message, then just return, we
3572 get 4 retries, so this should be ok. */
3575 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
3576 smi_msg->data_size = recv_msg->msg.data_len;
3577 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
3583 for (m = 0; m < smi_msg->data_size; m++)
3584 printk(" %2.2x", smi_msg->data[m]);
/*
 * Age one sequence-table entry by timeout_period.  If the entry has
 * expired with retries remaining, re-send it (temporarily dropping
 * seq_lock, which the caller holds via *flags, around the driver's
 * sender callback); if it is out of retries, move its recv_msg onto
 * the caller's timeouts list for error delivery.  Does nothing while
 * the interface is marked invalid (intf_num == -1).
 */
3591 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
3592 struct list_head *timeouts, long timeout_period,
3593 int slot, unsigned long *flags)
3595 struct ipmi_recv_msg *msg;
3596 struct ipmi_smi_handlers *handlers;
3598 if (intf->intf_num == -1)
3604 ent->timeout -= timeout_period;
3605 if (ent->timeout > 0)
3608 if (ent->retries_left == 0) {
3609 /* The message has used all its retries. */
3611 msg = ent->recv_msg;
3612 list_add_tail(&msg->link, timeouts);
3613 spin_lock(&intf->counter_lock);
3615 intf->timed_out_ipmb_broadcasts++;
3616 else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3617 intf->timed_out_lan_commands++;
3619 intf->timed_out_ipmb_commands++;
3620 spin_unlock(&intf->counter_lock);
3622 struct ipmi_smi_msg *smi_msg;
3623 /* More retries, send again. */
3625 /* Start with the max timer, set to normal
3626 timer after the message is sent. */
3627 ent->timeout = MAX_MSG_TIMEOUT;
3628 ent->retries_left--;
3629 spin_lock(&intf->counter_lock);
3630 if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
3631 intf->retransmitted_lan_commands++;
3633 intf->retransmitted_ipmb_commands++;
3634 spin_unlock(&intf->counter_lock);
3636 smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
/* Drop seq_lock across the driver callback; it may sleep/reenter. */
3641 spin_unlock_irqrestore(&intf->seq_lock, *flags);
3643 /* Send the new message. We send with a zero
3644 * priority. It timed out, I doubt time is
3645 * that critical now, and high priority
3646 * messages are really only for messages to the
3647 * local MC, which don't get resent. */
3648 handlers = intf->handlers;
3650 intf->handlers->sender(intf->send_info,
3653 ipmi_free_smi_msg(smi_msg);
3655 spin_lock_irqsave(&intf->seq_lock, *flags);
/*
 * Periodic work for every interface: retry messages parked on
 * waiting_msgs (stopping at the first one that cannot be handled, to
 * preserve ordering), age the sequence table and deliver timeout
 * errors for expired entries, and count down the automatic
 * maintenance-mode window, switching maintenance mode off when it
 * lapses.  timeout_period is the elapsed time to charge, in the same
 * units as the seq-table timeouts.
 */
3659 static void ipmi_timeout_handler(long timeout_period)
3662 struct list_head timeouts;
3663 struct ipmi_recv_msg *msg, *msg2;
3664 struct ipmi_smi_msg *smi_msg, *smi_msg2;
3665 unsigned long flags;
3669 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3670 /* See if any waiting messages need to be processed. */
3671 spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
3672 list_for_each_entry_safe(smi_msg, smi_msg2,
3673 &intf->waiting_msgs, link) {
3674 if (!handle_new_recv_msg(intf, smi_msg)) {
3675 list_del(&smi_msg->link);
3676 ipmi_free_smi_msg(smi_msg);
3678 /* To preserve message order, quit if we
3679 can't handle a message. */
3683 spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
3685 /* Go through the seq table and find any messages that
3686 have timed out, putting them in the timeouts
3688 INIT_LIST_HEAD(&timeouts);
3689 spin_lock_irqsave(&intf->seq_lock, flags);
3690 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
3691 check_msg_timeout(intf, &(intf->seq_table[i]),
3692 &timeouts, timeout_period, i,
3694 spin_unlock_irqrestore(&intf->seq_lock, flags);
/* Deliver the timeout errors outside seq_lock. */
3696 list_for_each_entry_safe(msg, msg2, &timeouts, link)
3697 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
3700 * Maintenance mode handling. Check the timeout
3701 * optimistically before we claim the lock. It may
3702 * mean a timeout gets missed occasionally, but that
3703 * only means the timeout gets extended by one period
3704 * in that case. No big deal, and it avoids the lock
3707 if (intf->auto_maintenance_timeout > 0) {
3708 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
3709 if (intf->auto_maintenance_timeout > 0) {
3710 intf->auto_maintenance_timeout
3712 if (!intf->maintenance_mode
3713 && (intf->auto_maintenance_timeout <= 0))
3715 intf->maintenance_mode_enable = 0;
3716 maintenance_mode_update(intf);
3719 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
/*
 * Ask every registered interface's driver to poll the BMC for queued
 * events, except interfaces currently in maintenance mode.  Runs from
 * the periodic timer.
 */
3726 static void ipmi_request_event(void)
3729 struct ipmi_smi_handlers *handlers;
3732 /* Called from the timer, no need to check if handlers is
3734 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3735 /* No event requests when in maintenance mode. */
3736 if (intf->maintenance_mode_enable)
3739 handlers = intf->handlers;
3741 handlers->request_events(intf->send_info);
/* Periodic housekeeping timer driving ipmi_timeout() below. */
3746 static struct timer_list ipmi_timer;
3748 /* Call every ~100 ms. */
3749 #define IPMI_TIMEOUT_TIME 100
3751 /* How many jiffies does it take to get to the timeout time. */
3752 #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
3754 /* Request events from the queue every second (this is the number of
3755 IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
3756 future, IPMI will add a way to know immediately if an event is in
3757 the queue and this silliness can go away. */
3758 #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
/* Set non-zero at module shutdown to stop the timer re-arming. */
3760 static atomic_t stop_operation;
/* Ticks remaining until the next event-queue poll. */
3761 static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
/*
 * Timer callback: every tick run the timeout machinery, and once per
 * IPMI_REQUEST_EV_TIME ticks also poll for queued events.  Re-arms
 * itself unless stop_operation has been set.
 */
3763 static void ipmi_timeout(unsigned long data)
3765 if (atomic_read(&stop_operation))
3769 if (ticks_to_req_ev == 0) {
3770 ipmi_request_event();
3771 ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
3774 ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
3776 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* Debug counters tracking live message objects (leak detection). */
3780 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
3781 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
3783 /* FIXME - convert these to slabs. */
/* Destructor installed in ->done: drop the in-use count and free. */
3784 static void free_smi_msg(struct ipmi_smi_msg *msg)
3786 atomic_dec(&smi_msg_inuse_count);
/*
 * Allocate an smi_msg with its done handler pre-set.  GFP_ATOMIC, so
 * callers must tolerate a NULL return.
 */
3790 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
3792 struct ipmi_smi_msg *rv;
3793 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
3795 rv->done = free_smi_msg;
3796 rv->user_data = NULL;
3797 atomic_inc(&smi_msg_inuse_count);
/* Destructor installed in ->done: drop the in-use count and free. */
3802 static void free_recv_msg(struct ipmi_recv_msg *msg)
3804 atomic_dec(&recv_msg_inuse_count);
/*
 * Allocate a recv_msg with its done handler pre-set.  GFP_ATOMIC, so
 * callers must tolerate a NULL return.
 */
3808 struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
3810 struct ipmi_recv_msg *rv;
3812 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
3815 rv->done = free_recv_msg;
3816 atomic_inc(&recv_msg_inuse_count);
/* Release a recv_msg, dropping its user reference if one is held. */
3821 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
3824 kref_put(&msg->user->refcount, free_user);
3828 #ifdef CONFIG_IPMI_PANIC_EVENT
/* No-op done handlers for the stack-allocated messages used while
   panicking, where nothing must be freed. */
3830 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
3834 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
3838 #ifdef CONFIG_IPMI_PANIC_STRING
/*
 * null_user_handler used during panic-string dumping: when the Get
 * Event Receiver response arrives, record the receiver's slave address
 * and LUN on the interface.
 */
3839 static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3841 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3842 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
3843 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
3844 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3846 /* A get event receiver command, save it. */
3847 intf->event_receiver = msg->msg.data[1];
3848 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
/*
 * null_user_handler used during panic-string dumping: when the Get
 * Device ID response arrives, record whether the local MC is an SEL
 * device and/or an event generator (bits of the Additional Device
 * Support byte).
 */
3852 static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
3854 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
3855 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
3856 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
3857 && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
3859 /* A get device id command, save if we are an event
3860 receiver or generator. */
3861 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
3862 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
/*
 * On panic, emit an "OS Critical Stop" platform event on every
 * registered interface (in run_to_completion mode, since interrupts
 * and scheduling are gone).  With CONFIG_IPMI_PANIC_STRING, also chop
 * the panic string into 11-byte chunks and append them as OEM SEL
 * records, addressed either to the discovered event receiver, or to
 * the local SEL device, or dropped if neither exists.  All message
 * structures live on the stack with no-op done handlers because
 * nothing can be freed safely during a panic.
 */
3867 static void send_panic_events(char *str)
3869 struct kernel_ipmi_msg msg;
3871 unsigned char data[16];
3872 struct ipmi_system_interface_addr *si;
3873 struct ipmi_addr addr;
3874 struct ipmi_smi_msg smi_msg;
3875 struct ipmi_recv_msg recv_msg;
3877 si = (struct ipmi_system_interface_addr *) &addr;
3878 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3879 si->channel = IPMI_BMC_CHANNEL;
3882 /* Fill in an event telling that we have failed. */
3883 msg.netfn = 0x04; /* Sensor or Event. */
3884 msg.cmd = 2; /* Platform event command. */
3887 data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
3888 data[1] = 0x03; /* This is for IPMI 1.0. */
3889 data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
3890 data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
3891 data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
3893 /* Put a few breadcrumbs in. Hopefully later we can add more things
3894 to make the panic events more useful. */
/* Stack messages must not be freed: install no-op done handlers. */
3901 smi_msg.done = dummy_smi_done_handler;
3902 recv_msg.done = dummy_recv_done_handler;
3904 /* For every registered interface, send the event. */
3905 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3906 if (!intf->handlers)
3907 /* Interface is not ready. */
3910 intf->run_to_completion = 1;
3911 /* Send the event announcing the panic. */
3912 intf->handlers->set_run_to_completion(intf->send_info, 1);
3913 i_ipmi_request(NULL,
3922 intf->channels[0].address,
3923 intf->channels[0].lun,
3924 0, 1); /* Don't retry, and don't wait. */
3927 #ifdef CONFIG_IPMI_PANIC_STRING
3928 /* On every interface, dump a bunch of OEM event holding the
3933 /* For every registered interface, send the event. */
3934 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
3936 struct ipmi_ipmb_addr *ipmb;
3939 if (intf->intf_num == -1)
3940 /* Interface was not ready yet. */
3944 * intf_num is used as an marker to tell if the
3945 * interface is valid. Thus we need a read barrier to
3946 * make sure data fetched before checking intf_num
3951 /* First job here is to figure out where to send the
3952 OEM events. There's no way in IPMI to send OEM
3953 events using an event send command, so we have to
3954 find the SEL to put them in and stick them in
3957 /* Get capabilities from the get device id. */
3958 intf->local_sel_device = 0;
3959 intf->local_event_generator = 0;
3960 intf->event_receiver = 0;
3962 /* Request the device info from the local MC. */
3963 msg.netfn = IPMI_NETFN_APP_REQUEST;
3964 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
/* Responses come back synchronously through this handler. */
3967 intf->null_user_handler = device_id_fetcher;
3968 i_ipmi_request(NULL,
3977 intf->channels[0].address,
3978 intf->channels[0].lun,
3979 0, 1); /* Don't retry, and don't wait. */
3981 if (intf->local_event_generator) {
3982 /* Request the event receiver from the local MC. */
3983 msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
3984 msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
3987 intf->null_user_handler = event_receiver_fetcher;
3988 i_ipmi_request(NULL,
3997 intf->channels[0].address,
3998 intf->channels[0].lun,
3999 0, 1); /* no retry, and no wait. */
4001 intf->null_user_handler = NULL;
4003 /* Validate the event receiver. The low bit must not
4004 be 1 (it must be a valid IPMB address), it cannot
4005 be zero, and it must not be my address. */
4006 if (((intf->event_receiver & 1) == 0)
4007 && (intf->event_receiver != 0)
4008 && (intf->event_receiver != intf->channels[0].address))
4010 /* The event receiver is valid, send an IPMB
4012 ipmb = (struct ipmi_ipmb_addr *) &addr;
4013 ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
4014 ipmb->channel = 0; /* FIXME - is this right? */
4015 ipmb->lun = intf->event_receiver_lun;
4016 ipmb->slave_addr = intf->event_receiver;
4017 } else if (intf->local_sel_device) {
4018 /* The event receiver was not valid (or was
4019 me), but I am an SEL device, just dump it
4021 si = (struct ipmi_system_interface_addr *) &addr;
4022 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4023 si->channel = IPMI_BMC_CHANNEL;
4026 continue; /* No where to send the event. */
4029 msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
4030 msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
4036 int size = strlen(p);
4042 data[2] = 0xf0; /* OEM event without timestamp. */
4043 data[3] = intf->channels[0].address;
4044 data[4] = j++; /* sequence # */
4045 /* Always give 11 bytes, so strncpy will fill
4046 it with zeroes for me. */
4047 strncpy(data+5, p, 11);
4050 i_ipmi_request(NULL,
4059 intf->channels[0].address,
4060 intf->channels[0].lun,
4061 0, 1); /* no retry, and no wait. */
4064 #endif /* CONFIG_IPMI_PANIC_STRING */
4066 #endif /* CONFIG_IPMI_PANIC_EVENT */
/* Set once the panic path has run, so a nested/recursive panic does not
 * re-enter the IPMI panic handling.  NOTE(review): the code that reads
 * and sets this flag is not visible in this excerpt — confirm it is
 * checked at the top of panic_event(). */
4068 static int has_panicked;
/*
 * Kernel panic-notifier callback.  On panic, switch every registered
 * IPMI interface into run-to-completion (polled) mode so messages can
 * still be driven out with interrupts disabled, then — if
 * CONFIG_IPMI_PANIC_EVENT is enabled — report the panic to the BMC via
 * send_panic_events().
 *
 * NOTE(review): this excerpt is missing intermediate lines (the third
 * parameter, the statement following the "not ready" check — presumably
 * a continue — the loop's closing brace, and the final return value),
 * so the control flow shown here is incomplete.
 */
4070 static int panic_event(struct notifier_block *this,
4071 unsigned long event,
4080 /* For every registered interface, set it to run to completion. */
4081 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4082 if (!intf->handlers)
4083 /* Interface is not ready. */
4086 intf->run_to_completion = 1;
/* Tell the low-level (SMI) driver to stop relying on interrupts/threads. */
4087 intf->handlers->set_run_to_completion(intf->send_info, 1);
4090 #ifdef CONFIG_IPMI_PANIC_EVENT
4091 send_panic_events(ptr);
/*
 * Hook registered on the kernel's panic_notifier_list (see
 * ipmi_init_msghandler).  Priority 200 so IPMI runs ahead of
 * lower-priority panic handlers.
 */
4097 static struct notifier_block panic_block = {
4098 .notifier_call = panic_event,
4100 .priority = 200 /* priority: INT_MAX >= x >= 0 */
/*
 * One-time core initialisation of the message handler: register the
 * IPMI platform driver, create the /proc/ipmi directory, start the
 * periodic message timer, and register on the panic notifier chain.
 *
 * Returns 0 on success; on driver_register() failure the error is
 * propagated.  NOTE(review): the "already initialized" guard, the
 * error-path returns, and the final return statement are not visible
 * in this excerpt.
 */
4103 static int ipmi_init_msghandler(void)
4110 rv = driver_register(&ipmidriver);
4112 printk(KERN_ERR PFX "Could not register IPMI driver\n");
4116 printk(KERN_INFO "ipmi message handler version "
4117 IPMI_DRIVER_VERSION "\n");
4119 #ifdef CONFIG_PROC_FS
4120 proc_ipmi_root = proc_mkdir("ipmi", NULL);
4121 if (!proc_ipmi_root) {
4122 printk(KERN_ERR PFX "Unable to create IPMI proc dir");
4126 proc_ipmi_root->owner = THIS_MODULE;
4127 #endif /* CONFIG_PROC_FS */
/* Arm the periodic timeout that drives message retries/expiry. */
4129 setup_timer(&ipmi_timer, ipmi_timeout, 0);
4130 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
/* From here on, panic_event() will be called on a system panic. */
4132 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
/* Module entry point: delegates to the shared init routine (which is
 * also declared for use by built-in callers — see the forward
 * declaration at the top of the file).  NOTE(review): the body's
 * braces and return statement are not visible in this excerpt. */
4139 static __init int ipmi_init_msghandler_mod(void)
4141 ipmi_init_msghandler();
/*
 * Module exit: undo everything ipmi_init_msghandler() set up —
 * unregister the panic notifier, stop the periodic timer, remove the
 * /proc/ipmi entry, unregister the platform driver — then warn about
 * any leaked message buffers.
 *
 * NOTE(review): the initialized-guard, braces, and the printk argument
 * lines are not visible in this excerpt.
 */
4145 static __exit void cleanup_ipmi(void)
4152 atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
4154 /* This can't be called if any interfaces exist, so no worry about
4155 shutting down the interfaces. */
4157 /* Tell the timer to stop, then wait for it to stop. This avoids
4158 problems with race conditions removing the timer here. */
/* stop_operation is presumably checked by ipmi_timeout() so it does not
 * re-arm itself — TODO confirm against the timer handler. */
4159 atomic_inc(&stop_operation);
4160 del_timer_sync(&ipmi_timer);
4162 #ifdef CONFIG_PROC_FS
4163 remove_proc_entry(proc_ipmi_root->name, NULL);
4164 #endif /* CONFIG_PROC_FS */
4166 driver_unregister(&ipmidriver);
4170 /* Check for buffer leaks. */
4171 count = atomic_read(&smi_msg_inuse_count);
4173 printk(KERN_WARNING PFX "SMI message count %d at exit\n",
4175 count = atomic_read(&recv_msg_inuse_count);
4177 printk(KERN_WARNING PFX "recv message count %d at exit\n",
/* Module entry/exit wiring and metadata. */
4180 module_exit(cleanup_ipmi);
4182 module_init(ipmi_init_msghandler_mod);
4183 MODULE_LICENSE("GPL");
4184 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
4185 MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
4186 MODULE_VERSION(IPMI_DRIVER_VERSION);
/*
 * Public API exported to other IPMI kernel modules (SMI drivers, the
 * device interface, the watchdog, etc.).
 */
4188 EXPORT_SYMBOL(ipmi_create_user);
4189 EXPORT_SYMBOL(ipmi_destroy_user);
4190 EXPORT_SYMBOL(ipmi_get_version);
4191 EXPORT_SYMBOL(ipmi_request_settime);
4192 EXPORT_SYMBOL(ipmi_request_supply_msgs);
4193 EXPORT_SYMBOL(ipmi_poll_interface);
4194 EXPORT_SYMBOL(ipmi_register_smi);
4195 EXPORT_SYMBOL(ipmi_unregister_smi);
4196 EXPORT_SYMBOL(ipmi_register_for_cmd);
4197 EXPORT_SYMBOL(ipmi_unregister_for_cmd);
4198 EXPORT_SYMBOL(ipmi_smi_msg_received);
4199 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4200 EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4201 EXPORT_SYMBOL(ipmi_addr_length);
4202 EXPORT_SYMBOL(ipmi_validate_addr);
4203 EXPORT_SYMBOL(ipmi_set_gets_events);
4204 EXPORT_SYMBOL(ipmi_smi_watcher_register);
4205 EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
4206 EXPORT_SYMBOL(ipmi_set_my_address);
4207 EXPORT_SYMBOL(ipmi_get_my_address);
4208 EXPORT_SYMBOL(ipmi_set_my_LUN);
4209 EXPORT_SYMBOL(ipmi_get_my_LUN);
4210 EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
4211 EXPORT_SYMBOL(ipmi_free_recv_msg);